From 778b7819059e963cfe74e042028683a42db79725 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 19 Mar 2024 11:24:59 +0100 Subject: [PATCH 001/179] Remove outdated `--silence-libraries` switch from script --- maintainer/test-ploughshare-conversion.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainer/test-ploughshare-conversion.sh b/maintainer/test-ploughshare-conversion.sh index fbcdf7015..0bb0a41da 100755 --- a/maintainer/test-ploughshare-conversion.sh +++ b/maintainer/test-ploughshare-conversion.sh @@ -100,7 +100,7 @@ for grid in ${grids[@]}; do fi converted_grid="${grid}".pineappl.lz4 - pineappl --silence-lhapdf import --silence-libraries --accuracy 1e-12 "${grid}" "${converted_grid}" NNPDF31_nnlo_as_0118_luxqed + pineappl --silence-lhapdf import --accuracy 1e-12 "${grid}" "${converted_grid}" NNPDF31_nnlo_as_0118_luxqed du -h "${grid}" "${converted_grid}" done rm -r ${archive%.tgz} From 5aac006fd0d66a61990518b68812f3b6a7db28e5 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 26 Mar 2024 13:08:00 +0100 Subject: [PATCH 002/179] Switch from `lhapdf` to `managed-lhapdf` --- CHANGELOG.md | 2 ++ pineappl/Cargo.toml | 2 +- pineappl/tests/drell_yan_lo.rs | 2 -- pineappl_cli/Cargo.toml | 2 +- pineappl_cli/src/helpers.rs | 23 ++---------------- pineappl_cli/src/plot.rs | 12 +++++----- pineappl_cli/src/pull.rs | 4 ++-- pineappl_cli/src/uncert.rs | 2 +- pineappl_cli/tests/convolute.rs | 42 --------------------------------- 9 files changed, 15 insertions(+), 76 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c4993c38f..c0281e2e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - `Grid::evolve` has now been marked deprecated +- switched from `lhapdf` to `managed-lhapdf` crate which automatically + downloads PDF sets when they are needed ### Fixed diff --git a/pineappl/Cargo.toml b/pineappl/Cargo.toml index 
ebbc0f34d..cf03c9b5a 100644 --- a/pineappl/Cargo.toml +++ b/pineappl/Cargo.toml @@ -33,7 +33,7 @@ thiserror = "1.0.30" [dev-dependencies] anyhow = "1.0.48" -lhapdf = "0.2.0" +lhapdf = { git = "https://github.com/cschwan/managed-lhapdf.git", package = "managed-lhapdf" } num-complex = "0.4.4" rand = { default-features = false, version = "0.8.4" } rand_pcg = { default-features = false, version = "0.3.1" } diff --git a/pineappl/tests/drell_yan_lo.rs b/pineappl/tests/drell_yan_lo.rs index 4d3196d53..79d6816f3 100644 --- a/pineappl/tests/drell_yan_lo.rs +++ b/pineappl/tests/drell_yan_lo.rs @@ -285,8 +285,6 @@ fn perform_grid_tests( let pdf_set = "NNPDF31_nlo_as_0118_luxqed"; - assert!(lhapdf::available_pdf_sets().iter().any(|x| x == pdf_set)); - let pdf = Pdf::with_setname_and_member(pdf_set, 0)?; let mut xfx = |id, x, q2| pdf.xfx_q2(id, x, q2); let mut alphas = |_| 0.0; diff --git a/pineappl_cli/Cargo.toml b/pineappl_cli/Cargo.toml index fbbef62b5..b6ad73633 100644 --- a/pineappl_cli/Cargo.toml +++ b/pineappl_cli/Cargo.toml @@ -26,7 +26,7 @@ flate2 = { optional = true, version = "1.0.22" } float-cmp = "0.9.0" git-version = "0.3.5" itertools = "0.10.1" -lhapdf = "0.2.1" +lhapdf = { git = "https://github.com/cschwan/managed-lhapdf.git", package = "managed-lhapdf" } lz4_flex = { optional = true, version = "0.9.2" } ndarray = "0.15.4" ndarray-npy = { optional = true, version = "0.8.1" } diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 283d0d865..c9b814ad3 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -282,27 +282,8 @@ pub fn convolute_subgrid( } pub fn parse_pdfset(argument: &str) -> std::result::Result { - let lhapdf_name = argument.rsplit_once('=').map_or(argument, |(name, _)| name); - - if let Ok(lhaid) = lhapdf_name.parse() { - if lhapdf::lookup_pdf(lhaid).is_some() { - return Ok(argument.to_owned()); - } - - return Err(format!( - "The PDF set for the LHAPDF ID `{lhapdf_name}` was not found" - )); - } else if 
lhapdf::available_pdf_sets().iter().any(|set| { - // there's no function in LHAPDF to validate the 'setname/member' syntax; there is a - // function that returns the LHAPDF ID, but that ID might not exist - *set == lhapdf_name - .split_once('/') - .map_or(lhapdf_name, |(setname, _)| setname) - }) { - return Ok(argument.to_owned()); - } - - Err(format!("The PDF set `{lhapdf_name}` was not found")) + // TODO: figure out how to validate `argument` with `managed-lhapdf` + Ok(argument.to_owned()) } pub fn parse_integer_range(range: &str) -> Result> { diff --git a/pineappl_cli/src/plot.rs b/pineappl_cli/src/plot.rs index 9749068ab..a1a203bed 100644 --- a/pineappl_cli/src/plot.rs +++ b/pineappl_cli/src/plot.rs @@ -272,12 +272,12 @@ impl Subcommand for Opts { let results = helpers::convolute(&grid, &mut pdf, &[], &bins, &[], 1, mode, cfg); - vec![results; 3] + Ok(vec![results; 3]) } else { let (set, member) = helpers::create_pdfset(pdfset).unwrap(); let pdf_results: Vec<_> = set - .mk_pdfs() + .mk_pdfs()? 
.into_par_iter() .flat_map(|mut pdf| { helpers::convolute( @@ -316,10 +316,10 @@ impl Subcommand for Opts { max.push(uncertainty.central + uncertainty.errplus); } - vec![central, min, max] + Ok(vec![central, min, max]) } }) - .collect(); + .collect::>()?; let central: Vec<_> = results.iter().step_by(self.scales).copied().collect(); let min: Vec<_> = results @@ -517,8 +517,8 @@ impl Subcommand for Opts { let (set1, member1) = helpers::create_pdfset(pdfset1)?; let (set2, member2) = helpers::create_pdfset(pdfset2)?; - let mut pdfset1 = set1.mk_pdfs(); - let mut pdfset2 = set2.mk_pdfs(); + let mut pdfset1 = set1.mk_pdfs()?; + let mut pdfset2 = set2.mk_pdfs()?; let values1: Vec<_> = pdfset1 .par_iter_mut() diff --git a/pineappl_cli/src/pull.rs b/pineappl_cli/src/pull.rs index 5b35a5861..e57a61530 100644 --- a/pineappl_cli/src/pull.rs +++ b/pineappl_cli/src/pull.rs @@ -53,8 +53,8 @@ impl Subcommand for Opts { let (set1, member1) = helpers::create_pdfset(&self.pdfset1)?; let (set2, member2) = helpers::create_pdfset(&self.pdfset2)?; - let mut pdfset1 = set1.mk_pdfs(); - let mut pdfset2 = set2.mk_pdfs(); + let mut pdfset1 = set1.mk_pdfs()?; + let mut pdfset2 = set2.mk_pdfs()?; ThreadPoolBuilder::new() .num_threads(self.threads) diff --git a/pineappl_cli/src/uncert.rs b/pineappl_cli/src/uncert.rs index b08ecc998..515f87e89 100644 --- a/pineappl_cli/src/uncert.rs +++ b/pineappl_cli/src/uncert.rs @@ -116,7 +116,7 @@ impl Subcommand for Opts { .build_global() .unwrap(); - set.mk_pdfs() + set.mk_pdfs()? 
.into_par_iter() .flat_map(|mut pdf| { helpers::convolute( diff --git a/pineappl_cli/tests/convolute.rs b/pineappl_cli/tests/convolute.rs index 69843f7bd..50cc18106 100644 --- a/pineappl_cli/tests/convolute.rs +++ b/pineappl_cli/tests/convolute.rs @@ -110,18 +110,6 @@ const THREE_PDFS_STR: &str = 7 4 4.5 2.7517266e1 2.7259743e1 -0.94 2.8446007e1 3.38 "; -const WRONG_LHAID_STR: &str = - "error: invalid value '0' for '...': The PDF set for the LHAPDF ID `0` was not found - -For more information, try '--help'. -"; - -const WRONG_PDFSET_STR: &str = - "error: invalid value 'IDONTEXIST' for '...': The PDF set `IDONTEXIST` was not found - -For more information, try '--help'. -"; - const BINS_13567_STR: &str = "b etal dsig/detal [] [pb] -+----+---+----------- @@ -299,36 +287,6 @@ fn three_pdfs() { .stdout(THREE_PDFS_STR); } -#[test] -fn wrong_lhaid() { - Command::cargo_bin("pineappl") - .unwrap() - .args([ - "--silence-lhapdf", - "convolute", - "../test-data/LHCB_WP_7TEV.pineappl.lz4", - "0", - ]) - .assert() - .failure() - .stderr(WRONG_LHAID_STR); -} - -#[test] -fn wrong_pdfset() { - Command::cargo_bin("pineappl") - .unwrap() - .args([ - "--silence-lhapdf", - "convolute", - "../test-data/LHCB_WP_7TEV.pineappl.lz4", - "IDONTEXIST", - ]) - .assert() - .failure() - .stderr(WRONG_PDFSET_STR); -} - #[test] fn bins_13567() { Command::cargo_bin("pineappl") From d6c3e2665577adcd9d2aa189982eac53780c88e7 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 26 Mar 2024 13:25:35 +0100 Subject: [PATCH 003/179] Update lock file --- Cargo.lock | 841 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 819 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cec591a4c..04897385c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,15 @@ # It is not intended for manual editing. 
version = 3 +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + [[package]] name = "adler" version = "1.0.2" @@ -52,7 +61,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" dependencies = [ - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -62,7 +71,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" dependencies = [ "anstyle", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -113,6 +122,21 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "backtrace" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + [[package]] name = "base64" version = "0.21.7" @@ -160,12 +184,24 @@ dependencies = [ "serde", ] +[[package]] +name = "bumpalo" +version = "3.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" + [[package]] name = "byteorder" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" +[[package]] +name = "bytes" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" + [[package]] name = "cc" version = 
"1.0.89" @@ -253,7 +289,7 @@ dependencies = [ "encode_unicode 0.3.6", "lazy_static", "libc", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -369,6 +405,15 @@ dependencies = [ "crypto-common", ] +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + [[package]] name = "dirs-next" version = "2.0.0" @@ -379,6 +424,18 @@ dependencies = [ "dirs-sys-next", ] +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + [[package]] name = "dirs-sys-next" version = "0.1.2" @@ -442,7 +499,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -460,7 +517,7 @@ dependencies = [ "cfg-if", "libc", "redox_syscall", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -482,6 +539,71 @@ dependencies = [ "num-traits", ] +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", + 
"futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-util" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -503,6 +625,12 @@ dependencies = [ "wasi", ] +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + [[package]] name = "git-version" version = "0.3.9" @@ -565,6 +693,112 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" 
+version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "hyper" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "httparse", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" +dependencies = [ + "futures-util", + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "pin-project-lite", + "socket2", + "tokio", + "tower", + "tower-service", + "tracing", +] + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "ignore" version = "0.4.22" @@ -609,6 +843,12 @@ version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e186cfbae8084e513daff4240b4797e342f988cecda4fb6c939150f96315fd8" +[[package]] +name = "ipnet" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" + [[package]] name = "is-terminal" version = "0.4.10" @@ -617,7 +857,7 @@ checksum = "0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455" dependencies = [ "hermit-abi", "rustix", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -636,22 +876,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] -name = "lazy_static" -version = "1.4.0" +name = "js-sys" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +dependencies = [ + "wasm-bindgen", +] [[package]] -name = "lhapdf" -version = "0.2.4" +name = "lazy_static" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f6092fc26088a6878f4a6694379aa5fb9d8d3ba2992930b68c45124600a2da7" -dependencies = [ - "cxx", - "cxx-build", - "pkg-config", - "thiserror", -] +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" @@ -710,6 +947,24 @@ dependencies = [ "twox-hash", ] +[[package]] +name = "managed-lhapdf" +version = "0.2.4" +source = "git+https://github.com/cschwan/managed-lhapdf.git#e3b18307adf41d82baceaebad995d542e6a3811e" +dependencies = [ + "anyhow", + "cxx", + 
"cxx-build", + "dirs", + "flate2", + "pkg-config", + "reqwest", + "serde", + "tar", + "thiserror", + "toml", +] + [[package]] name = "matrixmultiply" version = "0.3.8" @@ -735,6 +990,12 @@ dependencies = [ "autocfg", ] +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + [[package]] name = "miniz_oxide" version = "0.7.1" @@ -744,6 +1005,17 @@ dependencies = [ "adler", ] +[[package]] +name = "mio" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.48.0", +] + [[package]] name = "ndarray" version = "0.15.6" @@ -817,6 +1089,16 @@ dependencies = [ "autocfg", ] +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + [[package]] name = "number_prefix" version = "0.4.0" @@ -838,12 +1120,27 @@ dependencies = [ "rustc-hash", ] +[[package]] +name = "object" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] + [[package]] name = "once_cell" version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "parking_lot" version = "0.12.1" @@ -867,6 +1164,12 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = 
"percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + [[package]] name = "pest" version = "2.7.6" @@ -912,6 +1215,38 @@ dependencies = [ "sha2", ] +[[package]] +name = "pin-project" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + [[package]] name = "pineappl" version = "0.7.4-rc.1" @@ -925,8 +1260,8 @@ dependencies = [ "git-version", "indicatif", "itertools", - "lhapdf", "lz4_flex", + "managed-lhapdf", "ndarray", "ndarray-npy", "num-complex", @@ -972,8 +1307,8 @@ dependencies = [ "float-cmp", "git-version", "itertools", - "lhapdf", "lz4_flex", + "managed-lhapdf", "ndarray", "ndarray-npy", "pineappl", @@ -1247,12 +1582,75 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +[[package]] +name = "reqwest" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d66674f2b6fb864665eea7a3c1ac4e3dfacd2fda83cf6f935a612e01b0e3338" +dependencies = [ + "base64", + "bytes", 
+ "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls", + "rustls-pemfile", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots", + "winreg", +] + +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if", + "getrandom", + "libc", + "spin", + "untrusted", + "windows-sys 0.52.0", +] + [[package]] name = "roff" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b833d8d034ea094b1ea68aa6d5c740e0d04bad9d16568d08ba6f76823a114316" +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + [[package]] name = "rustc-hash" version = "1.1.0" @@ -1269,7 +1667,47 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" +dependencies = [ + "log", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64", +] + +[[package]] +name = "rustls-pki-types" +version = "1.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "868e20fada228fefaf6b652e00cc73623d54f8171e7352c18bb281571f2d92da" + +[[package]] +name = "rustls-webpki" +version = "0.102.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", ] [[package]] @@ -1325,6 +1763,38 @@ dependencies = [ "syn", ] +[[package]] +name = "serde_json" +version = "1.0.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + [[package]] name = "serde_yaml" version = "0.9.30" @@ -1349,12 +1819,37 @@ dependencies = [ "digest", ] +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + [[package]] name = "smallvec" version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +[[package]] +name = "socket2" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +dependencies = [ + "libc", + "windows-sys 
0.52.0", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "static_assertions" version = "1.1.0" @@ -1367,6 +1862,12 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + [[package]] name = "syn" version = "2.0.48" @@ -1378,6 +1879,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "tar" version = "0.4.40" @@ -1405,7 +1912,7 @@ dependencies = [ "fastrand", "redox_syscall", "rustix", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -1454,6 +1961,136 @@ dependencies = [ "syn", ] +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "pin-project-lite", + "socket2", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-rustls" +version 
= "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls", + "rustls-pki-types", + "tokio", +] + +[[package]] +name = "toml" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e40bb779c5187258fd7aad0eb68cb8706a0a81fa712fbea808ab43c4b8374c4" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" 
+dependencies = [ + "log", + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + [[package]] name = "twox-hash" version = "1.6.3" @@ -1476,12 +2113,27 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +[[package]] +name = "unicode-bidi" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +[[package]] +name = "unicode-normalization" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +dependencies = [ + "tinyvec", +] + [[package]] name = "unicode-width" version = "0.1.11" @@ -1500,6 +2152,23 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + [[package]] name = "utf8parse" version = "0.2.1" @@ -1531,12 +2200,106 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasm-bindgen" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" + +[[package]] +name = "web-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3de34ae270483955a94f4b21bdaaeb83d508bb84a01435f393818edb0012009" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "winapi" version = "0.3.9" @@ -1568,6 +2331,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + [[package]] name = "windows-sys" version = "0.52.0" @@ -1691,6 +2463,25 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +[[package]] +name = "winnow" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8" +dependencies = [ + "memchr", +] + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + [[package]] name = "xattr" version = "1.3.1" @@ -1713,6 +2504,12 @@ dependencies = [ "pineappl_cli", ] +[[package]] +name = "zeroize" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" + [[package]] name = "zip" version = "0.5.13" From 7dba671a1ee55846a191eb0bf2a034e8a2a8e000 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 27 Mar 2024 15:45:38 +0100 Subject: [PATCH 004/179] Add support for static linking --- pineappl_applgrid/Cargo.toml | 3 +++ pineappl_applgrid/build.rs | 25 +++++++++++++++++++------ pineappl_cli/Cargo.toml | 1 + pineappl_fastnlo/Cargo.toml | 3 +++ pineappl_fastnlo/build.rs | 8 +++++++- 5 files changed, 33 insertions(+), 7 deletions(-) diff --git a/pineappl_applgrid/Cargo.toml b/pineappl_applgrid/Cargo.toml index f5a9f6932..6eb5f4164 100644 --- a/pineappl_applgrid/Cargo.toml +++ b/pineappl_applgrid/Cargo.toml @@ -22,3 +22,6 @@ cxx = "1.0.65" cc = "1.0.49" cxx-build = "1.0.65" pkg-config = "0.3.26" + +[features] +static = [] diff --git a/pineappl_applgrid/build.rs b/pineappl_applgrid/build.rs index ae521bb56..c30b2f9a5 100644 --- a/pineappl_applgrid/build.rs +++ b/pineappl_applgrid/build.rs @@ -1,6 +1,7 @@ #![allow(missing_docs)] use cc::Build; +use pkg_config::Config; use std::env; use std::path::Path; use std::process::Command; @@ -59,11 +60,26 @@ fn main() { ) .unwrap(); + let link_modifier = if cfg!(feature = "static") { + // for some reason `libz.a` isn't found, although `libz.so` is + for link_path in Config::new().probe("zlib").unwrap().link_paths { + println!("cargo:rustc-link-search={}", link_path.to_str().unwrap()); + } + + "static=" + } else { + "" + }; + for lib in libs .split_whitespace() .filter_map(|token| token.strip_prefix("-l")) { - println!("cargo:rustc-link-lib={lib}"); + match lib 
{ + // we can't link gfortran statically - to avoid it compile APPLgrid without HOPPET + "gfortran" => println!("cargo:rustc-link-lib={lib}"), + _ => println!("cargo:rustc-link-lib={link_modifier}{lib}"), + } } Build::new() @@ -79,17 +95,14 @@ fn main() { println!("cargo:rerun-if-env-changed=APPL_IGRID_DIR"); - let lhapdf = pkg_config::Config::new() - .atleast_version("6") - .probe("lhapdf") - .unwrap(); + let lhapdf = Config::new().atleast_version("6").probe("lhapdf").unwrap(); for lib_path in lhapdf.link_paths { println!("cargo:rustc-link-search={}", lib_path.to_str().unwrap()); } for lib in lhapdf.libs { - println!("cargo:rustc-link-lib={lib}"); + println!("cargo:rustc-link-lib={link_modifier}{lib}"); } cxx_build::bridge("src/lib.rs") diff --git a/pineappl_cli/Cargo.toml b/pineappl_cli/Cargo.toml index b6ad73633..793cd7cf9 100644 --- a/pineappl_cli/Cargo.toml +++ b/pineappl_cli/Cargo.toml @@ -56,3 +56,4 @@ applgrid = ["dep:cxx", "dep:pineappl_applgrid"] evolve = ["dep:base64", "dep:either", "dep:tar", "dep:lz4_flex", "dep:ndarray-npy", "dep:serde", "dep:serde_yaml"] fastnlo = ["dep:pineappl_fastnlo"] fktable = ["dep:flate2", "dep:tar"] +static = ["lhapdf/static", "pineappl_applgrid?/static", "pineappl_fastnlo?/static"] diff --git a/pineappl_fastnlo/Cargo.toml b/pineappl_fastnlo/Cargo.toml index 729837be3..6d3e2a2a4 100644 --- a/pineappl_fastnlo/Cargo.toml +++ b/pineappl_fastnlo/Cargo.toml @@ -21,3 +21,6 @@ thiserror = "1.0.30" [build-dependencies] cxx-build = { version = "1.0.65" } + +[features] +static = [] diff --git a/pineappl_fastnlo/build.rs b/pineappl_fastnlo/build.rs index f8642882b..c384b86ee 100644 --- a/pineappl_fastnlo/build.rs +++ b/pineappl_fastnlo/build.rs @@ -23,7 +23,13 @@ fn main() { ) .unwrap(); - println!("cargo:rustc-link-lib=fastnlotoolkit"); + let link_modifier = if cfg!(feature = "static") { + "static=" + } else { + "" + }; + + println!("cargo:rustc-link-lib={link_modifier}fastnlotoolkit"); cxx_build::bridge("src/lib.rs") 
.file("src/fastnlo.cpp") From 52406e9ed3d87ecdc901707c24ed9302464e80b5 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 27 Mar 2024 16:00:40 +0100 Subject: [PATCH 005/179] Update `Cargo.lock` to solve building error --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 04897385c..3b5390adb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -950,7 +950,7 @@ dependencies = [ [[package]] name = "managed-lhapdf" version = "0.2.4" -source = "git+https://github.com/cschwan/managed-lhapdf.git#e3b18307adf41d82baceaebad995d542e6a3811e" +source = "git+https://github.com/cschwan/managed-lhapdf.git#789512841c3e6e1bb82216b83806a59702b18a5f" dependencies = [ "anyhow", "cxx", From 9c9e698940f093537a83e5948e3e9632ceb75832 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 27 Mar 2024 16:43:58 +0100 Subject: [PATCH 006/179] Enable installation of static libraries in container --- maintainer/pineappl-ci/script.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/maintainer/pineappl-ci/script.sh b/maintainer/pineappl-ci/script.sh index 49e65078d..304cdc6c3 100755 --- a/maintainer/pineappl-ci/script.sh +++ b/maintainer/pineappl-ci/script.sh @@ -30,7 +30,7 @@ rm -r /usr/local/cargo/registry ( curl "https://lhapdf.hepforge.org/downloads/?f=LHAPDF-${LHAPDF_V}.tar.gz" || \ curl "https://web.archive.org/web/20211018095814/https://lhapdf.hepforge.org/downloads/?f=LHAPDF-6.4.0.tar.gz" ) | tar xzf - cd LHAPDF-${LHAPDF_V} -./configure --disable-python --disable-static +./configure --disable-python make -j make install ldconfig @@ -45,7 +45,7 @@ done # install APPLgrid curl "https://applgrid.hepforge.org/downloads?f=applgrid-${APPLGRID_V}.tgz" | tar xzf - cd applgrid-${APPLGRID_V} -./configure --disable-static --without-root +./configure --without-root make -j make install ldconfig @@ -57,7 +57,7 @@ cd .. 
# install fastNLO curl "https://fastnlo.hepforge.org/code/v25/fastnlo_toolkit-${FASTNLO_V}.tar.gz" | tar xzf - cd fastnlo_toolkit-${FASTNLO_V} -./configure --disable-static --prefix=/usr/local/ +./configure --prefix=/usr/local/ make -j make install ldconfig From eb3310beb96000e83e9e7e86e65455efe4599f7e Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 27 Mar 2024 17:38:42 +0100 Subject: [PATCH 007/179] Remove Rust versions to save disk space --- maintainer/pineappl-ci/Containerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainer/pineappl-ci/Containerfile b/maintainer/pineappl-ci/Containerfile index 5db5c9b18..6981817f9 100644 --- a/maintainer/pineappl-ci/Containerfile +++ b/maintainer/pineappl-ci/Containerfile @@ -7,7 +7,7 @@ ARG FASTNLO_V=2.5.0-2826 ARG LHAPDF_V=6.4.0 # the last version is the default Rust version used in the container -ARG RUST_V="1.64.0 1.70.0 nightly-2024-01-25" +ARG RUST_V="1.70.0" ENV APPL_IGRID_DIR="/usr/local/src/applgrid-${APPLGRID_V}/src" ENV CARGO_HOME="/usr/local/cargo" From 2fa99e01518c6214b843d4d5bc18b01d9643e977 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 30 Mar 2024 10:00:06 +0100 Subject: [PATCH 008/179] Free disk space in container and prepare building the CLI (#267) * Upgrade version of checkout action * Query disk-space and largest packages * Free disk space when building the container * Remove more packages from image * Install `cargo-c` with `--locked` to lower MSRV * Install `zlib-static` into container * Add missing `-y` parameter * Disable some features to debug linking problems * Print compiler information inside container * Try static relocation model * Try `-no-pie` instead * Try both flags * Try with `-Clink-args=-pie` * Remove `-Clink-args=-pie` and only build static libraries * Compile packages in container with `--with-pic=no` * Re-enable dynamic libraries and PIC * Build with `-Crelocation-model=pic` in container * Build CLI only to locate the build failures * 
Add missing `--locked` switch * Do not use `--features=static` for CI tests * Try compiling the CLI with `-Crelocation-model=dynamic-no-pic` * Re-enable nightly Rust in the container * Compile dependencies in container always with PIC * Build the CLI with all features * Compile zlib manually with PIC in the container * Remove building CLI in Rust workflow --- .github/workflows/container.yml | 22 +++++++++++++++- .github/workflows/rust.yml | 5 ++-- maintainer/pineappl-ci/Containerfile | 4 ++- maintainer/pineappl-ci/script.sh | 38 +++++++++++++++++++++------- 4 files changed, 56 insertions(+), 13 deletions(-) diff --git a/.github/workflows/container.yml b/.github/workflows/container.yml index b4041ddb9..5fc727b75 100644 --- a/.github/workflows/container.yml +++ b/.github/workflows/container.yml @@ -11,7 +11,27 @@ jobs: publish: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + - name: Free disk space + run: | + # inspired by: https://github.com/apache/flink/blob/master/tools/azure-pipelines/free_disk_space.sh + df -h + # 100 largest packages, in ascending order + dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 + sudo apt-get remove -y google-cloud-cli + sudo apt-get remove -y azure-cli + sudo apt-get remove -y microsoft-edge-stable + sudo apt-get remove -y '^dotnet-.*' + sudo apt-get remove -y '^temurin-.*-jdk' + sudo apt-get remove -y google-chrome-stable + sudo apt-get remove -y '^llvm-.*-dev' + sudo apt-get remove -y firefox + sudo apt-get remove -y powershell + sudo apt-get remove -y mono-devel + sudo apt-get autoremove -y + sudo apt-get clean + # disk space after removing packages + df -h - name: Build image uses: redhat-actions/buildah-build@v2 with: diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 98b0798be..9a37e7ea8 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -9,7 +9,6 @@ defaults: env: CARGO_TERM_COLOR: always - RUSTFLAGS: 
'-Cinstrument-coverage' jobs: build: @@ -61,9 +60,11 @@ jobs: echo "RUSTDOCFLAGS=-Cinstrument-coverage -Z unstable-options --persist-doctests $(pwd)/target/debug/doctestbins" >> "$GITHUB_ENV" - name: Run tests + env: + RUSTFLAGS: '-Cinstrument-coverage' run: | # we need stderr, but we can't run test twice because it'll regenerate/modify the binaries which interferes with `llvm-cov` - cargo test --all-features --no-fail-fast 2> >(tee stderr 1>&2) + cargo test --features=applgrid,evolve,fastnlo,fktable --no-fail-fast 2> >(tee stderr 1>&2) # from https://stackoverflow.com/a/51141872/812178 sed -i 's/\x1B\[[0-9;]\{1,\}[A-Za-z]//g' stderr diff --git a/maintainer/pineappl-ci/Containerfile b/maintainer/pineappl-ci/Containerfile index 6981817f9..71da5db7e 100644 --- a/maintainer/pineappl-ci/Containerfile +++ b/maintainer/pineappl-ci/Containerfile @@ -5,9 +5,11 @@ ARG APPLGRID_V=1.6.27 ARG CARGOC_V=0.9.24+cargo-0.73.0 ARG FASTNLO_V=2.5.0-2826 ARG LHAPDF_V=6.4.0 +ARG ZLIB_V=1.3.1 # the last version is the default Rust version used in the container -ARG RUST_V="1.70.0" +# as long as we're using `persist-doctests` in the `Rust` workflow we need nightly as default +ARG RUST_V="1.70.0 nightly-2024-01-25" ENV APPL_IGRID_DIR="/usr/local/src/applgrid-${APPLGRID_V}/src" ENV CARGO_HOME="/usr/local/cargo" diff --git a/maintainer/pineappl-ci/script.sh b/maintainer/pineappl-ci/script.sh index 304cdc6c3..c5b613124 100755 --- a/maintainer/pineappl-ci/script.sh +++ b/maintainer/pineappl-ci/script.sh @@ -2,6 +2,13 @@ set -euo pipefail +# print this so we can see whether the compiler/linker has `--enable-default-pie` enabled; if it's +# not enabled we need to build our dependencies with `--with-pic=yes` (see below) +echo "--- COMPILER/LINKER INFORMATION" +echo "int main() {}" > test.c +cc -Q -v test.c +echo "---" + # install rustup curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y @@ -20,8 +27,11 @@ done # needed by the vendored OpenSSL used by `cargo-c` yum -y install 
perl-IPC-Cmd +# needed by `pineappl_applgrid` with `--features=static` +yum -y install zlib-static + # install cargo-c needed for the CAPI -cargo install cargo-c --version ${CARGOC_V} --features=vendored-openssl +cargo install --locked cargo-c --version ${CARGOC_V} --features=vendored-openssl # remove files generated by cargo rm -r /usr/local/cargo/registry @@ -30,11 +40,12 @@ rm -r /usr/local/cargo/registry ( curl "https://lhapdf.hepforge.org/downloads/?f=LHAPDF-${LHAPDF_V}.tar.gz" || \ curl "https://web.archive.org/web/20211018095814/https://lhapdf.hepforge.org/downloads/?f=LHAPDF-6.4.0.tar.gz" ) | tar xzf - cd LHAPDF-${LHAPDF_V} -./configure --disable-python -make -j +# compile static libraries with PIC to make statically linking PineAPPL's CLI work +# see also https://users.rust-lang.org/t/why-does-crelocation-model-dynamic-no-pic-help-although-it-shouldnt/109012 +./configure --disable-python --with-pic=yes +make -j V=1 make install ldconfig - cd .. # install PDF sets @@ -42,24 +53,33 @@ for pdf in NNPDF31_nlo_as_0118_luxqed NNPDF40_nnlo_as_01180 NNPDF40_nlo_as_01180 curl "https://lhapdfsets.web.cern.ch/current/${pdf}.tar.gz" | tar xzf - -C /usr/local/share/LHAPDF done +# install zlib compiled with `-fPIC` +curl "https://www.zlib.net/zlib-${ZLIB_V}.tar.gz" | tar xzf - +cd zlib-${ZLIB_V} +CFLAGS=-fPIC ./configure --prefix=/usr/local +make -j +make install +ldconfig +cd .. + # install APPLgrid curl "https://applgrid.hepforge.org/downloads?f=applgrid-${APPLGRID_V}.tgz" | tar xzf - cd applgrid-${APPLGRID_V} -./configure --without-root +# compile static libraries with PIC to make statically linking PineAPPL's CLI work +./configure --without-root --with-pic=yes make -j make install ldconfig mkdir -p ${APPL_IGRID_DIR} cp src/*.h ${APPL_IGRID_DIR} - cd .. 
# install fastNLO curl "https://fastnlo.hepforge.org/code/v25/fastnlo_toolkit-${FASTNLO_V}.tar.gz" | tar xzf - cd fastnlo_toolkit-${FASTNLO_V} -./configure --prefix=/usr/local/ -make -j +# compile static libraries with PIC to make statically linking PineAPPL's CLI work +./configure --prefix=/usr/local/ --with-pic=yes +make -j V=1 make install ldconfig - cd .. From 4c97f1f257bcb9930236f52b8213decd6e8c8ba2 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 30 Mar 2024 10:15:04 +0100 Subject: [PATCH 009/179] Ignore `managed-lhapdf` from code coverage --- .github/workflows/rust.yml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 9a37e7ea8..a2751695b 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -74,11 +74,12 @@ jobs: ( sed -nE 's/[[:space:]]+Running( unittests|) [^[:space:]]+ \(([^)]+)\)/\2/p' stderr && echo target/debug/doctestbins/*/rust_out | tr ' ' "\n" ) | \ xargs printf ' --object %s' | \ xargs $(rustc --print target-libdir)/../bin/llvm-cov export \ - --ignore-filename-regex='index.crates.io' \ - --ignore-filename-regex='rustc' \ - --ignore-filename-regex='pineappl/tests' \ - --ignore-filename-regex='pineappl_capi' \ - --ignore-filename-regex='pineappl_cli/tests' \ + --ignore-filename-regex=index.crates.io \ + --ignore-filename-regex=rustc \ + --ignore-filename-regex=managed-lhapdf \ + --ignore-filename-regex=pineappl/tests \ + --ignore-filename-regex=pineappl_capi \ + --ignore-filename-regex=pineappl_cli/tests \ --instr-profile=pineappl.profdata \ --skip-functions \ --object target/debug/pineappl \ From 05ba6a1fcf9f50b48665092a25493f34b1fc12c2 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 30 Mar 2024 10:07:53 +0100 Subject: [PATCH 010/179] Compile CLI on Linux when making a release --- .github/workflows/release.yml | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git 
a/.github/workflows/release.yml b/.github/workflows/release.yml index 14a0360a4..70ff56ff7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -67,13 +67,37 @@ jobs: name: pineappl_capi-${{ matrix.target }} path: pineappl_capi-${{ matrix.target }}.tar.gz + cli-linux: + runs-on: ubuntu-latest + container: ghcr.io/nnpdf/pineappl-ci:latest + strategy: + matrix: + target: [x86_64-unknown-linux-gnu] + steps: + # checkout@v4 uses a newer version of Node that's incompatible with our container's GLIBC + - uses: actions/checkout@v3 + - name: Compile binary + - run: | + cargo build --release --all-features --bin pineappl + mkdir -p prefix/bin + cp target/release/pineappl prefix/bin/ + cd prefix + tar czf ../pineappl_cli-${{ matrix.target }}.tar.gz . + - name: Upload artifact + # upload-artifact@v4 uses a newer version of Node that's incompatible with our container's GLIBC + uses: actions/upload-artifact@v3 + with: + name: pineappl_cli-${{ matrix.target }} + path: pineappl_cli-${{ matrix.target }}.tar.gz + # publish a release on github and upload pre-built CAPI publish-release: - needs: [capi-macos, capi-linux] + needs: [capi-macos, capi-linux, cli-linux] runs-on: ubuntu-latest if: "startsWith(github.ref, 'refs/tags/')" steps: - uses: actions/checkout@v4 + # version must match the one used in `actions/upload-artifact` - uses: actions/download-artifact@v3 with: path: artifacts @@ -94,6 +118,7 @@ jobs: gh release edit v${version} -n "" --prerelease fi find artifacts -name 'pineappl_capi*' -type f -exec gh release upload v${version} {} + + find artifacts -name 'pineappl_cli*' -type f -exec gh release upload v${version} {} + gh release edit v${version} --draft=false publish-crates: From c34db082d173704185fe55b0b5aa3e8b11215acd Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 30 Mar 2024 10:18:35 +0100 Subject: [PATCH 011/179] Fix `Release` workflow --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/.github/workflows/release.yml b/.github/workflows/release.yml index 70ff56ff7..3be54278a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -77,7 +77,7 @@ jobs: # checkout@v4 uses a newer version of Node that's incompatible with our container's GLIBC - uses: actions/checkout@v3 - name: Compile binary - - run: | + run: | cargo build --release --all-features --bin pineappl mkdir -p prefix/bin cp target/release/pineappl prefix/bin/ From 9b194b62012f3412bf6c15a52a4eb44440a7c96c Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 30 Mar 2024 11:05:58 +0100 Subject: [PATCH 012/179] Fix compilation problem for Python 3.6 wheel --- pineappl_py/src/grid.rs | 52 +++++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 0f62d5d8a..509e7828c 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -650,31 +650,33 @@ impl PyGrid { ren1: Vec, alphas: Vec, ) -> PyResult { - Ok(self - .grid - .evolve_with_slice_iter( - slices.map(|result| { - // TODO: check whether we can avoid the `.unwrap` calls - let any = result.unwrap(); - let tuple = any.downcast::().unwrap(); - let item0 = tuple.get_item(0).unwrap(); - let item1 = tuple.get_item(1).unwrap(); - let slice_info = item0.extract::().unwrap(); - let operator = item1.extract::>().unwrap(); - // TODO: can we get rid of the `into_owned` call? - let array = CowArray::from(operator.as_array().into_owned()); - - // TODO: change `PyErr` into something appropriate - Ok::<_, PyErr>((slice_info.slice_info, array)) - }), - // TODO: what if it's non-contiguous? 
- order_mask.as_slice().unwrap(), - xi, - &AlphasTable { ren1, alphas }, - ) - .map(|fk_table| PyFkTable { fk_table }) - // TODO: get rid of this `.unwrap` call - .unwrap()) + todo!() + //Ok(self + // .grid + // .evolve_with_slice_iter( + // slices.map(|result| { + // // TODO: check whether we can avoid the `.unwrap` calls + // let any = result.unwrap(); + // let tuple = any.downcast::().unwrap(); + // // TODO: `get_item` changes return type from pyo3-0.14 to 0.15 + // let item0 = tuple.get_item(0).unwrap(); + // let item1 = tuple.get_item(1).unwrap(); + // let slice_info = item0.extract::().unwrap(); + // let operator = item1.extract::>().unwrap(); + // // TODO: can we get rid of the `into_owned` call? + // let array = CowArray::from(operator.as_array().into_owned()); + + // // TODO: change `PyErr` into something appropriate + // Ok::<_, PyErr>((slice_info.slice_info, array)) + // }), + // // TODO: what if it's non-contiguous? + // order_mask.as_slice().unwrap(), + // xi, + // &AlphasTable { ren1, alphas }, + // ) + // .map(|fk_table| PyFkTable { fk_table }) + // // TODO: get rid of this `.unwrap` call + // .unwrap()) } /// Load grid from file. 
From 511f2c79293aae2ef26950a305f31dd1f9dbd5e3 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 30 Mar 2024 11:07:14 +0100 Subject: [PATCH 013/179] Run `cargo fmt` in `pineappl_py` --- pineappl_py/src/grid.rs | 3 ++- pineappl_py/src/import_only_subgrid.rs | 25 +++++++++++++------------ pineappl_py/src/subgrid.rs | 5 +++-- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 509e7828c..f227e92e3 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -451,7 +451,8 @@ impl PyGrid { let mut xfx1 = |id, x, q2| f64::extract(xfx1.call1((id, x, q2)).unwrap()).unwrap(); let mut xfx2 = |id, x, q2| f64::extract(xfx2.call1((id, x, q2)).unwrap()).unwrap(); let mut alphas = |q2| f64::extract(alphas.call1((q2,)).unwrap()).unwrap(); - let mut lumi_cache = LumiCache::with_two(pdg_id1, &mut xfx1, pdg_id2, &mut xfx2, &mut alphas); + let mut lumi_cache = + LumiCache::with_two(pdg_id1, &mut xfx1, pdg_id2, &mut xfx2, &mut alphas); self.grid .convolute( &mut lumi_cache, diff --git a/pineappl_py/src/import_only_subgrid.rs b/pineappl_py/src/import_only_subgrid.rs index 6a62b6a74..991ed7c1e 100644 --- a/pineappl_py/src/import_only_subgrid.rs +++ b/pineappl_py/src/import_only_subgrid.rs @@ -35,19 +35,20 @@ impl PyImportOnlySubgridV2 { { sparse_array[[imu2, ix1, ix2]] = *value; } - Self{ + Self { import_only_subgrid: ImportOnlySubgridV2::new( - sparse_array, - mu2_grid - .iter() - .map(|(ren, fac)| Mu2 { - ren: *ren, - fac: *fac, - }) - .collect(), - x1_grid.to_vec().unwrap(), - x2_grid.to_vec().unwrap(), - )} + sparse_array, + mu2_grid + .iter() + .map(|(ren, fac)| Mu2 { + ren: *ren, + fac: *fac, + }) + .collect(), + x1_grid.to_vec().unwrap(), + x2_grid.to_vec().unwrap(), + ), + } } /// Wrapper to match :meth:`pineappl.pineappl.PyGrid.set_subgrid()` diff --git a/pineappl_py/src/subgrid.rs b/pineappl_py/src/subgrid.rs index 0dd3a8f2e..9a6360df7 100644 --- a/pineappl_py/src/subgrid.rs +++ 
b/pineappl_py/src/subgrid.rs @@ -152,12 +152,13 @@ pub struct PyMu2 { pub mu2: Mu2, } - #[pymethods] impl PyMu2 { #[new] pub fn new(ren: f64, fac: f64) -> Self { - Self { mu2: Mu2 { ren, fac } } + Self { + mu2: Mu2 { ren, fac }, + } } #[getter] From 24961253caab230bffe88e846f30571616d828c8 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 30 Mar 2024 11:25:34 +0100 Subject: [PATCH 014/179] Build CLI for MacOS --- .github/workflows/release.yml | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3be54278a..a5e41bc4c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -43,6 +43,27 @@ jobs: name: pineappl_capi-${{ matrix.target }} path: pineappl_capi-${{ matrix.target }}.tar.gz + cli-macos: + runs-on: macos-latest + strategy: + matrix: + target: [x86_64-apple-darwin, aarch64-apple-darwin] + steps: + - uses: actions/checkout@v4 + - name: Compile binary + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --target=${{ matrix.target }} + cargo build --release --all-features --bin pineappl --target=${{ matrix.target }} + cp target/release/pineappl prefix/bin/ + cd prefix + tar czf ../pineappl_cli-${{ matrix.target }}.tar.gz . 
+ - name: Upload artifact + # as long as we need v3 in `cli-linux` we also must use it here + uses: actions/upload-artifact@v3 + with: + name: pineappl_cli-${{ matrix.target }} + path: pineappl_cli-${{ matrix.target }}.tar.gz + capi-linux: runs-on: ubuntu-latest container: ghcr.io/nnpdf/pineappl-ci:latest @@ -92,7 +113,7 @@ jobs: # publish a release on github and upload pre-built CAPI publish-release: - needs: [capi-macos, capi-linux, cli-linux] + needs: [capi-macos, cli-macos, capi-linux, cli-linux] runs-on: ubuntu-latest if: "startsWith(github.ref, 'refs/tags/')" steps: @@ -117,8 +138,7 @@ jobs: else gh release edit v${version} -n "" --prerelease fi - find artifacts -name 'pineappl_capi*' -type f -exec gh release upload v${version} {} + - find artifacts -name 'pineappl_cli*' -type f -exec gh release upload v${version} {} + + find artifacts -name 'pineappl*' -type f -exec gh release upload v${version} {} + gh release edit v${version} --draft=false publish-crates: From 93b206b46426a3f86139d9cbd4e67ff75a432ddf Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 30 Mar 2024 11:36:29 +0100 Subject: [PATCH 015/179] Build CLI for MacOS without APPLgrid and fastNLO support --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a5e41bc4c..b653dd403 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -53,7 +53,7 @@ jobs: - name: Compile binary run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --target=${{ matrix.target }} - cargo build --release --all-features --bin pineappl --target=${{ matrix.target }} + cargo build --release --features=evolve,fktable,static --bin pineappl --target=${{ matrix.target }} cp target/release/pineappl prefix/bin/ cd prefix tar czf ../pineappl_cli-${{ matrix.target }}.tar.gz . 
From 95b92b8d7e46a1943876b19780aff67405733f84 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 30 Mar 2024 14:54:56 +0100 Subject: [PATCH 016/179] Try to fix building CLI for aarch64-apple-darwin target --- .github/workflows/release.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b653dd403..c816d503f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -44,10 +44,15 @@ jobs: path: pineappl_capi-${{ matrix.target }}.tar.gz cli-macos: - runs-on: macos-latest strategy: matrix: - target: [x86_64-apple-darwin, aarch64-apple-darwin] + os: [macos-latest, macos-14] + include: + - os: macos-latest + target: x86_64-apple-darwin + - os: macos-14 + target: aarch64-apple-darwin + runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 - name: Compile binary From 0af05ecbe97e0f3ce02d4a483343f718a8157e29 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 1 Apr 2024 17:18:02 +0200 Subject: [PATCH 017/179] Improve `install-capi.sh` - fixed warnings report by `shellcheck` - fixed resolving paths involving `~` - script now checks configuration after installation --- install-capi.sh | 65 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 47 insertions(+), 18 deletions(-) diff --git a/install-capi.sh b/install-capi.sh index 741abe64c..93b4e1bcd 100755 --- a/install-capi.sh +++ b/install-capi.sh @@ -1,5 +1,10 @@ #!/bin/sh +# WARNING: do not commit changes to this file unless you've checked it against +# `shellcheck` (https://www.shellcheck.net/); run `shellcheck install-capi.sh` +# to make sure this script is POSIX shell compatible; we cannot rely on bash +# being present + set -eu prefix= @@ -51,21 +56,26 @@ if [ -z ${target+x} ]; then target=x86_64-unknown-linux-gnu;; *) echo "Error: unknown target, uname = '$(uname -a)'" + exit 1;; esac fi # if no prefix is given, prompt for one -if [ -z ${prefix} ]; then +if [ -z 
"${prefix}" ]; then # read from stdin (`<&1`), even if piped into a shell - read -p "Enter installation path: " <&1 prefix + printf "Enter installation path: " + read -r <&1 prefix + echo fi -if [ ! -d "${prefix}" ]; then - mkdir -p "${prefix}" -fi +# we need the absolute path; use `eval` to expand possible tilde `~` +eval mkdir -p "${prefix}" +eval cd "${prefix}" +prefix=$(pwd) +cd - >/dev/null # if no version is given, use the latest version -if [ -z ${version} ]; then +if [ -z "${version}" ]; then version=$(curl -s https://api.github.com/repos/NNPDF/pineappl/releases/latest | \ sed -n 's/[ ]*"tag_name"[ ]*:[ ]*"v\([^"]*\)"[ ]*,[ ]*$/\1/p') fi @@ -76,27 +86,46 @@ echo "prefix: ${prefix}" echo "target: ${target}" echo "version: ${version}" -# we need the absolute path -cd "${prefix}" -prefix=$(pwd) -cd - >/dev/null - -curl -s -LJ "${base_url}"/v${version}/pineappl_capi-${target}.tar.gz \ +curl -s -LJ "${base_url}/v${version}/pineappl_capi-${target}.tar.gz" \ | tar xzf - -C "${prefix}" # instead of `sed` and `mv` we could use `sed -i`, but on Mac it doesn't work as expected from GNU sed -sed s:prefix=/:prefix=${prefix}: "${prefix}"/lib/pkgconfig/pineappl_capi.pc > \ +sed "s:prefix=/:prefix=${prefix}:" "${prefix}"/lib/pkgconfig/pineappl_capi.pc > \ "${prefix}"/lib/pkgconfig/pineappl_capi.pc.new mv "${prefix}"/lib/pkgconfig/pineappl_capi.pc.new "${prefix}"/lib/pkgconfig/pineappl_capi.pc +pcbin= + if command -v pkg-config >/dev/null; then - if ! pkg-config --libs pineappl_capi >/dev/null; then + pcbin=$(command -v pkg-config) +elif command -v pkgconf >/dev/null; then + pcbin=$(command -v pkgconf) +else + echo + echo "Warning: neither \`pkg-config\` nor \`pkgconf\` not found. 
At least one is needed for the CAPI to be found" + exit 1 +fi + +# check whether the library can be found +if "${pcbin}" --libs pineappl_capi >/dev/null 2>/dev/null; then + prefix_lib=$(cd "${prefix}"/lib && pwd) + found_lib=$("${pcbin}" --keep-system-libs --libs-only-L pineappl_capi | sed 's/-L[[:space:]]*//') + + if [ "${prefix_lib}" != "${found_lib}" ]; then echo - echo "Warning: Your PKG_CONFIG_PATH environment variable isn't properly set. Try adding" - echo " export PKG_CONFIG_PATH=${prefix}/lib/pkgconfig" - echo "to your shell configuration file" + echo "Warning: Your PKG_CONFIG_PATH environment variable isn't properly set." + echo "It appears a different installation of PineAPPL is found:" + echo + echo " ${found_lib}" + echo + echo "Remove this installation or reorder your PKG_CONFIG_PATH" fi else echo - echo "Warning: \`pkg-config\` binary not found. Without it the CAPI may not be found." + echo "Warning: Your PKG_CONFIG_PATH environment variable isn't properly set." + echo "Try adding" + echo + echo " export PKG_CONFIG_PATH=${prefix}/lib/pkgconfig${PKG_CONFIG_PATH:+:\"\${PKG_CONFIG_PATH\}\"}" + echo + echo "to your shell configuration file" fi From b2cc40dc1f0a467c6ad69534ce36a5a22fabd982 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 7 Apr 2024 10:32:34 +0200 Subject: [PATCH 018/179] Add new option `--delete-channels` to `write` subcommand --- pineappl/src/grid.rs | 35 ++++++++++++++++++ pineappl_cli/src/write.rs | 17 ++++++++- pineappl_cli/tests/write.rs | 71 ++++++++++++++++++++++++++----------- 3 files changed, 102 insertions(+), 21 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index c8db84cf4..d30dbaf55 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -2176,6 +2176,41 @@ impl Grid { } } + /// Deletes channels with the corresponding `channel_indices`. Repeated indices and indices + /// larger or equal than the number of channels are ignored. 
+ pub fn delete_channels(&mut self, channel_indices: &[usize]) { + let mut channel_indices: Vec<_> = channel_indices + .iter() + .copied() + // ignore indices corresponding to bin that don't exist + .filter(|&index| index < self.lumi().len()) + .collect(); + + // sort and remove repeated indices + channel_indices.sort_unstable(); + channel_indices.dedup(); + let channel_indices = channel_indices; + + let mut channel_ranges: Vec> = Vec::new(); + + // convert indices into consecutive ranges + for &channel_index in &channel_indices { + match channel_ranges.last_mut() { + Some(range) if range.end == channel_index => range.end += 1, + _ => channel_ranges.push(channel_index..(channel_index + 1)), + } + } + + // reverse order so we don't invalidate indices + channel_ranges.reverse(); + let channel_ranges = channel_ranges; + + for range in channel_ranges.into_iter() { + self.lumi.drain(range.clone()); + self.subgrids.slice_axis_inplace(Axis(2), range.into()); + } + } + pub(crate) fn rewrite_lumi(&mut self, add: &[(i32, i32)], del: &[i32]) { self.lumi = self .lumi diff --git a/pineappl_cli/src/write.rs b/pineappl_cli/src/write.rs index 4b9e125c1..2e8fbdb40 100644 --- a/pineappl_cli/src/write.rs +++ b/pineappl_cli/src/write.rs @@ -34,6 +34,7 @@ enum OpsArg { Cc2(bool), DedupChannels(i64), DeleteBins(Vec>), + DeleteChannels(Vec>), DeleteKey(String), MergeBins(Vec>), Optimize(bool), @@ -131,7 +132,7 @@ impl FromArgMatches for MoreArgs { }); } } - "delete_bins" | "merge_bins" => { + "delete_bins" | "delete_channels" | "merge_bins" => { for (index, arg) in indices.into_iter().zip( matches .remove_occurrences(&id) @@ -140,6 +141,7 @@ impl FromArgMatches for MoreArgs { ) { args[index] = Some(match id.as_str() { "delete_bins" => OpsArg::DeleteBins(arg), + "delete_channels" => OpsArg::DeleteChannels(arg), "merge_bins" => OpsArg::MergeBins(arg), _ => unreachable!(), }); @@ -280,6 +282,16 @@ impl Args for MoreArgs { .value_name("BIN1-BIN2,...") 
.value_parser(helpers::parse_integer_range), ) + .arg( + Arg::new("delete_channels") + .action(ArgAction::Append) + .help("Delete channels with the specified indices") + .long("delete-channels") + .num_args(1) + .value_delimiter(',') + .value_name("CH1-CH2,...") + .value_parser(helpers::parse_integer_range), + ) .arg( Arg::new("delete_key") .action(ArgAction::Append) @@ -492,6 +504,9 @@ impl Subcommand for Opts { OpsArg::DeleteBins(ranges) => { grid.delete_bins(&ranges.iter().flat_map(Clone::clone).collect::>()); } + OpsArg::DeleteChannels(ranges) => { + grid.delete_channels(&ranges.iter().flat_map(Clone::clone).collect::>()); + } OpsArg::DeleteKey(key) => { grid.key_values_mut().remove(key); } diff --git a/pineappl_cli/tests/write.rs b/pineappl_cli/tests/write.rs index 071e78dec..cb84dd43a 100644 --- a/pineappl_cli/tests/write.rs +++ b/pineappl_cli/tests/write.rs @@ -10,26 +10,27 @@ Arguments: Path of the modified PineAPPL file Options: - --cc1[=] Charge conjugate the first initial state [possible values: true, false] - --cc2[=] Charge conjugate the second initial state [possible values: true, false] - --dedup-channels[=] Deduplicate channels assuming numbers differing by ULPS are the same - --delete-bins Delete bins with the specified indices - --delete-key Delete an internal key-value pair - --merge-bins Merge specific bins together - --optimize[=] Optimize internal data structure to minimize memory and disk usage [possible values: true, false] - --optimize-fk-table Optimize internal data structure of an FkTable to minimize memory and disk usage [possible values: Nf6Ind, Nf6Sym, Nf5Ind, Nf5Sym, Nf4Ind, Nf4Sym, Nf3Ind, Nf3Sym] - --remap Modify the bin dimensions and widths - --remap-norm Modify the bin normalizations with a common factor - --remap-norm-ignore Modify the bin normalizations by multiplying with the bin lengths for the given dimensions - --rewrite-channel Rewrite the definition of the channel with index IDX - -s, --scale Scales all grids with the 
given factor - --scale-by-bin Scale each bin with a different factor - --scale-by-order Scales all grids with order-dependent factors - --set-key-value Set an internal key-value pair - --set-key-file Set an internal key-value pair, with value being read from a file - --split-lumi[=] Split the grid such that the luminosity function contains only a single combination per channel [possible values: true, false] - --upgrade[=] Convert the file format to the most recent version [possible values: true, false] - -h, --help Print help + --cc1[=] Charge conjugate the first initial state [possible values: true, false] + --cc2[=] Charge conjugate the second initial state [possible values: true, false] + --dedup-channels[=] Deduplicate channels assuming numbers differing by ULPS are the same + --delete-bins Delete bins with the specified indices + --delete-channels Delete channels with the specified indices + --delete-key Delete an internal key-value pair + --merge-bins Merge specific bins together + --optimize[=] Optimize internal data structure to minimize memory and disk usage [possible values: true, false] + --optimize-fk-table Optimize internal data structure of an FkTable to minimize memory and disk usage [possible values: Nf6Ind, Nf6Sym, Nf5Ind, Nf5Sym, Nf4Ind, Nf4Sym, Nf3Ind, Nf3Sym] + --remap Modify the bin dimensions and widths + --remap-norm Modify the bin normalizations with a common factor + --remap-norm-ignore Modify the bin normalizations by multiplying with the bin lengths for the given dimensions + --rewrite-channel Rewrite the definition of the channel with index IDX + -s, --scale Scales all grids with the given factor + --scale-by-bin Scale each bin with a different factor + --scale-by-order Scales all grids with order-dependent factors + --set-key-value Set an internal key-value pair + --set-key-file Set an internal key-value pair, with value being read from a file + --split-lumi[=] Split the grid such that the luminosity function contains only a single 
combination per channel [possible values: true, false] + --upgrade[=] Convert the file format to the most recent version [possible values: true, false] + -h, --help Print help "; const CHANNEL_STR: &str = "l entry entry @@ -82,6 +83,12 @@ const DELETE_BINS_25_STR: &str = "b etal dsig/detal 3 4 4.5 2.7517266e1 "; +const DELETE_CHANNELS_STR: &str = "l entry entry +-+------------+------------ +0 1 × ( 2, -1) 1 × ( 4, -3) +1 1 × (22, -3) 1 × (22, -1) +"; + const KEY_VALUE_STR: &str = r"arxiv: 1505.07024 description: LHCb differential W-boson production cross section at 7 TeV hepdata: 10.17182/hepdata.2114.v1/t4 @@ -346,6 +353,30 @@ fn delete_bins_25() { .stdout(DELETE_BINS_25_STR); } +#[test] +fn delete_channels() { + let output = NamedTempFile::new("deleted3.pineappl.lz4").unwrap(); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "write", + "--delete-channels=1,3-4", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + output.path().to_str().unwrap(), + ]) + .assert() + .success() + .stdout(""); + + Command::cargo_bin("pineappl") + .unwrap() + .args(["read", "--lumis", output.path().to_str().unwrap()]) + .assert() + .success() + .stdout(DELETE_CHANNELS_STR); +} + #[test] fn key_value() { let output = NamedTempFile::new("set.pineappl.lz4").unwrap(); From 958ae03dae87e4d5856dc8bfd7b5d10b0d890fe1 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 7 Apr 2024 11:04:26 +0200 Subject: [PATCH 019/179] Fix calculation of `diff` We assume that the first grid is the original and the second grid the new one like in the command-line program `diff` for two textfiles. 
Then the relative difference should be calculated using the first grid in the denominator --- pineappl_cli/src/diff.rs | 4 ++-- pineappl_cli/tests/diff.rs | 40 +++++++++++++++++++------------------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/pineappl_cli/src/diff.rs b/pineappl_cli/src/diff.rs index 2992529c7..35a9d545b 100644 --- a/pineappl_cli/src/diff.rs +++ b/pineappl_cli/src/diff.rs @@ -186,7 +186,7 @@ impl Subcommand for Opts { row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, result1))); row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, result2))); row.add_cell(cell!(r->format!("{:.*e}", self.digits_rel, - if result1 == result2 { 0.0 } else { result1 / result2 - 1.0 }))); + if result1 == result2 { 0.0 } else { result2 / result1 - 1.0 }))); } } else { let orders = orders1; @@ -245,7 +245,7 @@ impl Subcommand for Opts { row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, result1))); row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, result2))); row.add_cell(cell!(r->format!("{:.*e}", self.digits_rel, - if result1 == result2 { 0.0 } else { result1 / result2 - 1.0 }))); + if result1 == result2 { 0.0 } else { result2 / result1 - 1.0 }))); } } } diff --git a/pineappl_cli/tests/diff.rs b/pineappl_cli/tests/diff.rs index 3cbe78a09..003cc98ac 100644 --- a/pineappl_cli/tests/diff.rs +++ b/pineappl_cli/tests/diff.rs @@ -48,28 +48,28 @@ const ORDERS1_A2_A2AS1_ORDERS2_A2_A2AS1_STR: &str = 7 4 4.5 2.2383492e1 2.2383492e1 0.000e0 5.3540011e0 5.3540011e0 0.000e0 "; -const ORDERS1_A2_A2AS1_IGNORE_ORDERS_STR: &str = "b x1 diff --+----+----+-----------+-----------+-------- -0 2 2.25 7.6246034e2 7.5459110e2 1.043e-2 -1 2.25 2.5 6.9684577e2 6.9028342e2 9.507e-3 -2 2.5 2.75 6.0548681e2 6.0025198e2 8.721e-3 -3 2.75 3 4.8928139e2 4.8552235e2 7.742e-3 -4 3 3.25 3.6454175e2 3.6195456e2 7.148e-3 -5 3.25 3.5 2.4754316e2 2.4586691e2 6.818e-3 -6 3.5 4 1.1667878e2 1.1586851e2 6.993e-3 -7 4 4.5 2.7737493e1 2.7517266e1 8.003e-3 +const 
ORDERS1_A2_A2AS1_IGNORE_ORDERS_STR: &str = "b x1 diff +-+----+----+-----------+-----------+--------- +0 2 2.25 7.6246034e2 7.5459110e2 -1.032e-2 +1 2.25 2.5 6.9684577e2 6.9028342e2 -9.417e-3 +2 2.5 2.75 6.0548681e2 6.0025198e2 -8.646e-3 +3 2.75 3 4.8928139e2 4.8552235e2 -7.683e-3 +4 3 3.25 3.6454175e2 3.6195456e2 -7.097e-3 +5 3.25 3.5 2.4754316e2 2.4586691e2 -6.772e-3 +6 3.5 4 1.1667878e2 1.1586851e2 -6.944e-3 +7 4 4.5 2.7737493e1 2.7517266e1 -7.940e-3 "; -const SCALE2_2_STR: &str = "b x1 O(as^0 a^2) O(as^0 a^3) O(as^1 a^2) --+----+----+-----------+-----------+---------+-------------+-------------+---------+-----------+-----------+--------- -0 2 2.25 6.5070305e2 1.3014061e3 -5.000e-1 -7.8692484e0 -1.5738497e1 -5.000e-1 1.1175729e2 2.2351458e2 -5.000e-1 -1 2.25 2.5 5.9601236e2 1.1920247e3 -5.000e-1 -6.5623495e0 -1.3124699e1 -5.000e-1 1.0083341e2 2.0166682e2 -5.000e-1 -2 2.5 2.75 5.1561247e2 1.0312249e3 -5.000e-1 -5.2348261e0 -1.0469652e1 -5.000e-1 8.9874343e1 1.7974869e2 -5.000e-1 -3 2.75 3 4.1534629e2 8.3069258e2 -5.000e-1 -3.7590420e0 -7.5180840e0 -5.000e-1 7.3935106e1 1.4787021e2 -5.000e-1 -4 3 3.25 3.0812719e2 6.1625439e2 -5.000e-1 -2.5871885e0 -5.1743770e0 -5.000e-1 5.6414554e1 1.1282911e2 -5.000e-1 -5 3.25 3.5 2.0807482e2 4.1614964e2 -5.000e-1 -1.6762487e0 -3.3524974e0 -5.000e-1 3.9468336e1 7.8936673e1 -5.000e-1 -6 3.5 4 9.6856769e1 1.9371354e2 -5.000e-1 -8.1027456e-1 -1.6205491e0 -5.000e-1 1.9822014e1 3.9644028e1 -5.000e-1 -7 4 4.5 2.2383492e1 4.4766985e1 -5.000e-1 -2.2022770e-1 -4.4045540e-1 -5.000e-1 5.3540011e0 1.0708002e1 -5.000e-1 +const SCALE2_2_STR: &str = "b x1 O(as^0 a^2) O(as^0 a^3) O(as^1 a^2) +-+----+----+-----------+-----------+-------+-------------+-------------+-------+-----------+-----------+------- +0 2 2.25 6.5070305e2 1.3014061e3 1.000e0 -7.8692484e0 -1.5738497e1 1.000e0 1.1175729e2 2.2351458e2 1.000e0 +1 2.25 2.5 5.9601236e2 1.1920247e3 1.000e0 -6.5623495e0 -1.3124699e1 1.000e0 1.0083341e2 2.0166682e2 1.000e0 +2 2.5 2.75 5.1561247e2 
1.0312249e3 1.000e0 -5.2348261e0 -1.0469652e1 1.000e0 8.9874343e1 1.7974869e2 1.000e0 +3 2.75 3 4.1534629e2 8.3069258e2 1.000e0 -3.7590420e0 -7.5180840e0 1.000e0 7.3935106e1 1.4787021e2 1.000e0 +4 3 3.25 3.0812719e2 6.1625439e2 1.000e0 -2.5871885e0 -5.1743770e0 1.000e0 5.6414554e1 1.1282911e2 1.000e0 +5 3.25 3.5 2.0807482e2 4.1614964e2 1.000e0 -1.6762487e0 -3.3524974e0 1.000e0 3.9468336e1 7.8936673e1 1.000e0 +6 3.5 4 9.6856769e1 1.9371354e2 1.000e0 -8.1027456e-1 -1.6205491e0 1.000e0 1.9822014e1 3.9644028e1 1.000e0 +7 4 4.5 2.2383492e1 4.4766985e1 1.000e0 -2.2022770e-1 -4.4045540e-1 1.000e0 5.3540011e0 1.0708002e1 1.000e0 "; const ORDERS_DIFFER_STR: &str = "Error: selected orders differ From 0189f87c385ed82785fa2b12a3ccd09a1249108e Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 9 Apr 2024 15:21:07 +0200 Subject: [PATCH 020/179] Suppress LHAPDF output by default --- CHANGELOG.md | 2 ++ pineappl_cli/src/lib.rs | 4 ++-- pineappl_cli/src/main.rs | 2 +- pineappl_cli/tests/analyze.rs | 3 --- pineappl_cli/tests/channels.rs | 7 ------- pineappl_cli/tests/convolute.rs | 12 ------------ pineappl_cli/tests/diff.rs | 8 -------- pineappl_cli/tests/evolve.rs | 11 ----------- pineappl_cli/tests/export.rs | 2 -- pineappl_cli/tests/import.rs | 27 --------------------------- pineappl_cli/tests/main.rs | 2 +- pineappl_cli/tests/merge.rs | 1 - pineappl_cli/tests/orders.rs | 5 ----- pineappl_cli/tests/plot.rs | 4 ---- pineappl_cli/tests/pull.rs | 5 ----- pineappl_cli/tests/uncert.rs | 10 ---------- pineappl_cli/tests/write.rs | 26 ++------------------------ 17 files changed, 8 insertions(+), 123 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c0281e2e5..d033a75b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +- changed switch `--silence-lhapdf` to `--lhapdf-banner` and suppress LHAPDF's + banners by default, unless `--lhapdf-banner` is given - 
`Grid::evolve` has now been marked deprecated - switched from `lhapdf` to `managed-lhapdf` crate which automatically downloads PDF sets when they are needed diff --git a/pineappl_cli/src/lib.rs b/pineappl_cli/src/lib.rs index 44952e378..87c3ecffb 100644 --- a/pineappl_cli/src/lib.rs +++ b/pineappl_cli/src/lib.rs @@ -29,9 +29,9 @@ use std::process::ExitCode; #[derive(Parser)] pub struct GlobalConfiguration { - /// Prevents LHAPDF from printing banners. + /// Allow LHAPDF to print banners. #[arg(long)] - pub silence_lhapdf: bool, + pub lhapdf_banner: bool, /// Forces negative PDF values to zero. #[arg(long)] pub force_positive: bool, diff --git a/pineappl_cli/src/main.rs b/pineappl_cli/src/main.rs index 6ec87f922..1fdeb236f 100644 --- a/pineappl_cli/src/main.rs +++ b/pineappl_cli/src/main.rs @@ -7,7 +7,7 @@ use std::process::{ExitCode, Termination}; fn main() -> ExitCode { let opts = Opts::parse(); - if opts.configuration.silence_lhapdf { + if !opts.configuration.lhapdf_banner { lhapdf::set_verbosity(0); } diff --git a/pineappl_cli/tests/analyze.rs b/pineappl_cli/tests/analyze.rs index 9cd8a39ca..ca94a7648 100644 --- a/pineappl_cli/tests/analyze.rs +++ b/pineappl_cli/tests/analyze.rs @@ -86,7 +86,6 @@ fn ckf() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "analyze", "ckf", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -104,7 +103,6 @@ fn ckf_with_default_denominator() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "analyze", "ckf", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -121,7 +119,6 @@ fn ckf_with_bad_limit() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "analyze", "ckf", "--limit=0", diff --git a/pineappl_cli/tests/channels.rs b/pineappl_cli/tests/channels.rs index c7d1566e0..2ee7293b0 100644 --- a/pineappl_cli/tests/channels.rs +++ b/pineappl_cli/tests/channels.rs @@ -119,7 +119,6 @@ fn default() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "channels", 
"../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", @@ -134,7 +133,6 @@ fn absolute() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "channels", "--absolute", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -150,7 +148,6 @@ fn absolute_integrated() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "channels", "--absolute", "--integrated", @@ -167,7 +164,6 @@ fn limit_3() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "channels", "--limit=3", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -183,7 +179,6 @@ fn bad_limit() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "channels", "--limit=0", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -199,7 +194,6 @@ fn lumis_0123() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "channels", "--lumis=0-3", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -215,7 +209,6 @@ fn orders_a2_as1a2() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "channels", "--orders=a2,as1a2", "../test-data/LHCB_WP_7TEV.pineappl.lz4", diff --git a/pineappl_cli/tests/convolute.rs b/pineappl_cli/tests/convolute.rs index 50cc18106..36265c0f2 100644 --- a/pineappl_cli/tests/convolute.rs +++ b/pineappl_cli/tests/convolute.rs @@ -179,7 +179,6 @@ fn default() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", @@ -195,7 +194,6 @@ fn force_positive() { .unwrap() .args([ "--force-positive", - "--silence-lhapdf", "convolute", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", @@ -210,7 +208,6 @@ fn default_multiple_pdfs() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", @@ -226,7 +223,6 @@ fn multiple_pdfs_with_new_construction() { Command::cargo_bin("pineappl") .unwrap() 
.args([ - "--silence-lhapdf", "convolute", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed/0", @@ -242,7 +238,6 @@ fn multiple_pdfs_with_relabeling() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", @@ -258,7 +253,6 @@ fn two_pdfs_with_order_subset() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", "--orders=a2", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -275,7 +269,6 @@ fn three_pdfs() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed/0", @@ -292,7 +285,6 @@ fn bins_13567() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", "--bins=1,3,5-7", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -308,7 +300,6 @@ fn integrated() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", "--integrated", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -324,7 +315,6 @@ fn integrated_multiple_pdfs() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", "--integrated", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -341,7 +331,6 @@ fn orders_a2_a3() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", "--orders=a2,a3", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -357,7 +346,6 @@ fn wrong_orders() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", "--orders=a2a2as2", "../test-data/LHCB_WP_7TEV.pineappl.lz4", diff --git a/pineappl_cli/tests/diff.rs b/pineappl_cli/tests/diff.rs index 003cc98ac..e8369420f 100644 --- a/pineappl_cli/tests/diff.rs +++ b/pineappl_cli/tests/diff.rs @@ -99,7 +99,6 @@ fn orders1_a2_orders2_a2() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "diff", "--orders1=a2", "--orders2=a2", @@ -117,7 +116,6 @@ fn 
orders1_a2_a2as1_orders2_a2_a2as1() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "diff", "--orders1=a2,a2as1", "--orders2=a2,a2as1", @@ -135,7 +133,6 @@ fn orders1_a2_a2as1_ignore_orders() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "diff", "--orders1=a2,a2as1", "--ignore-orders", @@ -153,7 +150,6 @@ fn scale2_2() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "diff", "--scale2=2", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -170,7 +166,6 @@ fn orders_differ() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "diff", "--orders1=a2", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -202,7 +197,6 @@ fn bin_limits_differ() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "diff", "../test-data/LHCB_WP_7TEV.pineappl.lz4", output.path().to_str().unwrap(), @@ -233,7 +227,6 @@ fn bin_number_differs() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "diff", "--ignore-bin-limits", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -251,7 +244,6 @@ fn lumis_differ() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "diff", "../test-data/LHCB_WP_7TEV_old.pineappl.lz4", "../test-data/LHCB_WP_7TEV.pineappl.lz4", diff --git a/pineappl_cli/tests/evolve.rs b/pineappl_cli/tests/evolve.rs index 65ca27bd3..710365d16 100644 --- a/pineappl_cli/tests/evolve.rs +++ b/pineappl_cli/tests/evolve.rs @@ -188,7 +188,6 @@ fn lhcb_wp_7tev() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "evolve", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "../test-data/LHCB_WP_7TEV.tar", @@ -218,7 +217,6 @@ fn lhcb_wp_7tev() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", optimized.path().to_str().unwrap(), "NNPDF40_nlo_as_01180", @@ -235,7 +233,6 @@ fn lhcb_wp_7tev_use_old_evolve() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "evolve", 
"../test-data/LHCB_WP_7TEV.pineappl.lz4", "../test-data/LHCB_WP_7TEV.tar", @@ -271,7 +268,6 @@ fn lhcb_wp_7tev_v2() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "evolve", "--digits-abs=16", "--digits-rel=16", @@ -308,7 +304,6 @@ fn lhcb_wp_7tev_v2_xir_2() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "evolve", "--digits-abs=16", "--digits-rel=16", @@ -346,7 +341,6 @@ fn lhcb_wp_7tev_v2_xif_2() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "evolve", "--digits-abs=16", "--digits-rel=16", @@ -384,7 +378,6 @@ fn lhcb_wp_7tev_v2_xif_2_error() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "evolve", "--digits-abs=16", "--digits-rel=16", @@ -422,7 +415,6 @@ fn e906nlo_bin_00() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "evolve", input.path().to_str().unwrap(), "../test-data/E906nlo_bin_00.tar", @@ -441,7 +433,6 @@ fn nutev_cc_nu_fe_sigmared() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "evolve", "../test-data/NUTEV_CC_NU_FE_SIGMARED.pineappl.lz4", "../test-data/NUTEV_CC_NU_FE_SIGMARED.tar", @@ -461,7 +452,6 @@ fn lhcb_dy_8tev() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "evolve", "../test-data/LHCB_DY_8TEV.pineappl.lz4", "../test-data/LHCB_DY_8TEV.tar", @@ -481,7 +471,6 @@ fn cms_ttb_8tev_2d_ttm_trap_tot() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "evolve", "--orders=as2,as3,as4", "../test-data/CMS_TTB_8TEV_2D_TTM_TRAP_TOT-opt.pineappl.lz4", diff --git a/pineappl_cli/tests/export.rs b/pineappl_cli/tests/export.rs index b29c0584b..392c3dc06 100644 --- a/pineappl_cli/tests/export.rs +++ b/pineappl_cli/tests/export.rs @@ -120,7 +120,6 @@ fn export_applgrid() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "export", "../test-data/LHCB_DY_8TEV.pineappl.lz4", output.path().to_str().unwrap(), @@ -153,7 +152,6 @@ fn 
export_dis_applgrid() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "export", output1.path().to_str().unwrap(), output2.path().to_str().unwrap(), diff --git a/pineappl_cli/tests/import.rs b/pineappl_cli/tests/import.rs index 8ad9e63b7..a016c0d2a 100644 --- a/pineappl_cli/tests/import.rs +++ b/pineappl_cli/tests/import.rs @@ -324,7 +324,6 @@ fn import_fix_grid() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/NJetEvents_0-0-2.tab.gz", output.path().to_str().unwrap(), @@ -343,7 +342,6 @@ fn import_flex_grid() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -362,7 +360,6 @@ fn import_flex_grid_scale_1() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -383,7 +380,6 @@ fn import_flex_grid_scale_2() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -404,7 +400,6 @@ fn import_flex_grid_quadratic_sum() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -427,7 +422,6 @@ fn import_flex_grid_quadratic_mean() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -450,7 +444,6 @@ fn import_flex_grid_5() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -471,7 +464,6 @@ fn 
import_flex_grid_6() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -492,7 +484,6 @@ fn import_flex_grid_7() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -513,7 +504,6 @@ fn import_flex_grid_8() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -534,7 +524,6 @@ fn import_flex_grid_9() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -555,7 +544,6 @@ fn import_flex_grid_10() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -576,7 +564,6 @@ fn import_flex_grid_11() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -597,7 +584,6 @@ fn import_flex_grid_12() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -618,7 +604,6 @@ fn import_flex_grid_13() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -639,7 +624,6 @@ fn import_flex_grid_14() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", 
"../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -660,7 +644,6 @@ fn import_flex_grid_15() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-incjets-fnlo-arxiv-0706.3722-xsec000.tab.gz", output.path().to_str().unwrap(), @@ -685,7 +668,6 @@ fn import_dis_fktable() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/FK_POSXDQ.dat", output.path().to_str().unwrap(), @@ -698,7 +680,6 @@ fn import_dis_fktable() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", @@ -865,7 +846,6 @@ fn import_hadronic_fktable() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/FK_ATLASTTBARTOT13TEV.dat", output.path().to_str().unwrap(), @@ -878,7 +858,6 @@ fn import_hadronic_fktable() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", @@ -1170,7 +1149,6 @@ fn import_photon_grid() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/LHCBWZMU7TEV_PI_part1.appl", output.path().to_str().unwrap(), @@ -1189,7 +1167,6 @@ fn import_applgrid() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/ATLASWPT11-Wplus_tot.appl", output.path().to_str().unwrap(), @@ -1208,7 +1185,6 @@ fn import_new_applgrid() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/atlas-atlas-wpm-arxiv-1109.5141-xsec001.appl", output.path().to_str().unwrap(), @@ -1240,7 +1216,6 @@ fn import_grid_comparison_failure() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "--accuracy=0", "../test-data/NJetEvents_0-0-2.tab.gz", @@ -1261,7 +1236,6 @@ fn import_dis_applgrid() 
{ Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-h1-dijets-appl-arxiv-0010054-xsec000.appl", output.path().to_str().unwrap(), @@ -1280,7 +1254,6 @@ fn import_double_hadronic_fastnlo() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "import", "../test-data/applfast-atlas-dijets-fnlo-arxiv-1312.3524-xsec000.tab.gz", output.path().to_str().unwrap(), diff --git a/pineappl_cli/tests/main.rs b/pineappl_cli/tests/main.rs index 66d67802b..0577a05ce 100644 --- a/pineappl_cli/tests/main.rs +++ b/pineappl_cli/tests/main.rs @@ -23,7 +23,7 @@ Commands: write Write a grid modified by various operations Options: - --silence-lhapdf Prevents LHAPDF from printing banners + --lhapdf-banner Allow LHAPDF to print banners --force-positive Forces negative PDF values to zero --allow-extrapolation Allow extrapolation of PDFs outside their region of validity -h, --help Print help diff --git a/pineappl_cli/tests/merge.rs b/pineappl_cli/tests/merge.rs index 8002016b0..134d7aff9 100644 --- a/pineappl_cli/tests/merge.rs +++ b/pineappl_cli/tests/merge.rs @@ -55,7 +55,6 @@ fn default() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF40_nnlo_as_01180", diff --git a/pineappl_cli/tests/orders.rs b/pineappl_cli/tests/orders.rs index 3af7e05e8..a159c3c4c 100644 --- a/pineappl_cli/tests/orders.rs +++ b/pineappl_cli/tests/orders.rs @@ -98,7 +98,6 @@ fn default() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "orders", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", @@ -113,7 +112,6 @@ fn absolute() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "orders", "--absolute", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -129,7 +127,6 @@ fn absolute_integrated() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "orders", "--absolute", "--integrated", @@ 
-146,7 +143,6 @@ fn integrated() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "orders", "--integrated", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -162,7 +158,6 @@ fn normalize_a2_as1a2() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "orders", "--normalize=a2,as1a2", "../test-data/LHCB_WP_7TEV.pineappl.lz4", diff --git a/pineappl_cli/tests/plot.rs b/pineappl_cli/tests/plot.rs index c4b5636be..41c167789 100644 --- a/pineappl_cli/tests/plot.rs +++ b/pineappl_cli/tests/plot.rs @@ -1421,7 +1421,6 @@ fn default() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "plot", "--threads=1", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -1438,7 +1437,6 @@ fn subgrid_pull() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "plot", "--subgrid-pull=0,0,0", "--threads=1", @@ -1456,7 +1454,6 @@ fn drell_yan_afb() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "plot", "--asymmetry", "--threads=1", @@ -1473,7 +1470,6 @@ fn drell_yan_mass_slices() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "plot", "--no-pdf-unc", "--threads=1", diff --git a/pineappl_cli/tests/pull.rs b/pineappl_cli/tests/pull.rs index b08eb6a94..cc605ab08 100644 --- a/pineappl_cli/tests/pull.rs +++ b/pineappl_cli/tests/pull.rs @@ -107,7 +107,6 @@ fn default() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "pull", "--threads=1", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -124,7 +123,6 @@ fn orders() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "pull", "--orders=a2", "--threads=1", @@ -142,7 +140,6 @@ fn cl_90() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "pull", "--cl=90", "--threads=1", @@ -160,7 +157,6 @@ fn limit() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "pull", "--limit=1", "--threads=1", @@ -178,7 +174,6 @@ fn replica0() { 
Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "pull", "--threads=1", "../test-data/LHCB_WP_7TEV.pineappl.lz4", diff --git a/pineappl_cli/tests/uncert.rs b/pineappl_cli/tests/uncert.rs index 100962236..43b465385 100644 --- a/pineappl_cli/tests/uncert.rs +++ b/pineappl_cli/tests/uncert.rs @@ -178,7 +178,6 @@ fn pdf_default() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "uncert", "--pdf", "--threads=1", @@ -195,7 +194,6 @@ fn pdf_cl_90() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "uncert", "--pdf", "--cl=90", @@ -213,7 +211,6 @@ fn pdf_integrated() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "uncert", "--pdf", "--integrated", @@ -231,7 +228,6 @@ fn pdf_orders_a2_as1a2() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "uncert", "--pdf", "--orders=a2,as1a2", @@ -249,7 +245,6 @@ fn scale_abs() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "uncert", "--scale-abs", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -265,7 +260,6 @@ fn scale_cov() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "uncert", "--scale-cov", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -281,7 +275,6 @@ fn scale_cov_9() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "uncert", "--scale-cov=9", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -297,7 +290,6 @@ fn scale_env() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "uncert", "--scale-env", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -313,7 +305,6 @@ fn scale_env_9() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "uncert", "--scale-env=9", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -329,7 +320,6 @@ fn pdf_with_scale_cov() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "uncert", "--pdf-with-scale-cov", "../test-data/LHCB_WP_7TEV.pineappl.lz4", diff --git 
a/pineappl_cli/tests/write.rs b/pineappl_cli/tests/write.rs index cb84dd43a..429bed77a 100644 --- a/pineappl_cli/tests/write.rs +++ b/pineappl_cli/tests/write.rs @@ -256,7 +256,6 @@ fn cc1() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", @@ -285,7 +284,6 @@ fn cc2() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", @@ -314,7 +312,6 @@ fn delete_bins_02_57() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", @@ -343,7 +340,6 @@ fn delete_bins_25() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", @@ -429,7 +425,6 @@ fn merge_bins() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", @@ -478,7 +473,6 @@ fn remap() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", @@ -541,7 +535,6 @@ fn scale_by_bin() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", @@ -571,7 +564,6 @@ fn scale_by_order() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", @@ -600,7 +592,6 @@ fn split_lumi() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", @@ -611,12 +602,7 @@ fn split_lumi() { Command::cargo_bin("pineappl") .unwrap() - .args([ - "--silence-lhapdf", - "read", - "--lumis", - output.path().to_str().unwrap(), - ]) 
+ .args(["read", "--lumis", output.path().to_str().unwrap()]) .assert() .success() .stdout(SPLIT_LUMI_STR); @@ -642,7 +628,6 @@ fn dedup_channels() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "diff", "../test-data/LHCB_WP_7TEV.pineappl.lz4", output.path().to_str().unwrap(), @@ -654,12 +639,7 @@ fn dedup_channels() { Command::cargo_bin("pineappl") .unwrap() - .args([ - "--silence-lhapdf", - "read", - "--lumis", - output.path().to_str().unwrap(), - ]) + .args(["read", "--lumis", output.path().to_str().unwrap()]) .assert() .success() .stdout(CHANNEL_STR); @@ -704,7 +684,6 @@ fn multiple_arguments() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF40_nnlo_as_01180", @@ -748,7 +727,6 @@ fn rewrite_channels() { Command::cargo_bin("pineappl") .unwrap() .args([ - "--silence-lhapdf", "convolute", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", From 22b0effceef3258820522c212d2131fedc9b3707 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 10 Apr 2024 13:54:21 +0200 Subject: [PATCH 021/179] Update `cc` to get rid of useless warnings --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3b5390adb..5b470a6a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -204,9 +204,9 @@ checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "cc" -version = "1.0.89" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0ba8f7aaa012f30d5b2861462f6708eccd49c3c39863fe083a308035f63d723" +checksum = "2678b2e3449475e95b0aa6f9b506a28e61b3dc8996592b983695e8ebb58a8b41" [[package]] name = "cfg-if" From a824508dad60c50e660b5f4eeb909ae2fd439f47 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 16 Apr 2024 17:36:31 +0200 Subject: [PATCH 022/179] Use `convolve` instead of `convolute` in CLI --- CHANGELOG.md | 5 +++ 
pineappl_cli/src/analyze.rs | 6 ++-- pineappl_cli/src/channels.rs | 4 +-- .../src/{convolute.rs => convolve.rs} | 7 ++-- pineappl_cli/src/diff.rs | 10 +++--- pineappl_cli/src/evolve.rs | 4 +-- pineappl_cli/src/export.rs | 2 +- pineappl_cli/src/helpers.rs | 10 +++--- pineappl_cli/src/import.rs | 2 +- pineappl_cli/src/lib.rs | 4 +-- pineappl_cli/src/orders.rs | 4 +-- pineappl_cli/src/plot.rs | 20 ++++++------ pineappl_cli/src/pull.rs | 10 +++--- pineappl_cli/src/uncert.rs | 6 ++-- .../tests/{convolute.rs => convolve.rs} | 28 ++++++++-------- pineappl_cli/tests/evolve.rs | 2 +- pineappl_cli/tests/import.rs | 4 +-- pineappl_cli/tests/main.rs | 32 +++++++++---------- pineappl_cli/tests/merge.rs | 2 +- pineappl_cli/tests/write.rs | 22 ++++++------- 20 files changed, 95 insertions(+), 89 deletions(-) rename pineappl_cli/src/{convolute.rs => convolve.rs} (96%) rename pineappl_cli/tests/{convolute.rs => convolve.rs} (96%) diff --git a/CHANGELOG.md b/CHANGELOG.md index d033a75b5..823d98174 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +- changed the official name of the CLI subcommand `convolute` to `convolve`, + because the latter is the proper verb of 'convolution'. The old name + `convolute` is now an alias of `convolve`, which means both can be used. 
The + methods `Grid::convolute*` are left unchanged and will be renamed in later + version - changed switch `--silence-lhapdf` to `--lhapdf-banner` and suppress LHAPDF's banners by default, unless `--lhapdf-banner` is given - `Grid::evolve` has now been marked deprecated diff --git a/pineappl_cli/src/analyze.rs b/pineappl_cli/src/analyze.rs index 18f9a7301..84479b86d 100644 --- a/pineappl_cli/src/analyze.rs +++ b/pineappl_cli/src/analyze.rs @@ -80,12 +80,12 @@ impl Subcommand for CkfOpts { }; let limit = grid.lumi().len().min(self.limit); - let limits = helpers::convolute_limits(&grid, &[], ConvoluteMode::Normal); + let limits = helpers::convolve_limits(&grid, &[], ConvoluteMode::Normal); let results: Vec<_> = (0..grid.lumi().len()) .map(|lumi| { let mut lumi_mask = vec![false; grid.lumi().len()]; lumi_mask[lumi] = true; - helpers::convolute( + helpers::convolve( &grid, &mut pdf, &[self.order], @@ -101,7 +101,7 @@ impl Subcommand for CkfOpts { .map(|lumi| { let mut lumi_mask = vec![false; grid.lumi().len()]; lumi_mask[lumi] = true; - helpers::convolute( + helpers::convolve( &grid, &mut pdf, &orders_den, diff --git a/pineappl_cli/src/channels.rs b/pineappl_cli/src/channels.rs index 1f415e5ad..b3859dd1a 100644 --- a/pineappl_cli/src/channels.rs +++ b/pineappl_cli/src/channels.rs @@ -74,7 +74,7 @@ impl Subcommand for Opts { } else { limit.min(lumis.len()) }; - let limits = helpers::convolute_limits( + let limits = helpers::convolve_limits( &grid, &[], if self.integrated { @@ -87,7 +87,7 @@ impl Subcommand for Opts { .map(|lumi| { let mut lumi_mask = vec![false; grid.lumi().len()]; lumi_mask[lumi] = true; - helpers::convolute( + helpers::convolve( &grid, &mut pdf, &self.orders, diff --git a/pineappl_cli/src/convolute.rs b/pineappl_cli/src/convolve.rs similarity index 96% rename from pineappl_cli/src/convolute.rs rename to pineappl_cli/src/convolve.rs index 4e26f9666..71f0225ec 100644 --- a/pineappl_cli/src/convolute.rs +++ b/pineappl_cli/src/convolve.rs @@ -9,6 +9,7 @@ 
use std::process::ExitCode; /// Convolutes a PineAPPL grid with a PDF set. #[derive(Parser)] +#[command(alias = "convolute")] pub struct Opts { /// Path of the input grid. #[arg(value_hint = ValueHint::FilePath)] @@ -51,7 +52,7 @@ impl Subcommand for Opts { let mut pdf = helpers::create_pdf(&self.pdfsets[0])?; let bins: Vec<_> = self.bins.iter().cloned().flatten().collect(); - let results = helpers::convolute( + let results = helpers::convolve( &grid, &mut pdf, &self.orders, @@ -65,7 +66,7 @@ impl Subcommand for Opts { }, cfg, ); - let limits = helpers::convolute_limits( + let limits = helpers::convolve_limits( &grid, &bins, if self.integrated { @@ -80,7 +81,7 @@ impl Subcommand for Opts { .iter() .flat_map(|pdfset| { let mut pdf = helpers::create_pdf(pdfset).unwrap(); - helpers::convolute( + helpers::convolve( &grid, &mut pdf, &self.orders, diff --git a/pineappl_cli/src/diff.rs b/pineappl_cli/src/diff.rs index 35a9d545b..04303748c 100644 --- a/pineappl_cli/src/diff.rs +++ b/pineappl_cli/src/diff.rs @@ -137,7 +137,7 @@ impl Subcommand for Opts { title.add_cell(cell); } - let limits1 = helpers::convolute_limits(&grid1, &[], ConvoluteMode::Normal); + let limits1 = helpers::convolve_limits(&grid1, &[], ConvoluteMode::Normal); if self.ignore_orders { let mut cell = cell!(c->"diff"); @@ -146,7 +146,7 @@ impl Subcommand for Opts { table.set_titles(title); - let results1 = helpers::convolute( + let results1 = helpers::convolve( &grid1, &mut pdf, &orders1, @@ -156,7 +156,7 @@ impl Subcommand for Opts { ConvoluteMode::Normal, cfg, ); - let results2 = helpers::convolute( + let results2 = helpers::convolve( &grid2, &mut pdf, &orders2, @@ -202,7 +202,7 @@ impl Subcommand for Opts { let order_results1: Vec> = orders .iter() .map(|&order| { - helpers::convolute( + helpers::convolve( &grid1, &mut pdf, &[order], @@ -217,7 +217,7 @@ impl Subcommand for Opts { let order_results2: Vec> = orders .iter() .map(|&order| { - helpers::convolute( + helpers::convolve( &grid2, &mut pdf, 
&[order], diff --git a/pineappl_cli/src/evolve.rs b/pineappl_cli/src/evolve.rs index 568e78501..e8726b886 100644 --- a/pineappl_cli/src/evolve.rs +++ b/pineappl_cli/src/evolve.rs @@ -543,7 +543,7 @@ impl Subcommand for Opts { let grid = helpers::read_grid(&self.input)?; let mut pdf = helpers::create_pdf(&self.pdfset)?; - let results = helpers::convolute_scales( + let results = helpers::convolve_scales( &grid, &mut pdf, &self.orders, @@ -563,7 +563,7 @@ impl Subcommand for Opts { self.xif, self.use_old_evolve, )?; - let evolved_results = helpers::convolute_scales( + let evolved_results = helpers::convolve_scales( fk_table.grid(), &mut pdf, &[], diff --git a/pineappl_cli/src/export.rs b/pineappl_cli/src/export.rs index a67c8a920..a8cc3f0c0 100644 --- a/pineappl_cli/src/export.rs +++ b/pineappl_cli/src/export.rs @@ -155,7 +155,7 @@ impl Subcommand for Opts { println!("file was converted, but we cannot check the conversion for this type"); } else { let mut pdf = helpers::create_pdf(&self.pdfset)?; - let reference_results = helpers::convolute( + let reference_results = helpers::convolve( &grid, &mut pdf, &orders, diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index c9b814ad3..335c3db30 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -125,7 +125,7 @@ pub enum ConvoluteMode { Normal, } -pub fn convolute_scales( +pub fn convolve_scales( grid: &Grid, lhapdf: &mut Pdf, orders: &[(u32, u32)], @@ -210,7 +210,7 @@ pub fn convolute_scales( } } -pub fn convolute( +pub fn convolve( grid: &Grid, lhapdf: &mut Pdf, orders: &[(u32, u32)], @@ -220,7 +220,7 @@ pub fn convolute( mode: ConvoluteMode, cfg: &GlobalConfiguration, ) -> Vec { - convolute_scales( + convolve_scales( grid, lhapdf, orders, @@ -232,7 +232,7 @@ pub fn convolute( ) } -pub fn convolute_limits(grid: &Grid, bins: &[usize], mode: ConvoluteMode) -> Vec> { +pub fn convolve_limits(grid: &Grid, bins: &[usize], mode: ConvoluteMode) -> Vec> { let limits: Vec<_> = grid 
.bin_info() .limits() @@ -247,7 +247,7 @@ pub fn convolute_limits(grid: &Grid, bins: &[usize], mode: ConvoluteMode) -> Vec } } -pub fn convolute_subgrid( +pub fn convolve_subgrid( grid: &Grid, lhapdf: &mut Pdf, order: usize, diff --git a/pineappl_cli/src/import.rs b/pineappl_cli/src/import.rs index 530d39f86..f546588ac 100644 --- a/pineappl_cli/src/import.rs +++ b/pineappl_cli/src/import.rs @@ -275,7 +275,7 @@ impl Subcommand for Opts { println!("file was converted, but we cannot check the conversion for this type"); } else { let mut pdf = helpers::create_pdf(&self.pdfset)?; - let results = helpers::convolute( + let results = helpers::convolve( &grid, &mut pdf, &[], diff --git a/pineappl_cli/src/lib.rs b/pineappl_cli/src/lib.rs index 87c3ecffb..d02e045b9 100644 --- a/pineappl_cli/src/lib.rs +++ b/pineappl_cli/src/lib.rs @@ -5,7 +5,7 @@ mod analyze; mod channels; -mod convolute; +mod convolve; mod diff; mod evolve; mod export; @@ -50,7 +50,7 @@ pub trait Subcommand { pub enum SubcommandEnum { Analyze(analyze::Opts), Channels(channels::Opts), - Convolute(convolute::Opts), + Convolve(convolve::Opts), Diff(diff::Opts), Evolve(evolve::Opts), Export(export::Opts), diff --git a/pineappl_cli/src/orders.rs b/pineappl_cli/src/orders.rs index e5af3212d..77919d234 100644 --- a/pineappl_cli/src/orders.rs +++ b/pineappl_cli/src/orders.rs @@ -52,7 +52,7 @@ impl Subcommand for Opts { orders.sort(); let orders = orders; - let limits = helpers::convolute_limits( + let limits = helpers::convolve_limits( &grid, &[], if self.integrated { @@ -64,7 +64,7 @@ impl Subcommand for Opts { let results: Vec> = orders .iter() .map(|order| { - helpers::convolute( + helpers::convolve( &grid, &mut pdf, &[(order.alphas, order.alpha)], diff --git a/pineappl_cli/src/plot.rs b/pineappl_cli/src/plot.rs index a1a203bed..2af5fcc6b 100644 --- a/pineappl_cli/src/plot.rs +++ b/pineappl_cli/src/plot.rs @@ -218,7 +218,7 @@ impl Subcommand for Opts { let bins: Vec<_> = (slice.0..slice.1).collect(); let results 
= - helpers::convolute(&grid, &mut pdf, &[], &bins, &[], self.scales, mode, cfg); + helpers::convolve(&grid, &mut pdf, &[], &bins, &[], self.scales, mode, cfg); let qcd_results = { let mut orders = grid.orders().to_vec(); @@ -235,7 +235,7 @@ impl Subcommand for Opts { }) .collect(); - helpers::convolute( + helpers::convolve( &grid, &mut pdf, &qcd_orders, @@ -247,7 +247,7 @@ impl Subcommand for Opts { ) }; - let bin_limits: Vec<_> = helpers::convolute_limits(&grid, &bins, mode) + let bin_limits: Vec<_> = helpers::convolve_limits(&grid, &bins, mode) .into_iter() .map(|limits| limits.last().copied().unwrap()) .collect(); @@ -270,7 +270,7 @@ impl Subcommand for Opts { let mut pdf = helpers::create_pdf(pdfset).unwrap(); let results = - helpers::convolute(&grid, &mut pdf, &[], &bins, &[], 1, mode, cfg); + helpers::convolve(&grid, &mut pdf, &[], &bins, &[], 1, mode, cfg); Ok(vec![results; 3]) } else { @@ -280,7 +280,7 @@ impl Subcommand for Opts { .mk_pdfs()? .into_par_iter() .flat_map(|mut pdf| { - helpers::convolute( + helpers::convolve( &grid, &mut pdf, &[], @@ -379,7 +379,7 @@ impl Subcommand for Opts { grid.has_pdf1(), grid.has_pdf2(), ), - helpers::convolute( + helpers::convolve( &grid, &mut pdf, &[], @@ -523,7 +523,7 @@ impl Subcommand for Opts { let values1: Vec<_> = pdfset1 .par_iter_mut() .map(|pdf| { - let values = helpers::convolute( + let values = helpers::convolve( &grid, pdf, &[], @@ -540,7 +540,7 @@ impl Subcommand for Opts { let values2: Vec<_> = pdfset2 .par_iter_mut() .map(|pdf| { - let values = helpers::convolute( + let values = helpers::convolve( &grid, pdf, &[], @@ -576,9 +576,9 @@ impl Subcommand for Opts { unc1.hypot(unc2) }; - let res1 = helpers::convolute_subgrid(&grid, &mut pdfset1[0], order, bin, lumi, cfg) + let res1 = helpers::convolve_subgrid(&grid, &mut pdfset1[0], order, bin, lumi, cfg) .sum_axis(Axis(0)); - let res2 = helpers::convolute_subgrid(&grid, &mut pdfset2[0], order, bin, lumi, cfg) + let res2 = helpers::convolve_subgrid(&grid, 
&mut pdfset2[0], order, bin, lumi, cfg) .sum_axis(Axis(0)); let subgrid = grid.subgrid(order, bin, lumi); diff --git a/pineappl_cli/src/pull.rs b/pineappl_cli/src/pull.rs index e57a61530..25043c327 100644 --- a/pineappl_cli/src/pull.rs +++ b/pineappl_cli/src/pull.rs @@ -62,11 +62,11 @@ impl Subcommand for Opts { .unwrap(); let limit = grid.lumi().len().min(self.limit); - let bin_limits = helpers::convolute_limits(&grid, &[], ConvoluteMode::Normal); + let bin_limits = helpers::convolve_limits(&grid, &[], ConvoluteMode::Normal); let results1: Vec<_> = pdfset1 .par_iter_mut() .flat_map(|pdf| { - helpers::convolute( + helpers::convolve( &grid, pdf, &self.orders, @@ -81,7 +81,7 @@ impl Subcommand for Opts { let results2: Vec<_> = pdfset2 .par_iter_mut() .flat_map(|pdf| { - helpers::convolute( + helpers::convolve( &grid, pdf, &self.orders, @@ -147,7 +147,7 @@ impl Subcommand for Opts { .map(|lumi| { let mut lumi_mask = vec![false; grid.lumi().len()]; lumi_mask[lumi] = true; - match helpers::convolute( + match helpers::convolve( &grid, &mut pdfset[member], &self.orders, @@ -172,7 +172,7 @@ impl Subcommand for Opts { .map(|lumi| { let mut lumi_mask = vec![false; grid.lumi().len()]; lumi_mask[lumi] = true; - match helpers::convolute( + match helpers::convolve( &grid, pdf, &self.orders, diff --git a/pineappl_cli/src/uncert.rs b/pineappl_cli/src/uncert.rs index 515f87e89..aefe6f867 100644 --- a/pineappl_cli/src/uncert.rs +++ b/pineappl_cli/src/uncert.rs @@ -100,7 +100,7 @@ impl Subcommand for Opts { let grid = helpers::read_grid(&self.input)?; let (set, _) = helpers::create_pdfset(&self.pdfset)?; - let limits = helpers::convolute_limits( + let limits = helpers::convolve_limits( &grid, &[], if self.integrated { @@ -119,7 +119,7 @@ impl Subcommand for Opts { set.mk_pdfs()? 
.into_par_iter() .flat_map(|mut pdf| { - helpers::convolute( + helpers::convolve( &grid, &mut pdf, &self.orders, @@ -148,7 +148,7 @@ impl Subcommand for Opts { .map(|&x| usize::from(x)) .max() .unwrap_or(1); - let scale_results = helpers::convolute( + let scale_results = helpers::convolve( &grid, &mut helpers::create_pdf(&self.pdfset)?, &self.orders, diff --git a/pineappl_cli/tests/convolute.rs b/pineappl_cli/tests/convolve.rs similarity index 96% rename from pineappl_cli/tests/convolute.rs rename to pineappl_cli/tests/convolve.rs index 36265c0f2..9eb93292b 100644 --- a/pineappl_cli/tests/convolute.rs +++ b/pineappl_cli/tests/convolve.rs @@ -2,7 +2,7 @@ use assert_cmd::Command; const HELP_STR: &str = "Convolutes a PineAPPL grid with a PDF set -Usage: pineappl convolute [OPTIONS] ... +Usage: pineappl convolve [OPTIONS] ... Arguments: Path of the input grid @@ -168,7 +168,7 @@ For more information, try '--help'. fn help() { Command::cargo_bin("pineappl") .unwrap() - .args(["convolute", "--help"]) + .args(["convolve", "--help"]) .assert() .success() .stdout(HELP_STR); @@ -179,7 +179,7 @@ fn default() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", ]) @@ -194,7 +194,7 @@ fn force_positive() { .unwrap() .args([ "--force-positive", - "convolute", + "convolve", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", ]) @@ -208,7 +208,7 @@ fn default_multiple_pdfs() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", "324900=NNPDF31_nlo_as_0118_luxqed", @@ -223,7 +223,7 @@ fn multiple_pdfs_with_new_construction() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed/0", "NNPDF31_nlo_as_0118_luxqed/1", @@ -238,7 +238,7 @@ fn multiple_pdfs_with_relabeling() { 
Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", "NNPDF31_nlo_as_0118_luxqed/1=other mc=1.4", @@ -253,7 +253,7 @@ fn two_pdfs_with_order_subset() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", "--orders=a2", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed/0", @@ -269,7 +269,7 @@ fn three_pdfs() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed/0", "NNPDF31_nlo_as_0118_luxqed/1", @@ -285,7 +285,7 @@ fn bins_13567() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", "--bins=1,3,5-7", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", @@ -300,7 +300,7 @@ fn integrated() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", "--integrated", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", @@ -315,7 +315,7 @@ fn integrated_multiple_pdfs() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", "--integrated", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", @@ -331,7 +331,7 @@ fn orders_a2_a3() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", "--orders=a2,a3", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", @@ -346,7 +346,7 @@ fn wrong_orders() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", "--orders=a2a2as2", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", diff --git a/pineappl_cli/tests/evolve.rs b/pineappl_cli/tests/evolve.rs index 710365d16..5173c15b5 100644 --- a/pineappl_cli/tests/evolve.rs +++ b/pineappl_cli/tests/evolve.rs @@ -217,7 +217,7 @@ fn lhcb_wp_7tev() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", 
optimized.path().to_str().unwrap(), "NNPDF40_nlo_as_01180", ]) diff --git a/pineappl_cli/tests/import.rs b/pineappl_cli/tests/import.rs index a016c0d2a..c5736f9e2 100644 --- a/pineappl_cli/tests/import.rs +++ b/pineappl_cli/tests/import.rs @@ -680,7 +680,7 @@ fn import_dis_fktable() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", ]) @@ -858,7 +858,7 @@ fn import_hadronic_fktable() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", ]) diff --git a/pineappl_cli/tests/main.rs b/pineappl_cli/tests/main.rs index 0577a05ce..00d428172 100644 --- a/pineappl_cli/tests/main.rs +++ b/pineappl_cli/tests/main.rs @@ -5,22 +5,22 @@ const HELP_STR: &str = "Read, write, and query PineAPPL grids Usage: pineappl [OPTIONS] Commands: - analyze Perform various analyses with grids - channels Shows the contribution for each partonic channel - convolute Convolutes a PineAPPL grid with a PDF set - diff Compares the numerical content of two grids with each other - evolve Evolve a grid with an evolution kernel operator to an FK table - export Converts PineAPPL grids to APPLgrid files - help Display a manpage for selected subcommands - import Converts APPLgrid/fastNLO/FastKernel files to PineAPPL grids - merge Merges one or more PineAPPL grids together - orders Shows the predictions for all bin for each order separately - plot Creates a matplotlib script plotting the contents of the grid - pull Calculates the pull between two different PDF sets - read Read out information of a grid - subgrids Print information about the internal subgrid types - uncert Calculates scale and PDF uncertainties - write Write a grid modified by various operations + analyze Perform various analyses with grids + channels Shows the contribution for each partonic channel + convolve Convolutes a PineAPPL grid with a PDF set + diff 
Compares the numerical content of two grids with each other + evolve Evolve a grid with an evolution kernel operator to an FK table + export Converts PineAPPL grids to APPLgrid files + help Display a manpage for selected subcommands + import Converts APPLgrid/fastNLO/FastKernel files to PineAPPL grids + merge Merges one or more PineAPPL grids together + orders Shows the predictions for all bin for each order separately + plot Creates a matplotlib script plotting the contents of the grid + pull Calculates the pull between two different PDF sets + read Read out information of a grid + subgrids Print information about the internal subgrid types + uncert Calculates scale and PDF uncertainties + write Write a grid modified by various operations Options: --lhapdf-banner Allow LHAPDF to print banners diff --git a/pineappl_cli/tests/merge.rs b/pineappl_cli/tests/merge.rs index 134d7aff9..0741ae2d8 100644 --- a/pineappl_cli/tests/merge.rs +++ b/pineappl_cli/tests/merge.rs @@ -55,7 +55,7 @@ fn default() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", output.path().to_str().unwrap(), "NNPDF40_nnlo_as_01180", ]) diff --git a/pineappl_cli/tests/write.rs b/pineappl_cli/tests/write.rs index 429bed77a..03058a51e 100644 --- a/pineappl_cli/tests/write.rs +++ b/pineappl_cli/tests/write.rs @@ -256,7 +256,7 @@ fn cc1() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", ]) @@ -284,7 +284,7 @@ fn cc2() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", ]) @@ -312,7 +312,7 @@ fn delete_bins_02_57() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", ]) @@ -340,7 +340,7 @@ fn delete_bins_25() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", 
output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", ]) @@ -425,7 +425,7 @@ fn merge_bins() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", ]) @@ -473,7 +473,7 @@ fn remap() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", ]) @@ -535,7 +535,7 @@ fn scale_by_bin() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", ]) @@ -564,7 +564,7 @@ fn scale_by_order() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", ]) @@ -592,7 +592,7 @@ fn split_lumi() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", ]) @@ -684,7 +684,7 @@ fn multiple_arguments() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", output.path().to_str().unwrap(), "NNPDF40_nnlo_as_01180", ]) @@ -727,7 +727,7 @@ fn rewrite_channels() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolute", + "convolve", output.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", ]) From 37074797e687483a544793c42ab02f33c44b1816 Mon Sep 17 00:00:00 2001 From: juacrumar Date: Mon, 15 Apr 2024 10:31:48 +0200 Subject: [PATCH 023/179] expose bins and lumi --- pineappl_py/pineappl/fk_table.py | 38 ++++++++++++++++++++++++++++++++ pineappl_py/src/fk_table.rs | 10 +++++++-- 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/pineappl_py/pineappl/fk_table.py b/pineappl_py/pineappl/fk_table.py index 813f7a7b6..91a7080c4 100644 --- a/pineappl_py/pineappl/fk_table.py +++ b/pineappl_py/pineappl/fk_table.py @@ -1,3 +1,5 @@ +import numpy as np + from .pineappl import PyFkTable, PyFkAssumptions from .utils import PyWrapper 
@@ -50,6 +52,42 @@ def optimize(self, assumptions = "Nf6Ind"): assumptions = FkAssumptions(assumptions) return self._raw.optimize(assumptions._raw) + def convolute_with_one( + self, + pdg_id, + xfx, + bin_indices=np.array([], dtype=np.uint64), + lumi_mask=np.array([], dtype=bool), + ): + r"""Convolute FkTable with a pdf. + + Parameters + ---------- + pdg_id : int + PDG Monte Carlo ID of the hadronic particle `xfx` is the PDF for + xfx : callable + lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x`-grid + bin_indices : sequence(int) + A list with the indices of the corresponding bins that should be calculated. An + empty list means that all orders should be calculated. + lumi_mask : sequence(bool) + Mask for selecting specific luminosity channels. The value `True` means the + corresponding channel is included. An empty list corresponds to all channels being + enabled. + + Returns + ------- + list(float) : + cross sections for all bins, for each scale-variation tuple (first all bins, then + the scale variation) + """ + return self.raw.convolute_with_one( + pdg_id, + xfx, + np.array(bin_indices), + np.array(lumi_mask), + ) + class FkAssumptions(PyWrapper): """ diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index f03d1c793..17781aba1 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -2,7 +2,7 @@ use pineappl::fk_table::{FkAssumptions, FkTable}; use pineappl::grid::Grid; use pineappl::lumi::LumiCache; -use numpy::{IntoPyArray, PyArray1, PyArray4}; +use numpy::{IntoPyArray, PyArray1, PyArray4, PyReadonlyArray1}; use pyo3::prelude::*; use std::collections::HashMap; @@ -218,13 +218,19 @@ impl PyFkTable { &self, pdg_id: i32, xfx: &PyAny, + bin_indices: PyReadonlyArray1, + lumi_mask: PyReadonlyArray1, py: Python<'py>, ) -> &'py PyArray1 { let mut xfx = |id, x, q2| f64::extract(xfx.call1((id, x, q2)).unwrap()).unwrap(); let mut alphas = |_| 1.0; let mut lumi_cache = LumiCache::with_one(pdg_id, 
&mut xfx, &mut alphas); self.fk_table - .convolute(&mut lumi_cache, &[], &[]) + .convolute( + &mut lumi_cache, + &bin_indices.to_vec().unwrap(), + &lumi_mask.to_vec().unwrap(), + ) .into_pyarray(py) } From 696ad6ffbc4a7febbde44334a8d7905b50e7a0b8 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Tue, 16 Apr 2024 13:26:53 +0200 Subject: [PATCH 024/179] fix: Add signature to provide defaults --- pineappl_py/src/fk_table.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index 17781aba1..a74e26a65 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -214,6 +214,7 @@ impl PyFkTable { /// ------- /// numpy.ndarray(float) : /// cross sections for all bins + #[pyo3(signature = (pdf_id, xfx, bin_indices = pyarray![].readonly(), lumi_cache = pyarray![].readonly()))] pub fn convolute_with_one<'py>( &self, pdg_id: i32, From 013025db7e29756d85117aa0acc2bd179336f4bd Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Tue, 16 Apr 2024 14:04:00 +0200 Subject: [PATCH 025/179] fix: Provide default values using options --- pineappl_py/src/fk_table.rs | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index a74e26a65..946cd6803 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -214,24 +214,30 @@ impl PyFkTable { /// ------- /// numpy.ndarray(float) : /// cross sections for all bins - #[pyo3(signature = (pdf_id, xfx, bin_indices = pyarray![].readonly(), lumi_cache = pyarray![].readonly()))] + #[pyo3(signature = (pdg_id, xfx, bin_indices = None, lumi_mask= None))] pub fn convolute_with_one<'py>( &self, pdg_id: i32, xfx: &PyAny, - bin_indices: PyReadonlyArray1, - lumi_mask: PyReadonlyArray1, + bin_indices: Option>, + lumi_mask: Option>, py: Python<'py>, ) -> &'py PyArray1 { let mut xfx = |id, x, q2| f64::extract(xfx.call1((id, x, q2)).unwrap()).unwrap(); let mut alphas = 
|_| 1.0; let mut lumi_cache = LumiCache::with_one(pdg_id, &mut xfx, &mut alphas); + let bin_indices = if let Some(b) = bin_indices { + b.to_vec().unwrap() + } else { + vec![] + }; + let lumi_mask = if let Some(l) = lumi_mask { + l.to_vec().unwrap() + } else { + vec![] + }; self.fk_table - .convolute( - &mut lumi_cache, - &bin_indices.to_vec().unwrap(), - &lumi_mask.to_vec().unwrap(), - ) + .convolute(&mut lumi_cache, &bin_indices, &lumi_mask) .into_pyarray(py) } From 9f14f51e7d918a308a99ffaa5471ce621cc3df65 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Tue, 16 Apr 2024 14:08:24 +0200 Subject: [PATCH 026/179] fix: Simplify default application --- pineappl_py/src/fk_table.rs | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index 946cd6803..7ab06c778 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -226,18 +226,12 @@ impl PyFkTable { let mut xfx = |id, x, q2| f64::extract(xfx.call1((id, x, q2)).unwrap()).unwrap(); let mut alphas = |_| 1.0; let mut lumi_cache = LumiCache::with_one(pdg_id, &mut xfx, &mut alphas); - let bin_indices = if let Some(b) = bin_indices { - b.to_vec().unwrap() - } else { - vec![] - }; - let lumi_mask = if let Some(l) = lumi_mask { - l.to_vec().unwrap() - } else { - vec![] - }; self.fk_table - .convolute(&mut lumi_cache, &bin_indices, &lumi_mask) + .convolute( + &mut lumi_cache, + &bin_indices.map_or(vec![], |b| b.to_vec().unwrap()), + &lumi_mask.map_or(vec![], |l| l.to_vec().unwrap()), + ) .into_pyarray(py) } From 0d9876fb5bc857beddf4eb4eec323e4cf037d509 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Tue, 16 Apr 2024 14:09:52 +0200 Subject: [PATCH 027/179] refactor: Use Rust version of convolute_with_one --- pineappl_py/pineappl/fk_table.py | 38 -------------------------------- 1 file changed, 38 deletions(-) diff --git a/pineappl_py/pineappl/fk_table.py b/pineappl_py/pineappl/fk_table.py index 
91a7080c4..813f7a7b6 100644 --- a/pineappl_py/pineappl/fk_table.py +++ b/pineappl_py/pineappl/fk_table.py @@ -1,5 +1,3 @@ -import numpy as np - from .pineappl import PyFkTable, PyFkAssumptions from .utils import PyWrapper @@ -52,42 +50,6 @@ def optimize(self, assumptions = "Nf6Ind"): assumptions = FkAssumptions(assumptions) return self._raw.optimize(assumptions._raw) - def convolute_with_one( - self, - pdg_id, - xfx, - bin_indices=np.array([], dtype=np.uint64), - lumi_mask=np.array([], dtype=bool), - ): - r"""Convolute FkTable with a pdf. - - Parameters - ---------- - pdg_id : int - PDG Monte Carlo ID of the hadronic particle `xfx` is the PDF for - xfx : callable - lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x`-grid - bin_indices : sequence(int) - A list with the indices of the corresponding bins that should be calculated. An - empty list means that all orders should be calculated. - lumi_mask : sequence(bool) - Mask for selecting specific luminosity channels. The value `True` means the - corresponding channel is included. An empty list corresponds to all channels being - enabled. 
- - Returns - ------- - list(float) : - cross sections for all bins, for each scale-variation tuple (first all bins, then - the scale variation) - """ - return self.raw.convolute_with_one( - pdg_id, - xfx, - np.array(bin_indices), - np.array(lumi_mask), - ) - class FkAssumptions(PyWrapper): """ From 9557d635cb8e2b38446b5c1f23d1ffb7acab16fd Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Tue, 16 Apr 2024 14:17:28 +0200 Subject: [PATCH 028/179] feat: Add fk table constructor --- pineappl_py/src/fk_table.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index 7ab06c778..40eb5983d 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -11,6 +11,8 @@ use std::io::BufReader; use std::path::PathBuf; use std::str::FromStr; +use crate::grid::PyGrid; + /// PyO3 wrapper to :rustdoc:`pineappl::fk_table::FkTable ` /// /// *Usage*: `pineko`, `yadism` @@ -38,6 +40,13 @@ impl PyFkAssumptions { #[pymethods] impl PyFkTable { + #[new] + pub fn new(grid: PyGrid) -> Self { + Self { + fk_table: FkTable::try_from(grid.grid), + } + } + #[staticmethod] pub fn read(path: PathBuf) -> Self { Self { From 2b22b3df8be69ee2fb5689f75fe86d4ce02f88fa Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Tue, 16 Apr 2024 14:17:49 +0200 Subject: [PATCH 029/179] test: Copy convolution test for fk table from grid one --- pineappl_py/tests/test_fk_table.py | 40 ++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 pineappl_py/tests/test_fk_table.py diff --git a/pineappl_py/tests/test_fk_table.py b/pineappl_py/tests/test_fk_table.py new file mode 100644 index 000000000..88fdddb3f --- /dev/null +++ b/pineappl_py/tests/test_fk_table.py @@ -0,0 +1,40 @@ +import numpy as np + +import pineappl + + +class TestFkTable: + def fake_grid(self, bins=None): + lumis = [pineappl.lumi.LumiEntry([(1, 21, 0.1)])] + orders = [pineappl.grid.Order(3, 0, 0, 0)] + bin_limits = np.array([1e-7, 
1e-3, 1] if bins is None else bins, dtype=float) + subgrid_params = pineappl.subgrid.SubgridParams() + g = pineappl.grid.Grid.create(lumis, orders, bin_limits, subgrid_params) + return g + + def test_convolute_with_one(self): + g = self.fake_grid() + + # DIS grid + xs = np.linspace(0.5, 1.0, 5) + vs = xs.copy() + subgrid = pineappl.import_only_subgrid.ImportOnlySubgridV1( + vs[np.newaxis, :, np.newaxis], + np.array([90.0]), + xs, + np.array([1.0]), + ) + g.set_subgrid(0, 0, 0, subgrid) + fk = pineappl.fk_table.FkTable(g) + np.testing.assert_allclose( + fk.convolute_with_one(2212, lambda pid, x, q2: 0.0, lambda q2: 0.0), + [0.0] * 2, + ) + np.testing.assert_allclose( + fk.convolute_with_one(2212, lambda pid, x, q2: 1, lambda q2: 1.0), + [5e6 / 9999, 0.0], + ) + np.testing.assert_allclose( + fk.convolute_with_one(2212, lambda pid, x, q2: 1, lambda q2: 2.0), + [2**3 * 5e6 / 9999, 0.0], + ) From bc1de5b0669325ba8dcd8a4844373fe4acc302b6 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Tue, 16 Apr 2024 14:18:44 +0200 Subject: [PATCH 030/179] fix: Add forgotten unwrap --- pineappl_py/src/fk_table.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index 40eb5983d..f561b5353 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -43,7 +43,7 @@ impl PyFkTable { #[new] pub fn new(grid: PyGrid) -> Self { Self { - fk_table: FkTable::try_from(grid.grid), + fk_table: FkTable::try_from(grid.grid).unwrap(), } } From 8f5f52bb0e81e447c0796a0a2883f5879223f674 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Apr 2024 19:49:56 +0000 Subject: [PATCH 031/179] Bump rustls from 0.22.3 to 0.22.4 Bumps [rustls](https://github.com/rustls/rustls) from 0.22.3 to 0.22.4. 
- [Release notes](https://github.com/rustls/rustls/releases) - [Changelog](https://github.com/rustls/rustls/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustls/rustls/compare/v/0.22.3...v/0.22.4) --- updated-dependencies: - dependency-name: rustls dependency-type: indirect ... Signed-off-by: dependabot[bot] --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5b470a6a2..e0c0f6f4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1672,9 +1672,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.3" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", "ring", From b3239852355a0cbb54113137b4e5c0b0f01e2146 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 23 Apr 2024 09:58:01 +0200 Subject: [PATCH 032/179] Merge some fixes from `fix-macos-cli-generation` branch --- Cargo.lock | 1 + pineappl_applgrid/build.rs | 95 ++++++++++++++++++++++-------- pineappl_applgrid/src/applgrid.cpp | 4 +- pineappl_fastnlo/Cargo.toml | 1 + pineappl_fastnlo/build.rs | 16 +++++ xtask/src/install_manpages.rs | 11 ++-- 6 files changed, 94 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e0c0f6f4e..2a38a6d0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1328,6 +1328,7 @@ version = "0.7.4-rc.1" dependencies = [ "cxx", "cxx-build", + "pkg-config", "thiserror", ] diff --git a/pineappl_applgrid/build.rs b/pineappl_applgrid/build.rs index c30b2f9a5..9eb5facd3 100644 --- a/pineappl_applgrid/build.rs +++ b/pineappl_applgrid/build.rs @@ -6,6 +6,14 @@ use std::env; use std::path::Path; use std::process::Command; +fn conditional_std<'a>(build: &'a mut Build, std: Option<&str>) -> &'a mut Build { + if let Some(std) = std { + build.std(std) + } else { + build + } +} + fn main() { let 
version = String::from_utf8( Command::new("applgrid-config") @@ -16,7 +24,14 @@ fn main() { ) .unwrap(); - if version.trim() != "1.6.27" { + let tested_versions = [ + "1.6.27", "1.6.28", "1.6.29", "1.6.30", "1.6.31", "1.6.32", "1.6.35", + ]; + + if !tested_versions + .iter() + .any(|&tested| tested == version.trim()) + { println!( "cargo:warning=found APPLgrid version {}, which has not been tested", version.trim() @@ -34,22 +49,44 @@ fn main() { println!("cargo:rustc-link-search={}", lib_path.trim()); - let include_path = String::from_utf8( + let appl_igrid_dir = env::var("APPL_IGRID_DIR").unwrap_or_else(|_| { + Path::new( + &String::from_utf8( + Command::new("applgrid-config") + .arg("--incdir") + .output() + .expect("did not find `applgrid-config`, please install APPLgrid") + .stdout, + ) + .unwrap(), + ) + .join("appl_grid") + .to_str() + .unwrap() + .to_owned() + }); + + let cxx_flags: Vec<_> = String::from_utf8( Command::new("applgrid-config") - .arg("--incdir") + .arg("--cxxflags") .output() .expect("did not find `applgrid-config`, please install APPLgrid") .stdout, ) - .unwrap(); + .unwrap() + .split_ascii_whitespace() + .map(ToOwned::to_owned) + .collect(); - let appl_igrid_dir = env::var("APPL_IGRID_DIR").unwrap_or_else(|_| { - Path::new(&include_path) - .join("appl_grid") - .to_str() - .unwrap() - .to_owned() - }); + let include_dirs: Vec<_> = cxx_flags + .iter() + .filter_map(|token| token.strip_prefix("-I")) + .collect(); + + let std = cxx_flags + .iter() + .filter_map(|token| token.strip_prefix("-std=")) + .last(); let libs = String::from_utf8( Command::new("applgrid-config") @@ -82,16 +119,19 @@ fn main() { } } - Build::new() - .cpp(true) - .file("src/check_appl_igrid.cpp") - .include(include_path.trim()) - .include(&appl_igrid_dir) - .try_compile("appl_igrid") - .expect( - "could not find file `appl_igrid.h`, please set the environment variable \ + conditional_std( + Build::new() + .cpp(true) + .file("src/check_appl_igrid.cpp") + 
.includes(&include_dirs) + .include(&appl_igrid_dir), + std, + ) + .try_compile("appl_igrid") + .expect( + "could not find file `appl_igrid.h`, please set the environment variable \ `APPL_IGRID_DIR` to the directory containing it", - ); + ); println!("cargo:rerun-if-env-changed=APPL_IGRID_DIR"); @@ -105,12 +145,15 @@ fn main() { println!("cargo:rustc-link-lib={link_modifier}{lib}"); } - cxx_build::bridge("src/lib.rs") - .file("src/applgrid.cpp") - .include(include_path.trim()) - .include(appl_igrid_dir) - .includes(lhapdf.include_paths) - .compile("appl-bridge"); + conditional_std( + cxx_build::bridge("src/lib.rs") + .file("src/applgrid.cpp") + .includes(&include_dirs) + .include(appl_igrid_dir) + .includes(lhapdf.include_paths), + std, + ) + .compile("appl-bridge"); println!("cargo:rerun-if-changed=src/lib.rs"); println!("cargo:rerun-if-changed=src/applgrid.cpp"); diff --git a/pineappl_applgrid/src/applgrid.cpp b/pineappl_applgrid/src/applgrid.cpp index 1d31949c8..592a16d85 100644 --- a/pineappl_applgrid/src/applgrid.cpp +++ b/pineappl_applgrid/src/applgrid.cpp @@ -233,7 +233,7 @@ struct appl_igrid_m_reweight friend type access(appl_igrid_m_reweight); }; -template class access_private_member_variable; +template struct access_private_member_variable; // we need access to `m_reweight`, but it is private bool igrid_m_reweight(appl::igrid const& igrid) @@ -254,7 +254,7 @@ struct appl_grid_m_grids friend type access(appl_grid_m_grids); }; -template class access_private_member_variable; +template struct access_private_member_variable; appl::igrid& grid_get_igrid(appl::grid& grid, std::size_t order, std::size_t bin) { diff --git a/pineappl_fastnlo/Cargo.toml b/pineappl_fastnlo/Cargo.toml index 6d3e2a2a4..de1694c44 100644 --- a/pineappl_fastnlo/Cargo.toml +++ b/pineappl_fastnlo/Cargo.toml @@ -21,6 +21,7 @@ thiserror = "1.0.30" [build-dependencies] cxx-build = { version = "1.0.65" } +pkg-config = "0.3" [features] static = [] diff --git a/pineappl_fastnlo/build.rs 
b/pineappl_fastnlo/build.rs index c384b86ee..040b9c8a0 100644 --- a/pineappl_fastnlo/build.rs +++ b/pineappl_fastnlo/build.rs @@ -1,6 +1,7 @@ #![allow(missing_docs)] use std::process::Command; +use pkg_config::Config; fn main() { let fnlo_lib_path = String::from_utf8( @@ -24,6 +25,17 @@ fn main() { .unwrap(); let link_modifier = if cfg!(feature = "static") { + let zlib = Config::new().probe("zlib").unwrap(); + + // for some reason `libz.a` isn't found, although `libz.so` is + for link_path in zlib.link_paths { + println!("cargo:rustc-link-search={}", link_path.to_str().unwrap()); + } + + for lib in zlib.libs { + println!("cargo:rustc-link-lib=static={lib}"); + } + "static=" } else { "" @@ -31,9 +43,13 @@ fn main() { println!("cargo:rustc-link-lib={link_modifier}fastnlotoolkit"); + let lhapdf = Config::new().atleast_version("6").probe("lhapdf").unwrap(); + cxx_build::bridge("src/lib.rs") .file("src/fastnlo.cpp") .include(fnlo_include_path.trim()) + .includes(lhapdf.include_paths) + .std("c++11") // apparently not supported by MSVC, but fastNLO probably can't be compiled on Windows .compile("fnlo-bridge"); println!("cargo:rerun-if-changed=src/lib.rs"); diff --git a/xtask/src/install_manpages.rs b/xtask/src/install_manpages.rs index e75223faf..62c9aea1f 100644 --- a/xtask/src/install_manpages.rs +++ b/xtask/src/install_manpages.rs @@ -41,14 +41,13 @@ fn render_manpages(path: &Path, cmd: &clap::Command, version: &str) -> Result<() impl Subcommand for Opts { fn run(&self) -> Result<()> { let cmd = pineappl_cli::Opts::command(); - let version: String = cmd + let version = cmd .get_version() // UNWRAP: the command must have a version - .unwrap() - .strip_prefix('v') - // UNWRAP: the version string must start with a 'v' - .unwrap() - .to_string(); + .unwrap(); + + // TODO: why does the version string not start with a 'v' on GitHub? 
+ let version = version.strip_prefix('v').unwrap_or(version).to_string(); let mut cmd = cmd.version(version.clone()); // this is needed so subcommands return the correct `bin_name` From 89b4315736c60edc29e84cdcfe2b9ea8b624c2ef Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 24 Apr 2024 16:32:24 +0200 Subject: [PATCH 033/179] Add `--dont-sort` flag to `pineappl channels` --- CHANGELOG.md | 2 ++ pineappl_cli/src/channels.rs | 20 +++++++++--- pineappl_cli/tests/channels.rs | 59 ++++++++++++++++++++++++++++++++++ 3 files changed, 76 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 823d98174..2e1ff03fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - added `Grid::evolve_with_slice_iter`, `AlphasTable` and `OperatorSliceInfo`, which define a new interface supporting very large evolution kernels that have been introduced in EKO v0.13. This interface will replace `Grid::evolve` +- added `--dont-sort` switch to `pineappl channels`, which displays the channel + sizes orderd by channel index (instead of channel size) ### Changed diff --git a/pineappl_cli/src/channels.rs b/pineappl_cli/src/channels.rs index b3859dd1a..7d04111b2 100644 --- a/pineappl_cli/src/channels.rs +++ b/pineappl_cli/src/channels.rs @@ -50,6 +50,9 @@ pub struct Opts { value_parser = helpers::parse_order )] orders: Vec<(u32, u32)>, + /// Do not sort the channels according to their size. + #[arg(long)] + dont_sort: bool, /// Set the number of fractional digits shown for absolute numbers. 
#[arg(default_value_t = 7, long, value_name = "ABS")] digits_abs: usize, @@ -139,8 +142,12 @@ impl Subcommand for Opts { .map(|(lumi, vec)| (lumi, vec[bin])) .collect(); - // sort using the absolute value in descending order - values.sort_unstable_by(|(_, left), (_, right)| right.abs().total_cmp(&left.abs())); + if !self.dont_sort { + // sort using the absolute value in descending order + values.sort_unstable_by(|(_, left), (_, right)| { + right.abs().total_cmp(&left.abs()) + }); + } for (lumi, value) in values .iter() @@ -158,9 +165,12 @@ impl Subcommand for Opts { .map(|(lumi, vec)| (lumi, vec[bin] / sum * 100.0)) .collect(); - // sort using the absolute value in descending order - percentages - .sort_unstable_by(|(_, left), (_, right)| right.abs().total_cmp(&left.abs())); + if !self.dont_sort { + // sort using the absolute value in descending order + percentages.sort_unstable_by(|(_, left), (_, right)| { + right.abs().total_cmp(&left.abs()) + }); + } for (lumi, percentage) in percentages .iter() diff --git a/pineappl_cli/tests/channels.rs b/pineappl_cli/tests/channels.rs index 2ee7293b0..06a30f08a 100644 --- a/pineappl_cli/tests/channels.rs +++ b/pineappl_cli/tests/channels.rs @@ -14,6 +14,7 @@ Options: -i, --integrated Show integrated numbers (without bin widths) instead of differential ones --lumis Show only the listed channels -o, --orders Select orders manually + --dont-sort Do not sort the channels according to their size --digits-abs Set the number of fractional digits shown for absolute numbers [default: 7] --digits-rel Set the number of fractional digits shown for relative numbers [default: 2] -h, --help Print help @@ -104,6 +105,33 @@ const ORDERS_A2_AS1A2_STR: &str = "b etal l size l size l size l size 7 4 4.5 0 115.77 3 -8.54 1 -7.23 2 0.00 4 0.00 "; +const DONT_SORT_ABSOLUTE_STR: &str = + "b etal l dsig/detal l dsig/detal l dsig/detal l dsig/detal l dsig/detal + [] [pb] [pb] [pb] [pb] [pb] 
+-+----+----+-+-----------+-+------------+-+------------+-+------------+-+------------ +0 2 2.25 0 8.4002759e2 1 -2.4969360e1 2 8.8565923e-2 3 -6.0727462e1 4 1.7176328e-1 +1 2.25 2.5 0 7.7448295e2 1 -2.3319483e1 2 8.3802762e-2 3 -6.1109036e1 4 1.4518685e-1 +2 2.5 2.75 0 6.7891182e2 1 -2.1436419e1 2 4.7074109e-2 3 -5.7385834e1 4 1.1534278e-1 +3 2.75 3 0 5.5341626e2 1 -1.8639887e1 2 5.8147927e-2 3 -4.9385114e1 4 7.2943823e-2 +4 3 3.25 0 4.1562095e2 1 -1.5462782e1 2 3.4452663e-2 3 -3.8287410e1 4 4.9352954e-2 +5 3.25 3.5 0 2.8427837e2 1 -1.1889878e1 2 1.8643688e-2 3 -2.6578788e1 4 3.8564621e-2 +6 3.5 4 0 1.3470473e2 1 -6.7199873e0 2 1.3223117e-2 3 -1.2142190e1 4 1.2734974e-2 +7 4 4.5 0 3.1886258e1 1 -2.0056686e0 2 1.9334685e-3 3 -2.3686722e0 4 3.4154203e-3 +"; + +const DONT_SORT_STR: &str = "b etal l size l size l size l size l size + [] [%] [%] [%] [%] [%] +-+----+----+-+------+-+-----+-+----+-+------+-+---- +0 2 2.25 0 111.32 1 -3.31 2 0.01 3 -8.05 4 0.02 +1 2.25 2.5 0 112.20 1 -3.38 2 0.01 3 -8.85 4 0.02 +2 2.5 2.75 0 113.10 1 -3.57 2 0.01 3 -9.56 4 0.02 +3 2.75 3 0 113.98 1 -3.84 2 0.01 3 -10.17 4 0.02 +4 3 3.25 0 114.83 1 -4.27 2 0.01 3 -10.58 4 0.01 +5 3.25 3.5 0 115.62 1 -4.84 2 0.01 3 -10.81 4 0.02 +6 3.5 4 0 116.26 1 -5.80 2 0.01 3 -10.48 4 0.01 +7 4 4.5 0 115.88 1 -7.29 2 0.01 3 -8.61 4 0.01 +"; + #[test] fn help() { Command::cargo_bin("pineappl") @@ -218,3 +246,34 @@ fn orders_a2_as1a2() { .success() .stdout(ORDERS_A2_AS1A2_STR); } + +#[test] +fn dont_sort_absolute() { + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "channels", + "--absolute", + "--dont-sort", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(DONT_SORT_ABSOLUTE_STR); +} + +#[test] +fn dont_sort() { + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "channels", + "--dont-sort", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(DONT_SORT_STR); +} 
From f0c8aaa3152c1d4d87832db54a2e9ff4f40f2d6e Mon Sep 17 00:00:00 2001 From: janw20 Date: Thu, 18 Apr 2024 11:09:39 +0200 Subject: [PATCH 034/179] Add new type `PackedArray` --- pineappl/src/lib.rs | 1 + pineappl/src/packed_array.rs | 825 +++++++++++++++++++++++++++++++++++ 2 files changed, 826 insertions(+) create mode 100644 pineappl/src/packed_array.rs diff --git a/pineappl/src/lib.rs b/pineappl/src/lib.rs index 42879684f..6c1479b13 100644 --- a/pineappl/src/lib.rs +++ b/pineappl/src/lib.rs @@ -8,6 +8,7 @@ pub mod evolution; pub mod fk_table; pub mod grid; pub mod import_only_subgrid; +pub mod packed_array; pub mod lagrange_subgrid; pub mod lumi; pub mod ntuple_subgrid; diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs new file mode 100644 index 000000000..061666563 --- /dev/null +++ b/pineappl/src/packed_array.rs @@ -0,0 +1,825 @@ +use std::ops::{Index, IndexMut, MulAssign}; +use std::{mem, vec}; + +use ndarray::ArrayView3; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Deserialize, Serialize)] +pub struct PackedArray { + entries: Vec, + start_indices: Vec, + lengths: Vec, + shape: Vec, +} + +impl PackedArray { + #[must_use] + pub fn new(shape: [usize; D]) -> Self { + Self { + // entries: BTreeMap::new(), + entries: vec![], + start_indices: vec![], + lengths: vec![], + shape: shape.to_vec(), + } + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.entries.is_empty() + } + + pub fn shape(&self) -> &[usize] { + &self.shape + } + + pub fn clear(&mut self) { + self.entries.clear(); + self.start_indices.clear(); + self.lengths.clear(); + } + + #[must_use] + pub fn overhead(&self) -> usize { + ((self.start_indices.len() + self.lengths.len()) * mem::size_of::()) + / mem::size_of::() + } + + #[must_use] + pub fn explicit_zeros(&self) -> usize { + self.entries.iter().filter(|x| **x == T::default()).count() + } + + pub fn non_zeros(&self) -> usize { + self.entries.iter().filter(|x| **x != T::default()).count() + } + + pub fn 
indexed_iter(&self) -> impl Iterator + '_ { + self.start_indices + .iter() + .zip(&self.lengths) + .flat_map(|(&start_index, &length)| { + (start_index..(start_index + length)).map(|i| unravel_index(i, &self.shape)) + }) + .zip(&self.entries) + .filter(|&(_, entry)| *entry != Default::default()) + .map(|(indices, entry)| (indices, *entry)) + } +} + +impl, const D: usize> MulAssign for PackedArray { + fn mul_assign(&mut self, rhs: T) { + self.entries.iter_mut().for_each(|x| *x *= rhs); + } +} + +impl PackedArray { + pub fn from_ndarray(array: ArrayView3, xstart: usize, xsize: usize) -> Self { + let shape = array.shape(); + + let mut result = Self::new([xsize, shape[1], shape[2]]); + + for ((i, j, k), &entry) in array + .indexed_iter() + .filter(|(_, &entry)| entry != Default::default()) + { + result[[i + xstart, j, k]] = entry; + } + + result + } +} + +fn ravel_multi_index(indices: &[usize], dimensions: &[usize]) -> usize { + assert_eq!(indices.len(), dimensions.len()); + + indices + .iter() + .skip(1) + .zip(dimensions.iter().skip(1)) + .fold(indices[0], |acc, (i, d)| acc * d + i) +} + +fn unravel_index(index: usize, dimensions: &[usize]) -> [usize; D] { + assert!(index < dimensions.iter().product()); + let mut indices = [0; D]; + indices + .iter_mut() + .rev() + .zip(dimensions.iter().rev()) + .fold(index, |acc, (i, d)| { + *i = acc % d; + acc / d + }); + indices +} + +impl Index<[usize; D]> for PackedArray { + type Output = T; + + fn index(&self, index: [usize; D]) -> &Self::Output { + debug_assert_eq!(index.len(), self.shape.len()); + assert!( + index.iter().zip(self.shape.iter()).all(|(&i, &d)| i < d), + "index {:?} is out of bounds for array of shape {:?}", + index, + self.shape + ); + + let raveled_index = ravel_multi_index::(&index, &self.shape); + let point = self.start_indices.partition_point(|&i| i <= raveled_index); + + assert!( + point > 0, + "entry at index {index:?} is implicitly set to the default value" + ); + + let start_index = 
self.start_indices[point - 1]; + let length = self.lengths[point - 1]; + + let point_entries = + self.lengths.iter().take(point - 1).sum::() + raveled_index - start_index; + + assert!( + raveled_index < (start_index + length), + "entry at index {index:?} is implicitly set to the default value" + ); + + &self.entries[point_entries] + } +} + +// TODO: implement axis swapping optimization +impl IndexMut<[usize; D]> + for PackedArray +{ + fn index_mut(&mut self, index: [usize; D]) -> &mut Self::Output { + debug_assert_eq!(index.len(), self.shape.len()); + assert!( + index.iter().zip(self.shape.iter()).all(|(&i, &d)| i < d), + "index {:?} is out of bounds for array of shape {:?}", + index, + self.shape + ); + + let raveled_index = ravel_multi_index::(&index, &self.shape); + + // if self.entries.is_empty() { + // self.start_indices.push(raveled_index); + // self.lengths.push(1); + // self.entries.push(Default::default()); + + // return &mut self.entries[0]; + // } + + // the closest start_index that is greater than raveled_index + let point = self.start_indices.partition_point(|&i| i <= raveled_index); + + // the index that is stored in `point`, translated to the indices of the entries + let point_entries = self.lengths.iter().take(point).sum::(); + + // println!("test 1"); + + // maximum distance for merging regions + let threshold_distance = 2; + + if point > 0 { + let start_index = self.start_indices[point - 1]; + let length = self.lengths[point - 1]; + + if raveled_index < start_index + length { + // println!("test 2"); + return &mut self.entries[point_entries - length + raveled_index - start_index]; + } else if raveled_index < start_index + length + threshold_distance { + // println!("test 3"); + + let distance = raveled_index - (start_index + length) + 1; + // println!("distance: {}", distance); + self.lengths[point - 1] += distance; + for _ in 0..(distance) { + self.entries.insert(point_entries, Default::default()); + } + + if let Some(start_index_next) = 
self.start_indices.get(point) { + if raveled_index + threshold_distance >= *start_index_next { + // println!("test 4"); + let distance_next = start_index_next - raveled_index; + + self.lengths[point - 1] += distance_next - 1 + self.lengths[point]; + self.lengths.remove(point); + self.start_indices.remove(point); + for _ in 0..(distance_next - 1) { + self.entries.insert(point_entries, Default::default()); + } + } + } + + return &mut self.entries[point_entries - 1 + distance]; + } else if let Some(start_index_next) = self.start_indices.get(point) { + if raveled_index + threshold_distance >= *start_index_next { + // println!("test 5"); + let distance = start_index_next - raveled_index; + + self.start_indices[point] = raveled_index; + self.lengths[point] += distance; + for _ in 0..distance { + self.entries.insert(point_entries, Default::default()); + } + return &mut self.entries[point_entries]; + } + } + } + + // println!("test 6"); + + // in the most general case we have to insert a new start_index + self.start_indices.insert(point, raveled_index); + self.lengths.insert(point, 1); + self.entries.insert(point_entries, Default::default()); + + &mut self.entries[point_entries] + } +} + +// impl IntoIterator for &JaggedArray { +// type Item = (Vec, T); +// type IntoIter = Iterator, T)>; + +// fn into_iter(self) -> Self::IntoIter { + +// } +// } + +#[cfg(test)] +mod tests { + use ndarray::Array3; + use std::mem; + + use super::*; + + #[test] + fn unravel_index() { + assert_eq!(super::unravel_index(0, &[3, 2]), [0, 0]); + assert_eq!(super::unravel_index(1, &[3, 2]), [0, 1]); + assert_eq!(super::unravel_index(2, &[3, 2]), [1, 0]); + assert_eq!(super::unravel_index(3, &[3, 2]), [1, 1]); + assert_eq!(super::unravel_index(4, &[3, 2]), [2, 0]); + assert_eq!(super::unravel_index(5, &[3, 2]), [2, 1]); + } + + #[test] + fn ravel_multi_index() { + assert_eq!(super::ravel_multi_index::<2>(&[0, 0], &[3, 2]), 0); + assert_eq!(super::ravel_multi_index::<2>(&[0, 1], &[3, 2]), 1); + 
assert_eq!(super::ravel_multi_index::<2>(&[1, 0], &[3, 2]), 2); + assert_eq!(super::ravel_multi_index::<2>(&[1, 1], &[3, 2]), 3); + assert_eq!(super::ravel_multi_index::<2>(&[2, 0], &[3, 2]), 4); + assert_eq!(super::ravel_multi_index::<2>(&[2, 1], &[3, 2]), 5); + } + + #[test] + fn index() { + let mut a = PackedArray::::new([4, 2]); + + a[[0, 0]] = 1.0; + println!( + "entries: {:?}, start_indices: {:?}, lengths: {:?}", + a.entries, a.start_indices, a.lengths + ); + assert_eq!(a[[0, 0]], 1.0); + assert_eq!(a.entries, vec![1.0]); + assert_eq!(a.start_indices, vec![0]); + assert_eq!(a.lengths, vec![1]); + + a[[3, 0]] = 2.0; + println!( + "entries: {:?}, start_indices: {:?}, lengths: {:?}", + a.entries, a.start_indices, a.lengths + ); + assert_eq!(a[[0, 0]], 1.0); + assert_eq!(a[[3, 0]], 2.0); + assert_eq!(a.entries, vec![1.0, 2.0]); + assert_eq!(a.start_indices, vec![0, 6]); + assert_eq!(a.lengths, vec![1, 1]); + + a[[3, 1]] = 3.0; + println!( + "entries: {:?}, start_indices: {:?}, lengths: {:?}", + a.entries, a.start_indices, a.lengths + ); + + assert_eq!(a[[0, 0]], 1.0); + assert_eq!(a[[3, 0]], 2.0); + assert_eq!(a[[3, 1]], 3.0); + assert_eq!(a.entries, vec![1.0, 2.0, 3.0]); + assert_eq!(a.start_indices, vec![0, 6]); + assert_eq!(a.lengths, vec![1, 2]); + + a[[2, 0]] = 3.5; + println!( + "entries: {:?}, start_indices: {:?}, lengths: {:?}", + a.entries, a.start_indices, a.lengths + ); + assert_eq!(a[[0, 0]], 1.0); + assert_eq!(a[[3, 0]], 2.0); + assert_eq!(a[[3, 1]], 3.0); + assert_eq!(a[[2, 0]], 3.5); + // assert_eq!(a[[2, 1]], 0.0); + assert_eq!(a.entries, vec![1.0, 3.5, 0.0, 2.0, 3.0]); + assert_eq!(a.start_indices, vec![0, 4]); + assert_eq!(a.lengths, vec![1, 4]); + + a[[2, 0]] = 4.0; + println!( + "entries: {:?}, start_indices: {:?}, lengths: {:?}", + a.entries, a.start_indices, a.lengths + ); + + assert_eq!(a[[0, 0]], 1.0); + assert_eq!(a[[3, 0]], 2.0); + assert_eq!(a[[3, 1]], 3.0); + assert_eq!(a[[2, 0]], 4.0); + // assert_eq!(a[[2, 1]], 0.0); + 
assert_eq!(a.entries, vec![1.0, 4.0, 0.0, 2.0, 3.0]); + assert_eq!(a.start_indices, vec![0, 4]); + assert_eq!(a.lengths, vec![1, 4]); + + a[[1, 0]] = 5.0; + assert_eq!(a[[0, 0]], 1.0); + assert_eq!(a[[3, 0]], 2.0); + assert_eq!(a[[3, 1]], 3.0); + assert_eq!(a[[2, 0]], 4.0); + // assert_eq!(a[[2, 1]], 0.0); + assert_eq!(a[[1, 0]], 5.0); + assert_eq!(a.entries, vec![1.0, 0.0, 5.0, 0.0, 4.0, 0.0, 2.0, 3.0]); + assert_eq!(a.start_indices, vec![0]); + assert_eq!(a.lengths, vec![8]); + + println!( + "entries: {:?}, start_indices: {:?}, lengths: {:?}", + a.entries, a.start_indices, a.lengths + ); + } + + #[test] + fn iter() { + let mut a = PackedArray::::new([6, 5]); + a[[2, 2]] = 1; + a[[2, 4]] = 2; + a[[4, 1]] = 3; + a[[4, 4]] = 4; + println!( + "entries: {:?}, start_indices: {:?}, lengths: {:?}", + a.entries, a.start_indices, a.lengths + ); + a[[5, 0]] = 5; + println!( + "entries: {:?}, start_indices: {:?}, lengths: {:?}", + a.entries, a.start_indices, a.lengths + ); + assert_eq!( + a.indexed_iter().collect::>(), + &[ + ([2, 2], 1), + ([2, 4], 2), + ([4, 1], 3), + ([4, 4], 4), + ([5, 0], 5), + ] + ); + } + + fn index_access() { + let mut array = PackedArray::new([40, 50, 50]); + + // after creation the array must be empty + assert_eq!(array.overhead(), 2); + assert!(array.is_empty()); + + // insert the first element + array[[5, 10, 10]] = 1.0; + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 1); + assert_eq!(array.explicit_zeros(), 0); + assert_eq!(array.overhead(), 102); + assert!(!array.is_empty()); + + // insert an element after the first one + array[[8, 10, 10]] = 2.0; + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 2); + assert_eq!(array.explicit_zeros(), 0); + assert_eq!(array.overhead(), 402); + assert!(!array.is_empty()); + + // insert an element before the first one + array[[1, 10, 10]] = 3.0; + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + 
assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 3); + assert_eq!(array.explicit_zeros(), 0); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + array[[1, 10, 11]] = 4.0; + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 4); + assert_eq!(array.explicit_zeros(), 0); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + array[[1, 10, 9]] = 5.0; + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 5); + assert_eq!(array.explicit_zeros(), 0); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + array[[1, 10, 0]] = 6.0; + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 6); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + // check zeros + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array[[1, 10, 2]], 0.0); + assert_eq!(array[[1, 10, 3]], 0.0); + assert_eq!(array[[1, 10, 4]], 0.0); + assert_eq!(array[[1, 10, 5]], 0.0); + assert_eq!(array[[1, 10, 6]], 0.0); + assert_eq!(array[[1, 10, 7]], 0.0); + assert_eq!(array[[1, 10, 8]], 0.0); + assert_eq!(array.explicit_zeros(), 8); + + // insert where previously a zero was + array[[1, 10, 2]] = 7.0; + assert_eq!(array[[1, 10, 2]], 7.0); + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 7); + 
assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + // check zeros + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array[[1, 10, 3]], 0.0); + assert_eq!(array[[1, 10, 4]], 0.0); + assert_eq!(array[[1, 10, 5]], 0.0); + assert_eq!(array[[1, 10, 6]], 0.0); + assert_eq!(array[[1, 10, 7]], 0.0); + assert_eq!(array[[1, 10, 8]], 0.0); + assert_eq!(array.explicit_zeros(), 7); + + array[[1, 15, 2]] = 8.0; + assert_eq!(array[[1, 15, 2]], 8.0); + assert_eq!(array[[1, 10, 2]], 7.0); + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 8); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + // check zeros + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array[[1, 10, 3]], 0.0); + assert_eq!(array[[1, 10, 4]], 0.0); + assert_eq!(array[[1, 10, 5]], 0.0); + assert_eq!(array[[1, 10, 6]], 0.0); + assert_eq!(array[[1, 10, 7]], 0.0); + assert_eq!(array[[1, 10, 8]], 0.0); + assert_eq!(array.explicit_zeros(), 7); + + array[[1, 15, 4]] = 9.0; + assert_eq!(array[[1, 15, 4]], 9.0); + assert_eq!(array[[1, 15, 2]], 8.0); + assert_eq!(array[[1, 10, 2]], 7.0); + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 9); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + // check zeros + assert_eq!(array[[1, 15, 3]], 0.0); + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array[[1, 10, 3]], 0.0); + assert_eq!(array[[1, 10, 4]], 0.0); + assert_eq!(array[[1, 10, 5]], 0.0); + assert_eq!(array[[1, 10, 6]], 0.0); + assert_eq!(array[[1, 10, 7]], 0.0); + assert_eq!(array[[1, 10, 8]], 0.0); + assert_eq!(array.explicit_zeros(), 8); 
+ + array[[1, 15, 0]] = 10.0; + assert_eq!(array[[1, 15, 0]], 10.0); + assert_eq!(array[[1, 15, 4]], 9.0); + assert_eq!(array[[1, 15, 2]], 8.0); + assert_eq!(array[[1, 10, 2]], 7.0); + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 10); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + // check zeros + assert_eq!(array[[1, 15, 1]], 0.0); + assert_eq!(array[[1, 15, 3]], 0.0); + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array[[1, 10, 3]], 0.0); + assert_eq!(array[[1, 10, 4]], 0.0); + assert_eq!(array[[1, 10, 5]], 0.0); + assert_eq!(array[[1, 10, 6]], 0.0); + assert_eq!(array[[1, 10, 7]], 0.0); + assert_eq!(array[[1, 10, 8]], 0.0); + assert_eq!(array.explicit_zeros(), 9); + } + + #[test] + #[should_panic(expected = "index [40, 0, 50] is out of bounds for array of shape [40, 50, 50]")] + fn index_mut_panic_dim0() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[40, 0, 50]] = 1.0; + } + + #[test] + #[should_panic(expected = "index [0, 50, 0] is out of bounds for array of shape [40, 50, 50]")] + fn index_mut_panic_dim1() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[0, 50, 0]] = 1.0; + } + + #[test] + #[should_panic(expected = "index [0, 0, 50] is out of bounds for array of shape [40, 50, 50]")] + fn index_mut_panic_dim2() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[0, 0, 50]] = 1.0; + } + + #[test] + #[should_panic(expected = "entry at index [0, 0, 0] is implicitly set to the default value")] + fn index_panic_dim0_0() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[1, 0, 0]] = 1.0; + + assert_eq!(array[[0, 0, 0]], 0.0); + } + + #[test] + #[should_panic(expected = "entry at index [2, 0, 0] is implicitly set to the default value")] + fn index_panic_dim0_1() { + let mut 
array = PackedArray::new([40, 50, 50]); + + array[[1, 0, 0]] = 1.0; + + assert_eq!(array[[2, 0, 0]], 0.0); + } + + #[test] + #[should_panic(expected = "index [1, 50, 0] is out of bounds for array of shape [40, 50, 50]")] + fn index_panic_dim1() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[1, 0, 0]] = 1.0; + + assert_eq!(array[[1, 50, 0]], 0.0); + } + + #[test] + #[should_panic(expected = "entry at index [0, 0, 0] is implicitly set to the default value")] + fn index_panic_dim2_0() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[0, 0, 1]] = 1.0; + + assert_eq!(array[[0, 0, 0]], 0.0); + } + + #[test] + #[should_panic(expected = "entry at index [0, 0, 2] is implicitly set to the default value")] + fn index_panic_dim2_1() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[0, 0, 1]] = 1.0; + + assert_eq!(array[[0, 0, 2]], 0.0); + } + + #[test] + fn indexed_iter() { + let mut array = PackedArray::new([40, 50, 50]); + + // check empty iterator + assert_eq!(array.indexed_iter().next(), None); + + // insert an element + array[[2, 3, 4]] = 1.0; + + let mut iter = array.indexed_iter(); + + // check iterator with one element + assert_eq!(iter.next(), Some(([2, 3, 4], 1.0))); + assert_eq!(iter.next(), None); + + mem::drop(iter); + + // insert another element + array[[2, 3, 6]] = 2.0; + + let mut iter = array.indexed_iter(); + + assert_eq!(iter.next(), Some(([2, 3, 4], 1.0))); + assert_eq!(iter.next(), Some(([2, 3, 6], 2.0))); + assert_eq!(iter.next(), None); + + mem::drop(iter); + + // insert yet another element + array[[4, 5, 7]] = 3.0; + + let mut iter = array.indexed_iter(); + + assert_eq!(iter.next(), Some(([2, 3, 4], 1.0))); + assert_eq!(iter.next(), Some(([2, 3, 6], 2.0))); + assert_eq!(iter.next(), Some(([4, 5, 7], 3.0))); + assert_eq!(iter.next(), None); + + mem::drop(iter); + + // insert at the very first position + array[[2, 0, 0]] = 4.0; + + let mut iter = array.indexed_iter(); + + assert_eq!(iter.next(), Some(([2, 0, 0], 
4.0))); + assert_eq!(iter.next(), Some(([2, 3, 4], 1.0))); + assert_eq!(iter.next(), Some(([2, 3, 6], 2.0))); + assert_eq!(iter.next(), Some(([4, 5, 7], 3.0))); + assert_eq!(iter.next(), None); + } + + #[test] + fn clear() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[3, 5, 1]] = 1.0; + array[[7, 8, 9]] = 2.0; + array[[9, 1, 4]] = 3.0; + + assert!(!array.is_empty()); + assert_eq!(array.non_zeros(), 3); + assert_eq!(array.explicit_zeros(), 0); + + array.clear(); + + assert!(array.is_empty()); + assert_eq!(array.non_zeros(), 0); + assert_eq!(array.explicit_zeros(), 0); + } + + #[test] + fn from_ndarray() { + let mut ndarray = Array3::zeros((2, 50, 50)); + + ndarray[[0, 4, 3]] = 1.0; + ndarray[[0, 4, 4]] = 2.0; + ndarray[[0, 4, 6]] = 3.0; + ndarray[[0, 5, 1]] = 4.0; + ndarray[[0, 5, 7]] = 5.0; + ndarray[[1, 3, 9]] = 6.0; + + let array = PackedArray::from_ndarray(ndarray.view(), 3, 40); + + assert_eq!(array[[3, 4, 3]], 1.0); + assert_eq!(array[[3, 4, 4]], 2.0); + // assert_eq!(array[[3, 4, 5]], 0.0); + assert_eq!(array[[3, 4, 6]], 3.0); + assert_eq!(array[[3, 5, 1]], 4.0); + // assert_eq!(array[[3, 5, 2]], 0.0); + // assert_eq!(array[[3, 5, 3]], 0.0); + // assert_eq!(array[[3, 5, 4]], 0.0); + // assert_eq!(array[[3, 5, 5]], 0.0); + // assert_eq!(array[[3, 5, 6]], 0.0); + assert_eq!(array[[3, 5, 7]], 5.0); + assert_eq!(array[[4, 3, 9]], 6.0); + + // assert_eq!(array.len(), 6); + // assert_eq!(array.zeros(), 6); + } + + // #[test] + // fn test_index_swap() { + // let mut array = JaggedArray::new([5, 50, 2]); + + // array[[0, 0, 0]] = 1.0; + // array[[0, 0, 1]] = 2.0; + // array[[1, 2, 1]] = 3.0; + // array[[1, 5, 1]] = 4.0; + // array[[1, 6, 0]] = 5.0; + // array[[1, 8, 0]] = 6.0; + // array[[1, 9, 0]] = 7.0; + // array[[2, 0, 0]] = 8.0; + // array[[3, 2, 1]] = 9.0; + // array[[3, 4, 0]] = 10.0; + // array[[3, 4, 1]] = 11.0; + // array[[4, 0, 0]] = 12.0; + // array[[4, 0, 1]] = 13.0; + + // assert_eq!(array[[0, 0, 0]], 1.0); + // assert_eq!(array[[0, 0, 
1]], 2.0); + // assert_eq!(array[[1, 2, 1]], 3.0); + // assert_eq!(array[[1, 5, 1]], 4.0); + // assert_eq!(array[[1, 6, 0]], 5.0); + // assert_eq!(array[[1, 8, 0]], 6.0); + // assert_eq!(array[[1, 9, 0]], 7.0); + // assert_eq!(array[[2, 0, 0]], 8.0); + // assert_eq!(array[[3, 2, 1]], 9.0); + // assert_eq!(array[[3, 4, 0]], 10.0); + // assert_eq!(array[[3, 4, 1]], 11.0); + // assert_eq!(array[[4, 0, 0]], 12.0); + // assert_eq!(array[[4, 0, 1]], 13.0); + + // let mut iter = array.indexed_iter(); + + // assert_eq!(iter.next(), Some(([0, 0, 0], 1.0))); + // assert_eq!(iter.next(), Some(([0, 0, 1], 2.0))); + // assert_eq!(iter.next(), Some(([1, 6, 0], 5.0))); + // assert_eq!(iter.next(), Some(([1, 8, 0], 6.0))); + // assert_eq!(iter.next(), Some(([1, 9, 0], 7.0))); + // assert_eq!(iter.next(), Some(([1, 2, 1], 3.0))); + // assert_eq!(iter.next(), Some(([1, 5, 1], 4.0))); + // assert_eq!(iter.next(), Some(([2, 0, 0], 8.0))); + // assert_eq!(iter.next(), Some(([3, 4, 0], 10.0))); + // assert_eq!(iter.next(), Some(([3, 2, 1], 9.0))); + // assert_eq!(iter.next(), Some(([3, 4, 1], 11.0))); + // assert_eq!(iter.next(), Some(([4, 0, 0], 12.0))); + // assert_eq!(iter.next(), Some(([4, 0, 1], 13.0))); + // assert_eq!(iter.next(), None); + + // let mut ndarray = Array3::zeros((5, 50, 2)); + + // ndarray[[0, 0, 0]] = 1.0; + // ndarray[[0, 0, 1]] = 2.0; + // ndarray[[1, 2, 1]] = 3.0; + // ndarray[[1, 5, 1]] = 4.0; + // ndarray[[1, 6, 0]] = 5.0; + // ndarray[[1, 8, 0]] = 6.0; + // ndarray[[1, 9, 0]] = 7.0; + // ndarray[[2, 0, 0]] = 8.0; + // ndarray[[3, 2, 1]] = 9.0; + // ndarray[[3, 4, 0]] = 10.0; + // ndarray[[3, 4, 1]] = 11.0; + // ndarray[[4, 0, 0]] = 12.0; + // ndarray[[4, 0, 1]] = 13.0; + + // let mut other = JaggedArray::from_ndarray(ndarray.view(), 0, 5); + + // assert_eq!(other[[0, 0, 0]], 1.0); + // assert_eq!(other[[0, 0, 1]], 2.0); + // assert_eq!(other[[1, 2, 1]], 3.0); + // assert_eq!(other[[1, 5, 1]], 4.0); + // assert_eq!(other[[1, 6, 0]], 5.0); + // 
assert_eq!(other[[1, 8, 0]], 6.0); + // assert_eq!(other[[1, 9, 0]], 7.0); + // assert_eq!(other[[2, 0, 0]], 8.0); + // assert_eq!(other[[3, 2, 1]], 9.0); + // assert_eq!(other[[3, 4, 0]], 10.0); + // assert_eq!(other[[3, 4, 1]], 11.0); + // assert_eq!(other[[4, 0, 0]], 12.0); + // assert_eq!(other[[4, 0, 1]], 13.0); + // } +} From b9edaf6cc2bffb4dd33be4a8bfa484465f54328d Mon Sep 17 00:00:00 2001 From: janw20 Date: Thu, 18 Apr 2024 14:30:23 +0200 Subject: [PATCH 035/179] Fix `index_access` test and one edge case --- pineappl/src/packed_array.rs | 175 +++++++---------------------------- 1 file changed, 32 insertions(+), 143 deletions(-) diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index 061666563..bb2a32626 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -215,18 +215,20 @@ impl IndexMut<[usize; D]> } return &mut self.entries[point_entries - 1 + distance]; - } else if let Some(start_index_next) = self.start_indices.get(point) { - if raveled_index + threshold_distance >= *start_index_next { - // println!("test 5"); - let distance = start_index_next - raveled_index; - - self.start_indices[point] = raveled_index; - self.lengths[point] += distance; - for _ in 0..distance { - self.entries.insert(point_entries, Default::default()); - } - return &mut self.entries[point_entries]; + } + } + + if let Some(start_index_next) = self.start_indices.get(point) { + if raveled_index + threshold_distance >= *start_index_next { + // println!("test 5"); + let distance = start_index_next - raveled_index; + + self.start_indices[point] = raveled_index; + self.lengths[point] += distance; + for _ in 0..distance { + self.entries.insert(point_entries, Default::default()); } + return &mut self.entries[point_entries]; } } @@ -389,11 +391,12 @@ mod tests { ); } + #[test] fn index_access() { let mut array = PackedArray::new([40, 50, 50]); // after creation the array must be empty - assert_eq!(array.overhead(), 2); + 
assert_eq!(array.overhead(), 0); assert!(array.is_empty()); // insert the first element @@ -401,7 +404,7 @@ mod tests { assert_eq!(array[[5, 10, 10]], 1.0); assert_eq!(array.non_zeros(), 1); assert_eq!(array.explicit_zeros(), 0); - assert_eq!(array.overhead(), 102); + assert_eq!(array.overhead(), 2); assert!(!array.is_empty()); // insert an element after the first one @@ -410,7 +413,7 @@ mod tests { assert_eq!(array[[5, 10, 10]], 1.0); assert_eq!(array.non_zeros(), 2); assert_eq!(array.explicit_zeros(), 0); - assert_eq!(array.overhead(), 402); + assert_eq!(array.overhead(), 4); assert!(!array.is_empty()); // insert an element before the first one @@ -420,7 +423,7 @@ mod tests { assert_eq!(array[[5, 10, 10]], 1.0); assert_eq!(array.non_zeros(), 3); assert_eq!(array.explicit_zeros(), 0); - assert_eq!(array.overhead(), 802); + assert_eq!(array.overhead(), 6); assert!(!array.is_empty()); array[[1, 10, 11]] = 4.0; @@ -430,7 +433,7 @@ mod tests { assert_eq!(array[[5, 10, 10]], 1.0); assert_eq!(array.non_zeros(), 4); assert_eq!(array.explicit_zeros(), 0); - assert_eq!(array.overhead(), 802); + assert_eq!(array.overhead(), 6); assert!(!array.is_empty()); array[[1, 10, 9]] = 5.0; @@ -441,7 +444,9 @@ mod tests { assert_eq!(array[[5, 10, 10]], 1.0); assert_eq!(array.non_zeros(), 5); assert_eq!(array.explicit_zeros(), 0); - assert_eq!(array.overhead(), 802); + // dbg!(&array.start_indices); + // dbg!(&array.lengths); + assert_eq!(array.overhead(), 6); assert!(!array.is_empty()); array[[1, 10, 0]] = 6.0; @@ -452,21 +457,10 @@ mod tests { assert_eq!(array[[8, 10, 10]], 2.0); assert_eq!(array[[5, 10, 10]], 1.0); assert_eq!(array.non_zeros(), 6); - assert_eq!(array.overhead(), 802); + assert_eq!(array.explicit_zeros(), 0); + assert_eq!(array.overhead(), 8); assert!(!array.is_empty()); - // check zeros - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 2]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 
5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.explicit_zeros(), 8); - - // insert where previously a zero was array[[1, 10, 2]] = 7.0; assert_eq!(array[[1, 10, 2]], 7.0); assert_eq!(array[[1, 10, 0]], 6.0); @@ -476,18 +470,12 @@ mod tests { assert_eq!(array[[8, 10, 10]], 2.0); assert_eq!(array[[5, 10, 10]], 1.0); assert_eq!(array.non_zeros(), 7); - assert_eq!(array.overhead(), 802); + assert_eq!(array.overhead(), 8); assert!(!array.is_empty()); // check zeros assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.explicit_zeros(), 7); + assert_eq!(array.explicit_zeros(), 1); array[[1, 15, 2]] = 8.0; assert_eq!(array[[1, 15, 2]], 8.0); @@ -499,18 +487,12 @@ mod tests { assert_eq!(array[[8, 10, 10]], 2.0); assert_eq!(array[[5, 10, 10]], 1.0); assert_eq!(array.non_zeros(), 8); - assert_eq!(array.overhead(), 802); + assert_eq!(array.overhead(), 10); assert!(!array.is_empty()); // check zeros assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.explicit_zeros(), 7); + assert_eq!(array.explicit_zeros(), 1); array[[1, 15, 4]] = 9.0; assert_eq!(array[[1, 15, 4]], 9.0); @@ -523,19 +505,13 @@ mod tests { assert_eq!(array[[8, 10, 10]], 2.0); assert_eq!(array[[5, 10, 10]], 1.0); assert_eq!(array.non_zeros(), 9); - assert_eq!(array.overhead(), 802); + assert_eq!(array.overhead(), 10); assert!(!array.is_empty()); // check zeros assert_eq!(array[[1, 15, 3]], 0.0); assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 3]], 
0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.explicit_zeros(), 8); + assert_eq!(array.explicit_zeros(), 2); array[[1, 15, 0]] = 10.0; assert_eq!(array[[1, 15, 0]], 10.0); @@ -549,20 +525,14 @@ mod tests { assert_eq!(array[[8, 10, 10]], 2.0); assert_eq!(array[[5, 10, 10]], 1.0); assert_eq!(array.non_zeros(), 10); - assert_eq!(array.overhead(), 802); + assert_eq!(array.overhead(), 10); assert!(!array.is_empty()); // check zeros assert_eq!(array[[1, 15, 1]], 0.0); assert_eq!(array[[1, 15, 3]], 0.0); assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.explicit_zeros(), 9); + assert_eq!(array.explicit_zeros(), 3); } #[test] @@ -741,85 +711,4 @@ mod tests { // assert_eq!(array.zeros(), 6); } - // #[test] - // fn test_index_swap() { - // let mut array = JaggedArray::new([5, 50, 2]); - - // array[[0, 0, 0]] = 1.0; - // array[[0, 0, 1]] = 2.0; - // array[[1, 2, 1]] = 3.0; - // array[[1, 5, 1]] = 4.0; - // array[[1, 6, 0]] = 5.0; - // array[[1, 8, 0]] = 6.0; - // array[[1, 9, 0]] = 7.0; - // array[[2, 0, 0]] = 8.0; - // array[[3, 2, 1]] = 9.0; - // array[[3, 4, 0]] = 10.0; - // array[[3, 4, 1]] = 11.0; - // array[[4, 0, 0]] = 12.0; - // array[[4, 0, 1]] = 13.0; - - // assert_eq!(array[[0, 0, 0]], 1.0); - // assert_eq!(array[[0, 0, 1]], 2.0); - // assert_eq!(array[[1, 2, 1]], 3.0); - // assert_eq!(array[[1, 5, 1]], 4.0); - // assert_eq!(array[[1, 6, 0]], 5.0); - // assert_eq!(array[[1, 8, 0]], 6.0); - // assert_eq!(array[[1, 9, 0]], 7.0); - // assert_eq!(array[[2, 0, 0]], 8.0); - // assert_eq!(array[[3, 2, 1]], 9.0); - // assert_eq!(array[[3, 4, 0]], 10.0); - // assert_eq!(array[[3, 
4, 1]], 11.0); - // assert_eq!(array[[4, 0, 0]], 12.0); - // assert_eq!(array[[4, 0, 1]], 13.0); - - // let mut iter = array.indexed_iter(); - - // assert_eq!(iter.next(), Some(([0, 0, 0], 1.0))); - // assert_eq!(iter.next(), Some(([0, 0, 1], 2.0))); - // assert_eq!(iter.next(), Some(([1, 6, 0], 5.0))); - // assert_eq!(iter.next(), Some(([1, 8, 0], 6.0))); - // assert_eq!(iter.next(), Some(([1, 9, 0], 7.0))); - // assert_eq!(iter.next(), Some(([1, 2, 1], 3.0))); - // assert_eq!(iter.next(), Some(([1, 5, 1], 4.0))); - // assert_eq!(iter.next(), Some(([2, 0, 0], 8.0))); - // assert_eq!(iter.next(), Some(([3, 4, 0], 10.0))); - // assert_eq!(iter.next(), Some(([3, 2, 1], 9.0))); - // assert_eq!(iter.next(), Some(([3, 4, 1], 11.0))); - // assert_eq!(iter.next(), Some(([4, 0, 0], 12.0))); - // assert_eq!(iter.next(), Some(([4, 0, 1], 13.0))); - // assert_eq!(iter.next(), None); - - // let mut ndarray = Array3::zeros((5, 50, 2)); - - // ndarray[[0, 0, 0]] = 1.0; - // ndarray[[0, 0, 1]] = 2.0; - // ndarray[[1, 2, 1]] = 3.0; - // ndarray[[1, 5, 1]] = 4.0; - // ndarray[[1, 6, 0]] = 5.0; - // ndarray[[1, 8, 0]] = 6.0; - // ndarray[[1, 9, 0]] = 7.0; - // ndarray[[2, 0, 0]] = 8.0; - // ndarray[[3, 2, 1]] = 9.0; - // ndarray[[3, 4, 0]] = 10.0; - // ndarray[[3, 4, 1]] = 11.0; - // ndarray[[4, 0, 0]] = 12.0; - // ndarray[[4, 0, 1]] = 13.0; - - // let mut other = JaggedArray::from_ndarray(ndarray.view(), 0, 5); - - // assert_eq!(other[[0, 0, 0]], 1.0); - // assert_eq!(other[[0, 0, 1]], 2.0); - // assert_eq!(other[[1, 2, 1]], 3.0); - // assert_eq!(other[[1, 5, 1]], 4.0); - // assert_eq!(other[[1, 6, 0]], 5.0); - // assert_eq!(other[[1, 8, 0]], 6.0); - // assert_eq!(other[[1, 9, 0]], 7.0); - // assert_eq!(other[[2, 0, 0]], 8.0); - // assert_eq!(other[[3, 2, 1]], 9.0); - // assert_eq!(other[[3, 4, 0]], 10.0); - // assert_eq!(other[[3, 4, 1]], 11.0); - // assert_eq!(other[[4, 0, 0]], 12.0); - // assert_eq!(other[[4, 0, 1]], 13.0); - // } } From 1c2ca7980b22f55fbb8f62bed69aa3eaf8bbe412 
Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 18 Apr 2024 11:48:56 +0200 Subject: [PATCH 036/179] Add missing `must_use` attributes --- pineappl/src/packed_array.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index bb2a32626..f0b119f59 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -29,6 +29,7 @@ impl PackedArray { self.entries.is_empty() } + #[must_use] pub fn shape(&self) -> &[usize] { &self.shape } @@ -50,6 +51,7 @@ impl PackedArray { self.entries.iter().filter(|x| **x == T::default()).count() } + #[must_use] pub fn non_zeros(&self) -> usize { self.entries.iter().filter(|x| **x != T::default()).count() } @@ -74,6 +76,7 @@ impl, const D: usize> MulAssign for PackedArray } impl PackedArray { + #[must_use] pub fn from_ndarray(array: ArrayView3, xstart: usize, xsize: usize) -> Self { let shape = array.shape(); From 8a9c20299c00832fb07972f979b0723b330483a2 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 18 Apr 2024 11:52:44 +0200 Subject: [PATCH 037/179] Avoid having to specify dimension `D` explicitly --- pineappl/src/packed_array.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index f0b119f59..bc874617d 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -93,7 +93,7 @@ impl PackedArray { } } -fn ravel_multi_index(indices: &[usize], dimensions: &[usize]) -> usize { +fn ravel_multi_index(indices: &[usize; D], dimensions: &[usize]) -> usize { assert_eq!(indices.len(), dimensions.len()); indices @@ -129,7 +129,7 @@ impl Index<[usize; D]> for Packed self.shape ); - let raveled_index = ravel_multi_index::(&index, &self.shape); + let raveled_index = ravel_multi_index(&index, &self.shape); let point = self.start_indices.partition_point(|&i| i <= raveled_index); assert!( @@ -165,7 +165,7 @@ impl IndexMut<[usize; D]> 
self.shape ); - let raveled_index = ravel_multi_index::(&index, &self.shape); + let raveled_index = ravel_multi_index(&index, &self.shape); // if self.entries.is_empty() { // self.start_indices.push(raveled_index); @@ -274,12 +274,12 @@ mod tests { #[test] fn ravel_multi_index() { - assert_eq!(super::ravel_multi_index::<2>(&[0, 0], &[3, 2]), 0); - assert_eq!(super::ravel_multi_index::<2>(&[0, 1], &[3, 2]), 1); - assert_eq!(super::ravel_multi_index::<2>(&[1, 0], &[3, 2]), 2); - assert_eq!(super::ravel_multi_index::<2>(&[1, 1], &[3, 2]), 3); - assert_eq!(super::ravel_multi_index::<2>(&[2, 0], &[3, 2]), 4); - assert_eq!(super::ravel_multi_index::<2>(&[2, 1], &[3, 2]), 5); + assert_eq!(super::ravel_multi_index(&[0, 0], &[3, 2]), 0); + assert_eq!(super::ravel_multi_index(&[0, 1], &[3, 2]), 1); + assert_eq!(super::ravel_multi_index(&[1, 0], &[3, 2]), 2); + assert_eq!(super::ravel_multi_index(&[1, 1], &[3, 2]), 3); + assert_eq!(super::ravel_multi_index(&[2, 0], &[3, 2]), 4); + assert_eq!(super::ravel_multi_index(&[2, 1], &[3, 2]), 5); } #[test] From 444fb7591bc91012871b6eb7ce494de79e64c4a9 Mon Sep 17 00:00:00 2001 From: janw20 Date: Thu, 18 Apr 2024 15:18:23 +0200 Subject: [PATCH 038/179] Add documentation and do cleanup --- pineappl/src/packed_array.rs | 110 ++++++++++------------------------- 1 file changed, 30 insertions(+), 80 deletions(-) diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index bc874617d..6cb1ca2ac 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -4,15 +4,21 @@ use std::{mem, vec}; use ndarray::ArrayView3; use serde::{Deserialize, Serialize}; +/// `D`-dimensional array similar to ndarray::ArrayBase, except that `T::default()` is not stored to save space. Instead, adjacent elements are grouped together and only the index of their first element (`start_index`) and the length of the group (`lengths`) is stored. 
#[derive(Clone, Deserialize, Serialize)] pub struct PackedArray { + /// The actual values stored in the array. The length of `entries` is always the sum of the elements in `lengths`. entries: Vec, + /// The indices of the first elements in each group. start_indices: Vec, + /// The length of each group. lengths: Vec, + /// The shape (dimensions) of the array. shape: Vec, } impl PackedArray { + /// Constructs a new and empty `PackedArray` of shape `shape`. #[must_use] pub fn new(shape: [usize; D]) -> Self { Self { @@ -24,38 +30,45 @@ impl PackedArray { } } + /// Returns `true` if the array contains no element. #[must_use] pub fn is_empty(&self) -> bool { self.entries.is_empty() } + /// Returns the shape of the array. #[must_use] pub fn shape(&self) -> &[usize] { &self.shape } + /// Clears the contents of the array. pub fn clear(&mut self) { self.entries.clear(); self.start_indices.clear(); self.lengths.clear(); } + /// Returns the overhead of storing the `start_indices` and the `lengths` of the groups, in units of `f64`. #[must_use] pub fn overhead(&self) -> usize { ((self.start_indices.len() + self.lengths.len()) * mem::size_of::()) / mem::size_of::() } + /// Returns the number of default (zero) elements that are explicitly stored in `entries`. If there is one default elements between adjacent groups, it is more economical to store the one default element explicitly and merge the two groups, than to store the `start_indices` and `lengths` of those groups. #[must_use] pub fn explicit_zeros(&self) -> usize { self.entries.iter().filter(|x| **x == T::default()).count() } + /// Returns the number of non-default (non-zero) elements stored in the array. #[must_use] pub fn non_zeros(&self) -> usize { self.entries.iter().filter(|x| **x != T::default()).count() } + /// Returns an `Iterator` over the non-default (non-zero) elements of this array. The type of an iterator element is `([usize; D], T)`. 
pub fn indexed_iter(&self) -> impl Iterator + '_ { self.start_indices .iter() @@ -70,12 +83,16 @@ impl PackedArray { } impl, const D: usize> MulAssign for PackedArray { + + /// Perform `self *= rhs` as elementwise multiplication (in place). fn mul_assign(&mut self, rhs: T) { self.entries.iter_mut().for_each(|x| *x *= rhs); } } impl PackedArray { + + /// Converts `array` into a `PackedArray` of dimension 3. #[must_use] pub fn from_ndarray(array: ArrayView3, xstart: usize, xsize: usize) -> Self { let shape = array.shape(); @@ -93,16 +110,18 @@ impl PackedArray { } } -fn ravel_multi_index(indices: &[usize; D], dimensions: &[usize]) -> usize { - assert_eq!(indices.len(), dimensions.len()); - - indices - .iter() - .skip(1) - .zip(dimensions.iter().skip(1)) - .fold(indices[0], |acc, (i, d)| acc * d + i) +/// Converts a `multi_index` into an flat index. +fn ravel_multi_index(multi_index: &[usize; D], dimensions: &[usize]) -> usize { + assert_eq!(multi_index.len(), dimensions.len()); + + multi_index + .iter() + .skip(1) + .zip(dimensions.iter().skip(1)) + .fold(multi_index[0], |acc, (i, d)| acc * d + i) } +/// Converts a flat `index` into a multi_index. 
fn unravel_index(index: usize, dimensions: &[usize]) -> [usize; D] { assert!(index < dimensions.iter().product()); let mut indices = [0; D]; @@ -152,7 +171,6 @@ impl Index<[usize; D]> for Packed } } -// TODO: implement axis swapping optimization impl IndexMut<[usize; D]> for PackedArray { @@ -167,22 +185,12 @@ impl IndexMut<[usize; D]> let raveled_index = ravel_multi_index(&index, &self.shape); - // if self.entries.is_empty() { - // self.start_indices.push(raveled_index); - // self.lengths.push(1); - // self.entries.push(Default::default()); - - // return &mut self.entries[0]; - // } - // the closest start_index that is greater than raveled_index let point = self.start_indices.partition_point(|&i| i <= raveled_index); // the index that is stored in `point`, translated to the indices of the entries let point_entries = self.lengths.iter().take(point).sum::(); - // println!("test 1"); - // maximum distance for merging regions let threshold_distance = 2; @@ -191,14 +199,11 @@ impl IndexMut<[usize; D]> let length = self.lengths[point - 1]; if raveled_index < start_index + length { - // println!("test 2"); return &mut self.entries[point_entries - length + raveled_index - start_index]; } else if raveled_index < start_index + length + threshold_distance { - // println!("test 3"); - let distance = raveled_index - (start_index + length) + 1; - // println!("distance: {}", distance); self.lengths[point - 1] += distance; + for _ in 0..(distance) { self.entries.insert(point_entries, Default::default()); } @@ -223,7 +228,6 @@ impl IndexMut<[usize; D]> if let Some(start_index_next) = self.start_indices.get(point) { if raveled_index + threshold_distance >= *start_index_next { - // println!("test 5"); let distance = start_index_next - raveled_index; self.start_indices[point] = raveled_index; @@ -235,8 +239,6 @@ impl IndexMut<[usize; D]> } } - // println!("test 6"); - // in the most general case we have to insert a new start_index self.start_indices.insert(point, raveled_index); 
self.lengths.insert(point, 1); @@ -246,14 +248,6 @@ impl IndexMut<[usize; D]> } } -// impl IntoIterator for &JaggedArray { -// type Item = (Vec, T); -// type IntoIter = Iterator, T)>; - -// fn into_iter(self) -> Self::IntoIter { - -// } -// } #[cfg(test)] mod tests { @@ -287,20 +281,12 @@ mod tests { let mut a = PackedArray::::new([4, 2]); a[[0, 0]] = 1.0; - println!( - "entries: {:?}, start_indices: {:?}, lengths: {:?}", - a.entries, a.start_indices, a.lengths - ); assert_eq!(a[[0, 0]], 1.0); assert_eq!(a.entries, vec![1.0]); assert_eq!(a.start_indices, vec![0]); assert_eq!(a.lengths, vec![1]); a[[3, 0]] = 2.0; - println!( - "entries: {:?}, start_indices: {:?}, lengths: {:?}", - a.entries, a.start_indices, a.lengths - ); assert_eq!(a[[0, 0]], 1.0); assert_eq!(a[[3, 0]], 2.0); assert_eq!(a.entries, vec![1.0, 2.0]); @@ -308,11 +294,6 @@ mod tests { assert_eq!(a.lengths, vec![1, 1]); a[[3, 1]] = 3.0; - println!( - "entries: {:?}, start_indices: {:?}, lengths: {:?}", - a.entries, a.start_indices, a.lengths - ); - assert_eq!(a[[0, 0]], 1.0); assert_eq!(a[[3, 0]], 2.0); assert_eq!(a[[3, 1]], 3.0); @@ -321,30 +302,19 @@ mod tests { assert_eq!(a.lengths, vec![1, 2]); a[[2, 0]] = 3.5; - println!( - "entries: {:?}, start_indices: {:?}, lengths: {:?}", - a.entries, a.start_indices, a.lengths - ); assert_eq!(a[[0, 0]], 1.0); assert_eq!(a[[3, 0]], 2.0); assert_eq!(a[[3, 1]], 3.0); assert_eq!(a[[2, 0]], 3.5); - // assert_eq!(a[[2, 1]], 0.0); assert_eq!(a.entries, vec![1.0, 3.5, 0.0, 2.0, 3.0]); assert_eq!(a.start_indices, vec![0, 4]); assert_eq!(a.lengths, vec![1, 4]); a[[2, 0]] = 4.0; - println!( - "entries: {:?}, start_indices: {:?}, lengths: {:?}", - a.entries, a.start_indices, a.lengths - ); - assert_eq!(a[[0, 0]], 1.0); assert_eq!(a[[3, 0]], 2.0); assert_eq!(a[[3, 1]], 3.0); assert_eq!(a[[2, 0]], 4.0); - // assert_eq!(a[[2, 1]], 0.0); assert_eq!(a.entries, vec![1.0, 4.0, 0.0, 2.0, 3.0]); assert_eq!(a.start_indices, vec![0, 4]); assert_eq!(a.lengths, vec![1, 4]); @@ -354,16 
+324,10 @@ mod tests { assert_eq!(a[[3, 0]], 2.0); assert_eq!(a[[3, 1]], 3.0); assert_eq!(a[[2, 0]], 4.0); - // assert_eq!(a[[2, 1]], 0.0); assert_eq!(a[[1, 0]], 5.0); assert_eq!(a.entries, vec![1.0, 0.0, 5.0, 0.0, 4.0, 0.0, 2.0, 3.0]); assert_eq!(a.start_indices, vec![0]); assert_eq!(a.lengths, vec![8]); - - println!( - "entries: {:?}, start_indices: {:?}, lengths: {:?}", - a.entries, a.start_indices, a.lengths - ); } #[test] @@ -373,15 +337,7 @@ mod tests { a[[2, 4]] = 2; a[[4, 1]] = 3; a[[4, 4]] = 4; - println!( - "entries: {:?}, start_indices: {:?}, lengths: {:?}", - a.entries, a.start_indices, a.lengths - ); a[[5, 0]] = 5; - println!( - "entries: {:?}, start_indices: {:?}, lengths: {:?}", - a.entries, a.start_indices, a.lengths - ); assert_eq!( a.indexed_iter().collect::>(), &[ @@ -699,19 +655,13 @@ mod tests { assert_eq!(array[[3, 4, 3]], 1.0); assert_eq!(array[[3, 4, 4]], 2.0); - // assert_eq!(array[[3, 4, 5]], 0.0); + assert_eq!(array[[3, 4, 5]], 0.0); assert_eq!(array[[3, 4, 6]], 3.0); assert_eq!(array[[3, 5, 1]], 4.0); - // assert_eq!(array[[3, 5, 2]], 0.0); - // assert_eq!(array[[3, 5, 3]], 0.0); - // assert_eq!(array[[3, 5, 4]], 0.0); - // assert_eq!(array[[3, 5, 5]], 0.0); - // assert_eq!(array[[3, 5, 6]], 0.0); assert_eq!(array[[3, 5, 7]], 5.0); assert_eq!(array[[4, 3, 9]], 6.0); - // assert_eq!(array.len(), 6); - // assert_eq!(array.zeros(), 6); + assert_eq!(array.explicit_zeros(), 1); } } From 3fb92969c37933211a92ad77f8d48c018891814d Mon Sep 17 00:00:00 2001 From: janw20 Date: Thu, 18 Apr 2024 17:42:51 +0200 Subject: [PATCH 039/179] Add small changes to the documentation --- pineappl/src/packed_array.rs | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index 6cb1ca2ac..cf4dbedda 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -4,7 +4,7 @@ use std::{mem, vec}; use ndarray::ArrayView3; use serde::{Deserialize, Serialize}; 
-/// `D`-dimensional array similar to ndarray::ArrayBase, except that `T::default()` is not stored to save space. Instead, adjacent elements are grouped together and only the index of their first element (`start_index`) and the length of the group (`lengths`) is stored. +/// `D`-dimensional array similar to ndarray::ArrayBase, except that `T::default()` is not stored to save space. Instead, adjacent non-default elements are grouped together and the index of their first element (`start_index`) and the length of the group (`lengths`) is stored. #[derive(Clone, Deserialize, Serialize)] pub struct PackedArray { /// The actual values stored in the array. The length of `entries` is always the sum of the elements in `lengths`. @@ -56,7 +56,7 @@ impl PackedArray { / mem::size_of::() } - /// Returns the number of default (zero) elements that are explicitly stored in `entries`. If there is one default elements between adjacent groups, it is more economical to store the one default element explicitly and merge the two groups, than to store the `start_indices` and `lengths` of those groups. + /// Returns the number of default (zero) elements that are explicitly stored in `entries`. If there is one default element between adjacent groups, it is more economical to store the one default element explicitly and merge the two groups, than to store the `start_indices` and `lengths` of both groups. #[must_use] pub fn explicit_zeros(&self) -> usize { self.entries.iter().filter(|x| **x == T::default()).count() @@ -68,7 +68,7 @@ impl PackedArray { self.entries.iter().filter(|x| **x != T::default()).count() } - /// Returns an `Iterator` over the non-default (non-zero) elements of this array. The type of an iterator element is `([usize; D], T)`. + /// Returns an `Iterator` over the non-default (non-zero) elements of this array. The type of an iterator element is `([usize; D], T)` where the first element of the tuple is the index and the second element is the value. 
pub fn indexed_iter(&self) -> impl Iterator + '_ { self.start_indices .iter() @@ -83,7 +83,6 @@ impl PackedArray { } impl, const D: usize> MulAssign for PackedArray { - /// Perform `self *= rhs` as elementwise multiplication (in place). fn mul_assign(&mut self, rhs: T) { self.entries.iter_mut().for_each(|x| *x *= rhs); @@ -91,7 +90,6 @@ impl, const D: usize> MulAssign for PackedArray } impl PackedArray { - /// Converts `array` into a `PackedArray` of dimension 3. #[must_use] pub fn from_ndarray(array: ArrayView3, xstart: usize, xsize: usize) -> Self { @@ -110,15 +108,15 @@ impl PackedArray { } } -/// Converts a `multi_index` into an flat index. +/// Converts a `multi_index` into a flat index. fn ravel_multi_index(multi_index: &[usize; D], dimensions: &[usize]) -> usize { assert_eq!(multi_index.len(), dimensions.len()); - + multi_index - .iter() - .skip(1) - .zip(dimensions.iter().skip(1)) - .fold(multi_index[0], |acc, (i, d)| acc * d + i) + .iter() + .skip(1) + .zip(dimensions.iter().skip(1)) + .fold(multi_index[0], |acc, (i, d)| acc * d + i) } /// Converts a flat `index` into a multi_index. 
@@ -248,7 +246,6 @@ impl IndexMut<[usize; D]> } } - #[cfg(test)] mod tests { use ndarray::Array3; @@ -663,5 +660,4 @@ mod tests { assert_eq!(array.explicit_zeros(), 1); } - } From a4d7c6e69102b65a65a2231ae9b44e97c35a6fb8 Mon Sep 17 00:00:00 2001 From: janw20 Date: Thu, 18 Apr 2024 17:59:02 +0200 Subject: [PATCH 040/179] Fix ordering --- pineappl/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pineappl/src/lib.rs b/pineappl/src/lib.rs index 6c1479b13..7a115d085 100644 --- a/pineappl/src/lib.rs +++ b/pineappl/src/lib.rs @@ -8,10 +8,10 @@ pub mod evolution; pub mod fk_table; pub mod grid; pub mod import_only_subgrid; -pub mod packed_array; pub mod lagrange_subgrid; pub mod lumi; pub mod ntuple_subgrid; +pub mod packed_array; pub mod pids; pub mod sparse_array3; pub mod subgrid; From 3bbf980ce609ee40c1b5f9f20b9cb60b74490943 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 23 Apr 2024 13:40:04 +0200 Subject: [PATCH 041/179] Adjust documentation a bit --- pineappl/src/packed_array.rs | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index cf4dbedda..e15a67615 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -1,13 +1,18 @@ +//! Provides the [`PackedArray`] struct. + use std::ops::{Index, IndexMut, MulAssign}; use std::{mem, vec}; use ndarray::ArrayView3; use serde::{Deserialize, Serialize}; -/// `D`-dimensional array similar to ndarray::ArrayBase, except that `T::default()` is not stored to save space. Instead, adjacent non-default elements are grouped together and the index of their first element (`start_index`) and the length of the group (`lengths`) is stored. +/// `D`-dimensional array similar to [`ndarray::ArrayBase`], except that `T::default()` is not +/// stored to save space. 
Instead, adjacent non-default elements are grouped together and the index +/// of their first element (`start_index`) and the length of the group (`lengths`) is stored. #[derive(Clone, Deserialize, Serialize)] pub struct PackedArray { - /// The actual values stored in the array. The length of `entries` is always the sum of the elements in `lengths`. + /// The actual values stored in the array. The length of `entries` is always the sum of the + /// elements in `lengths`. entries: Vec, /// The indices of the first elements in each group. start_indices: Vec, @@ -22,7 +27,6 @@ impl PackedArray { #[must_use] pub fn new(shape: [usize; D]) -> Self { Self { - // entries: BTreeMap::new(), entries: vec![], start_indices: vec![], lengths: vec![], @@ -49,14 +53,18 @@ impl PackedArray { self.lengths.clear(); } - /// Returns the overhead of storing the `start_indices` and the `lengths` of the groups, in units of `f64`. + /// Returns the overhead of storing the `start_indices` and the `lengths` of the groups, in + /// units of `f64`. #[must_use] pub fn overhead(&self) -> usize { ((self.start_indices.len() + self.lengths.len()) * mem::size_of::()) / mem::size_of::() } - /// Returns the number of default (zero) elements that are explicitly stored in `entries`. If there is one default element between adjacent groups, it is more economical to store the one default element explicitly and merge the two groups, than to store the `start_indices` and `lengths` of both groups. + /// Returns the number of default (zero) elements that are explicitly stored in `entries`. If + /// there is one default element between adjacent groups, it is more economical to store the + /// one default element explicitly and merge the two groups, than to store the `start_indices` + /// and `lengths` of both groups. 
#[must_use] pub fn explicit_zeros(&self) -> usize { self.entries.iter().filter(|x| **x == T::default()).count() @@ -68,7 +76,9 @@ impl PackedArray { self.entries.iter().filter(|x| **x != T::default()).count() } - /// Returns an `Iterator` over the non-default (non-zero) elements of this array. The type of an iterator element is `([usize; D], T)` where the first element of the tuple is the index and the second element is the value. + /// Returns an `Iterator` over the non-default (non-zero) elements of this array. The type of + /// an iterator element is `([usize; D], T)` where the first element of the tuple is the index + /// and the second element is the value. pub fn indexed_iter(&self) -> impl Iterator + '_ { self.start_indices .iter() @@ -83,14 +93,13 @@ impl PackedArray { } impl, const D: usize> MulAssign for PackedArray { - /// Perform `self *= rhs` as elementwise multiplication (in place). fn mul_assign(&mut self, rhs: T) { self.entries.iter_mut().for_each(|x| *x *= rhs); } } impl PackedArray { - /// Converts `array` into a `PackedArray` of dimension 3. + /// Converts `array` into a `PackedArray`. #[must_use] pub fn from_ndarray(array: ArrayView3, xstart: usize, xsize: usize) -> Self { let shape = array.shape(); @@ -119,7 +128,7 @@ fn ravel_multi_index(multi_index: &[usize; D], dimensions: &[usi .fold(multi_index[0], |acc, (i, d)| acc * d + i) } -/// Converts a flat `index` into a multi_index. +/// Converts a flat `index` into a `multi_index`. 
fn unravel_index(index: usize, dimensions: &[usize]) -> [usize; D] { assert!(index < dimensions.iter().product()); let mut indices = [0; D]; From dbef998c0fc4503801a01571c5079b2acf5b9457 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 23 Apr 2024 13:41:24 +0200 Subject: [PATCH 042/179] Group `use` statements together --- pineappl/src/packed_array.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index e15a67615..d1d4ed680 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -1,10 +1,9 @@ //! Provides the [`PackedArray`] struct. -use std::ops::{Index, IndexMut, MulAssign}; -use std::{mem, vec}; - use ndarray::ArrayView3; use serde::{Deserialize, Serialize}; +use std::mem; +use std::ops::{Index, IndexMut, MulAssign}; /// `D`-dimensional array similar to [`ndarray::ArrayBase`], except that `T::default()` is not /// stored to save space. Instead, adjacent non-default elements are grouped together and the index From 5a858278a9f6147930b19160cc16caab6a569722 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 23 Apr 2024 14:52:34 +0200 Subject: [PATCH 043/179] Use `zip` early In this way all necessary modifications are made uniformly (with late `zip` it's possible to forget the first/second `skip`/`rev`) --- pineappl/src/packed_array.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index d1d4ed680..437b68c20 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -122,8 +122,8 @@ fn ravel_multi_index(multi_index: &[usize; D], dimensions: &[usi multi_index .iter() + .zip(dimensions) .skip(1) - .zip(dimensions.iter().skip(1)) .fold(multi_index[0], |acc, (i, d)| acc * d + i) } @@ -133,8 +133,8 @@ fn unravel_index(index: usize, dimensions: &[usize]) -> [usize; let mut indices = [0; D]; indices .iter_mut() + .zip(dimensions) .rev() - 
.zip(dimensions.iter().rev()) .fold(index, |acc, (i, d)| { *i = acc % d; acc / d From a4c5357512d09f75817789421c5b06b4561ec3d1 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 24 Apr 2024 10:24:37 +0200 Subject: [PATCH 044/179] Add some cosmetic changes --- pineappl/src/packed_array.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index 437b68c20..a83eb3e1c 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -117,23 +117,23 @@ impl PackedArray { } /// Converts a `multi_index` into a flat index. -fn ravel_multi_index(multi_index: &[usize; D], dimensions: &[usize]) -> usize { - assert_eq!(multi_index.len(), dimensions.len()); +fn ravel_multi_index(multi_index: &[usize; D], shape: &[usize]) -> usize { + assert_eq!(multi_index.len(), shape.len()); multi_index .iter() - .zip(dimensions) + .zip(shape) .skip(1) .fold(multi_index[0], |acc, (i, d)| acc * d + i) } /// Converts a flat `index` into a `multi_index`. 
-fn unravel_index(index: usize, dimensions: &[usize]) -> [usize; D] { - assert!(index < dimensions.iter().product()); +fn unravel_index(index: usize, shape: &[usize]) -> [usize; D] { + assert!(index < shape.iter().product()); let mut indices = [0; D]; indices .iter_mut() - .zip(dimensions) + .zip(shape) .rev() .fold(index, |acc, (i, d)| { *i = acc % d; @@ -210,13 +210,12 @@ impl IndexMut<[usize; D]> let distance = raveled_index - (start_index + length) + 1; self.lengths[point - 1] += distance; - for _ in 0..(distance) { + for _ in 0..distance { self.entries.insert(point_entries, Default::default()); } if let Some(start_index_next) = self.start_indices.get(point) { if raveled_index + threshold_distance >= *start_index_next { - // println!("test 4"); let distance_next = start_index_next - raveled_index; self.lengths[point - 1] += distance_next - 1 + self.lengths[point]; @@ -256,11 +255,10 @@ impl IndexMut<[usize; D]> #[cfg(test)] mod tests { + use super::*; use ndarray::Array3; use std::mem; - use super::*; - #[test] fn unravel_index() { assert_eq!(super::unravel_index(0, &[3, 2]), [0, 0]); From 64197c2d2623b697333773650798f8552e8406ce Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 25 Apr 2024 08:45:32 +0200 Subject: [PATCH 045/179] Simplify `ravel_multi_index` and `unravel_index` --- pineappl/src/packed_array.rs | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index a83eb3e1c..7458475f6 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -123,22 +123,17 @@ fn ravel_multi_index(multi_index: &[usize; D], shape: &[usize]) multi_index .iter() .zip(shape) - .skip(1) - .fold(multi_index[0], |acc, (i, d)| acc * d + i) + .fold(0, |acc, (i, d)| acc * d + i) } /// Converts a flat `index` into a `multi_index`. 
-fn unravel_index(index: usize, shape: &[usize]) -> [usize; D] { +fn unravel_index(mut index: usize, shape: &[usize]) -> [usize; D] { assert!(index < shape.iter().product()); let mut indices = [0; D]; - indices - .iter_mut() - .zip(shape) - .rev() - .fold(index, |acc, (i, d)| { - *i = acc % d; - acc / d - }); + for (i, d) in indices.iter_mut().zip(shape).rev() { + *i = index % d; + index /= d; + } indices } From 21fe08377c18e0bec9b5eef0503e2d0215d1a324 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 25 Apr 2024 08:46:17 +0200 Subject: [PATCH 046/179] Use `assert_eq` instead of `debug_assert_eq` --- pineappl/src/packed_array.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index 7458475f6..bbb3ec3a9 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -141,7 +141,7 @@ impl Index<[usize; D]> for Packed type Output = T; fn index(&self, index: [usize; D]) -> &Self::Output { - debug_assert_eq!(index.len(), self.shape.len()); + assert_eq!(index.len(), self.shape.len()); assert!( index.iter().zip(self.shape.iter()).all(|(&i, &d)| i < d), "index {:?} is out of bounds for array of shape {:?}", @@ -176,7 +176,7 @@ impl IndexMut<[usize; D]> for PackedArray { fn index_mut(&mut self, index: [usize; D]) -> &mut Self::Output { - debug_assert_eq!(index.len(), self.shape.len()); + assert_eq!(index.len(), self.shape.len()); assert!( index.iter().zip(self.shape.iter()).all(|(&i, &d)| i < d), "index {:?} is out of bounds for array of shape {:?}", From 1455e6b6d5f754b0d3dc261112d3f82140a62cf3 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 25 Apr 2024 08:53:01 +0200 Subject: [PATCH 047/179] Replace loop over `insert` with `splice` This should be more efficient since Rust can calculate the final size and thus allocate only once --- pineappl/src/packed_array.rs | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git 
a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index bbb3ec3a9..80109c4ab 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -2,6 +2,7 @@ use ndarray::ArrayView3; use serde::{Deserialize, Serialize}; +use std::iter; use std::mem; use std::ops::{Index, IndexMut, MulAssign}; @@ -204,10 +205,10 @@ impl IndexMut<[usize; D]> } else if raveled_index < start_index + length + threshold_distance { let distance = raveled_index - (start_index + length) + 1; self.lengths[point - 1] += distance; - - for _ in 0..distance { - self.entries.insert(point_entries, Default::default()); - } + self.entries.splice( + point_entries..point_entries, + iter::repeat(Default::default()).take(distance), + ); if let Some(start_index_next) = self.start_indices.get(point) { if raveled_index + threshold_distance >= *start_index_next { @@ -216,9 +217,10 @@ impl IndexMut<[usize; D]> self.lengths[point - 1] += distance_next - 1 + self.lengths[point]; self.lengths.remove(point); self.start_indices.remove(point); - for _ in 0..(distance_next - 1) { - self.entries.insert(point_entries, Default::default()); - } + self.entries.splice( + point_entries..point_entries, + iter::repeat(Default::default()).take(distance_next - 1), + ); } } @@ -232,9 +234,10 @@ impl IndexMut<[usize; D]> self.start_indices[point] = raveled_index; self.lengths[point] += distance; - for _ in 0..distance { - self.entries.insert(point_entries, Default::default()); - } + self.entries.splice( + point_entries..point_entries, + iter::repeat(Default::default()).take(distance), + ); return &mut self.entries[point_entries]; } } From 65a7da65289050c371c641f908066082c7416e95 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 25 Apr 2024 16:54:24 +0200 Subject: [PATCH 048/179] Fix integration tests not properly swapping `x1` and `x2` --- pineappl/tests/drell_yan_lo.rs | 216 ++++++++++++++++++--------------- 1 file changed, 118 insertions(+), 98 deletions(-) diff --git 
a/pineappl/tests/drell_yan_lo.rs b/pineappl/tests/drell_yan_lo.rs index 79d6816f3..7e61c8da8 100644 --- a/pineappl/tests/drell_yan_lo.rs +++ b/pineappl/tests/drell_yan_lo.rs @@ -239,7 +239,17 @@ fn fill_drell_yan_lo_grid( let pto = 0; let channel = 2; - grid.fill(pto, yll.abs(), channel, &Ntuple { x2, x1, q2, weight }); + grid.fill( + pto, + yll.abs(), + channel, + &Ntuple { + x1: x2, + x2: x1, + q2, + weight, + }, + ); // LO down-antidown-type channel let weight = jacobian * int_quark(s, t, u, -1.0 / 3.0, -0.5); @@ -253,7 +263,17 @@ fn fill_drell_yan_lo_grid( let pto = 0; let channel = 4; - grid.fill(pto, yll.abs(), channel, &Ntuple { x2, x1, q2, weight }); + grid.fill( + pto, + yll.abs(), + channel, + &Ntuple { + x1: x2, + x2: x1, + q2, + weight, + }, + ); } Ok(grid) @@ -457,113 +477,113 @@ fn generate_grid(subgrid_type: &str, dynamic: bool, reweight: bool) -> Result Date: Thu, 25 Apr 2024 17:04:35 +0200 Subject: [PATCH 049/179] Update year in copyright line --- pineappl_capi/cbindgen.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pineappl_capi/cbindgen.toml b/pineappl_capi/cbindgen.toml index 21b9ea071..990540251 100644 --- a/pineappl_capi/cbindgen.toml +++ b/pineappl_capi/cbindgen.toml @@ -4,7 +4,7 @@ language = "C" header = """/* * PineAPPL - PDF-independent binning of phase space weights - * Copyright (C) 2020-2021 Christopher Schwan + * Copyright (C) 2020-2024 Christopher Schwan * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by From 618b097900710b642d5df7233cb6b06487daa3af Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 26 Apr 2024 10:34:58 +0200 Subject: [PATCH 050/179] Add Python 3.6 installation notes --- docs/installation.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/docs/installation.md b/docs/installation.md index bd340e435..9c7d423a2 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ 
-202,6 +202,22 @@ To install the Python interface, run This will not require any previous installation of Rust. For more documentation and more information see its [README](../pineappl_py/README.md). +### Python 3.6 + +Python 3.6 is at the end of its life since 2021 December, but we still support +its installation, which however is a bit trickier. First upgrade your +installation of `pip`: + + pip install --upgrade pip + +For this to work you may have to add the switch `--user` after `install`. Next, +make sure you call the newly upgraded `pip` as follows: + + pip install --ignore-requires-python pineappl + +The switch `--ignore-requires-python` is needed because for an unknown reason +the `requires-python` version is incorrectly determined by `pip`. + ## Rust You will need the Rust compiler and its build system `cargo`. If `cargo` is From 957c4b9cbd18867061c278eb2f4790c9236687ed Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 26 Apr 2024 13:48:43 +0200 Subject: [PATCH 051/179] Fix building the CLI on macOS (#269) * Install LHAPDF on macOS * Disable Python * Correctly disable Python * Fix `--disable-dynamic` with `--disable-shared` * Do not run workflows in this branch * Use brew to install LHAPDF on macOS * Run Release workflow automatically in this branch * Create missing directory * Fix problems with workflow syntax * Simplify matrix strategy a bit * Try to fix packaging the CLI * Add comment why we can't cross compile * Simplify CLI installation on Linux * Add missing `--locked` switch to installation * Install and use APPLgrid on macOS * Download sources and export `APPL_IGRID_DIR` on macOS * Add some diagnostics for homebrew * Keep temporary directory when compiling with brew * Fix `APPL_IGRID_DIR` determination * Print more diagnostics * Add missing include paths for `pineappl_applgrid` * Add more tested versions of APPLgrid * Set C++ standard in `pineappl_applgrid` * Reduce APPLgrid dependencies and install zlib * Fix C++ warning * Force 
installation of zlib and list contents * Print zlib search path * Install `pkg-config` to find zlib from brew * Fix `pkg-config` call * Add more diagnostic to pkg-config call * Print more diagnostic strings * List contents of the ROOT installation * Compile APPLgrid without ROOT * Add missing `--without-root` * Manually install zlib * Print `applgrid-config` script to diagnose building failure * Force-remove linking to `gfortran` * Rename existing `tmp` with `new` * Remove diagnostics and simplify `sed` call * Revert last commit except the removal of diagnostics * Compile CLI on macOS against fastNLO * Update `cc` crate to fix yet another problem on macOS * Update `cxx` and related crates to hopefully fix missing compiler flag * Explicitly set C++11 standard * Add LHAPDF include paths to fastNLO * Install manpages on both Linux and macOS * Debug version string when installing manpages * Try to fix version string patching on GitHub * Add zlib when linking statically * Revert "refactor: Use Rust version of convolute_with_one" This reverts commit 0d9876fb5bc857beddf4eb4eec323e4cf037d509. 
* Remove `signature` macro that breaks Python 3.6 support This commit and the previous should be reverted when we drop Python 3.6 support * Try to fix problems on macOS --- .github/workflows/capi.yaml | 2 ++ .github/workflows/msrv.yml | 2 ++ .github/workflows/python.yml | 2 ++ .github/workflows/release.yml | 51 ++++++++++++++++++++++++-------- .github/workflows/rust.yml | 2 ++ Cargo.lock | 12 ++++---- pineappl_py/pineappl/fk_table.py | 38 ++++++++++++++++++++++++ pineappl_py/src/fk_table.rs | 1 - 8 files changed, 91 insertions(+), 19 deletions(-) diff --git a/.github/workflows/capi.yaml b/.github/workflows/capi.yaml index 4d15a0ad7..64e1a545b 100644 --- a/.github/workflows/capi.yaml +++ b/.github/workflows/capi.yaml @@ -2,6 +2,8 @@ name: CAPI on: push: + branches-ignore: + - fix-macos-cli-generation jobs: capi: diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 25ef4b9a0..5d77ba80a 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -2,6 +2,8 @@ name: MSRV on: push: + branches-ignore: + - fix-macos-cli-generation env: CARGO_TERM_COLOR: always diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index c4777316a..ca63e3129 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -2,6 +2,8 @@ name: Python on: push: + branches-ignore: + - fix-macos-cli-generation jobs: test: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c816d503f..97d899a09 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -4,6 +4,8 @@ on: push: tags: - 'v[0-9]+*' + branches: + - fix-macos-cli-generation workflow_dispatch: env: @@ -31,9 +33,9 @@ jobs: - name: Compile library run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --target=${{ matrix.target }} - cargo install cargo-c + cargo install --locked cargo-c cd pineappl_capi - cargo cinstall --verbose --destdir=prefix --library-type=cdylib --prefix=/ --target=${{ 
matrix.target }} + cargo cinstall --destdir=prefix --library-type=cdylib --locked --prefix=/ --target=${{ matrix.target }} --verbose cd prefix tar czf ../../pineappl_capi-${{ matrix.target }}.tar.gz . - name: Upload artifact @@ -46,20 +48,44 @@ jobs: cli-macos: strategy: matrix: - os: [macos-latest, macos-14] include: - - os: macos-latest + - os: macos-13 target: x86_64-apple-darwin - os: macos-14 + # we cannot cross-compile, because we need also all dependencies for the specified target target: aarch64-apple-darwin runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 - name: Compile binary run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --target=${{ matrix.target }} - cargo build --release --features=evolve,fktable,static --bin pineappl --target=${{ matrix.target }} - cp target/release/pineappl prefix/bin/ + brew tap davidchall/hep + # install LHAPDF + brew install lhapdf + # install APPLgrid's dependencies; disable ROOT because its static libraries are missing + brew install --only-dependencies --without-hoppet --without-lhapdf --without-root applgrid + # install zlib, which is a dependency of APPLgrid but somehow missing + brew install zlib + # APPLgrid wants to be linked against zlib, and we need to find its static library via pkg-config + export PKG_CONFIG_PATH=$(find $(brew --cellar) -name '*.pc' -exec dirname {} + | sort -u | tr '\n' ':') + # manually compile APPLgrid, because we need the file `appl_igrid.h` and the files it includes, which are possibly generated + export HOMEBREW_TEMP="$(pwd)"/tmp + mkdir -p ${HOMEBREW_TEMP} + brew install --build-from-source --keep-tmp --without-hoppet --without-lhapdf --without-root applgrid + export APPL_IGRID_DIR=$(find ${HOMEBREW_TEMP} -name appl_igrid.h -exec dirname {} +) + # do not link against `gfortran` + sed 's/-lgfortran//g' $(command -v applgrid-config) > applgrid-config.new + mv applgrid-config.new $(command -v applgrid-config) + chmod +x $(command -v applgrid-config) + 
# install fastNLO + brew install --without-fastjet fastnlo + # install Rust + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-host=${{ matrix.target }} + # build the CLI + cargo install --all-features --locked --path pineappl_cli --root=prefix --target=${{ matrix.target }} + # build manpages + mkdir -p prefix/share/man/man1 + cargo xtask install-manpages prefix/share/man/man1 cd prefix tar czf ../pineappl_cli-${{ matrix.target }}.tar.gz . - name: Upload artifact @@ -81,7 +107,7 @@ jobs: - name: Compile library run: | cd pineappl_capi - cargo cinstall --verbose --destdir=prefix --library-type=cdylib --prefix=/ --target=${{ matrix.target }} + cargo cinstall --destdir=prefix --library-type=cdylib --locked --prefix=/ --target=${{ matrix.target }} --verbose cd prefix tar czf ../../pineappl_capi-${{ matrix.target }}.tar.gz . # print the glibc version requirement @@ -104,9 +130,10 @@ jobs: - uses: actions/checkout@v3 - name: Compile binary run: | - cargo build --release --all-features --bin pineappl - mkdir -p prefix/bin - cp target/release/pineappl prefix/bin/ + cargo install --all-features --locked --path pineappl_cli --root=prefix --target=${{ matrix.target }} + # build manpages + mkdir -p prefix/share/man/man1 + cargo xtask install-manpages prefix/share/man/man1 cd prefix tar czf ../pineappl_cli-${{ matrix.target }}.tar.gz . 
- name: Upload artifact @@ -235,7 +262,7 @@ jobs: path: dist wheels-macos: - runs-on: macos-latest + runs-on: macos-13 strategy: matrix: target: [aarch64, x86_64] diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index a2751695b..4e47550de 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -2,6 +2,8 @@ name: Rust on: push: + branches-ignore: + - fix-macos-cli-generation defaults: run: diff --git a/Cargo.lock b/Cargo.lock index 2a38a6d0d..deb147705 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -347,9 +347,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.115" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de00f15a6fa069c99b88c5c78c4541d0e7899a33b86f7480e23df2431fce0bc" +checksum = "ff4dc7287237dd438b926a81a1a5605dad33d286870e5eee2db17bf2bcd9e92a" dependencies = [ "cc", "cxxbridge-flags", @@ -374,15 +374,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.115" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f3fed61d56ba497c4efef9144dfdbaa25aa58f2f6b3a7cf441d4591c583745c" +checksum = "701a1ac7a697e249cdd8dc026d7a7dafbfd0dbcd8bd24ec55889f2bc13dd6287" [[package]] name = "cxxbridge-macro" -version = "1.0.115" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8908e380a8efd42150c017b0cfa31509fc49b6d47f7cb6b33e93ffb8f4e3661e" +checksum = "b404f596046b0bb2d903a9c786b875a126261b52b7c3a64bbb66382c41c771df" dependencies = [ "proc-macro2", "quote", diff --git a/pineappl_py/pineappl/fk_table.py b/pineappl_py/pineappl/fk_table.py index 813f7a7b6..91a7080c4 100644 --- a/pineappl_py/pineappl/fk_table.py +++ b/pineappl_py/pineappl/fk_table.py @@ -1,3 +1,5 @@ +import numpy as np + from .pineappl import PyFkTable, PyFkAssumptions from .utils import PyWrapper @@ -50,6 +52,42 @@ def optimize(self, assumptions = "Nf6Ind"): assumptions = 
FkAssumptions(assumptions) return self._raw.optimize(assumptions._raw) + def convolute_with_one( + self, + pdg_id, + xfx, + bin_indices=np.array([], dtype=np.uint64), + lumi_mask=np.array([], dtype=bool), + ): + r"""Convolute FkTable with a pdf. + + Parameters + ---------- + pdg_id : int + PDG Monte Carlo ID of the hadronic particle `xfx` is the PDF for + xfx : callable + lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x`-grid + bin_indices : sequence(int) + A list with the indices of the corresponding bins that should be calculated. An + empty list means that all orders should be calculated. + lumi_mask : sequence(bool) + Mask for selecting specific luminosity channels. The value `True` means the + corresponding channel is included. An empty list corresponds to all channels being + enabled. + + Returns + ------- + list(float) : + cross sections for all bins, for each scale-variation tuple (first all bins, then + the scale variation) + """ + return self.raw.convolute_with_one( + pdg_id, + xfx, + np.array(bin_indices), + np.array(lumi_mask), + ) + class FkAssumptions(PyWrapper): """ diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index f561b5353..b1565afe4 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -223,7 +223,6 @@ impl PyFkTable { /// ------- /// numpy.ndarray(float) : /// cross sections for all bins - #[pyo3(signature = (pdg_id, xfx, bin_indices = None, lumi_mask= None))] pub fn convolute_with_one<'py>( &self, pdg_id: i32, From 6a711bbd347a0f1d090ddac043ce0358cc5fa45d Mon Sep 17 00:00:00 2001 From: janw20 Date: Fri, 26 Apr 2024 14:12:22 +0200 Subject: [PATCH 052/179] Improve `index_mut` documentation --- pineappl/src/packed_array.rs | 53 ++++++++++++++++++++++++++++++++---- 1 file changed, 47 insertions(+), 6 deletions(-) diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index 80109c4ab..4f29764a7 100644 --- a/pineappl/src/packed_array.rs +++ 
b/pineappl/src/packed_array.rs @@ -14,9 +14,10 @@ pub struct PackedArray { /// The actual values stored in the array. The length of `entries` is always the sum of the /// elements in `lengths`. entries: Vec, - /// The indices of the first elements in each group. + /// The indices of the first elements in each group. `start_indices[i]` corresponds to the + /// group with index `i`. start_indices: Vec, - /// The length of each group. + /// The length of each group. `lengths[i]` corresponds to the group with index `i`. lengths: Vec, /// The shape (dimensions) of the array. shape: Vec, @@ -178,6 +179,9 @@ impl IndexMut<[usize; D]> { fn index_mut(&mut self, index: [usize; D]) -> &mut Self::Output { assert_eq!(index.len(), self.shape.len()); + + // Panic if the index value for any dimension is greater or equal than the length of this + // dimension. assert!( index.iter().zip(self.shape.iter()).all(|(&i, &d)| i < d), "index {:?} is out of bounds for array of shape {:?}", @@ -185,38 +189,73 @@ impl IndexMut<[usize; D]> self.shape ); + // The insertion cases are: + // 1. this array already stores an element at `index`: + // -> we just have to update this element + // 2. this array does not store an element at `index`: + // a. the distance of the (raveled) `index` is `threshold_distance` away from the next + // or previous element that is already stored: + // -> we can merge the new element into already stored groups, potentially padding + // with `T::default()` elements + // b. the distance of the (raveled) `index` from the existing elements is greater than + // `threshold_distance`: + // -> we insert the element as a new group + let raveled_index = ravel_multi_index(&index, &self.shape); - // the closest start_index that is greater than raveled_index + // To determine which groups the new element is close to, `point` is the index of the + // start_index of the first group after the new element. 
`point` is 0 if no elements before + // the new element are stored, and point is `self.start_indices.len()` if no elements after + // the new element are stored. let point = self.start_indices.partition_point(|&i| i <= raveled_index); - // the index that is stored in `point`, translated to the indices of the entries + // `point_entries` is the index of the first element of the next group, given in + // `self.entries`, i.e. the element at index `self.start_indices[point]`. let point_entries = self.lengths.iter().take(point).sum::(); - // maximum distance for merging regions + // Maximum distance for merging groups. If the new element is within `threshold_distance` + // of an existing group (i.e. there are `threshold_distance - 1` implicit elements + // between them), we merge the new element into the existing group. We choose 2 as the + // `threshold_distance` based on memory: in the case of `T` = `f64`, it is more economical + // to store one zero explicitly than to store the start_index and length of a new group. let threshold_distance = 2; + // If `point > 0`, there is at least one group preceding the new element. Thus, in the + // following we determine if we can insert the new element into this group. if point > 0 { + // start_index and length of the group before the new element, i.e. the group + // (potentially) getting the new element let start_index = self.start_indices[point - 1]; let length = self.lengths[point - 1]; + // Case 1: an element is already stored at this `index` if raveled_index < start_index + length { return &mut self.entries[point_entries - length + raveled_index - start_index]; + // Case 2a: the new element can be merged into the preceding group } else if raveled_index < start_index + length + threshold_distance { let distance = raveled_index - (start_index + length) + 1; + // Merging happens by increasing the length of the group self.lengths[point - 1] += distance; + // and inserting the necessary number of default elements. 
self.entries.splice( point_entries..point_entries, iter::repeat(Default::default()).take(distance), ); + // If the new element is within `threshold_distance` of the *next* group, we merge + // the next group into this group. if let Some(start_index_next) = self.start_indices.get(point) { if raveled_index + threshold_distance >= *start_index_next { let distance_next = start_index_next - raveled_index; + // Increase the length of this group self.lengths[point - 1] += distance_next - 1 + self.lengths[point]; + // and remove the next group. we don't have to manipulate `self.entries`, + // since the grouping of the elements is handled only by + // `self.start_indices` and `self.lengths` self.lengths.remove(point); self.start_indices.remove(point); + // Insert the default elements between the groups. self.entries.splice( point_entries..point_entries, iter::repeat(Default::default()).take(distance_next - 1), @@ -228,6 +267,8 @@ impl IndexMut<[usize; D]> } } + // Case 2a: the new element can be merged into the next group. No `self.lengths.remove` and + // `self.start_indices.remove` here, since we are not merging two groups. 
if let Some(start_index_next) = self.start_indices.get(point) { if raveled_index + threshold_distance >= *start_index_next { let distance = start_index_next - raveled_index; @@ -242,7 +283,7 @@ impl IndexMut<[usize; D]> } } - // in the most general case we have to insert a new start_index + // Case 2b: we insert a new group of length 1 self.start_indices.insert(point, raveled_index); self.lengths.insert(point, 1); self.entries.insert(point_entries, Default::default()); From ebf172a361593015297e9840efe87673b6de3054 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 26 Apr 2024 14:34:44 +0200 Subject: [PATCH 053/179] Optimize grids with similar channels --- pineappl/src/grid.rs | 8 ++++++-- pineappl/src/lumi.rs | 45 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 2 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index d30dbaf55..fa677ef1a 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1241,8 +1241,11 @@ impl Grid { // merge luminosities that are the same while let Some(index) = indices.pop() { - if let Some(&other_index) = indices.iter().find(|i| self.lumi[**i] == self.lumi[index]) - { + if let Some((other_index, factor)) = indices.iter().find_map(|&i| { + self.lumi[i] + .common_factor(&self.lumi[index]) + .map(|factor| (i, factor)) + }) { let (mut a, mut b) = self .subgrids .multi_slice_mut((s![.., .., other_index], s![.., .., index])); @@ -1250,6 +1253,7 @@ impl Grid { // check if in all cases the limits are compatible with merging for (lhs, rhs) in a.iter_mut().zip(b.iter_mut()) { if !rhs.is_empty() { + rhs.scale(factor); if lhs.is_empty() { // we can't merge into an EmptySubgridV1 *lhs = rhs.clone_empty(); diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs index 2bed5e8fb..5c21a99c2 100644 --- a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -3,6 +3,7 @@ use super::grid::Grid; use super::pids; use super::subgrid::{Mu2, Subgrid}; +use float_cmp::approx_eq; use 
itertools::Itertools; use rustc_hash::FxHashMap; use serde::{Deserialize, Serialize}; @@ -130,6 +131,50 @@ impl LumiEntry { pub fn transpose(&self) -> Self { Self::new(self.entry.iter().map(|(a, b, c)| (*b, *a, *c)).collect()) } + + /// If `other` is the same channel when only comparing PIDs and neglecting the factors, return + /// the number `f1 / f2`, where `f1` is the factor from `self` and `f2` is the factor from + /// `other`. + /// + /// # Examples + /// + /// ```rust + /// use pineappl::lumi::LumiEntry; + /// + /// let entry1 = LumiEntry::new(vec![(2, 2, 2.0), (4, 4, 2.0)]); + /// let entry2 = LumiEntry::new(vec![(4, 4, 1.0), (2, 2, 1.0)]); + /// let entry3 = LumiEntry::new(vec![(3, 4, 1.0), (2, 2, 1.0)]); + /// let entry4 = LumiEntry::new(vec![(4, 3, 1.0), (2, 3, 2.0)]); + /// + /// assert_eq!(entry1.common_factor(&entry2), Some(2.0)); + /// assert_eq!(entry1.common_factor(&entry3), None); + /// assert_eq!(entry1.common_factor(&entry4), None); + /// ``` + pub fn common_factor(&self, other: &Self) -> Option { + if self.entry.len() != other.entry.len() { + return None; + } + + let result: Option> = self + .entry + .iter() + .zip(&other.entry) + .map(|(a, b)| ((a.0 == b.0) && (a.1 == b.1)).then_some(a.2 / b.2)) + .collect(); + + if let Some(factors) = result { + if factors + .windows(2) + .all(|win| approx_eq!(f64, win[0], win[1], ulps = 4)) + { + factors.first().copied() + } else { + None + } + } else { + None + } + } } /// Error type keeping information if [`LumiEntry::from_str`] went wrong. 
From fd4d0caf5f8c8890ac805e77e1914f2991390181 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 26 Apr 2024 15:43:39 +0200 Subject: [PATCH 054/179] Fix bug from commit ebf172a --- pineappl/src/grid.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index fa677ef1a..88a64e2c8 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1253,7 +1253,7 @@ impl Grid { // check if in all cases the limits are compatible with merging for (lhs, rhs) in a.iter_mut().zip(b.iter_mut()) { if !rhs.is_empty() { - rhs.scale(factor); + rhs.scale(1.0 / factor); if lhs.is_empty() { // we can't merge into an EmptySubgridV1 *lhs = rhs.clone_empty(); From 8724a941b62ffa11843536bf9a816fbd9fa173fc Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 26 Apr 2024 17:08:59 +0200 Subject: [PATCH 055/179] Fix potential bug in `ndarray` crate --- pineappl/src/grid.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 88a64e2c8..a8bd7dd21 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -2211,7 +2211,11 @@ impl Grid { for range in channel_ranges.into_iter() { self.lumi.drain(range.clone()); - self.subgrids.slice_axis_inplace(Axis(2), range.into()); + // TODO: the following line should be equivalent to the loop but it isn't + //self.subgrids.slice_axis_inplace(Axis(2), range.into()); + for index in range.rev() { + self.subgrids.remove_index(Axis(2), index); + } } } From 20341d2117bb6236c9ccb13be1a85d9685c091b1 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 26 Apr 2024 17:27:51 +0200 Subject: [PATCH 056/179] Streamline `Grid::delete_channels` method There's no bug in `Array3::slice_axis_inplace`, this method was wrongly used --- pineappl/src/grid.rs | 25 ++++--------------------- 1 file changed, 4 insertions(+), 21 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index a8bd7dd21..2168ce5dd 
100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -2193,29 +2193,12 @@ impl Grid { // sort and remove repeated indices channel_indices.sort_unstable(); channel_indices.dedup(); + channel_indices.reverse(); let channel_indices = channel_indices; - let mut channel_ranges: Vec> = Vec::new(); - - // convert indices into consecutive ranges - for &channel_index in &channel_indices { - match channel_ranges.last_mut() { - Some(range) if range.end == channel_index => range.end += 1, - _ => channel_ranges.push(channel_index..(channel_index + 1)), - } - } - - // reverse order so we don't invalidate indices - channel_ranges.reverse(); - let channel_ranges = channel_ranges; - - for range in channel_ranges.into_iter() { - self.lumi.drain(range.clone()); - // TODO: the following line should be equivalent to the loop but it isn't - //self.subgrids.slice_axis_inplace(Axis(2), range.into()); - for index in range.rev() { - self.subgrids.remove_index(Axis(2), index); - } + for index in channel_indices { + self.lumi.remove(index); + self.subgrids.remove_index(Axis(2), index); } } From 2ec6091b78b3692ddba1253230442e1c96895077 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Fri, 26 Apr 2024 21:41:53 +0200 Subject: [PATCH 057/179] fix: Add suitable FkTable constructor --- pineappl_py/pineappl/fk_table.py | 20 +++++++++++--------- pineappl_py/tests/test_fk_table.py | 4 ++-- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/pineappl_py/pineappl/fk_table.py b/pineappl_py/pineappl/fk_table.py index 91a7080c4..04efc8a14 100644 --- a/pineappl_py/pineappl/fk_table.py +++ b/pineappl_py/pineappl/fk_table.py @@ -5,8 +5,8 @@ class FkTable(PyWrapper): - """ - Python wrapper object to interface :class:`~pineappl.pineappl.PyFkTable`. + """Python wrapper object to interface + :class:`~pineappl.pineappl.PyFkTable`. 
Parameters ---------- @@ -17,10 +17,13 @@ class FkTable(PyWrapper): def __init__(self, pyfktable): self._raw = pyfktable + @classmethod + def from_grid(cls, grid): + return cls(PyFkTable(grid.raw)) + @classmethod def read(cls, path): - """ - Load an existing grid from file. + """Load an existing grid from file. Convenience wrapper for :meth:`pineappl.pineappl.PyFkTable.read()`. @@ -36,19 +39,19 @@ def read(cls, path): """ return cls(PyFkTable.read(path)) - def optimize(self, assumptions = "Nf6Ind"): + def optimize(self, assumptions="Nf6Ind"): """Optimize FK table storage. In order to perform any relevant optimization, assumptions are needed, and they are passed as parameters to the function. - + Parameters ---------- assumptions : FkAssumptions or str assumptions about the FkTable properties, declared by the user, deciding which optimizations are possible """ - if not isinstance(assumptions,FkAssumptions): + if not isinstance(assumptions, FkAssumptions): assumptions = FkAssumptions(assumptions) return self._raw.optimize(assumptions._raw) @@ -90,8 +93,7 @@ def convolute_with_one( class FkAssumptions(PyWrapper): - """ - Python wrapper object to interface + """Python wrapper object to interface :class:`~pineappl.pineappl.PyFkAssumptions`. 
Parameters diff --git a/pineappl_py/tests/test_fk_table.py b/pineappl_py/tests/test_fk_table.py index 88fdddb3f..0efbc69bf 100644 --- a/pineappl_py/tests/test_fk_table.py +++ b/pineappl_py/tests/test_fk_table.py @@ -6,7 +6,7 @@ class TestFkTable: def fake_grid(self, bins=None): lumis = [pineappl.lumi.LumiEntry([(1, 21, 0.1)])] - orders = [pineappl.grid.Order(3, 0, 0, 0)] + orders = [pineappl.grid.Order(0, 0, 0, 0)] bin_limits = np.array([1e-7, 1e-3, 1] if bins is None else bins, dtype=float) subgrid_params = pineappl.subgrid.SubgridParams() g = pineappl.grid.Grid.create(lumis, orders, bin_limits, subgrid_params) @@ -25,7 +25,7 @@ def test_convolute_with_one(self): np.array([1.0]), ) g.set_subgrid(0, 0, 0, subgrid) - fk = pineappl.fk_table.FkTable(g) + fk = pineappl.fk_table.FkTable.from_grid(g) np.testing.assert_allclose( fk.convolute_with_one(2212, lambda pid, x, q2: 0.0, lambda q2: 0.0), [0.0] * 2, From 339d90837d6efc0cb54719fd8c7e50b53076b7a5 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Fri, 26 Apr 2024 21:56:12 +0200 Subject: [PATCH 058/179] test: Compelte fk table test adaption from the grid one --- pineappl_py/tests/test_fk_table.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/pineappl_py/tests/test_fk_table.py b/pineappl_py/tests/test_fk_table.py index 0efbc69bf..2c09d02e0 100644 --- a/pineappl_py/tests/test_fk_table.py +++ b/pineappl_py/tests/test_fk_table.py @@ -5,11 +5,12 @@ class TestFkTable: def fake_grid(self, bins=None): - lumis = [pineappl.lumi.LumiEntry([(1, 21, 0.1)])] + lumis = [pineappl.lumi.LumiEntry([(1, 21, 1.0)])] orders = [pineappl.grid.Order(0, 0, 0, 0)] bin_limits = np.array([1e-7, 1e-3, 1] if bins is None else bins, dtype=float) subgrid_params = pineappl.subgrid.SubgridParams() g = pineappl.grid.Grid.create(lumis, orders, bin_limits, subgrid_params) + g.set_key_value("lumi_id_types", "pdg_mc_ids") return g def test_convolute_with_one(self): @@ -27,14 +28,10 @@ def 
test_convolute_with_one(self): g.set_subgrid(0, 0, 0, subgrid) fk = pineappl.fk_table.FkTable.from_grid(g) np.testing.assert_allclose( - fk.convolute_with_one(2212, lambda pid, x, q2: 0.0, lambda q2: 0.0), + fk.convolute_with_one(2212, lambda pid, x, q2: 0.0), [0.0] * 2, ) np.testing.assert_allclose( - fk.convolute_with_one(2212, lambda pid, x, q2: 1, lambda q2: 1.0), - [5e6 / 9999, 0.0], - ) - np.testing.assert_allclose( - fk.convolute_with_one(2212, lambda pid, x, q2: 1, lambda q2: 2.0), - [2**3 * 5e6 / 9999, 0.0], + fk.convolute_with_one(2212, lambda pid, x, q2: 1), + [5e7 / 9999, 0.0], ) From 1954e5c1fb076cd12dac2f04081335c12f22275d Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Fri, 26 Apr 2024 22:12:40 +0200 Subject: [PATCH 059/179] ci: Remove branch exception --- .github/workflows/capi.yaml | 2 -- .github/workflows/msrv.yml | 2 -- .github/workflows/python.yml | 2 -- .github/workflows/release.yml | 2 -- .github/workflows/rust.yml | 2 -- 5 files changed, 10 deletions(-) diff --git a/.github/workflows/capi.yaml b/.github/workflows/capi.yaml index 64e1a545b..4d15a0ad7 100644 --- a/.github/workflows/capi.yaml +++ b/.github/workflows/capi.yaml @@ -2,8 +2,6 @@ name: CAPI on: push: - branches-ignore: - - fix-macos-cli-generation jobs: capi: diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 5d77ba80a..25ef4b9a0 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -2,8 +2,6 @@ name: MSRV on: push: - branches-ignore: - - fix-macos-cli-generation env: CARGO_TERM_COLOR: always diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index ca63e3129..c4777316a 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -2,8 +2,6 @@ name: Python on: push: - branches-ignore: - - fix-macos-cli-generation jobs: test: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 97d899a09..961db5faa 100644 --- a/.github/workflows/release.yml +++ 
b/.github/workflows/release.yml @@ -4,8 +4,6 @@ on: push: tags: - 'v[0-9]+*' - branches: - - fix-macos-cli-generation workflow_dispatch: env: diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 4e47550de..a2751695b 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -2,8 +2,6 @@ name: Rust on: push: - branches-ignore: - - fix-macos-cli-generation defaults: run: From e09998618292df6a2b4869c763a8fb12b12956f2 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 29 Apr 2024 18:17:02 +0200 Subject: [PATCH 060/179] Add enum `PidBasis` --- pineappl/src/grid.rs | 31 +++++++++++++++++++------------ pineappl/src/pids.rs | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 12 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 2168ce5dd..6822eecd3 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -9,7 +9,7 @@ use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, Lagran use super::lumi::{LumiCache, LumiEntry}; use super::lumi_entry; use super::ntuple_subgrid::NtupleSubgridV1; -use super::pids; +use super::pids::{self, PidBasis}; use super::sparse_array3::SparseArray3; use super::subgrid::{ExtraSubgridParams, Mu2, Subgrid, SubgridEnum, SubgridParams}; use bitflags::bitflags; @@ -523,24 +523,31 @@ impl Grid { }) } - fn pdg_lumi(&self) -> Cow<[LumiEntry]> { + /// Return by which convention the particle IDs are encoded. 
+ pub fn pid_basis(&self) -> PidBasis { if let Some(key_values) = self.key_values() { if let Some(lumi_id_types) = key_values.get("lumi_id_types") { match lumi_id_types.as_str() { - "pdg_mc_ids" => {} - "evol" => { - return self - .lumi - .iter() - .map(|entry| LumiEntry::translate(entry, &pids::evol_to_pdg_mc_ids)) - .collect(); - } - _ => unimplemented!(), + "pdg_mc_ids" => return PidBasis::Pdg, + "evol" => return PidBasis::Evol, + _ => unimplemented!("unknown particle ID convention {lumi_id_types}"), } } } - Cow::Borrowed(self.lumi()) + // if there's no basis explicitly set we're assuming to use PDG IDs + return PidBasis::Pdg; + } + + fn pdg_lumi(&self) -> Cow<[LumiEntry]> { + match self.pid_basis() { + PidBasis::Evol => self + .lumi + .iter() + .map(|entry| LumiEntry::translate(entry, &pids::evol_to_pdg_mc_ids)) + .collect(), + PidBasis::Pdg => Cow::Borrowed(self.lumi()), + } } /// Perform a convolution using the PDFs and strong coupling in `lumi_cache`, and only diff --git a/pineappl/src/pids.rs b/pineappl/src/pids.rs index b0fc9be5d..3909f061e 100644 --- a/pineappl/src/pids.rs +++ b/pineappl/src/pids.rs @@ -1,7 +1,46 @@ //! TODO +use std::str::FromStr; +use thiserror::Error; + const EVOL_BASIS_IDS: [i32; 12] = [100, 103, 108, 115, 124, 135, 200, 203, 208, 215, 224, 235]; +/// Particle ID bases. In `PineAPPL` every particle is identified using a particle identifier +/// (PID), which is represented as an `i32`. The values of this `enum` specify how this value is +/// interpreted. +#[derive(Clone, Copy)] +pub enum PidBasis { + /// This basis uses the [particle data group](https://pdg.lbl.gov/) (PDG) PIDs. For a complete + /// definition see the section 'Monte Carlo Particle Numbering Scheme' of the PDG Review, for + /// instance the [2023 review](https://pdg.lbl.gov/2023/mcdata/mc_particle_id_contents.html). 
+ Pdg, + /// This basis specifies the evolution basis, which is the same as [`PidConv::Pdg`], except the + /// following values have a special meaning: `100`, `103`, `108`, `115`, `124`, `135`, `200`, + /// `203`, `208`, `215`, `224`, `235`. + Evol, +} + +impl FromStr for PidBasis { + type Err = UnknownPidBasis; + + fn from_str(s: &str) -> Result { + match s { + "Pdg" | "PDG" | "pdg_mc_ids" => Ok(PidBasis::Pdg), + "Evol" | "EVOL" | "evol" => Ok(PidBasis::Evol), + _ => Err(UnknownPidBasis { + basis: s.to_owned(), + }), + } + } +} + +/// Error returned by [`PidBasis::from_str`] when passed with an unknown argument. +#[derive(Debug, Error)] +#[error("unknown PID basis: {basis}")] +pub struct UnknownPidBasis { + basis: String, +} + /// Translates IDs from the evolution basis into IDs using PDG Monte Carlo IDs. #[must_use] pub fn evol_to_pdg_mc_ids(id: i32) -> Vec<(i32, f64)> { From 0287de4407e8c614ba645b71d42f8abd51944bba Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 29 Apr 2024 18:19:25 +0200 Subject: [PATCH 061/179] Add `pdg_mc_pids_to_evol` and test it --- pineappl/src/lumi.rs | 3 + pineappl/src/pids.rs | 153 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 156 insertions(+) diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs index 5c21a99c2..77d27a8c5 100644 --- a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -76,6 +76,9 @@ impl LumiEntry { Err((lhs, rhs)) } }) + // filter zeros + // TODO: find a better than to hardcode the epsilon limit + .filter(|&(_, _, f)| !approx_eq!(f64, f.abs(), 0.0, epsilon = 1e-14)) .collect(), } } diff --git a/pineappl/src/pids.rs b/pineappl/src/pids.rs index 3909f061e..6ec8fa443 100644 --- a/pineappl/src/pids.rs +++ b/pineappl/src/pids.rs @@ -167,6 +167,141 @@ pub fn evol_to_pdg_mc_ids(id: i32) -> Vec<(i32, f64)> { } } +/// Translates PDG Monte Carlo IDs to particle IDs from the evolution basis. 
+pub fn pdg_mc_pids_to_evol(pid: i32) -> Vec<(i32, f64)> { + match pid { + -6 => vec![ + (100, 1.0 / 12.0), + (135, -1.0 / 12.0), + (200, -1.0 / 12.0), + (235, 1.0 / 12.0), + ], + -5 => vec![ + (100, 1.0 / 12.0), + (124, -1.0 / 10.0), + (135, 1.0 / 60.0), + (200, -1.0 / 12.0), + (224, 1.0 / 10.0), + (235, -1.0 / 60.0), + ], + -4 => vec![ + (100, 1.0 / 12.0), + (115, -1.0 / 8.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, -1.0 / 12.0), + (215, 1.0 / 8.0), + (224, -1.0 / 40.0), + (235, -1.0 / 60.0), + ], + -3 => vec![ + (100, 1.0 / 12.0), + (108, -1.0 / 6.0), + (115, 1.0 / 24.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, -1.0 / 12.0), + (208, 1.0 / 6.0), + (215, -1.0 / 24.0), + (224, -1.0 / 40.0), + (235, -1.0 / 60.0), + ], + -2 => vec![ + (100, 1.0 / 12.0), + (103, 1.0 / 4.0), + (108, 1.0 / 12.0), + (115, 1.0 / 24.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, -1.0 / 12.0), + (203, -1.0 / 4.0), + (208, -1.0 / 12.0), + (215, -1.0 / 24.0), + (224, -1.0 / 40.0), + (235, -1.0 / 60.0), + ], + -1 => vec![ + (100, 1.0 / 12.0), + (103, -1.0 / 4.0), + (108, 1.0 / 12.0), + (115, 1.0 / 24.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, -1.0 / 12.0), + (203, 1.0 / 4.0), + (208, -1.0 / 12.0), + (215, -1.0 / 24.0), + (224, -1.0 / 40.0), + (235, -1.0 / 60.0), + ], + 1 => vec![ + (100, 1.0 / 12.0), + (103, -1.0 / 4.0), + (108, 1.0 / 12.0), + (115, 1.0 / 24.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, 1.0 / 12.0), + (203, -1.0 / 4.0), + (208, 1.0 / 12.0), + (215, 1.0 / 24.0), + (224, 1.0 / 40.0), + (235, 1.0 / 60.0), + ], + 2 => vec![ + (100, 1.0 / 12.0), + (103, 1.0 / 4.0), + (108, 1.0 / 12.0), + (115, 1.0 / 24.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, 1.0 / 12.0), + (203, 1.0 / 4.0), + (208, 1.0 / 12.0), + (215, 1.0 / 24.0), + (224, 1.0 / 40.0), + (235, 1.0 / 60.0), + ], + 3 => vec![ + (100, 1.0 / 12.0), + (108, -1.0 / 6.0), + (115, 1.0 / 24.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, 1.0 / 12.0), + (208, -1.0 / 6.0), + 
(215, 1.0 / 24.0), + (224, 1.0 / 40.0), + (235, 1.0 / 60.0), + ], + 4 => vec![ + (100, 1.0 / 12.0), + (115, -1.0 / 8.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, 1.0 / 12.0), + (215, -1.0 / 8.0), + (224, 1.0 / 40.0), + (235, 1.0 / 60.0), + ], + 5 => vec![ + (100, 1.0 / 12.0), + (124, -1.0 / 10.0), + (135, 1.0 / 60.0), + (200, 1.0 / 12.0), + (224, -1.0 / 10.0), + (235, 1.0 / 60.0), + ], + 6 => vec![ + (100, 1.0 / 12.0), + (135, -1.0 / 12.0), + (200, 1.0 / 12.0), + (235, -1.0 / 12.0), + ], + _ => vec![(pid, 1.0)], + } +} + /// Return the charge-conjugated PDG ID of `pid`. #[must_use] pub const fn charge_conjugate_pdg_pid(pid: i32) -> i32 { @@ -247,6 +382,9 @@ pub fn pdg_mc_ids_to_evol(tuples: &[(i32, f64)]) -> Option { #[cfg(test)] mod tests { use super::*; + use crate::lumi::LumiEntry; + use crate::lumi_entry; + use float_cmp::assert_approx_eq; #[test] fn test() { @@ -748,4 +886,19 @@ mod tests { "evol" ); } + + #[test] + fn inverse_inverse_evol() { + for pid in [-6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6] { + let result = LumiEntry::translate( + &LumiEntry::translate(&lumi_entry![pid, pid, 1.0], &pdg_mc_pids_to_evol), + &evol_to_pdg_mc_ids, + ); + + assert_eq!(result.entry().len(), 1); + assert_eq!(result.entry()[0].0, pid); + assert_eq!(result.entry()[0].1, pid); + assert_approx_eq!(f64, result.entry()[0].2, 1.0, ulps = 8); + } + } } From ba0bbf92ab912100941755b07e30f8bb07e5795c Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 29 Apr 2024 18:28:32 +0200 Subject: [PATCH 062/179] Add `Grid::rotate_pid_basis` and its CLI equivalent --- CHANGELOG.md | 3 +++ pineappl/src/grid.rs | 27 +++++++++++++++++++++++++++ pineappl_cli/src/write.rs | 30 ++++++++++++++++++++++++++++++ pineappl_cli/tests/write.rs | 1 + 4 files changed, 61 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2e1ff03fb..60c132d18 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 
have been introduced in EKO v0.13. This interface will replace `Grid::evolve` - added `--dont-sort` switch to `pineappl channels`, which displays the channel sizes orderd by channel index (instead of channel size) +- added `Grid::rotate_pid_basis` and `pineappl write --rotate-pid-basis`. This + allows to change the meaning of the used particle IDs, and supported formats + are PDG MC IDs and the evolution basis ### Changed diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 6822eecd3..a7236b10c 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -2187,6 +2187,33 @@ impl Grid { } } + /// Change the particle ID convention. + pub fn rotate_pid_basis(&mut self, pid_basis: PidBasis) { + match (self.pid_basis(), pid_basis) { + (PidBasis::Pdg, PidBasis::Evol) => { + self.lumi = self + .lumi + .iter() + .map(|channel| LumiEntry::translate(channel, &pids::pdg_mc_pids_to_evol)) + .collect(); + + self.set_key_value("lumi_id_types", "evol"); + } + (PidBasis::Evol, PidBasis::Pdg) => { + self.lumi = self + .lumi + .iter() + .map(|channel| LumiEntry::translate(channel, &pids::evol_to_pdg_mc_ids)) + .collect(); + + self.set_key_value("lumi_id_types", "pdg_mc_ids"); + } + (PidBasis::Evol, PidBasis::Evol) | (PidBasis::Pdg, PidBasis::Pdg) => { + // here's nothing to do + } + } + } + /// Deletes channels with the corresponding `channel_indices`. Repeated indices and indices /// larger or equal than the number of channels are ignored. 
pub fn delete_channels(&mut self, channel_indices: &[usize]) { diff --git a/pineappl_cli/src/write.rs b/pineappl_cli/src/write.rs index 2e8fbdb40..ca7eb0e4b 100644 --- a/pineappl_cli/src/write.rs +++ b/pineappl_cli/src/write.rs @@ -10,6 +10,7 @@ use pineappl::bin::BinRemapper; use pineappl::fk_table::{FkAssumptions, FkTable}; use pineappl::lumi::LumiEntry; use pineappl::pids; +use pineappl::pids::PidBasis; use std::fs; use std::ops::{Deref, RangeInclusive}; use std::path::PathBuf; @@ -43,6 +44,7 @@ enum OpsArg { RemapNorm(f64), RemapNormIgnore(Vec), RewriteChannel((usize, LumiEntry)), + RotatePidBasis(PidBasis), Scale(f64), ScaleByBin(Vec), ScaleByOrder(Vec), @@ -191,6 +193,20 @@ impl FromArgMatches for MoreArgs { ))); } } + "rotate_pid_basis" => { + for (index, arg) in indices.into_iter().zip( + matches + .remove_occurrences(&id) + .unwrap() + .map(Iterator::collect::>), + ) { + assert_eq!(arg.len(), 1); + args[index] = Some(match id.as_str() { + "rotate_pid_basis" => OpsArg::RotatePidBasis(arg[0]), + _ => unreachable!(), + }); + } + } "scale_by_bin" | "scale_by_order" => { for (index, arg) in indices.into_iter().zip( matches @@ -369,6 +385,17 @@ impl Args for MoreArgs { .num_args(2) .value_names(["IDX", "CHAN"]) ) + .arg( + Arg::new("rotate_pid_basis") + .action(ArgAction::Append) + .help("Rotate the PID basis for this grid") + .long("rotate-pid-basis") + .value_name("BASIS") + .value_parser( + PossibleValuesParser::new(["PDG", "EVOL"]) + .try_map(|s| s.parse::()), + ), + ) .arg( Arg::new("scale") .action(ArgAction::Append) @@ -559,6 +586,9 @@ impl Subcommand for Opts { channels[*index] = new_channel.clone(); grid.set_lumis(channels); } + OpsArg::RotatePidBasis(pid_basis) => { + grid.rotate_pid_basis(pid_basis.clone()); + } OpsArg::Scale(factor) => grid.scale(*factor), OpsArg::Optimize(true) => grid.optimize(), OpsArg::OptimizeFkTable(assumptions) => { diff --git a/pineappl_cli/tests/write.rs b/pineappl_cli/tests/write.rs index 03058a51e..7a8cd0175 100644 --- 
a/pineappl_cli/tests/write.rs +++ b/pineappl_cli/tests/write.rs @@ -23,6 +23,7 @@ Options: --remap-norm Modify the bin normalizations with a common factor --remap-norm-ignore Modify the bin normalizations by multiplying with the bin lengths for the given dimensions --rewrite-channel Rewrite the definition of the channel with index IDX + --rotate-pid-basis Rotate the PID basis for this grid [possible values: PDG, EVOL] -s, --scale Scales all grids with the given factor --scale-by-bin Scale each bin with a different factor --scale-by-order Scales all grids with order-dependent factors From 87e906cc8171988ff6ac9bce4a8932974be59de5 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 30 Apr 2024 15:15:01 +0200 Subject: [PATCH 063/179] Add test for `pineappl write --rotate-pid-basis` --- pineappl_cli/tests/write.rs | 252 ++++++++++++++++++++++++++++++++++++ 1 file changed, 252 insertions(+) diff --git a/pineappl_cli/tests/write.rs b/pineappl_cli/tests/write.rs index 7a8cd0175..48a02ebfc 100644 --- a/pineappl_cli/tests/write.rs +++ b/pineappl_cli/tests/write.rs @@ -228,6 +228,180 @@ const MULTIPLE_ARGUMENTS_STR: &str = "b etal dsig/detal 5 4 4.5 2.9004607e1 "; +const ROTATE_PID_BASIS_DIFF_STR: &str = "b x1 O(as^0 a^2) O(as^0 a^3) O(as^1 a^2) +-+----+----+-----------+-----------+----------+-------------+-------------+----------+-----------+-----------+---------- +0 2 2.25 6.5070305e2 6.5070305e2 -2.220e-16 -7.8692484e0 -7.8692484e0 -4.441e-16 1.1175729e2 1.1175729e2 -1.221e-15 +1 2.25 2.5 5.9601236e2 5.9601236e2 -7.772e-16 -6.5623495e0 -6.5623495e0 -2.220e-16 1.0083341e2 1.0083341e2 -5.551e-16 +2 2.5 2.75 5.1561247e2 5.1561247e2 -8.882e-16 -5.2348261e0 -5.2348261e0 -6.661e-16 8.9874343e1 8.9874343e1 -1.221e-15 +3 2.75 3 4.1534629e2 4.1534629e2 -4.441e-16 -3.7590420e0 -3.7590420e0 -5.551e-16 7.3935106e1 7.3935106e1 -1.554e-15 +4 3 3.25 3.0812719e2 3.0812719e2 -3.331e-16 -2.5871885e0 -2.5871885e0 -5.551e-16 5.6414554e1 5.6414554e1 -2.220e-16 +5 3.25 3.5 2.0807482e2 
2.0807482e2 -6.661e-16 -1.6762487e0 -1.6762487e0 -1.110e-16 3.9468336e1 3.9468336e1 -3.331e-16 +6 3.5 4 9.6856769e1 9.6856769e1 -3.331e-16 -8.1027456e-1 -8.1027456e-1 -1.110e-16 1.9822014e1 1.9822014e1 -1.110e-15 +7 4 4.5 2.2383492e1 2.2383492e1 -4.441e-16 -2.2022770e-1 -2.2022770e-1 -5.551e-16 5.3540011e0 5.3540011e0 -3.331e-16 +"; + +const ROTATE_PID_BASIS_READ_LUMIS_STR: &str = " l entry +---+----------------------------------- +0 0.013888888888888888 × (100, 100) +1 -0.020833333333333332 × (100, 103) +2 -0.006944444444444444 × (100, 108) +3 0.006944444444444444 × (100, 115) +4 0.004166666666666667 × (100, 124) +5 0.0027777777777777775 × (100, 135) +6 -0.013888888888888888 × (100, 200) +7 0.020833333333333332 × (100, 203) +8 0.006944444444444444 × (100, 208) +9 -0.006944444444444444 × (100, 215) +10 -0.004166666666666667 × (100, 224) +11 -0.0027777777777777775 × (100, 235) +12 0.020833333333333332 × (103, 100) +13 -0.0625 × (103, 103) +14 0.020833333333333332 × (103, 108) +15 0.010416666666666666 × (103, 115) +16 0.00625 × (103, 124) +17 0.004166666666666667 × (103, 135) +18 -0.020833333333333332 × (103, 200) +19 0.0625 × (103, 203) +20 -0.020833333333333332 × (103, 208) +21 -0.010416666666666666 × (103, 215) +22 -0.00625 × (103, 224) +23 -0.004166666666666667 × (103, 235) +24 0.006944444444444444 × (108, 100) +25 -0.020833333333333332 × (108, 103) +26 0.006944444444444444 × (108, 108) +27 0.003472222222222222 × (108, 115) +28 0.0020833333333333333 × (108, 124) +29 0.0013888888888888887 × (108, 135) +30 -0.006944444444444444 × (108, 200) +31 0.020833333333333332 × (108, 203) +32 -0.006944444444444444 × (108, 208) +33 -0.003472222222222222 × (108, 215) +34 -0.0020833333333333333 × (108, 224) +35 -0.0013888888888888887 × (108, 235) +36 -0.006944444444444444 × (115, 100) +37 -0.010416666666666666 × (115, 103) +38 0.024305555555555552 × (115, 108) +39 -0.003472222222222222 × (115, 115) +40 -0.0020833333333333337 × (115, 124) +41 -0.001388888888888889 × (115, 135) 
+42 0.006944444444444444 × (115, 200) +43 0.010416666666666666 × (115, 203) +44 -0.024305555555555552 × (115, 208) +45 0.003472222222222222 × (115, 215) +46 0.0020833333333333337 × (115, 224) +47 0.001388888888888889 × (115, 235) +48 -0.00625 × (124, 103) +49 -0.0020833333333333333 × (124, 108) +50 0.0020833333333333333 × (124, 115) +51 0.0012500000000000002 × (124, 124) +52 0.0008333333333333334 × (124, 135) +53 -0.004166666666666667 × (124, 200) +54 0.00625 × (124, 203) +55 0.0020833333333333333 × (124, 208) +56 -0.0020833333333333333 × (124, 215) +57 -0.0012500000000000002 × (124, 224) +58 -0.0008333333333333334 × (124, 235) +59 -0.004166666666666667 × (135, 103) +60 -0.0013888888888888887 × (135, 108) +61 0.0013888888888888887 × (135, 115) +62 0.0005555555555555556 × (135, 135) +63 -0.0027777777777777775 × (135, 200) +64 0.004166666666666667 × (135, 203) +65 0.0013888888888888887 × (135, 208) +66 -0.0013888888888888887 × (135, 215) +67 -0.0008333333333333334 × (135, 224) +68 -0.0005555555555555556 × (135, 235) +69 0.013888888888888888 × (200, 100) +70 0.004166666666666667 × (200, 124) +71 0.0027777777777777775 × (200, 135) +72 -0.013888888888888888 × (200, 200) +73 0.020833333333333332 × (200, 203) +74 0.006944444444444444 × (200, 208) +75 -0.006944444444444444 × (200, 215) +76 -0.004166666666666667 × (200, 224) +77 -0.0027777777777777775 × (200, 235) +78 -0.0625 × (203, 103) +79 -0.020833333333333332 × (203, 200) +80 0.0625 × (203, 203) +81 -0.020833333333333332 × (203, 208) +82 -0.010416666666666666 × (203, 215) +83 -0.00625 × (203, 224) +84 -0.004166666666666667 × (203, 235) +85 0.006944444444444444 × (208, 108) +86 0.003472222222222222 × (208, 115) +87 -0.006944444444444444 × (208, 200) +88 0.020833333333333332 × (208, 203) +89 -0.006944444444444444 × (208, 208) +90 -0.003472222222222222 × (208, 215) +91 -0.0020833333333333333 × (208, 224) +92 -0.0013888888888888887 × (208, 235) +93 0.024305555555555552 × (215, 108) +94 -0.003472222222222222 × (215, 115) 
+95 -0.0020833333333333337 × (215, 124) +96 -0.001388888888888889 × (215, 135) +97 0.006944444444444444 × (215, 200) +98 0.010416666666666666 × (215, 203) +99 -0.024305555555555552 × (215, 208) +100 0.003472222222222222 × (215, 215) +101 0.0020833333333333337 × (215, 224) +102 0.001388888888888889 × (215, 235) +103 0.004166666666666667 × (224, 100) +104 0.0020833333333333333 × (224, 115) +105 0.0012500000000000002 × (224, 124) +106 0.0008333333333333334 × (224, 135) +107 0.00625 × (224, 203) +108 0.0020833333333333333 × (224, 208) +109 -0.0020833333333333333 × (224, 215) +110 -0.0012500000000000002 × (224, 224) +111 -0.0008333333333333334 × (224, 235) +112 0.0027777777777777775 × (235, 100) +113 0.0013888888888888887 × (235, 115) +114 0.0008333333333333334 × (235, 124) +115 0.0005555555555555556 × (235, 135) +116 0.004166666666666667 × (235, 203) +117 0.0013888888888888887 × (235, 208) +118 -0.0013888888888888887 × (235, 215) +119 -0.0005555555555555556 × (235, 235) +120 0.16666666666666666 × (21, 100) +121 -0.25 × (21, 103) +122 -0.08333333333333333 × (21, 108) +123 0.08333333333333333 × (21, 115) +124 0.05 × (21, 124) +125 0.03333333333333333 × (21, 135) +126 -0.16666666666666666 × (21, 200) +127 0.25 × (21, 203) +128 0.08333333333333333 × (21, 208) +129 -0.08333333333333333 × (21, 215) +130 -0.05 × (21, 224) +131 -0.03333333333333333 × (21, 235) +132 0.16666666666666666 × (22, 100) +133 -0.25 × (22, 103) +134 -0.08333333333333333 × (22, 108) +135 0.08333333333333333 × (22, 115) +136 0.05 × (22, 124) +137 0.03333333333333333 × (22, 135) +138 -0.16666666666666666 × (22, 200) +139 0.25 × (22, 203) +140 0.08333333333333333 × (22, 208) +141 -0.08333333333333333 × (22, 215) +142 -0.05 × (22, 224) +143 -0.03333333333333333 × (22, 235) +144 0.25 × (103, 21) +145 0.08333333333333333 × (108, 21) +146 -0.08333333333333334 × (115, 21) +147 0.16666666666666666 × (200, 21) +148 -0.08333333333333334 × (215, 21) +149 0.05 × (224, 21) +150 0.03333333333333333 × (235, 21) +151 
0.25 × (103, 22) +152 0.08333333333333333 × (108, 22) +153 -0.08333333333333334 × (115, 22) +154 0.16666666666666666 × (200, 22) +155 -0.08333333333333334 × (215, 22) +156 0.05 × (224, 22) +157 0.03333333333333333 × (235, 22) +"; + #[test] fn help() { Command::cargo_bin("pineappl") @@ -736,3 +910,81 @@ fn rewrite_channels() { .success() .stdout(REWRITE_CHANNELS_CONVOLUTE_STR); } + +#[test] +fn rotate_pid_basis() { + let output = NamedTempFile::new("evolution-basis.pineappl.lz4").unwrap(); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "write", + "--rotate-pid-basis=EVOL", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + output.path().to_str().unwrap(), + ]) + .assert() + .success() + .stdout(""); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "diff", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + output.path().to_str().unwrap(), + "NNPDF31_nlo_as_0118_luxqed", + "--ignore-lumis", + ]) + .assert() + .success() + .stdout(ROTATE_PID_BASIS_DIFF_STR); + + let output2 = NamedTempFile::new("evolution-basis.pineappl.lz4").unwrap(); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "write", + "--rotate-pid-basis=EVOL", + "--split-lumi", + "--optimize", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + output2.path().to_str().unwrap(), + ]) + .assert() + .success() + .stdout(""); + + Command::cargo_bin("pineappl") + .unwrap() + .args(["read", "--lumis", output2.path().to_str().unwrap()]) + .assert() + .success() + .stdout(ROTATE_PID_BASIS_READ_LUMIS_STR); + + let output3 = NamedTempFile::new("evolution-basis.pineappl.lz4").unwrap(); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "write", + "--rotate-pid-basis=PDG", + output2.path().to_str().unwrap(), + output3.path().to_str().unwrap(), + ]) + .assert() + .success() + .stdout(""); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "convolve", + output3.path().to_str().unwrap(), + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(DEFAULT_STR); +} From 
ce37a1106b0b4906d353434689ec588bbc7d0078 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 3 May 2024 11:16:47 +0200 Subject: [PATCH 064/179] Test `--rotate-pid-basis` more thoroughly --- pineappl_cli/src/diff.rs | 1 + pineappl_cli/tests/write.rs | 105 ++++++++++++++++++++++++++++++------ 2 files changed, 90 insertions(+), 16 deletions(-) diff --git a/pineappl_cli/src/diff.rs b/pineappl_cli/src/diff.rs index 04303748c..afa874f4b 100644 --- a/pineappl_cli/src/diff.rs +++ b/pineappl_cli/src/diff.rs @@ -122,6 +122,7 @@ impl Subcommand for Opts { bail!("number of bins differ"); } + // TODO: use approximate comparison if !self.ignore_lumis && (grid1.lumi() != grid2.lumi()) { bail!("luminosities differ"); } diff --git a/pineappl_cli/tests/write.rs b/pineappl_cli/tests/write.rs index 48a02ebfc..122e3489d 100644 --- a/pineappl_cli/tests/write.rs +++ b/pineappl_cli/tests/write.rs @@ -228,6 +228,18 @@ const MULTIPLE_ARGUMENTS_STR: &str = "b etal dsig/detal 5 4 4.5 2.9004607e1 "; +const ROTATE_PID_BASIS_NO_DIFF_STR: &str = "b x1 O(as^0 a^2) O(as^0 a^3) O(as^1 a^2) +-+----+----+-----------+-----------+-------+-------------+-------------+-------+-----------+-----------+------- +0 2 2.25 6.5070305e2 6.5070305e2 0.000e0 -7.8692484e0 -7.8692484e0 0.000e0 1.1175729e2 1.1175729e2 0.000e0 +1 2.25 2.5 5.9601236e2 5.9601236e2 0.000e0 -6.5623495e0 -6.5623495e0 0.000e0 1.0083341e2 1.0083341e2 0.000e0 +2 2.5 2.75 5.1561247e2 5.1561247e2 0.000e0 -5.2348261e0 -5.2348261e0 0.000e0 8.9874343e1 8.9874343e1 0.000e0 +3 2.75 3 4.1534629e2 4.1534629e2 0.000e0 -3.7590420e0 -3.7590420e0 0.000e0 7.3935106e1 7.3935106e1 0.000e0 +4 3 3.25 3.0812719e2 3.0812719e2 0.000e0 -2.5871885e0 -2.5871885e0 0.000e0 5.6414554e1 5.6414554e1 0.000e0 +5 3.25 3.5 2.0807482e2 2.0807482e2 0.000e0 -1.6762487e0 -1.6762487e0 0.000e0 3.9468336e1 3.9468336e1 0.000e0 +6 3.5 4 9.6856769e1 9.6856769e1 0.000e0 -8.1027456e-1 -8.1027456e-1 0.000e0 1.9822014e1 1.9822014e1 0.000e0 +7 4 4.5 2.2383492e1 2.2383492e1 0.000e0 
-2.2022770e-1 -2.2022770e-1 0.000e0 5.3540011e0 5.3540011e0 0.000e0 +"; + const ROTATE_PID_BASIS_DIFF_STR: &str = "b x1 O(as^0 a^2) O(as^0 a^3) O(as^1 a^2) -+----+----+-----------+-----------+----------+-------------+-------------+----------+-----------+-----------+---------- 0 2 2.25 6.5070305e2 6.5070305e2 -2.220e-16 -7.8692484e0 -7.8692484e0 -4.441e-16 1.1175729e2 1.1175729e2 -1.221e-15 @@ -913,7 +925,33 @@ fn rewrite_channels() { #[test] fn rotate_pid_basis() { - let output = NamedTempFile::new("evolution-basis.pineappl.lz4").unwrap(); + let pdg_to_pdg = NamedTempFile::new("pdg-to-pdg.pineappl.lz4").unwrap(); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "write", + "--rotate-pid-basis=PDG", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + pdg_to_pdg.path().to_str().unwrap(), + ]) + .assert() + .success() + .stdout(""); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "diff", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + pdg_to_pdg.path().to_str().unwrap(), + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(ROTATE_PID_BASIS_NO_DIFF_STR); + + let pdg_to_evol = NamedTempFile::new("pdg-to-evol.pineappl.lz4").unwrap(); Command::cargo_bin("pineappl") .unwrap() @@ -921,7 +959,7 @@ fn rotate_pid_basis() { "write", "--rotate-pid-basis=EVOL", "../test-data/LHCB_WP_7TEV.pineappl.lz4", - output.path().to_str().unwrap(), + pdg_to_evol.path().to_str().unwrap(), ]) .assert() .success() @@ -932,7 +970,7 @@ fn rotate_pid_basis() { .args([ "diff", "../test-data/LHCB_WP_7TEV.pineappl.lz4", - output.path().to_str().unwrap(), + pdg_to_evol.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", "--ignore-lumis", ]) @@ -940,17 +978,15 @@ fn rotate_pid_basis() { .success() .stdout(ROTATE_PID_BASIS_DIFF_STR); - let output2 = NamedTempFile::new("evolution-basis.pineappl.lz4").unwrap(); + let evol_to_evol = NamedTempFile::new("evol-to-evol.pineappl.lz4").unwrap(); Command::cargo_bin("pineappl") .unwrap() .args([ "write", 
"--rotate-pid-basis=EVOL", - "--split-lumi", - "--optimize", - "../test-data/LHCB_WP_7TEV.pineappl.lz4", - output2.path().to_str().unwrap(), + pdg_to_evol.path().to_str().unwrap(), + evol_to_evol.path().to_str().unwrap(), ]) .assert() .success() @@ -958,20 +994,29 @@ fn rotate_pid_basis() { Command::cargo_bin("pineappl") .unwrap() - .args(["read", "--lumis", output2.path().to_str().unwrap()]) + .args([ + "diff", + pdg_to_evol.path().to_str().unwrap(), + evol_to_evol.path().to_str().unwrap(), + "NNPDF31_nlo_as_0118_luxqed", + ]) .assert() .success() - .stdout(ROTATE_PID_BASIS_READ_LUMIS_STR); + .stdout(ROTATE_PID_BASIS_NO_DIFF_STR); - let output3 = NamedTempFile::new("evolution-basis.pineappl.lz4").unwrap(); + let evol_to_pdg = NamedTempFile::new("evol-to-pdg.pineappl.lz4").unwrap(); Command::cargo_bin("pineappl") .unwrap() .args([ "write", "--rotate-pid-basis=PDG", - output2.path().to_str().unwrap(), - output3.path().to_str().unwrap(), + // fix factors that are almost '1' to exact '1's + "--rewrite-channel", + "0", + "1 * ( 2, -1) + 1 * ( 4, -3)", + pdg_to_evol.path().to_str().unwrap(), + evol_to_pdg.path().to_str().unwrap(), ]) .assert() .success() @@ -980,11 +1025,39 @@ fn rotate_pid_basis() { Command::cargo_bin("pineappl") .unwrap() .args([ - "convolve", - output3.path().to_str().unwrap(), + "diff", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + evol_to_pdg.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", ]) .assert() .success() - .stdout(DEFAULT_STR); + .stdout(ROTATE_PID_BASIS_NO_DIFF_STR); + + let evol_to_evol_optimize = NamedTempFile::new("evol-to-evol-optimize.pineappl.lz4").unwrap(); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "write", + "--rotate-pid-basis=EVOL", + "--split-lumi", + "--optimize", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + evol_to_evol_optimize.path().to_str().unwrap(), + ]) + .assert() + .success() + .stdout(""); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "read", + "--lumis", + 
evol_to_evol_optimize.path().to_str().unwrap(), + ]) + .assert() + .success() + .stdout(ROTATE_PID_BASIS_READ_LUMIS_STR); } From f2620d9baddf6459785d08dcd7f5391c69f7314d Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 27 Apr 2024 08:59:37 +0200 Subject: [PATCH 065/179] Add missing changelog entry for fix --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 60c132d18..99d2394ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - fixed yet another problem that prevent the Python interface for Python 3.6 from being successfully installed +- fixed `Grid::delete_channels` and its CLI variant `pineappl write + --delete-channels`. This command wasn't working properly before ## [0.7.3] - 23/02/2024 From 6a9b5f1f2ad58fe22e87248ee03ad9ff18efb322 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 29 Apr 2024 09:24:58 +0200 Subject: [PATCH 066/179] Add APPLgrid 1.6.36 to tested version list --- pineappl_applgrid/build.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pineappl_applgrid/build.rs b/pineappl_applgrid/build.rs index 9eb5facd3..15af1894a 100644 --- a/pineappl_applgrid/build.rs +++ b/pineappl_applgrid/build.rs @@ -25,7 +25,7 @@ fn main() { .unwrap(); let tested_versions = [ - "1.6.27", "1.6.28", "1.6.29", "1.6.30", "1.6.31", "1.6.32", "1.6.35", + "1.6.27", "1.6.28", "1.6.29", "1.6.30", "1.6.31", "1.6.32", "1.6.35", "1.6.36", ]; if !tested_versions From f52469ca649656ef6d4ef365a9dbc087b312823f Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 29 Apr 2024 09:25:54 +0200 Subject: [PATCH 067/179] Test versions of fastNLO --- pineappl_fastnlo/build.rs | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/pineappl_fastnlo/build.rs b/pineappl_fastnlo/build.rs index 040b9c8a0..bb700bb52 100644 --- a/pineappl_fastnlo/build.rs +++ 
b/pineappl_fastnlo/build.rs @@ -1,9 +1,30 @@ #![allow(missing_docs)] -use std::process::Command; use pkg_config::Config; +use std::process::Command; fn main() { + let version = String::from_utf8( + Command::new("applgrid-config") + .arg("--version") + .output() + .expect("did not find `fnlo-tk-config`, please install fastNLO") + .stdout, + ) + .unwrap(); + + let tested_versions = ["2.5.0_2826"]; + + if !tested_versions + .iter() + .any(|&tested| tested == version.trim()) + { + println!( + "cargo:warning=found fastNLO version {}, which has not been tested", + version.trim() + ); + } + let fnlo_lib_path = String::from_utf8( Command::new("fnlo-tk-config") .arg("--libdir") From ad8515682f6c7ab2866420f99a659c47db20d2d3 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 4 May 2024 09:41:54 +0200 Subject: [PATCH 068/179] Clarify use of `unwrap` in contributing guidelines --- CONTRIBUTING.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 66e30adf3..1504387ce 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -38,8 +38,11 @@ increasing the MSRV make sure to set it everywhere to the same value: - avoid the use of indices whenever possible; use `Iterator` instead. - use the `unwrap` methods whenever a panic would signal a bug in the program, and use `Result` instead if errors should be propagated down to the user. + When using `unwrap`, document the nature of the bug if a panic happens with a + comment of the form: `// UNWRAP: ...`. - in APIs prefer `unwrap_or_else(|| unreachable!())` over `unwrap` whenever - this avoids the clippy warning that a Panic section is missing + this avoids the clippy warning that a Panic section is missing. 
Also document + this with `// UNWRAP: ...` ## Git From e648ae4ec52566dbd54339d9bbc93dd3d5a362a1 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 4 May 2024 09:43:10 +0200 Subject: [PATCH 069/179] Add links to Rust crates in maintainer's guide --- docs/maintainers-guide.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/maintainers-guide.md b/docs/maintainers-guide.md index cd31d69c4..a99018cbc 100644 --- a/docs/maintainers-guide.md +++ b/docs/maintainers-guide.md @@ -24,14 +24,16 @@ - `examples`: contains examples programs to learn how to use PineAPPL's C, C++, Fortran and Python APIs - `maintainer`: contains [maintainer-specific tools] -- `pineappl`: the main Rust crate +- `pineappl`: the main Rust crate, documentation at - `pineappl_applgrid`: interface to [APPLgrid], which the CLI uses to convert APPLgrids to PineAPPL grids (and vice versa) -- `pineappl_capi`: the crate that builds PineAPPL's CAPI +- `pineappl_capi`: the crate that builds PineAPPL's CAPI, documentation at + - `pineappl_cli`: the crate that builds PineAPPL's CLI - `pineappl_fastnlo`: interface to [fastNLO], which the CLI uses to convert fastNLO tables to PineAPPL grids -- `pineappl_py`: the crate that builds PineAPPL's Python interface +- `pineappl_py`: the crate that builds PineAPPL's Python interface, + documentation at - `xtask`: crate for [cargo-xtask] commands - `.gitignore`: PineAPPL's Git ignore rules - `.readthedocs.yml`: configuration for PineAPPL's [Read-the-Docs] Python From fc762a6577aa579811d725ef9b19fe79fb3fa3e1 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 4 May 2024 09:43:44 +0200 Subject: [PATCH 070/179] Add related locations to maintainer's guide --- docs/maintainers-guide.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/maintainers-guide.md b/docs/maintainers-guide.md index a99018cbc..00d857baa 100644 --- a/docs/maintainers-guide.md +++ b/docs/maintainers-guide.md @@ -80,3 +80,14 @@ figure which 
version you've got [package manager]: https://doc.rust-lang.org/cargo/index.html + +# Connected accounts + +- Codecov: +- Conda: +- Crates.io: +- PyPI: +- ReadTheDocs: +- Zenodo: + +We also store testing data in . From 531b98ba79b1c7f0ef7e203dcd29638e62360103 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 4 May 2024 10:26:09 +0200 Subject: [PATCH 071/179] Change `convolute` to `convolve` in the CLI tutorial --- docs/cli-tutorial.md | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/docs/cli-tutorial.md b/docs/cli-tutorial.md index 968c3a3e5..ecc3bf8b4 100644 --- a/docs/cli-tutorial.md +++ b/docs/cli-tutorial.md @@ -1,10 +1,10 @@ # Tutorial for PineAPPL's CLI Welcome to PineAPPL's CLI tutorial! Here we'll explain the basics of PineAPPL's -command-line interface (CLI): that's the program `pineappl` that you can you use -inside your shell to convolute grids with PDFs and to perform other operations. -This tutorial will also introduce and explain the terminology needed to -understand the C, Fortran, Python and Rust API. +command-line interface (CLI): that's the program `pineappl` that you can you +use inside your shell to convolve grids with PDFs and to perform other +operations. This tutorial will also introduce and explain the terminology +needed to understand the C, Fortran, Python and Rust API. This tutorial assumes that you understand the basics of interpolation grids. If you'd like to refresh your memory read the short @@ -25,11 +25,11 @@ to create a temporary directory. Finally, you'll need a grid, which you'll use with the CLI. 
-## `pineappl convolute`: Performing convolutions +## `pineappl convolve`: Performing convolutions Now that you've got a grid, you can perform a convolution with a PDF set: - pineappl convolute LHCB_WP_7TEV.pineappl.lz4 CT18NNLO + pineappl convolve LHCB_WP_7TEV.pineappl.lz4 CT18NNLO We chose to use the default CT18 PDF set for this tutorial, because it's the shortest to type. If you get an error that reads @@ -59,9 +59,9 @@ see the following output: On your computer the output will be slightly different depending on your LHAPDF installation. If you don't want to see LHAPDF messages (first and last two lines), add the option `--silence-lhapdf` after `pineappl` and before -`convolute`: +`convolve`: - pineappl --silence-lhapdf convolute LHCB_WP_7TEV.pineappl.lz4 CT18NNLO + pineappl --silence-lhapdf convolve LHCB_WP_7TEV.pineappl.lz4 CT18NNLO Let's have a closer look at what the output shows: @@ -85,17 +85,17 @@ grouped, and a corresponding description. You'll be familiar with the concept of subcommand if you're using `git`: `add`, `commit` and `push` are well-known subcommands of it. -To get more help on a specific subcommand, for instance `convolute`, which -we've used already, run +To get more help on a specific subcommand, for instance `convolve`, which we've +used already, run - pineappl convolute -h + pineappl convolve -h Depending on the version of PineAPPL this will show output similar to the following: Convolutes a PineAPPL grid with a PDF set - Usage: pineappl convolute [OPTIONS] ... + Usage: pineappl convolve [OPTIONS] ... Arguments: Path of the input grid @@ -109,7 +109,7 @@ following: --digits-rel Set the number of fractional digits shown for relative numbers [default: 2] -h, --help Print help -This explains that `pineappl convolute` needs at least two arguments, the first +This explains that `pineappl convolve` needs at least two arguments, the first being the grid file, denoted as `` and a second argument ``, which determines the PDF set. 
Note that this argument has three dots, `...`, meaning that you're allowed to pass multiple PDF sets, in which case `pineappl` @@ -121,9 +121,9 @@ denoted with `[OPTIONS]`. If you're experienced enough in high-energy physics, you've already inferred from the file name of the grid and the observable name `etal` what the -convoluted numbers will most likely show. However, how can you be certain? -Specifically, if you didn't generate the grid yourself you'll probably want to -know the answers to the following questions: +numbers will most likely show. However, how can you be certain? Specifically, +if you didn't generate the grid yourself you'll probably want to know the +answers to the following questions: 1. For which process is the prediction for? 2. Which observable is shown? @@ -146,7 +146,7 @@ through them one by one: section at 7 TeV`. 2. The keys `x1_label` contains the name of the observable, and `y_label` the name of the corresponding (differential) cross section. These strings are - being used by `convolute` and other subcommands that perform convolutions to + being used by `convolve` and other subcommands that perform convolutions to label the columns with the corresponding numbers. If grids contain two- or even higher-dimensional distributions there would be additional labels, for instance `x2_label`, etc. Furthermore, for plots there are the corresponding @@ -235,7 +235,7 @@ which prints: 7 4 4.5 0.5 this shows the bin indices `b` for the observable `etal`, with their left and -right bin limits, which you've already seen in `convolute`. The column `norm` +right bin limits, which you've already seen in `convolve`. The column `norm` shows the factor that all convolutions are divided with. Typically, as shown in this case, this is the bin width, but in general this can be different. 
@@ -355,7 +355,7 @@ Let's calculate the scale and PDF uncertainties for our grid: pineappl uncert --pdf --scale-env=7 LHCB_WP_7TEV.pineappl.lz4 NNPDF31_nnlo_as_0118_luxqed -This will show a table very similar to `pineappl convolute`: +This will show a table very similar to `pineappl convolve`: b etal dsig/detal PDF central PDF 7pt-svar (env) [] [pb] [%] [%] @@ -369,8 +369,8 @@ This will show a table very similar to `pineappl convolute`: 6 3.5 4 1.1746882e2 1.1745148e2 -1.33 1.33 -3.48 2.80 7 4 4.5 2.8023753e1 2.8018010e1 -4.05 4.05 -3.40 2.74 -The first three columns are exactly the one that `pineappl convolute` shows. -The next columns are the PDF central predictions, and negative and positive PDF +The first three columns are exactly the one that `pineappl convolve` shows. The +next columns are the PDF central predictions, and negative and positive PDF uncertainties. These uncertainties are calculated using LHAPDF, so `pineappl` always uses the correct algorithm no matter what type of PDF sets you use: Hessian, Monte Carlo, etc. Note that we've chosen a PDF set with Monte Carlo From 50d60102dfcba613ae78c0242622b09cf8f16d64 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 4 May 2024 10:26:44 +0200 Subject: [PATCH 072/179] Remove `--silence-lhapdf` switch from CLI tutorial --- docs/cli-tutorial.md | 23 +++++------------------ 1 file changed, 5 insertions(+), 18 deletions(-) diff --git a/docs/cli-tutorial.md b/docs/cli-tutorial.md index ecc3bf8b4..c47078104 100644 --- a/docs/cli-tutorial.md +++ b/docs/cli-tutorial.md @@ -40,8 +40,6 @@ install the PDF set with LHAPDF, or use a different PDF set—the numbers won't matter for the sake of the tutorial. 
If the command was successful, you should see the following output: - LHAPDF 6.5.1 loading /home/cschwan/prefix/share/LHAPDF/CT18NNLO/CT18NNLO_0000.dat - CT18NNLO PDF set, member #0, version 1; LHAPDF ID = 14000 b etal dsig/detal [] [pb] -+----+----+----------- @@ -53,15 +51,6 @@ see the following output: 5 3.25 3.5 2.5236943e2 6 3.5 4 1.1857770e2 7 4 4.5 2.7740964e1 - Thanks for using LHAPDF 6.5.1. Please make sure to cite the paper: - Eur.Phys.J. C75 (2015) 3, 132 (http://arxiv.org/abs/1412.7420) - -On your computer the output will be slightly different depending on your LHAPDF -installation. If you don't want to see LHAPDF messages (first and last two -lines), add the option `--silence-lhapdf` after `pineappl` and before -`convolve`: - - pineappl --silence-lhapdf convolve LHCB_WP_7TEV.pineappl.lz4 CT18NNLO Let's have a closer look at what the output shows: @@ -432,14 +421,12 @@ a difference in the pull. Often a good way to start understanding predictions is to plot them. Fortunately, this is easy with PineAPPL: - pineappl --silence-lhapdf plot LHCB_WP_7TEV.pineappl.lz4 CT18NNLO > plot.py + pineappl plot LHCB_WP_7TEV.pineappl.lz4 CT18NNLO > plot.py This will write a [matplotlib] script in Python. Note that the script is -written to the standard output and redirected into `plot.py`. For this reason -you must add `--silence-lhapdf`, because otherwise LHAPDF's banner would end up -in the script and break it. The advantage of writing a plotting script instead -of directly producing the plot is that you can change it according to your -needs. Finally, let's run the plotting script: +written to the standard output and redirected into `plot.py`. The advantage of +writing a plotting script instead of directly producing the plot is that you +can change it according to your needs. Finally, let's run the plotting script: python3 plot.py @@ -453,7 +440,7 @@ Here's how the result for `.jpeg` looks: The `plot` subcommand is much more powerful, however. 
It accepts multiple PDF sets, for instance - pineappl --silence-lhapdf plot LHCB_WP_7TEV.pineappl.lz4 NNPDF31_nnlo_as_0118_luxqed=NNPDF31luxQED \ + pineappl plot LHCB_WP_7TEV.pineappl.lz4 NNPDF31_nnlo_as_0118_luxqed=NNPDF31luxQED \ CT18NNLO=CT18 MSHT20nnlo_as118=MSHT20 > plot.py in which case more insets are plotted, which show the PDF uncertainty for From 310ddd26363c5a0be0a3dc1d2fbe7d4f1f122af3 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 4 May 2024 10:34:24 +0200 Subject: [PATCH 073/179] Update citation information in `README.md` --- README.md | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index c27a18c2d..98a55d222 100644 --- a/README.md +++ b/README.md @@ -37,13 +37,16 @@ If you use PineAPPL, please cite 1. the software itself using its [zenodo DOI] and 2. the [paper] introducing it. -[zenodo DOI]: https://zenodo.org/badge/latestdoi/248306479 -[paper]: https://inspirehep.net/literature/1814432 +By using PineAPPL, you're probably also using -# Similar projects +- [APPLgrid], +- [fastNLO] and/or +- [LHAPDF]. -`PineAPPL` is the most recent addition to the family of grid interpolation -libraries: +If that is the case, please cite these accordingly. -- [APPLgrid](https://applgrid.hepforge.org/) and -- [fastNLO](https://fastnlo.hepforge.org/). 
+[APPLgrid]: https://applgrid.hepforge.org +[fastNLO]: https://fastnlo.hepforge.org +[LHAPDF]: https://lhapdf.hepforge.org +[zenodo DOI]: https://zenodo.org/badge/latestdoi/248306479 +[paper]: https://inspirehep.net/literature/1814432 From 46ab7fc5bde9e7450017389dd374446fa8371cfe Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 4 May 2024 10:40:00 +0200 Subject: [PATCH 074/179] Add overview documentation for the main crate --- pineappl/src/lib.rs | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/pineappl/src/lib.rs b/pineappl/src/lib.rs index 7a115d085..cf3ba9c66 100644 --- a/pineappl/src/lib.rs +++ b/pineappl/src/lib.rs @@ -1,4 +1,38 @@ //! `PineAPPL` is not an extension of `APPLgrid`. +//! +//! # Overview +//! +//! The main type of this crate is [`Grid`], which represents the interpolation grids that +//! `PineAPPL` implements. Roughly speaking, a `Grid` is a three-dimensional array of [`Subgrid`] +//! objects together with metadata. The three dimensions are +//! 1. (perturbative) orders, represented by the type [`Order`] and accessible by +//! [`Grid::orders()`], +//! 2. bins, whose limits can be accessed by [`Grid::bin_info()`], and +//! 3. channels, whose definition is returned by [`Grid::lumi()`]. Note that in older parts of +//! `PineAPPL` channels are often also called 'luminosities' or 'lumi'. +//! +//! `Subgrid` is a `trait` and objects that implement it are of the type [`SubgridEnum`]. The +//! latter is an `enum` of different types that are optimized to different scenarios: fast event +//! filling, small storage profile, etc. +//! +//! [`Grid`]: grid::Grid +//! [`Grid::bin_info()`]: grid::Grid::bin_info +//! [`Grid::lumi()`]: grid::Grid::lumi +//! [`Grid::orders()`]: grid::Grid::orders +//! [`Subgrid`]: subgrid::Subgrid +//! [`SubgridEnum`]: subgrid::SubgridEnum +//! [`Order`]: grid::Order +//! +//! ## Metadata +//! +//! 
Metadata is a collection of key--value pairs, in which both keys and values are `String`
+//! objects. In metadata anything a user wishes can be stored. However, there are [special keys],
+//! which have meaning to `PineAPPL` and/or its CLI `pineappl`. This metadata enables the CLI to
+//! automatically generate plots that are correctly labeled, for instance. For more applications
+//! see also the [CLI tutorial].
+//!
+//! [special keys]: https://nnpdf.github.io/pineappl/docs/metadata.html
+//! [CLI tutorial]: https://nnpdf.github.io/pineappl/docs/cli-tutorial.html

 mod convert;

From 360e85446536a36d92f3b74c54ac607942f581df Mon Sep 17 00:00:00 2001
From: Christopher Schwan
Date: Sat, 4 May 2024 10:41:06 +0200
Subject: [PATCH 075/179] Fix documentation link

---
 pineappl/src/pids.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pineappl/src/pids.rs b/pineappl/src/pids.rs
index 6ec8fa443..414c8f0e4 100644
--- a/pineappl/src/pids.rs
+++ b/pineappl/src/pids.rs
@@ -14,9 +14,9 @@ pub enum PidBasis {
     /// definition see the section 'Monte Carlo Particle Numbering Scheme' of the PDG Review, for
     /// instance the [2023 review](https://pdg.lbl.gov/2023/mcdata/mc_particle_id_contents.html).
     Pdg,
-    /// This basis specifies the evolution basis, which is the same as [`PidConv::Pdg`], except the
-    /// following values have a special meaning: `100`, `103`, `108`, `115`, `124`, `135`, `200`,
-    /// `203`, `208`, `215`, `224`, `235`.
+    /// This basis specifies the evolution basis, which is the same as [`PidBasis::Pdg`], except
+    /// the following values have a special meaning: `100`, `103`, `108`, `115`, `124`, `135`,
+    /// `200`, `203`, `208`, `215`, `224`, `235`.
Evol, } From 38b6e7380ce706fd05172d7995125bfbcbc6affd Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 9 May 2024 11:01:46 +0200 Subject: [PATCH 076/179] Implement `FromStr` for `Order` --- pineappl/src/grid.rs | 73 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index a7236b10c..d29930fe6 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -28,8 +28,14 @@ use std::iter; use std::mem; use std::ops::Range; use std::slice; +use std::str::FromStr; use thiserror::Error; +/// Error type keeping information if [`Order::from_str`] went wrong. +#[derive(Debug, Error, PartialEq)] +#[error("{0}")] +pub struct ParseOrderError(String); + // TODO: when possible change the types from `u32` to `u8` to change `try_into` to `into` /// Coupling powers for each grid. @@ -45,6 +51,54 @@ pub struct Order { pub logxif: u32, } +impl FromStr for Order { + type Err = ParseOrderError; + + fn from_str(s: &str) -> Result { + let mut result = Self { + alphas: 0, + alpha: 0, + logxir: 0, + logxif: 0, + }; + + for tuple in s + .split(|c: char| c.is_ascii_digit()) + .filter(|s| !s.is_empty()) + .zip( + s.split(|c: char| !c.is_ascii_digit()) + .filter(|s| !s.is_empty()) + .map(str::parse), + ) + { + match tuple { + ("as", Ok(num)) => { + result.alphas = num; + } + ("a", Ok(num)) => { + result.alpha = num; + } + ("lr", Ok(num)) => { + result.logxir = num; + } + ("lf", Ok(num)) => { + result.logxif = num; + } + (label, Err(err)) => { + return Err(ParseOrderError(format!( + "error while parsing exponent of '{label}': {err}" + ))); + } + (label, Ok(_)) => { + return Err(ParseOrderError(format!("unknown coupling: '{label}'"))); + } + } + } + + Ok(result) + } +} + impl Ord for Order { fn cmp(&self, other: &Self) -> Ordering { // sort leading orders before next-to-leading orders, then the lowest power in alpha, the @@ -2361,6 +2415,25 @@ mod tests { use float_cmp::assert_approx_eq; use 
std::fs::File; + #[test] + fn order_from_str() { + assert_eq!("as1".parse(), Ok(Order::new(1, 0, 0, 0))); + assert_eq!("a1".parse(), Ok(Order::new(0, 1, 0, 0))); + assert_eq!("as1lr1".parse(), Ok(Order::new(1, 0, 1, 0))); + assert_eq!("as1lf1".parse(), Ok(Order::new(1, 0, 0, 1))); + assert_eq!( + "ab12".parse::(), + Err(ParseOrderError("unknown coupling: 'ab'".to_owned())) + ); + assert_eq!( + "ab123456789000000".parse::(), + Err(ParseOrderError( + "error while parsing exponent of 'ab': number too large to fit in target type" + .to_owned() + )) + ); + } + #[test] fn order_cmp() { let mut orders = [ From f92a469e21c565603dbdf9849a956d9b89b600be Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 9 May 2024 11:02:30 +0200 Subject: [PATCH 077/179] Add new method `Grid::orders_mut` --- pineappl/src/grid.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index d29930fe6..4af2040d8 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1116,6 +1116,12 @@ impl Grid { &self.orders } + /// Return a mutable reference to the subgrid parameters. + #[must_use] + pub fn orders_mut(&mut self) -> &mut [Order] { + &mut self.orders + } + /// Set the luminosity function for this grid. 
pub fn set_lumis(&mut self, lumis: Vec) { self.lumi = lumis; From 0693d6827a0219e5acbe4630a5d96a5d19b2d79d Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 9 May 2024 11:04:11 +0200 Subject: [PATCH 078/179] Add `--rewrite-orders` switch to `pineappl write` --- CHANGELOG.md | 2 ++ pineappl_cli/src/write.rs | 30 +++++++++++++++--- pineappl_cli/tests/write.rs | 62 +++++++++++++++++++++++++++++++++++++ 3 files changed, 89 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 99d2394ce..d5c96881d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - added `Grid::rotate_pid_basis` and `pineappl write --rotate-pid-basis`. This allows to change the meaning of the used particle IDs, and supported formats are PDG MC IDs and the evolution basis +- added `pineappl write --rewrite-orders` that lets the user change the + exponents of each order ### Changed diff --git a/pineappl_cli/src/write.rs b/pineappl_cli/src/write.rs index ca7eb0e4b..63216c3a1 100644 --- a/pineappl_cli/src/write.rs +++ b/pineappl_cli/src/write.rs @@ -8,6 +8,7 @@ use clap::{ }; use pineappl::bin::BinRemapper; use pineappl::fk_table::{FkAssumptions, FkTable}; +use pineappl::grid::Order; use pineappl::lumi::LumiEntry; use pineappl::pids; use pineappl::pids::PidBasis; @@ -44,6 +45,7 @@ enum OpsArg { RemapNorm(f64), RemapNormIgnore(Vec), RewriteChannel((usize, LumiEntry)), + RewriteOrder((usize, Order)), RotatePidBasis(PidBasis), Scale(f64), ScaleByBin(Vec), @@ -178,7 +180,7 @@ impl FromArgMatches for MoreArgs { }); } } - "rewrite_channel" => { + "rewrite_channel" | "rewrite_order" => { for (index, arg) in indices.into_iter().zip( matches .remove_occurrences(&id) @@ -187,10 +189,17 @@ impl FromArgMatches for MoreArgs { ) { assert_eq!(arg.len(), 2); - args[index] = Some(OpsArg::RewriteChannel(( - str::parse(&arg[0]).unwrap(), - str::parse(&arg[1]).unwrap(), - ))); + args[index] = 
Some(match id.as_str() { + "rewrite_channel" => OpsArg::RewriteChannel(( + str::parse(&arg[0]).unwrap(), + str::parse(&arg[1]).unwrap(), + )), + "rewrite_order" => OpsArg::RewriteOrder(( + str::parse(&arg[0]).unwrap(), + str::parse(&arg[1]).unwrap(), + )), + _ => unreachable!(), + }); } } "rotate_pid_basis" => { @@ -385,6 +394,14 @@ impl Args for MoreArgs { .num_args(2) .value_names(["IDX", "CHAN"]) ) + .arg( + Arg::new("rewrite_order") + .action(ArgAction::Append) + .help("Rewrite the definition of the order with index IDX") + .long("rewrite-order") + .num_args(2) + .value_names(["IDX", "ORDER"]) + ) .arg( Arg::new("rotate_pid_basis") .action(ArgAction::Append) @@ -586,6 +603,9 @@ impl Subcommand for Opts { channels[*index] = new_channel.clone(); grid.set_lumis(channels); } + OpsArg::RewriteOrder((index, order)) => { + grid.orders_mut()[*index] = order.clone(); + } OpsArg::RotatePidBasis(pid_basis) => { grid.rotate_pid_basis(pid_basis.clone()); } diff --git a/pineappl_cli/tests/write.rs b/pineappl_cli/tests/write.rs index 122e3489d..a090d7d45 100644 --- a/pineappl_cli/tests/write.rs +++ b/pineappl_cli/tests/write.rs @@ -23,6 +23,7 @@ Options: --remap-norm Modify the bin normalizations with a common factor --remap-norm-ignore Modify the bin normalizations by multiplying with the bin lengths for the given dimensions --rewrite-channel Rewrite the definition of the channel with index IDX + --rewrite-order Rewrite the definition of the order with index IDX --rotate-pid-basis Rotate the PID basis for this grid [possible values: PDG, EVOL] -s, --scale Scales all grids with the given factor --scale-by-bin Scale each bin with a different factor @@ -414,6 +415,30 @@ const ROTATE_PID_BASIS_READ_LUMIS_STR: &str = " l entry 157 0.03333333333333333 × (235, 22) "; +const REWRITE_ORDER_CONVOLVE_STR: &str = "b etal dsig/detal + [] [pb] +-+----+----+----------- +0 2 2.25 1.8216658e2 +1 2.25 2.5 1.6597039e2 +2 2.5 2.75 1.4666687e2 +3 2.75 3 1.2014156e2 +4 3 3.25 9.0894574e1 +5 3.25 
3.5 6.2823156e1 +6 3.5 4 3.0663454e1 +7 4 4.5 7.8264717e0 +"; + +const REWRITE_ORDER_READ_STR: &str = "o order +-+---------------- +0 O(as^1 a^1) +1 O(as^1 a^2) +2 O(as^1 a^2 lr^1) +3 O(as^1 a^2 lf^1) +4 O(a^3) +5 O(a^3 lr^1) +6 O(a^3 lf^1) +"; + #[test] fn help() { Command::cargo_bin("pineappl") @@ -1061,3 +1086,40 @@ fn rotate_pid_basis() { .success() .stdout(ROTATE_PID_BASIS_READ_LUMIS_STR); } + +#[test] +fn rewrite_order() { + let output = NamedTempFile::new("rewrite-order.pineappl.lz4").unwrap(); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "write", + "--rewrite-order", + "0", + "as1a1", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + output.path().to_str().unwrap(), + ]) + .assert() + .success() + .stdout(""); + + Command::cargo_bin("pineappl") + .unwrap() + .args(["read", "--orders", output.path().to_str().unwrap()]) + .assert() + .success() + .stdout(REWRITE_ORDER_READ_STR); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "convolve", + output.path().to_str().unwrap(), + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(REWRITE_ORDER_CONVOLVE_STR); +} From ea27a8b20317fc327c5f1378505074de7750e097 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 9 May 2024 11:08:43 +0200 Subject: [PATCH 079/179] Call right command when checking fastNLO's version --- pineappl_fastnlo/build.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pineappl_fastnlo/build.rs b/pineappl_fastnlo/build.rs index bb700bb52..a4aed63de 100644 --- a/pineappl_fastnlo/build.rs +++ b/pineappl_fastnlo/build.rs @@ -5,7 +5,7 @@ use std::process::Command; fn main() { let version = String::from_utf8( - Command::new("applgrid-config") + Command::new("fnlo-tk-config") .arg("--version") .output() .expect("did not find `fnlo-tk-config`, please install fastNLO") From 65d1bde78ac03f46b1273809004ef585b2d1df5b Mon Sep 17 00:00:00 2001 From: t7phy Date: Fri, 10 May 2024 08:53:29 +0200 Subject: [PATCH 080/179] make symmterize_channels check 
the convolution object type besides the PID --- pineappl/src/grid.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 4af2040d8..5e9fc83d1 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1389,10 +1389,22 @@ impl Grid { } fn symmetrize_channels(&mut self) { - if self.key_values().map_or(false, |map| { - map["initial_state_1"] != map["initial_state_2"] - }) { - return; + let map = self.key_values(); + if map.is_some() { + let map = map.unwrap(); + if map.contains_key("convolution_particle_1") + && map.contains_key("convolution_type_1") + && map.contains_key("convolution_particle_2") + && map.contains_key("convolution_type_2") + { + if map["convolution_particle_1"] != map["convolution_particle_2"] + || map["convolution_type_1"] != map["convolution_type_2"] + { + return; + } + } else if map["initial_state_1"] != map["initial_state_2"] { + return; + } } let mut indices: Vec = (0..self.lumi.len()).rev().collect(); From 8bf156b1fe91e0906b57fa5b71662c803c15330a Mon Sep 17 00:00:00 2001 From: t7phy Date: Fri, 10 May 2024 19:33:51 +0200 Subject: [PATCH 081/179] implement suggested changes --- pineappl/src/grid.rs | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 5e9fc83d1..4876d91c0 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1390,19 +1390,32 @@ impl Grid { fn symmetrize_channels(&mut self) { let map = self.key_values(); - if map.is_some() { - let map = map.unwrap(); - if map.contains_key("convolution_particle_1") - && map.contains_key("convolution_type_1") - && map.contains_key("convolution_particle_2") - && map.contains_key("convolution_type_2") - { - if map["convolution_particle_1"] != map["convolution_particle_2"] - || map["convolution_type_1"] != map["convolution_type_2"] - { - return; + if let Some(map) = map { + match ( + 
map.get("convolution_particle_1"), + map.get("convolution_particle_2"), + map.get("convolution_type_1"), + map.get("convolution_type_2"), + ) { + ( + Some(convolution_particle_1), + Some(convolution_particle_2), + Some(convolution_type_1), + Some(convolution_type_2), + ) => { + if convolution_particle_1 != convolution_particle_2 + || convolution_type_1 != convolution_type_2 + { + return; + } } - } else if map["initial_state_1"] != map["initial_state_2"] { + (None, None, None, None) => {} + _ => { + // TODO: if only some of the metadata is set, we should consider this an error + todo!(); + } + } + if map["initial_state_1"] != map["initial_state_2"] { return; } } From f85f512e87097a01bf8273700f3febdf2891485d Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 13 May 2024 09:47:34 +0200 Subject: [PATCH 082/179] Fix typo in `CHANGELOG.md` --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d5c96881d..4f0a70371 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,7 +17,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - added `Grid::rotate_pid_basis` and `pineappl write --rotate-pid-basis`. 
This allows to change the meaning of the used particle IDs, and supported formats are PDG MC IDs and the evolution basis -- added `pineappl write --rewrite-orders` that lets the user change the +- added `pineappl write --rewrite-order` that lets the user change the exponents of each order ### Changed From 05a115f2bd446aeeb5f01cbd5cfb3c4bb16213dc Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 13 May 2024 09:48:13 +0200 Subject: [PATCH 083/179] Fix a few clippy warnings --- pineappl/src/bin.rs | 2 +- pineappl/src/grid.rs | 5 +++-- pineappl/src/lumi.rs | 7 +++---- pineappl/src/pids.rs | 5 +++-- pineappl_capi/src/lib.rs | 2 +- pineappl_cli/src/import.rs | 2 +- pineappl_cli/src/write.rs | 2 +- 7 files changed, 13 insertions(+), 12 deletions(-) diff --git a/pineappl/src/bin.rs b/pineappl/src/bin.rs index ae94b8bf9..3d45a597e 100644 --- a/pineappl/src/bin.rs +++ b/pineappl/src/bin.rs @@ -227,7 +227,7 @@ impl FromStr for BinRemapper { } } - last_indices = indices.clone(); + last_indices.clone_from(&indices); let mut normalization = 1.0; diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 4af2040d8..12d618e9f 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -32,7 +32,7 @@ use std::str::FromStr; use thiserror::Error; /// Error type keeping information if [`Order::from_str`] went wrong. -#[derive(Debug, Error, PartialEq)] +#[derive(Debug, Error, Eq, PartialEq)] #[error("{0}")] pub struct ParseOrderError(String); @@ -578,6 +578,7 @@ impl Grid { } /// Return by which convention the particle IDs are encoded. 
+ #[must_use] pub fn pid_basis(&self) -> PidBasis { if let Some(key_values) = self.key_values() { if let Some(lumi_id_types) = key_values.get("lumi_id_types") { @@ -590,7 +591,7 @@ impl Grid { } // if there's no basis explicitly set we're assuming to use PDG IDs - return PidBasis::Pdg; + PidBasis::Pdg } fn pdg_lumi(&self) -> Cow<[LumiEntry]> { diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs index 77d27a8c5..a8f727dc7 100644 --- a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -153,6 +153,7 @@ impl LumiEntry { /// assert_eq!(entry1.common_factor(&entry3), None); /// assert_eq!(entry1.common_factor(&entry4), None); /// ``` + #[must_use] pub fn common_factor(&self, other: &Self) -> Option { if self.entry.len() != other.entry.len() { return None; @@ -165,7 +166,7 @@ impl LumiEntry { .map(|(a, b)| ((a.0 == b.0) && (a.1 == b.1)).then_some(a.2 / b.2)) .collect(); - if let Some(factors) = result { + result.and_then(|factors| { if factors .windows(2) .all(|win| approx_eq!(f64, win[0], win[1], ulps = 4)) @@ -174,9 +175,7 @@ impl LumiEntry { } else { None } - } else { - None - } + }) } } diff --git a/pineappl/src/pids.rs b/pineappl/src/pids.rs index 414c8f0e4..2b87e174f 100644 --- a/pineappl/src/pids.rs +++ b/pineappl/src/pids.rs @@ -25,8 +25,8 @@ impl FromStr for PidBasis { fn from_str(s: &str) -> Result { match s { - "Pdg" | "PDG" | "pdg_mc_ids" => Ok(PidBasis::Pdg), - "Evol" | "EVOL" | "evol" => Ok(PidBasis::Evol), + "Pdg" | "PDG" | "pdg_mc_ids" => Ok(Self::Pdg), + "Evol" | "EVOL" | "evol" => Ok(Self::Evol), _ => Err(UnknownPidBasis { basis: s.to_owned(), }), @@ -168,6 +168,7 @@ pub fn evol_to_pdg_mc_ids(id: i32) -> Vec<(i32, f64)> { } /// Translates PDG Monte Carlo IDs to particle IDs from the evolution basis. 
+#[must_use] pub fn pdg_mc_pids_to_evol(pid: i32) -> Vec<(i32, f64)> { match pid { -6 => vec![ diff --git a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index 809dda4e3..b74651fef 100644 --- a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -179,7 +179,7 @@ fn grid_params(key_vals: Option<&KeyVal>) -> (String, SubgridParams, ExtraSubgri } if let Some(value) = keyval.strings.get("subgrid_type") { - subgrid_type = value.to_str().unwrap().to_owned(); + value.to_str().unwrap().clone_into(&mut subgrid_type); } } diff --git a/pineappl_cli/src/import.rs b/pineappl_cli/src/import.rs index f546588ac..f0a0181fa 100644 --- a/pineappl_cli/src/import.rs +++ b/pineappl_cli/src/import.rs @@ -197,7 +197,7 @@ fn fnlo_mu_possible_values() -> Vec<&'static str> { } #[cfg(not(feature = "fastnlo"))] -fn fnlo_mu_possible_values() -> Vec<&'static str> { +const fn fnlo_mu_possible_values() -> Vec<&'static str> { vec![] } diff --git a/pineappl_cli/src/write.rs b/pineappl_cli/src/write.rs index 63216c3a1..8fbf3f41d 100644 --- a/pineappl_cli/src/write.rs +++ b/pineappl_cli/src/write.rs @@ -607,7 +607,7 @@ impl Subcommand for Opts { grid.orders_mut()[*index] = order.clone(); } OpsArg::RotatePidBasis(pid_basis) => { - grid.rotate_pid_basis(pid_basis.clone()); + grid.rotate_pid_basis(*pid_basis); } OpsArg::Scale(factor) => grid.scale(*factor), OpsArg::Optimize(true) => grid.optimize(), From 56162bfb8ec537a42331e2f62ebf2e68f98c5675 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 20 May 2024 14:35:42 +0200 Subject: [PATCH 084/179] Update `managed-lhapdf` --- Cargo.lock | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index deb147705..c485e3a29 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -554,6 +554,16 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "futures-channel" version = "0.3.30" @@ -950,13 +960,14 @@ dependencies = [ [[package]] name = "managed-lhapdf" version = "0.2.4" -source = "git+https://github.com/cschwan/managed-lhapdf.git#789512841c3e6e1bb82216b83806a59702b18a5f" +source = "git+https://github.com/cschwan/managed-lhapdf.git#5a86dbfc12b13cf75377b6bde3520d2ea7776462" dependencies = [ "anyhow", "cxx", "cxx-build", "dirs", "flate2", + "fs2", "pkg-config", "reqwest", "serde", From b9971d405f559a0770c98f4c4808ad53f54fd36e Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 23 May 2024 09:34:25 +0200 Subject: [PATCH 085/179] Depend on `managed-lhapdf` released on crates.io --- Cargo.lock | 5 +++-- pineappl/Cargo.toml | 2 +- pineappl_cli/Cargo.toml | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c485e3a29..a1ff950c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -959,8 +959,9 @@ dependencies = [ [[package]] name = "managed-lhapdf" -version = "0.2.4" -source = "git+https://github.com/cschwan/managed-lhapdf.git#5a86dbfc12b13cf75377b6bde3520d2ea7776462" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "188f1953ebeb0286d1d129c075e43d61f592e8c8e555aa631a92164c44384964" dependencies = [ "anyhow", "cxx", diff --git a/pineappl/Cargo.toml b/pineappl/Cargo.toml index cf03c9b5a..f9e062061 100644 --- a/pineappl/Cargo.toml +++ b/pineappl/Cargo.toml @@ -33,7 +33,7 @@ thiserror = "1.0.30" [dev-dependencies] anyhow = "1.0.48" -lhapdf = { git = "https://github.com/cschwan/managed-lhapdf.git", package = "managed-lhapdf" } +lhapdf = { package = "managed-lhapdf", version = "0.3.0" } num-complex = "0.4.4" rand = { default-features = false, version = "0.8.4" } rand_pcg = { default-features = false, version = "0.3.1" } diff --git a/pineappl_cli/Cargo.toml b/pineappl_cli/Cargo.toml index 
793cd7cf9..c95cedb6b 100644 --- a/pineappl_cli/Cargo.toml +++ b/pineappl_cli/Cargo.toml @@ -26,7 +26,7 @@ flate2 = { optional = true, version = "1.0.22" } float-cmp = "0.9.0" git-version = "0.3.5" itertools = "0.10.1" -lhapdf = { git = "https://github.com/cschwan/managed-lhapdf.git", package = "managed-lhapdf" } +lhapdf = { package = "managed-lhapdf", version = "0.3.0" } lz4_flex = { optional = true, version = "0.9.2" } ndarray = "0.15.4" ndarray-npy = { optional = true, version = "0.8.1" } From 073c65fe2579ae7c542104c36340b5626817b465 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 23 May 2024 09:41:00 +0200 Subject: [PATCH 086/179] Ignore `test-data` folder --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index ea8c4bf7f..5af45873c 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ /target +test-data/* From e3b70092cc6d198d2ad03b392896eddc259d3eda Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 23 May 2024 09:57:24 +0200 Subject: [PATCH 087/179] Release v0.7.4 --- CHANGELOG.md | 5 ++++- Cargo.lock | 14 +++++++------- Cargo.toml | 2 +- pineappl_capi/Cargo.toml | 2 +- pineappl_cli/Cargo.toml | 6 +++--- pineappl_py/Cargo.toml | 2 +- xtask/Cargo.toml | 2 +- 7 files changed, 18 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f0a70371..00234ec99 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.7.4] - 23/05/2024 + ### Added - added `Grid::evolve_with_slice_iter`, `AlphasTable` and `OperatorSliceInfo`, @@ -563,7 +565,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - first release -[Unreleased]: https://github.com/NNPDF/pineappl/compare/v0.7.3...HEAD +[Unreleased]: https://github.com/NNPDF/pineappl/compare/v0.7.4...HEAD +[0.7.4]: https://github.com/NNPDF/pineappl/compare/v0.7.3...v0.7.4 [0.7.3]: 
https://github.com/NNPDF/pineappl/compare/v0.7.2...v0.7.3 [0.7.2]: https://github.com/NNPDF/pineappl/compare/v0.7.1...v0.7.2 [0.7.1]: https://github.com/NNPDF/pineappl/compare/v0.7.0...v0.7.1 diff --git a/Cargo.lock b/Cargo.lock index a1ff950c7..3846baadd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1261,7 +1261,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pineappl" -version = "0.7.4-rc.1" +version = "0.7.4" dependencies = [ "anyhow", "arrayvec", @@ -1287,7 +1287,7 @@ dependencies = [ [[package]] name = "pineappl_applgrid" -version = "0.7.4-rc.1" +version = "0.7.4" dependencies = [ "cc", "cxx", @@ -1297,7 +1297,7 @@ dependencies = [ [[package]] name = "pineappl_capi" -version = "0.7.4-rc.1" +version = "0.7.4" dependencies = [ "itertools", "pineappl", @@ -1305,7 +1305,7 @@ dependencies = [ [[package]] name = "pineappl_cli" -version = "0.7.4-rc.1" +version = "0.7.4" dependencies = [ "anyhow", "assert_cmd", @@ -1336,7 +1336,7 @@ dependencies = [ [[package]] name = "pineappl_fastnlo" -version = "0.7.4-rc.1" +version = "0.7.4" dependencies = [ "cxx", "cxx-build", @@ -1346,7 +1346,7 @@ dependencies = [ [[package]] name = "pineappl_py" -version = "0.7.4-rc.1" +version = "0.7.4" dependencies = [ "itertools", "ndarray", @@ -2508,7 +2508,7 @@ dependencies = [ [[package]] name = "xtask" -version = "0.7.4-rc.1" +version = "0.7.4" dependencies = [ "anyhow", "clap", diff --git a/Cargo.toml b/Cargo.toml index db60cc953..6aa82170d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,7 +22,7 @@ keywords = ["high-energy-physics", "physics"] license = "GPL-3.0-or-later" repository = "https://github.com/NNPDF/pineappl" rust-version = "1.70.0" -version = "0.7.4-rc.1" +version = "0.7.4" [workspace.lints.clippy] all = { level = "warn", priority = -1 } diff --git a/pineappl_capi/Cargo.toml b/pineappl_capi/Cargo.toml index 46f8c1d25..864faf214 100644 --- a/pineappl_capi/Cargo.toml +++ b/pineappl_capi/Cargo.toml @@ -16,7 +16,7 @@ 
version.workspace = true workspace = true [dependencies] -pineappl = { path = "../pineappl", version = "=0.7.4-rc.1" } +pineappl = { path = "../pineappl", version = "=0.7.4" } itertools = "0.10.1" [features] diff --git a/pineappl_cli/Cargo.toml b/pineappl_cli/Cargo.toml index c95cedb6b..7440fb640 100644 --- a/pineappl_cli/Cargo.toml +++ b/pineappl_cli/Cargo.toml @@ -30,9 +30,9 @@ lhapdf = { package = "managed-lhapdf", version = "0.3.0" } lz4_flex = { optional = true, version = "0.9.2" } ndarray = "0.15.4" ndarray-npy = { optional = true, version = "0.8.1" } -pineappl = { path = "../pineappl", version = "=0.7.4-rc.1" } -pineappl_applgrid = { optional = true, path = "../pineappl_applgrid", version = "=0.7.4-rc.1" } -pineappl_fastnlo = { optional = true, path = "../pineappl_fastnlo", version = "=0.7.4-rc.1" } +pineappl = { path = "../pineappl", version = "=0.7.4" } +pineappl_applgrid = { optional = true, path = "../pineappl_applgrid", version = "=0.7.4" } +pineappl_fastnlo = { optional = true, path = "../pineappl_fastnlo", version = "=0.7.4" } prettytable-rs = { default-features = false, features = ["win_crlf"], version = "0.10.0" } rayon = "1.5.1" serde = { features = ["derive"], optional = true, version = "1.0.130" } diff --git a/pineappl_py/Cargo.toml b/pineappl_py/Cargo.toml index 174b91b93..5b7f35b63 100644 --- a/pineappl_py/Cargo.toml +++ b/pineappl_py/Cargo.toml @@ -30,5 +30,5 @@ crate-type = ["cdylib"] itertools = "0.10.1" ndarray = "0.15.4" numpy = "0.20.0" -pineappl = { path = "../pineappl", version = "=0.7.4-rc.1" } +pineappl = { path = "../pineappl", version = "=0.7.4" } pyo3 = { features = ["extension-module"], version = "0.20.0" } diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index a9ab18072..8b025b56e 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -21,4 +21,4 @@ clap_mangen = "0.2.18" enum_dispatch = "0.3.7" #git2 = "0.17.2" #semver = "1.0.17" -pineappl_cli = { path = "../pineappl_cli", version = "=0.7.4-rc.1" } +pineappl_cli = { path = 
"../pineappl_cli", version = "=0.7.4" } From 29fd4408366e73d4dd1371a21bf1ccd04d375653 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 23 May 2024 10:24:40 +0200 Subject: [PATCH 088/179] Add `install-cli.sh` to install a pre-compiled CLI --- install-cli.sh | 112 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100755 install-cli.sh diff --git a/install-cli.sh b/install-cli.sh new file mode 100755 index 000000000..734e7293e --- /dev/null +++ b/install-cli.sh @@ -0,0 +1,112 @@ +#!/bin/sh + +# WARNING: do not commit changes to this file unless you've checked it against +# `shellcheck` (https://www.shellcheck.net/); run `shellcheck install-capi.sh` +# to make sure this script is POSIX shell compatible; we cannot rely on bash +# being present + +set -eu + +prefix= +version= + +while [ $# -gt 0 ]; do + case $1 in + --version) + version=$2 + shift + shift + ;; + --version=*) + version=${1#--version=} + shift + ;; + --prefix) + prefix=$2 + shift + shift + ;; + --prefix=*) + prefix=${1#--prefix=} + shift + ;; + --target) + target=$2 + shift + shift + ;; + --target=*) + target=${1#--target=} + shift + ;; + *) + echo "Error: argument '$1' unknown" + exit 1 + ;; + esac +done + +if [ -z ${target+x} ]; then + case $(uname -m):$(uname -s) in + arm64:Darwin) + target=aarch64-apple-darwin;; + x86_64:Darwin) + target=x86_64-apple-darwin;; + x86_64:Linux) + target=x86_64-unknown-linux-gnu;; + *) + echo "Error: unknown target, uname = '$(uname -a)'" + exit 1;; + esac +fi + +# if no prefix is given, prompt for one +if [ -z "${prefix}" ]; then + # read from stdin (`<&1`), even if piped into a shell + printf "Enter installation path: " + read -r <&1 prefix + echo +fi + +# we need the absolute path; use `eval` to expand possible tilde `~` +eval mkdir -p "${prefix}" +eval cd "${prefix}" +prefix=$(pwd) +cd - >/dev/null + +# if no version is given, use the latest version +if [ -z "${version}" ]; then + version=$(curl -s 
https://api.github.com/repos/NNPDF/pineappl/releases/latest | \ + sed -n 's/[ ]*"tag_name"[ ]*:[ ]*"v\([^"]*\)"[ ]*,[ ]*$/\1/p') +fi + +base_url=https://github.com/NNPDF/pineappl/releases/download + +echo "prefix: ${prefix}" +echo "target: ${target}" +echo "version: ${version}" + +curl -s -LJ "${base_url}/v${version}/pineappl_cli-${target}.tar.gz" \ + | tar xzf - -C "${prefix}" + +if command -v pineappl >/dev/null; then + path="$(command -v pineappl)" + + if [ "${path}" != "${prefix}"/bin/pineappl ]; then + echo + echo "Warning: Your PATH evironment variable isn't properly set." + echo "It appears a different installation of PineAPPL is found:" + echo + echo " ${path}" + echo + echo "Remove this installation or reorder your PATH" + fi +else + echo + echo "Warning: Your PATH environment variable isn't properly set." + echo "Try adding" + echo + echo " export PATH=${prefix}\"/bin:\${PATH}\"" + echo + echo "to your shell configuration file" +fi From 4aa63611959c487c7795be3230428ecdb264ab8d Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 23 May 2024 10:25:08 +0200 Subject: [PATCH 089/179] Remove name Release workflow --- .github/workflows/release.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 961db5faa..c1a22f80b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -346,7 +346,6 @@ jobs: path: dist release-wheels: - name: Release runs-on: ubuntu-latest if: "startsWith(github.ref, 'refs/tags/')" needs: [wheels-eol-linux, wheels-linux, wheels-macos, wheels-sdist, wheels-windows] From fe623959d9ba432f19cb381239aee53ba166fd8d Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 23 May 2024 11:06:23 +0200 Subject: [PATCH 090/179] Remove support for Python 3.6 --- .github/workflows/release.yml | 42 +------------------------------- CHANGELOG.md | 4 +++ pineappl_py/pineappl/fk_table.py | 38 ----------------------------- pineappl_py/src/fk_table.rs | 1 + 4 
files changed, 6 insertions(+), 79 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c1a22f80b..300c489f1 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -219,46 +219,6 @@ jobs: name: ${{ github.job }}-${{ matrix.target }} path: dist - wheels-eol-linux: - runs-on: ubuntu-latest - strategy: - matrix: - target: [x86_64] - steps: - - uses: actions/checkout@v4 - - name: Patch Cargo.toml - run: | - # - older maturin versions don't support metadata inheritance - overwrite it explicitly - # - use '@' separator since 'repository' contains slashes - # - downgrade both `numpy` and `pyo3` to the last version that support Python 3.6 - sed -i \ - -e "s@categories.workspace = true@$(grep '^categories' Cargo.toml)@" \ - -e "s@edition.workspace = true@$(grep '^edition' Cargo.toml)@" \ - -e "s@keywords.workspace = true@$(grep '^keywords' Cargo.toml)@" \ - -e "s@license.workspace = true@$(grep '^license' Cargo.toml)@" \ - -e "s@repository.workspace = true@$(grep '^repository' Cargo.toml)@" \ - -e "s@rust-version.workspace = true@$(grep '^rust-version' Cargo.toml)@" \ - -e "s@version.workspace = true@$(grep '^version' Cargo.toml)@" \ - -e "s@^numpy = .*@numpy = \"0.14.1\"@" \ - -e "s@^pyo3 = .*@pyo3 = { features = [\"extension-module\"], version = \"0.14.5\" }@" \ - pineappl_py/Cargo.toml - # the previous command is very fragile, so print the file contents to check it - cat pineappl_py/Cargo.toml - - name: Build wheels - uses: PyO3/maturin-action@v1 - with: - target: ${{ matrix.target }} - args: --release -i python3.6 --out dist --no-sdist --manifest-path pineappl_py/Cargo.toml - sccache: 'true' - manylinux: auto - # this is the latest version to support Python 3.6 - maturin-version: 0.12.20 - - name: Upload wheels - uses: actions/upload-artifact@v4 - with: - name: ${{ github.job }}-${{ matrix.target }} - path: dist - wheels-macos: runs-on: macos-13 strategy: @@ -348,7 +308,7 @@ jobs: release-wheels: 
runs-on: ubuntu-latest if: "startsWith(github.ref, 'refs/tags/')" - needs: [wheels-eol-linux, wheels-linux, wheels-macos, wheels-sdist, wheels-windows] + needs: [wheels-linux, wheels-macos, wheels-sdist, wheels-windows] steps: - uses: actions/download-artifact@v4 with: diff --git a/CHANGELOG.md b/CHANGELOG.md index 00234ec99..6115b8c73 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Removed + +- removed support for Python 3.6 + ## [0.7.4] - 23/05/2024 ### Added diff --git a/pineappl_py/pineappl/fk_table.py b/pineappl_py/pineappl/fk_table.py index 04efc8a14..2634a523f 100644 --- a/pineappl_py/pineappl/fk_table.py +++ b/pineappl_py/pineappl/fk_table.py @@ -1,5 +1,3 @@ -import numpy as np - from .pineappl import PyFkTable, PyFkAssumptions from .utils import PyWrapper @@ -55,42 +53,6 @@ def optimize(self, assumptions="Nf6Ind"): assumptions = FkAssumptions(assumptions) return self._raw.optimize(assumptions._raw) - def convolute_with_one( - self, - pdg_id, - xfx, - bin_indices=np.array([], dtype=np.uint64), - lumi_mask=np.array([], dtype=bool), - ): - r"""Convolute FkTable with a pdf. - - Parameters - ---------- - pdg_id : int - PDG Monte Carlo ID of the hadronic particle `xfx` is the PDF for - xfx : callable - lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x`-grid - bin_indices : sequence(int) - A list with the indices of the corresponding bins that should be calculated. An - empty list means that all orders should be calculated. - lumi_mask : sequence(bool) - Mask for selecting specific luminosity channels. The value `True` means the - corresponding channel is included. An empty list corresponds to all channels being - enabled. 
- - Returns - ------- - list(float) : - cross sections for all bins, for each scale-variation tuple (first all bins, then - the scale variation) - """ - return self.raw.convolute_with_one( - pdg_id, - xfx, - np.array(bin_indices), - np.array(lumi_mask), - ) - class FkAssumptions(PyWrapper): """Python wrapper object to interface diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index b1565afe4..f561b5353 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -223,6 +223,7 @@ impl PyFkTable { /// ------- /// numpy.ndarray(float) : /// cross sections for all bins + #[pyo3(signature = (pdg_id, xfx, bin_indices = None, lumi_mask= None))] pub fn convolute_with_one<'py>( &self, pdg_id: i32, From bd478063098b744302c6f6481a70cbbc9671ae2c Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 23 May 2024 11:19:29 +0200 Subject: [PATCH 091/179] Remove deprecated evolution methods --- CHANGELOG.md | 2 + Cargo.lock | 39 +- pineappl/Cargo.toml | 1 - pineappl/src/grid.rs | 637 +-------------------------------- pineappl_py/src/grid.rs | 119 ------ pineappl_py/tests/test_grid.py | 43 --- 6 files changed, 4 insertions(+), 837 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6115b8c73..38797b98b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Removed - removed support for Python 3.6 +- removed deprecated evolution methods `Grid::axes`, `Grid::convolute_eko` and + the structs `EkoInfo` and `GridAxes` ## [0.7.4] - 23/05/2024 diff --git a/Cargo.lock b/Cargo.lock index 3846baadd..47e8aa2aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -280,18 +280,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" -[[package]] -name = "console" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" -dependencies = [ - "encode_unicode 0.3.6", - "lazy_static", - "libc", - "windows-sys 0.52.0", -] - [[package]] name = "cpufeatures" version = "0.2.12" @@ -462,12 +450,6 @@ dependencies = [ "serde", ] -[[package]] -name = "encode_unicode" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" - [[package]] name = "encode_unicode" version = "1.0.0" @@ -835,18 +817,6 @@ dependencies = [ "hashbrown", ] -[[package]] -name = "indicatif" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d207dc617c7a380ab07ff572a6e52fa202a2a8f355860ac9c38e23f8196be1b" -dependencies = [ - "console", - "lazy_static", - "number_prefix", - "regex", -] - [[package]] name = "indoc" version = "2.0.4" @@ -1111,12 +1081,6 @@ dependencies = [ "libc", ] -[[package]] -name = "number_prefix" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" - [[package]] name = "numpy" version = "0.20.0" @@ -1270,7 +1234,6 @@ dependencies = [ "enum_dispatch", "float-cmp", "git-version", - "indicatif", "itertools", "lz4_flex", "managed-lhapdf", @@ -1397,7 +1360,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eea25e07510aa6ab6547308ebe3c036016d162b8da920dbb079e3ba8acf3d95a" dependencies = [ - "encode_unicode 1.0.0", + "encode_unicode", "is-terminal", "lazy_static", "term", diff --git a/pineappl/Cargo.toml b/pineappl/Cargo.toml index f9e062061..b2e9eee25 100644 --- a/pineappl/Cargo.toml +++ b/pineappl/Cargo.toml @@ -23,7 +23,6 @@ bitflags = "2.4.2" enum_dispatch = "0.3.7" float-cmp = "0.9.0" git-version = "0.3.5" -indicatif = "0.16.2" itertools = "0.10.1" lz4_flex = "0.9.2" ndarray = { features = ["serde"], version = 
"0.15.4" } diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 12d618e9f..130cf322a 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -7,18 +7,14 @@ use super::fk_table::FkTable; use super::import_only_subgrid::ImportOnlySubgridV2; use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; use super::lumi::{LumiCache, LumiEntry}; -use super::lumi_entry; use super::ntuple_subgrid::NtupleSubgridV1; use super::pids::{self, PidBasis}; -use super::sparse_array3::SparseArray3; use super::subgrid::{ExtraSubgridParams, Mu2, Subgrid, SubgridEnum, SubgridParams}; use bitflags::bitflags; use float_cmp::approx_eq; use git_version::git_version; -use indicatif::{ProgressBar, ProgressStyle}; -use itertools::Itertools; use lz4_flex::frame::{FrameDecoder, FrameEncoder}; -use ndarray::{s, Array3, Array5, ArrayView5, Axis, CowArray, Dimension, Ix4}; +use ndarray::{s, Array3, ArrayView5, Axis, CowArray, Dimension, Ix4}; use serde::{Deserialize, Serialize, Serializer}; use std::borrow::Cow; use std::cmp::Ordering; @@ -27,7 +23,6 @@ use std::io::{self, BufRead, BufReader, BufWriter, Read, Write}; use std::iter; use std::mem; use std::ops::Range; -use std::slice; use std::str::FromStr; use thiserror::Error; @@ -438,44 +433,6 @@ impl MoreMembers { } } -/// Information required to calculate the evolution kernel operators (EKO) to perform a conversion -/// of a [`Grid`] using [`Grid::convolute_eko`] to an [`FkTable`]. -#[deprecated(since = "0.6.0", note = "use EvolveInfo instead")] -pub struct GridAxes { - /// Interpolation grid in x of the `Grid`. - pub x_grid: Vec, - /// Parton IDs used in the grid. - pub pids: Vec, - /// Interpolation grid for the renormalization scale of the `Grid`. - pub mur2_grid: Vec, - /// Interpolation grid for the factorization scale of the `Grid`. - pub muf2_grid: Vec, -} - -/// Extra information required to perform the conversion of a [`Grid`] to an [`FkTable`] using -/// [`Grid::convolute_eko`]. 
-#[deprecated(since = "0.6.0", note = "use OperatorInfo instead")] -pub struct EkoInfo { - /// Scale of the `FkTable`. - pub muf2_0: f64, - /// Strong coupling constants for the renormalization scales in the same ordering as given in - /// [`GridAxes`]. - pub alphas: Vec, - /// Renormalization scale variation. - pub xir: f64, - /// Factorization scale variation. - pub xif: f64, - /// Interpolation grid in x of the `FkTable`. - pub target_x_grid: Vec, - /// Parton IDs for the `FkTable`. - pub target_pids: Vec, - /// axes shared with the process grid - #[allow(deprecated)] - pub grid_axes: GridAxes, - /// TODO: replace this member with the actual data - pub lumi_id_types: String, -} - bitflags! { /// Bitflags for optimizing a [`Grid`]. See [`Grid::optimize_using`]. #[derive(Clone, Copy)] @@ -1479,496 +1436,6 @@ impl Grid { .insert(key.to_owned(), value.to_owned()); } - /// Provide information used to compute a suitable EKO for the current grid. - /// More specific, the `x_grid` and `muf2_grid` are extracted and checked. - /// - /// # Panics - /// - /// TODO - #[must_use] - #[deprecated(since = "0.6.0", note = "use evolve_info instead")] - #[allow(deprecated)] - pub fn axes(&self) -> Option { - // are the initial states hadrons? - let has_pdf1 = self.has_pdf1(); - let has_pdf2 = self.has_pdf1(); - - let mut mur2_grid = Vec::new(); - let mut muf2_grid = Vec::new(); - let mut x_grid = Vec::new(); - let pids = Vec::new(); - - // Within each lane, that is for a specific combination of (order, bin) ... - for lane in self.subgrids().lanes(Axis(2)) { - // for all luminosities ... - - // the renormalization and factorization grid must be the same, ... - if !lane - .iter() - .filter(|subgrid| !subgrid.is_empty()) - .map(|subgrid| subgrid.mu2_grid()) - .all_equal() - { - return None; - } - - // the x1 grid must be the same and finally ... 
- if has_pdf1 - && !lane - .iter() - .filter(|subgrid| !subgrid.is_empty()) - .map(|subgrid| subgrid.x1_grid()) - .all_equal() - { - return None; - } - - // the x2 grid must be the same - if has_pdf2 - && !lane - .iter() - .filter(|subgrid| !subgrid.is_empty()) - .map(|subgrid| subgrid.x2_grid()) - .all_equal() - { - return None; - } - - // not all luminosities are equal (some appear only at higher orders) - for subgrid in lane { - mur2_grid.append(&mut subgrid.mu2_grid().iter().map(|mu2| mu2.ren).collect()); - muf2_grid.append(&mut subgrid.mu2_grid().iter().map(|mu2| mu2.fac).collect()); - if has_pdf1 { - x_grid.extend_from_slice(&subgrid.x1_grid()); - } - if has_pdf2 { - x_grid.extend_from_slice(&subgrid.x2_grid()); - } - } - } - - // make grids unique - x_grid.sort_by(|a, b| a.partial_cmp(b).unwrap()); - x_grid.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = 64)); - mur2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap()); - mur2_grid.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = 64)); - muf2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap()); - muf2_grid.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = 64)); - - Some(GridAxes { - x_grid, - pids, // TODO: for the time being they are just empty, but we might use them for slicing the eko - mur2_grid, - muf2_grid, - }) - } - - /// Applies an evolution kernel operator (EKO) to the grids to evolve them from different - /// values of the factorization scale to a single one given by the parameter `q2`. - /// Using `xir` and `xif` you can trigger renormalization and factorization scale - /// variations respectively in the grid. - /// - /// # Panics - /// - /// Panics if the parameters do not match with the given grid. 
- #[must_use] - #[deprecated(since = "0.6.0", note = "use evolve instead")] - #[allow(deprecated)] - pub fn convolute_eko( - &self, - operator: Array5, - eko_info: EkoInfo, - order_mask: &[bool], - ) -> Option { - // Check operator layout - let dim = operator.shape(); - - assert_eq!(dim[0], eko_info.grid_axes.mur2_grid.len()); - assert_eq!(dim[0], eko_info.grid_axes.muf2_grid.len()); - assert_eq!(dim[1], eko_info.target_pids.len()); - assert_eq!(dim[3], eko_info.grid_axes.pids.len()); - - // swap axes around to optimize convolution - let operator = operator.permuted_axes([3, 1, 4, 0, 2]); - let operator = operator.as_standard_layout(); - - // determine what and how many hadrons are in the initial state - let initial_state_1 = self.initial_state_1(); - let initial_state_2 = self.initial_state_2(); - - // are the initial states hadrons? - let has_pdf1 = self.has_pdf1(); - let has_pdf2 = self.has_pdf2(); - - let pids1 = if has_pdf1 { - &eko_info.grid_axes.pids - } else { - slice::from_ref(&initial_state_1) - }; - let pids2 = if has_pdf2 { - &eko_info.grid_axes.pids - } else { - slice::from_ref(&initial_state_2) - }; - // create target luminosities - let tgt_pids1 = if has_pdf1 { - &eko_info.target_pids - } else { - slice::from_ref(&initial_state_1) - }; - let tgt_pids2 = if has_pdf2 { - &eko_info.target_pids - } else { - slice::from_ref(&initial_state_2) - }; - let lumi: Vec<_> = tgt_pids1 - .iter() - .cartesian_product(tgt_pids2.iter()) - .map(|(a, b)| lumi_entry![*a, *b, 1.0]) - .collect(); - - // create target subgrid dimensions - let tgt_q2_grid = vec![Mu2 { - ren: eko_info.muf2_0, - fac: eko_info.muf2_0, - }]; - let tgt_x1_grid = if has_pdf1 { - eko_info.target_x_grid.clone() - } else { - vec![1.0] - }; - let tgt_x2_grid = if has_pdf2 { - eko_info.target_x_grid.clone() - } else { - vec![1.0] - }; - - // create target grid - let mut result = Self { - subgrids: Array3::from_shape_simple_fn((1, self.bin_info().bins(), lumi.len()), || { - EmptySubgridV1.into() - }), 
- lumi, - bin_limits: self.bin_limits.clone(), - orders: vec![Order { - alphas: 0, - alpha: 0, - logxir: 0, - logxif: 0, - }], - subgrid_params: SubgridParams::default(), - more_members: self.more_members.clone(), - }; - // write additional metadata - result.set_key_value("lumi_id_types", &eko_info.lumi_id_types); - - // collect source grid informations - let grid_axes = self.axes()?; - - // Setup progress bar - let bar = ProgressBar::new( - u64::try_from(self.bin_info().bins() * self.lumi.len() * pids1.len() * pids2.len()) - .unwrap(), - ); - bar.set_style(ProgressStyle::default_bar().template( - "[{elapsed_precise}] {bar:50.cyan/blue} {pos:>7}/{len:7} - ETA: {eta_precise} {msg}", - )); - - // which (tgt_pid, src_pid) tuples are non-zero in general? - let non_zero_pid_indices: Vec<_> = (0..operator.dim().0) - .cartesian_product(0..operator.dim().1) - .filter(|&(tgt_pid_idx, src_pid_idx)| { - operator - .slice(s![tgt_pid_idx, src_pid_idx, .., .., ..]) - .iter() - .any(|&value| value != 0.0) - }) - .collect(); - - // iterate over all bins, which are mapped one-to-one from the target to the source grid - for bin in 0..self.bin_info().bins() { - // iterate over the source grid luminosities - for (src_lumi, src_entries) in self.lumi.iter().enumerate() { - // create a sorted and unique vector with the `q2` for all orders - let mut src_array_q2_grid: Vec<_> = (0..self.orders.len()) - .flat_map(|order| { - self.subgrids[[order, bin, src_lumi]] - .mu2_grid() - .iter() - .map(|mu2| mu2.fac) - .collect::>() - }) - .collect(); - src_array_q2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap()); - src_array_q2_grid.dedup(); - let src_array_q2_grid = src_array_q2_grid; - - let mut src_array = SparseArray3::::new( - src_array_q2_grid.len(), - if has_pdf1 { grid_axes.x_grid.len() } else { 1 }, - if has_pdf2 { grid_axes.x_grid.len() } else { 1 }, - ); - - // iterate over the source grid orders and add all of them together into - // `src_array`, using the right powers of alphas - for 
(order, powers) in self.orders.iter().enumerate() { - if !order_mask.is_empty() && !order_mask[order] { - continue; - } - - let logs = if (eko_info.xir, eko_info.xif) == (1.0, 1.0) { - if (powers.logxir > 0) || (powers.logxif > 0) { - continue; - } - - 1.0 - } else { - (eko_info.xir * eko_info.xir) - .ln() - .powi(powers.logxir.try_into().unwrap()) - * (eko_info.xif * eko_info.xif) - .ln() - .powi(powers.logxif.try_into().unwrap()) - }; - - let src_subgrid = &self.subgrids[[order, bin, src_lumi]]; - - // source x1/x2 grid might differ and be differently sorted than the operator - let x1_grid = if has_pdf1 { - src_subgrid - .x1_grid() - .iter() - .map(|x| { - eko_info - .grid_axes - .x_grid - .iter() - .position(|xi| approx_eq!(f64, *xi, *x, ulps = 64)) - .unwrap_or_else(|| unreachable!()) - }) - .collect() - } else { - Vec::new() - }; - let x2_grid = if has_pdf2 { - src_subgrid - .x2_grid() - .iter() - .map(|x| { - eko_info - .grid_axes - .x_grid - .iter() - .position(|xi| approx_eq!(f64, *xi, *x, ulps = 64)) - .unwrap_or_else(|| unreachable!()) - }) - .collect() - } else { - Vec::new() - }; - - for ((iq2, ix1, ix2), value) in src_subgrid.indexed_iter() { - let scale = src_subgrid.mu2_grid()[iq2].fac; - let src_iq2 = src_array_q2_grid - .iter() - .position(|&q2| q2 == scale) - .unwrap(); - let als_iq2 = eko_info - .grid_axes - .mur2_grid - .iter() - .position(|&q2| { - approx_eq!(f64, q2, eko_info.xir * eko_info.xir * scale, ulps = 64) - }) - .unwrap_or_else(|| { - panic!( - "Couldn't find mur2: {:?} with xir: {:?} and mur2_grid: {:?}", - scale, eko_info.xir, eko_info.grid_axes.mur2_grid - ) - }); - - let ix1 = if has_pdf1 { x1_grid[ix1] } else { ix1 }; - let ix2 = if has_pdf2 { x2_grid[ix2] } else { ix2 }; - - src_array[[src_iq2, ix1, ix2]] += eko_info.alphas[als_iq2] - .powi(powers.alphas.try_into().unwrap()) - * logs - * value; - } - } - - // Now we have our final grid - let src_array = src_array; - - if src_array.is_empty() { - bar.inc(u64::try_from(pids1.len() 
* pids2.len()).unwrap()); - continue; - } - - // Next we need to apply the tensor - let eko_src_q2_indices: Vec<_> = src_array_q2_grid - .iter() - .map(|&src_q2| { - eko_info - .grid_axes - .muf2_grid - .iter() - .position(|&q2| { - approx_eq!(f64, q2, eko_info.xif * eko_info.xif * src_q2, ulps = 64) - }) - .unwrap_or_else(|| { - panic!( - "Couldn't find muf2: {:?} with xif: {:?} and muf2_grid: {:?}", - src_q2, eko_info.xif, eko_info.grid_axes.muf2_grid - ) - }) - }) - .collect(); - // Iterate target lumis - for (tgt_lumi, (tgt_pid1_idx, tgt_pid2_idx)) in (0..pids1.len()) - .cartesian_product(0..pids2.len()) - .enumerate() - { - for (src_pid1, src_pid2, factor) in src_entries.entry() { - // find source lumi position - let src_pid1_idx = if has_pdf1 { - eko_info - .grid_axes - .pids - .iter() - .position(|x| { - // if `pid == 0` the gluon is meant - if *src_pid1 == 0 { - *x == 21 - } else { - x == src_pid1 - } - }) - .unwrap() - } else { - 0 - }; - let src_pid2_idx = if has_pdf2 { - eko_info - .grid_axes - .pids - .iter() - .position(|x| { - // `pid == 0` is the gluon exception, which might be 0 or 21 - if *src_pid2 == 0 { - *x == 21 - } else { - x == src_pid2 - } - }) - .unwrap() - } else { - 0 - }; - - // if `op1` and `op2` below are zero there's no work to do - // TODO: ideally we change the for loops instead of vetoing here - if (has_pdf1 - && !non_zero_pid_indices - .iter() - .any(|&tuple| tuple == (tgt_pid1_idx, src_pid1_idx))) - || (has_pdf2 - && !non_zero_pid_indices - .iter() - .any(|&tuple| tuple == (tgt_pid2_idx, src_pid2_idx))) - { - continue; - } - - // create target subgrid - let mut tgt_array = - SparseArray3::new(1, tgt_x1_grid.len(), tgt_x2_grid.len()); - - // slice the operater (which has already been reshuffled in the beginning) - let op1 = operator.slice(s![tgt_pid1_idx, src_pid1_idx, .., .., ..]); - let op2 = operator.slice(s![tgt_pid2_idx, src_pid2_idx, .., .., ..]); - - // -- this is by far the slowest section, and has to be optimized - - // 
iterate the target x position - for (tgt_x1_idx, tgt_x2_idx) in - (0..tgt_x1_grid.len()).cartesian_product(0..tgt_x2_grid.len()) - { - for ((src_q2_idx, src_x1_idx, src_x2_idx), value) in - src_array.indexed_iter() - { - // do the linear algebra - let mut value = factor * value; - let eko_src_q2_idx = eko_src_q2_indices[src_q2_idx]; - - if has_pdf1 { - value *= op1[[tgt_x1_idx, eko_src_q2_idx, src_x1_idx]]; - } - - // it's possible that at least one of the operators is zero - so skip, if possible - if value == 0.0 { - continue; - } - - if has_pdf2 { - value *= op2[[tgt_x2_idx, eko_src_q2_idx, src_x2_idx]]; - } - - // it's possible that at least one of the operators is zero - so skip, if possible - if value == 0.0 { - continue; - } - - tgt_array[[0, tgt_x1_idx, tgt_x2_idx]] += value; - } - } - - // -- - - // Now transfer the computed subgrid into the target grid - if !tgt_array.is_empty() { - let mut tgt_subgrid = mem::replace( - &mut result.subgrids[[0, bin, tgt_lumi]], - EmptySubgridV1.into(), - ); - - let mut subgrid = match tgt_subgrid { - SubgridEnum::EmptySubgridV1(_) => ImportOnlySubgridV2::new( - tgt_array, - tgt_q2_grid.clone(), - tgt_x1_grid.clone(), - tgt_x2_grid.clone(), - ) - .into(), - SubgridEnum::ImportOnlySubgridV2(ref mut array) => { - let array = array.array_mut(); - - for ((_, tgt_x1_idx, tgt_x2_idx), value) in - tgt_array.indexed_iter() - { - array[[0, tgt_x1_idx, tgt_x2_idx]] += value; - } - - tgt_subgrid - } - _ => unreachable!(), - }; - - mem::swap(&mut subgrid, &mut result.subgrids[[0, bin, tgt_lumi]]); - } - } - - bar.inc(1); - } - } - } - - bar.finish(); - - result.optimize(); - FkTable::try_from(result).ok() - } - /// Returns information for the generation of evolution operators that are being used in /// [`Grid::evolve`] with the parameter `order_mask`. 
#[must_use] @@ -2417,7 +1884,6 @@ impl Grid { #[cfg(test)] mod tests { use super::*; - use crate::import_only_subgrid::ImportOnlySubgridV1; use crate::lumi_entry; use float_cmp::assert_approx_eq; use std::fs::File; @@ -2858,107 +2324,6 @@ mod tests { ); } - // TODO: properly test axes returned - - #[allow(deprecated)] - fn simple_grid() -> (Grid, GridAxes) { - let mur2_grid = vec![20.]; - let muf2_grid = vec![20.]; - let x_grid = vec![0.1, 0.5, 1.]; - - let mut subgrid_params = SubgridParams::default(); - subgrid_params.set_x_order(1); - subgrid_params.set_x_bins(1); - subgrid_params.set_q2_order(1); - subgrid_params.set_q2_bins(1); - - let mut array = Array3::zeros((1, 3, 3)); - array[[0, 0, 0]] = 1.; - let sparse_array = SparseArray3::from_ndarray(array.view(), 0, 3); - let subgrid = ImportOnlySubgridV1::new( - sparse_array, - muf2_grid.clone(), - x_grid.clone(), - x_grid.clone(), - ) - .into(); - - let pids = vec![21, 1, 2]; - let mut grid = Grid::new( - vec![lumi_entry![21, 21, 1.0], lumi_entry![1, 2, 1.0]], - vec![Order { - alphas: 0, - alpha: 0, - logxir: 0, - logxif: 0, - }], - vec![0.0, 1.0], - subgrid_params, - ); - - grid.set_subgrid(0, 0, 0, subgrid); - - ( - grid, - GridAxes { - x_grid, - pids, - mur2_grid, - muf2_grid, - }, - ) - } - - #[test] - #[allow(deprecated)] - fn grid_axes() { - let (grid, axes) = simple_grid(); - - let ret_axes = grid.axes().unwrap(); - assert_eq!(ret_axes.x_grid, axes.x_grid); - assert_eq!(ret_axes.mur2_grid, axes.mur2_grid); - assert_eq!(ret_axes.muf2_grid, axes.muf2_grid); - assert_eq!(ret_axes.pids, vec![]); - } - - #[test] - #[allow(deprecated)] - fn grid_convolute_eko() { - let (grid, axes) = simple_grid(); - let target_x_grid = vec![1e-7, 1e-2, 1.]; - let target_pids = vec![21, 1, 2]; - - let lumi_id_types = "pdg_mc_ids".to_owned(); - - let eko_info = EkoInfo { - muf2_0: 1., - alphas: vec![1.], - xir: 1., - xif: 1., - target_x_grid, - target_pids, - grid_axes: GridAxes { - x_grid: axes.x_grid, - pids: axes.pids, - 
mur2_grid: axes.mur2_grid.clone(), - muf2_grid: axes.muf2_grid, - }, - lumi_id_types, - }; - let operator = ndarray::Array::from_shape_vec( - (1, 3, 3, 3, 3), - (0..4) - .map(|_| (0..3)) - .multi_cartesian_product() - .map(|v| if v[0] == v[2] && v[1] == v[3] { 1. } else { 0. }) - .collect(), - ) - .unwrap(); - let fk = grid.convolute_eko(operator, eko_info, &[]).unwrap(); - - assert_eq!(fk.bins(), 1); - } - #[test] fn evolve_info() { let grid = diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index f227e92e3..0bb5c8422 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -1,9 +1,6 @@ use pineappl::evolution::{AlphasTable, OperatorInfo}; use pineappl::grid::{Grid, Ntuple, Order}; -#[allow(deprecated)] -use pineappl::grid::{EkoInfo, GridAxes}; - use pineappl::lumi::LumiCache; use super::bin::PyBinRemapper; @@ -299,45 +296,6 @@ impl PyGrid { self.grid.set_remapper(remapper.bin_remapper).unwrap(); } - /// Extract the necessary informations for EKO. - /// - /// **Usage:** `pineko` - /// - /// Returns - /// ------- - /// x_grid: numpy.ndarray(float) - /// interpolation grid - /// pids: numpy.ndarray(int) - /// particle ids - /// mur2_grid : numpy.ndarray(float) - /// factorization scale list - /// muf2_grid : numpy.ndarray(float) - /// factorization scale list - #[deprecated(since = "0.6.0", note = "use evolve_info instead")] - #[allow(deprecated)] - pub fn axes<'py>( - &self, - py: Python<'py>, - ) -> ( - &'py PyArray1, - &'py PyArray1, - &'py PyArray1, - &'py PyArray1, - ) { - let GridAxes { - x_grid, - pids, - mur2_grid, - muf2_grid, - } = self.grid.axes().unwrap(); - ( - x_grid.into_pyarray(py), - pids.into_pyarray(py), - mur2_grid.into_pyarray(py), - muf2_grid.into_pyarray(py), - ) - } - /// Convolute grid with pdf. /// /// **Usage:** `pineko` @@ -464,83 +422,6 @@ impl PyGrid { .into_pyarray(py) } - /// Convolute with with an evolution operator. 
- /// - /// **Usage:** `pineko` - /// - /// Parameters - /// ---------- - /// muf2_0 : float - /// reference scale - /// alphas : numpy.ndarray(float) - /// list with :math:`\alpha_s(Q2)` for the process scales - /// pids : numpy.ndarray(int) - /// sorting of the particles in the tensor - /// x_grid : numpy.ndarray(float) - /// interpolation grid - /// target_pids : numpy.ndarray(int) - /// sorting of the particles in the tensor for final FkTable - /// target_x_grid : numpy.ndarray(float) - /// final FKTable interpolation grid - /// mur2_grid : numpy.ndarray(float) - /// list of renormalization scales - /// muf2_grid : numpy.ndarray(float) - /// list of factorization scales - /// operator : numpy.ndarray(int, rank=5) - /// evolution tensor - /// orders_mask: numpy.ndarray(bool) - /// boolean mask to activate orders - /// - /// Returns - /// ------- - /// PyFkTable : - /// produced FK table - #[deprecated(since = "0.6.0", note = "use evolve instead")] - #[allow(deprecated)] - pub fn convolute_eko( - &self, - muf2_0: f64, - alphas: PyReadonlyArray1, - pids: PyReadonlyArray1, - x_grid: PyReadonlyArray1, - target_pids: PyReadonlyArray1, - target_x_grid: PyReadonlyArray1, - mur2_grid: PyReadonlyArray1, - muf2_grid: PyReadonlyArray1, - operator: PyReadonlyArray5, - lumi_id_types: String, - order_mask: PyReadonlyArray1, - xi: (f64, f64), - ) -> PyFkTable { - let eko_info = EkoInfo { - muf2_0, - alphas: alphas.to_vec().unwrap(), - xir: xi.0, - xif: xi.1, - target_pids: target_pids.to_vec().unwrap(), - target_x_grid: target_x_grid.to_vec().unwrap(), - grid_axes: GridAxes { - x_grid: x_grid.to_vec().unwrap(), - pids: pids.to_vec().unwrap(), - mur2_grid: mur2_grid.to_vec().unwrap(), - muf2_grid: muf2_grid.to_vec().unwrap(), - }, - lumi_id_types, - }; - - let evolved_grid = self - .grid - .convolute_eko( - operator.as_array().to_owned(), - eko_info, - &order_mask.to_vec().unwrap(), - ) - .expect("Nothing returned from evolution."); - PyFkTable { - fk_table: evolved_grid, - } - 
} - /// Convolute with grid with an evolution operator. /// /// Parameters diff --git a/pineappl_py/tests/test_grid.py b/pineappl_py/tests/test_grid.py index ab0a6e56b..618faf279 100644 --- a/pineappl_py/tests/test_grid.py +++ b/pineappl_py/tests/test_grid.py @@ -107,34 +107,6 @@ def test_convolute_with_one(self): [2**3 * 5e6 / 9999, 0.0], ) - def test_axes(self): - g = self.fake_grid() - - # add 2 DIS grids - xs = np.linspace(0.5, 1.0, 5) - vs = np.random.rand(len(xs)) - subgrid = pineappl.import_only_subgrid.ImportOnlySubgridV1( - vs[np.newaxis, :, np.newaxis], - [90.0], - xs, - np.array([1.0]), - ) - g.set_subgrid(0, 0, 0, subgrid) - vs2 = np.random.rand(len(xs)) - subgrid = pineappl.import_only_subgrid.ImportOnlySubgridV1( - vs2[np.newaxis, :, np.newaxis], - [100.0], - xs, - np.array([1.0]), - ) - g.set_subgrid(0, 1, 0, subgrid) - # now get the thing - ei = g.axes() - - np.testing.assert_allclose(ei[0], xs) - np.testing.assert_allclose(ei[1], []) - np.testing.assert_allclose(ei[2], [90.0, 100.0]) - def test_io(self, tmp_path): g = self.fake_grid() p = tmp_path / "test.pineappl" @@ -144,21 +116,6 @@ def test_io(self, tmp_path): assert isinstance(gg, pineappl.grid.Grid) _ = pineappl.grid.Grid.read(str(p)) - def test_convolute_eko(self): - g = self.fake_grid() - fake_eko = { - "q2_ref": 1.0, - "targetpids": [1], - "targetgrid": [0.1, 1.0], - "inputpids": [1], - "inputgrid": [0.1, 1.0], - "interpolation_xgrid": [0.1, 1.0], - "Q2grid": {90: {"operators": np.random.rand(1, 2, 1, 2), "alphas": 1.0}}, - } - g.set_key_value("lumi_id_types", "pdg_mc_ids") - # fk = g.convolute_eko(fake_eko) - # assert isinstance(fk.raw, pineappl.pineappl.PyFkTable) - def test_fill(self): g = self.fake_grid() g.fill(0.5, 0.5, 10.0, 0, 0.01, 0, 10.0) From 87467903b91c7ffc9bef4ce1358a926733b36eb5 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 24 May 2024 11:31:00 +0200 Subject: [PATCH 092/179] Move `Order` and `ParseOrderError` to new module `order` --- CHANGELOG.md | 4 + 
pineappl/src/evolution.rs | 3 +- pineappl/src/fk_table.rs | 3 +- pineappl/src/grid.rs | 450 +-------------------------- pineappl/src/lib.rs | 1 + pineappl/src/order.rs | 458 ++++++++++++++++++++++++++++ pineappl/tests/drell_yan_lo.rs | 3 +- pineappl_capi/src/lib.rs | 3 +- pineappl_cli/src/export.rs | 3 +- pineappl_cli/src/export/applgrid.rs | 3 +- pineappl_cli/src/import/applgrid.rs | 3 +- pineappl_cli/src/import/fastnlo.rs | 3 +- pineappl_cli/src/import/fktable.rs | 3 +- pineappl_cli/src/read.rs | 2 +- pineappl_cli/src/write.rs | 2 +- 15 files changed, 484 insertions(+), 460 deletions(-) create mode 100644 pineappl/src/order.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 38797b98b..3d8a852e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Changed + +- moved `Order` and `ParseOrderError` to their own module `order` + ### Removed - removed support for Python 3.6 diff --git a/pineappl/src/evolution.rs b/pineappl/src/evolution.rs index a142fc5c1..ed7cad74a 100644 --- a/pineappl/src/evolution.rs +++ b/pineappl/src/evolution.rs @@ -1,9 +1,10 @@ //! Supporting classes and functions for [`Grid::evolve`]. -use super::grid::{Grid, GridError, Order}; +use super::grid::{Grid, GridError}; use super::import_only_subgrid::ImportOnlySubgridV2; use super::lumi::LumiEntry; use super::lumi_entry; +use super::order::Order; use super::sparse_array3::SparseArray3; use super::subgrid::{Mu2, Subgrid, SubgridEnum}; use float_cmp::approx_eq; diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 2f4977d6d..23580cc1f 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -1,7 +1,8 @@ //! Provides the [`FkTable`] type. 
-use super::grid::{Grid, GridError, Order}; +use super::grid::{Grid, GridError}; use super::lumi::LumiCache; +use super::order::Order; use super::subgrid::Subgrid; use float_cmp::approx_eq; use ndarray::Array4; diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 130cf322a..8c13f3ecc 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -8,6 +8,7 @@ use super::import_only_subgrid::ImportOnlySubgridV2; use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; use super::lumi::{LumiCache, LumiEntry}; use super::ntuple_subgrid::NtupleSubgridV1; +use super::order::Order; use super::pids::{self, PidBasis}; use super::subgrid::{ExtraSubgridParams, Mu2, Subgrid, SubgridEnum, SubgridParams}; use bitflags::bitflags; @@ -17,261 +18,13 @@ use lz4_flex::frame::{FrameDecoder, FrameEncoder}; use ndarray::{s, Array3, ArrayView5, Axis, CowArray, Dimension, Ix4}; use serde::{Deserialize, Serialize, Serializer}; use std::borrow::Cow; -use std::cmp::Ordering; use std::collections::{BTreeMap, HashMap}; use std::io::{self, BufRead, BufReader, BufWriter, Read, Write}; use std::iter; use std::mem; use std::ops::Range; -use std::str::FromStr; use thiserror::Error; -/// Error type keeping information if [`Order::from_str`] went wrong. -#[derive(Debug, Error, Eq, PartialEq)] -#[error("{0}")] -pub struct ParseOrderError(String); - -// TODO: when possible change the types from `u32` to `u8` to change `try_into` to `into` - -/// Coupling powers for each grid. -#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] -pub struct Order { - /// Exponent of the strong coupling. - pub alphas: u32, - /// Exponent of the electromagnetic coupling. - pub alpha: u32, - /// Exponent of the logarithm of the scale factor of the renomalization scale. - pub logxir: u32, - /// Exponent of the logarithm of the scale factor of the factorization scale. 
- pub logxif: u32, -} - -impl FromStr for Order { - type Err = ParseOrderError; - - fn from_str(s: &str) -> Result { - let mut result = Self { - alphas: 0, - alpha: 0, - logxir: 0, - logxif: 0, - }; - - for tuple in s - .split(|c: char| c.is_ascii_digit()) - .filter(|s| !s.is_empty()) - .zip( - s.split(|c: char| !c.is_ascii_digit()) - .filter(|s| !s.is_empty()) - .map(str::parse), - ) - { - match tuple { - ("as", Ok(num)) => { - result.alphas = num; - } - ("a", Ok(num)) => { - result.alpha = num; - } - ("lr", Ok(num)) => { - result.logxir = num; - } - ("lf", Ok(num)) => { - result.logxif = num; - } - (label, Err(err)) => { - return Err(ParseOrderError(format!( - "error while parsing exponent of '{label}': {err}" - ))); - } - (label, Ok(_)) => { - return Err(ParseOrderError(format!("unknown coupling: '{label}'"))); - } - } - } - - Ok(result) - } -} - -impl Ord for Order { - fn cmp(&self, other: &Self) -> Ordering { - // sort leading orders before next-to-leading orders, then the lowest power in alpha, the - // rest lexicographically - (self.alphas + self.alpha) - .cmp(&(other.alphas + other.alpha)) - .then((self.alpha, self.logxir, self.logxif).cmp(&( - other.alpha, - other.logxir, - other.logxif, - ))) - } -} - -impl PartialOrd for Order { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Order { - /// Constructor. This function mainly exists to have a way of constructing `Order` that is less - /// verbose. - #[must_use] - pub const fn new(alphas: u32, alpha: u32, logxir: u32, logxif: u32) -> Self { - Self { - alphas, - alpha, - logxir, - logxif, - } - } - - /// Return a mask suitable to pass as the `order_mask` parameter of [`Grid::convolute`], - /// [`Grid::evolve`] or [`Grid::evolve_info`]. 
The selection of `orders` is controlled using - /// the `max_as` and `max_al` parameters, for instance setting `max_as = 1` and `max_al = 0` - /// selects the LO QCD only, `max_as = 2` and `max_al = 0` the NLO QCD; setting `max_as = 3` - /// and `max_al = 2` would select all NLOs, and the NNLO QCD. - /// - /// # Example - /// - /// In the case of Drell—Yan, there are the following orders: - /// - /// - exactly one leading order (LO), - /// - two next-to-leading orders (NLO), which are - /// - the NLO QCD and - /// - the NLO EW, and - /// - three next-to-next-to-leading orders (NNLO), - /// - the NNLO QCD, - /// - the NNLO EW, and finally - /// - the mixed NNLO QCD—EW. - /// - /// ```rust - /// use pineappl::grid::Order; - /// - /// let orders = [ - /// Order::new(0, 2, 0, 0), // LO : alpha^2 - /// Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 - /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 - /// Order::new(2, 2, 0, 0), // NNLO QCD : alphas^2 alpha^2 - /// Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 - /// Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 - /// ]; - /// - /// // LO EW - /// assert_eq!(Order::create_mask(&orders, 0, 1, false), [true, false, false, false, false, false]); - /// // LO QCD - /// assert_eq!(Order::create_mask(&orders, 1, 0, false), [true, false, false, false, false, false]); - /// // LO - /// assert_eq!(Order::create_mask(&orders, 1, 1, false), [true, false, false, false, false, false]); - /// // NLO QCD - /// assert_eq!(Order::create_mask(&orders, 2, 0, false), [true, true, false, false, false, false]); - /// // NLO EW - /// assert_eq!(Order::create_mask(&orders, 0, 2, false), [true, false, true, false, false, false]); - /// // NNLO QCD - /// assert_eq!(Order::create_mask(&orders, 3, 0, false), [true, true, false, true, false, false]); - /// // NNLO EW - /// assert_eq!(Order::create_mask(&orders, 0, 3, false), [true, false, true, false, false, true]); - /// ``` - /// - /// Orders containing non-zero powers of logarithms can 
be selected as well if `logs` is set to - /// `true`: - /// - /// ```rust - /// use pineappl::grid::Order; - /// - /// let orders = [ - /// Order::new(0, 2, 0, 0), // LO : alpha^2 - /// Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 - /// Order::new(1, 2, 1, 0), // NLO QCD : alphas alpha^2 logxif - /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 - /// Order::new(0, 3, 1, 0), // NLO EW : alpha^3 logxif - /// ]; - /// - /// assert_eq!(Order::create_mask(&orders, 0, 2, true), [true, false, false, true, true]); - /// ``` - /// - /// For the more complicated example of top-pair production one can see the difference between - /// the selection for different LOs: - /// - /// ```rust - /// use pineappl::grid::Order; - /// - /// let orders = [ - /// Order::new(2, 0, 0, 0), // LO QCD : alphas^2 - /// Order::new(1, 1, 0, 0), // LO QCD—EW : alphas alpha - /// Order::new(0, 2, 0, 0), // LO EW : alpha^2 - /// Order::new(3, 0, 0, 0), // NLO QCD : alphas^3 - /// Order::new(2, 1, 0, 0), // NLO QCD—EW : alphas^2 alpha - /// Order::new(1, 2, 0, 0), // NLO QCD—EW : alphas alpha^2 - /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 - /// ]; - /// - /// // LO EW - /// assert_eq!(Order::create_mask(&orders, 0, 1, false), [false, false, true, false, false, false, false]); - /// // LO QCD - /// assert_eq!(Order::create_mask(&orders, 1, 0, false), [true, false, false, false, false, false, false]); - /// // LO - /// assert_eq!(Order::create_mask(&orders, 1, 1, false), [true, true, true, false, false, false, false]); - /// ``` - #[must_use] - pub fn create_mask(orders: &[Self], max_as: u32, max_al: u32, logs: bool) -> Vec { - // smallest sum of alphas and alpha - let lo = orders - .iter() - .map(|Self { alphas, alpha, .. }| alphas + alpha) - .min() - .unwrap_or_default(); - - // all leading orders, without logarithms - let leading_orders: Vec<_> = orders - .iter() - .filter(|Self { alphas, alpha, .. 
}| alphas + alpha == lo) - .cloned() - .collect(); - - let lo_as = leading_orders - .iter() - .map(|Self { alphas, .. }| *alphas) - .max() - .unwrap_or_default(); - let lo_al = leading_orders - .iter() - .map(|Self { alpha, .. }| *alpha) - .max() - .unwrap_or_default(); - - let max = max_as.max(max_al); - let min = max_as.min(max_al); - - orders - .iter() - .map( - |&Self { - alphas, - alpha, - logxir, - logxif, - }| { - if !logs && (logxir > 0 || logxif > 0) { - return false; - } - - let pto = alphas + alpha - lo; - - alphas + alpha < min + lo - || (alphas + alpha < max + lo - && match max_as.cmp(&max_al) { - Ordering::Greater => lo_as + pto == alphas, - Ordering::Less => lo_al + pto == alpha, - Ordering::Equal => false, - }) - }, - ) - .collect() - } -} - /// This structure represents a position (`x1`, `x2`, `q2`) in a `Subgrid` together with a /// corresponding `weight`. The type `W` can either be a `f64` or `()`, which is used when multiple /// weights should be signaled. @@ -1888,207 +1641,6 @@ mod tests { use float_cmp::assert_approx_eq; use std::fs::File; - #[test] - fn order_from_str() { - assert_eq!("as1".parse(), Ok(Order::new(1, 0, 0, 0))); - assert_eq!("a1".parse(), Ok(Order::new(0, 1, 0, 0))); - assert_eq!("as1lr1".parse(), Ok(Order::new(1, 0, 1, 0))); - assert_eq!("as1lf1".parse(), Ok(Order::new(1, 0, 0, 1))); - assert_eq!( - "ab12".parse::(), - Err(ParseOrderError("unknown coupling: 'ab'".to_owned())) - ); - assert_eq!( - "ab123456789000000".parse::(), - Err(ParseOrderError( - "error while parsing exponent of 'ab': number too large to fit in target type" - .to_owned() - )) - ); - } - - #[test] - fn order_cmp() { - let mut orders = [ - Order::new(1, 2, 1, 0), - Order::new(1, 2, 0, 1), - Order::new(1, 2, 0, 0), - Order::new(0, 3, 1, 0), - Order::new(0, 3, 0, 1), - Order::new(0, 3, 0, 0), - Order::new(0, 2, 0, 0), - ]; - - orders.sort(); - - assert_eq!(orders[0], Order::new(0, 2, 0, 0)); - assert_eq!(orders[1], Order::new(1, 2, 0, 0)); - 
assert_eq!(orders[2], Order::new(1, 2, 0, 1)); - assert_eq!(orders[3], Order::new(1, 2, 1, 0)); - assert_eq!(orders[4], Order::new(0, 3, 0, 0)); - assert_eq!(orders[5], Order::new(0, 3, 0, 1)); - assert_eq!(orders[6], Order::new(0, 3, 1, 0)); - } - - #[test] - fn order_create_mask() { - // Drell—Yan orders - let orders = [ - Order::new(0, 2, 0, 0), // LO : alpha^2 - Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 - Order::new(0, 3, 0, 0), // NLO EW : alpha^3 - Order::new(2, 2, 0, 0), // NNLO QCD : alphas^2 alpha^2 - Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 - Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 - ]; - - assert_eq!( - Order::create_mask(&orders, 0, 0, false), - [false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 0, 1, false), - [true, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 0, 2, false), - [true, false, true, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 0, 3, false), - [true, false, true, false, false, true] - ); - assert_eq!( - Order::create_mask(&orders, 1, 0, false), - [true, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 1, 1, false), - [true, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 1, 2, false), - [true, false, true, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 1, 3, false), - [true, false, true, false, false, true] - ); - assert_eq!( - Order::create_mask(&orders, 2, 0, false), - [true, true, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 2, 1, false), - [true, true, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 2, 2, false), - [true, true, true, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 2, 3, false), - [true, true, true, false, false, true] - ); - assert_eq!( - Order::create_mask(&orders, 3, 0, false), - [true, true, false, true, 
false, false] - ); - assert_eq!( - Order::create_mask(&orders, 3, 1, false), - [true, true, false, true, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 3, 2, false), - [true, true, true, true, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 3, 3, false), - [true, true, true, true, true, true] - ); - - // Top-pair production orders - let orders = [ - Order::new(2, 0, 0, 0), // LO QCD : alphas^2 - Order::new(1, 1, 0, 0), // LO QCD—EW : alphas alpha - Order::new(0, 2, 0, 0), // LO EW : alpha^2 - Order::new(3, 0, 0, 0), // NLO QCD : alphas^3 - Order::new(2, 1, 0, 0), // NLO QCD—EW : alphas^2 alpha - Order::new(1, 2, 0, 0), // NLO QCD—EW : alphas alpha^2 - Order::new(0, 3, 0, 0), // NLO EW : alpha^3 - Order::new(4, 0, 0, 0), // NNLO QCD : alphas^4 - Order::new(3, 1, 0, 0), // NNLO QCD—EW : alphas^3 alpha - Order::new(2, 2, 0, 0), // NNLO QCD—EW : alphas^2 alpha^2 - Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 - Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 - ]; - - assert_eq!( - Order::create_mask(&orders, 0, 0, false), - [false, false, false, false, false, false, false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 0, 1, false), - [false, false, true, false, false, false, false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 0, 2, false), - [false, false, true, false, false, false, true, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 0, 3, false), - [false, false, true, false, false, false, true, false, false, false, false, true] - ); - assert_eq!( - Order::create_mask(&orders, 1, 0, false), - [true, false, false, false, false, false, false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 1, 1, false), - [true, true, true, false, false, false, false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 1, 2, false), - [true, true, true, false, false, 
false, true, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 1, 3, false), - [true, true, true, false, false, false, true, false, false, false, false, true] - ); - assert_eq!( - Order::create_mask(&orders, 2, 0, false), - [true, false, false, true, false, false, false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 2, 1, false), - [true, true, true, true, false, false, false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 2, 2, false), - [true, true, true, true, true, true, true, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 2, 3, false), - [true, true, true, true, true, true, true, false, false, false, false, true] - ); - assert_eq!( - Order::create_mask(&orders, 3, 0, false), - [true, false, false, true, false, false, false, true, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 3, 1, false), - [true, true, true, true, false, false, false, true, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 3, 2, false), - [true, true, true, true, true, true, true, true, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 3, 3, false), - [true, true, true, true, true, true, true, true, true, true, true, true] - ); - } - #[test] fn grid_with_subgrid_type() { let subgrid_type = String::from("Idontexist"); diff --git a/pineappl/src/lib.rs b/pineappl/src/lib.rs index cf3ba9c66..ae8dd0a13 100644 --- a/pineappl/src/lib.rs +++ b/pineappl/src/lib.rs @@ -45,6 +45,7 @@ pub mod import_only_subgrid; pub mod lagrange_subgrid; pub mod lumi; pub mod ntuple_subgrid; +pub mod order; pub mod packed_array; pub mod pids; pub mod sparse_array3; diff --git a/pineappl/src/order.rs b/pineappl/src/order.rs new file mode 100644 index 000000000..ef64dda6e --- /dev/null +++ b/pineappl/src/order.rs @@ -0,0 +1,458 @@ +//! 
TODO + +use serde::{Deserialize, Serialize}; +use std::cmp::Ordering; +use std::str::FromStr; +use thiserror::Error; + +/// Error type keeping information if [`Order::from_str`] went wrong. +#[derive(Debug, Error, Eq, PartialEq)] +#[error("{0}")] +pub struct ParseOrderError(String); + +// TODO: when possible change the types from `u32` to `u8` to change `try_into` to `into` + +/// Coupling powers for each grid. +#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] +pub struct Order { + /// Exponent of the strong coupling. + pub alphas: u32, + /// Exponent of the electromagnetic coupling. + pub alpha: u32, + /// Exponent of the logarithm of the scale factor of the renomalization scale. + pub logxir: u32, + /// Exponent of the logarithm of the scale factor of the factorization scale. + pub logxif: u32, +} + +impl FromStr for Order { + type Err = ParseOrderError; + + fn from_str(s: &str) -> Result { + let mut result = Self { + alphas: 0, + alpha: 0, + logxir: 0, + logxif: 0, + }; + + for tuple in s + .split(|c: char| c.is_ascii_digit()) + .filter(|s| !s.is_empty()) + .zip( + s.split(|c: char| !c.is_ascii_digit()) + .filter(|s| !s.is_empty()) + .map(str::parse), + ) + { + match tuple { + ("as", Ok(num)) => { + result.alphas = num; + } + ("a", Ok(num)) => { + result.alpha = num; + } + ("lr", Ok(num)) => { + result.logxir = num; + } + ("lf", Ok(num)) => { + result.logxif = num; + } + (label, Err(err)) => { + return Err(ParseOrderError(format!( + "error while parsing exponent of '{label}': {err}" + ))); + } + (label, Ok(_)) => { + return Err(ParseOrderError(format!("unknown coupling: '{label}'"))); + } + } + } + + Ok(result) + } +} + +impl Ord for Order { + fn cmp(&self, other: &Self) -> Ordering { + // sort leading orders before next-to-leading orders, then the lowest power in alpha, the + // rest lexicographically + (self.alphas + self.alpha) + .cmp(&(other.alphas + other.alpha)) + .then((self.alpha, self.logxir, self.logxif).cmp(&( + other.alpha, + 
other.logxir, + other.logxif, + ))) + } +} + +impl PartialOrd for Order { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Order { + /// Constructor. This function mainly exists to have a way of constructing `Order` that is less + /// verbose. + #[must_use] + pub const fn new(alphas: u32, alpha: u32, logxir: u32, logxif: u32) -> Self { + Self { + alphas, + alpha, + logxir, + logxif, + } + } + + /// Return a mask suitable to pass as the `order_mask` parameter of [`Grid::convolute`], + /// [`Grid::evolve`] or [`Grid::evolve_info`]. The selection of `orders` is controlled using + /// the `max_as` and `max_al` parameters, for instance setting `max_as = 1` and `max_al = 0` + /// selects the LO QCD only, `max_as = 2` and `max_al = 0` the NLO QCD; setting `max_as = 3` + /// and `max_al = 2` would select all NLOs, and the NNLO QCD. + /// + /// # Example + /// + /// In the case of Drell—Yan, there are the following orders: + /// + /// - exactly one leading order (LO), + /// - two next-to-leading orders (NLO), which are + /// - the NLO QCD and + /// - the NLO EW, and + /// - three next-to-next-to-leading orders (NNLO), + /// - the NNLO QCD, + /// - the NNLO EW, and finally + /// - the mixed NNLO QCD—EW. 
+ /// + /// ```rust + /// use pineappl::grid::Order; + /// + /// let orders = [ + /// Order::new(0, 2, 0, 0), // LO : alpha^2 + /// Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 + /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 + /// Order::new(2, 2, 0, 0), // NNLO QCD : alphas^2 alpha^2 + /// Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 + /// Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 + /// ]; + /// + /// // LO EW + /// assert_eq!(Order::create_mask(&orders, 0, 1, false), [true, false, false, false, false, false]); + /// // LO QCD + /// assert_eq!(Order::create_mask(&orders, 1, 0, false), [true, false, false, false, false, false]); + /// // LO + /// assert_eq!(Order::create_mask(&orders, 1, 1, false), [true, false, false, false, false, false]); + /// // NLO QCD + /// assert_eq!(Order::create_mask(&orders, 2, 0, false), [true, true, false, false, false, false]); + /// // NLO EW + /// assert_eq!(Order::create_mask(&orders, 0, 2, false), [true, false, true, false, false, false]); + /// // NNLO QCD + /// assert_eq!(Order::create_mask(&orders, 3, 0, false), [true, true, false, true, false, false]); + /// // NNLO EW + /// assert_eq!(Order::create_mask(&orders, 0, 3, false), [true, false, true, false, false, true]); + /// ``` + /// + /// Orders containing non-zero powers of logarithms can be selected as well if `logs` is set to + /// `true`: + /// + /// ```rust + /// use pineappl::grid::Order; + /// + /// let orders = [ + /// Order::new(0, 2, 0, 0), // LO : alpha^2 + /// Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 + /// Order::new(1, 2, 1, 0), // NLO QCD : alphas alpha^2 logxif + /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 + /// Order::new(0, 3, 1, 0), // NLO EW : alpha^3 logxif + /// ]; + /// + /// assert_eq!(Order::create_mask(&orders, 0, 2, true), [true, false, false, true, true]); + /// ``` + /// + /// For the more complicated example of top-pair production one can see the difference between + /// the selection for different LOs: + /// 
+ /// ```rust + /// use pineappl::grid::Order; + /// + /// let orders = [ + /// Order::new(2, 0, 0, 0), // LO QCD : alphas^2 + /// Order::new(1, 1, 0, 0), // LO QCD—EW : alphas alpha + /// Order::new(0, 2, 0, 0), // LO EW : alpha^2 + /// Order::new(3, 0, 0, 0), // NLO QCD : alphas^3 + /// Order::new(2, 1, 0, 0), // NLO QCD—EW : alphas^2 alpha + /// Order::new(1, 2, 0, 0), // NLO QCD—EW : alphas alpha^2 + /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 + /// ]; + /// + /// // LO EW + /// assert_eq!(Order::create_mask(&orders, 0, 1, false), [false, false, true, false, false, false, false]); + /// // LO QCD + /// assert_eq!(Order::create_mask(&orders, 1, 0, false), [true, false, false, false, false, false, false]); + /// // LO + /// assert_eq!(Order::create_mask(&orders, 1, 1, false), [true, true, true, false, false, false, false]); + /// ``` + #[must_use] + pub fn create_mask(orders: &[Self], max_as: u32, max_al: u32, logs: bool) -> Vec { + // smallest sum of alphas and alpha + let lo = orders + .iter() + .map(|Self { alphas, alpha, .. }| alphas + alpha) + .min() + .unwrap_or_default(); + + // all leading orders, without logarithms + let leading_orders: Vec<_> = orders + .iter() + .filter(|Self { alphas, alpha, .. }| alphas + alpha == lo) + .cloned() + .collect(); + + let lo_as = leading_orders + .iter() + .map(|Self { alphas, .. }| *alphas) + .max() + .unwrap_or_default(); + let lo_al = leading_orders + .iter() + .map(|Self { alpha, .. 
}| *alpha) + .max() + .unwrap_or_default(); + + let max = max_as.max(max_al); + let min = max_as.min(max_al); + + orders + .iter() + .map( + |&Self { + alphas, + alpha, + logxir, + logxif, + }| { + if !logs && (logxir > 0 || logxif > 0) { + return false; + } + + let pto = alphas + alpha - lo; + + alphas + alpha < min + lo + || (alphas + alpha < max + lo + && match max_as.cmp(&max_al) { + Ordering::Greater => lo_as + pto == alphas, + Ordering::Less => lo_al + pto == alpha, + Ordering::Equal => false, + }) + }, + ) + .collect() + } +} + +#[cfg(test)] +mod tests { + use super::{Order, ParseOrderError}; + + #[test] + fn order_from_str() { + assert_eq!("as1".parse(), Ok(Order::new(1, 0, 0, 0))); + assert_eq!("a1".parse(), Ok(Order::new(0, 1, 0, 0))); + assert_eq!("as1lr1".parse(), Ok(Order::new(1, 0, 1, 0))); + assert_eq!("as1lf1".parse(), Ok(Order::new(1, 0, 0, 1))); + assert_eq!( + "ab12".parse::(), + Err(ParseOrderError("unknown coupling: 'ab'".to_owned())) + ); + assert_eq!( + "ab123456789000000".parse::(), + Err(ParseOrderError( + "error while parsing exponent of 'ab': number too large to fit in target type" + .to_owned() + )) + ); + } + + #[test] + fn order_cmp() { + let mut orders = [ + Order::new(1, 2, 1, 0), + Order::new(1, 2, 0, 1), + Order::new(1, 2, 0, 0), + Order::new(0, 3, 1, 0), + Order::new(0, 3, 0, 1), + Order::new(0, 3, 0, 0), + Order::new(0, 2, 0, 0), + ]; + + orders.sort(); + + assert_eq!(orders[0], Order::new(0, 2, 0, 0)); + assert_eq!(orders[1], Order::new(1, 2, 0, 0)); + assert_eq!(orders[2], Order::new(1, 2, 0, 1)); + assert_eq!(orders[3], Order::new(1, 2, 1, 0)); + assert_eq!(orders[4], Order::new(0, 3, 0, 0)); + assert_eq!(orders[5], Order::new(0, 3, 0, 1)); + assert_eq!(orders[6], Order::new(0, 3, 1, 0)); + } + + #[test] + fn order_create_mask() { + // Drell—Yan orders + let orders = [ + Order::new(0, 2, 0, 0), // LO : alpha^2 + Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 + Order::new(0, 3, 0, 0), // NLO EW : alpha^3 + Order::new(2, 2, 
0, 0), // NNLO QCD : alphas^2 alpha^2 + Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 + Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 + ]; + + assert_eq!( + Order::create_mask(&orders, 0, 0, false), + [false, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 0, 1, false), + [true, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 0, 2, false), + [true, false, true, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 0, 3, false), + [true, false, true, false, false, true] + ); + assert_eq!( + Order::create_mask(&orders, 1, 0, false), + [true, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 1, 1, false), + [true, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 1, 2, false), + [true, false, true, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 1, 3, false), + [true, false, true, false, false, true] + ); + assert_eq!( + Order::create_mask(&orders, 2, 0, false), + [true, true, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 2, 1, false), + [true, true, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 2, 2, false), + [true, true, true, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 2, 3, false), + [true, true, true, false, false, true] + ); + assert_eq!( + Order::create_mask(&orders, 3, 0, false), + [true, true, false, true, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 3, 1, false), + [true, true, false, true, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 3, 2, false), + [true, true, true, true, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 3, 3, false), + [true, true, true, true, true, true] + ); + + // Top-pair production orders + let orders = [ + Order::new(2, 0, 0, 0), // LO QCD : alphas^2 + Order::new(1, 1, 0, 0), // LO QCD—EW : alphas alpha + 
Order::new(0, 2, 0, 0), // LO EW : alpha^2 + Order::new(3, 0, 0, 0), // NLO QCD : alphas^3 + Order::new(2, 1, 0, 0), // NLO QCD—EW : alphas^2 alpha + Order::new(1, 2, 0, 0), // NLO QCD—EW : alphas alpha^2 + Order::new(0, 3, 0, 0), // NLO EW : alpha^3 + Order::new(4, 0, 0, 0), // NNLO QCD : alphas^4 + Order::new(3, 1, 0, 0), // NNLO QCD—EW : alphas^3 alpha + Order::new(2, 2, 0, 0), // NNLO QCD—EW : alphas^2 alpha^2 + Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 + Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 + ]; + + assert_eq!( + Order::create_mask(&orders, 0, 0, false), + [false, false, false, false, false, false, false, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 0, 1, false), + [false, false, true, false, false, false, false, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 0, 2, false), + [false, false, true, false, false, false, true, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 0, 3, false), + [false, false, true, false, false, false, true, false, false, false, false, true] + ); + assert_eq!( + Order::create_mask(&orders, 1, 0, false), + [true, false, false, false, false, false, false, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 1, 1, false), + [true, true, true, false, false, false, false, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 1, 2, false), + [true, true, true, false, false, false, true, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 1, 3, false), + [true, true, true, false, false, false, true, false, false, false, false, true] + ); + assert_eq!( + Order::create_mask(&orders, 2, 0, false), + [true, false, false, true, false, false, false, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 2, 1, false), + [true, true, true, true, false, false, false, false, false, false, false, false] 
+ ); + assert_eq!( + Order::create_mask(&orders, 2, 2, false), + [true, true, true, true, true, true, true, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 2, 3, false), + [true, true, true, true, true, true, true, false, false, false, false, true] + ); + assert_eq!( + Order::create_mask(&orders, 3, 0, false), + [true, false, false, true, false, false, false, true, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 3, 1, false), + [true, true, true, true, false, false, false, true, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 3, 2, false), + [true, true, true, true, true, true, true, true, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 3, 3, false), + [true, true, true, true, true, true, true, true, true, true, true, true] + ); + } +} diff --git a/pineappl/tests/drell_yan_lo.rs b/pineappl/tests/drell_yan_lo.rs index 7e61c8da8..0d2f43764 100644 --- a/pineappl/tests/drell_yan_lo.rs +++ b/pineappl/tests/drell_yan_lo.rs @@ -3,9 +3,10 @@ use float_cmp::assert_approx_eq; use lhapdf::Pdf; use num_complex::Complex; use pineappl::bin::BinRemapper; -use pineappl::grid::{Grid, GridOptFlags, Ntuple, Order}; +use pineappl::grid::{Grid, GridOptFlags, Ntuple}; use pineappl::lumi::LumiCache; use pineappl::lumi_entry; +use pineappl::order::Order; use pineappl::subgrid::{ExtraSubgridParams, Subgrid, SubgridEnum, SubgridParams}; use rand::Rng; use rand_pcg::Pcg64; diff --git a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index b74651fef..7a4812e26 100644 --- a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -57,8 +57,9 @@ use itertools::izip; use pineappl::bin::BinRemapper; -use pineappl::grid::{Grid, GridOptFlags, Ntuple, Order}; +use pineappl::grid::{Grid, GridOptFlags, Ntuple}; use pineappl::lumi::{LumiCache, LumiEntry}; +use pineappl::order::Order; use pineappl::subgrid::{ExtraSubgridParams, SubgridParams}; use std::collections::HashMap; 
use std::ffi::{CStr, CString}; diff --git a/pineappl_cli/src/export.rs b/pineappl_cli/src/export.rs index a8cc3f0c0..e762ec5f3 100644 --- a/pineappl_cli/src/export.rs +++ b/pineappl_cli/src/export.rs @@ -3,7 +3,8 @@ use super::{GlobalConfiguration, Subcommand}; use anyhow::{anyhow, Result}; use clap::builder::{PossibleValuesParser, TypedValueParser}; use clap::{Parser, ValueHint}; -use pineappl::grid::{Grid, Order}; +use pineappl::grid::Grid; +use pineappl::order::Order; use std::path::{Path, PathBuf}; use std::process::ExitCode; diff --git a/pineappl_cli/src/export/applgrid.rs b/pineappl_cli/src/export/applgrid.rs index ffd2eed62..377200c6c 100644 --- a/pineappl_cli/src/export/applgrid.rs +++ b/pineappl_cli/src/export/applgrid.rs @@ -2,7 +2,8 @@ use anyhow::{anyhow, bail, Result}; use cxx::{let_cxx_string, UniquePtr}; use float_cmp::approx_eq; use ndarray::{s, Axis}; -use pineappl::grid::{Grid, Order}; +use pineappl::grid::Grid; +use pineappl::order::Order; use pineappl::subgrid::{Mu2, Subgrid, SubgridParams}; use pineappl_applgrid::ffi::{self, grid}; use std::borrow::Cow; diff --git a/pineappl_cli/src/import/applgrid.rs b/pineappl_cli/src/import/applgrid.rs index 4e35847c4..6eb0e4ef5 100644 --- a/pineappl_cli/src/import/applgrid.rs +++ b/pineappl_cli/src/import/applgrid.rs @@ -1,7 +1,8 @@ use anyhow::Result; -use pineappl::grid::{Grid, Order}; +use pineappl::grid::Grid; use pineappl::import_only_subgrid::ImportOnlySubgridV2; use pineappl::lumi::LumiEntry; +use pineappl::order::Order; use pineappl::sparse_array3::SparseArray3; use pineappl::subgrid::{Mu2, SubgridParams}; use pineappl_applgrid::ffi::{self, grid}; diff --git a/pineappl_cli/src/import/fastnlo.rs b/pineappl_cli/src/import/fastnlo.rs index da4ce7685..9b4d90ff8 100644 --- a/pineappl_cli/src/import/fastnlo.rs +++ b/pineappl_cli/src/import/fastnlo.rs @@ -1,9 +1,10 @@ use anyhow::Result; use itertools::Itertools; use pineappl::bin::BinRemapper; -use pineappl::grid::{Grid, Order}; +use pineappl::grid::Grid; 
use pineappl::import_only_subgrid::ImportOnlySubgridV2; use pineappl::lumi::LumiEntry; +use pineappl::order::Order; use pineappl::sparse_array3::SparseArray3; use pineappl::subgrid::{Mu2, SubgridParams}; use pineappl_fastnlo::ffi::{ diff --git a/pineappl_cli/src/import/fktable.rs b/pineappl_cli/src/import/fktable.rs index 0ee32bf61..441e07eed 100644 --- a/pineappl_cli/src/import/fktable.rs +++ b/pineappl_cli/src/import/fktable.rs @@ -1,8 +1,9 @@ use anyhow::{anyhow, Context, Result}; use flate2::read::GzDecoder; -use pineappl::grid::{Grid, Order}; +use pineappl::grid::Grid; use pineappl::import_only_subgrid::ImportOnlySubgridV1; use pineappl::lumi_entry; +use pineappl::order::Order; use pineappl::sparse_array3::SparseArray3; use pineappl::subgrid::SubgridParams; use std::fs::File; diff --git a/pineappl_cli/src/read.rs b/pineappl_cli/src/read.rs index 66c94465e..17d27dd81 100644 --- a/pineappl_cli/src/read.rs +++ b/pineappl_cli/src/read.rs @@ -4,7 +4,7 @@ use anyhow::Result; use clap::{Args, Parser, ValueHint}; use itertools::Itertools; use pineappl::fk_table::FkTable; -use pineappl::grid::Order; +use pineappl::order::Order; use prettytable::{cell, row, Row}; use std::path::PathBuf; use std::process::ExitCode; diff --git a/pineappl_cli/src/write.rs b/pineappl_cli/src/write.rs index 8fbf3f41d..b9f1cff00 100644 --- a/pineappl_cli/src/write.rs +++ b/pineappl_cli/src/write.rs @@ -8,8 +8,8 @@ use clap::{ }; use pineappl::bin::BinRemapper; use pineappl::fk_table::{FkAssumptions, FkTable}; -use pineappl::grid::Order; use pineappl::lumi::LumiEntry; +use pineappl::order::Order; use pineappl::pids; use pineappl::pids::PidBasis; use std::fs; From 60c05e79ed77eaeac5c27918b79a0275ead22b1b Mon Sep 17 00:00:00 2001 From: t7phy Date: Fri, 24 May 2024 13:00:08 +0200 Subject: [PATCH 093/179] added Convolution enum and a method to output a vector of the enums --- pineappl/src/grid.rs | 94 ++++++++++++++++++++++++++++++-------------- 1 file changed, 64 insertions(+), 30 deletions(-) 
diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 4876d91c0..cfcd17fce 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -512,6 +512,15 @@ pub struct Grid { more_members: MoreMembers, } +#[derive(Eq, PartialEq)] +enum Convolution { + unpolPDF(i32), + polPDF(i32), + unpolFF(i32), + polFF(i32), + // we can add more types as they are needed +} + impl Grid { /// Constructor. #[must_use] @@ -1047,6 +1056,57 @@ impl Grid { Ok(()) } + /// Returns a vector containing the type of convolution + /// object (PDF or FF, Unpol or Pol) and the PDG ID of the hadron. + /// + /// Todo: will need to be modified to accomoodate upcoming v1. + pub fn convolutions(&self) -> Vec { + let mut convolutions = Vec::new(); + let map = self.key_values(); + if let Some(map) = map { + match ( + map.get("convolution_particle_1").and_then(|s| s.parse::().ok()), + map.get("convolution_particle_2").and_then(|s| s.parse::().ok()), + map.get("convolution_type_1"), + map.get("convolution_type_2"), + ) { + ( + Some(convolution_particle_1), + Some(convolution_particle_2), + Some(convolution_type_1), + Some(convolution_type_2), + ) => { + match convolution_type_1.as_str() { + "unpolPDF" => convolutions.push(Convolution::unpolPDF(convolution_particle_1)), + "polPDF" => convolutions.push(Convolution::polPDF(convolution_particle_1)), + "unpolFF" => convolutions.push(Convolution::unpolFF(convolution_particle_1)), + "polFF" => convolutions.push(Convolution::polFF(convolution_particle_1)), + _ => (), + } + match convolution_type_2.as_str() { + "unpolPDF" => convolutions.push(Convolution::unpolPDF(convolution_particle_2)), + "polPDF" => convolutions.push(Convolution::polPDF(convolution_particle_2)), + "unpolFF" => convolutions.push(Convolution::unpolFF(convolution_particle_2)), + "polFF" => convolutions.push(Convolution::polFF(convolution_particle_2)), + _ => (), + } + } + (None, None, None, None) => { + // assumes old format + map.get("initial_state_1").and_then(|s| s.parse::().ok()), + 
map.get("initial_state_2").and_then(|s| s.parse::().ok()), + convolutions.push(Convolution::unpolPDF(initial_state_1)); + convolutions.push(Convolution::unpolPDF(initial_state_2)); + } + _ => { + // TODO: if only some of the metadata is set, we should consider this an error + todo!(); + } + } + } + convolutions + } + fn increase_shape(&mut self, new_dim: &(usize, usize, usize)) { let old_dim = self.subgrids.raw_dim().into_pattern(); let mut new_subgrids = Array3::from_shape_simple_fn( @@ -1389,37 +1449,11 @@ impl Grid { } fn symmetrize_channels(&mut self) { - let map = self.key_values(); - if let Some(map) = map { - match ( - map.get("convolution_particle_1"), - map.get("convolution_particle_2"), - map.get("convolution_type_1"), - map.get("convolution_type_2"), - ) { - ( - Some(convolution_particle_1), - Some(convolution_particle_2), - Some(convolution_type_1), - Some(convolution_type_2), - ) => { - if convolution_particle_1 != convolution_particle_2 - || convolution_type_1 != convolution_type_2 - { - return; - } - } - (None, None, None, None) => {} - _ => { - // TODO: if only some of the metadata is set, we should consider this an error - todo!(); - } - } - if map["initial_state_1"] != map["initial_state_2"] { - return; - } + let convolutions = self.convolutions(); + if convolutions.get(0) != convolutions.get(1) { + return; } - + let mut indices: Vec = (0..self.lumi.len()).rev().collect(); while let Some(index) = indices.pop() { From 5572c36ca29c666752f33188b61adc460f6414fd Mon Sep 17 00:00:00 2001 From: t7phy Date: Fri, 24 May 2024 13:13:09 +0200 Subject: [PATCH 094/179] fix some mistakes of last commit --- pineappl/src/grid.rs | 55 ++++++++++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 20 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index cfcd17fce..433057939 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -513,11 +513,11 @@ pub struct Grid { } #[derive(Eq, PartialEq)] -enum Convolution { - 
unpolPDF(i32), - polPDF(i32), - unpolFF(i32), - polFF(i32), +pub enum Convolution { + UnpolPDF(i32), + PolPDF(i32), + UnpolFF(i32), + PolFF(i32), // we can add more types as they are needed } @@ -1065,8 +1065,10 @@ impl Grid { let map = self.key_values(); if let Some(map) = map { match ( - map.get("convolution_particle_1").and_then(|s| s.parse::().ok()), - map.get("convolution_particle_2").and_then(|s| s.parse::().ok()), + map.get("convolution_particle_1") + .and_then(|s| s.parse::().ok()), + map.get("convolution_particle_2") + .and_then(|s| s.parse::().ok()), map.get("convolution_type_1"), map.get("convolution_type_2"), ) { @@ -1077,26 +1079,39 @@ impl Grid { Some(convolution_type_2), ) => { match convolution_type_1.as_str() { - "unpolPDF" => convolutions.push(Convolution::unpolPDF(convolution_particle_1)), - "polPDF" => convolutions.push(Convolution::polPDF(convolution_particle_1)), - "unpolFF" => convolutions.push(Convolution::unpolFF(convolution_particle_1)), - "polFF" => convolutions.push(Convolution::polFF(convolution_particle_1)), + "UnpolPDF" => { + convolutions.push(Convolution::UnpolPDF(convolution_particle_1)) + } + "PolPDF" => convolutions.push(Convolution::PolPDF(convolution_particle_1)), + "UnpolFF" => { + convolutions.push(Convolution::UnpolFF(convolution_particle_1)) + } + "PolFF" => convolutions.push(Convolution::PolFF(convolution_particle_1)), _ => (), } match convolution_type_2.as_str() { - "unpolPDF" => convolutions.push(Convolution::unpolPDF(convolution_particle_2)), - "polPDF" => convolutions.push(Convolution::polPDF(convolution_particle_2)), - "unpolFF" => convolutions.push(Convolution::unpolFF(convolution_particle_2)), - "polFF" => convolutions.push(Convolution::polFF(convolution_particle_2)), + "UnpolPDF" => { + convolutions.push(Convolution::UnpolPDF(convolution_particle_2)) + } + "PolPDF" => convolutions.push(Convolution::PolPDF(convolution_particle_2)), + "UnpolFF" => { + convolutions.push(Convolution::UnpolFF(convolution_particle_2)) + 
} + "PolFF" => convolutions.push(Convolution::PolFF(convolution_particle_2)), _ => (), } } (None, None, None, None) => { // assumes old format - map.get("initial_state_1").and_then(|s| s.parse::().ok()), - map.get("initial_state_2").and_then(|s| s.parse::().ok()), - convolutions.push(Convolution::unpolPDF(initial_state_1)); - convolutions.push(Convolution::unpolPDF(initial_state_2)); + if let (Some(initial_state_1), Some(initial_state_2)) = ( + map.get("initial_state_1") + .and_then(|s| s.parse::().ok()), + map.get("initial_state_2") + .and_then(|s| s.parse::().ok()), + ) { + convolutions.push(Convolution::UnpolPDF(initial_state_1)); + convolutions.push(Convolution::UnpolPDF(initial_state_2)); + } } _ => { // TODO: if only some of the metadata is set, we should consider this an error @@ -1453,7 +1468,7 @@ impl Grid { if convolutions.get(0) != convolutions.get(1) { return; } - + let mut indices: Vec = (0..self.lumi.len()).rev().collect(); while let Some(index) = indices.pop() { From 4921c782808cf473ce9473d533a368cb92ca4d69 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 24 May 2024 15:11:42 +0200 Subject: [PATCH 095/179] Add missing documentation for for struct `Convolution` --- pineappl/src/grid.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 433057939..9c3f21340 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -512,13 +512,22 @@ pub struct Grid { more_members: MoreMembers, } +/// Data type that indentifies different types of convolutions. #[derive(Eq, PartialEq)] pub enum Convolution { + /// No convolution. + None, + /// Unpolarized parton distribution function. The integer denotes the type of hadron with a PDG + /// MC ID. UnpolPDF(i32), + /// Polarized parton distribution function. The integer denotes the type of hadron with a PDG + /// MC ID. PolPDF(i32), + /// Unpolarized fragmentation function. 
The integer denotes the type of hadron with a PDG MC + /// ID. UnpolFF(i32), + /// Polarized fragmentation function. The integer denotes the type of hadron with a PDG MC ID. PolFF(i32), - // we can add more types as they are needed } impl Grid { From cc7fd091b01e355b606cf0d16233ac9a45a3333f Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 24 May 2024 15:18:22 +0200 Subject: [PATCH 096/179] Shorten implementation of `Grid::convolutions` and add error handling --- pineappl/src/grid.rs | 110 ++++++++++++++++++------------------------- 1 file changed, 47 insertions(+), 63 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 9c3f21340..054e033c8 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1065,70 +1065,54 @@ impl Grid { Ok(()) } - /// Returns a vector containing the type of convolution - /// object (PDF or FF, Unpol or Pol) and the PDG ID of the hadron. - /// - /// Todo: will need to be modified to accomoodate upcoming v1. + /// Return a vector containing the type of convolutions performed with this grid. 
+ #[must_use] pub fn convolutions(&self) -> Vec { - let mut convolutions = Vec::new(); - let map = self.key_values(); - if let Some(map) = map { - match ( - map.get("convolution_particle_1") - .and_then(|s| s.parse::().ok()), - map.get("convolution_particle_2") - .and_then(|s| s.parse::().ok()), - map.get("convolution_type_1"), - map.get("convolution_type_2"), - ) { - ( - Some(convolution_particle_1), - Some(convolution_particle_2), - Some(convolution_type_1), - Some(convolution_type_2), - ) => { - match convolution_type_1.as_str() { - "UnpolPDF" => { - convolutions.push(Convolution::UnpolPDF(convolution_particle_1)) - } - "PolPDF" => convolutions.push(Convolution::PolPDF(convolution_particle_1)), - "UnpolFF" => { - convolutions.push(Convolution::UnpolFF(convolution_particle_1)) - } - "PolFF" => convolutions.push(Convolution::PolFF(convolution_particle_1)), - _ => (), - } - match convolution_type_2.as_str() { - "UnpolPDF" => { - convolutions.push(Convolution::UnpolPDF(convolution_particle_2)) - } - "PolPDF" => convolutions.push(Convolution::PolPDF(convolution_particle_2)), - "UnpolFF" => { - convolutions.push(Convolution::UnpolFF(convolution_particle_2)) + self.key_values().map_or_else( + // if there isn't any metadata, we assume that proton-PDFs are used + || vec![Convolution::UnpolPDF(2212), Convolution::UnpolPDF(2212)], + |kv| { + // the current file format only supports exactly two convolutions + (1..=2) + .map(|index| { + match ( + kv.get(&format!("convolution_particle_{index}")) + .map(|s| s.parse::()), + kv.get(&format!("convolution_type_{index}")) + .map(String::as_str), + ) { + (_, Some("None")) => Convolution::None, + (Some(Ok(pid)), Some("UnpolPDF")) => Convolution::UnpolPDF(pid), + (Some(Ok(pid)), Some("PolPDF")) => Convolution::PolPDF(pid), + (Some(Ok(pid)), Some("UnpolFF")) => Convolution::UnpolFF(pid), + (Some(Ok(pid)), Some("PolFF")) => Convolution::PolFF(pid), + (None, None) => { + match kv + .get(&format!("initial_state_{index}")) + .map(|s| 
s.parse::()) + { + Some(Ok(pid)) => Convolution::UnpolPDF(pid), + None => Convolution::UnpolPDF(2212), + Some(Err(_err)) => todo!(), + } + } + (None, Some(_)) => { + panic!("metadata 'convolution_type_{index}' is missing") + } + (Some(_), None) => { + panic!("metadata 'convolution_particle_{index}' is missing") + } + (Some(Ok(_)), Some(type_)) => { + panic!("metadata 'convolution_type_{index} = {type_}' is unknown") + } + (Some(Err(err)), Some(_)) => panic!( + "metadata 'convolution_particle_{index}' could not be parsed: {err}" + ), } - "PolFF" => convolutions.push(Convolution::PolFF(convolution_particle_2)), - _ => (), - } - } - (None, None, None, None) => { - // assumes old format - if let (Some(initial_state_1), Some(initial_state_2)) = ( - map.get("initial_state_1") - .and_then(|s| s.parse::().ok()), - map.get("initial_state_2") - .and_then(|s| s.parse::().ok()), - ) { - convolutions.push(Convolution::UnpolPDF(initial_state_1)); - convolutions.push(Convolution::UnpolPDF(initial_state_2)); - } - } - _ => { - // TODO: if only some of the metadata is set, we should consider this an error - todo!(); - } - } - } - convolutions + }) + .collect() + }, + ) } fn increase_shape(&mut self, new_dim: &(usize, usize, usize)) { @@ -1474,7 +1458,7 @@ impl Grid { fn symmetrize_channels(&mut self) { let convolutions = self.convolutions(); - if convolutions.get(0) != convolutions.get(1) { + if convolutions[0] != convolutions[1] { return; } From 71648352f2b682d430d15da84b8bcb1b6703d9ad Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 24 May 2024 15:27:34 +0200 Subject: [PATCH 097/179] Add missing panic documentation for `Grid::convolutions` --- pineappl/src/grid.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 054e033c8..03fbcc4ce 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1066,6 +1066,11 @@ impl Grid { } /// Return a vector containing the type of convolutions performed with this grid. 
+ /// + /// # Panics + /// + /// Panics if the metadata key--value pairs `convolution_particle_1` and `convolution_type_1`, + /// or `convolution_particle_2` and `convolution_type_2` are not correctly set. #[must_use] pub fn convolutions(&self) -> Vec { self.key_values().map_or_else( From 2e5aae11c750d457493a3c9c0bbe652f77564722 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 24 May 2024 15:52:18 +0200 Subject: [PATCH 098/179] Fix Python interface --- pineappl_py/src/grid.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 0bb5c8422..e8fefdc4b 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -1,5 +1,6 @@ use pineappl::evolution::{AlphasTable, OperatorInfo}; -use pineappl::grid::{Grid, Ntuple, Order}; +use pineappl::grid::{Grid, Ntuple}; +use pineappl::order::Order; use pineappl::lumi::LumiCache; From 86c25997f720802179d621c73a011e457d8edd78 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 24 May 2024 15:52:39 +0200 Subject: [PATCH 099/179] Update `CHANGELOG.md` and `CONTRIBUTING.md` --- CHANGELOG.md | 6 +++--- CONTRIBUTING.md | 11 +++++++++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d8a852e0..dc7569d4a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,8 +14,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Removed - removed support for Python 3.6 -- removed deprecated evolution methods `Grid::axes`, `Grid::convolute_eko` and - the structs `EkoInfo` and `GridAxes` +- removed the deprecated evolution methods `Grid::axes`, `Grid::convolute_eko` + and the structs `EkoInfo` and `GridAxes` ## [0.7.4] - 23/05/2024 @@ -25,7 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 which define a new interface supporting very large evolution kernels that have been introduced in EKO v0.13. 
This interface will replace `Grid::evolve` - added `--dont-sort` switch to `pineappl channels`, which displays the channel - sizes orderd by channel index (instead of channel size) + sizes ordered by channel index (instead of channel size) - added `Grid::rotate_pid_basis` and `pineappl write --rotate-pid-basis`. This allows to change the meaning of the used particle IDs, and supported formats are PDG MC IDs and the evolution basis diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1504387ce..02d70f712 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,7 +3,9 @@ ## Rust - Before you commit, make sure that your code compiles with `cargo check` and - that it has been formatted properly; `cargo fmt` does that for you. + that it has been formatted properly; `cargo fmt` does that for you. Also + check if your changes introduce any new linter warnings by running `cargo + clippy` - Make sure to keep `CHANGELOG.md` up-to-date. - Make sure not to use Rust features newer than the specified minimum supported Rust Version (MSRV), which is documented in the [README](README.md). You can @@ -48,7 +50,12 @@ increasing the MSRV make sure to set it everywhere to the same value: - When you commit, make sure the commit message is written properly. This blogpost explains it nicely: . -- Whenever possible, prefer rebase over merge. +- Whenever you have unpushed local commits that are behind `origin/master`, use + `git pull --rebase` to rebase them +- When editing Github workflow files, use a separate branch, because usually + many commits are needed to get something working. 
When merging this branch + into `master` (or any other branch), squash-merge the commits; the exact + history in this case is not important ## Making a new release From 0085f70460095d07570e236577b6171dfe0fe027 Mon Sep 17 00:00:00 2001 From: t7phy Date: Fri, 24 May 2024 18:14:08 +0200 Subject: [PATCH 100/179] add None in conv vector (for dis grids) --- pineappl/src/grid.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 03fbcc4ce..281069b15 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1096,7 +1096,20 @@ impl Grid { .get(&format!("initial_state_{index}")) .map(|s| s.parse::()) { - Some(Ok(pid)) => Convolution::UnpolPDF(pid), + Some(Ok(pid)) => { + let condition = !self.lumi().iter().all(|entry| { + entry + .entry() + .iter() + .all(|&channels| channels[index - 1] == pid) + }); + + if condition { + Convolution::UnpolPDF(pid) + } else { + Convolution::None(pid) + } + } None => Convolution::UnpolPDF(2212), Some(Err(_err)) => todo!(), } From f665a0cafc1c8f03bfc503e8fa906f0f1b499313 Mon Sep 17 00:00:00 2001 From: t7phy Date: Fri, 24 May 2024 18:22:54 +0200 Subject: [PATCH 101/179] fix errors --- pineappl/src/grid.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 281069b15..5dc28b7ac 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1098,16 +1098,17 @@ impl Grid { { Some(Ok(pid)) => { let condition = !self.lumi().iter().all(|entry| { - entry - .entry() - .iter() - .all(|&channels| channels[index - 1] == pid) + entry.entry().iter().all(|&channels| match index { + 1 => channels.0 == pid, + 2 => channels.1 == pid, + _ => false, + }) }); if condition { Convolution::UnpolPDF(pid) } else { - Convolution::None(pid) + Convolution::None } } None => Convolution::UnpolPDF(2212), From 94d6d03fa8c9204a9f0f392f55ef090a2fbdc310 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 26 
May 2024 08:11:22 +0200 Subject: [PATCH 102/179] Fix doctests --- pineappl/src/order.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pineappl/src/order.rs b/pineappl/src/order.rs index ef64dda6e..15e93117b 100644 --- a/pineappl/src/order.rs +++ b/pineappl/src/order.rs @@ -126,7 +126,7 @@ impl Order { /// - the mixed NNLO QCD—EW. /// /// ```rust - /// use pineappl::grid::Order; + /// use pineappl::order::Order; /// /// let orders = [ /// Order::new(0, 2, 0, 0), // LO : alpha^2 @@ -157,7 +157,7 @@ impl Order { /// `true`: /// /// ```rust - /// use pineappl::grid::Order; + /// use pineappl::order::Order; /// /// let orders = [ /// Order::new(0, 2, 0, 0), // LO : alpha^2 @@ -174,7 +174,7 @@ impl Order { /// the selection for different LOs: /// /// ```rust - /// use pineappl::grid::Order; + /// use pineappl::order::Order; /// /// let orders = [ /// Order::new(2, 0, 0, 0), // LO QCD : alphas^2 From df53796bb8cf2233b6529f5e90717bdfcda84942 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 26 May 2024 08:34:28 +0200 Subject: [PATCH 103/179] Make unexpected branches panic --- pineappl/src/grid.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 5dc28b7ac..27f2f461a 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1101,7 +1101,7 @@ impl Grid { entry.entry().iter().all(|&channels| match index { 1 => channels.0 == pid, 2 => channels.1 == pid, - _ => false, + _ => unreachable!(), }) }); @@ -1112,7 +1112,7 @@ impl Grid { } } None => Convolution::UnpolPDF(2212), - Some(Err(_err)) => todo!(), + Some(Err(err)) => panic!("metadata 'initial_state_{index}' could not be parsed: {err}"), } } (None, Some(_)) => { From de6d9a3e0a10f23bce031c37917222420ebd581a Mon Sep 17 00:00:00 2001 From: t7phy Date: Sun, 26 May 2024 09:58:03 +0200 Subject: [PATCH 104/179] remove usage of Grid::has_pdf1/2 --- pineappl/src/grid.rs | 20 +++++++++++++++++--- 1 file changed, 17 
insertions(+), 3 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 0f742d9ea..c018168a5 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1281,8 +1281,14 @@ impl Grid { pub fn evolve_info(&self, order_mask: &[bool]) -> EvolveInfo { use super::evolution::EVOLVE_INFO_TOL_ULPS; - let has_pdf1 = self.has_pdf1(); - let has_pdf2 = self.has_pdf2(); + let has_pdf1 = self + .convolutions() + .get(0) + .map_or(false, |conv| !matches!(conv, Convolution::None)); + let has_pdf2 = self + .convolutions() + .get(1) + .map_or(false, |conv| !matches!(conv, Convolution::None)); let mut ren1 = Vec::new(); let mut fac1 = Vec::new(); @@ -1425,7 +1431,15 @@ impl Grid { let view = operator.view(); - let (subgrids, lumi) = if self.has_pdf1() && self.has_pdf2() { + let (subgrids, lumi) = if self + .convolutions() + .get(0) + .map_or(false, |conv| !matches!(conv, Convolution::None)) + && self + .convolutions() + .get(1) + .map_or(false, |conv| !matches!(conv, Convolution::None)) + { evolution::evolve_slice_with_two(self, &view, &info, order_mask, xi, alphas_table) } else { evolution::evolve_slice_with_one(self, &view, &info, order_mask, xi, alphas_table) From 3a9a787d71f08b255682bb507c8e98d533472e57 Mon Sep 17 00:00:00 2001 From: t7phy Date: Sun, 26 May 2024 10:26:05 +0200 Subject: [PATCH 105/179] make has_pdf check simpler and remove has_pdf and Intial_state methods --- pineappl/src/grid.rs | 76 +++----------------------------------------- 1 file changed, 4 insertions(+), 72 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index c018168a5..4139ecb06 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1281,14 +1281,8 @@ impl Grid { pub fn evolve_info(&self, order_mask: &[bool]) -> EvolveInfo { use super::evolution::EVOLVE_INFO_TOL_ULPS; - let has_pdf1 = self - .convolutions() - .get(0) - .map_or(false, |conv| !matches!(conv, Convolution::None)); - let has_pdf2 = self - .convolutions() - .get(1) - .map_or(false, |conv| 
!matches!(conv, Convolution::None)); + let has_pdf1 = self.convolutions()[0] != Convolution::None; + let has_pdf2 = self.convolutions()[1] != Convolution::None; let mut ren1 = Vec::new(); let mut fac1 = Vec::new(); @@ -1431,14 +1425,8 @@ impl Grid { let view = operator.view(); - let (subgrids, lumi) = if self - .convolutions() - .get(0) - .map_or(false, |conv| !matches!(conv, Convolution::None)) - && self - .convolutions() - .get(1) - .map_or(false, |conv| !matches!(conv, Convolution::None)) + let (subgrids, lumi) = if self.convolutions()[0] != Convolution::None + && self.convolutions()[1] != Convolution::None { evolution::evolve_slice_with_two(self, &view, &info, order_mask, xi, alphas_table) } else { @@ -1676,62 +1664,6 @@ impl Grid { }) .collect(); } - - /// Returns `true` if the first initial state needs a convolution, `false` otherwise. - #[must_use] - pub fn has_pdf1(&self) -> bool { - let initial_state_1 = self.initial_state_1(); - - !self - .lumi() - .iter() - .all(|entry| entry.entry().iter().all(|&(a, _, _)| a == initial_state_1)) - } - - /// Returns `true` if the second initial state needs a convolution, `false` otherwise. - #[must_use] - pub fn has_pdf2(&self) -> bool { - let initial_state_2 = self.initial_state_2(); - - !self - .lumi() - .iter() - .all(|entry| entry.entry().iter().all(|&(_, b, _)| b == initial_state_2)) - } - - /// Returns the particle identifier of the first initial state. This is usually but not always - /// a proton, which is represented by the PDG ID `2212`. - /// - /// # Panics - /// - /// TODO - #[must_use] - pub fn initial_state_1(&self) -> i32 { - self.key_values() - .map_or(Some("2212"), |kv| { - kv.get("initial_state_1").map(String::as_str) - }) - .map(str::parse) - .unwrap() - .unwrap() - } - - /// Returns the particle identifier of the second initial state. This is usually but not always - /// a proton, which is represented by the PDG ID `2212`. 
- /// - /// # Panics - /// - /// TODO - #[must_use] - pub fn initial_state_2(&self) -> i32 { - self.key_values() - .map_or(Some("2212"), |kv| { - kv.get("initial_state_2").map(String::as_str) - }) - .map(str::parse) - .unwrap() - .unwrap() - } } #[cfg(test)] From e232e67e061b1f30facd3ab07e907bb1c0f77634 Mon Sep 17 00:00:00 2001 From: t7phy Date: Sun, 26 May 2024 10:38:14 +0200 Subject: [PATCH 106/179] remove has_pdf1/2 usage from evolution and fk_table --- pineappl/src/evolution.rs | 4 ++-- pineappl/src/fk_table.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pineappl/src/evolution.rs b/pineappl/src/evolution.rs index ed7cad74a..a033c6593 100644 --- a/pineappl/src/evolution.rs +++ b/pineappl/src/evolution.rs @@ -1,6 +1,6 @@ //! Supporting classes and functions for [`Grid::evolve`]. -use super::grid::{Grid, GridError}; +use super::grid::{Convolution, Grid, GridError}; use super::import_only_subgrid::ImportOnlySubgridV2; use super::lumi::LumiEntry; use super::lumi_entry; @@ -408,7 +408,7 @@ pub(crate) fn evolve_slice_with_one( alphas_table: &AlphasTable, ) -> Result<(Array3, Vec), GridError> { let gluon_has_pid_zero = gluon_has_pid_zero(grid); - let has_pdf1 = grid.has_pdf1(); + let has_pdf1 = grid.convolutions()[0] != Convolution::None; let (pid_indices, pids) = pid_slices(operator, info, gluon_has_pid_zero, &|pid| { grid.lumi() diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 23580cc1f..8fc4ce518 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -1,6 +1,6 @@ //! Provides the [`FkTable`] type. 
-use super::grid::{Grid, GridError}; +use super::grid::{Convolution, Grid, GridError}; use super::lumi::LumiCache; use super::order::Order; use super::subgrid::Subgrid; @@ -150,8 +150,8 @@ impl FkTable { /// TODO #[must_use] pub fn table(&self) -> Array4 { - let has_pdf1 = self.grid.has_pdf1(); - let has_pdf2 = self.grid.has_pdf2(); + let has_pdf1 = self.grid.convolutions()[0] != Convolution::None; + let has_pdf2 = self.grid.convolutions()[0] != Convolution::None; let x_grid = self.x_grid(); let mut result = Array4::zeros(( From 8de726d5fb0e1bf5e419c3868de46815f67b768d Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 27 May 2024 16:23:45 +0200 Subject: [PATCH 107/179] Fix bug in commit e232e67 --- pineappl/src/fk_table.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 8fc4ce518..9379fd273 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -151,7 +151,7 @@ impl FkTable { #[must_use] pub fn table(&self) -> Array4 { let has_pdf1 = self.grid.convolutions()[0] != Convolution::None; - let has_pdf2 = self.grid.convolutions()[0] != Convolution::None; + let has_pdf2 = self.grid.convolutions()[1] != Convolution::None; let x_grid = self.x_grid(); let mut result = Array4::zeros(( From d7fc83151a79334c3b9ba1a25e97f07aa48ff828 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 27 May 2024 16:23:06 +0200 Subject: [PATCH 108/179] Replace remaining instances of `initial_state_1/2` metadata usage --- pineappl/src/evolution.rs | 6 +-- pineappl/src/fk_table.rs | 6 +-- pineappl/src/grid.rs | 73 +++++++++++++++++++++++------ pineappl/src/lumi.rs | 57 ++++++++++------------ pineappl_cli/src/export/applgrid.rs | 6 +-- pineappl_cli/src/import/applgrid.rs | 7 ++- pineappl_cli/src/import/fastnlo.rs | 40 ++++++++++++---- pineappl_cli/src/import/fktable.rs | 16 +++---- pineappl_cli/src/plot.rs | 5 +- pineappl_cli/src/write.rs | 10 ++-- pineappl_cli/tests/import.rs | 8 ++-- 11 
files changed, 143 insertions(+), 91 deletions(-) diff --git a/pineappl/src/evolution.rs b/pineappl/src/evolution.rs index a033c6593..6ed9bbc5b 100644 --- a/pineappl/src/evolution.rs +++ b/pineappl/src/evolution.rs @@ -499,10 +499,10 @@ pub(crate) fn evolve_slice_with_one( })); } - let pid = if has_pdf1 { - grid.initial_state_2() + let pid = if grid.convolutions()[0] != Convolution::None { + grid.lumi()[0].entry()[0].1 } else { - grid.initial_state_1() + grid.lumi()[0].entry()[0].0 }; Ok(( diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 9379fd273..de59dd3c3 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -410,11 +410,7 @@ impl TryFrom for FkTable { } if let Some(key_values) = grid.key_values() { - let keys = vec![ - "initial_state_1".to_owned(), - "initial_state_2".to_owned(), - "lumi_id_types".to_owned(), - ]; + let keys = vec!["lumi_id_types".to_owned()]; for key in keys { if !key_values.contains_key(&key) { diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 4139ecb06..26f82896d 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -155,8 +155,10 @@ impl Mmv3 { .to_owned(), ), // by default we assume there are protons in the initial state - ("initial_state_1".to_owned(), "2212".to_owned()), - ("initial_state_2".to_owned(), "2212".to_owned()), + ("convolution_particle_1".to_owned(), "2212".to_owned()), + ("convolution_type_1".to_owned(), "UnpolPDF".to_owned()), + ("convolution_particle_2".to_owned(), "2212".to_owned()), + ("convolution_type_2".to_owned(), "UnpolPDF".to_owned()), ] .iter() .cloned() @@ -223,8 +225,9 @@ pub struct Grid { } /// Data type that indentifies different types of convolutions. -#[derive(Eq, PartialEq)] +#[derive(Debug, Eq, PartialEq)] pub enum Convolution { + // TODO: eventually get rid of this value /// No convolution. None, /// Unpolarized parton distribution function. 
The integer denotes the type of hadron with a PDG @@ -240,6 +243,32 @@ pub enum Convolution { PolFF(i32), } +impl Convolution { + /// Return the convolution if the PID is charged conjugated. + pub fn cc(&self) -> Convolution { + match *self { + Convolution::None => Convolution::None, + Convolution::UnpolPDF(pid) => { + Convolution::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)) + } + Convolution::PolPDF(pid) => Convolution::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)), + Convolution::UnpolFF(pid) => Convolution::UnpolFF(pids::charge_conjugate_pdg_pid(pid)), + Convolution::PolFF(pid) => Convolution::PolFF(pids::charge_conjugate_pdg_pid(pid)), + } + } + + /// Return the PID of the convolution if it has any. + pub fn pid(&self) -> Option { + match *self { + Convolution::None => None, + Convolution::UnpolPDF(pid) + | Convolution::PolPDF(pid) + | Convolution::UnpolFF(pid) + | Convolution::PolFF(pid) => Some(pid), + } + } +} + impl Grid { /// Constructor. #[must_use] @@ -845,6 +874,24 @@ impl Grid { ) } + /// Set the convolution type for this grid for the corresponding `index`. 
+ pub fn set_convolution(&mut self, index: usize, convolution: Convolution) { + // remove outdated metadata + self.key_values_mut() + .remove(&format!("initial_state_{}", index + 1)); + + let (type_, particle) = match convolution { + Convolution::UnpolPDF(pid) => ("UnpolPDF".to_owned(), pid.to_string()), + Convolution::PolPDF(pid) => ("PolPDF".to_owned(), pid.to_string()), + Convolution::UnpolFF(pid) => ("UnpolFF".to_owned(), pid.to_string()), + Convolution::PolFF(pid) => ("PolFF".to_owned(), pid.to_string()), + Convolution::None => ("None".to_owned(), String::new()), + }; + + self.set_key_value(&format!("convolution_type_{}", index + 1), &type_); + self.set_key_value(&format!("convolution_particle_{}", index + 1), &particle); + } + fn increase_shape(&mut self, new_dim: &(usize, usize, usize)) { let old_dim = self.subgrids.raw_dim().into_pattern(); let mut new_subgrids = Array3::from_shape_simple_fn( @@ -1876,7 +1923,7 @@ mod tests { // TODO: convolute_subgrid, merge_bins, subgrid, set_subgrid #[test] - fn grid_key_value() { + fn grid_convolutions() { let mut grid = Grid::new( vec![lumi_entry![21, 21, 1.0]], vec![Order { @@ -1889,22 +1936,18 @@ mod tests { SubgridParams::default(), ); + // by default we assume unpolarized proton PDFs are used assert_eq!( - grid.key_values().unwrap().get("initial_state_1").unwrap(), - "2212" + grid.convolutions(), + [Convolution::UnpolPDF(2212), Convolution::UnpolPDF(2212)] ); - grid.key_values_mut() - .insert("initial_state_1".into(), "-2212".into()); - grid.set_key_value("initial_state_2", "-2212"); + grid.set_convolution(0, Convolution::UnpolPDF(-2212)); + grid.set_convolution(1, Convolution::UnpolPDF(-2212)); assert_eq!( - grid.key_values().unwrap().get("initial_state_1").unwrap(), - "-2212" - ); - assert_eq!( - grid.key_values().unwrap().get("initial_state_2").unwrap(), - "-2212" + grid.convolutions(), + [Convolution::UnpolPDF(-2212), Convolution::UnpolPDF(-2212)] ); } diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs 
index a8f727dc7..2a500d9e5 100644 --- a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -375,44 +375,35 @@ impl<'a> LumiCache<'a> { } pub(crate) fn setup(&mut self, grid: &Grid, xi: &[(f64, f64)]) -> Result<(), ()> { - // PDG identifiers of the initial states - let pdga = grid.key_values().map_or(2212, |kv| { - kv.get("initial_state_1") - .map_or(2212, |s| s.parse::().unwrap()) - }); - let pdgb = grid.key_values().map_or(2212, |kv| { - kv.get("initial_state_2") - .map_or(2212, |s| s.parse::().unwrap()) - }); - - // are the initial states hadrons? - let has_pdfa = !grid - .lumi() - .iter() - .all(|entry| entry.entry().iter().all(|&(a, _, _)| a == pdga)); - let has_pdfb = !grid - .lumi() - .iter() - .all(|entry| entry.entry().iter().all(|&(_, b, _)| b == pdgb)); + let convolutions = grid.convolutions(); + + // TODO: the following code only works with exactly two convolutions + assert_eq!(convolutions.len(), 2); // do we have to charge-conjugate the initial states? - let cc1 = if !has_pdfa { - 0 - } else if self.pdg1 == pdga { - 1 - } else if self.pdg1 == -pdga { - -1 + let cc1 = if let Some(pid) = convolutions[0].pid() { + if self.pdg1 == pid { + 1 + } else if self.pdg1 == pids::charge_conjugate_pdg_pid(pid) { + -1 + } else { + // TODO: return a proper error + return Err(()); + } } else { - return Err(()); - }; - let cc2 = if !has_pdfb { 0 - } else if self.pdg2 == pdgb { - 1 - } else if self.pdg2 == -pdgb { - -1 + }; + let cc2 = if let Some(pid) = convolutions[1].pid() { + if self.pdg2 == pid { + 1 + } else if self.pdg2 == pids::charge_conjugate_pdg_pid(pid) { + -1 + } else { + // TODO: return a proper error + return Err(()); + } } else { - return Err(()); + 0 }; // TODO: try to avoid calling clear diff --git a/pineappl_cli/src/export/applgrid.rs b/pineappl_cli/src/export/applgrid.rs index 377200c6c..edba6c118 100644 --- a/pineappl_cli/src/export/applgrid.rs +++ b/pineappl_cli/src/export/applgrid.rs @@ -2,7 +2,7 @@ use anyhow::{anyhow, bail, Result}; use 
cxx::{let_cxx_string, UniquePtr}; use float_cmp::approx_eq; use ndarray::{s, Axis}; -use pineappl::grid::Grid; +use pineappl::grid::{Convolution, Grid}; use pineappl::order::Order; use pineappl::subgrid::{Mu2, Subgrid, SubgridParams}; use pineappl_applgrid::ffi::{self, grid}; @@ -68,8 +68,8 @@ pub fn convert_into_applgrid( } let lumis = grid.lumi().len(); - let has_pdf1 = grid.has_pdf1(); - let has_pdf2 = grid.has_pdf2(); + let has_pdf1 = grid.convolutions()[0] != Convolution::None; + let has_pdf2 = grid.convolutions()[1] != Convolution::None; // TODO: check that PDG MC IDs are used diff --git a/pineappl_cli/src/import/applgrid.rs b/pineappl_cli/src/import/applgrid.rs index 6eb0e4ef5..b70939567 100644 --- a/pineappl_cli/src/import/applgrid.rs +++ b/pineappl_cli/src/import/applgrid.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use pineappl::grid::Grid; +use pineappl::grid::{Convolution, Grid}; use pineappl::import_only_subgrid::ImportOnlySubgridV2; use pineappl::lumi::LumiEntry; use pineappl::order::Order; @@ -122,8 +122,11 @@ pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, dis_pid: i32) -> Resul SubgridParams::default(), ); + // from APPLgrid alone we don't know what type of convolution we have + pgrid.set_convolution(0, Convolution::UnpolPDF(2212)); + if grid.isDIS() { - pgrid.set_key_value("initial_state_2", &dis_pid.to_string()); + pgrid.set_convolution(1, Convolution::None); } for bin in 0..grid.Nobs_internal() { diff --git a/pineappl_cli/src/import/fastnlo.rs b/pineappl_cli/src/import/fastnlo.rs index 9b4d90ff8..36cac3a91 100644 --- a/pineappl_cli/src/import/fastnlo.rs +++ b/pineappl_cli/src/import/fastnlo.rs @@ -1,7 +1,7 @@ use anyhow::Result; use itertools::Itertools; use pineappl::bin::BinRemapper; -use pineappl::grid::Grid; +use pineappl::grid::{Convolution, Grid}; use pineappl::import_only_subgrid::ImportOnlySubgridV2; use pineappl::lumi::LumiEntry; use pineappl::order::Order; @@ -111,6 +111,22 @@ fn convert_coeff_add_fix( SubgridParams::default(), ); 
+ // UNWRAP: shouldn't be larger than `2` + let npdf = usize::try_from(table_as_add_base.GetNPDF()).unwrap(); + assert!(npdf <= 2); + + for index in 0..2 { + grid.set_convolution( + index, + if index < npdf { + // TODO: how do we determined the PID/type of the convolution for fixed tables? + Convolution::UnpolPDF(2212) + } else { + Convolution::None + }, + ); + } + let total_scalenodes: usize = table.GetTotalScalenodes().try_into().unwrap(); for obs in 0..table_as_add_base.GetNObsBin() { @@ -248,15 +264,19 @@ fn convert_coeff_add_flex( ); let npdf = table_as_add_base.GetNPDF(); - let pdf_pdg1 = table.GetPDFPDG(0).to_string(); - let pdf_pdg2 = if npdf == 2 { - table.GetPDFPDG(1).to_string() - } else { - dis_pid.to_string() - }; - - grid.set_key_value("initial_state_1", &pdf_pdg1); - grid.set_key_value("initial_state_2", &pdf_pdg2); + assert!(npdf <= 2); + + for index in 0..2 { + grid.set_convolution( + // UNWRAP: index is smaller than 2 + index.try_into().unwrap(), + if index < npdf { + Convolution::UnpolPDF(table.GetPDFPDG(index)) + } else { + Convolution::None + }, + ); + } let rescale = 0.1_f64.powi(table.GetIXsectUnits() - ipub_units); diff --git a/pineappl_cli/src/import/fktable.rs b/pineappl_cli/src/import/fktable.rs index 441e07eed..85dc7ac0f 100644 --- a/pineappl_cli/src/import/fktable.rs +++ b/pineappl_cli/src/import/fktable.rs @@ -1,6 +1,6 @@ use anyhow::{anyhow, Context, Result}; use flate2::read::GzDecoder; -use pineappl::grid::Grid; +use pineappl::grid::{Convolution, Grid}; use pineappl::import_only_subgrid::ImportOnlySubgridV1; use pineappl::lumi_entry; use pineappl::order::Order; @@ -112,13 +112,13 @@ fn read_fktable(reader: impl BufRead, dis_pid: i32) -> Result { .key_values_mut() .insert("lumi_id_types".to_owned(), "evol".to_owned()); - fktable - .key_values_mut() - .insert("initial_state_1".to_owned(), "2212".to_owned()); - if !hadronic { - fktable - .key_values_mut() - .insert("initial_state_2".to_owned(), dis_pid.to_string()); + // legacy 
FK-tables only support unpolarized proton PDFs + fktable.set_convolution(0, Convolution::UnpolPDF(2212)); + + if hadronic { + fktable.set_convolution(1, Convolution::UnpolPDF(2212)); + } else { + fktable.set_convolution(1, Convolution::None); } grid = Some(fktable); diff --git a/pineappl_cli/src/plot.rs b/pineappl_cli/src/plot.rs index 2af5fcc6b..92cee08ef 100644 --- a/pineappl_cli/src/plot.rs +++ b/pineappl_cli/src/plot.rs @@ -5,6 +5,7 @@ use clap::builder::{PossibleValuesParser, TypedValueParser}; use clap::{Parser, ValueHint}; use itertools::Itertools; use ndarray::Axis; +use pineappl::grid::Convolution; use pineappl::lumi::LumiEntry; use pineappl::subgrid::Subgrid; use rayon::{prelude::*, ThreadPoolBuilder}; @@ -376,8 +377,8 @@ impl Subcommand for Opts { ( map_format_lumi( &grid.lumi()[lumi], - grid.has_pdf1(), - grid.has_pdf2(), + grid.convolutions()[0] != Convolution::None, + grid.convolutions()[1] != Convolution::None, ), helpers::convolve( &grid, diff --git a/pineappl_cli/src/write.rs b/pineappl_cli/src/write.rs index b9f1cff00..787709446 100644 --- a/pineappl_cli/src/write.rs +++ b/pineappl_cli/src/write.rs @@ -499,6 +499,7 @@ impl Subcommand for Opts { let cc1 = matches!(arg, OpsArg::Cc1(true)); let cc2 = matches!(arg, OpsArg::Cc2(true)); + // TODO: make this a member function of `Grid` let lumi_id_types = grid.key_values().map_or("pdg_mc_ids", |kv| { kv.get("lumi_id_types").map_or("pdg_mc_ids", Deref::deref) }); @@ -528,18 +529,13 @@ impl Subcommand for Opts { }) .collect(); - let mut initial_state_1 = grid.initial_state_1(); - let mut initial_state_2 = grid.initial_state_2(); - if cc1 { - initial_state_1 = pids::charge_conjugate_pdg_pid(initial_state_1); + grid.set_convolution(0, grid.convolutions()[0].cc()) } if cc2 { - initial_state_2 = pids::charge_conjugate_pdg_pid(initial_state_2); + grid.set_convolution(1, grid.convolutions()[1].cc()) } - grid.set_key_value("initial_state_1", &initial_state_1.to_string()); - grid.set_key_value("initial_state_2", 
&initial_state_2.to_string()); grid.set_lumis(lumis); } OpsArg::DedupChannels(ulps) => { diff --git a/pineappl_cli/tests/import.rs b/pineappl_cli/tests/import.rs index c5736f9e2..d73d7e5bb 100644 --- a/pineappl_cli/tests/import.rs +++ b/pineappl_cli/tests/import.rs @@ -837,7 +837,7 @@ fn import_hadronic_fktable() { use float_cmp::assert_approx_eq; use lhapdf::Pdf; use pineappl::fk_table::{FkAssumptions, FkTable}; - use pineappl::grid::Grid; + use pineappl::grid::{Convolution, Grid}; use pineappl::lumi::LumiCache; use std::fs::File; @@ -906,8 +906,10 @@ fn import_hadronic_fktable() { assert_eq!(fk_table.bin_dimensions(), 1); assert_eq!(fk_table.bin_left(0), [0.0]); assert_eq!(fk_table.bin_right(0), [1.0]); - assert_eq!(&fk_table.key_values().unwrap()["initial_state_1"], "2212"); - assert_eq!(&fk_table.key_values().unwrap()["initial_state_2"], "2212"); + assert_eq!( + fk_table.grid().convolutions(), + [Convolution::UnpolPDF(2212), Convolution::UnpolPDF(2212)] + ); let lumi = fk_table.lumi(); assert_eq!( lumi, From 4bc46c729066c4c4e7f2ca098e74b1a30a3aabc2 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 27 May 2024 17:53:46 +0200 Subject: [PATCH 109/179] Fix remaining bug in CAPI --- pineappl/src/grid.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 26f82896d..1c02cdc1a 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -154,11 +154,10 @@ impl Mmv3 { ) .to_owned(), ), - // by default we assume there are protons in the initial state - ("convolution_particle_1".to_owned(), "2212".to_owned()), - ("convolution_type_1".to_owned(), "UnpolPDF".to_owned()), - ("convolution_particle_2".to_owned(), "2212".to_owned()), - ("convolution_type_2".to_owned(), "UnpolPDF".to_owned()), + // by default we assume there are unpolarized protons in the initial state + // do not change these to the new metadata to not break backwards compatibility + ("initial_state_1".to_owned(), 
"2212".to_owned()), + ("initial_state_2".to_owned(), "2212".to_owned()), ] .iter() .cloned() From 49c05e22d1f7d304234569e9218790ff31fcb956 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 28 May 2024 09:53:59 +0200 Subject: [PATCH 110/179] Update `CHANGELOG.md` --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dc7569d4a..a4c7e8fd2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added + +- added new methods `Grid::convolutions` and `Grid::set_convolution` + ### Changed - moved `Order` and `ParseOrderError` to their own module `order` @@ -16,6 +20,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - removed support for Python 3.6 - removed the deprecated evolution methods `Grid::axes`, `Grid::convolute_eko` and the structs `EkoInfo` and `GridAxes` +- removed methods `Grid::has_pdf1`, `Grid::has_pdf2`, `Grid::initial_state_1` + and `Grid::initial_state_2` ## [0.7.4] - 23/05/2024 From ba94ab9cf1a300b7727c57ed4d05a87b86788f93 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 28 May 2024 09:55:42 +0200 Subject: [PATCH 111/179] Add more comments to implementation of `Grid::convolutions` --- pineappl/src/grid.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 1c02cdc1a..81bb5163e 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -813,12 +813,15 @@ impl Grid { #[must_use] pub fn convolutions(&self) -> Vec { self.key_values().map_or_else( - // if there isn't any metadata, we assume that proton-PDFs are used + // if there isn't any metadata, we assume two unpolarized proton-PDFs are used || vec![Convolution::UnpolPDF(2212), Convolution::UnpolPDF(2212)], |kv| { // the current file format only supports exactly two convolutions (1..=2) .map(|index| { + // if there 
are key-value pairs `convolution_particle_1` and + // `convolution_type_1` and the same with a higher index, we convert this + // metadata into `Convolution` match ( kv.get(&format!("convolution_particle_{index}")) .map(|s| s.parse::()), @@ -831,6 +834,7 @@ impl Grid { (Some(Ok(pid)), Some("UnpolFF")) => Convolution::UnpolFF(pid), (Some(Ok(pid)), Some("PolFF")) => Convolution::PolFF(pid), (None, None) => { + // if these key-value pairs are missing use the old metadata match kv .get(&format!("initial_state_{index}")) .map(|s| s.parse::()) From 738fa645b8b9fc6bc736c52b7622bd9c5b1ac1dc Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 28 May 2024 11:03:58 +0200 Subject: [PATCH 112/179] Fix documentation links --- pineappl/src/lib.rs | 2 +- pineappl/src/order.rs | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/pineappl/src/lib.rs b/pineappl/src/lib.rs index ae8dd0a13..51f5a8a26 100644 --- a/pineappl/src/lib.rs +++ b/pineappl/src/lib.rs @@ -21,7 +21,7 @@ //! [`Grid::orders()`]: grid::Grid::orders //! [`Subgrid`]: subgrid::Subgrid //! [`SubgridEnum`]: subgrid::SubgridEnum -//! [`Order`]: grid::Order +//! [`Order`]: order::Order //! //! ## Metadata //! diff --git a/pineappl/src/order.rs b/pineappl/src/order.rs index 15e93117b..614957834 100644 --- a/pineappl/src/order.rs +++ b/pineappl/src/order.rs @@ -112,6 +112,10 @@ impl Order { /// selects the LO QCD only, `max_as = 2` and `max_al = 0` the NLO QCD; setting `max_as = 3` /// and `max_al = 2` would select all NLOs, and the NNLO QCD. 
/// + /// [`Grid::convolve`]: super::grid::Grid::convolve + /// [`Grid::evolve`]: super::grid::Grid::evolve + /// [`Grid::evolve_info`]: super::grid::Grid::evolve_info + /// /// # Example /// /// In the case of Drell—Yan, there are the following orders: From 354a4125b04e937eaf814b2a9caddab6d1ba8e9c Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 28 May 2024 11:04:16 +0200 Subject: [PATCH 113/179] Remove `convolute_eko` left-over in Python interface --- pineappl_py/pineappl/grid.py | 60 ------------------------------------ 1 file changed, 60 deletions(-) diff --git a/pineappl_py/pineappl/grid.py b/pineappl_py/pineappl/grid.py index a1e4c208e..9f0e29a28 100644 --- a/pineappl_py/pineappl/grid.py +++ b/pineappl_py/pineappl/grid.py @@ -297,66 +297,6 @@ def convolute_with_two( xi, ) - def convolute_eko( - self, - operators, - mur2_grid, - alphas_values, - lumi_id_types="pdg_mc_ids", - order_mask=(), - xi=(1.0, 1.0), - ): - """Create an FKTable with the EKO. - - Convenience wrapper for :meth:`pineappl.pineappl.PyGrid.convolute_eko()`. - - Parameters - ---------- - operators : dict - EKO Output - mur2_grid : list[float] - renormalization scales - alphas_values : list[float] - alpha_s values associated to the renormalization scales - lumi_id_types : str - kind of lumi types (e.g. "pdg_mc_ids" for flavor basis, "evol" - for evolution basis) - order_mask : list(bool) - Mask for selecting specific orders. The value `True` means the corresponding order - is included. An empty list corresponds to all orders being enabled. - xi : (float, float) - A tuple with the scale variation factors that should be used. - The first entry of a tuple corresponds to the variation of - the renormalization scale, the second entry to the variation of the factorization - scale. If only results for the central scale are need the tuple should be - `(1.0, 1.0)`. 
- - Returns - ------ - PyFkTable : - raw grid as an FKTable - """ - operator_grid = np.array( - [op["operators"] for op in operators["Q2grid"].values()] - ) - q2grid = list(operators["Q2grid"].keys()) - return FkTable( - self.raw.convolute_eko( - operators["q2_ref"], - np.array(alphas_values, dtype=np.float64), - np.array(operators["targetpids"], dtype=np.int32), - np.array(operators["targetgrid"]), - np.array(operators["inputpids"], dtype=np.int32), - np.array(operators["inputgrid"]), - np.array(mur2_grid, dtype=np.float64), - np.array(q2grid, dtype=np.float64), - np.array(operator_grid), - lumi_id_types, - np.array(order_mask, dtype=bool), - xi, - ) - ) - def evolve( self, operators, From b171502992cc81e1580dd4cd5f83b04606e59741 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 28 May 2024 11:05:03 +0200 Subject: [PATCH 114/179] Remove Python 3.6 documentation leftover --- docs/installation.md | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/docs/installation.md b/docs/installation.md index 9c7d423a2..bd340e435 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -202,22 +202,6 @@ To install the Python interface, run This will not require any previous installation of Rust. For more documentation and more information see its [README](../pineappl_py/README.md). -### Python 3.6 - -Python 3.6 is at the end of its life since 2021 December, but we still support -its installation, which however is a bit trickier. First upgrade your -installation of `pip`: - - pip install --upgrade pip - -For this to work you may have to add the switch `--user` after `install`. Next, -make sure you call the newly upgraded `pip` as follows: - - pip install --ignore-requires-python pineappl - -The switch `--ignore-requires-python` is needed because for an unknown reason -the `requires-python` version is incorrectly determined by `pip`. - ## Rust You will need the Rust compiler and its build system `cargo`. 
If `cargo` is From 2a8a9912a29d81d8b4ca4906eeec8daf6f5f80ea Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 28 May 2024 10:27:50 +0200 Subject: [PATCH 115/179] Use `convolute` instead of `convolve` --- README.md | 2 +- docs/cli-reference.md | 2 +- docs/installation.md | 6 +- docs/interpolation-grids.md | 4 +- examples/cpp/Makefile | 4 +- examples/cpp/advanced-convolution.cpp | 12 ++-- .../{convolute-grid.cpp => convolve-grid.cpp} | 4 +- examples/cpp/fill-grid.cpp | 4 +- examples/cpp/output | 4 +- examples/fortran/dyaa.f90 | 2 +- examples/fortran/lhapdf_example.f90 | 12 ++-- examples/fortran/pineappl.f90 | 16 ++--- examples/fortran/test.f90 | 12 ++-- examples/object-oriented-cpp/dyaa.cpp | 4 +- examples/python/dyaa.py | 2 +- pineappl/src/empty_subgrid.rs | 4 +- pineappl/src/evolution.rs | 6 +- pineappl/src/fk_table.rs | 7 ++- pineappl/src/grid.rs | 10 +-- pineappl/src/import_only_subgrid.rs | 38 ++++++------ pineappl/src/lagrange_subgrid.rs | 21 +++---- pineappl/src/lumi.rs | 2 +- pineappl/src/ntuple_subgrid.rs | 10 +-- pineappl/src/order.rs | 2 +- pineappl/src/subgrid.rs | 2 +- pineappl/tests/drell_yan_lo.rs | 24 ++++---- pineappl_applgrid/src/applgrid.cpp | 2 +- pineappl_applgrid/src/applgrid.hpp | 2 +- pineappl_applgrid/src/lib.rs | 2 +- pineappl_capi/include/PineAPPL.hpp | 4 +- pineappl_capi/src/lib.rs | 61 +++++++++++++++++-- pineappl_cli/src/export.rs | 2 +- pineappl_cli/src/export/applgrid.rs | 4 +- pineappl_cli/src/helpers.rs | 4 +- pineappl_cli/src/import.rs | 2 +- pineappl_cli/src/import/applgrid.rs | 4 +- pineappl_cli/tests/import.rs | 14 ++--- pineappl_py/docs/source/recipes.rst | 4 +- pineappl_py/pineappl/grid.py | 10 +-- pineappl_py/src/fk_table.rs | 4 +- pineappl_py/src/grid.rs | 10 +-- pineappl_py/tests/test_fk_table.py | 6 +- pineappl_py/tests/test_grid.py | 14 ++--- pineappl_py/tests/test_sugrid.py | 8 +-- 44 files changed, 213 insertions(+), 160 deletions(-) rename examples/cpp/{convolute-grid.cpp => convolve-grid.cpp} (97%) diff 
--git a/README.md b/README.md index 98a55d222..ff54d9ba3 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ This repository contains programs, libraries and interfaces to read and write coupling]. PineAPPL grids are generated by Monte Carlo generators, and the grids in turn -can be convoluted with PDFs to produce tables and plots, such as the following +can be convolved with PDFs to produce tables and plots, such as the following one: ![plot](docs/NNPDF_DY_14TEV_40_PHENO.jpeg) diff --git a/docs/cli-reference.md b/docs/cli-reference.md index bb8a09b06..48b0f8292 100644 --- a/docs/cli-reference.md +++ b/docs/cli-reference.md @@ -32,7 +32,7 @@ order (`a2`) and the next-to-leading order QCD (`a2as1`). ## `PDFSET`: Specifying PDF members or entire PDF sets The parameter `PDFSET` that appears for all convolutional-type subcommands -(`channels`, `convolute`, etc.) must be one of the following strings: +(`channels`, `convolve`, etc.) must be one of the following strings: - `setname/member`: In this case `setname` must be a valid [LHAPDF] set name and `member` must be the member index. The index `0` denotes the central PDF, diff --git a/docs/installation.md b/docs/installation.md index bd340e435..14567e20b 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -168,16 +168,16 @@ inside it. ### Man pages Most of PineAPPL's help is available as `pineappl --help` and as `pineappl -convolute --help`, for example. However, the same information can also be +convolve --help`, for example. However, the same information can also be installed as man pages, which can then be used as either man pineappl - man pineappl-convolute + man pineappl-convolve or pineappl help - pineappl help convolute + pineappl help convolve For this to work first install the [CLI](#cli-pineappl-for-your-shell). 
Next, go to your local copy of PineAPPL's repository and create the man page diff --git a/docs/interpolation-grids.md b/docs/interpolation-grids.md index 45b450f92..1e7dd9e9f 100644 --- a/docs/interpolation-grids.md +++ b/docs/interpolation-grids.md @@ -4,9 +4,9 @@ This document explains what interpolation grids are and what PineAPPL is. ## What are interpolation grids? -Interpolation grids store theoretical predictions that are not convoluted with +Interpolation grids store theoretical predictions that are not convolved with PDFs yet. After the predictions have been generated once, the grids can be -convoluted with arbitrary PDFs in a fraction of a second. This is very +convolved with arbitrary PDFs in a fraction of a second. This is very advantageous for the following applications: - *PDF set dependence* of a prediction. Are the predictions with CT18, MSHT20 diff --git a/examples/cpp/Makefile b/examples/cpp/Makefile index 34060c5c5..e2786a32e 100644 --- a/examples/cpp/Makefile +++ b/examples/cpp/Makefile @@ -8,7 +8,7 @@ PROGRAMS = \ fill-custom-grid \ advanced-convolution \ advanced-filling \ - convolute-grid \ + convolve-grid \ display-channels \ display-orders \ merge-grids \ @@ -25,7 +25,7 @@ advanced-convolution: advanced-convolution.cpp advanced-filling: advanced-filling.cpp $(CXX) $(CXXFLAGS) $< $(PINEAPPL_DEPS) -o $@ -convolute-grid: convolute-grid.cpp +convolve-grid: convolve-grid.cpp $(CXX) $(CXXFLAGS) $< $(LHAPDF_DEPS) $(PINEAPPL_DEPS) -o $@ display-channels: display-channels.cpp diff --git a/examples/cpp/advanced-convolution.cpp b/examples/cpp/advanced-convolution.cpp index 55c9a957c..32359374a 100644 --- a/examples/cpp/advanced-convolution.cpp +++ b/examples/cpp/advanced-convolution.cpp @@ -78,10 +78,10 @@ int main(int argc, char* argv[]) { // `xfx1` and `alphas` are *proton* PDFs. 
If the grid contains cross sections of either a // proton-proton, proton-antiproton or antiproton-antiproton collision PineAPPL will perform the // necessary charge conjugations to yield the correct convolutions - pineappl_grid_convolute_with_one(grid, 2212, xfx1, alphas, pdf, order_mask.get(), + pineappl_grid_convolve_with_one(grid, 2212, xfx1, alphas, pdf, order_mask.get(), channel_mask.get(), xir, xif, dxsec1.data()); - // how does the grid know which PDFs it must be convoluted with? This is determined by the + // how does the grid know which PDFs it must be convolved with? This is determined by the // metadata keys `initial_state_1` and `initial_state_2`, which are by default set to `2212`, // the PDG MC ID for the proton. Let's change the second value to an antiproton: pineappl_grid_set_key_value(grid, "initial_state_2", "-2212"); @@ -90,8 +90,8 @@ int main(int argc, char* argv[]) { // this calculates the corresponding proton-antiproton differential cross sections. Since the // grid itself is unchanged, this change effectively means that for the second PDF the charge - // convoluted PDFs are used - pineappl_grid_convolute_with_one(grid, 2212, xfx1, alphas, pdf, order_mask.get(), + // convolved PDFs are used + pineappl_grid_convolve_with_one(grid, 2212, xfx1, alphas, pdf, order_mask.get(), channel_mask.get(), xir, xif, dxsec2.data()); // what if we have a collision where we actually need two PDFs? 
Let's simulate the collision of @@ -102,13 +102,13 @@ int main(int argc, char* argv[]) { // For proton-deuteron collisions we can't easily relate the PDFs and have to actually pass two // different PDFs, each with their ID of the particle they represent: - pineappl_grid_convolute_with_two(grid, 2212, xfx1, 1000010020, xfx2, alphas, pdf, + pineappl_grid_convolve_with_two(grid, 2212, xfx1, 1000010020, xfx2, alphas, pdf, order_mask.get(), channel_mask.get(), xir, xif, dxsec3.data()); std::vector dxsec4(bins); // test with both masks set to `nullptr` - pineappl_grid_convolute_with_two(grid, 2212, xfx1, 1000010020, xfx2, alphas, pdf, nullptr, + pineappl_grid_convolve_with_two(grid, 2212, xfx1, 1000010020, xfx2, alphas, pdf, nullptr, nullptr, xir, xif, dxsec4.data()); std::vector normalizations(bins); diff --git a/examples/cpp/convolute-grid.cpp b/examples/cpp/convolve-grid.cpp similarity index 97% rename from examples/cpp/convolute-grid.cpp rename to examples/cpp/convolve-grid.cpp index 9e0843fe4..547fb722e 100644 --- a/examples/cpp/convolute-grid.cpp +++ b/examples/cpp/convolve-grid.cpp @@ -68,11 +68,11 @@ int main(int argc, char* argv[]) { // parameter. The integer `2212` is the PDG MC id for a proton and signals and `xfx` is the PDF // of a proton. In this case we assume that both initial state hadrons' PDFs can derived from // that of a proton. If this isn't the case, for instance for a proton-lead collision, both PDFs - // must be given separately and the function `pineappl_grid_convolute_with_two` must be used. + // must be given separately and the function `pineappl_grid_convolve_with_two` must be used. // The parameters `order_mask` and `channel_mask` can be used to select specific orders and // channels, respectively. Using `xir` and `xif` the renormalization and factorization scales // can be varied around its central values, respectively. 
- pineappl_grid_convolute_with_one(grid, 2212, xfx, alphas, pdf, order_mask, + pineappl_grid_convolve_with_one(grid, 2212, xfx, alphas, pdf, order_mask, channel_mask, xir, xif, dxsec.data()); std::vector normalizations(bins); diff --git a/examples/cpp/fill-grid.cpp b/examples/cpp/fill-grid.cpp index adee1821f..2c27ed204 100644 --- a/examples/cpp/fill-grid.cpp +++ b/examples/cpp/fill-grid.cpp @@ -203,7 +203,7 @@ int main() { // determine the subgrid which are being filled pineappl_keyval_set_string(keyval, "subgrid_type", "LagrangeSubgrid"); - // set the PDG ids of hadrons whose PDFs should be used to convolute the grid with + // set the PDG ids of hadrons whose PDFs should be used to convolve the grid with pineappl_keyval_set_string(keyval, "initial_state_1", "2212"); pineappl_keyval_set_string(keyval, "initial_state_2", "2212"); @@ -249,7 +249,7 @@ int main() { std::cout << "Generated " << filename << " containing a a -> l+ l-.\n\n" "Try running (PDF sets must contain non-zero photon PDF):\n" - " - pineappl convolute " << filename << " NNPDF31_nnlo_as_0118_luxqed\n" + " - pineappl convolve " << filename << " NNPDF31_nnlo_as_0118_luxqed\n" " - pineappl --silence-lhapdf plot " << filename << " NNPDF31_nnlo_as_0118_luxqed MSHT20qed_nnlo > plot_script.py\n" " - pineappl --help\n"; diff --git a/examples/cpp/output b/examples/cpp/output index 89286a745..25f01afcf 100644 --- a/examples/cpp/output +++ b/examples/cpp/output @@ -1,13 +1,13 @@ Generated drell-yan-rap-ll.pineappl.lz4 containing a a -> l+ l-. Try running (PDF sets must contain non-zero photon PDF): - - pineappl convolute drell-yan-rap-ll.pineappl.lz4 NNPDF31_nnlo_as_0118_luxqed + - pineappl convolve drell-yan-rap-ll.pineappl.lz4 NNPDF31_nnlo_as_0118_luxqed - pineappl --silence-lhapdf plot drell-yan-rap-ll.pineappl.lz4 NNPDF31_nnlo_as_0118_luxqed MSHT20qed_nnlo > plot_script.py - pineappl --help Generated drell-yan-rap-ll-custom-grid.pineappl.lz4 containing a a -> l+ l-. 
Try running (PDF sets must contain non-zero photon PDF): - - pineappl convolute drell-yan-rap-ll-custom-grid.pineappl.lz4 NNPDF31_nnlo_as_0118_luxqed + - pineappl convolve drell-yan-rap-ll-custom-grid.pineappl.lz4 NNPDF31_nnlo_as_0118_luxqed - pineappl --silence-lhapdf plot drell-yan-rap-ll-custom-grid.pineappl.lz4 NNPDF31_nnlo_as_0118_luxqed MSHT20qed_nnlo > plot_script.py - pineappl --help idx p-p c#0 l#0 p-p~ c#0 l# p-d c#0 l#0 p-d dx diff --git a/examples/fortran/dyaa.f90 b/examples/fortran/dyaa.f90 index 595663e73..29c45e8c2 100644 --- a/examples/fortran/dyaa.f90 +++ b/examples/fortran/dyaa.f90 @@ -113,7 +113,7 @@ program dyaa print *, 'Generated DY-LO-AA.pineappl.lz4 containing a a -> l+ l-.' print *, '' print *, 'Try running (PDF sets must contain non-zero photon PDF):' - print *, ' - pineappl convolute DY-LO-AA.pineappl.lz4 NNPDF31_nnlo_as_0118_luxqed' + print *, ' - pineappl convolve DY-LO-AA.pineappl.lz4 NNPDF31_nnlo_as_0118_luxqed' print *, ' - pineappl --silence-lhapdf plot DY-LO-AA.pineappl.lz4 NNPDF31_nnlo_as_0118_luxqed MSHT20qed_nnlo > plot_script.py' print *, ' - pineappl --help' diff --git a/examples/fortran/lhapdf_example.f90 b/examples/fortran/lhapdf_example.f90 index a377e8ddf..9f835f820 100644 --- a/examples/fortran/lhapdf_example.f90 +++ b/examples/fortran/lhapdf_example.f90 @@ -27,19 +27,19 @@ program lhapdf_example call lhapdf_initpdfset_byname(0, "nCTEQ15_1_1") call lhapdf_initpdfset_byname(1, "nCTEQ15FullNuc_208_82") - ! calling pineappl_grid_convolute without any flags + ! calling pineappl_grid_convolve without any flags xfx => xfx_test1 alphas => alphas_test1 - write(*, *) "first pineappl_grid_convolute_with_one: " - write(*, *) pineappl_grid_convolute_with_one(grid, 2212, xfx, alphas, & + write(*, *) "first pineappl_grid_convolve_with_one: " + write(*, *) pineappl_grid_convolve_with_one(grid, 2212, xfx, alphas, & [.true., .true.], [.true., .true.], 1.0_dp, 1.0_dp) - ! 
calling pineappl_grid_convolute with two integer flags that are used in xfx_test2 and alphas_test2 to determine the set and member indices + ! calling pineappl_grid_convolve with two integer flags that are used in xfx_test2 and alphas_test2 to determine the set and member indices xfx => xfx_test2 alphas => alphas_test2 flags = [1, 0] - write(*, *) "second pineappl_grid_convolute_with_one: " - write(*, *) pineappl_grid_convolute_with_one(grid, 2212, xfx, alphas, & + write(*, *) "second pineappl_grid_convolve_with_one: " + write(*, *) pineappl_grid_convolve_with_one(grid, 2212, xfx, alphas, & [.true., .true.], [.true., .true.], 1.0_dp, 1.0_dp, c_loc(flags(1))) contains diff --git a/examples/fortran/pineappl.f90 b/examples/fortran/pineappl.f90 index e14aea92c..3dbd63da5 100644 --- a/examples/fortran/pineappl.f90 +++ b/examples/fortran/pineappl.f90 @@ -85,8 +85,8 @@ type (c_ptr) function grid_clone(grid) bind(c, name = 'pineappl_grid_clone') type (c_ptr), value :: grid end function - subroutine grid_convolute_with_one(grid, pdg_id, xfx, alphas, state, order_mask, lumi_mask, xi_ren, xi_fac, results) & - bind(c, name = 'pineappl_grid_convolute_with_one') + subroutine grid_convolve_with_one(grid, pdg_id, xfx, alphas, state, order_mask, lumi_mask, xi_ren, xi_fac, results) & + bind(c, name = 'pineappl_grid_convolve_with_one') use iso_c_binding type (c_ptr), value :: grid, state integer (c_int32_t), value :: pdg_id @@ -96,8 +96,8 @@ subroutine grid_convolute_with_one(grid, pdg_id, xfx, alphas, state, order_mask, real (c_double) :: results(*) end subroutine - subroutine grid_convolute_with_two(grid, pdg_id1, xfx1, pdg_id2, xfx2, alphas, state, order_mask, lumi_mask, & - xi_ren, xi_fac, results) bind(c, name = 'pineappl_grid_convolute_with_two') + subroutine grid_convolve_with_two(grid, pdg_id1, xfx1, pdg_id2, xfx2, alphas, state, order_mask, lumi_mask, & + xi_ren, xi_fac, results) bind(c, name = 'pineappl_grid_convolve_with_two') use iso_c_binding type (c_ptr), value :: grid, 
state integer (c_int32_t), value :: pdg_id1, pdg_id2 @@ -437,7 +437,7 @@ type (pineappl_grid) function pineappl_grid_clone(grid) pineappl_grid_clone = pineappl_grid(grid_clone(grid%ptr)) end function - function pineappl_grid_convolute_with_one(grid, pdg_id, xfx, alphas, order_mask, lumi_mask, xi_ren, xi_fac, state) result(res) + function pineappl_grid_convolve_with_one(grid, pdg_id, xfx, alphas, order_mask, lumi_mask, xi_ren, xi_fac, state) result(res) use iso_c_binding implicit none @@ -469,13 +469,13 @@ function pineappl_grid_convolute_with_one(grid, pdg_id, xfx, alphas, order_mask, state_ = c_null_ptr end if - call grid_convolute_with_one(grid%ptr, pdg_id, c_funloc(xfx), c_funloc(alphas), state_, & + call grid_convolve_with_one(grid%ptr, pdg_id, c_funloc(xfx), c_funloc(alphas), state_, & [(logical(order_mask(i), c_bool), i = 1, size(order_mask))], & [(logical(lumi_mask(i), c_bool), i = 1, size(lumi_mask))], & xi_ren, xi_fac, res) end function - function pineappl_grid_convolute_with_two(grid, pdg_id1, xfx1, pdg_id2, xfx2, alphas, & + function pineappl_grid_convolve_with_two(grid, pdg_id1, xfx1, pdg_id2, xfx2, alphas, & order_mask, lumi_mask, xi_ren, xi_fac, state) result(res) use iso_c_binding @@ -510,7 +510,7 @@ function pineappl_grid_convolute_with_two(grid, pdg_id1, xfx1, pdg_id2, xfx2, al state_ = c_null_ptr end if - call grid_convolute_with_two(grid%ptr, pdg_id1, c_funloc(xfx1), pdg_id2, c_funloc(xfx2), c_funloc(alphas), state_, & + call grid_convolve_with_two(grid%ptr, pdg_id1, c_funloc(xfx1), pdg_id2, c_funloc(xfx2), c_funloc(alphas), state_, & [(logical(order_mask(i), c_bool), i = 1, size(order_mask))], & [(logical(lumi_mask(i), c_bool), i = 1, size(lumi_mask))], & xi_ren, xi_fac, res) diff --git a/examples/fortran/test.f90 b/examples/fortran/test.f90 index 7936a07f0..95ed75032 100644 --- a/examples/fortran/test.f90 +++ b/examples/fortran/test.f90 @@ -136,18 +136,18 @@ program test_pineappl xfx2 => xfx2_test alphas => alphas_test - result = 
pineappl_grid_convolute_with_one(grid, 2212, xfx1, alphas, & + result = pineappl_grid_convolve_with_one(grid, 2212, xfx1, alphas, & [.true., .true.], [.true., .true.], 1.0_dp, 1.0_dp) if (any(result > 0 .neqv. [.true., .true., .false.])) then - write(*, *) "pineappl_grid_convolute_with_one(): ", result - error stop "error: pineappl_grid_convolute_with_one" + write(*, *) "pineappl_grid_convolve_with_one(): ", result + error stop "error: pineappl_grid_convolve_with_one" end if - result = pineappl_grid_convolute_with_two(grid, 2212, xfx1, 2212, xfx2, alphas, & + result = pineappl_grid_convolve_with_two(grid, 2212, xfx1, 2212, xfx2, alphas, & [.true., .true.], [.true., .true.], 1.0_dp, 1.0_dp) if (any(result < 0 .neqv. [.true., .true., .false.])) then - write(*, *) "pineappl_grid_convolute_with_two(): ", result - error stop "error: pineappl_grid_convolute_with_two" + write(*, *) "pineappl_grid_convolve_with_two(): ", result + error stop "error: pineappl_grid_convolve_with_two" end if call pineappl_keyval_delete(key_vals) diff --git a/examples/object-oriented-cpp/dyaa.cpp b/examples/object-oriented-cpp/dyaa.cpp index 2767f9b7d..4e29bb5ec 100644 --- a/examples/object-oriented-cpp/dyaa.cpp +++ b/examples/object-oriented-cpp/dyaa.cpp @@ -124,7 +124,7 @@ int main() { // perform a convolution of the grid with PDFs std::unique_ptr pdf (LHAPDF::mkPDF("NNPDF31_nlo_as_0118_luxqed", 0)); - std::vector dxsec = grid.convolute_with_one(2212, *pdf.get()); + std::vector dxsec = grid.convolve_with_one(2212, *pdf.get()); // print the results for (std::size_t j = 0; j != dxsec.size(); ++j) { @@ -144,7 +144,7 @@ int main() { std::printf("Generated %s containing a a -> l+ l-.\n\n" "Try running (PDF sets must contain non-zero photon PDF):\n" - " - pineappl convolute %s NNPDF31_nnlo_as_0118_luxqed\n" + " - pineappl convolve %s NNPDF31_nnlo_as_0118_luxqed\n" " - pineappl --silence-lhapdf plot %s NNPDF31_nnlo_as_0118_luxqed MSHT20qed_nnlo > plot_script.py\n" " - pineappl --help\n", 
filename.c_str(), filename.c_str(), filename.c_str()); } diff --git a/examples/python/dyaa.py b/examples/python/dyaa.py index 6811add4e..a9620b57c 100755 --- a/examples/python/dyaa.py +++ b/examples/python/dyaa.py @@ -159,7 +159,7 @@ def main(calls, pdfname, filename): pdf = lhapdf.mkPDF(pdfname, 0) pdg_id = int(pdf.set().get_entry('Particle')) # perform convolution - dxsec = grid.convolute_with_one(pdg_id, pdf.xfxQ2, pdf.alphasQ2) + dxsec = grid.convolve_with_one(pdg_id, pdf.xfxQ2, pdf.alphasQ2) for i in range(len(dxsec)): print(f"{bins[i]:.1f} {bins[i + 1]:.1f} {dxsec[i]:.3e}") diff --git a/pineappl/src/empty_subgrid.rs b/pineappl/src/empty_subgrid.rs index 641bf72c2..79640e655 100644 --- a/pineappl/src/empty_subgrid.rs +++ b/pineappl/src/empty_subgrid.rs @@ -11,7 +11,7 @@ use std::iter; pub struct EmptySubgridV1; impl Subgrid for EmptySubgridV1 { - fn convolute( + fn convolve( &self, _: &[f64], _: &[f64], @@ -82,7 +82,7 @@ mod tests { #[test] fn create_empty() { let mut subgrid = EmptySubgridV1; - assert_eq!(subgrid.convolute(&[], &[], &[], &mut |_, _, _| 0.0), 0.0,); + assert_eq!(subgrid.convolve(&[], &[], &[], &mut |_, _, _| 0.0), 0.0,); assert!(subgrid.is_empty()); subgrid.merge(&mut EmptySubgridV1.into(), false); subgrid.scale(2.0); diff --git a/pineappl/src/evolution.rs b/pineappl/src/evolution.rs index 6ed9bbc5b..480bcb8c3 100644 --- a/pineappl/src/evolution.rs +++ b/pineappl/src/evolution.rs @@ -45,12 +45,12 @@ pub struct EvolveInfo { /// /// The EKO may convert a `Grid` from a basis given by the particle identifiers [`pids1`] to a /// possibly different basis given by [`pids0`]. This basis must also be identified using -/// [`lumi_id_types`], which tells [`FkTable::convolute`] how to perform a convolution. The members +/// [`lumi_id_types`], which tells [`FkTable::convolve`] how to perform a convolution. The members /// [`ren1`] and [`alphas`] must be the strong couplings given at the respective renormalization /// scales. 
Finally, [`xir`] and [`xif`] can be used to vary the renormalization and factorization /// scales, respectively, around their central values. /// -/// [`FkTable::convolute`]: super::fk_table::FkTable::convolute +/// [`FkTable::convolve`]: super::fk_table::FkTable::convolve /// [`FkTable`]: super::fk_table::FkTable /// [`alphas`]: Self::alphas /// [`fac0`]: Self::fac0 @@ -104,7 +104,7 @@ pub struct OperatorInfo { /// The EKO slice may convert a `Grid` from a basis given by the particle identifiers `pids1` to a /// possibly different basis given by `pids0`. This basis must also be identified using /// [`lumi_id_types`](Self::lumi_id_types), which tells -/// [`FkTable::convolute`](super::fk_table::FkTable::convolute) how to perform a convolution. +/// [`FkTable::convolve`](super::fk_table::FkTable::convolve) how to perform a convolution. #[derive(Clone)] pub struct OperatorSliceInfo { /// Squared factorization scale of the `FkTable`. diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index de59dd3c3..aa364c1db 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -276,15 +276,16 @@ impl FkTable { self.grid.write_lz4(writer) } - /// Propagate convolute to grid - pub fn convolute( + /// Convolve the FK-table. This method has fewer arguments than [`Grid::convolve`], because + /// FK-tables have all orders merged together and do not support scale variations. + pub fn convolve( &self, lumi_cache: &mut LumiCache, bin_indices: &[usize], lumi_mask: &[bool], ) -> Vec { self.grid - .convolute(lumi_cache, &[], bin_indices, lumi_mask, &[(1.0, 1.0)]) + .convolve(lumi_cache, &[], bin_indices, lumi_mask, &[(1.0, 1.0)]) } /// Set a metadata key-value pair diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 81bb5163e..23c0a19e3 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -199,7 +199,7 @@ bitflags! { /// [`Self::OPTIMIZE_SUBGRID_TYPE`] to be active. 
const STATIC_SCALE_DETECTION = 0b10; /// If two channels differ by transposition of the two initial states and the functions - /// this grid is convoluted with are the same for both initial states, this will merge one + /// this grid is convolved with are the same for both initial states, this will merge one /// channel into the other, with the correct transpositions. const SYMMETRIZE_CHANNELS = 0b100; /// Remove all orders ([`Grid::orders`]), which do not contain any non-zero subgrids. @@ -371,7 +371,7 @@ impl Grid { /// # Panics /// /// TODO - pub fn convolute( + pub fn convolve( &self, lumi_cache: &mut LumiCache, order_mask: &[bool], @@ -420,7 +420,7 @@ impl Grid { lumi_cache.set_grids(&mu2_grid, &x1_grid, &x2_grid, xir, xif); let mut value = - subgrid.convolute(&x1_grid, &x2_grid, &mu2_grid, &mut |ix1, ix2, imu2| { + subgrid.convolve(&x1_grid, &x2_grid, &mu2_grid, &mut |ix1, ix2, imu2| { let x1 = x1_grid[ix1]; let x2 = x2_grid[ix2]; let mut lumi = 0.0; @@ -460,7 +460,7 @@ impl Grid { /// # Panics /// /// TODO - pub fn convolute_subgrid( + pub fn convolve_subgrid( &self, lumi_cache: &mut LumiCache, ord: usize, @@ -1923,7 +1923,7 @@ mod tests { assert_eq!(grid.orders().len(), 1); } - // TODO: convolute_subgrid, merge_bins, subgrid, set_subgrid + // TODO: convolve_subgrid, merge_bins, subgrid, set_subgrid #[test] fn grid_convolutions() { diff --git a/pineappl/src/import_only_subgrid.rs b/pineappl/src/import_only_subgrid.rs index b86addb3c..04624c09a 100644 --- a/pineappl/src/import_only_subgrid.rs +++ b/pineappl/src/import_only_subgrid.rs @@ -40,7 +40,7 @@ impl ImportOnlySubgridV1 { } impl Subgrid for ImportOnlySubgridV1 { - fn convolute( + fn convolve( &self, _: &[f64], _: &[f64], @@ -210,7 +210,7 @@ impl ImportOnlySubgridV2 { } impl Subgrid for ImportOnlySubgridV2 { - fn convolute( + fn convolve( &self, _: &[f64], _: &[f64], @@ -496,7 +496,7 @@ mod tests { let lumi = &mut (|ix1, ix2, _| x[ix1] * x[ix2]) as &mut dyn FnMut(usize, usize, usize) -> f64; - 
assert_eq!(grid1.convolute(&x, &x, &mu2, lumi), 0.228515625); + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 0.228515625); // create grid with transposed entries, but different q2 let mut grid2: SubgridEnum = ImportOnlySubgridV1::new( @@ -514,7 +514,7 @@ mod tests { } else { unreachable!(); } - assert_eq!(grid2.convolute(&x, &x, &mu2, lumi), 0.228515625); + assert_eq!(grid2.convolve(&x, &x, &mu2, lumi), 0.228515625); assert_eq!(grid2.indexed_iter().next(), Some(((0, 1, 7), 8.0))); assert_eq!(grid2.indexed_iter().nth(1), Some(((0, 2, 1), 1.0))); @@ -523,7 +523,7 @@ mod tests { grid1.merge(&mut grid2, false); - assert_eq!(grid1.convolute(&x, &x, &mu2, lumi), 2.0 * 0.228515625); + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); let mut grid1 = { let mut g = grid1.clone_empty(); @@ -534,10 +534,10 @@ mod tests { // the luminosity function is symmetric, so after symmetrization the result must be // unchanged grid1.symmetrize(); - assert_eq!(grid1.convolute(&x, &x, &mu2, lumi), 2.0 * 0.228515625); + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); grid1.scale(2.0); - assert_eq!(grid1.convolute(&x, &x, &mu2, lumi), 4.0 * 0.228515625); + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 4.0 * 0.228515625); assert_eq!( grid1.stats(), @@ -593,7 +593,7 @@ mod tests { let lumi = &mut (|ix1, ix2, _| x[ix1] * x[ix2]) as &mut dyn FnMut(usize, usize, usize) -> f64; - assert_eq!(grid1.convolute(&x, &x, &mu2, lumi), 0.228515625); + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 0.228515625); // create grid with transposed entries, but different q2 let mut grid2: SubgridEnum = ImportOnlySubgridV2::new( @@ -611,7 +611,7 @@ mod tests { } else { unreachable!(); } - assert_eq!(grid2.convolute(&x, &x, &mu2, lumi), 0.228515625); + assert_eq!(grid2.convolve(&x, &x, &mu2, lumi), 0.228515625); assert_eq!(grid2.indexed_iter().next(), Some(((0, 1, 7), 8.0))); assert_eq!(grid2.indexed_iter().nth(1), Some(((0, 2, 1), 1.0))); @@ -620,7 +620,7 @@ mod tests { 
grid1.merge(&mut grid2, false); - assert_eq!(grid1.convolute(&x, &x, &mu2, lumi), 2.0 * 0.228515625); + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); let mut grid1 = { let mut g = grid1.clone_empty(); @@ -631,10 +631,10 @@ mod tests { // the luminosity function is symmetric, so after symmetrization the result must be // unchanged grid1.symmetrize(); - assert_eq!(grid1.convolute(&x, &x, &mu2, lumi), 2.0 * 0.228515625); + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); grid1.scale(2.0); - assert_eq!(grid1.convolute(&x, &x, &mu2, lumi), 4.0 * 0.228515625); + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 4.0 * 0.228515625); assert_eq!( grid1.stats(), @@ -719,10 +719,10 @@ mod tests { let mu2 = lagrange.mu2_grid().to_vec(); let lumi = &mut (|_, _, _| 1.0) as &mut dyn FnMut(usize, usize, usize) -> f64; - let reference = lagrange.convolute(&x1, &x2, &mu2, lumi); + let reference = lagrange.convolve(&x1, &x2, &mu2, lumi); let imported = ImportOnlySubgridV2::from(&lagrange.into()); - let test = imported.convolute(&x1, &x2, &mu2, lumi); + let test = imported.convolve(&x1, &x2, &mu2, lumi); // make sure the conversion did not change the results assert_approx_eq!(f64, reference, test, ulps = 8); @@ -762,14 +762,14 @@ mod tests { } let lumi = &mut (|_, _, _| 1.0) as &mut dyn FnMut(usize, usize, usize) -> f64; - let result1 = grid1.convolute(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); - let result2 = grid2.convolute(&grid2.x1_grid(), &grid2.x2_grid(), &grid2.mu2_grid(), lumi); + let result1 = grid1.convolve(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); + let result2 = grid2.convolve(&grid2.x1_grid(), &grid2.x2_grid(), &grid2.mu2_grid(), lumi); let mut grid1: SubgridEnum = ImportOnlySubgridV2::from(&grid1.into()).into(); let mut grid2: SubgridEnum = ImportOnlySubgridV2::from(&grid2.into()).into(); - let result3 = grid1.convolute(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); - let result4 = 
grid2.convolute(&grid2.x1_grid(), &grid2.x2_grid(), &grid2.mu2_grid(), lumi); + let result3 = grid1.convolve(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); + let result4 = grid2.convolve(&grid2.x1_grid(), &grid2.x2_grid(), &grid2.mu2_grid(), lumi); // conversion from LangrangeSubgridV2 to ImportOnlySubgridV2 shouldn't change the results assert!((result3 / result1 - 1.0).abs() < 1e-13); @@ -777,7 +777,7 @@ mod tests { grid1.merge(&mut grid2, false); - let result5 = grid1.convolute(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); + let result5 = grid1.convolve(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); // merging the two grids should give the sum of the two results assert!((result5 / (result3 + result4) - 1.0).abs() < 1e-12); diff --git a/pineappl/src/lagrange_subgrid.rs b/pineappl/src/lagrange_subgrid.rs index 3dd361e2c..f3ccf2a5e 100644 --- a/pineappl/src/lagrange_subgrid.rs +++ b/pineappl/src/lagrange_subgrid.rs @@ -129,7 +129,7 @@ impl LagrangeSubgridV1 { } impl Subgrid for LagrangeSubgridV1 { - fn convolute( + fn convolve( &self, x1: &[f64], x2: &[f64], @@ -495,7 +495,7 @@ impl LagrangeSubgridV2 { } impl Subgrid for LagrangeSubgridV2 { - fn convolute( + fn convolve( &self, x1: &[f64], x2: &[f64], @@ -843,7 +843,7 @@ impl LagrangeSparseSubgridV1 { } impl Subgrid for LagrangeSparseSubgridV1 { - fn convolute( + fn convolve( &self, x1: &[f64], x2: &[f64], @@ -1097,8 +1097,7 @@ mod tests { let x2 = grid.x2_grid(); let mu2 = grid.mu2_grid(); - let reference = - grid.convolute(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + let reference = grid.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); let mut test = 0.0; @@ -1149,13 +1148,13 @@ mod tests { let mu2 = grid1.mu2_grid().into_owned(); let reference = - grid1.convolute(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + grid1.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); // merge filled grid 
into empty one grid2.merge(&mut grid1.into(), false); assert!(!grid2.is_empty()); - let merged = grid2.convolute(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + let merged = grid2.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); assert_approx_eq!(f64, reference, merged, ulps = 8); @@ -1186,7 +1185,7 @@ mod tests { grid2.merge(&mut grid3.into(), false); - let merged = grid2.convolute(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + let merged = grid2.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); assert_approx_eq!(f64, 2.0 * reference, merged, ulps = 8); } @@ -1241,7 +1240,7 @@ mod tests { let x2 = grid.x2_grid(); let mu2 = grid.mu2_grid(); - let result = grid.convolute(&x1, &x2, &mu2, &mut |_, _, _| 1.0); + let result = grid.convolve(&x1, &x2, &mu2, &mut |_, _, _| 1.0); assert_eq!(result, 0.0); } @@ -1380,9 +1379,9 @@ mod tests { assert!(!sparse.is_empty()); let reference = - dense.convolute(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + dense.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); let converted = - sparse.convolute(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + sparse.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); assert_approx_eq!(f64, reference, converted, ulps = 8); } diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs index 2a500d9e5..c56f94b7e 100644 --- a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -286,7 +286,7 @@ impl<'a> Pdfs<'a> { } } -/// A cache for evaluating PDFs. Methods like [`Grid::convolute`] accept instances of this `struct` +/// A cache for evaluating PDFs. Methods like [`Grid::convolve`] accept instances of this `struct` /// instead of the PDFs themselves. 
pub struct LumiCache<'a> { pdfs: Pdfs<'a>, diff --git a/pineappl/src/ntuple_subgrid.rs b/pineappl/src/ntuple_subgrid.rs index 401eab34a..282d9fffc 100644 --- a/pineappl/src/ntuple_subgrid.rs +++ b/pineappl/src/ntuple_subgrid.rs @@ -21,14 +21,14 @@ impl NtupleSubgridV1 { } impl Subgrid for NtupleSubgridV1 { - fn convolute( + fn convolve( &self, _: &[f64], _: &[f64], _: &[Mu2], _: &mut dyn FnMut(usize, usize, usize) -> f64, ) -> f64 { - panic!("NtupleSubgridV1 doesn't support the convolute operation"); + panic!("NtupleSubgridV1 doesn't support the convolve operation"); } fn fill(&mut self, ntuple: &Ntuple) { @@ -101,9 +101,9 @@ mod tests { use crate::subgrid::{ExtraSubgridParams, SubgridParams}; #[test] - #[should_panic(expected = "NtupleSubgridV1 doesn't support the convolute operation")] - fn convolute() { - NtupleSubgridV1::new().convolute(&[], &[], &[], &mut |_, _, _| 0.0); + #[should_panic(expected = "NtupleSubgridV1 doesn't support the convolve operation")] + fn convolve() { + NtupleSubgridV1::new().convolve(&[], &[], &[], &mut |_, _, _| 0.0); } #[test] diff --git a/pineappl/src/order.rs b/pineappl/src/order.rs index 614957834..aa6b52db9 100644 --- a/pineappl/src/order.rs +++ b/pineappl/src/order.rs @@ -106,7 +106,7 @@ impl Order { } } - /// Return a mask suitable to pass as the `order_mask` parameter of [`Grid::convolute`], + /// Return a mask suitable to pass as the `order_mask` parameter of [`Grid::convolve`], /// [`Grid::evolve`] or [`Grid::evolve_info`]. 
The selection of `orders` is controlled using /// the `max_as` and `max_al` parameters, for instance setting `max_as = 1` and `max_al = 0` /// selects the LO QCD only, `max_as = 2` and `max_al = 0` the NLO QCD; setting `max_as = 3` diff --git a/pineappl/src/subgrid.rs b/pineappl/src/subgrid.rs index 7dda201aa..1dce12abb 100644 --- a/pineappl/src/subgrid.rs +++ b/pineappl/src/subgrid.rs @@ -79,7 +79,7 @@ pub trait Subgrid { /// Convolute the subgrid with a luminosity function, which takes indices as arguments that /// correspond to the entries given in the slices `x1`, `x2` and `mu2`. - fn convolute( + fn convolve( &self, x1: &[f64], x2: &[f64], diff --git a/pineappl/tests/drell_yan_lo.rs b/pineappl/tests/drell_yan_lo.rs index 0d2f43764..e6c6838b9 100644 --- a/pineappl/tests/drell_yan_lo.rs +++ b/pineappl/tests/drell_yan_lo.rs @@ -328,20 +328,20 @@ fn perform_grid_tests( grid.scale_by_order(10.0, 0.5, 10.0, 10.0, 1.0); grid.scale_by_order(10.0, 1.0, 10.0, 10.0, 4.0); - // TEST 5: `convolute` + // TEST 5: `convolve` let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); - let bins = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); for (result, reference) in bins.iter().zip(reference.iter()) { assert_approx_eq!(f64, *result, *reference, ulps = 16); } - // TEST 5b: `convolute` with `LumiCache::with_two` + // TEST 5b: `convolve` with `LumiCache::with_two` let mut xfx1 = |id, x, q2| pdf.xfx_q2(id, x, q2); let mut xfx2 = |id, x, q2| pdf.xfx_q2(id, x, q2); let mut alphas2 = |_| 0.0; let mut lumi_cache2 = LumiCache::with_two(2212, &mut xfx1, 2212, &mut xfx2, &mut alphas2); - let bins2 = grid.convolute(&mut lumi_cache2, &[], &[], &[], &[(1.0, 1.0)]); + let bins2 = grid.convolve(&mut lumi_cache2, &[], &[], &[], &[(1.0, 1.0)]); for (result, reference) in bins2.iter().zip(reference.iter()) { assert_approx_eq!(f64, *result, *reference, ulps = 16); @@ -350,12 +350,12 @@ fn 
perform_grid_tests( mem::drop(lumi_cache2); mem::drop(bins2); - // TEST 6: `convolute_subgrid` + // TEST 6: `convolve_subgrid` let bins: Vec<_> = (0..grid.bin_info().bins()) .map(|bin| { (0..grid.lumi().len()) .map(|channel| { - grid.convolute_subgrid(&mut lumi_cache, 0, bin, channel, 1.0, 1.0) + grid.convolve_subgrid(&mut lumi_cache, 0, bin, channel, 1.0, 1.0) .sum() }) .sum() @@ -375,12 +375,12 @@ fn perform_grid_tests( assert_eq!(grid.subgrid(0, 0, 0).x1_grid().as_ref(), x_grid); assert_eq!(grid.subgrid(0, 0, 0).x2_grid().as_ref(), x_grid); - // TEST 8: `convolute_subgrid` for the optimized subgrids + // TEST 8: `convolve_subgrid` for the optimized subgrids let bins: Vec<_> = (0..grid.bin_info().bins()) .map(|bin| { (0..grid.lumi().len()) .map(|channel| { - grid.convolute_subgrid(&mut lumi_cache, 0, bin, channel, 1.0, 1.0) + grid.convolve_subgrid(&mut lumi_cache, 0, bin, channel, 1.0, 1.0) .sum() }) .sum() @@ -391,7 +391,7 @@ fn perform_grid_tests( assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 24); } - let bins = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); for (result, reference_after_ssd) in bins.iter().zip(reference_after_ssd.iter()) { assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 24); @@ -424,7 +424,7 @@ fn perform_grid_tests( grid.merge_bins(bin..bin + 2)?; } - let merged2 = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let merged2 = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); for (result, reference_after_ssd) in merged2.iter().zip( reference_after_ssd @@ -439,7 +439,7 @@ fn perform_grid_tests( // delete a few bins from the start grid.delete_bins(&[0, 1]); - let deleted = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let deleted = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); assert_eq!(deleted.len(), 10); @@ -455,7 +455,7 @@ fn perform_grid_tests( // delete a few 
bins from the ending grid.delete_bins(&[8, 9]); - let deleted2 = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let deleted2 = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); assert_eq!(deleted2.len(), 8); diff --git a/pineappl_applgrid/src/applgrid.cpp b/pineappl_applgrid/src/applgrid.cpp index 592a16d85..36ac9b2a9 100644 --- a/pineappl_applgrid/src/applgrid.cpp +++ b/pineappl_applgrid/src/applgrid.cpp @@ -187,7 +187,7 @@ rust::Vec grid_combine(appl::grid const& grid) return std_vector_to_rust_vec(grid.combine()); } -rust::Vec grid_convolute( +rust::Vec grid_convolve( appl::grid& grid, rust::Str pdfset, int member, diff --git a/pineappl_applgrid/src/applgrid.hpp b/pineappl_applgrid/src/applgrid.hpp index 45d15f235..fd2e4d9b0 100644 --- a/pineappl_applgrid/src/applgrid.hpp +++ b/pineappl_applgrid/src/applgrid.hpp @@ -58,7 +58,7 @@ std::unique_ptr make_lumi_pdf(rust::Str s, rust::Slice comb rust::Vec grid_combine(appl::grid const& grid); -rust::Vec grid_convolute( +rust::Vec grid_convolve( appl::grid& grid, rust::Str pdfset, int member, diff --git a/pineappl_applgrid/src/lib.rs b/pineappl_applgrid/src/lib.rs index 865f10755..ab8f28216 100644 --- a/pineappl_applgrid/src/lib.rs +++ b/pineappl_applgrid/src/lib.rs @@ -111,7 +111,7 @@ pub mod ffi { fn make_lumi_pdf(_: &str, _: &[i32]) -> UniquePtr; fn grid_combine(_: &grid) -> Vec; - fn grid_convolute( + fn grid_convolve( _: Pin<&mut grid>, _: &str, _: i32, diff --git a/pineappl_capi/include/PineAPPL.hpp b/pineappl_capi/include/PineAPPL.hpp index 561237110..35ae094ed 100644 --- a/pineappl_capi/include/PineAPPL.hpp +++ b/pineappl_capi/include/PineAPPL.hpp @@ -228,7 +228,7 @@ struct Grid { * @return prediction for each bin */ std::vector - convolute_with_one(const std::int32_t pdg_id, LHAPDF::PDF &pdf, + convolve_with_one(const std::int32_t pdg_id, LHAPDF::PDF &pdf, const double xi_ren = 1.0, const double xi_fac = 1.0, const std::vector &order_mask = {}, const std::vector &lumi_mask = {}) 
const { @@ -253,7 +253,7 @@ struct Grid { } // do it! std::vector results(this->bin_count()); - pineappl_grid_convolute_with_one(this->raw, pdg_id, xfx, alphas, &pdf, + pineappl_grid_convolve_with_one(this->raw, pdg_id, xfx, alphas, &pdf, raw_order_mask.get(), raw_lumi_mask.get(), xi_ren, xi_fac, results.data()); return results; diff --git a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index 7a4812e26..02fdb477a 100644 --- a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -291,6 +291,59 @@ pub unsafe extern "C" fn pineappl_grid_clone(grid: *const Grid) -> Box { Box::new(grid.clone()) } +/// Wrapper for [`pineappl_grid_convolve_with_one`]. +#[deprecated( + since = "0.8.0", + note = "please use `pineappl_grid_convolve_with_one` instead" +)] +#[no_mangle] +pub unsafe extern "C" fn pineappl_grid_convolute_with_one( + grid: *const Grid, + pdg_id: i32, + xfx: extern "C" fn(pdg_id: i32, x: f64, q2: f64, state: *mut c_void) -> f64, + alphas: extern "C" fn(q2: f64, state: *mut c_void) -> f64, + state: *mut c_void, + order_mask: *const bool, + lumi_mask: *const bool, + xi_ren: f64, + xi_fac: f64, + results: *mut f64, +) { + unsafe { + pineappl_grid_convolve_with_one( + grid, pdg_id, xfx, alphas, state, order_mask, lumi_mask, xi_ren, xi_fac, results, + ); + } +} + +/// Wrapper for [`pineappl_grid_convolve_with_two`]. 
+#[deprecated( + since = "0.8.0", + note = "please use `pineappl_grid_convolve_with_two` instead" +)] +#[no_mangle] +pub unsafe extern "C" fn pineappl_grid_convolute_with_two( + grid: *const Grid, + pdg_id1: i32, + xfx1: extern "C" fn(pdg_id: i32, x: f64, q2: f64, state: *mut c_void) -> f64, + pdg_id2: i32, + xfx2: extern "C" fn(pdg_id: i32, x: f64, q2: f64, state: *mut c_void) -> f64, + alphas: extern "C" fn(q2: f64, state: *mut c_void) -> f64, + state: *mut c_void, + order_mask: *const bool, + lumi_mask: *const bool, + xi_ren: f64, + xi_fac: f64, + results: *mut f64, +) { + unsafe { + pineappl_grid_convolve_with_two( + grid, pdg_id1, xfx1, pdg_id2, xfx2, alphas, state, order_mask, lumi_mask, xi_ren, + xi_fac, results, + ); + } +} + /// Convolutes the specified grid with the PDF `xfx`, which is the PDF for a hadron with the PDG id /// `pdg_id`, and strong coupling `alphas`. These functions must evaluate the PDFs for the given /// `x` and `q2` for the parton with the given PDG id, `pdg_id`, and return the result. Note that @@ -312,7 +365,7 @@ pub unsafe extern "C" fn pineappl_grid_clone(grid: *const Grid) -> Box { /// either be null pointers or point to arrays that are as long as `grid` has orders and lumi /// entries, respectively. Finally, `results` must be as long as `grid` has bins. 
#[no_mangle] -pub unsafe extern "C" fn pineappl_grid_convolute_with_one( +pub unsafe extern "C" fn pineappl_grid_convolve_with_one( grid: *const Grid, pdg_id: i32, xfx: extern "C" fn(pdg_id: i32, x: f64, q2: f64, state: *mut c_void) -> f64, @@ -340,7 +393,7 @@ pub unsafe extern "C" fn pineappl_grid_convolute_with_one( let results = unsafe { slice::from_raw_parts_mut(results, grid.bin_info().bins()) }; let mut lumi_cache = LumiCache::with_one(pdg_id, &mut pdf, &mut als); - results.copy_from_slice(&grid.convolute( + results.copy_from_slice(&grid.convolve( &mut lumi_cache, &order_mask, &[], @@ -371,7 +424,7 @@ pub unsafe extern "C" fn pineappl_grid_convolute_with_one( /// either be null pointers or point to arrays that are as long as `grid` has orders and lumi /// entries, respectively. Finally, `results` must be as long as `grid` has bins. #[no_mangle] -pub unsafe extern "C" fn pineappl_grid_convolute_with_two( +pub unsafe extern "C" fn pineappl_grid_convolve_with_two( grid: *const Grid, pdg_id1: i32, xfx1: extern "C" fn(pdg_id: i32, x: f64, q2: f64, state: *mut c_void) -> f64, @@ -402,7 +455,7 @@ pub unsafe extern "C" fn pineappl_grid_convolute_with_two( let results = unsafe { slice::from_raw_parts_mut(results, grid.bin_info().bins()) }; let mut lumi_cache = LumiCache::with_two(pdg_id1, &mut pdf1, pdg_id2, &mut pdf2, &mut als); - results.copy_from_slice(&grid.convolute( + results.copy_from_slice(&grid.convolve( &mut lumi_cache, &order_mask, &[], diff --git a/pineappl_cli/src/export.rs b/pineappl_cli/src/export.rs index e762ec5f3..8724ed236 100644 --- a/pineappl_cli/src/export.rs +++ b/pineappl_cli/src/export.rs @@ -24,7 +24,7 @@ fn convert_into_applgrid( let (mut applgrid, order_mask) = applgrid::convert_into_applgrid(grid, output, discard_non_matching_scales)?; - let results = applgrid::convolute_applgrid(applgrid.pin_mut(), pdfset, member); + let results = applgrid::convolve_applgrid(applgrid.pin_mut(), pdfset, member); Ok(("APPLgrid", results, 1, order_mask)) } 
diff --git a/pineappl_cli/src/export/applgrid.rs b/pineappl_cli/src/export/applgrid.rs index edba6c118..f6c202f6c 100644 --- a/pineappl_cli/src/export/applgrid.rs +++ b/pineappl_cli/src/export/applgrid.rs @@ -296,10 +296,10 @@ pub fn convert_into_applgrid( } // TODO: deduplicate this function from import -pub fn convolute_applgrid(grid: Pin<&mut grid>, pdfset: &str, member: usize) -> Vec { +pub fn convolve_applgrid(grid: Pin<&mut grid>, pdfset: &str, member: usize) -> Vec { let nloops = grid.nloops(); - ffi::grid_convolute( + ffi::grid_convolve( grid, pdfset, member.try_into().unwrap(), diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 335c3db30..e0b18dd52 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -168,7 +168,7 @@ pub fn convolve_scales( }; let mut alphas = |q2| lhapdf.alphas_q2(q2); let mut cache = LumiCache::with_one(pdf_pdg_id, &mut pdf, &mut alphas); - let mut results = grid.convolute(&mut cache, &orders, bins, lumis, scales); + let mut results = grid.convolve(&mut cache, &orders, bins, lumis, scales); match mode { ConvoluteMode::Asymmetry => { @@ -278,7 +278,7 @@ pub fn convolve_subgrid( let mut alphas = |q2| lhapdf.alphas_q2(q2); let mut cache = LumiCache::with_one(pdf_pdg_id, &mut pdf, &mut alphas); - grid.convolute_subgrid(&mut cache, order, bin, lumi, 1.0, 1.0) + grid.convolve_subgrid(&mut cache, order, bin, lumi, 1.0, 1.0) } pub fn parse_pdfset(argument: &str) -> std::result::Result { diff --git a/pineappl_cli/src/import.rs b/pineappl_cli/src/import.rs index f0a0181fa..727bc718b 100644 --- a/pineappl_cli/src/import.rs +++ b/pineappl_cli/src/import.rs @@ -29,7 +29,7 @@ fn convert_applgrid( let mut grid = ffi::make_grid(input.to_str().unwrap())?; let pgrid = applgrid::convert_applgrid(grid.pin_mut(), alpha, dis_pid)?; - let results = applgrid::convolute_applgrid(grid.pin_mut(), pdfset, member); + let results = applgrid::convolve_applgrid(grid.pin_mut(), pdfset, member); Ok(("APPLgrid", pgrid, 
results, 1)) } diff --git a/pineappl_cli/src/import/applgrid.rs b/pineappl_cli/src/import/applgrid.rs index b70939567..0b39c56d3 100644 --- a/pineappl_cli/src/import/applgrid.rs +++ b/pineappl_cli/src/import/applgrid.rs @@ -249,10 +249,10 @@ pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, dis_pid: i32) -> Resul Ok(grid0) } -pub fn convolute_applgrid(grid: Pin<&mut grid>, pdfset: &str, member: usize) -> Vec { +pub fn convolve_applgrid(grid: Pin<&mut grid>, pdfset: &str, member: usize) -> Vec { let nloops = grid.nloops(); - ffi::grid_convolute( + ffi::grid_convolve( grid, pdfset, member.try_into().unwrap(), diff --git a/pineappl_cli/tests/import.rs b/pineappl_cli/tests/import.rs index d73d7e5bb..f07ce4acd 100644 --- a/pineappl_cli/tests/import.rs +++ b/pineappl_cli/tests/import.rs @@ -875,7 +875,7 @@ fn import_hadronic_fktable() { let mut xfx = |id, x, q2| pdf.xfx_q2(id, x, q2); let mut alphas = |_| 0.0; let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); - let results = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let results = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); let mut fk_table = FkTable::try_from(grid).unwrap(); let table = fk_table.table(); @@ -998,14 +998,14 @@ fn import_hadronic_fktable() { ] ); - assert_eq!(results, fk_table.convolute(&mut lumi_cache, &[], &[])); + assert_eq!(results, fk_table.convolve(&mut lumi_cache, &[], &[])); fk_table.optimize(FkAssumptions::Nf6Ind); assert_eq!(fk_table.lumi(), lumi); assert_approx_eq!( f64, results[0], - fk_table.convolute(&mut lumi_cache, &[], &[])[0], + fk_table.convolve(&mut lumi_cache, &[], &[])[0], ulps = 4 ); fk_table.optimize(FkAssumptions::Nf6Sym); @@ -1013,7 +1013,7 @@ fn import_hadronic_fktable() { assert_approx_eq!( f64, results[0], - fk_table.convolute(&mut lumi_cache, &[], &[])[0], + fk_table.convolve(&mut lumi_cache, &[], &[])[0], ulps = 4 ); fk_table.optimize(FkAssumptions::Nf5Ind); @@ -1021,21 +1021,21 @@ fn 
import_hadronic_fktable() { assert_approx_eq!( f64, results[0], - fk_table.convolute(&mut lumi_cache, &[], &[])[0] + fk_table.convolve(&mut lumi_cache, &[], &[])[0] ); fk_table.optimize(FkAssumptions::Nf5Sym); assert_eq!(fk_table.lumi(), lumi); assert_approx_eq!( f64, results[0], - fk_table.convolute(&mut lumi_cache, &[], &[])[0] + fk_table.convolve(&mut lumi_cache, &[], &[])[0] ); fk_table.optimize(FkAssumptions::Nf4Ind); assert_eq!(fk_table.lumi(), lumi); assert_approx_eq!( f64, results[0], - fk_table.convolute(&mut lumi_cache, &[], &[])[0] + fk_table.convolve(&mut lumi_cache, &[], &[])[0] ); fk_table.optimize(FkAssumptions::Nf4Sym); diff --git a/pineappl_py/docs/source/recipes.rst b/pineappl_py/docs/source/recipes.rst index c2d29cdf6..f026bf4ea 100644 --- a/pineappl_py/docs/source/recipes.rst +++ b/pineappl_py/docs/source/recipes.rst @@ -4,7 +4,7 @@ Recipes Below we list some common use cases with their solutions. -How can I convolute a given PineAPPL grid with my PDF? +How can I convolve a given PineAPPL grid with my PDF? ------------------------------------------------------ .. code:: python @@ -13,7 +13,7 @@ How can I convolute a given PineAPPL grid with my PDF? import lhapdf g = pineappl.grid.Grid.read("path/to/grid.pineappl.lz4") pdf = lhapdf.mkPDF("YourPDF", 0) - bins = g.convolute(pdf.xfxQ2, pdf.xfxQ2, pdf.alphasQ2) + bins = g.convolve(pdf.xfxQ2, pdf.xfxQ2, pdf.alphasQ2) If the grid is actually an FkTable just replace diff --git a/pineappl_py/pineappl/grid.py b/pineappl_py/pineappl/grid.py index 9f0e29a28..b0b6c4cc9 100644 --- a/pineappl_py/pineappl/grid.py +++ b/pineappl_py/pineappl/grid.py @@ -27,7 +27,7 @@ def __init__(self, alphas, alpha, logxir, logxif): def create_mask(orders, max_as, max_al, logs): r""" Return a mask suitable to pass as the `order_mask` parameter of - :meth:`Grid.convolute`. + :meth:`Grid.convolve`. 
Parameters ---------- @@ -183,7 +183,7 @@ def orders(self): """ return [Order(*pyorder.as_tuple()) for pyorder in self.raw.orders()] - def convolute_with_one( + def convolve_with_one( self, pdg_id, xfx, @@ -226,7 +226,7 @@ def convolute_with_one( cross sections for all bins, for each scale-variation tuple (first all bins, then the scale variation) """ - return self.raw.convolute_with_one( + return self.raw.convolve_with_one( pdg_id, xfx, alphas, @@ -236,7 +236,7 @@ def convolute_with_one( xi, ) - def convolute_with_two( + def convolve_with_two( self, pdg_id1, xfx1, @@ -285,7 +285,7 @@ def convolute_with_two( cross sections for all bins, for each scale-variation tuple (first all bins, then the scale variation) """ - return self.raw.convolute_with_two( + return self.raw.convolve_with_two( pdg_id1, xfx1, pdg_id2, diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index f561b5353..58de6aea0 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -224,7 +224,7 @@ impl PyFkTable { /// numpy.ndarray(float) : /// cross sections for all bins #[pyo3(signature = (pdg_id, xfx, bin_indices = None, lumi_mask= None))] - pub fn convolute_with_one<'py>( + pub fn convolve_with_one<'py>( &self, pdg_id: i32, xfx: &PyAny, @@ -236,7 +236,7 @@ impl PyFkTable { let mut alphas = |_| 1.0; let mut lumi_cache = LumiCache::with_one(pdg_id, &mut xfx, &mut alphas); self.fk_table - .convolute( + .convolve( &mut lumi_cache, &bin_indices.map_or(vec![], |b| b.to_vec().unwrap()), &lumi_mask.map_or(vec![], |l| l.to_vec().unwrap()), diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index e8fefdc4b..3f7dd9593 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -66,7 +66,7 @@ impl PyOrder { self.order.logxif, ) } - /// Return a mask suitable to pass as the `order_mask` parameter of [`Grid::convolute`]. The + /// Return a mask suitable to pass as the `order_mask` parameter of [`Grid::convolve`]. 
The /// selection of `orders` is controlled using the `max_as` and `max_al` parameters, for /// instance setting `max_as = 1` and `max_al = 0` selects the LO QCD only, `max_as = 2` and /// `max_al = 0` the NLO QCD; setting `max_as = 3` and `max_al = 2` would select all NLOs, and @@ -331,7 +331,7 @@ impl PyGrid { /// numpy.ndarray(float) : /// cross sections for all bins, for each scale-variation tuple (first all bins, then /// the scale variation) - pub fn convolute_with_one<'py>( + pub fn convolve_with_one<'py>( &self, pdg_id: i32, xfx: &PyAny, @@ -346,7 +346,7 @@ impl PyGrid { let mut alphas = |q2| f64::extract(alphas.call1((q2,)).unwrap()).unwrap(); let mut lumi_cache = LumiCache::with_one(pdg_id, &mut xfx, &mut alphas); self.grid - .convolute( + .convolve( &mut lumi_cache, &order_mask.to_vec().unwrap(), &bin_indices.to_vec().unwrap(), @@ -394,7 +394,7 @@ impl PyGrid { /// numpy.ndarray(float) : /// cross sections for all bins, for each scale-variation tuple (first all bins, then /// the scale variation) - pub fn convolute_with_two<'py>( + pub fn convolve_with_two<'py>( &self, pdg_id1: i32, xfx1: &PyAny, @@ -413,7 +413,7 @@ impl PyGrid { let mut lumi_cache = LumiCache::with_two(pdg_id1, &mut xfx1, pdg_id2, &mut xfx2, &mut alphas); self.grid - .convolute( + .convolve( &mut lumi_cache, &order_mask.to_vec().unwrap(), &bin_indices.to_vec().unwrap(), diff --git a/pineappl_py/tests/test_fk_table.py b/pineappl_py/tests/test_fk_table.py index 2c09d02e0..7769dcf7d 100644 --- a/pineappl_py/tests/test_fk_table.py +++ b/pineappl_py/tests/test_fk_table.py @@ -13,7 +13,7 @@ def fake_grid(self, bins=None): g.set_key_value("lumi_id_types", "pdg_mc_ids") return g - def test_convolute_with_one(self): + def test_convolve_with_one(self): g = self.fake_grid() # DIS grid @@ -28,10 +28,10 @@ def test_convolute_with_one(self): g.set_subgrid(0, 0, 0, subgrid) fk = pineappl.fk_table.FkTable.from_grid(g) np.testing.assert_allclose( - fk.convolute_with_one(2212, lambda pid, x, q2: 0.0), + 
fk.convolve_with_one(2212, lambda pid, x, q2: 0.0), [0.0] * 2, ) np.testing.assert_allclose( - fk.convolute_with_one(2212, lambda pid, x, q2: 1), + fk.convolve_with_one(2212, lambda pid, x, q2: 1), [5e7 / 9999, 0.0], ) diff --git a/pineappl_py/tests/test_grid.py b/pineappl_py/tests/test_grid.py index 618faf279..83ad810a1 100644 --- a/pineappl_py/tests/test_grid.py +++ b/pineappl_py/tests/test_grid.py @@ -81,7 +81,7 @@ def test_bins(self): np.testing.assert_allclose(g.bin_left(1), [2, 3]) np.testing.assert_allclose(g.bin_right(1), [3, 5]) - def test_convolute_with_one(self): + def test_convolve_with_one(self): g = self.fake_grid() # DIS grid @@ -95,15 +95,15 @@ def test_convolute_with_one(self): ) g.set_subgrid(0, 0, 0, subgrid) np.testing.assert_allclose( - g.convolute_with_one(2212, lambda pid, x, q2: 0.0, lambda q2: 0.0), + g.convolve_with_one(2212, lambda pid, x, q2: 0.0, lambda q2: 0.0), [0.0] * 2, ) np.testing.assert_allclose( - g.convolute_with_one(2212, lambda pid, x, q2: 1, lambda q2: 1.0), + g.convolve_with_one(2212, lambda pid, x, q2: 1, lambda q2: 1.0), [5e6 / 9999, 0.0], ) np.testing.assert_allclose( - g.convolute_with_one(2212, lambda pid, x, q2: 1, lambda q2: 2.0), + g.convolve_with_one(2212, lambda pid, x, q2: 1, lambda q2: 2.0), [2**3 * 5e6 / 9999, 0.0], ) @@ -119,7 +119,7 @@ def test_io(self, tmp_path): def test_fill(self): g = self.fake_grid() g.fill(0.5, 0.5, 10.0, 0, 0.01, 0, 10.0) - res = g.convolute_with_one(2212, lambda pid, x, q2: x, lambda q2: 1.0) + res = g.convolve_with_one(2212, lambda pid, x, q2: x, lambda q2: 1.0) pytest.approx(res) == 0.0 def test_fill_array(self): @@ -133,13 +133,13 @@ def test_fill_array(self): 0, np.array([10.0, 100.0]), ) - res = g.convolute_with_one(2212, lambda pid, x, q2: x, lambda q2: 1.0) + res = g.convolve_with_one(2212, lambda pid, x, q2: x, lambda q2: 1.0) pytest.approx(res) == 0.0 def test_fill_all(self): g = self.fake_grid() g.fill_all(1.0, 1.0, 1.0, 0, 1e-2, np.array([10.0])) - res = 
g.convolute_with_one(2212, lambda pid, x, q2: x, lambda q2: 1.0) + res = g.convolve_with_one(2212, lambda pid, x, q2: x, lambda q2: 1.0) pytest.approx(res) == 0.0 def test_merge(self): diff --git a/pineappl_py/tests/test_sugrid.py b/pineappl_py/tests/test_sugrid.py index 15cb7484b..d5b7aeb5b 100644 --- a/pineappl_py/tests/test_sugrid.py +++ b/pineappl_py/tests/test_sugrid.py @@ -17,18 +17,18 @@ def test_issue_164(pdf): orders = [pineappl.grid.Order(0, 0, 0, 0)] params = pineappl.subgrid.SubgridParams() - def convolute_grid(): + def convolve_grid(): grid = pineappl.grid.Grid.create(luminosities, orders, [0.0, 1.0], params) grid.fill(0.2, 0.2, 10, 0, 0.5, 0, 0.5) - return grid.convolute_with_one(2212, pdf.xfxQ, pdf.alphasQ) + return grid.convolve_with_one(2212, pdf.xfxQ, pdf.alphasQ) # default minimum is q2=100 - res = convolute_grid() + res = convolve_grid() assert res == 0.0 # lower minimum to q2=1 params.set_q2_min(1.0) - res = convolute_grid() + res = convolve_grid() assert pytest.approx(res) != 0.0 class TestSubgrid: From e203ec1887481bf296a250e8374e5353a5451cd9 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 28 May 2024 11:13:53 +0200 Subject: [PATCH 116/179] Update `CHANGELOG.md` --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a4c7e8fd2..59d11bb28 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - added new methods `Grid::convolutions` and `Grid::set_convolution` +- added the function `pineappl_grid_convolve_with_one` and + `pineappl_grid_convolve_with_two` which replace the deprecated function + similarly named with `convolute` in CAPI ### Changed From a1156f5be129d050e9eba23f4683fd03e402ac4a Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 28 May 2024 14:23:38 +0200 Subject: [PATCH 117/179] Move `Order` into new module `boc` --- CHANGELOG.md | 2 +- pineappl/src/{order.rs => 
boc.rs} | 9 +++++---- pineappl/src/evolution.rs | 2 +- pineappl/src/fk_table.rs | 2 +- pineappl/src/grid.rs | 2 +- pineappl/src/lib.rs | 2 +- pineappl/tests/drell_yan_lo.rs | 2 +- pineappl_capi/src/lib.rs | 2 +- pineappl_cli/src/export.rs | 2 +- pineappl_cli/src/export/applgrid.rs | 2 +- pineappl_cli/src/import/applgrid.rs | 2 +- pineappl_cli/src/import/fastnlo.rs | 2 +- pineappl_cli/src/import/fktable.rs | 2 +- pineappl_cli/src/read.rs | 2 +- pineappl_cli/src/write.rs | 2 +- 15 files changed, 19 insertions(+), 18 deletions(-) rename pineappl/src/{order.rs => boc.rs} (98%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 59d11bb28..345e705a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed -- moved `Order` and `ParseOrderError` to their own module `order` +- moved `Order` and `ParseOrderError` to the new module `boc` ### Removed diff --git a/pineappl/src/order.rs b/pineappl/src/boc.rs similarity index 98% rename from pineappl/src/order.rs rename to pineappl/src/boc.rs index aa6b52db9..be5cd0d2a 100644 --- a/pineappl/src/order.rs +++ b/pineappl/src/boc.rs @@ -1,4 +1,5 @@ -//! TODO +//! Module containing structures for the 3 dimensions of a [`Grid`]: bins, [`Order`] and channels +//! (`boc`). use serde::{Deserialize, Serialize}; use std::cmp::Ordering; @@ -130,7 +131,7 @@ impl Order { /// - the mixed NNLO QCD—EW. 
/// /// ```rust - /// use pineappl::order::Order; + /// use pineappl::boc::Order; /// /// let orders = [ /// Order::new(0, 2, 0, 0), // LO : alpha^2 @@ -161,7 +162,7 @@ impl Order { /// `true`: /// /// ```rust - /// use pineappl::order::Order; + /// use pineappl::boc::Order; /// /// let orders = [ /// Order::new(0, 2, 0, 0), // LO : alpha^2 @@ -178,7 +179,7 @@ impl Order { /// the selection for different LOs: /// /// ```rust - /// use pineappl::order::Order; + /// use pineappl::boc::Order; /// /// let orders = [ /// Order::new(2, 0, 0, 0), // LO QCD : alphas^2 diff --git a/pineappl/src/evolution.rs b/pineappl/src/evolution.rs index 480bcb8c3..76f21fa84 100644 --- a/pineappl/src/evolution.rs +++ b/pineappl/src/evolution.rs @@ -1,10 +1,10 @@ //! Supporting classes and functions for [`Grid::evolve`]. +use super::boc::Order; use super::grid::{Convolution, Grid, GridError}; use super::import_only_subgrid::ImportOnlySubgridV2; use super::lumi::LumiEntry; use super::lumi_entry; -use super::order::Order; use super::sparse_array3::SparseArray3; use super::subgrid::{Mu2, Subgrid, SubgridEnum}; use float_cmp::approx_eq; diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index aa364c1db..6b041abda 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -1,8 +1,8 @@ //! Provides the [`FkTable`] type. +use super::boc::Order; use super::grid::{Convolution, Grid, GridError}; use super::lumi::LumiCache; -use super::order::Order; use super::subgrid::Subgrid; use float_cmp::approx_eq; use ndarray::Array4; diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 23c0a19e3..ca159f2a1 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1,6 +1,7 @@ //! Module containing all traits and supporting structures for grids. 
use super::bin::{BinInfo, BinLimits, BinRemapper}; +use super::boc::Order; use super::empty_subgrid::EmptySubgridV1; use super::evolution::{self, AlphasTable, EvolveInfo, OperatorInfo, OperatorSliceInfo}; use super::fk_table::FkTable; @@ -8,7 +9,6 @@ use super::import_only_subgrid::ImportOnlySubgridV2; use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; use super::lumi::{LumiCache, LumiEntry}; use super::ntuple_subgrid::NtupleSubgridV1; -use super::order::Order; use super::pids::{self, PidBasis}; use super::subgrid::{ExtraSubgridParams, Mu2, Subgrid, SubgridEnum, SubgridParams}; use bitflags::bitflags; diff --git a/pineappl/src/lib.rs b/pineappl/src/lib.rs index 51f5a8a26..08cfe449b 100644 --- a/pineappl/src/lib.rs +++ b/pineappl/src/lib.rs @@ -37,6 +37,7 @@ mod convert; pub mod bin; +pub mod boc; pub mod empty_subgrid; pub mod evolution; pub mod fk_table; @@ -45,7 +46,6 @@ pub mod import_only_subgrid; pub mod lagrange_subgrid; pub mod lumi; pub mod ntuple_subgrid; -pub mod order; pub mod packed_array; pub mod pids; pub mod sparse_array3; diff --git a/pineappl/tests/drell_yan_lo.rs b/pineappl/tests/drell_yan_lo.rs index e6c6838b9..ed33d7e17 100644 --- a/pineappl/tests/drell_yan_lo.rs +++ b/pineappl/tests/drell_yan_lo.rs @@ -3,10 +3,10 @@ use float_cmp::assert_approx_eq; use lhapdf::Pdf; use num_complex::Complex; use pineappl::bin::BinRemapper; +use pineappl::boc::Order; use pineappl::grid::{Grid, GridOptFlags, Ntuple}; use pineappl::lumi::LumiCache; use pineappl::lumi_entry; -use pineappl::order::Order; use pineappl::subgrid::{ExtraSubgridParams, Subgrid, SubgridEnum, SubgridParams}; use rand::Rng; use rand_pcg::Pcg64; diff --git a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index 02fdb477a..b059ad803 100644 --- a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -57,9 +57,9 @@ use itertools::izip; use pineappl::bin::BinRemapper; +use pineappl::boc::Order; use pineappl::grid::{Grid, GridOptFlags, Ntuple}; 
use pineappl::lumi::{LumiCache, LumiEntry}; -use pineappl::order::Order; use pineappl::subgrid::{ExtraSubgridParams, SubgridParams}; use std::collections::HashMap; use std::ffi::{CStr, CString}; diff --git a/pineappl_cli/src/export.rs b/pineappl_cli/src/export.rs index 8724ed236..7986a6c82 100644 --- a/pineappl_cli/src/export.rs +++ b/pineappl_cli/src/export.rs @@ -3,8 +3,8 @@ use super::{GlobalConfiguration, Subcommand}; use anyhow::{anyhow, Result}; use clap::builder::{PossibleValuesParser, TypedValueParser}; use clap::{Parser, ValueHint}; +use pineappl::boc::Order; use pineappl::grid::Grid; -use pineappl::order::Order; use std::path::{Path, PathBuf}; use std::process::ExitCode; diff --git a/pineappl_cli/src/export/applgrid.rs b/pineappl_cli/src/export/applgrid.rs index f6c202f6c..74709bee7 100644 --- a/pineappl_cli/src/export/applgrid.rs +++ b/pineappl_cli/src/export/applgrid.rs @@ -2,8 +2,8 @@ use anyhow::{anyhow, bail, Result}; use cxx::{let_cxx_string, UniquePtr}; use float_cmp::approx_eq; use ndarray::{s, Axis}; +use pineappl::boc::Order; use pineappl::grid::{Convolution, Grid}; -use pineappl::order::Order; use pineappl::subgrid::{Mu2, Subgrid, SubgridParams}; use pineappl_applgrid::ffi::{self, grid}; use std::borrow::Cow; diff --git a/pineappl_cli/src/import/applgrid.rs b/pineappl_cli/src/import/applgrid.rs index 0b39c56d3..67d623840 100644 --- a/pineappl_cli/src/import/applgrid.rs +++ b/pineappl_cli/src/import/applgrid.rs @@ -1,8 +1,8 @@ use anyhow::Result; +use pineappl::boc::Order; use pineappl::grid::{Convolution, Grid}; use pineappl::import_only_subgrid::ImportOnlySubgridV2; use pineappl::lumi::LumiEntry; -use pineappl::order::Order; use pineappl::sparse_array3::SparseArray3; use pineappl::subgrid::{Mu2, SubgridParams}; use pineappl_applgrid::ffi::{self, grid}; diff --git a/pineappl_cli/src/import/fastnlo.rs b/pineappl_cli/src/import/fastnlo.rs index 36cac3a91..28d11a569 100644 --- a/pineappl_cli/src/import/fastnlo.rs +++ 
b/pineappl_cli/src/import/fastnlo.rs @@ -1,10 +1,10 @@ use anyhow::Result; use itertools::Itertools; use pineappl::bin::BinRemapper; +use pineappl::boc::Order; use pineappl::grid::{Convolution, Grid}; use pineappl::import_only_subgrid::ImportOnlySubgridV2; use pineappl::lumi::LumiEntry; -use pineappl::order::Order; use pineappl::sparse_array3::SparseArray3; use pineappl::subgrid::{Mu2, SubgridParams}; use pineappl_fastnlo::ffi::{ diff --git a/pineappl_cli/src/import/fktable.rs b/pineappl_cli/src/import/fktable.rs index 85dc7ac0f..7e9f38483 100644 --- a/pineappl_cli/src/import/fktable.rs +++ b/pineappl_cli/src/import/fktable.rs @@ -1,9 +1,9 @@ use anyhow::{anyhow, Context, Result}; use flate2::read::GzDecoder; +use pineappl::boc::Order; use pineappl::grid::{Convolution, Grid}; use pineappl::import_only_subgrid::ImportOnlySubgridV1; use pineappl::lumi_entry; -use pineappl::order::Order; use pineappl::sparse_array3::SparseArray3; use pineappl::subgrid::SubgridParams; use std::fs::File; diff --git a/pineappl_cli/src/read.rs b/pineappl_cli/src/read.rs index 17d27dd81..eedcc4c2e 100644 --- a/pineappl_cli/src/read.rs +++ b/pineappl_cli/src/read.rs @@ -3,8 +3,8 @@ use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::{Args, Parser, ValueHint}; use itertools::Itertools; +use pineappl::boc::Order; use pineappl::fk_table::FkTable; -use pineappl::order::Order; use prettytable::{cell, row, Row}; use std::path::PathBuf; use std::process::ExitCode; diff --git a/pineappl_cli/src/write.rs b/pineappl_cli/src/write.rs index 787709446..fc59fe85c 100644 --- a/pineappl_cli/src/write.rs +++ b/pineappl_cli/src/write.rs @@ -7,9 +7,9 @@ use clap::{ ValueHint, }; use pineappl::bin::BinRemapper; +use pineappl::boc::Order; use pineappl::fk_table::{FkAssumptions, FkTable}; use pineappl::lumi::LumiEntry; -use pineappl::order::Order; use pineappl::pids; use pineappl::pids::PidBasis; use std::fs; From aef098226ebf22e697106eb33177be2819d89b11 Mon Sep 17 00:00:00 2001 From: 
Christopher Schwan Date: Fri, 31 May 2024 11:00:55 +0200 Subject: [PATCH 118/179] Use 'channel` instead of 'lumi' consistently --- CHANGELOG.md | 13 ++ docs/cli-tutorial.md | 37 ++-- pineappl/src/boc.rs | 308 ++++++++++++++++++++++++++- pineappl/src/evolution.rs | 108 +++++----- pineappl/src/fk_table.rs | 42 ++-- pineappl/src/grid.rs | 274 ++++++++++++------------ pineappl/src/lib.rs | 5 +- pineappl/src/lumi.rs | 313 ---------------------------- pineappl/src/pids.rs | 8 +- pineappl/tests/drell_yan_lo.rs | 30 +-- pineappl_capi/src/lib.rs | 89 ++++---- pineappl_cli/src/analyze.rs | 12 +- pineappl_cli/src/channels.rs | 49 +++-- pineappl_cli/src/diff.rs | 10 +- pineappl_cli/src/export/applgrid.rs | 47 +++-- pineappl_cli/src/helpers.rs | 4 +- pineappl_cli/src/import/applgrid.rs | 7 +- pineappl_cli/src/import/fastnlo.rs | 9 +- pineappl_cli/src/import/fktable.rs | 6 +- pineappl_cli/src/plot.rs | 31 +-- pineappl_cli/src/pull.rs | 48 ++--- pineappl_cli/src/read.rs | 18 +- pineappl_cli/src/subgrids.rs | 6 +- pineappl_cli/src/write.rs | 34 +-- pineappl_cli/tests/analyze.rs | 4 +- pineappl_cli/tests/channels.rs | 38 ++-- pineappl_cli/tests/diff.rs | 8 +- pineappl_cli/tests/import.rs | 20 +- pineappl_cli/tests/plot.rs | 2 +- pineappl_cli/tests/pull.rs | 12 +- pineappl_cli/tests/read.rs | 20 +- pineappl_cli/tests/subgrids.rs | 18 +- pineappl_cli/tests/write.rs | 44 ++-- pineappl_py/src/fk_table.rs | 4 +- pineappl_py/src/grid.rs | 6 +- pineappl_py/src/lumi.rs | 8 +- 36 files changed, 865 insertions(+), 827 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 345e705a2..2f20e5042 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - moved `Order` and `ParseOrderError` to the new module `boc` +- renamed switch `--split-lumi` of `pineappl write` to `--split-channels`. The + old switch can still be used +- renamed switch `--lumis` of `pineappl read` to `--channels`. 
The old switch + can still be used +- renamed switch `--ignore-lumis` of `pineappl diff` to `--ignore-channels`. + The old switch can still be used +- renamed `Grid::lumi` to `Grid::channels`, `Grid::split_lumi` to + `Grid::split_channels`, `Grid::rewrite_lumi` to `Grid::rewrite_channels` and + `Grid::set_lumis` to `Grid::set_channels`. The term 'channel' is now used + everywhere instead of 'lumi', 'luminosity function', etc. +- renamed the struct `LumiEntry` to `Channel` and `ParseLumiEntryError` to + `ParseChannelError`. Both structures have been moved to the module `boc` +- renamed the macro `lumi_entry` to `channel` ### Removed diff --git a/docs/cli-tutorial.md b/docs/cli-tutorial.md index c47078104..f148248e8 100644 --- a/docs/cli-tutorial.md +++ b/docs/cli-tutorial.md @@ -163,14 +163,14 @@ through them one by one: If you'd like a complete description of all recognized metadata, have a look at the [full list](metadata.md). -### Orders, bins and lumis +### Orders, bins and channels Each *grid* is—basically—a three-dimensional array of *subgrids*, which are the actual interpolation grids. The three dimensions are: - orders (`o`), - bins (`b`) and -- luminosities/lumis (`l`). +- channels (`c`). You can use the subcommand `read` to see exactly how each grid is built. Let's go through them one by one using our grid: @@ -228,13 +228,13 @@ right bin limits, which you've already seen in `convolve`. The column `norm` shows the factor that all convolutions are divided with. Typically, as shown in this case, this is the bin width, but in general this can be different. 
-Finally, let's have a look at the luminosities or *lumis*: +Finally, let's have a look at the channel definition: - pineappl read --lumis LHCB_WP_7TEV.pineappl.lz4 + pineappl read --channels LHCB_WP_7TEV.pineappl.lz4 This prints all partonic initial states that contribute to this process: - l entry entry + c entry entry -+------------+------------ 0 1 × ( 2, -1) 1 × ( 4, -3) 1 1 × (21, -3) 1 × (21, -1) @@ -246,16 +246,16 @@ In this case you see that the up–anti-down (2, -1) and charm–anti-strange (4 -3) initial states (the numbers are [PDG](https://pdg.lbl.gov/) MC IDs) are grouped together in a single *channel*, each with a factor of `1`. In general this number can be different from `1`, if the Monte Carlo decides to factor out -CKM values or electric charges, for instance, to group more lumis with the same -matrix elements together into a single channel. This is an optimization step, -as fewer lumis result in a smaller grid file. +CKM values or electric charges, for instance, to group more contributions with +the same matrix elements together into a single channel. This is an +optimization step, as fewer channels result in a smaller grid file. -Note that lumis with the transposed initial states, for instance +Note that channels with the transposed initial states, for instance anti-down—up, are merged with each other, which always works if the two initial-state hadrons are the same; this is an optimization step, also to keep the size of the grid files small. -All remaining lumis are the ones with a gluon, `21`, or with a photon, `22`. +All remaining channels are the ones with a gluon, `21`, or with a photon, `22`. ## `pineappl orders`: What's the size of each perturbative order? @@ -301,14 +301,13 @@ which will show ## `pineappl channels`: What's the size of each channel? 
-You can also show a convolution separately for each lumi, or in other words -show the size of each partonic channel: +You can also show a convolution separately for each channel: pineappl channels LHCB_WP_7TEV.pineappl.lz4 CT18NNLO This will show the following table, - b etal l size l size l size l size l size + b etal c size c size c size c size c size [] [%] [%] [%] [%] [%] -+----+----+-+------+-+------+-+-----+-+----+-+---- 0 2 2.25 0 111.00 3 -7.91 1 -3.10 2 0.00 4 0.00 @@ -320,13 +319,13 @@ This will show the following table, 6 3.5 4 0 115.65 3 -10.25 1 -5.39 2 0.00 4 0.00 7 4 4.5 0 115.81 3 -8.58 1 -7.23 2 0.00 4 0.00 -The most important lumi is `0`, which is the up-type–anti-down-type +The most important channel is `0`, which is the up-type–anti-down-type combination. The channels with gluons are much smaller and negative. Channels with a photon are zero, because the PDF set that we've chosen doesn't have a photon PDF. Let's try again with `NNPDF31_nnlo_as_0118_luxqed` (remember to install the set first) as the PDF set: - b etal l size l size l size l size l size + b etal c size c size c size c size c size [] [%] [%] [%] [%] [%] -+----+----+-+------+-+------+-+-----+-+----+-+---- 0 2 2.25 0 111.04 3 -7.84 1 -3.23 4 0.02 2 0.01 @@ -392,7 +391,7 @@ and `NNPDF31_nnlo_as_0118_luxqed` for $\sigma_2$ and $\delta \sigma_2$. This will show not only the pull, in the column `total`, but also how this pull is calculated using the different channels: - b etal total l pull l pull l pull l pull l pull + b etal total c pull c pull c pull c pull c pull [] [σ] [σ] [σ] [σ] [σ] [σ] -+----+----+------+-+------+-+------+-+------+-+-----+-+----- 0 2 2.25 0.065 0 0.086 1 -0.058 3 0.024 4 0.009 2 0.005 @@ -407,10 +406,10 @@ calculated using the different channels: Looking at the `total` column you can see that the numbers are much smaller than `1`, where `1` corresponds to a one sigma difference. This we expect knowing that this dataset is used in the fit of both PDF sets. 
The remaining -columns show how the different luminosities (with indices in the `l` column) -contribute to the total pull. For the last bin, for instance, we see lumi `0` +columns show how different channels (with indices in the `c` column) contribute +to the total pull. For the last bin, for instance, we see channel `0` contributes roughly half to the total pull, the remaining pull coming from -lumis `3` and `1`. +channels `3` and `1`. Note that CT18NNLO doesn't have a photon PDF, but the NNPDF set *has* one. However, for these observables the photon PDF contribution is too small to make diff --git a/pineappl/src/boc.rs b/pineappl/src/boc.rs index be5cd0d2a..62599b098 100644 --- a/pineappl/src/boc.rs +++ b/pineappl/src/boc.rs @@ -1,6 +1,8 @@ //! Module containing structures for the 3 dimensions of a [`Grid`]: bins, [`Order`] and channels //! (`boc`). +use float_cmp::approx_eq; +use itertools::Itertools; use serde::{Deserialize, Serialize}; use std::cmp::Ordering; use std::str::FromStr; @@ -256,9 +258,257 @@ impl Order { } } +/// This structure represents a channel. Each channel consists of a tuple containing in the +/// following order, the particle ID of the first incoming parton, then the particle ID of the +/// second parton, and finally a numerical factor that will multiply the result for this specific +/// combination. +#[derive(Clone, Debug, Deserialize, PartialEq, PartialOrd, Serialize)] +pub struct Channel { + entry: Vec<(i32, i32, f64)>, +} + +impl Channel { + /// Constructor for `Channel`. Note that `entry` must be non-empty, otherwise this function + /// panics. 
+ /// + /// # Examples + /// + /// Ordering of the arguments doesn't matter: + /// + /// ```rust + /// use pineappl::boc::Channel; + /// + /// let entry1 = Channel::new(vec![(2, 2, 1.0), (4, 4, 1.0)]); + /// let entry2 = Channel::new(vec![(4, 4, 1.0), (2, 2, 1.0)]); + /// + /// // checks that the ordering doesn't matter + /// assert_eq!(entry1, entry2); + /// ``` + /// + /// Same arguments are merged together: + /// + /// ```rust + /// use pineappl::boc::Channel; + /// + /// let entry1 = Channel::new(vec![(1, 1, 1.0), (1, 1, 3.0), (3, 3, 1.0), (1, 1, 6.0)]); + /// let entry2 = Channel::new(vec![(1, 1, 10.0), (3, 3, 1.0)]); + /// + /// assert_eq!(entry1, entry2); + /// ``` + /// + /// # Panics + /// + /// Creating an empty channel panics: + /// + /// ```rust,should_panic + /// use pineappl::boc::Channel; + /// + /// let _ = Channel::new(vec![]); + /// ``` + #[must_use] + pub fn new(mut entry: Vec<(i32, i32, f64)>) -> Self { + assert!(!entry.is_empty()); + + // sort `entry` because the ordering doesn't matter and because it makes it easier to + // compare `Channel` objects with each other + entry.sort_by(|x, y| (x.0, x.1).cmp(&(y.0, y.1))); + + Self { + entry: entry + .into_iter() + .coalesce(|lhs, rhs| { + // sum the factors of repeated elements + if (lhs.0, lhs.1) == (rhs.0, rhs.1) { + Ok((lhs.0, lhs.1, lhs.2 + rhs.2)) + } else { + Err((lhs, rhs)) + } + }) + // filter zeros + // TODO: find a better than to hardcode the epsilon limit + .filter(|&(_, _, f)| !approx_eq!(f64, f.abs(), 0.0, epsilon = 1e-14)) + .collect(), + } + } + + /// Translates `entry` into a different basis using `translator`. 
+ /// + /// # Examples + /// + /// ```rust + /// use pineappl::boc::Channel; + /// use pineappl::channel; + /// + /// let entry = Channel::translate(&channel![103, 11, 1.0], &|evol_id| match evol_id { + /// 103 => vec![(2, 1.0), (-2, -1.0), (1, -1.0), (-1, 1.0)], + /// _ => vec![(evol_id, 1.0)], + /// }); + /// + /// assert_eq!(entry, channel![2, 11, 1.0; -2, 11, -1.0; 1, 11, -1.0; -1, 11, 1.0]); + /// ``` + pub fn translate(entry: &Self, translator: &dyn Fn(i32) -> Vec<(i32, f64)>) -> Self { + let mut tuples = Vec::new(); + + for &(a, b, factor) in &entry.entry { + for (aid, af) in translator(a) { + for (bid, bf) in translator(b) { + tuples.push((aid, bid, factor * af * bf)); + } + } + } + + Self::new(tuples) + } + + /// Returns a tuple representation of this entry. + /// + /// # Examples + /// + /// ```rust + /// use pineappl::channel; + /// use pineappl::boc::Channel; + /// + /// let entry = channel![4, 4, 1.0; 2, 2, 1.0]; + /// + /// assert_eq!(entry.entry(), [(2, 2, 1.0), (4, 4, 1.0)]); + /// ``` + #[must_use] + pub fn entry(&self) -> &[(i32, i32, f64)] { + &self.entry + } + + /// Creates a new object with the initial states transposed. + #[must_use] + pub fn transpose(&self) -> Self { + Self::new(self.entry.iter().map(|(a, b, c)| (*b, *a, *c)).collect()) + } + + /// If `other` is the same channel when only comparing PIDs and neglecting the factors, return + /// the number `f1 / f2`, where `f1` is the factor from `self` and `f2` is the factor from + /// `other`. 
+ /// + /// # Examples + /// + /// ```rust + /// use pineappl::boc::Channel; + /// + /// let entry1 = Channel::new(vec![(2, 2, 2.0), (4, 4, 2.0)]); + /// let entry2 = Channel::new(vec![(4, 4, 1.0), (2, 2, 1.0)]); + /// let entry3 = Channel::new(vec![(3, 4, 1.0), (2, 2, 1.0)]); + /// let entry4 = Channel::new(vec![(4, 3, 1.0), (2, 3, 2.0)]); + /// + /// assert_eq!(entry1.common_factor(&entry2), Some(2.0)); + /// assert_eq!(entry1.common_factor(&entry3), None); + /// assert_eq!(entry1.common_factor(&entry4), None); + /// ``` + #[must_use] + pub fn common_factor(&self, other: &Self) -> Option { + if self.entry.len() != other.entry.len() { + return None; + } + + let result: Option> = self + .entry + .iter() + .zip(&other.entry) + .map(|(a, b)| ((a.0 == b.0) && (a.1 == b.1)).then_some(a.2 / b.2)) + .collect(); + + result.and_then(|factors| { + if factors + .windows(2) + .all(|win| approx_eq!(f64, win[0], win[1], ulps = 4)) + { + factors.first().copied() + } else { + None + } + }) + } +} + +/// Error type keeping information if [`Channel::from_str`] went wrong. +#[derive(Debug, Error)] +#[error("{0}")] +pub struct ParseChannelError(String); + +impl FromStr for Channel { + type Err = ParseChannelError; + + fn from_str(s: &str) -> Result { + Ok(Self::new( + s.split('+') + .map(|sub| { + sub.split_once('*').map_or_else( + || Err(ParseChannelError(format!("missing '*' in '{sub}'"))), + |(factor, pids)| { + let tuple = pids.split_once(',').map_or_else( + || Err(ParseChannelError(format!("missing ',' in '{pids}'"))), + |(a, b)| { + Ok(( + a.trim() + .strip_prefix('(') + .ok_or_else(|| { + ParseChannelError(format!( + "missing '(' in '{pids}'" + )) + })? + .trim() + .parse::() + .map_err(|err| ParseChannelError(err.to_string()))?, + b.trim() + .strip_suffix(')') + .ok_or_else(|| { + ParseChannelError(format!( + "missing ')' in '{pids}'" + )) + })? 
+ .trim() + .parse::() + .map_err(|err| ParseChannelError(err.to_string()))?, + )) + }, + )?; + + Ok(( + tuple.0, + tuple.1, + str::parse::(factor.trim()) + .map_err(|err| ParseChannelError(err.to_string()))?, + )) + }, + ) + }) + .collect::>()?, + )) + } +} + +/// Helper macro to quickly generate a `Channel` at compile time. +/// +/// # Examples +/// +/// In the following example `entry1` and `entry2` represent the same values: +/// +/// ```rust +/// use pineappl::channel; +/// +/// let entry1 = channel![2, 2, 1.0; 4, 4, 1.0]; +/// let entry2 = channel![4, 4, 1.0; 2, 2, 1.0]; +/// +/// assert_eq!(entry1, entry2); +/// ``` +#[macro_export] +macro_rules! channel { + ($a:expr, $b:expr, $factor:expr $(; $c:expr, $d:expr, $fac:expr)*) => { + $crate::boc::Channel::new(vec![($a, $b, $factor), $(($c, $d, $fac)),*]) + }; +} + #[cfg(test)] mod tests { - use super::{Order, ParseOrderError}; + use super::{Channel, Order, ParseOrderError}; + use crate::pids; #[test] fn order_from_str() { @@ -460,4 +710,60 @@ mod tests { [true, true, true, true, true, true, true, true, true, true, true, true] ); } + + #[test] + fn channel_translate() { + let channel = Channel::translate(&channel![103, 203, 2.0], &pids::evol_to_pdg_mc_ids); + + assert_eq!( + channel, + channel![ 2, 2, 2.0; 2, -2, -2.0; 2, 1, -2.0; 2, -1, 2.0; + -2, 2, 2.0; -2, -2, -2.0; -2, 1, -2.0; -2, -1, 2.0; + 1, 2, -2.0; 1, -2, 2.0; 1, 1, 2.0; 1, -1, -2.0; + -1, 2, -2.0; -1, -2, 2.0; -1, 1, 2.0; -1, -1, -2.0] + ); + } + + #[test] + fn channel_from_str() { + assert_eq!( + str::parse::(" 1 * ( 2 , -2) + 2* (4,-4)").unwrap(), + channel![2, -2, 1.0; 4, -4, 2.0] + ); + + assert_eq!( + str::parse::("* ( 2, -2) + 2* (4,-4)") + .unwrap_err() + .to_string(), + "cannot parse float from empty string" + ); + + assert_eq!( + str::parse::(" 1 ( 2 -2) + 2* (4,-4)") + .unwrap_err() + .to_string(), + "missing '*' in ' 1 ( 2 -2) '" + ); + + assert_eq!( + str::parse::(" 1 * ( 2 -2) + 2* (4,-4)") + .unwrap_err() + .to_string(), + "missing ',' 
in ' ( 2 -2) '" + ); + + assert_eq!( + str::parse::(" 1 * 2, -2) + 2* (4,-4)") + .unwrap_err() + .to_string(), + "missing '(' in ' 2, -2) '" + ); + + assert_eq!( + str::parse::(" 1 * ( 2, -2 + 2* (4,-4)") + .unwrap_err() + .to_string(), + "missing ')' in ' ( 2, -2 '" + ); + } } diff --git a/pineappl/src/evolution.rs b/pineappl/src/evolution.rs index 76f21fa84..336411a63 100644 --- a/pineappl/src/evolution.rs +++ b/pineappl/src/evolution.rs @@ -1,10 +1,9 @@ //! Supporting classes and functions for [`Grid::evolve`]. -use super::boc::Order; +use super::boc::{Channel, Order}; +use super::channel; use super::grid::{Convolution, Grid, GridError}; use super::import_only_subgrid::ImportOnlySubgridV2; -use super::lumi::LumiEntry; -use super::lumi_entry; use super::sparse_array3::SparseArray3; use super::subgrid::{Mu2, Subgrid, SubgridEnum}; use float_cmp::approx_eq; @@ -162,7 +161,7 @@ impl AlphasTable { fn gluon_has_pid_zero(grid: &Grid) -> bool { // if there are any PID zero particles ... - grid.lumi() + grid.channels() .iter() .any(|entry| entry.entry().iter().any(|&(a, b, _)| (a == 0) || (b == 0))) // and if lumi_id_types = pdg_mc_ids or if the key-value pair doesn't exist @@ -186,7 +185,7 @@ fn pid_slices( .cartesian_product(0..operator.dim().0) .filter(|&(pid0_idx, pid1_idx)| { // 1) at least one element of the operator must be non-zero, and 2) the pid must be - // contained in the lumi somewhere + // contained in some channel operator .slice(s![pid1_idx, .., pid0_idx, ..]) .iter() @@ -223,7 +222,7 @@ fn pid_slices( Ok((pid_indices, pids)) } -fn lumi0_with_one(pids: &[(i32, i32)]) -> Vec { +fn channels0_with_one(pids: &[(i32, i32)]) -> Vec { let mut pids0: Vec<_> = pids.iter().map(|&(pid0, _)| pid0).collect(); pids0.sort_unstable(); pids0.dedup(); @@ -231,7 +230,7 @@ fn lumi0_with_one(pids: &[(i32, i32)]) -> Vec { pids0 } -fn lumi0_with_two(pids_a: &[(i32, i32)], pids_b: &[(i32, i32)]) -> Vec<(i32, i32)> { +fn channels0_with_two(pids_a: &[(i32, i32)], pids_b: &[(i32, 
i32)]) -> Vec<(i32, i32)> { let mut pids0_a: Vec<_> = pids_a.iter().map(|&(pid0, _)| pid0).collect(); pids0_a.sort_unstable(); pids0_a.dedup(); @@ -406,28 +405,28 @@ pub(crate) fn evolve_slice_with_one( order_mask: &[bool], xi: (f64, f64), alphas_table: &AlphasTable, -) -> Result<(Array3, Vec), GridError> { +) -> Result<(Array3, Vec), GridError> { let gluon_has_pid_zero = gluon_has_pid_zero(grid); let has_pdf1 = grid.convolutions()[0] != Convolution::None; let (pid_indices, pids) = pid_slices(operator, info, gluon_has_pid_zero, &|pid| { - grid.lumi() + grid.channels() .iter() - .flat_map(LumiEntry::entry) + .flat_map(Channel::entry) .any(|&(a, b, _)| if has_pdf1 { a } else { b } == pid) })?; - let lumi0 = lumi0_with_one(&pids); - let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * lumi0.len()); + let channels0 = channels0_with_one(&pids); + let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * channels0.len()); let new_axis = if has_pdf1 { 2 } else { 1 }; let mut last_x1 = Vec::new(); let mut ops = Vec::new(); for subgrids_ol in grid.subgrids().axis_iter(Axis(1)) { - let mut tables = vec![Array1::zeros(info.x0.len()); lumi0.len()]; + let mut tables = vec![Array1::zeros(info.x0.len()); channels0.len()]; - for (subgrids_o, lumi1) in subgrids_ol.axis_iter(Axis(1)).zip(grid.lumi()) { + for (subgrids_o, channel1) in subgrids_ol.axis_iter(Axis(1)).zip(grid.channels()) { let (x1_a, x1_b, array) = ndarray_from_subgrid_orders_slice( info, &subgrids_o, @@ -453,13 +452,14 @@ pub(crate) fn evolve_slice_with_one( last_x1 = x1; } - for (&pid1, &factor) in lumi1 - .entry() - .iter() - .map(|(a, b, f)| if has_pdf1 { (a, f) } else { (b, f) }) + for (&pid1, &factor) in + channel1 + .entry() + .iter() + .map(|(a, b, f)| if has_pdf1 { (a, f) } else { (b, f) }) { for (fk_table, op) in - lumi0 + channels0 .iter() .zip(tables.iter_mut()) .filter_map(|(&pid0, fk_table)| { @@ -500,19 +500,19 @@ pub(crate) fn evolve_slice_with_one( } let pid = if 
grid.convolutions()[0] != Convolution::None { - grid.lumi()[0].entry()[0].1 + grid.channels()[0].entry()[0].1 } else { - grid.lumi()[0].entry()[0].0 + grid.channels()[0].entry()[0].0 }; Ok(( Array1::from_iter(sub_fk_tables) - .into_shape((1, grid.bin_info().bins(), lumi0.len())) + .into_shape((1, grid.bin_info().bins(), channels0.len())) .unwrap(), - lumi0 + channels0 .iter() .map(|&a| { - lumi_entry![ + channel![ if has_pdf1 { a } else { pid }, if has_pdf1 { pid } else { a }, 1.0 @@ -529,24 +529,24 @@ pub(crate) fn evolve_slice_with_two( order_mask: &[bool], xi: (f64, f64), alphas_table: &AlphasTable, -) -> Result<(Array3, Vec), GridError> { +) -> Result<(Array3, Vec), GridError> { let gluon_has_pid_zero = gluon_has_pid_zero(grid); let (pid_indices_a, pids_a) = pid_slices(operator, info, gluon_has_pid_zero, &|pid1| { - grid.lumi() + grid.channels() .iter() - .flat_map(LumiEntry::entry) + .flat_map(Channel::entry) .any(|&(a, _, _)| a == pid1) })?; let (pid_indices_b, pids_b) = pid_slices(operator, info, gluon_has_pid_zero, &|pid1| { - grid.lumi() + grid.channels() .iter() - .flat_map(LumiEntry::entry) + .flat_map(Channel::entry) .any(|&(_, b, _)| b == pid1) })?; - let lumi0 = lumi0_with_two(&pids_a, &pids_b); - let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * lumi0.len()); + let channels0 = channels0_with_two(&pids_a, &pids_b); + let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * channels0.len()); let mut last_x1a = Vec::new(); let mut last_x1b = Vec::new(); @@ -554,9 +554,9 @@ pub(crate) fn evolve_slice_with_two( let mut operators_b = Vec::new(); for subgrids_ol in grid.subgrids().axis_iter(Axis(1)) { - let mut tables = vec![Array2::zeros((info.x0.len(), info.x0.len())); lumi0.len()]; + let mut tables = vec![Array2::zeros((info.x0.len(), info.x0.len())); channels0.len()]; - for (subgrids_o, lumi1) in subgrids_ol.axis_iter(Axis(1)).zip(grid.lumi()) { + for (subgrids_o, channel1) in 
subgrids_ol.axis_iter(Axis(1)).zip(grid.channels()) { let (x1_a, x1_b, array) = ndarray_from_subgrid_orders_slice( info, &subgrids_o, @@ -588,26 +588,21 @@ pub(crate) fn evolve_slice_with_two( let mut tmp = Array2::zeros((last_x1a.len(), info.x0.len())); - for &(pida1, pidb1, factor) in lumi1.entry() { - for (fk_table, opa, opb) in - lumi0 - .iter() - .zip(tables.iter_mut()) - .filter_map(|(&(pida0, pidb0), fk_table)| { - pids_a - .iter() - .zip(operators_a.iter()) - .find_map(|(&(pa0, pa1), opa)| { - (pa0 == pida0 && pa1 == pida1).then_some(opa) - }) - .zip(pids_b.iter().zip(operators_b.iter()).find_map( - |(&(pb0, pb1), opb)| { - (pb0 == pidb0 && pb1 == pidb1).then_some(opb) - }, - )) - .map(|(opa, opb)| (fk_table, opa, opb)) - }) - { + for &(pida1, pidb1, factor) in channel1.entry() { + for (fk_table, opa, opb) in channels0.iter().zip(tables.iter_mut()).filter_map( + |(&(pida0, pidb0), fk_table)| { + pids_a + .iter() + .zip(operators_a.iter()) + .find_map(|(&(pa0, pa1), opa)| { + (pa0 == pida0 && pa1 == pida1).then_some(opa) + }) + .zip(pids_b.iter().zip(operators_b.iter()).find_map( + |(&(pb0, pb1), opb)| (pb0 == pidb0 && pb1 == pidb1).then_some(opb), + )) + .map(|(opa, opb)| (fk_table, opa, opb)) + }, + ) { linalg::general_mat_mul(1.0, &array, &opb.t(), 0.0, &mut tmp); linalg::general_mat_mul(factor, opa, &tmp, 1.0, fk_table); } @@ -632,8 +627,11 @@ pub(crate) fn evolve_slice_with_two( Ok(( Array1::from_iter(sub_fk_tables) - .into_shape((1, grid.bin_info().bins(), lumi0.len())) + .into_shape((1, grid.bin_info().bins(), channels0.len())) .unwrap(), - lumi0.iter().map(|&(a, b)| lumi_entry![a, b, 1.0]).collect(), + channels0 + .iter() + .map(|&(a, b)| channel![a, b, 1.0]) + .collect(), )) } diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 6b041abda..5245805a5 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -19,9 +19,9 @@ use thiserror::Error; /// [`FkTable::muf2`]. 
/// - all subgrids, for both hadronic initial states (if both initial states are hadronic), share /// the same `x` grid. See [`FkTable::x_grid`]. -/// - the luminosity function is *simple*, meaning that every entry consists of a single pair of +/// - the channel definitions are *simple*, meaning that every entry consists of a single pair of /// partons with trivial factor `1.0`, and all tuples are distinct from each other. See -/// [`Grid::lumi`]. +/// [`Grid::channels`]. /// - the FK table's grid contains only a single [`Order`], whose exponents are all zero. #[repr(transparent)] pub struct FkTable { @@ -34,8 +34,8 @@ pub enum TryFromGridError { /// Error if the grid contains multiple scales instead of a single one. #[error("multiple scales detected")] MultipleScales, - /// Error if the luminosity is not simple. - #[error("complicated luminosity function detected")] + /// Error if the channels are not simple. + #[error("complicated channel function detected")] InvalidLumi, /// Error if the order of the grid was not a single one with all zeros in the exponents. #[error("multiple orders detected")] @@ -142,7 +142,7 @@ impl FkTable { self.grid } - /// Returns the FK table represented as a four-dimensional array indexed by `bin`, `lumi`, + /// Returns the FK table represented as a four-dimensional array indexed by `bin`, `channel`, /// `x1` and `x2`, in this order. 
/// /// # Panics @@ -156,14 +156,14 @@ impl FkTable { let mut result = Array4::zeros(( self.bins(), - self.grid.lumi().len(), + self.grid.channels().len(), if has_pdf1 { x_grid.len() } else { 1 }, if has_pdf2 { x_grid.len() } else { 1 }, )); for bin in 0..self.bins() { - for lumi in 0..self.grid.lumi().len() { - let subgrid = self.grid().subgrid(0, bin, lumi); + for channel in 0..self.grid.channels().len() { + let subgrid = self.grid().subgrid(0, bin, channel); let indices1 = if has_pdf1 { subgrid @@ -187,7 +187,7 @@ impl FkTable { }; for ((_, ix1, ix2), value) in subgrid.indexed_iter() { - result[[bin, lumi, indices1[ix1], indices2[ix2]]] = value; + result[[bin, channel, indices1[ix1], indices2[ix2]]] = value; } } } @@ -231,11 +231,11 @@ impl FkTable { self.grid.key_values() } - /// Returns the (simplified) luminosity function for this `FkTable`. All factors are `1.0`. + /// Return the channel definition for this `FkTable`. All factors are `1.0`. #[must_use] - pub fn lumi(&self) -> Vec<(i32, i32)> { + pub fn channels(&self) -> Vec<(i32, i32)> { self.grid - .lumi() + .channels() .iter() .map(|entry| (entry.entry()[0].0, entry.entry()[0].1)) .collect() @@ -282,10 +282,10 @@ impl FkTable { &self, lumi_cache: &mut LumiCache, bin_indices: &[usize], - lumi_mask: &[bool], + channel_mask: &[bool], ) -> Vec { self.grid - .convolve(lumi_cache, &[], bin_indices, lumi_mask, &[(1.0, 1.0)]) + .convolve(lumi_cache, &[], bin_indices, channel_mask, &[(1.0, 1.0)]) } /// Set a metadata key-value pair @@ -350,7 +350,7 @@ impl FkTable { } } - self.grid.rewrite_lumi(&add, &[]); + self.grid.rewrite_channels(&add, &[]); // store the assumption so that we can check it later on self.grid @@ -377,8 +377,8 @@ impl TryFrom for FkTable { } for bin in 0..grid.bin_info().bins() { - for lumi in 0..grid.lumi().len() { - let subgrid = grid.subgrid(0, bin, lumi); + for channel in 0..grid.channels().len() { + let subgrid = grid.subgrid(0, bin, channel); if subgrid.is_empty() { continue; @@ -398,15 
+398,17 @@ impl TryFrom for FkTable { } } - for lumi in grid.lumi() { - let entry = lumi.entry(); + for channel in grid.channels() { + let entry = channel.entry(); if entry.len() != 1 || entry[0].2 != 1.0 { return Err(TryFromGridError::InvalidLumi); } } - if (1..grid.lumi().len()).any(|i| grid.lumi()[i..].contains(&grid.lumi()[i - 1])) { + if (1..grid.channels().len()) + .any(|i| grid.channels()[i..].contains(&grid.channels()[i - 1])) + { return Err(TryFromGridError::InvalidLumi); } diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index ca159f2a1..36251fff1 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1,13 +1,13 @@ //! Module containing all traits and supporting structures for grids. use super::bin::{BinInfo, BinLimits, BinRemapper}; -use super::boc::Order; +use super::boc::{Channel, Order}; use super::empty_subgrid::EmptySubgridV1; use super::evolution::{self, AlphasTable, EvolveInfo, OperatorInfo, OperatorSliceInfo}; use super::fk_table::FkTable; use super::import_only_subgrid::ImportOnlySubgridV2; use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; -use super::lumi::{LumiCache, LumiEntry}; +use super::lumi::LumiCache; use super::ntuple_subgrid::NtupleSubgridV1; use super::pids::{self, PidBasis}; use super::subgrid::{ExtraSubgridParams, Mu2, Subgrid, SubgridEnum, SubgridParams}; @@ -206,7 +206,7 @@ bitflags! { const STRIP_EMPTY_ORDERS = 0b1000; /// Merge the subgrids of channels which have the same definition. const MERGE_SAME_CHANNELS = 0b10000; - /// Remove all channels ([`Grid::lumi`]), which do not contain any non-zero subgrids. + /// Remove all channels ([`Grid::channels`]), which do not contain any non-zero subgrids. const STRIP_EMPTY_CHANNELS = 0b10_0000; } } @@ -216,7 +216,7 @@ bitflags! 
{ #[derive(Clone, Deserialize, Serialize)] pub struct Grid { subgrids: Array3, - lumi: Vec, + channels: Vec, bin_limits: BinLimits, orders: Vec, subgrid_params: SubgridParams, @@ -272,18 +272,18 @@ impl Grid { /// Constructor. #[must_use] pub fn new( - lumi: Vec, + channels: Vec, orders: Vec, bin_limits: Vec, subgrid_params: SubgridParams, ) -> Self { Self { subgrids: Array3::from_shape_simple_fn( - (orders.len(), bin_limits.len() - 1, lumi.len()), + (orders.len(), bin_limits.len() - 1, channels.len()), || EmptySubgridV1.into(), ), orders, - lumi, + channels, bin_limits: BinLimits::new(bin_limits), more_members: MoreMembers::V3(Mmv3::new( LagrangeSubgridV2::new(&subgrid_params, &ExtraSubgridParams::from(&subgrid_params)) @@ -303,7 +303,7 @@ impl Grid { /// /// If `subgrid_type` is none of the values listed above, an error is returned. pub fn with_subgrid_type( - lumi: Vec, + channels: Vec, orders: Vec, bin_limits: Vec, subgrid_params: SubgridParams, @@ -322,11 +322,11 @@ impl Grid { Ok(Self { subgrids: Array3::from_shape_simple_fn( - (orders.len(), bin_limits.len() - 1, lumi.len()), + (orders.len(), bin_limits.len() - 1, channels.len()), || EmptySubgridV1.into(), ), orders, - lumi, + channels, bin_limits: BinLimits::new(bin_limits), subgrid_params, more_members: MoreMembers::V3(Mmv3::new(subgrid_template)), @@ -350,23 +350,22 @@ impl Grid { PidBasis::Pdg } - fn pdg_lumi(&self) -> Cow<[LumiEntry]> { + fn pdg_channels(&self) -> Cow<[Channel]> { match self.pid_basis() { PidBasis::Evol => self - .lumi + .channels .iter() - .map(|entry| LumiEntry::translate(entry, &pids::evol_to_pdg_mc_ids)) + .map(|entry| Channel::translate(entry, &pids::evol_to_pdg_mc_ids)) .collect(), - PidBasis::Pdg => Cow::Borrowed(self.lumi()), + PidBasis::Pdg => Cow::Borrowed(self.channels()), } } /// Perform a convolution using the PDFs and strong coupling in `lumi_cache`, and only - /// selecting only the orders, bins and luminosities corresponding to `order_mask`, - /// `bin_indices` and 
`lumi_mask`. A variation of the scales - /// is performed using the factors in `xi`; the first factor varies the renormalization scale, - /// the second the factorization scale. Note that for the variation to be trusted all non-zero - /// log-grids must be contained. + /// selecting only the orders, bins and channels corresponding to `order_mask`, `bin_indices` + /// and `channel_mask`. A variation of the scales is performed using the factors in `xi`; the + /// first factor varies the renormalization scale, the second the factorization scale. Note + /// that for the variation to be trusted all non-zero log-grids must be contained. /// /// # Panics /// @@ -376,7 +375,7 @@ impl Grid { lumi_cache: &mut LumiCache, order_mask: &[bool], bin_indices: &[usize], - lumi_mask: &[bool], + channel_mask: &[bool], xi: &[(f64, f64)], ) -> Vec { lumi_cache.setup(self, xi).unwrap(); @@ -388,10 +387,10 @@ impl Grid { }; let mut bins = vec![0.0; bin_indices.len() * xi.len()]; let normalizations = self.bin_info().normalizations(); - let self_lumi = self.pdg_lumi(); + let pdg_channels = self.pdg_channels(); for (xi_index, &(xir, xif)) in xi.iter().enumerate() { - for ((ord, bin, lumi), subgrid) in self.subgrids.indexed_iter() { + for ((ord, bin, chan), subgrid) in self.subgrids.indexed_iter() { let order = &self.orders[ord]; if ((order.logxir > 0) && (xir == 1.0)) || ((order.logxif > 0) && (xif == 1.0)) { @@ -399,7 +398,7 @@ impl Grid { } if (!order_mask.is_empty() && !order_mask[ord]) - || (!lumi_mask.is_empty() && !lumi_mask[lumi]) + || (!channel_mask.is_empty() && !channel_mask[chan]) { continue; } @@ -412,7 +411,7 @@ impl Grid { continue; } - let lumi_entry = &self_lumi[lumi]; + let channel = &pdg_channels[chan]; let mu2_grid = subgrid.mu2_grid(); let x1_grid = subgrid.x1_grid(); let x2_grid = subgrid.x2_grid(); @@ -425,7 +424,7 @@ impl Grid { let x2 = x2_grid[ix2]; let mut lumi = 0.0; - for entry in lumi_entry.entry() { + for entry in channel.entry() { let xfx1 = 
lumi_cache.xfx1(entry.0, ix1, imu2); let xfx2 = lumi_cache.xfx2(entry.1, ix2, imu2); lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); @@ -452,7 +451,7 @@ impl Grid { bins } - /// Convolutes a single subgrid `(order, bin, lumi)` with the PDFs strong coupling given by + /// Convolutes a single subgrid `(order, bin, channel)` with the PDFs strong coupling given by /// `xfx1`, `xfx2` and `alphas`. The convolution result is fully differentially, such that the /// axes of the result correspond to the values given by the subgrid `q2`, `x1` and `x2` grid /// values. @@ -465,19 +464,19 @@ impl Grid { lumi_cache: &mut LumiCache, ord: usize, bin: usize, - lumi: usize, + channel: usize, xir: f64, xif: f64, ) -> Array3 { lumi_cache.setup(self, &[(xir, xif)]).unwrap(); let normalizations = self.bin_info().normalizations(); - let self_lumi = self.pdg_lumi(); + let pdg_channels = self.pdg_channels(); - let subgrid = &self.subgrids[[ord, bin, lumi]]; + let subgrid = &self.subgrids[[ord, bin, channel]]; let order = &self.orders[ord]; - let lumi_entry = &self_lumi[lumi]; + let channel = &pdg_channels[channel]; let mu2_grid = subgrid.mu2_grid(); let x1_grid = subgrid.x1_grid(); let x2_grid = subgrid.x2_grid(); @@ -491,7 +490,7 @@ impl Grid { let x2 = x2_grid[ix2]; let mut lumi = 0.0; - for entry in lumi_entry.entry() { + for entry in channel.entry() { let xfx1 = lumi_cache.xfx1(entry.0, ix1, imu2); let xfx2 = lumi_cache.xfx2(entry.1, ix2, imu2); lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); @@ -516,14 +515,14 @@ impl Grid { array } - /// Fills the grid with an ntuple for the given `order`, `observable`, and `lumi`. + /// Fills the grid with an ntuple for the given `order`, `observable`, and `channel`. 
/// /// # Panics /// /// TODO - pub fn fill(&mut self, order: usize, observable: f64, lumi: usize, ntuple: &Ntuple) { + pub fn fill(&mut self, order: usize, observable: f64, channel: usize, ntuple: &Ntuple) { if let Some(bin) = self.bin_limits.index(observable) { - let subgrid = &mut self.subgrids[[order, bin, lumi]]; + let subgrid = &mut self.subgrids[[order, bin, channel]]; if let SubgridEnum::EmptySubgridV1(_) = subgrid { if let MoreMembers::V3(mmv3) = &self.more_members { *subgrid = mmv3.subgrid_template.clone_empty(); @@ -614,8 +613,8 @@ impl Grid { } /// Fills the grid with events for the parton momentum fractions `x1` and `x2`, the scale `q2`, - /// and the `order` and `observable`. The events are stored in `weights` and must be ordered as - /// the corresponding luminosity function was created. + /// and the `order` and `observable`. The events are stored in `weights` and their ordering + /// corresponds to the ordering of [`Grid::channels`]. pub fn fill_all( &mut self, order: usize, @@ -623,11 +622,11 @@ impl Grid { ntuple: &Ntuple<()>, weights: &[f64], ) { - for (lumi, weight) in weights.iter().enumerate() { + for (channel, weight) in weights.iter().enumerate() { self.fill( order, observable, - lumi, + channel, &Ntuple { x1: ntuple.x1, x2: ntuple.x2, @@ -638,10 +637,10 @@ impl Grid { } } - /// Returns the luminosity function. + /// Return the channels for this `Grid`. #[must_use] - pub fn lumi(&self) -> &[LumiEntry] { - &self.lumi + pub fn channels(&self) -> &[Channel] { + &self.channels } /// Merges the bins for the corresponding range together in a single one. 
@@ -663,18 +662,19 @@ impl Grid { let bin_count = self.bin_info().bins(); let mut old_subgrids = mem::replace( &mut self.subgrids, - Array3::from_shape_simple_fn((self.orders.len(), bin_count, self.lumi.len()), || { - EmptySubgridV1.into() - }), + Array3::from_shape_simple_fn( + (self.orders.len(), bin_count, self.channels.len()), + || EmptySubgridV1.into(), + ), ); - for ((order, bin, lumi), subgrid) in old_subgrids.indexed_iter_mut() { + for ((order, bin, channel), subgrid) in old_subgrids.indexed_iter_mut() { if subgrid.is_empty() { continue; } if bins.contains(&bin) { - let new_subgrid = &mut self.subgrids[[order, bins.start, lumi]]; + let new_subgrid = &mut self.subgrids[[order, bins.start, channel]]; if new_subgrid.is_empty() { mem::swap(new_subgrid, subgrid); @@ -688,7 +688,7 @@ impl Grid { bin }; - mem::swap(&mut self.subgrids[[order, new_bin, lumi]], subgrid); + mem::swap(&mut self.subgrids[[order, new_bin, channel]], subgrid); } } @@ -708,7 +708,7 @@ impl Grid { pub fn merge(&mut self, mut other: Self) -> Result<(), GridError> { let mut new_orders: Vec = Vec::new(); let mut new_bins = 0; - let mut new_entries: Vec = Vec::new(); + let mut new_entries: Vec = Vec::new(); if self.bin_info() != other.bin_info() { let lhs_bins = self.bin_info().bins(); @@ -746,7 +746,7 @@ impl Grid { .filter(|((_, _, _), subgrid)| !subgrid.is_empty()) { let other_order = &other.orders[i]; - let other_entry = &other.lumi[k]; + let other_entry = &other.channels[k]; if !self .orders @@ -758,7 +758,7 @@ impl Grid { } if !self - .lumi + .channels() .iter() .chain(new_entries.iter()) .any(|y| y == other_entry) @@ -772,7 +772,7 @@ impl Grid { } self.orders.append(&mut new_orders); - self.lumi.append(&mut new_entries); + self.channels.append(&mut new_entries); let bin_indices: Vec<_> = (0..other.bin_info().bins()) .map(|bin| { @@ -788,11 +788,11 @@ impl Grid { .filter(|((_, _, _), subgrid)| !subgrid.is_empty()) { let other_order = &other.orders[i]; - let other_entry = &other.lumi[k]; + 
let other_entry = &other.channels[k]; let self_i = self.orders.iter().position(|x| x == other_order).unwrap(); let self_j = bin_indices[j]; - let self_k = self.lumi.iter().position(|y| y == other_entry).unwrap(); + let self_k = self.channels.iter().position(|y| y == other_entry).unwrap(); if self.subgrids[[self_i, self_j, self_k]].is_empty() { mem::swap(&mut self.subgrids[[self_i, self_j, self_k]], subgrid); @@ -840,7 +840,7 @@ impl Grid { .map(|s| s.parse::()) { Some(Ok(pid)) => { - let condition = !self.lumi().iter().all(|entry| { + let condition = !self.channels().iter().all(|entry| { entry.entry().iter().all(|&channels| match index { 1 => channels.0 == pid, 2 => channels.1 == pid, @@ -970,15 +970,15 @@ impl Grid { &mut self.orders } - /// Set the luminosity function for this grid. - pub fn set_lumis(&mut self, lumis: Vec) { - self.lumi = lumis; + /// Set the channels for this grid. + pub fn set_channels(&mut self, channels: Vec) { + self.channels = channels; } - /// Returns the subgrid with the specified indices `order`, `bin`, and `lumi`. + /// Returns the subgrid with the specified indices `order`, `bin`, and `channel`. #[must_use] - pub fn subgrid(&self, order: usize, bin: usize, lumi: usize) -> &SubgridEnum { - &self.subgrids[[order, bin, lumi]] + pub fn subgrid(&self, order: usize, bin: usize, channel: usize) -> &SubgridEnum { + &self.subgrids[[order, bin, channel]] } /// Returns all subgrids as an `Array3`. @@ -987,9 +987,10 @@ impl Grid { &self.subgrids } - /// Replaces the subgrid for the specified indices `order`, `bin`, and `lumi` with `subgrid`. - pub fn set_subgrid(&mut self, order: usize, bin: usize, lumi: usize, subgrid: SubgridEnum) { - self.subgrids[[order, bin, lumi]] = subgrid; + /// Replaces the subgrid for the specified indices `order`, `bin`, and `channel` with + /// `subgrid`. 
+ pub fn set_subgrid(&mut self, order: usize, bin: usize, channel: usize, subgrid: SubgridEnum) { + self.subgrids[[order, bin, channel]] = subgrid; } /// Sets a remapper. A remapper can change the dimensions and limits of each bin in this grid. @@ -1106,7 +1107,7 @@ impl Grid { /// numerical equality is tested using a tolerance of `ulps`, given in [units of least /// precision](https://docs.rs/float-cmp/latest/float_cmp/index.html#some-explanation). pub fn dedup_channels(&mut self, ulps: i64) { - let mut indices: Vec = (0..self.lumi.len()).collect(); + let mut indices: Vec = (0..self.channels.len()).collect(); while let Some(index) = indices.pop() { if let Some(other_index) = indices.iter().copied().find(|&other_index| { @@ -1142,23 +1143,23 @@ impl Grid { true }) { - let old_channel = self.lumi.remove(index).entry().to_vec(); - let mut new_channel = self.lumi[other_index].entry().to_vec(); + let old_channel = self.channels.remove(index).entry().to_vec(); + let mut new_channel = self.channels[other_index].entry().to_vec(); new_channel.extend(old_channel); - self.lumi[other_index] = LumiEntry::new(new_channel); + self.channels[other_index] = Channel::new(new_channel); self.subgrids.remove_index(Axis(2), index); } } } fn merge_same_channels(&mut self) { - let mut indices: Vec<_> = (0..self.lumi.len()).rev().collect(); + let mut indices: Vec<_> = (0..self.channels.len()).rev().collect(); - // merge luminosities that are the same + // merge channels that are the same while let Some(index) = indices.pop() { if let Some((other_index, factor)) = indices.iter().find_map(|&i| { - self.lumi[i] - .common_factor(&self.lumi[index]) + self.channels[i] + .common_factor(&self.channels[index]) .map(|factor| (i, factor)) }) { let (mut a, mut b) = self @@ -1183,21 +1184,21 @@ impl Grid { } fn strip_empty_channels(&mut self) { - let mut keep_lumi_indices = vec![]; - let mut new_lumi_entries = vec![]; + let mut keep_channel_indices = vec![]; + let mut new_channel_entries = vec![]; - // 
only keep luminosities that have non-zero factors and for which at least one subgrid is + // only keep channels that have non-zero factors and for which at least one subgrid is // non-empty - for (lumi, entry) in self.lumi.iter().enumerate() { + for (channel, entry) in self.channels.iter().enumerate() { if !entry.entry().iter().all(|&(_, _, factor)| factor == 0.0) && !self .subgrids - .slice(s![.., .., lumi]) + .slice(s![.., .., channel]) .iter() .all(Subgrid::is_empty) { - keep_lumi_indices.push(lumi); - new_lumi_entries.push(entry.clone()); + keep_channel_indices.push(channel); + new_channel_entries.push(entry.clone()); } } @@ -1206,17 +1207,17 @@ impl Grid { ( self.orders.len(), self.bin_info().bins(), - keep_lumi_indices.len(), + keep_channel_indices.len(), ), - |(order, bin, new_lumi)| { + |(order, bin, new_channel)| { mem::replace( - &mut self.subgrids[[order, bin, keep_lumi_indices[new_lumi]]], + &mut self.subgrids[[order, bin, keep_channel_indices[new_channel]]], EmptySubgridV1.into(), ) }, ); - self.lumi = new_lumi_entries; + self.channels = new_channel_entries; self.subgrids = new_subgrids; } @@ -1242,12 +1243,12 @@ impl Grid { return; } - let mut indices: Vec = (0..self.lumi.len()).rev().collect(); + let mut indices: Vec = (0..self.channels.len()).rev().collect(); while let Some(index) = indices.pop() { - let lumi_entry = &self.lumi[index]; + let channel_entry = &self.channels[index]; - if *lumi_entry == lumi_entry.transpose() { + if *channel_entry == channel_entry.transpose() { // check if in all cases the limits are compatible with merging self.subgrids .slice_mut(s![.., .., index]) @@ -1260,7 +1261,7 @@ impl Grid { } else if let Some((j, &other_index)) = indices .iter() .enumerate() - .find(|(_, i)| self.lumi[**i] == lumi_entry.transpose()) + .find(|(_, i)| self.channels[**i] == channel_entry.transpose()) { indices.remove(j); @@ -1339,7 +1340,7 @@ impl Grid { let mut x1 = Vec::new(); let mut pids1 = Vec::new(); - for (lumi, subgrid) in self + for 
(channel, subgrid) in self .subgrids() .indexed_iter() .filter_map(|(tuple, subgrid)| { @@ -1366,10 +1367,10 @@ impl Grid { x1.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLVE_INFO_TOL_ULPS)); if has_pdf1 { - pids1.extend(self.lumi()[lumi].entry().iter().map(|(a, _, _)| a)); + pids1.extend(self.channels()[channel].entry().iter().map(|(a, _, _)| a)); } if has_pdf2 { - pids1.extend(self.lumi()[lumi].entry().iter().map(|(_, b, _)| b)); + pids1.extend(self.channels()[channel].entry().iter().map(|(_, b, _)| b)); } pids1.sort_unstable(); @@ -1475,7 +1476,7 @@ impl Grid { let view = operator.view(); - let (subgrids, lumi) = if self.convolutions()[0] != Convolution::None + let (subgrids, channels) = if self.convolutions()[0] != Convolution::None && self.convolutions()[1] != Convolution::None { evolution::evolve_slice_with_two(self, &view, &info, order_mask, xi, alphas_table) @@ -1485,7 +1486,7 @@ impl Grid { let mut rhs = Self { subgrids, - lumi, + channels, bin_limits: self.bin_limits.clone(), orders: vec![Order::new(0, 0, 0, 0)], subgrid_params: SubgridParams::default(), @@ -1610,19 +1611,19 @@ impl Grid { pub fn rotate_pid_basis(&mut self, pid_basis: PidBasis) { match (self.pid_basis(), pid_basis) { (PidBasis::Pdg, PidBasis::Evol) => { - self.lumi = self - .lumi + self.channels = self + .channels() .iter() - .map(|channel| LumiEntry::translate(channel, &pids::pdg_mc_pids_to_evol)) + .map(|channel| Channel::translate(channel, &pids::pdg_mc_pids_to_evol)) .collect(); self.set_key_value("lumi_id_types", "evol"); } (PidBasis::Evol, PidBasis::Pdg) => { - self.lumi = self - .lumi + self.channels = self + .channels() .iter() - .map(|channel| LumiEntry::translate(channel, &pids::evol_to_pdg_mc_ids)) + .map(|channel| Channel::translate(channel, &pids::evol_to_pdg_mc_ids)) .collect(); self.set_key_value("lumi_id_types", "pdg_mc_ids"); @@ -1640,7 +1641,7 @@ impl Grid { .iter() .copied() // ignore indices corresponding to bin that don't exist - .filter(|&index| index < 
self.lumi().len()) + .filter(|&index| index < self.channels().len()) .collect(); // sort and remove repeated indices @@ -1650,17 +1651,17 @@ impl Grid { let channel_indices = channel_indices; for index in channel_indices { - self.lumi.remove(index); + self.channels.remove(index); self.subgrids.remove_index(Axis(2), index); } } - pub(crate) fn rewrite_lumi(&mut self, add: &[(i32, i32)], del: &[i32]) { - self.lumi = self - .lumi + pub(crate) fn rewrite_channels(&mut self, add: &[(i32, i32)], del: &[i32]) { + self.channels = self + .channels() .iter() .map(|entry| { - LumiEntry::new( + Channel::new( entry .entry() .iter() @@ -1691,26 +1692,25 @@ impl Grid { .collect(); } - /// Splits the grid such that the luminosity function contains only a single combination per - /// channel. - pub fn split_lumi(&mut self) { + /// Splits the grid such that each channel contains only a single tuple of PIDs. + pub fn split_channels(&mut self) { let indices: Vec<_> = self - .lumi + .channels() .iter() .enumerate() .flat_map(|(index, entry)| iter::repeat(index).take(entry.entry().len())) .collect(); self.subgrids = self.subgrids.select(Axis(2), &indices); - self.lumi = self - .lumi + self.channels = self + .channels() .iter() .flat_map(|entry| { entry .entry() .iter() .copied() - .map(move |entry| LumiEntry::new(vec![entry])) + .map(move |entry| Channel::new(vec![entry])) }) .collect(); } @@ -1719,7 +1719,7 @@ impl Grid { #[cfg(test)] mod tests { use super::*; - use crate::lumi_entry; + use crate::channel; use float_cmp::assert_approx_eq; use std::fs::File; @@ -1742,8 +1742,8 @@ mod tests { fn grid_merge_empty_subgrids() { let mut grid = Grid::new( vec![ - lumi_entry![2, 2, 1.0; 4, 4, 1.0], - lumi_entry![1, 1, 1.0; 3, 3, 1.0], + channel![2, 2, 1.0; 4, 4, 1.0], + channel![1, 1, 1.0; 3, 3, 1.0], ], vec![Order::new(0, 2, 0, 0)], vec![0.0, 0.25, 0.5, 0.75, 1.0], @@ -1751,14 +1751,14 @@ mod tests { ); assert_eq!(grid.bin_info().bins(), 4); - assert_eq!(grid.lumi().len(), 2); + 
assert_eq!(grid.channels().len(), 2); assert_eq!(grid.orders().len(), 1); let other = Grid::new( vec![ // differently ordered than `grid` - lumi_entry![1, 1, 1.0; 3, 3, 1.0], - lumi_entry![2, 2, 1.0; 4, 4, 1.0], + channel![1, 1, 1.0; 3, 3, 1.0], + channel![2, 2, 1.0; 4, 4, 1.0], ], vec![Order::new(1, 2, 0, 0), Order::new(1, 2, 0, 1)], vec![0.0, 0.25, 0.5, 0.75, 1.0], @@ -1769,7 +1769,7 @@ mod tests { grid.merge(other).unwrap(); assert_eq!(grid.bin_info().bins(), 4); - assert_eq!(grid.lumi().len(), 2); + assert_eq!(grid.channels().len(), 2); assert_eq!(grid.orders().len(), 1); } @@ -1777,8 +1777,8 @@ mod tests { fn grid_merge_orders() { let mut grid = Grid::new( vec![ - lumi_entry![2, 2, 1.0; 4, 4, 1.0], - lumi_entry![1, 1, 1.0; 3, 3, 1.0], + channel![2, 2, 1.0; 4, 4, 1.0], + channel![1, 1, 1.0; 3, 3, 1.0], ], vec![Order::new(0, 2, 0, 0)], vec![0.0, 0.25, 0.5, 0.75, 1.0], @@ -1786,13 +1786,13 @@ mod tests { ); assert_eq!(grid.bin_info().bins(), 4); - assert_eq!(grid.lumi().len(), 2); + assert_eq!(grid.channels().len(), 2); assert_eq!(grid.orders().len(), 1); let mut other = Grid::new( vec![ - lumi_entry![2, 2, 1.0; 4, 4, 1.0], - lumi_entry![1, 1, 1.0; 3, 3, 1.0], + channel![2, 2, 1.0; 4, 4, 1.0], + channel![1, 1, 1.0; 3, 3, 1.0], ], vec![ Order::new(1, 2, 0, 0), @@ -1830,16 +1830,16 @@ mod tests { grid.merge(other).unwrap(); assert_eq!(grid.bin_info().bins(), 4); - assert_eq!(grid.lumi().len(), 2); + assert_eq!(grid.channels().len(), 2); assert_eq!(grid.orders().len(), 3); } #[test] - fn grid_merge_lumi_entries() { + fn grid_merge_channels_entries() { let mut grid = Grid::new( vec![ - lumi_entry![2, 2, 1.0; 4, 4, 1.0], - lumi_entry![1, 1, 1.0; 3, 3, 1.0], + channel![2, 2, 1.0; 4, 4, 1.0], + channel![1, 1, 1.0; 3, 3, 1.0], ], vec![Order::new(0, 2, 0, 0)], vec![0.0, 0.25, 0.5, 0.75, 1.0], @@ -1847,11 +1847,11 @@ mod tests { ); assert_eq!(grid.bin_info().bins(), 4); - assert_eq!(grid.lumi().len(), 2); + assert_eq!(grid.channels().len(), 2); 
assert_eq!(grid.orders().len(), 1); let mut other = Grid::new( - vec![lumi_entry![22, 22, 1.0], lumi_entry![2, 2, 1.0; 4, 4, 1.0]], + vec![channel![22, 22, 1.0], channel![2, 2, 1.0; 4, 4, 1.0]], vec![Order::new(0, 2, 0, 0)], vec![0.0, 0.25, 0.5, 0.75, 1.0], SubgridParams::default(), @@ -1873,7 +1873,7 @@ mod tests { grid.merge(other).unwrap(); assert_eq!(grid.bin_info().bins(), 4); - assert_eq!(grid.lumi().len(), 3); + assert_eq!(grid.channels().len(), 3); assert_eq!(grid.orders().len(), 1); } @@ -1881,8 +1881,8 @@ mod tests { fn grid_merge_bins() { let mut grid = Grid::new( vec![ - lumi_entry![2, 2, 1.0; 4, 4, 1.0], - lumi_entry![1, 1, 1.0; 3, 3, 1.0], + channel![2, 2, 1.0; 4, 4, 1.0], + channel![1, 1, 1.0; 3, 3, 1.0], ], vec![Order::new(0, 2, 0, 0)], vec![0.0, 0.25, 0.5], @@ -1890,14 +1890,14 @@ mod tests { ); assert_eq!(grid.bin_info().bins(), 2); - assert_eq!(grid.lumi().len(), 2); + assert_eq!(grid.channels().len(), 2); assert_eq!(grid.orders().len(), 1); let mut other = Grid::new( vec![ - // luminosity function is differently sorted - lumi_entry![1, 1, 1.0; 3, 3, 1.0], - lumi_entry![2, 2, 1.0; 4, 4, 1.0], + // channels are differently sorted + channel![1, 1, 1.0; 3, 3, 1.0], + channel![2, 2, 1.0; 4, 4, 1.0], ], vec![Order::new(0, 2, 0, 0)], vec![0.5, 0.75, 1.0], @@ -1919,7 +1919,7 @@ mod tests { grid.merge(other).unwrap(); assert_eq!(grid.bin_info().bins(), 4); - assert_eq!(grid.lumi().len(), 2); + assert_eq!(grid.channels().len(), 2); assert_eq!(grid.orders().len(), 1); } @@ -1928,7 +1928,7 @@ mod tests { #[test] fn grid_convolutions() { let mut grid = Grid::new( - vec![lumi_entry![21, 21, 1.0]], + vec![channel![21, 21, 1.0]], vec![Order { alphas: 0, alpha: 0, diff --git a/pineappl/src/lib.rs b/pineappl/src/lib.rs index 08cfe449b..8792fc691 100644 --- a/pineappl/src/lib.rs +++ b/pineappl/src/lib.rs @@ -8,8 +8,7 @@ //! 1. (perturbative) orders, represented by the type [`Order`] and accessible by //! [`Grid::orders()`], //! 2. 
bins, whose limits can be accessed by [`Grid::bin_info()`], and -//! 3. channels, whose definition is returned by [`Grid::lumi()`]. Note that in older parts of -//! `PineAPPL` channels are often also called 'luminosities' or 'lumi'. +//! 3. channels, whose definition is returned by [`Grid::channels()`]. //! //! `Subgrid` is a `trait` and objects that implement it are of the type [`SubgridEnum`]. The //! latter is an `enum` of different types that are optimized to different scenarios: fast event @@ -17,7 +16,7 @@ //! //! [`Grid`]: grid::Grid //! [`Grid::bin_info()`]: grid::Grid::bin_info -//! [`Grid::lumi()`]: grid::Grid::lumi +//! [`Grid::channels()`]: grid::Grid::channels //! [`Grid::orders()`]: grid::Grid::orders //! [`Subgrid`]: subgrid::Subgrid //! [`SubgridEnum`]: subgrid::SubgridEnum diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs index c56f94b7e..9b15f44ab 100644 --- a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -3,259 +3,7 @@ use super::grid::Grid; use super::pids; use super::subgrid::{Mu2, Subgrid}; -use float_cmp::approx_eq; -use itertools::Itertools; use rustc_hash::FxHashMap; -use serde::{Deserialize, Serialize}; -use std::str::FromStr; -use thiserror::Error; - -/// This structure represents an entry of a luminosity function. Each entry consists of a tuple, -/// which contains, in the following order, the PDG id of the first incoming parton, then the PDG -/// id of the second parton, and finally a numerical factor that will multiply the result for this -/// specific combination. -#[derive(Clone, Debug, Deserialize, PartialEq, PartialOrd, Serialize)] -pub struct LumiEntry { - entry: Vec<(i32, i32, f64)>, -} - -impl LumiEntry { - /// Constructor for `LumiEntry`. Note that `entry` must be non-empty, otherwise this function - /// panics. 
- /// - /// # Examples - /// - /// Ordering of the arguments doesn't matter: - /// - /// ```rust - /// use pineappl::lumi::LumiEntry; - /// - /// let entry1 = LumiEntry::new(vec![(2, 2, 1.0), (4, 4, 1.0)]); - /// let entry2 = LumiEntry::new(vec![(4, 4, 1.0), (2, 2, 1.0)]); - /// - /// // checks that the ordering doesn't matter - /// assert_eq!(entry1, entry2); - /// ``` - /// - /// Same arguments are merged together: - /// - /// ```rust - /// use pineappl::lumi::LumiEntry; - /// - /// let entry1 = LumiEntry::new(vec![(1, 1, 1.0), (1, 1, 3.0), (3, 3, 1.0), (1, 1, 6.0)]); - /// let entry2 = LumiEntry::new(vec![(1, 1, 10.0), (3, 3, 1.0)]); - /// - /// assert_eq!(entry1, entry2); - /// ``` - /// - /// # Panics - /// - /// Creating an entry with content panics: - /// - /// ```rust,should_panic - /// use pineappl::lumi::LumiEntry; - /// - /// let _ = LumiEntry::new(vec![]); - /// ``` - #[must_use] - pub fn new(mut entry: Vec<(i32, i32, f64)>) -> Self { - assert!(!entry.is_empty()); - - // sort `entry` because the ordering doesn't matter and because it makes it easier to - // compare `LumiEntry` objects with each other - entry.sort_by(|x, y| (x.0, x.1).cmp(&(y.0, y.1))); - - Self { - entry: entry - .into_iter() - .coalesce(|lhs, rhs| { - // sum the factors of repeated elements - if (lhs.0, lhs.1) == (rhs.0, rhs.1) { - Ok((lhs.0, lhs.1, lhs.2 + rhs.2)) - } else { - Err((lhs, rhs)) - } - }) - // filter zeros - // TODO: find a better than to hardcode the epsilon limit - .filter(|&(_, _, f)| !approx_eq!(f64, f.abs(), 0.0, epsilon = 1e-14)) - .collect(), - } - } - - /// Translates `entry` into a different basis using `translator`. 
- /// - /// # Examples - /// - /// ```rust - /// use pineappl::lumi::LumiEntry; - /// use pineappl::lumi_entry; - /// - /// let entry = LumiEntry::translate(&lumi_entry![103, 11, 1.0], &|evol_id| match evol_id { - /// 103 => vec![(2, 1.0), (-2, -1.0), (1, -1.0), (-1, 1.0)], - /// _ => vec![(evol_id, 1.0)], - /// }); - /// - /// assert_eq!(entry, lumi_entry![2, 11, 1.0; -2, 11, -1.0; 1, 11, -1.0; -1, 11, 1.0]); - /// ``` - pub fn translate(entry: &Self, translator: &dyn Fn(i32) -> Vec<(i32, f64)>) -> Self { - let mut tuples = Vec::new(); - - for &(a, b, factor) in &entry.entry { - for (aid, af) in translator(a) { - for (bid, bf) in translator(b) { - tuples.push((aid, bid, factor * af * bf)); - } - } - } - - Self::new(tuples) - } - - /// Returns a tuple representation of this entry. - /// - /// # Examples - /// - /// ```rust - /// use pineappl::lumi_entry; - /// use pineappl::lumi::LumiEntry; - /// - /// let entry = lumi_entry![4, 4, 1.0; 2, 2, 1.0]; - /// - /// assert_eq!(entry.entry(), [(2, 2, 1.0), (4, 4, 1.0)]); - /// ``` - #[must_use] - pub fn entry(&self) -> &[(i32, i32, f64)] { - &self.entry - } - - /// Creates a new object with the initial states transposed. - #[must_use] - pub fn transpose(&self) -> Self { - Self::new(self.entry.iter().map(|(a, b, c)| (*b, *a, *c)).collect()) - } - - /// If `other` is the same channel when only comparing PIDs and neglecting the factors, return - /// the number `f1 / f2`, where `f1` is the factor from `self` and `f2` is the factor from - /// `other`. 
- /// - /// # Examples - /// - /// ```rust - /// use pineappl::lumi::LumiEntry; - /// - /// let entry1 = LumiEntry::new(vec![(2, 2, 2.0), (4, 4, 2.0)]); - /// let entry2 = LumiEntry::new(vec![(4, 4, 1.0), (2, 2, 1.0)]); - /// let entry3 = LumiEntry::new(vec![(3, 4, 1.0), (2, 2, 1.0)]); - /// let entry4 = LumiEntry::new(vec![(4, 3, 1.0), (2, 3, 2.0)]); - /// - /// assert_eq!(entry1.common_factor(&entry2), Some(2.0)); - /// assert_eq!(entry1.common_factor(&entry3), None); - /// assert_eq!(entry1.common_factor(&entry4), None); - /// ``` - #[must_use] - pub fn common_factor(&self, other: &Self) -> Option { - if self.entry.len() != other.entry.len() { - return None; - } - - let result: Option> = self - .entry - .iter() - .zip(&other.entry) - .map(|(a, b)| ((a.0 == b.0) && (a.1 == b.1)).then_some(a.2 / b.2)) - .collect(); - - result.and_then(|factors| { - if factors - .windows(2) - .all(|win| approx_eq!(f64, win[0], win[1], ulps = 4)) - { - factors.first().copied() - } else { - None - } - }) - } -} - -/// Error type keeping information if [`LumiEntry::from_str`] went wrong. -#[derive(Debug, Error)] -#[error("{0}")] -pub struct ParseLumiEntryError(String); - -impl FromStr for LumiEntry { - type Err = ParseLumiEntryError; - - fn from_str(s: &str) -> Result { - Ok(Self::new( - s.split('+') - .map(|sub| { - sub.split_once('*').map_or_else( - || Err(ParseLumiEntryError(format!("missing '*' in '{sub}'"))), - |(factor, pids)| { - let tuple = pids.split_once(',').map_or_else( - || Err(ParseLumiEntryError(format!("missing ',' in '{pids}'"))), - |(a, b)| { - Ok(( - a.trim() - .strip_prefix('(') - .ok_or_else(|| { - ParseLumiEntryError(format!( - "missing '(' in '{pids}'" - )) - })? - .trim() - .parse::() - .map_err(|err| ParseLumiEntryError(err.to_string()))?, - b.trim() - .strip_suffix(')') - .ok_or_else(|| { - ParseLumiEntryError(format!( - "missing ')' in '{pids}'" - )) - })? 
- .trim() - .parse::() - .map_err(|err| ParseLumiEntryError(err.to_string()))?, - )) - }, - )?; - - Ok(( - tuple.0, - tuple.1, - str::parse::(factor.trim()) - .map_err(|err| ParseLumiEntryError(err.to_string()))?, - )) - }, - ) - }) - .collect::>()?, - )) - } -} - -/// Helper macro to quickly generate a `LumiEntry` at compile time. -/// -/// # Examples -/// -/// In the following example `entry1` and `entry2` represent the same values: -/// -/// ```rust -/// use pineappl::lumi_entry; -/// -/// let entry1 = lumi_entry![2, 2, 1.0; 4, 4, 1.0]; -/// let entry2 = lumi_entry![4, 4, 1.0; 2, 2, 1.0]; -/// -/// assert_eq!(entry1, entry2); -/// ``` -#[macro_export] -macro_rules! lumi_entry { - ($a:expr, $b:expr, $factor:expr $(; $c:expr, $d:expr, $fac:expr)*) => { - $crate::lumi::LumiEntry::new(vec![($a, $b, $factor), $(($c, $d, $fac)),*]) - }; -} enum Pdfs<'a> { Two { @@ -592,64 +340,3 @@ impl<'a> LumiCache<'a> { .collect(); } } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn translate() { - let lumi = LumiEntry::translate(&lumi_entry![103, 203, 2.0], &pids::evol_to_pdg_mc_ids); - - assert_eq!( - lumi, - lumi_entry![ 2, 2, 2.0; 2, -2, -2.0; 2, 1, -2.0; 2, -1, 2.0; - -2, 2, 2.0; -2, -2, -2.0; -2, 1, -2.0; -2, -1, 2.0; - 1, 2, -2.0; 1, -2, 2.0; 1, 1, 2.0; 1, -1, -2.0; - -1, 2, -2.0; -1, -2, 2.0; -1, 1, 2.0; -1, -1, -2.0] - ); - } - - #[test] - fn from_str() { - assert_eq!( - str::parse::(" 1 * ( 2 , -2) + 2* (4,-4)").unwrap(), - lumi_entry![2, -2, 1.0; 4, -4, 2.0] - ); - - assert_eq!( - str::parse::("* ( 2, -2) + 2* (4,-4)") - .unwrap_err() - .to_string(), - "cannot parse float from empty string" - ); - - assert_eq!( - str::parse::(" 1 ( 2 -2) + 2* (4,-4)") - .unwrap_err() - .to_string(), - "missing '*' in ' 1 ( 2 -2) '" - ); - - assert_eq!( - str::parse::(" 1 * ( 2 -2) + 2* (4,-4)") - .unwrap_err() - .to_string(), - "missing ',' in ' ( 2 -2) '" - ); - - assert_eq!( - str::parse::(" 1 * 2, -2) + 2* (4,-4)") - .unwrap_err() - .to_string(), - "missing '(' in ' 2, 
-2) '" - ); - - assert_eq!( - str::parse::(" 1 * ( 2, -2 + 2* (4,-4)") - .unwrap_err() - .to_string(), - "missing ')' in ' ( 2, -2 '" - ); - } -} diff --git a/pineappl/src/pids.rs b/pineappl/src/pids.rs index 2b87e174f..b6c13dbc3 100644 --- a/pineappl/src/pids.rs +++ b/pineappl/src/pids.rs @@ -383,8 +383,8 @@ pub fn pdg_mc_ids_to_evol(tuples: &[(i32, f64)]) -> Option { #[cfg(test)] mod tests { use super::*; - use crate::lumi::LumiEntry; - use crate::lumi_entry; + use crate::boc::Channel; + use crate::channel; use float_cmp::assert_approx_eq; #[test] @@ -891,8 +891,8 @@ mod tests { #[test] fn inverse_inverse_evol() { for pid in [-6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6] { - let result = LumiEntry::translate( - &LumiEntry::translate(&lumi_entry![pid, pid, 1.0], &pdg_mc_pids_to_evol), + let result = Channel::translate( + &Channel::translate(&channel![pid, pid, 1.0], &pdg_mc_pids_to_evol), &evol_to_pdg_mc_ids, ); diff --git a/pineappl/tests/drell_yan_lo.rs b/pineappl/tests/drell_yan_lo.rs index ed33d7e17..6e28bfed9 100644 --- a/pineappl/tests/drell_yan_lo.rs +++ b/pineappl/tests/drell_yan_lo.rs @@ -4,9 +4,9 @@ use lhapdf::Pdf; use num_complex::Complex; use pineappl::bin::BinRemapper; use pineappl::boc::Order; +use pineappl::channel; use pineappl::grid::{Grid, GridOptFlags, Ntuple}; use pineappl::lumi::LumiCache; -use pineappl::lumi_entry; use pineappl::subgrid::{ExtraSubgridParams, Subgrid, SubgridEnum, SubgridParams}; use rand::Rng; use rand_pcg::Pcg64; @@ -120,17 +120,17 @@ fn fill_drell_yan_lo_grid( dynamic: bool, reweight: bool, ) -> Result { - let lumi = vec![ + let channels = vec![ // photons - lumi_entry![22, 22, 1.0], + channel![22, 22, 1.0], // up-antiup - lumi_entry![2, -2, 1.0; 4, -4, 1.0], + channel![2, -2, 1.0; 4, -4, 1.0], // antiup-up - lumi_entry![-2, 2, 1.0; -4, 4, 1.0], + channel![-2, 2, 1.0; -4, 4, 1.0], // down-antidown - lumi_entry![1, -1, 1.0; 3, -3, 1.0; 5, -5, 1.0], + channel![1, -1, 1.0; 3, -3, 1.0; 5, -5, 1.0], // antidown-down - 
lumi_entry![-1, 1, 1.0; -3, 3, 1.0; -5, 5, 1.0], + channel![-1, 1, 1.0; -3, 3, 1.0; -5, 5, 1.0], ]; let orders = vec![ @@ -179,7 +179,7 @@ fn fill_drell_yan_lo_grid( // create the PineAPPL grid let mut grid = Grid::with_subgrid_type( - lumi, + channels, orders, bin_limits, subgrid_params, @@ -353,7 +353,7 @@ fn perform_grid_tests( // TEST 6: `convolve_subgrid` let bins: Vec<_> = (0..grid.bin_info().bins()) .map(|bin| { - (0..grid.lumi().len()) + (0..grid.channels().len()) .map(|channel| { grid.convolve_subgrid(&mut lumi_cache, 0, bin, channel, 1.0, 1.0) .sum() @@ -378,7 +378,7 @@ fn perform_grid_tests( // TEST 8: `convolve_subgrid` for the optimized subgrids let bins: Vec<_> = (0..grid.bin_info().bins()) .map(|bin| { - (0..grid.lumi().len()) + (0..grid.channels().len()) .map(|channel| { grid.convolve_subgrid(&mut lumi_cache, 0, bin, channel, 1.0, 1.0) .sum() @@ -763,7 +763,7 @@ fn grid_optimize() -> Result<()> { let mut grid = generate_grid("LagrangeSubgridV2", false, false)?; assert_eq!(grid.orders().len(), 3); - assert_eq!(grid.lumi().len(), 5); + assert_eq!(grid.channels().len(), 5); assert!(matches!( grid.subgrids()[[0, 0, 0]], SubgridEnum::LagrangeSubgridV2 { .. 
} @@ -800,23 +800,23 @@ fn grid_optimize() -> Result<()> { grid.optimize_using(GridOptFlags::SYMMETRIZE_CHANNELS); assert_eq!(grid.orders().len(), 3); - assert_eq!(grid.lumi().len(), 5); + assert_eq!(grid.channels().len(), 5); grid.optimize_using(GridOptFlags::STRIP_EMPTY_ORDERS); assert_eq!(grid.orders().len(), 1); - assert_eq!(grid.lumi().len(), 5); + assert_eq!(grid.channels().len(), 5); // has no effect for this test grid.optimize_using(GridOptFlags::MERGE_SAME_CHANNELS); assert_eq!(grid.orders().len(), 1); - assert_eq!(grid.lumi().len(), 5); + assert_eq!(grid.channels().len(), 5); grid.optimize_using(GridOptFlags::STRIP_EMPTY_CHANNELS); assert_eq!(grid.orders().len(), 1); - assert_eq!(grid.lumi().len(), 3); + assert_eq!(grid.channels().len(), 3); Ok(()) } diff --git a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index b059ad803..5132b9384 100644 --- a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -57,9 +57,9 @@ use itertools::izip; use pineappl::bin::BinRemapper; -use pineappl::boc::Order; +use pineappl::boc::{Channel, Order}; use pineappl::grid::{Grid, GridOptFlags, Ntuple}; -use pineappl::lumi::{LumiCache, LumiEntry}; +use pineappl::lumi::LumiCache; use pineappl::subgrid::{ExtraSubgridParams, SubgridParams}; use std::collections::HashMap; use std::ffi::{CStr, CString}; @@ -189,7 +189,7 @@ fn grid_params(key_vals: Option<&KeyVal>) -> (String, SubgridParams, ExtraSubgri /// Type for defining a luminosity function. #[derive(Default)] -pub struct Lumi(Vec); +pub struct Lumi(Vec); /// Returns the number of bins in `grid`. 
/// @@ -304,14 +304,23 @@ pub unsafe extern "C" fn pineappl_grid_convolute_with_one( alphas: extern "C" fn(q2: f64, state: *mut c_void) -> f64, state: *mut c_void, order_mask: *const bool, - lumi_mask: *const bool, + channel_mask: *const bool, xi_ren: f64, xi_fac: f64, results: *mut f64, ) { unsafe { pineappl_grid_convolve_with_one( - grid, pdg_id, xfx, alphas, state, order_mask, lumi_mask, xi_ren, xi_fac, results, + grid, + pdg_id, + xfx, + alphas, + state, + order_mask, + channel_mask, + xi_ren, + xi_fac, + results, ); } } @@ -331,15 +340,25 @@ pub unsafe extern "C" fn pineappl_grid_convolute_with_two( alphas: extern "C" fn(q2: f64, state: *mut c_void) -> f64, state: *mut c_void, order_mask: *const bool, - lumi_mask: *const bool, + channel_mask: *const bool, xi_ren: f64, xi_fac: f64, results: *mut f64, ) { unsafe { pineappl_grid_convolve_with_two( - grid, pdg_id1, xfx1, pdg_id2, xfx2, alphas, state, order_mask, lumi_mask, xi_ren, - xi_fac, results, + grid, + pdg_id1, + xfx1, + pdg_id2, + xfx2, + alphas, + state, + order_mask, + channel_mask, + xi_ren, + xi_fac, + results, ); } } @@ -351,19 +370,19 @@ pub unsafe extern "C" fn pineappl_grid_convolute_with_two( /// provided to these functions is the same one given to this function. The parameter `order_mask` /// must be as long as there are perturbative orders contained in `grid` and is used to selectively /// disable (`false`) or enable (`true`) individual orders. If `order_mask` is set to `NULL`, all -/// orders are active. The parameter `lumi_mask` can be used similarly, but must be as long as the -/// luminosity function `grid` was created with has entries, or `NULL` to enable all luminosities. -/// The values `xi_ren` and `xi_fac` can be used to vary the renormalization and factorization from -/// its central value, which corresponds to `1.0`. After convolution of the grid with the PDFs the +/// orders are active. 
The parameter `channel_mask` can be used similarly, but must be as long as +/// the channels `grid` was created with has entries, or `NULL` to enable all channels. The values +/// `xi_ren` and `xi_fac` can be used to vary the renormalization and factorization from its +/// central value, which corresponds to `1.0`. After convolution of the grid with the PDFs the /// differential cross section for each bin is written into `results`. /// /// # Safety /// /// If `grid` does not point to a valid `Grid` object, for example when `grid` is the null pointer, /// this function is not safe to call. The function pointers `xfx1`, `xfx2`, and `alphas` must not -/// be null pointers and point to valid functions. The parameters `order_mask` and `lumi_mask` must -/// either be null pointers or point to arrays that are as long as `grid` has orders and lumi -/// entries, respectively. Finally, `results` must be as long as `grid` has bins. +/// be null pointers and point to valid functions. The parameters `order_mask` and `channel_mask` +/// must either be null pointers or point to arrays that are as long as `grid` has orders and +/// channels, respectively. Finally, `results` must be as long as `grid` has bins. 
#[no_mangle] pub unsafe extern "C" fn pineappl_grid_convolve_with_one( grid: *const Grid, @@ -372,7 +391,7 @@ pub unsafe extern "C" fn pineappl_grid_convolve_with_one( alphas: extern "C" fn(q2: f64, state: *mut c_void) -> f64, state: *mut c_void, order_mask: *const bool, - lumi_mask: *const bool, + channel_mask: *const bool, xi_ren: f64, xi_fac: f64, results: *mut f64, @@ -385,10 +404,10 @@ pub unsafe extern "C" fn pineappl_grid_convolve_with_one( } else { unsafe { slice::from_raw_parts(order_mask, grid.orders().len()) }.to_owned() }; - let lumi_mask = if lumi_mask.is_null() { + let channel_mask = if channel_mask.is_null() { vec![] } else { - unsafe { slice::from_raw_parts(lumi_mask, grid.lumi().len()) }.to_vec() + unsafe { slice::from_raw_parts(channel_mask, grid.channels().len()) }.to_vec() }; let results = unsafe { slice::from_raw_parts_mut(results, grid.bin_info().bins()) }; let mut lumi_cache = LumiCache::with_one(pdg_id, &mut pdf, &mut als); @@ -397,7 +416,7 @@ pub unsafe extern "C" fn pineappl_grid_convolve_with_one( &mut lumi_cache, &order_mask, &[], - &lumi_mask, + &channel_mask, &[(xi_ren, xi_fac)], )); } @@ -410,19 +429,19 @@ pub unsafe extern "C" fn pineappl_grid_convolve_with_one( /// given to this function. The parameter `order_mask` must be as long as there are perturbative /// orders contained in `grid` and is used to selectively disable (`false`) or enable (`true`) /// individual orders. If `order_mask` is set to `NULL`, all orders are active. The parameter -/// `lumi_mask` can be used similarly, but must be as long as the luminosity function `grid` was -/// created with has entries, or `NULL` to enable all luminosities. The values `xi_ren` and -/// `xi_fac` can be used to vary the renormalization and factorization from its central value, -/// which corresponds to `1.0`. After convolution of the grid with the PDFs the differential cross -/// section for each bin is written into `results`. 
+/// `channel_mask` can be used similarly, but must be as long as the channels `grid` was created +/// with has entries, or `NULL` to enable all channels. The values `xi_ren` and `xi_fac` can be +/// used to vary the renormalization and factorization from its central value, which corresponds to +/// `1.0`. After convolution of the grid with the PDFs the differential cross section for each bin +/// is written into `results`. /// /// # Safety /// /// If `grid` does not point to a valid `Grid` object, for example when `grid` is the null pointer, /// this function is not safe to call. The function pointers `xfx1`, `xfx2`, and `alphas` must not -/// be null pointers and point to valid functions. The parameters `order_mask` and `lumi_mask` must -/// either be null pointers or point to arrays that are as long as `grid` has orders and lumi -/// entries, respectively. Finally, `results` must be as long as `grid` has bins. +/// be null pointers and point to valid functions. The parameters `order_mask` and `channel_mask` +/// must either be null pointers or point to arrays that are as long as `grid` has orders and +/// channels, respectively. Finally, `results` must be as long as `grid` has bins. 
#[no_mangle] pub unsafe extern "C" fn pineappl_grid_convolve_with_two( grid: *const Grid, @@ -433,7 +452,7 @@ pub unsafe extern "C" fn pineappl_grid_convolve_with_two( alphas: extern "C" fn(q2: f64, state: *mut c_void) -> f64, state: *mut c_void, order_mask: *const bool, - lumi_mask: *const bool, + channel_mask: *const bool, xi_ren: f64, xi_fac: f64, results: *mut f64, @@ -447,10 +466,10 @@ pub unsafe extern "C" fn pineappl_grid_convolve_with_two( } else { unsafe { slice::from_raw_parts(order_mask, grid.orders().len()) }.to_vec() }; - let lumi_mask = if lumi_mask.is_null() { + let channel_mask = if channel_mask.is_null() { vec![] } else { - unsafe { slice::from_raw_parts(lumi_mask, grid.lumi().len()) }.to_vec() + unsafe { slice::from_raw_parts(channel_mask, grid.channels().len()) }.to_vec() }; let results = unsafe { slice::from_raw_parts_mut(results, grid.bin_info().bins()) }; let mut lumi_cache = LumiCache::with_two(pdg_id1, &mut pdf1, pdg_id2, &mut pdf2, &mut als); @@ -459,7 +478,7 @@ pub unsafe extern "C" fn pineappl_grid_convolve_with_two( &mut lumi_cache, &order_mask, &[], - &lumi_mask, + &channel_mask, &[(xi_ren, xi_fac)], )); } @@ -526,7 +545,7 @@ pub unsafe extern "C" fn pineappl_grid_fill_all( weights: *const f64, ) { let grid = unsafe { &mut *grid }; - let weights = unsafe { slice::from_raw_parts(weights, grid.lumi().len()) }; + let weights = unsafe { slice::from_raw_parts(weights, grid.channels().len()) }; grid.fill_all( order, @@ -586,7 +605,7 @@ pub unsafe extern "C" fn pineappl_grid_fill_array( pub unsafe extern "C" fn pineappl_grid_lumi(grid: *const Grid) -> Box { let grid = unsafe { &*grid }; - Box::new(Lumi(grid.lumi().to_vec())) + Box::new(Lumi(grid.channels().to_vec())) } /// Write the order parameters of `grid` into `order_params`. 
@@ -778,7 +797,7 @@ pub unsafe extern "C" fn pineappl_grid_scale(grid: *mut Grid, factor: f64) { pub unsafe extern "C" fn pineappl_grid_split_lumi(grid: *mut Grid) { let grid = unsafe { &mut *grid }; - grid.split_lumi(); + grid.split_channels(); } /// Optimizes the grid representation for space efficiency. @@ -994,7 +1013,7 @@ pub unsafe extern "C" fn pineappl_lumi_add( unsafe { slice::from_raw_parts(factors, combinations) }.to_vec() }; - lumi.0.push(LumiEntry::new( + lumi.0.push(Channel::new( pdg_id_pairs .chunks(2) .zip(factors) diff --git a/pineappl_cli/src/analyze.rs b/pineappl_cli/src/analyze.rs index 84479b86d..506767db3 100644 --- a/pineappl_cli/src/analyze.rs +++ b/pineappl_cli/src/analyze.rs @@ -79,11 +79,11 @@ impl Subcommand for CkfOpts { self.orders_den.clone() }; - let limit = grid.lumi().len().min(self.limit); + let limit = grid.channels().len().min(self.limit); let limits = helpers::convolve_limits(&grid, &[], ConvoluteMode::Normal); - let results: Vec<_> = (0..grid.lumi().len()) + let results: Vec<_> = (0..grid.channels().len()) .map(|lumi| { - let mut lumi_mask = vec![false; grid.lumi().len()]; + let mut lumi_mask = vec![false; grid.channels().len()]; lumi_mask[lumi] = true; helpers::convolve( &grid, @@ -97,9 +97,9 @@ impl Subcommand for CkfOpts { ) }) .collect(); - let results_den: Vec<_> = (0..grid.lumi().len()) + let results_den: Vec<_> = (0..grid.channels().len()) .map(|lumi| { - let mut lumi_mask = vec![false; grid.lumi().len()]; + let mut lumi_mask = vec![false; grid.channels().len()]; lumi_mask[lumi] = true; helpers::convolve( &grid, @@ -124,7 +124,7 @@ impl Subcommand for CkfOpts { } title.add_cell(cell!(c->"bin-K")); for _ in 0..limit { - title.add_cell(cell!(c->"l")); + title.add_cell(cell!(c->"c")); title.add_cell(cell!(c->"K")); } diff --git a/pineappl_cli/src/channels.rs b/pineappl_cli/src/channels.rs index 7d04111b2..d9841f666 100644 --- a/pineappl_cli/src/channels.rs +++ b/pineappl_cli/src/channels.rs @@ -34,13 +34,14 @@ pub struct 
Opts { integrated: bool, /// Show only the listed channels. #[arg( + alias = "lumis", conflicts_with = "limit", long, num_args = 1, value_delimiter = ',', value_parser = helpers::parse_integer_range )] - lumis: Vec>, + channels: Vec>, /// Select orders manually. #[arg( long, @@ -66,16 +67,16 @@ impl Subcommand for Opts { let grid = helpers::read_grid(&self.input)?; let mut pdf = helpers::create_pdf(&self.pdfset)?; - let mut lumis: Vec<_> = self.lumis.iter().cloned().flatten().collect(); - lumis.sort_unstable(); - lumis.dedup(); - let lumis = lumis; + let mut channels: Vec<_> = self.channels.iter().cloned().flatten().collect(); + channels.sort_unstable(); + channels.dedup(); + let channels = channels; - let limit = grid.lumi().len().min(self.limit); - let limit = if lumis.is_empty() { + let limit = grid.channels().len().min(self.limit); + let limit = if channels.is_empty() { limit } else { - limit.min(lumis.len()) + limit.min(channels.len()) }; let limits = helpers::convolve_limits( &grid, @@ -86,16 +87,16 @@ impl Subcommand for Opts { ConvoluteMode::Normal }, ); - let results: Vec<_> = (0..grid.lumi().len()) - .map(|lumi| { - let mut lumi_mask = vec![false; grid.lumi().len()]; - lumi_mask[lumi] = true; + let results: Vec<_> = (0..grid.channels().len()) + .map(|channel| { + let mut channel_mask = vec![false; grid.channels().len()]; + channel_mask[channel] = true; helpers::convolve( &grid, &mut pdf, &self.orders, &[], - &lumi_mask, + &channel_mask, 1, if self.integrated { ConvoluteMode::Integrated @@ -116,7 +117,7 @@ impl Subcommand for Opts { title.add_cell(cell); } for _ in 0..limit { - title.add_cell(cell!(c->"l")); + title.add_cell(cell!(c->"c")); title.add_cell( cell!(c->&if self.absolute { format!("{y_label}\n[{y_unit}]") } else { "size\n[%]".to_owned() }), ); @@ -139,7 +140,7 @@ impl Subcommand for Opts { let mut values: Vec<_> = results .iter() .enumerate() - .map(|(lumi, vec)| (lumi, vec[bin])) + .map(|(channel, vec)| (channel, vec[bin])) .collect(); if 
!self.dont_sort { @@ -149,12 +150,14 @@ impl Subcommand for Opts { }); } - for (lumi, value) in values + for (channel, value) in values .iter() - .filter(|(lumi, _)| lumis.is_empty() || lumis.iter().any(|l| l == lumi)) + .filter(|(channel, _)| { + channels.is_empty() || channels.iter().any(|c| c == channel) + }) .take(limit) { - row.add_cell(cell!(r->format!("{lumi}"))); + row.add_cell(cell!(r->format!("{channel}"))); row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, value))); } } else { @@ -162,7 +165,7 @@ impl Subcommand for Opts { let mut percentages: Vec<_> = results .iter() .enumerate() - .map(|(lumi, vec)| (lumi, vec[bin] / sum * 100.0)) + .map(|(channel, vec)| (channel, vec[bin] / sum * 100.0)) .collect(); if !self.dont_sort { @@ -172,12 +175,14 @@ impl Subcommand for Opts { }); } - for (lumi, percentage) in percentages + for (channel, percentage) in percentages .iter() - .filter(|(lumi, _)| lumis.is_empty() || lumis.iter().any(|l| l == lumi)) + .filter(|(channel, _)| { + channels.is_empty() || channels.iter().any(|c| c == channel) + }) .take(limit) { - row.add_cell(cell!(r->format!("{lumi}"))); + row.add_cell(cell!(r->format!("{channel}"))); row.add_cell(cell!(r->format!("{:.*}", self.digits_rel, percentage))); } } diff --git a/pineappl_cli/src/diff.rs b/pineappl_cli/src/diff.rs index afa874f4b..8757c45d6 100644 --- a/pineappl_cli/src/diff.rs +++ b/pineappl_cli/src/diff.rs @@ -25,9 +25,9 @@ pub struct Opts { /// Ignore bin limits (but not number of bins). #[arg(long)] ignore_bin_limits: bool, - /// Ignore differences in the luminosity functions. - #[arg(long)] - ignore_lumis: bool, + /// Ignore differences in the channel definition. + #[arg(alias = "ignore-lumis", long)] + ignore_channels: bool, /// Select orders of the first grid. 
#[arg( long, @@ -123,8 +123,8 @@ impl Subcommand for Opts { } // TODO: use approximate comparison - if !self.ignore_lumis && (grid1.lumi() != grid2.lumi()) { - bail!("luminosities differ"); + if !self.ignore_channels && (grid1.channels() != grid2.channels()) { + bail!("channels differ"); } let mut pdf = helpers::create_pdf(&self.pdfset)?; diff --git a/pineappl_cli/src/export/applgrid.rs b/pineappl_cli/src/export/applgrid.rs index 74709bee7..ec800c56a 100644 --- a/pineappl_cli/src/export/applgrid.rs +++ b/pineappl_cli/src/export/applgrid.rs @@ -67,32 +67,37 @@ pub fn convert_into_applgrid( bail!("grid has non-consecutive bin limits, which APPLgrid does not support"); } - let lumis = grid.lumi().len(); + let lumis = grid.channels().len(); let has_pdf1 = grid.convolutions()[0] != Convolution::None; let has_pdf2 = grid.convolutions()[1] != Convolution::None; // TODO: check that PDG MC IDs are used let combinations: Vec<_> = iter::once(lumis.try_into().unwrap()) - .chain(grid.lumi().iter().enumerate().flat_map(|(index, entry)| { - [ - index.try_into().unwrap(), - entry.entry().len().try_into().unwrap(), - ] - .into_iter() - .chain(entry.entry().iter().flat_map(|&(a, b, factor)| { - // TODO: if the factors aren't trivial, we have to find some other way to - // propagate them - assert_eq!(factor, 1.0); - - match (has_pdf1, has_pdf2) { - (true, true) => [a, b], - (true, false) => [a, 0], - (false, true) => [b, 0], - (false, false) => unreachable!(), - } - })) - })) + .chain( + grid.channels() + .iter() + .enumerate() + .flat_map(|(index, entry)| { + [ + index.try_into().unwrap(), + entry.entry().len().try_into().unwrap(), + ] + .into_iter() + .chain(entry.entry().iter().flat_map(|&(a, b, factor)| { + // TODO: if the factors aren't trivial, we have to find some other way to + // propagate them + assert_eq!(factor, 1.0); + + match (has_pdf1, has_pdf2) { + (true, true) => [a, b], + (true, false) => [a, 0], + (false, true) => [b, 0], + (false, false) => unreachable!(), + } + 
})) + }), + ) .collect(); // `id` must end with '.config' for APPLgrid to know its type is `lumi_pdf` @@ -162,7 +167,7 @@ pub fn convert_into_applgrid( p.x_order().try_into().unwrap(), "f2", "h0", - grid.lumi().len().try_into().unwrap(), + grid.channels().len().try_into().unwrap(), has_pdf1 != has_pdf2, ); let appl_q2: Vec<_> = (0..igrid.Ntau()).map(|i| igrid.getQ2(i)).collect(); diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index e0b18dd52..9d5713719 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -130,7 +130,7 @@ pub fn convolve_scales( lhapdf: &mut Pdf, orders: &[(u32, u32)], bins: &[usize], - lumis: &[bool], + channels: &[bool], scales: &[(f64, f64)], mode: ConvoluteMode, cfg: &GlobalConfiguration, @@ -168,7 +168,7 @@ pub fn convolve_scales( }; let mut alphas = |q2| lhapdf.alphas_q2(q2); let mut cache = LumiCache::with_one(pdf_pdg_id, &mut pdf, &mut alphas); - let mut results = grid.convolve(&mut cache, &orders, bins, lumis, scales); + let mut results = grid.convolve(&mut cache, &orders, bins, channels, scales); match mode { ConvoluteMode::Asymmetry => { diff --git a/pineappl_cli/src/import/applgrid.rs b/pineappl_cli/src/import/applgrid.rs index 67d623840..cc34973ed 100644 --- a/pineappl_cli/src/import/applgrid.rs +++ b/pineappl_cli/src/import/applgrid.rs @@ -1,8 +1,7 @@ use anyhow::Result; -use pineappl::boc::Order; +use pineappl::boc::{Channel, Order}; use pineappl::grid::{Convolution, Grid}; use pineappl::import_only_subgrid::ImportOnlySubgridV2; -use pineappl::lumi::LumiEntry; use pineappl::sparse_array3::SparseArray3; use pineappl::subgrid::{Mu2, SubgridParams}; use pineappl_applgrid::ffi::{self, grid}; @@ -21,7 +20,7 @@ fn convert_to_pdg_id(pid: usize) -> i32 { } } -fn reconstruct_luminosity_function(grid: &grid, order: i32, dis_pid: i32) -> Vec { +fn reconstruct_luminosity_function(grid: &grid, order: i32, dis_pid: i32) -> Vec { let pdf = unsafe { &*grid.genpdf(order, false) }; let nproc: usize = 
pdf.Nproc().try_into().unwrap(); @@ -64,7 +63,7 @@ fn reconstruct_luminosity_function(grid: &grid, order: i32, dis_pid: i32) -> Vec xfx1[a] = 0.0; } - lumis.into_iter().map(LumiEntry::new).collect() + lumis.into_iter().map(Channel::new).collect() } pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, dis_pid: i32) -> Result { diff --git a/pineappl_cli/src/import/fastnlo.rs b/pineappl_cli/src/import/fastnlo.rs index 28d11a569..f658cc0bc 100644 --- a/pineappl_cli/src/import/fastnlo.rs +++ b/pineappl_cli/src/import/fastnlo.rs @@ -1,10 +1,9 @@ use anyhow::Result; use itertools::Itertools; use pineappl::bin::BinRemapper; -use pineappl::boc::Order; +use pineappl::boc::{Channel, Order}; use pineappl::grid::{Convolution, Grid}; use pineappl::import_only_subgrid::ImportOnlySubgridV2; -use pineappl::lumi::LumiEntry; use pineappl::sparse_array3::SparseArray3; use pineappl::subgrid::{Mu2, SubgridParams}; use pineappl_fastnlo::ffi::{ @@ -25,7 +24,7 @@ fn create_lumi( table: &fastNLOCoeffAddBase, comb: &fastNLOPDFLinearCombinations, dis_pid: i32, -) -> Vec { +) -> Vec { let dis_pid = if table.GetNPDF() == 2 { 0 } else { dis_pid }; let mut lumis = Vec::new(); @@ -46,7 +45,7 @@ fn create_lumi( entries.push((a, b, f)); } - lumis.push(LumiEntry::new(entries)); + lumis.push(Channel::new(entries)); } // if the PDF coefficient vector was empty, we must reconstruct the lumi function @@ -82,7 +81,7 @@ fn create_lumi( xfx1[a] = 0.0; } - lumis = entries.into_iter().map(LumiEntry::new).collect(); + lumis = entries.into_iter().map(Channel::new).collect(); } lumis diff --git a/pineappl_cli/src/import/fktable.rs b/pineappl_cli/src/import/fktable.rs index 7e9f38483..bd72b4984 100644 --- a/pineappl_cli/src/import/fktable.rs +++ b/pineappl_cli/src/import/fktable.rs @@ -1,9 +1,9 @@ use anyhow::{anyhow, Context, Result}; use flate2::read::GzDecoder; use pineappl::boc::Order; +use pineappl::channel; use pineappl::grid::{Convolution, Grid}; use pineappl::import_only_subgrid::ImportOnlySubgridV1; 
-use pineappl::lumi_entry; use pineappl::sparse_array3::SparseArray3; use pineappl::subgrid::SubgridParams; use std::fs::File; @@ -83,14 +83,14 @@ fn read_fktable(reader: impl BufRead, dis_pid: i32) -> Result { .iter() .enumerate() .filter(|&(_, &value)| value) - .map(|(index, _)| lumi_entry![basis[index / 14], basis[index % 14], 1.0]) + .map(|(index, _)| channel![basis[index / 14], basis[index % 14], 1.0]) .collect() } else { flavor_mask .iter() .enumerate() .filter(|&(_, &value)| value) - .map(|(index, _)| lumi_entry![basis[index], dis_pid, 1.0]) + .map(|(index, _)| channel![basis[index], dis_pid, 1.0]) .collect() }; diff --git a/pineappl_cli/src/plot.rs b/pineappl_cli/src/plot.rs index 92cee08ef..e9de5fcfd 100644 --- a/pineappl_cli/src/plot.rs +++ b/pineappl_cli/src/plot.rs @@ -5,8 +5,8 @@ use clap::builder::{PossibleValuesParser, TypedValueParser}; use clap::{Parser, ValueHint}; use itertools::Itertools; use ndarray::Axis; +use pineappl::boc::Channel; use pineappl::grid::Convolution; -use pineappl::lumi::LumiEntry; use pineappl::subgrid::Subgrid; use rayon::{prelude::*, ThreadPoolBuilder}; use std::fmt::Write; @@ -38,7 +38,7 @@ pub struct Opts { long, num_args = 1, value_delimiter = ',', - value_name = "ORDER,BIN,LUMI" + value_name = "ORDER,BIN,CHAN" )] subgrid_pull: Vec, /// Plot the asymmetry. 
@@ -101,8 +101,9 @@ fn map_format_parton(parton: i32) -> &'static str { } } -fn map_format_lumi(lumi: &LumiEntry, has_pdf1: bool, has_pdf2: bool) -> String { - lumi.entry() +fn map_format_channel(channel: &Channel, has_pdf1: bool, has_pdf2: bool) -> String { + channel + .entry() .iter() .map(|&(a, b, _)| { format!( @@ -370,13 +371,13 @@ impl Subcommand for Opts { let channels = if matches!(mode, ConvoluteMode::Asymmetry) { vec![] } else { - let mut channels: Vec<_> = (0..grid.lumi().len()) - .map(|lumi| { - let mut lumi_mask = vec![false; grid.lumi().len()]; - lumi_mask[lumi] = true; + let mut channels: Vec<_> = (0..grid.channels().len()) + .map(|channel| { + let mut channel_mask = vec![false; grid.channels().len()]; + channel_mask[channel] = true; ( - map_format_lumi( - &grid.lumi()[lumi], + map_format_channel( + &grid.channels()[channel], grid.convolutions()[0] != Convolution::None, grid.convolutions()[1] != Convolution::None, ), @@ -385,7 +386,7 @@ impl Subcommand for Opts { &mut pdf, &[], &bins, - &lumi_mask, + &channel_mask, 1, mode, cfg, @@ -506,7 +507,7 @@ impl Subcommand for Opts { ); } else { let (pdfset1, pdfset2) = self.pdfsets.iter().collect_tuple().unwrap(); - let (order, bin, lumi) = self + let (order, bin, channel) = self .subgrid_pull .iter() .map(|num| num.parse::().unwrap()) @@ -577,12 +578,12 @@ impl Subcommand for Opts { unc1.hypot(unc2) }; - let res1 = helpers::convolve_subgrid(&grid, &mut pdfset1[0], order, bin, lumi, cfg) + let res1 = helpers::convolve_subgrid(&grid, &mut pdfset1[0], order, bin, channel, cfg) .sum_axis(Axis(0)); - let res2 = helpers::convolve_subgrid(&grid, &mut pdfset2[0], order, bin, lumi, cfg) + let res2 = helpers::convolve_subgrid(&grid, &mut pdfset2[0], order, bin, channel, cfg) .sum_axis(Axis(0)); - let subgrid = grid.subgrid(order, bin, lumi); + let subgrid = grid.subgrid(order, bin, channel); //let q2 = subgrid.q2_grid(); let x1 = subgrid.x1_grid(); let x2 = subgrid.x2_grid(); diff --git a/pineappl_cli/src/pull.rs 
b/pineappl_cli/src/pull.rs index 25043c327..e12cc0ada 100644 --- a/pineappl_cli/src/pull.rs +++ b/pineappl_cli/src/pull.rs @@ -27,7 +27,7 @@ pub struct Opts { /// Confidence level in per cent. #[arg(default_value_t = lhapdf::CL_1_SIGMA, long)] cl: f64, - /// The maximum number of luminosities displayed. + /// The maximum number of channels displayed. #[arg(default_value_t = 10, long, short)] limit: usize, /// Select orders manually. @@ -61,7 +61,7 @@ impl Subcommand for Opts { .build_global() .unwrap(); - let limit = grid.lumi().len().min(self.limit); + let limit = grid.channels().len().min(self.limit); let bin_limits = helpers::convolve_limits(&grid, &[], ConvoluteMode::Normal); let results1: Vec<_> = pdfset1 .par_iter_mut() @@ -103,7 +103,7 @@ impl Subcommand for Opts { } title.add_cell(cell!(c->"total\n[\u{3c3}]")); for _ in 0..limit { - title.add_cell(cell!(c->"l")); + title.add_cell(cell!(c->"c")); title.add_cell(cell!(c->"pull\n[\u{3c3}]")); } @@ -140,19 +140,19 @@ impl Subcommand for Opts { (diff / unc1.hypot(unc2), unc1, unc2) }; - let lumi_results = + let channel_results = |member: Option, pdfset: &mut Vec, set: &PdfSet| -> Vec { if let Some(member) = member { - (0..grid.lumi().len()) - .map(|lumi| { - let mut lumi_mask = vec![false; grid.lumi().len()]; - lumi_mask[lumi] = true; + (0..grid.channels().len()) + .map(|channel| { + let mut channel_mask = vec![false; grid.channels().len()]; + channel_mask[channel] = true; match helpers::convolve( &grid, &mut pdfset[member], &self.orders, &[bin], - &lumi_mask, + &channel_mask, 1, ConvoluteMode::Normal, cfg, @@ -168,16 +168,16 @@ impl Subcommand for Opts { let results: Vec<_> = pdfset .iter_mut() .flat_map(|pdf| { - (0..grid.lumi().len()) - .map(|lumi| { - let mut lumi_mask = vec![false; grid.lumi().len()]; - lumi_mask[lumi] = true; + (0..grid.channels().len()) + .map(|channel| { + let mut channel_mask = vec![false; grid.channels().len()]; + channel_mask[channel] = true; match helpers::convolve( &grid, pdf, 
&self.orders, &[bin], - &lumi_mask, + &channel_mask, 1, ConvoluteMode::Normal, cfg, @@ -192,12 +192,12 @@ impl Subcommand for Opts { }) .collect(); - (0..grid.lumi().len()) - .map(|lumi| { + (0..grid.channels().len()) + .map(|channel| { let central: Vec<_> = results .iter() - .skip(lumi) - .step_by(grid.lumi().len()) + .skip(channel) + .step_by(grid.channels().len()) .copied() .collect(); set.uncertainty(¢ral, self.cl, false).unwrap().central @@ -209,12 +209,12 @@ impl Subcommand for Opts { let mut pull_tuples = if self.limit == 0 { vec![] } else { - let lumi_results1 = lumi_results(member1, &mut pdfset1, &set1); - let lumi_results2 = lumi_results(member2, &mut pdfset2, &set2); + let channel_results1 = channel_results(member1, &mut pdfset1, &set1); + let channel_results2 = channel_results(member2, &mut pdfset2, &set2); - let pull_tuples: Vec<_> = lumi_results2 + let pull_tuples: Vec<_> = channel_results2 .iter() - .zip(lumi_results1.iter()) + .zip(channel_results1.iter()) .map(|(res2, res1)| (res2 - res1) / unc1.hypot(unc2)) .enumerate() .collect(); @@ -237,8 +237,8 @@ impl Subcommand for Opts { pull_right.abs().total_cmp(&pull_left.abs()) }); - for (lumi, pull) in pull_tuples.iter().take(self.limit) { - row.add_cell(cell!(r->format!("{lumi}"))); + for (channel, pull) in pull_tuples.iter().take(self.limit) { + row.add_cell(cell!(r->format!("{channel}"))); row.add_cell(cell!(r->format!("{:.*}", self.digits, pull))); } } diff --git a/pineappl_cli/src/read.rs b/pineappl_cli/src/read.rs index eedcc4c2e..b08e4e159 100644 --- a/pineappl_cli/src/read.rs +++ b/pineappl_cli/src/read.rs @@ -24,9 +24,9 @@ struct Group { /// Show the bins of a grid. #[arg(long, short)] bins: bool, - /// Show the luminsities a grid. - #[arg(long, short)] - lumis: bool, + /// Show the channel definition of a grid. + #[arg(alias = "lumis", long)] + channels: bool, /// Check if input is an FK table. 
#[arg(long)] fktable: bool, @@ -104,14 +104,14 @@ impl Subcommand for Opts { println!("yes"); return Ok(ExitCode::SUCCESS); - } else if self.group.lumis { - let mut titles = row![c => "l"]; + } else if self.group.channels { + let mut titles = row![c => "c"]; // if there are no channels print at least one column for _ in 0..grid - .lumi() + .channels() .iter() - .map(|lumi| lumi.entry().len()) + .map(|channel| channel.entry().len()) .max() .unwrap_or(1) { @@ -119,12 +119,12 @@ impl Subcommand for Opts { } table.set_titles(titles); - for (index, entry) in grid.lumi().iter().enumerate() { + for (index, channel) in grid.channels().iter().enumerate() { let row = table.add_empty_row(); row.add_cell(cell!(format!("{index}"))); - for (id1, id2, factor) in entry.entry() { + for (id1, id2, factor) in channel.entry() { row.add_cell(cell!(format!("{factor} \u{d7} ({id1:2}, {id2:2})"))); } } diff --git a/pineappl_cli/src/subgrids.rs b/pineappl_cli/src/subgrids.rs index 69f26a6a3..3cdd5323c 100644 --- a/pineappl_cli/src/subgrids.rs +++ b/pineappl_cli/src/subgrids.rs @@ -57,7 +57,7 @@ impl Subcommand for Opts { fn run(&self, _: &GlobalConfiguration) -> Result { let grid = helpers::read_grid(&self.input)?; let mut table = helpers::create_table(); - let mut titles = row![c => "o", "b", "l"]; + let mut titles = row![c => "o", "b", "c"]; if self.group.type_ { titles.add_cell(cell!(c->"type")); @@ -88,7 +88,7 @@ impl Subcommand for Opts { } table.set_titles(titles); - for ((order, bin, lumi), subgrid) in grid.subgrids().indexed_iter() { + for ((order, bin, channel), subgrid) in grid.subgrids().indexed_iter() { if !self.show_empty && subgrid.is_empty() { continue; } @@ -97,7 +97,7 @@ impl Subcommand for Opts { row.add_cell(cell!(l->format!("{order}"))); row.add_cell(cell!(l->format!("{bin}"))); - row.add_cell(cell!(l->format!("{lumi}"))); + row.add_cell(cell!(l->format!("{channel}"))); if self.group.type_ { row.add_cell(cell!(l-> diff --git a/pineappl_cli/src/write.rs 
b/pineappl_cli/src/write.rs index fc59fe85c..6be8c3822 100644 --- a/pineappl_cli/src/write.rs +++ b/pineappl_cli/src/write.rs @@ -7,9 +7,8 @@ use clap::{ ValueHint, }; use pineappl::bin::BinRemapper; -use pineappl::boc::Order; +use pineappl::boc::{Channel, Order}; use pineappl::fk_table::{FkAssumptions, FkTable}; -use pineappl::lumi::LumiEntry; use pineappl::pids; use pineappl::pids::PidBasis; use std::fs; @@ -44,7 +43,7 @@ enum OpsArg { Remap(String), RemapNorm(f64), RemapNormIgnore(Vec), - RewriteChannel((usize, LumiEntry)), + RewriteChannel((usize, Channel)), RewriteOrder((usize, Order)), RotatePidBasis(PidBasis), Scale(f64), @@ -52,7 +51,7 @@ enum OpsArg { ScaleByOrder(Vec), SetKeyFile(Vec), SetKeyValue(Vec), - SplitLumi(bool), + SplitChannels(bool), Upgrade(bool), } @@ -74,7 +73,7 @@ impl FromArgMatches for MoreArgs { args.resize(indices.iter().max().unwrap() + 1, None); match id.as_str() { - "cc1" | "cc2" | "optimize" | "split_lumi" | "upgrade" => { + "cc1" | "cc2" | "optimize" | "split_channels" | "upgrade" => { let arguments: Vec> = matches .remove_occurrences(&id) .unwrap() @@ -88,7 +87,7 @@ impl FromArgMatches for MoreArgs { "cc1" => OpsArg::Cc1(arg[0]), "cc2" => OpsArg::Cc2(arg[0]), "optimize" => OpsArg::Optimize(arg[0]), - "split_lumi" => OpsArg::SplitLumi(arg[0]), + "split_channels" => OpsArg::SplitChannels(arg[0]), "upgrade" => OpsArg::Upgrade(arg[0]), _ => unreachable!(), }); @@ -461,11 +460,12 @@ impl Args for MoreArgs { .value_names(["KEY", "FILE"]), ) .arg( - Arg::new("split_lumi") + Arg::new("split_channels") .action(ArgAction::Append) .default_missing_value("true") - .help("Split the grid such that the luminosity function contains only a single combination per channel") - .long("split-lumi") + .help("Split the grid such that each channel contains only a single PID combination") + .long("split-channels") + .alias("split-lumi") .num_args(0..=1) .require_equals(true) .value_name("ENABLE") @@ -503,11 +503,11 @@ impl Subcommand for Opts { let 
lumi_id_types = grid.key_values().map_or("pdg_mc_ids", |kv| { kv.get("lumi_id_types").map_or("pdg_mc_ids", Deref::deref) }); - let lumis = grid - .lumi() + let channels = grid + .channels() .iter() .map(|entry| { - LumiEntry::new( + Channel::new( entry .entry() .iter() @@ -536,7 +536,7 @@ impl Subcommand for Opts { grid.set_convolution(1, grid.convolutions()[1].cc()) } - grid.set_lumis(lumis); + grid.set_channels(channels); } OpsArg::DedupChannels(ulps) => { grid.dedup_channels(*ulps); @@ -594,10 +594,10 @@ impl Subcommand for Opts { )?; } OpsArg::RewriteChannel((index, new_channel)) => { - let mut channels = grid.lumi().to_vec(); + let mut channels = grid.channels().to_vec(); // TODO: check that `index` is valid channels[*index] = new_channel.clone(); - grid.set_lumis(channels); + grid.set_channels(channels); } OpsArg::RewriteOrder((index, order)) => { grid.orders_mut()[*index] = order.clone(); @@ -622,12 +622,12 @@ impl Subcommand for Opts { OpsArg::SetKeyFile(key_file) => { grid.set_key_value(&key_file[0], &fs::read_to_string(&key_file[1])?); } - OpsArg::SplitLumi(true) => grid.split_lumi(), + OpsArg::SplitChannels(true) => grid.split_channels(), OpsArg::Upgrade(true) => grid.upgrade(), OpsArg::Cc1(false) | OpsArg::Cc2(false) | OpsArg::Optimize(false) - | OpsArg::SplitLumi(false) + | OpsArg::SplitChannels(false) | OpsArg::Upgrade(false) => {} } } diff --git a/pineappl_cli/tests/analyze.rs b/pineappl_cli/tests/analyze.rs index ca94a7648..a84718372 100644 --- a/pineappl_cli/tests/analyze.rs +++ b/pineappl_cli/tests/analyze.rs @@ -27,7 +27,7 @@ Options: -h, --help Print help "; -const CKF_STR: &str = "b etal bin-K l K l K l K l K l K +const CKF_STR: &str = "b etal bin-K c K c K c K c K c K [] -+----+----+-----+-+----+-+----+-+----+-+----+-+---- 0 2 2.25 1.17 0 1.30 3 -inf 1 -inf 2 0.00 4 0.00 @@ -42,7 +42,7 @@ const CKF_STR: &str = "b etal bin-K l K l K l K l K l K // TODO: understand these factors const CKF_WITH_DEFAULT_DENOMINATOR_STR: &str = - "b etal bin-K l K 
l K l K l K l K + "b etal bin-K c K c K c K c K c K [] -+----+----+------+-+------+-+----+-+----+-+----+-+---- 0 2 2.25 -13.20 0 -23.29 3 -inf 1 -inf 4 1.00 2 1.00 diff --git a/pineappl_cli/tests/channels.rs b/pineappl_cli/tests/channels.rs index 06a30f08a..c6dcee11b 100644 --- a/pineappl_cli/tests/channels.rs +++ b/pineappl_cli/tests/channels.rs @@ -9,18 +9,18 @@ Arguments: LHAPDF id or name of the PDF set Options: - -a, --absolute Show absolute numbers of each contribution - -l, --limit The maximum number of channels displayed [default: 10] - -i, --integrated Show integrated numbers (without bin widths) instead of differential ones - --lumis Show only the listed channels - -o, --orders Select orders manually - --dont-sort Do not sort the channels according to their size - --digits-abs Set the number of fractional digits shown for absolute numbers [default: 7] - --digits-rel Set the number of fractional digits shown for relative numbers [default: 2] - -h, --help Print help + -a, --absolute Show absolute numbers of each contribution + -l, --limit The maximum number of channels displayed [default: 10] + -i, --integrated Show integrated numbers (without bin widths) instead of differential ones + --channels Show only the listed channels + -o, --orders Select orders manually + --dont-sort Do not sort the channels according to their size + --digits-abs Set the number of fractional digits shown for absolute numbers [default: 7] + --digits-rel Set the number of fractional digits shown for relative numbers [default: 2] + -h, --help Print help "; -const DEFAULT_STR: &str = "b etal l size l size l size l size l size +const DEFAULT_STR: &str = "b etal c size c size c size c size c size [] [%] [%] [%] [%] [%] -+----+----+-+------+-+------+-+-----+-+----+-+---- 0 2 2.25 0 111.32 3 -8.05 1 -3.31 4 0.02 2 0.01 @@ -34,7 +34,7 @@ const DEFAULT_STR: &str = "b etal l size l size l size l size l size "; const ABSOLUTE_STR: &str = - "b etal l dsig/detal l dsig/detal l dsig/detal l 
dsig/detal l dsig/detal + "b etal c dsig/detal c dsig/detal c dsig/detal c dsig/detal c dsig/detal [] [pb] [pb] [pb] [pb] [pb] -+----+----+-+-----------+-+------------+-+------------+-+------------+-+------------ 0 2 2.25 0 8.4002759e2 3 -6.0727462e1 1 -2.4969360e1 4 1.7176328e-1 2 8.8565923e-2 @@ -48,7 +48,7 @@ const ABSOLUTE_STR: &str = "; const ABSOLUTE_INTEGRATED_STR: &str = - "b etal l integ l integ l integ l integ l integ + "b etal c integ c integ c integ c integ c integ [] [] [] [] [] [] -+----+----+-+-----------+-+------------+-+------------+-+------------+-+------------ 0 2 2.25 0 2.1000690e2 3 -1.5181865e1 1 -6.2423401e0 4 4.2940819e-2 2 2.2141481e-2 @@ -61,7 +61,7 @@ const ABSOLUTE_INTEGRATED_STR: &str = 7 4 4.5 0 1.5943129e1 3 -1.1843361e0 1 -1.0028343e0 4 1.7077102e-3 2 9.6673424e-4 "; -const LIMIT_3_STR: &str = "b etal l size l size l size +const LIMIT_3_STR: &str = "b etal c size c size c size [] [%] [%] [%] -+----+----+-+------+-+------+-+----- 0 2 2.25 0 111.32 3 -8.05 1 -3.31 @@ -79,7 +79,7 @@ const BAD_LIMIT_STR: &str = "error: invalid value '0' for '--limit ': 0 i For more information, try '--help'. 
"; -const LUMIS_0123_STR: &str = "b etal l size l size l size l size +const LUMIS_0123_STR: &str = "b etal c size c size c size c size [] [%] [%] [%] [%] -+----+----+-+------+-+------+-+-----+-+---- 0 2 2.25 0 111.32 3 -8.05 1 -3.31 2 0.01 @@ -92,7 +92,7 @@ const LUMIS_0123_STR: &str = "b etal l size l size l size l size 7 4 4.5 0 115.88 3 -8.61 1 -7.29 2 0.01 "; -const ORDERS_A2_AS1A2_STR: &str = "b etal l size l size l size l size l size +const ORDERS_A2_AS1A2_STR: &str = "b etal c size c size c size c size c size [] [%] [%] [%] [%] [%] -+----+----+-+------+-+------+-+-----+-+----+-+---- 0 2 2.25 0 111.24 3 -7.96 1 -3.27 2 0.00 4 0.00 @@ -106,7 +106,7 @@ const ORDERS_A2_AS1A2_STR: &str = "b etal l size l size l size l size "; const DONT_SORT_ABSOLUTE_STR: &str = - "b etal l dsig/detal l dsig/detal l dsig/detal l dsig/detal l dsig/detal + "b etal c dsig/detal c dsig/detal c dsig/detal c dsig/detal c dsig/detal [] [pb] [pb] [pb] [pb] [pb] -+----+----+-+-----------+-+------------+-+------------+-+------------+-+------------ 0 2 2.25 0 8.4002759e2 1 -2.4969360e1 2 8.8565923e-2 3 -6.0727462e1 4 1.7176328e-1 @@ -119,7 +119,7 @@ const DONT_SORT_ABSOLUTE_STR: &str = 7 4 4.5 0 3.1886258e1 1 -2.0056686e0 2 1.9334685e-3 3 -2.3686722e0 4 3.4154203e-3 "; -const DONT_SORT_STR: &str = "b etal l size l size l size l size l size +const DONT_SORT_STR: &str = "b etal c size c size c size c size c size [] [%] [%] [%] [%] [%] -+----+----+-+------+-+-----+-+----+-+------+-+---- 0 2 2.25 0 111.32 1 -3.31 2 0.01 3 -8.05 4 0.02 @@ -218,12 +218,12 @@ fn bad_limit() { } #[test] -fn lumis_0123() { +fn channels_0123() { Command::cargo_bin("pineappl") .unwrap() .args([ "channels", - "--lumis=0-3", + "--channels=0-3", "../test-data/LHCB_WP_7TEV.pineappl.lz4", "NNPDF31_nlo_as_0118_luxqed", ]) diff --git a/pineappl_cli/tests/diff.rs b/pineappl_cli/tests/diff.rs index e8369420f..cba201448 100644 --- a/pineappl_cli/tests/diff.rs +++ b/pineappl_cli/tests/diff.rs @@ -13,7 +13,7 @@ Arguments: 
Options: --ignore-orders Ignore differences in the orders and sum them --ignore-bin-limits Ignore bin limits (but not number of bins) - --ignore-lumis Ignore differences in the luminosity functions + --ignore-channels Ignore differences in the channel definition --orders1 Select orders of the first grid --orders2 Select orders of the second grid --scale1 Scale all results of the first grid [default: 1.0] @@ -81,7 +81,7 @@ const BIN_LIMITS_DIFFER_STR: &str = "Error: bins limits differ const BIN_NUMBER_DIFFERS_STR: &str = "Error: number of bins differ "; -const LUMIS_DIFFER_STR: &str = "Error: luminosities differ +const CHANNELS_DIFFER_STR: &str = "Error: channels differ "; #[test] @@ -240,7 +240,7 @@ fn bin_number_differs() { } #[test] -fn lumis_differ() { +fn channels_differ() { Command::cargo_bin("pineappl") .unwrap() .args([ @@ -251,6 +251,6 @@ fn lumis_differ() { ]) .assert() .failure() - .stderr(LUMIS_DIFFER_STR) + .stderr(CHANNELS_DIFFER_STR) .stdout(""); } diff --git a/pineappl_cli/tests/import.rs b/pineappl_cli/tests/import.rs index f07ce4acd..5bf26923e 100644 --- a/pineappl_cli/tests/import.rs +++ b/pineappl_cli/tests/import.rs @@ -910,9 +910,9 @@ fn import_hadronic_fktable() { fk_table.grid().convolutions(), [Convolution::UnpolPDF(2212), Convolution::UnpolPDF(2212)] ); - let lumi = fk_table.lumi(); + let channels = fk_table.channels(); assert_eq!( - lumi, + channels, [ (100, 100), (100, 21), @@ -1001,7 +1001,7 @@ fn import_hadronic_fktable() { assert_eq!(results, fk_table.convolve(&mut lumi_cache, &[], &[])); fk_table.optimize(FkAssumptions::Nf6Ind); - assert_eq!(fk_table.lumi(), lumi); + assert_eq!(fk_table.channels(), channels); assert_approx_eq!( f64, results[0], @@ -1009,7 +1009,7 @@ fn import_hadronic_fktable() { ulps = 4 ); fk_table.optimize(FkAssumptions::Nf6Sym); - assert_eq!(fk_table.lumi(), lumi); + assert_eq!(fk_table.channels(), channels); assert_approx_eq!( f64, results[0], @@ -1017,21 +1017,21 @@ fn import_hadronic_fktable() { ulps = 4 ); 
fk_table.optimize(FkAssumptions::Nf5Ind); - assert_eq!(fk_table.lumi(), lumi); + assert_eq!(fk_table.channels(), channels); assert_approx_eq!( f64, results[0], fk_table.convolve(&mut lumi_cache, &[], &[])[0] ); fk_table.optimize(FkAssumptions::Nf5Sym); - assert_eq!(fk_table.lumi(), lumi); + assert_eq!(fk_table.channels(), channels); assert_approx_eq!( f64, results[0], fk_table.convolve(&mut lumi_cache, &[], &[])[0] ); fk_table.optimize(FkAssumptions::Nf4Ind); - assert_eq!(fk_table.lumi(), lumi); + assert_eq!(fk_table.channels(), channels); assert_approx_eq!( f64, results[0], @@ -1040,7 +1040,7 @@ fn import_hadronic_fktable() { fk_table.optimize(FkAssumptions::Nf4Sym); assert_eq!( - fk_table.lumi(), + fk_table.channels(), [ (100, 100), (100, 21), @@ -1082,7 +1082,7 @@ fn import_hadronic_fktable() { ); fk_table.optimize(FkAssumptions::Nf3Ind); assert_eq!( - fk_table.lumi(), + fk_table.channels(), [ (100, 21), (100, 203), @@ -1116,7 +1116,7 @@ fn import_hadronic_fktable() { ); fk_table.optimize(FkAssumptions::Nf3Sym); assert_eq!( - fk_table.lumi(), + fk_table.channels(), [ (100, 21), (100, 203), diff --git a/pineappl_cli/tests/plot.rs b/pineappl_cli/tests/plot.rs index 41c167789..2f2d4355a 100644 --- a/pineappl_cli/tests/plot.rs +++ b/pineappl_cli/tests/plot.rs @@ -12,7 +12,7 @@ Arguments: Options: -s, --scales Set the number of scale variations [default: 7] [possible values: 1, 3, 7, 9] - --subgrid-pull Show the pull for a specific grid three-dimensionally + --subgrid-pull Show the pull for a specific grid three-dimensionally --asymmetry Plot the asymmetry --threads Number of threads to utilize [default: {}] --no-pdf-unc Disable the (time-consuming) calculation of PDF uncertainties diff --git a/pineappl_cli/tests/pull.rs b/pineappl_cli/tests/pull.rs index cc605ab08..4cc2f686b 100644 --- a/pineappl_cli/tests/pull.rs +++ b/pineappl_cli/tests/pull.rs @@ -13,14 +13,14 @@ Arguments: Options: --cl Confidence level in per cent [default: 68.26894921370858] - -l, --limit The 
maximum number of luminosities displayed [default: 10] + -l, --limit The maximum number of channels displayed [default: 10] -o, --orders Select orders manually --threads Number of threads to utilize [default: {}] --digits Set the number of digits shown for numerical values [default: 3] -h, --help Print help "; -const DEFAULT_STR: &str = "b etal total l pull l pull l pull l pull l pull +const DEFAULT_STR: &str = "b etal total c pull c pull c pull c pull c pull [] [\u{3c3}] [\u{3c3}] [\u{3c3}] [\u{3c3}] [\u{3c3}] [\u{3c3}] -+----+----+-----+-+-----+-+------+-+------+-+------+-+------ 0 2 2.25 3.578 0 3.765 1 -0.108 3 -0.052 4 -0.018 2 -0.009 @@ -33,7 +33,7 @@ const DEFAULT_STR: &str = "b etal total l pull l pull l pull l pull 7 4 4.5 1.224 0 1.435 1 -0.353 3 0.147 4 -0.003 2 -0.002 "; -const ORDERS_STR: &str = "b etal total l pull l pull l pull l pull l pull +const ORDERS_STR: &str = "b etal total c pull c pull c pull c pull c pull [] [\u{3c3}] [\u{3c3}] [\u{3c3}] [\u{3c3}] [\u{3c3}] [\u{3c3}] -+----+----+-----+-+-----+-+-----+-+-----+-+-----+-+----- 0 2 2.25 3.631 0 3.631 1 0.000 2 0.000 3 0.000 4 0.000 @@ -46,7 +46,7 @@ const ORDERS_STR: &str = "b etal total l pull l pull l pull l pull l pu 7 4 4.5 1.202 0 1.202 1 0.000 2 0.000 3 0.000 4 0.000 "; -const CL_90_STR: &str = "b etal total l pull l pull l pull l pull l pull +const CL_90_STR: &str = "b etal total c pull c pull c pull c pull c pull [] [\u{3c3}] [\u{3c3}] [\u{3c3}] [\u{3c3}] [\u{3c3}] [\u{3c3}] -+----+----+-----+-+-----+-+------+-+------+-+------+-+------ 0 2 2.25 2.175 0 2.289 1 -0.065 3 -0.031 4 -0.011 2 -0.006 @@ -59,7 +59,7 @@ const CL_90_STR: &str = "b etal total l pull l pull l pull l pull l 7 4 4.5 0.744 0 0.872 1 -0.215 3 0.089 4 -0.002 2 -0.001 "; -const LIMIT_STR: &str = "b etal total l pull +const LIMIT_STR: &str = "b etal total c pull [] [\u{3c3}] [\u{3c3}] -+----+----+-----+-+----- 0 2 2.25 3.578 0 3.765 @@ -72,7 +72,7 @@ const LIMIT_STR: &str = "b etal total l pull 7 4 4.5 1.224 0 1.435 "; 
-const REPLICA0_STR: &str = "b etal total l pull l pull l pull l pull l pull +const REPLICA0_STR: &str = "b etal total c pull c pull c pull c pull c pull [] [\u{3c3}] [\u{3c3}] [\u{3c3}] [\u{3c3}] [\u{3c3}] [\u{3c3}] -+----+----+-----+-+-----+-+------+-+------+-+------+-+------ 0 2 2.25 3.583 0 3.770 1 -0.108 3 -0.052 4 -0.018 2 -0.009 diff --git a/pineappl_cli/tests/read.rs b/pineappl_cli/tests/read.rs index 6630de279..31ad9faba 100644 --- a/pineappl_cli/tests/read.rs +++ b/pineappl_cli/tests/read.rs @@ -2,7 +2,7 @@ use assert_cmd::Command; const HELP_STR: &str = "Read out information of a grid -Usage: pineappl read <--orders|--orders-spaces|--orders-long|--bins|--lumis|--fktable|--ew|--get |--keys|--qcd|--show> +Usage: pineappl read <--orders|--orders-spaces|--orders-long|--bins|--channels|--fktable|--ew|--get |--keys|--qcd|--show> Arguments: Path to the input grid @@ -12,7 +12,7 @@ Options: --orders-spaces Show the orders of a grid, replacing zero powers with spaces --orders-long Show the orders of a grid, including zero powers -b, --bins Show the bins of a grid - -l, --lumis Show the luminsities a grid + --channels Show the channel definition of a grid --fktable Check if input is an FK table --ew For each order print a list of the largest EW order --get Gets an internal key-value pair @@ -34,7 +34,7 @@ const BINS_STR: &str = "b etal norm 7 4 4.5 0.5 "; -const LUMIS_STR: &str = "l entry entry +const CHANNELS_STR: &str = "c entry entry -+------------+------------ 0 1 × ( 2, -1) 1 × ( 4, -3) 1 1 × (21, -3) 1 × (21, -1) @@ -82,7 +82,7 @@ multiple orders detected const WRONG_ORDERS_STR: &str = "error: the argument '--orders' cannot be used with '--orders-long' -Usage: pineappl read <--orders|--orders-spaces|--orders-long|--bins|--lumis|--fktable|--ew|--get |--keys|--qcd|--show> +Usage: pineappl read <--orders|--orders-spaces|--orders-long|--bins|--channels|--fktable|--ew|--get |--keys|--qcd|--show> For more information, try '--help'. 
"; @@ -549,7 +549,7 @@ y_unit: pb const WRONG_ARGUMENTS_STR: &str = "error: the argument '--ew' cannot be used with '--qcd' -Usage: pineappl read <--orders|--orders-spaces|--orders-long|--bins|--lumis|--fktable|--ew|--get |--keys|--qcd|--show> +Usage: pineappl read <--orders|--orders-spaces|--orders-long|--bins|--channels|--fktable|--ew|--get |--keys|--qcd|--show> For more information, try '--help'. "; @@ -575,13 +575,17 @@ fn bins() { } #[test] -fn lumis() { +fn channels() { Command::cargo_bin("pineappl") .unwrap() - .args(["read", "--lumis", "../test-data/LHCB_WP_7TEV.pineappl.lz4"]) + .args([ + "read", + "--channels", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + ]) .assert() .success() - .stdout(LUMIS_STR); + .stdout(CHANNELS_STR); } #[test] diff --git a/pineappl_cli/tests/subgrids.rs b/pineappl_cli/tests/subgrids.rs index 40a5cad6b..5e4cd9773 100644 --- a/pineappl_cli/tests/subgrids.rs +++ b/pineappl_cli/tests/subgrids.rs @@ -21,7 +21,7 @@ Options: -h, --help Print help "; -const MUF_STR: &str = "o b l muf +const MUF_STR: &str = "o b c muf -+-+-+------ 0 0 0 80.352 0 1 0 80.352 @@ -129,7 +129,7 @@ const MUF_STR: &str = "o b l muf 6 7 4 80.352 "; -const MUF2_STR: &str = "o b l muf2 +const MUF2_STR: &str = "o b c muf2 -+-+-+-------- 0 0 0 6456.444 0 1 0 6456.444 @@ -237,7 +237,7 @@ const MUF2_STR: &str = "o b l muf2 6 7 4 6456.444 "; -const MUR_STR: &str = "o b l mur +const MUR_STR: &str = "o b c mur -+-+-+------ 0 0 0 80.352 0 1 0 80.352 @@ -345,7 +345,7 @@ const MUR_STR: &str = "o b l mur 6 7 4 80.352 "; -const MUR2_STR: &str = "o b l mur2 +const MUR2_STR: &str = "o b c mur2 -+-+-+-------- 0 0 0 6456.444 0 1 0 6456.444 @@ -453,7 +453,7 @@ const MUR2_STR: &str = "o b l mur2 6 7 4 6456.444 "; -const STATS_STR: &str = "o b l total allocated zeros overhead +const STATS_STR: &str = "o b c total allocated zeros overhead -+-+-+-----+---------+-----+-------- 0 0 0 2500 1012 3 102 0 1 0 2500 1009 8 102 @@ -561,7 +561,7 @@ const STATS_STR: &str = "o b l total allocated 
zeros overhead 6 7 4 2500 745 36 102 "; -const TYPE_STR: &str = "o b l type +const TYPE_STR: &str = "o b c type -+-+-+------------------- 0 0 0 ImportOnlySubgridV1 0 1 0 ImportOnlySubgridV1 @@ -669,7 +669,7 @@ const TYPE_STR: &str = "o b l type 6 7 4 ImportOnlySubgridV1 "; -const TYPE_SHOW_EMPTY_STR: &str = "o b l type +const TYPE_SHOW_EMPTY_STR: &str = "o b c type -+-+-+------------------- 0 0 0 ImportOnlySubgridV1 0 0 1 EmptySubgridV1 @@ -953,7 +953,7 @@ const TYPE_SHOW_EMPTY_STR: &str = "o b l type 6 7 4 ImportOnlySubgridV1 "; -const X1_STR: &str = "o b l x1 +const X1_STR: &str = "o b c x1 -+-+-+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 0 0 0 1.000e0, 9.309e-1, 8.628e-1, 7.956e-1, 7.296e-1, 6.648e-1, 6.015e-1, 5.398e-1, 4.799e-1, 4.222e-1, 3.669e-1, 3.144e-1, 2.651e-1, 2.195e-1, 1.780e-1, 1.411e-1, 1.091e-1, 8.228e-2, 6.048e-2, 4.341e-2, 3.052e-2, 2.109e-2, 1.438e-2, 9.699e-3, 6.496e-3, 4.329e-3, 2.874e-3, 1.903e-3, 1.259e-3, 8.314e-4, 5.488e-4, 3.621e-4, 2.388e-4, 1.575e-4, 1.038e-4, 6.844e-5, 4.511e-5, 2.974e-5, 1.960e-5, 1.292e-5, 8.517e-6, 5.614e-6, 3.700e-6, 2.439e-6, 1.608e-6, 1.060e-6, 6.984e-7, 4.604e-7, 3.034e-7, 2.000e-7 0 1 0 1.000e0, 9.309e-1, 8.628e-1, 7.956e-1, 7.296e-1, 6.648e-1, 6.015e-1, 5.398e-1, 4.799e-1, 4.222e-1, 3.669e-1, 3.144e-1, 2.651e-1, 2.195e-1, 1.780e-1, 1.411e-1, 1.091e-1, 8.228e-2, 6.048e-2, 4.341e-2, 3.052e-2, 2.109e-2, 1.438e-2, 9.699e-3, 6.496e-3, 4.329e-3, 2.874e-3, 1.903e-3, 1.259e-3, 8.314e-4, 5.488e-4, 3.621e-4, 2.388e-4, 1.575e-4, 1.038e-4, 6.844e-5, 4.511e-5, 2.974e-5, 
1.960e-5, 1.292e-5, 8.517e-6, 5.614e-6, 3.700e-6, 2.439e-6, 1.608e-6, 1.060e-6, 6.984e-7, 4.604e-7, 3.034e-7, 2.000e-7 @@ -1061,7 +1061,7 @@ const X1_STR: &str = "o b l 6 7 4 1.000e0, 9.309e-1, 8.628e-1, 7.956e-1, 7.296e-1, 6.648e-1, 6.015e-1, 5.398e-1, 4.799e-1, 4.222e-1, 3.669e-1, 3.144e-1, 2.651e-1, 2.195e-1, 1.780e-1, 1.411e-1, 1.091e-1, 8.228e-2, 6.048e-2, 4.341e-2, 3.052e-2, 2.109e-2, 1.438e-2, 9.699e-3, 6.496e-3, 4.329e-3, 2.874e-3, 1.903e-3, 1.259e-3, 8.314e-4, 5.488e-4, 3.621e-4, 2.388e-4, 1.575e-4, 1.038e-4, 6.844e-5, 4.511e-5, 2.974e-5, 1.960e-5, 1.292e-5, 8.517e-6, 5.614e-6, 3.700e-6, 2.439e-6, 1.608e-6, 1.060e-6, 6.984e-7, 4.604e-7, 3.034e-7, 2.000e-7 "; -const X2_STR: &str = "o b l x2 +const X2_STR: &str = "o b c x2 -+-+-+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 0 0 0 1.000e0, 9.309e-1, 8.628e-1, 7.956e-1, 7.296e-1, 6.648e-1, 6.015e-1, 5.398e-1, 4.799e-1, 4.222e-1, 3.669e-1, 3.144e-1, 2.651e-1, 2.195e-1, 1.780e-1, 1.411e-1, 1.091e-1, 8.228e-2, 6.048e-2, 4.341e-2, 3.052e-2, 2.109e-2, 1.438e-2, 9.699e-3, 6.496e-3, 4.329e-3, 2.874e-3, 1.903e-3, 1.259e-3, 8.314e-4, 5.488e-4, 3.621e-4, 2.388e-4, 1.575e-4, 1.038e-4, 6.844e-5, 4.511e-5, 2.974e-5, 1.960e-5, 1.292e-5, 8.517e-6, 5.614e-6, 3.700e-6, 2.439e-6, 1.608e-6, 1.060e-6, 6.984e-7, 4.604e-7, 3.034e-7, 2.000e-7 0 1 0 1.000e0, 9.309e-1, 8.628e-1, 7.956e-1, 7.296e-1, 6.648e-1, 6.015e-1, 5.398e-1, 4.799e-1, 4.222e-1, 3.669e-1, 3.144e-1, 2.651e-1, 2.195e-1, 1.780e-1, 1.411e-1, 1.091e-1, 8.228e-2, 6.048e-2, 4.341e-2, 3.052e-2, 2.109e-2, 1.438e-2, 9.699e-3, 
6.496e-3, 4.329e-3, 2.874e-3, 1.903e-3, 1.259e-3, 8.314e-4, 5.488e-4, 3.621e-4, 2.388e-4, 1.575e-4, 1.038e-4, 6.844e-5, 4.511e-5, 2.974e-5, 1.960e-5, 1.292e-5, 8.517e-6, 5.614e-6, 3.700e-6, 2.439e-6, 1.608e-6, 1.060e-6, 6.984e-7, 4.604e-7, 3.034e-7, 2.000e-7 diff --git a/pineappl_cli/tests/write.rs b/pineappl_cli/tests/write.rs index a090d7d45..6f5c92513 100644 --- a/pineappl_cli/tests/write.rs +++ b/pineappl_cli/tests/write.rs @@ -30,12 +30,12 @@ Options: --scale-by-order Scales all grids with order-dependent factors --set-key-value Set an internal key-value pair --set-key-file Set an internal key-value pair, with value being read from a file - --split-lumi[=] Split the grid such that the luminosity function contains only a single combination per channel [possible values: true, false] + --split-channels[=] Split the grid such that each channel contains only a single PID combination [possible values: true, false] --upgrade[=] Convert the file format to the most recent version [possible values: true, false] -h, --help Print help "; -const CHANNEL_STR: &str = "l entry entry +const CHANNEL_STR: &str = "c entry entry -+------------+------------ 0 1 × ( 2, -1) 1 × ( 4, -3) 1 1 × (21, -3) 1 × (21, -1) @@ -85,7 +85,7 @@ const DELETE_BINS_25_STR: &str = "b etal dsig/detal 3 4 4.5 2.7517266e1 "; -const DELETE_CHANNELS_STR: &str = "l entry entry +const DELETE_CHANNELS_STR: &str = "c entry entry -+------------+------------ 0 1 × ( 2, -1) 1 × ( 4, -3) 1 1 × (22, -3) 1 × (22, -1) @@ -156,7 +156,7 @@ const REMAP_STR: &str = "b etal x2 x3 dsig/detal const REMAP_NO_REMAPPER_STR: &str = "Error: grid does not have a remapper "; -const REWRITE_CHANNELS_CONVOLUTE_STR: &str = "b etal dsig/detal +const REWRITE_CHANNELS_CONVOLVE_STR: &str = "b etal dsig/detal [] [pb] -+----+----+----------- 0 2 2.25 7.5534392e2 @@ -169,7 +169,7 @@ const REWRITE_CHANNELS_CONVOLUTE_STR: &str = "b etal dsig/detal 7 4 4.5 2.8214633e1 "; -const REWRITE_CHANNELS_LUMIS_STR: &str = "l entry entry entry entry 
entry entry +const REWRITE_CHANNELS_STR: &str = "c entry entry entry entry entry entry -+--------------------------------+-------------------------------+-----------------------+--------------------------------+-----------------------+--------------------- 0 0.0000128881 × ( 2, -5) 0.050940490000000005 × ( 2, -3) 0.9490461561 × ( 2, -1) 0.0017222500000000003 × ( 4, -5) 0.9473907556 × ( 4, -3) 0.05089536 × ( 4, -1) 1 0.0017351381000000003 × (-5, 21) 0.9983312456 × (-3, 21) 0.9999415161 × (-1, 21) @@ -204,7 +204,7 @@ const SCALE_BY_ORDER_STR: &str = "b etal dsig/detal 7 4 4.5 1.6435633e1 "; -const SPLIT_LUMI_STR: &str = "l entry +const SPLIT_CHANNELS_STR: &str = "c entry -+------------ 0 1 × ( 2, -1) 1 1 × ( 4, -3) @@ -253,7 +253,7 @@ const ROTATE_PID_BASIS_DIFF_STR: &str = "b x1 O(as^0 a^2) 7 4 4.5 2.2383492e1 2.2383492e1 -4.441e-16 -2.2022770e-1 -2.2022770e-1 -5.551e-16 5.3540011e0 5.3540011e0 -3.331e-16 "; -const ROTATE_PID_BASIS_READ_LUMIS_STR: &str = " l entry +const ROTATE_PID_BASIS_READ_CHANNELS_STR: &str = " c entry ---+----------------------------------- 0 0.013888888888888888 × (100, 100) 1 -0.020833333333333332 × (100, 103) @@ -579,7 +579,7 @@ fn delete_channels() { Command::cargo_bin("pineappl") .unwrap() - .args(["read", "--lumis", output.path().to_str().unwrap()]) + .args(["read", "--channels", output.path().to_str().unwrap()]) .assert() .success() .stdout(DELETE_CHANNELS_STR); @@ -786,14 +786,14 @@ fn scale_by_order() { } #[test] -fn split_lumi() { - let output = NamedTempFile::new("split-lumi.pineappl.lz4").unwrap(); +fn split_channels() { + let output = NamedTempFile::new("split-channels.pineappl.lz4").unwrap(); Command::cargo_bin("pineappl") .unwrap() .args([ "write", - "--split-lumi", + "--split-channels", "../test-data/LHCB_WP_7TEV.pineappl.lz4", output.path().to_str().unwrap(), ]) @@ -814,10 +814,10 @@ fn split_lumi() { Command::cargo_bin("pineappl") .unwrap() - .args(["read", "--lumis", output.path().to_str().unwrap()]) + .args(["read", 
"--channels", output.path().to_str().unwrap()]) .assert() .success() - .stdout(SPLIT_LUMI_STR); + .stdout(SPLIT_CHANNELS_STR); } #[test] @@ -828,7 +828,7 @@ fn dedup_channels() { .unwrap() .args([ "write", - "--split-lumi", + "--split-channels", "--dedup-channels", "../test-data/LHCB_WP_7TEV.pineappl.lz4", output.path().to_str().unwrap(), @@ -851,7 +851,7 @@ fn dedup_channels() { Command::cargo_bin("pineappl") .unwrap() - .args(["read", "--lumis", output.path().to_str().unwrap()]) + .args(["read", "--channels", output.path().to_str().unwrap()]) .assert() .success() .stdout(CHANNEL_STR); @@ -931,10 +931,10 @@ fn rewrite_channels() { Command::cargo_bin("pineappl") .unwrap() - .args(["read", "--lumis", output.path().to_str().unwrap()]) + .args(["read", "--channels", output.path().to_str().unwrap()]) .assert() .success() - .stdout(REWRITE_CHANNELS_LUMIS_STR); + .stdout(REWRITE_CHANNELS_STR); Command::cargo_bin("pineappl") .unwrap() @@ -945,7 +945,7 @@ fn rewrite_channels() { ]) .assert() .success() - .stdout(REWRITE_CHANNELS_CONVOLUTE_STR); + .stdout(REWRITE_CHANNELS_CONVOLVE_STR); } #[test] @@ -997,7 +997,7 @@ fn rotate_pid_basis() { "../test-data/LHCB_WP_7TEV.pineappl.lz4", pdg_to_evol.path().to_str().unwrap(), "NNPDF31_nlo_as_0118_luxqed", - "--ignore-lumis", + "--ignore-channels", ]) .assert() .success() @@ -1066,6 +1066,7 @@ fn rotate_pid_basis() { .args([ "write", "--rotate-pid-basis=EVOL", + // use the old name instead of `--split-channels` to test the alias "--split-lumi", "--optimize", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -1079,12 +1080,13 @@ fn rotate_pid_basis() { .unwrap() .args([ "read", - "--lumis", + // use the old name instead of `--channels` to test the alias + "--channels", evol_to_evol_optimize.path().to_str().unwrap(), ]) .assert() .success() - .stdout(ROTATE_PID_BASIS_READ_LUMIS_STR); + .stdout(ROTATE_PID_BASIS_READ_CHANNELS_STR); } #[test] diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index 58de6aea0..60abdbe83 
100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -158,8 +158,8 @@ impl PyFkTable { /// ------- /// list(tuple(float,float)) : /// luminosity functions as pid tuples - pub fn lumi(&self) -> Vec<(i32, i32)> { - self.fk_table.lumi() + pub fn channels(&self) -> Vec<(i32, i32)> { + self.fk_table.channels() } /// Get reference (fitting) scale. diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 3f7dd9593..5188fd542 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -1,6 +1,6 @@ +use pineappl::boc::Order; use pineappl::evolution::{AlphasTable, OperatorInfo}; use pineappl::grid::{Grid, Ntuple}; -use pineappl::order::Order; use pineappl::lumi::LumiCache; @@ -724,9 +724,9 @@ impl PyGrid { /// list(list(tuple(float,float))) : /// luminosity functions as pid tuples (multiple tuples can bee associated to the same /// contribution) - pub fn lumi(&self) -> Vec> { + pub fn channels(&self) -> Vec> { self.grid - .lumi() + .channels() .iter() .map(|entry| entry.entry().to_vec()) .collect() diff --git a/pineappl_py/src/lumi.rs b/pineappl_py/src/lumi.rs index 08b4d7ffd..cb0d4e736 100644 --- a/pineappl_py/src/lumi.rs +++ b/pineappl_py/src/lumi.rs @@ -1,4 +1,4 @@ -use pineappl::lumi::LumiEntry; +use pineappl::boc::Channel; use pyo3::prelude::*; @@ -14,11 +14,11 @@ use pyo3::prelude::*; #[pyclass] #[repr(transparent)] pub struct PyLumiEntry { - pub(crate) lumi_entry: LumiEntry, + pub(crate) lumi_entry: Channel, } impl PyLumiEntry { - pub(crate) fn new(lumi_entry: LumiEntry) -> Self { + pub(crate) fn new(lumi_entry: Channel) -> Self { Self { lumi_entry } } } @@ -27,7 +27,7 @@ impl PyLumiEntry { impl PyLumiEntry { #[new] pub fn new_lumi_entry(entry: Vec<(i32, i32, f64)>) -> Self { - Self::new(LumiEntry::new(entry)) + Self::new(Channel::new(entry)) } /// Get list representation. 
From c35d39e61a8dbc63eb17091ba56438d80333fd56 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 31 May 2024 13:37:17 +0200 Subject: [PATCH 119/179] Update container (#282) * Update APPLgrid from 1.6.27 to 1.6.36 * Update LHAPDF from 6.4.0 to 6.5.4 --- maintainer/pineappl-ci/Containerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maintainer/pineappl-ci/Containerfile b/maintainer/pineappl-ci/Containerfile index 71da5db7e..bbfe715bc 100644 --- a/maintainer/pineappl-ci/Containerfile +++ b/maintainer/pineappl-ci/Containerfile @@ -1,10 +1,10 @@ FROM quay.io/pypa/manylinux2014_x86_64 -ARG APPLGRID_V=1.6.27 +ARG APPLGRID_V=1.6.36 # must be at least 0.9.16, see https://github.com/NNPDF/pineappl/pull/242#issuecomment-1705371291 ARG CARGOC_V=0.9.24+cargo-0.73.0 ARG FASTNLO_V=2.5.0-2826 -ARG LHAPDF_V=6.4.0 +ARG LHAPDF_V=6.5.4 ARG ZLIB_V=1.3.1 # the last version is the default Rust version used in the container From 2c331016c8e5fbc64922804086efd2541bdf16de Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 2 Jun 2024 09:31:45 +0200 Subject: [PATCH 120/179] Add Python 3.11 and 3.12 to CI --- .github/workflows/python.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index c4777316a..1750e999c 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -9,7 +9,7 @@ jobs: strategy: matrix: - python-version: ["3.7", "3.8", "3.9", "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] steps: - uses: actions/checkout@v3 From bc9caa88892c93db17ef9b92b133866512aae137 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 2 Jun 2024 09:32:32 +0200 Subject: [PATCH 121/179] Try to use `venv` module instead of `virtualenv` --- .github/workflows/python.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 1750e999c..c9ccd8c8a 100644 --- 
a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -20,8 +20,7 @@ jobs: - name: Test run: | cd pineappl_py - pip install virtualenv - virtualenv env + python -m venv env . env/bin/activate pip install maturin maturin develop --extras test From 244577b0c2c968c98d3df906c917f854c30922a1 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 2 Jun 2024 10:46:34 +0200 Subject: [PATCH 122/179] Fix some warnings in the Python interface --- pineappl_py/src/grid.rs | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 5188fd542..008068d9c 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -1,17 +1,17 @@ use pineappl::boc::Order; -use pineappl::evolution::{AlphasTable, OperatorInfo}; +use pineappl::evolution::OperatorInfo; use pineappl::grid::{Grid, Ntuple}; use pineappl::lumi::LumiCache; use super::bin::PyBinRemapper; -use super::evolution::{PyEvolveInfo, PyOperatorSliceInfo}; +use super::evolution::PyEvolveInfo; use super::fk_table::PyFkTable; use super::lumi::PyLumiEntry; use super::subgrid::{PySubgridEnum, PySubgridParams}; use itertools::izip; -use numpy::{IntoPyArray, PyArray1, PyReadonlyArray1, PyReadonlyArray4, PyReadonlyArray5}; +use numpy::{IntoPyArray, PyArray1, PyReadonlyArray1, PyReadonlyArray5}; use std::collections::HashMap; use std::fs::File; @@ -20,9 +20,7 @@ use std::path::PathBuf; use pyo3::exceptions::PyValueError; use pyo3::prelude::*; -use pyo3::types::{PyIterator, PyTuple}; - -use ndarray::CowArray; +use pyo3::types::PyIterator; /// PyO3 wrapper to :rustdoc:`pineappl::grid::Order ` /// @@ -527,11 +525,11 @@ impl PyGrid { /// TODO pub fn evolve_with_slice_iter( &self, - slices: &PyIterator, - order_mask: PyReadonlyArray1, - xi: (f64, f64), - ren1: Vec, - alphas: Vec, + _slices: &PyIterator, + _order_mask: PyReadonlyArray1, + _xi: (f64, f64), + _ren1: Vec, + _alphas: Vec, ) -> PyResult { todo!() //Ok(self From 
819f5758baf503c7bfe37b39d24fb31328c36f92 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 2 Jun 2024 10:55:51 +0200 Subject: [PATCH 123/179] Add `PidBasis::{charge_conjugate,guess}` and remove `pids::charge_conjugate` --- CHANGELOG.md | 4 +++ pineappl/src/grid.rs | 6 ++-- pineappl/src/pids.rs | 58 +++++++++++++++++++++++-------------- pineappl_cli/src/write.rs | 61 ++++++++++++++++----------------------- 4 files changed, 68 insertions(+), 61 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f20e5042..45d28ddac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - added the function `pineappl_grid_convolve_with_one` and `pineappl_grid_convolve_with_two` which replace the deprecated function similarly named with `convolute` in CAPI +- added `PidBasis::charge_conjugate` and `PidBasis::guess` ### Changed @@ -30,6 +31,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - renamed the struct `LumiEntry` to `Channel` and `ParseLumiEntryError` to `ParseChannelError`. Both structures have been moved to the module `boc` - renamed the macro `lumi_entry` to `channel` +- renamed `Grid::set_channels` to `Grid::channels_mut` ### Removed @@ -38,6 +40,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 and the structs `EkoInfo` and `GridAxes` - removed methods `Grid::has_pdf1`, `Grid::has_pdf2`, `Grid::initial_state_1` and `Grid::initial_state_2` +- removed `pids::charge_conjugate`; this function has been replaced with the + new function `PidBasis::charge_conjugate` ## [0.7.4] - 23/05/2024 diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 36251fff1..5765ac40d 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -970,9 +970,9 @@ impl Grid { &mut self.orders } - /// Set the channels for this grid. 
- pub fn set_channels(&mut self, channels: Vec) { - self.channels = channels; + /// Return a mutable reference to the grid's channels. + pub fn channels_mut(&mut self) -> &mut [Channel] { + &mut self.channels } /// Returns the subgrid with the specified indices `order`, `bin`, and `channel`. diff --git a/pineappl/src/pids.rs b/pineappl/src/pids.rs index b6c13dbc3..46d0818f0 100644 --- a/pineappl/src/pids.rs +++ b/pineappl/src/pids.rs @@ -8,7 +8,7 @@ const EVOL_BASIS_IDS: [i32; 12] = [100, 103, 108, 115, 124, 135, 200, 203, 208, /// Particle ID bases. In `PineAPPL` every particle is identified using a particle identifier /// (PID), which is represented as an `i32`. The values of this `enum` specify how this value is /// interpreted. -#[derive(Clone, Copy)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum PidBasis { /// This basis uses the [particle data group](https://pdg.lbl.gov/) (PDG) PIDs. For a complete /// definition see the section 'Monte Carlo Particle Numbering Scheme' of the PDG Review, for @@ -34,6 +34,36 @@ impl FromStr for PidBasis { } } +impl PidBasis { + /// Return the charge-conjugated particle ID of `pid` given in the basis of `self`. The + /// returned tuple contains a factor that possibly arises during the charge conjugation. + pub fn charge_conjugate(&self, pid: i32) -> (i32, f64) { + match (*self, pid) { + // TODO: in the general case we should allow to return a vector of tuples + (Self::Evol, 100 | 103 | 108 | 115 | 124 | 135) => (pid, 1.0), + (Self::Evol, 200 | 203 | 208 | 215 | 224 | 235) => (pid, -1.0), + (Self::Evol, _) | (Self::Pdg, _) => (charge_conjugate_pdg_pid(pid), 1.0), + } + } + + /// Given the particle IDs in `pids`, guess the [`PidBasis`]. 
+ #[must_use] + pub fn guess(pids: &[i32]) -> PidBasis { + // if we find more than 3 pids that are recognized to be from the evolution basis, declare + // it to be the evolution basis (that's a heuristic), otherwise PDG MC IDs + if pids + .iter() + .filter(|&pid| EVOL_BASIS_IDS.iter().any(|evol_pid| pid == evol_pid)) + .count() + > 3 + { + PidBasis::Evol + } else { + PidBasis::Pdg + } + } +} + /// Error returned by [`PidBasis::from_str`] when passed with an unknown argument. #[derive(Debug, Error)] #[error("unknown PID basis: {basis}")] @@ -312,22 +342,6 @@ pub const fn charge_conjugate_pdg_pid(pid: i32) -> i32 { } } -/// Return the charge-conjugated particle ID of `pid` for the basis `lumi_id_types`. The returned -/// tuple contains a factor that possible arises during the carge conjugation. -/// -/// # Panics -/// -/// TODO -#[must_use] -pub fn charge_conjugate(lumi_id_types: &str, pid: i32) -> (i32, f64) { - match (lumi_id_types, pid) { - ("evol", 100 | 103 | 108 | 115 | 124 | 135) => (pid, 1.0), - ("evol", 200 | 203 | 208 | 215 | 224 | 235) => (pid, -1.0), - ("evol" | "pdg_mc_ids", _) => (charge_conjugate_pdg_pid(pid), 1.0), - _ => todo!(), - } -} - /// Given the particle IDs in `pids`, determine the right string for `lumi_id_types` stored in /// `Grid`. 
#[must_use] @@ -874,17 +888,17 @@ mod tests { } #[test] - fn test_determine_lumi_id_types() { + fn pid_basis_guess() { assert_eq!( - determine_lumi_id_types(&[22, -6, -5, -4, -3, -2, -1, 21, 1, 2, 3, 4, 5, 6]), - "pdg_mc_ids" + PidBasis::guess(&[22, -6, -5, -4, -3, -2, -1, 21, 1, 2, 3, 4, 5, 6]), + PidBasis::Pdg, ); assert_eq!( - determine_lumi_id_types(&[ + PidBasis::guess(&[ 22, 100, 200, 21, 100, 103, 108, 115, 124, 135, 203, 208, 215, 224, 235 ]), - "evol" + PidBasis::Evol, ); } diff --git a/pineappl_cli/src/write.rs b/pineappl_cli/src/write.rs index 6be8c3822..9b7314dd0 100644 --- a/pineappl_cli/src/write.rs +++ b/pineappl_cli/src/write.rs @@ -9,10 +9,9 @@ use clap::{ use pineappl::bin::BinRemapper; use pineappl::boc::{Channel, Order}; use pineappl::fk_table::{FkAssumptions, FkTable}; -use pineappl::pids; use pineappl::pids::PidBasis; use std::fs; -use std::ops::{Deref, RangeInclusive}; +use std::ops::RangeInclusive; use std::path::PathBuf; use std::process::ExitCode; @@ -499,35 +498,29 @@ impl Subcommand for Opts { let cc1 = matches!(arg, OpsArg::Cc1(true)); let cc2 = matches!(arg, OpsArg::Cc2(true)); - // TODO: make this a member function of `Grid` - let lumi_id_types = grid.key_values().map_or("pdg_mc_ids", |kv| { - kv.get("lumi_id_types").map_or("pdg_mc_ids", Deref::deref) - }); - let channels = grid - .channels() - .iter() - .map(|entry| { - Channel::new( - entry - .entry() - .iter() - .map(|&(a, b, f)| { - let (ap, f1) = if cc1 { - pids::charge_conjugate(lumi_id_types, a) - } else { - (a, 1.0) - }; - let (bp, f2) = if cc2 { - pids::charge_conjugate(lumi_id_types, b) - } else { - (b, 1.0) - }; - (ap, bp, f * f1 * f2) - }) - .collect(), - ) - }) - .collect(); + let pid_basis = grid.pid_basis(); + + for channel in grid.channels_mut() { + *channel = Channel::new( + channel + .entry() + .iter() + .map(|&(a, b, f)| { + let (ap, f1) = if cc1 { + pid_basis.charge_conjugate(a) + } else { + (a, 1.0) + }; + let (bp, f2) = if cc2 { + pid_basis.charge_conjugate(b) + 
} else { + (b, 1.0) + }; + (ap, bp, f * f1 * f2) + }) + .collect(), + ); + } if cc1 { grid.set_convolution(0, grid.convolutions()[0].cc()) @@ -535,8 +528,6 @@ impl Subcommand for Opts { if cc2 { grid.set_convolution(1, grid.convolutions()[1].cc()) } - - grid.set_channels(channels); } OpsArg::DedupChannels(ulps) => { grid.dedup_channels(*ulps); @@ -594,10 +585,8 @@ impl Subcommand for Opts { )?; } OpsArg::RewriteChannel((index, new_channel)) => { - let mut channels = grid.channels().to_vec(); // TODO: check that `index` is valid - channels[*index] = new_channel.clone(); - grid.set_channels(channels); + grid.channels_mut()[*index] = new_channel.clone(); } OpsArg::RewriteOrder((index, order)) => { grid.orders_mut()[*index] = order.clone(); From 1254a5b97c5c2049ef399151273cc014b8cafb66 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 2 Jun 2024 11:37:57 +0200 Subject: [PATCH 124/179] Use `PidBasis` instead of its string representation --- CHANGELOG.md | 3 +++ examples/python/positivity.py | 1 - pineappl/src/evolution.rs | 22 ++++++++++------------ pineappl/src/grid.rs | 18 +++++++++++++----- pineappl/src/pids.rs | 18 ------------------ pineappl_cli/src/evolve.rs | 10 +++++----- pineappl_cli/src/import/fktable.rs | 5 ++--- pineappl_py/src/grid.rs | 2 +- pineappl_py/tests/test_fk_table.py | 1 - 9 files changed, 34 insertions(+), 46 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 45d28ddac..166d2cd54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 `pineappl_grid_convolve_with_two` which replace the deprecated function similarly named with `convolute` in CAPI - added `PidBasis::charge_conjugate` and `PidBasis::guess` +- added `Grid::set_pid_basis` method ### Changed @@ -42,6 +43,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 and `Grid::initial_state_2` - removed `pids::charge_conjugate`; this function has been 
replaced with the new function `PidBasis::charge_conjugate` +- removed `pids::determine_lumi_id_types`; this function has been replaced with + the new function `PidBasis::guess` ## [0.7.4] - 23/05/2024 diff --git a/examples/python/positivity.py b/examples/python/positivity.py index 09a68678e..426a3600a 100755 --- a/examples/python/positivity.py +++ b/examples/python/positivity.py @@ -48,7 +48,6 @@ def main(filename, Q2): "runcard", f"positivity constraint for quark {pid}", ) - grid.set_key_value("lumi_id_types", "pdg_mc_ids") # dump file grid.optimize() diff --git a/pineappl/src/evolution.rs b/pineappl/src/evolution.rs index 336411a63..689f6b444 100644 --- a/pineappl/src/evolution.rs +++ b/pineappl/src/evolution.rs @@ -4,6 +4,7 @@ use super::boc::{Channel, Order}; use super::channel; use super::grid::{Convolution, Grid, GridError}; use super::import_only_subgrid::ImportOnlySubgridV2; +use super::pids::PidBasis; use super::sparse_array3::SparseArray3; use super::subgrid::{Mu2, Subgrid, SubgridEnum}; use float_cmp::approx_eq; @@ -44,7 +45,7 @@ pub struct EvolveInfo { /// /// The EKO may convert a `Grid` from a basis given by the particle identifiers [`pids1`] to a /// possibly different basis given by [`pids0`]. This basis must also be identified using -/// [`lumi_id_types`], which tells [`FkTable::convolve`] how to perform a convolution. The members +/// [`pid_basis`], which tells [`FkTable::convolve`] how to perform a convolution. The members /// [`ren1`] and [`alphas`] must be the strong couplings given at the respective renormalization /// scales. Finally, [`xir`] and [`xif`] can be used to vary the renormalization and factorization /// scales, respectively, around their central values. 
@@ -54,7 +55,7 @@ pub struct EvolveInfo { /// [`alphas`]: Self::alphas /// [`fac0`]: Self::fac0 /// [`fac1`]: Self::fac1 -/// [`lumi_id_types`]: Self::lumi_id_types +/// [`pid_basis`]: Self::pid_basis /// [`pids0`]: Self::pids0 /// [`pids1`]: Self::pids1 /// [`ren1`]: Self::ren1 @@ -85,8 +86,8 @@ pub struct OperatorInfo { pub xir: f64, /// Multiplicative factor for the central factorization scale. pub xif: f64, - /// Identifier of the particle basis for the `FkTable`. - pub lumi_id_types: String, + /// Particle ID basis for `FkTable`. + pub pid_basis: PidBasis, } /// Information about the evolution kernel operator slice (EKO) passed to @@ -102,7 +103,7 @@ pub struct OperatorInfo { /// /// The EKO slice may convert a `Grid` from a basis given by the particle identifiers `pids1` to a /// possibly different basis given by `pids0`. This basis must also be identified using -/// [`lumi_id_types`](Self::lumi_id_types), which tells +/// [`pid_basis`](Self::pid_basis), which tells /// [`FkTable::convolve`](super::fk_table::FkTable::convolve) how to perform a convolution. #[derive(Clone)] pub struct OperatorSliceInfo { @@ -120,8 +121,8 @@ pub struct OperatorSliceInfo { /// `x`-grid coordinates of the `Grid`. pub x1: Vec, - /// Identifier of the particle basis for the `FkTable`. - pub lumi_id_types: String, + /// Particle ID basis for `FkTable`. + pub pid_basis: PidBasis, } /// A mapping of squared renormalization scales in `ren1` to strong couplings in `alphas`. 
The @@ -164,11 +165,8 @@ fn gluon_has_pid_zero(grid: &Grid) -> bool { grid.channels() .iter() .any(|entry| entry.entry().iter().any(|&(a, b, _)| (a == 0) || (b == 0))) - // and if lumi_id_types = pdg_mc_ids or if the key-value pair doesn't exist - && grid - .key_values() - .and_then(|key_values| key_values.get("lumi_id_types")) - .map_or(true, |value| value == "pdg_mc_ids") + // and if the particle IDs are encoded using PDG MC IDs + && grid.pid_basis() == PidBasis::Pdg } type Pid01IndexTuples = Vec<(usize, usize)>; diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 5765ac40d..ab8feddf7 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -350,6 +350,14 @@ impl Grid { PidBasis::Pdg } + /// TODO + pub fn set_pid_basis(&mut self, pid_basis: PidBasis) { + match pid_basis { + PidBasis::Pdg => self.set_key_value("lumi_id_types", "pdg_mc_ids"), + PidBasis::Evol => self.set_key_value("lumi_id_types", "evol"), + } + } + fn pdg_channels(&self) -> Cow<[Channel]> { match self.pid_basis() { PidBasis::Evol => self @@ -1415,7 +1423,7 @@ impl Grid { fac1, pids1: info.pids1.clone(), x1: info.x1.clone(), - lumi_id_types: info.lumi_id_types.clone(), + pid_basis: info.pid_basis, }, CowArray::from(op), )) @@ -1493,8 +1501,8 @@ impl Grid { more_members: self.more_members.clone(), }; - // write additional metadata - rhs.set_key_value("lumi_id_types", &info.lumi_id_types); + // TODO: use a new constructor to set this information + rhs.set_pid_basis(info.pid_basis); if let Some(lhs) = &mut lhs { lhs.merge(rhs)?; @@ -1617,7 +1625,7 @@ impl Grid { .map(|channel| Channel::translate(channel, &pids::pdg_mc_pids_to_evol)) .collect(); - self.set_key_value("lumi_id_types", "evol"); + self.set_pid_basis(PidBasis::Evol); } (PidBasis::Evol, PidBasis::Pdg) => { self.channels = self @@ -1626,7 +1634,7 @@ impl Grid { .map(|channel| Channel::translate(channel, &pids::evol_to_pdg_mc_ids)) .collect(); - self.set_key_value("lumi_id_types", "pdg_mc_ids"); + 
self.set_pid_basis(PidBasis::Pdg); } (PidBasis::Evol, PidBasis::Evol) | (PidBasis::Pdg, PidBasis::Pdg) => { // here's nothing to do diff --git a/pineappl/src/pids.rs b/pineappl/src/pids.rs index 46d0818f0..b02189b7c 100644 --- a/pineappl/src/pids.rs +++ b/pineappl/src/pids.rs @@ -342,24 +342,6 @@ pub const fn charge_conjugate_pdg_pid(pid: i32) -> i32 { } } -/// Given the particle IDs in `pids`, determine the right string for `lumi_id_types` stored in -/// `Grid`. -#[must_use] -pub fn determine_lumi_id_types(pids: &[i32]) -> String { - // if we find more than 3 pids that are recognized to be from the evolution basis, declare - // it to be the evolution basis (that's a heuristic), otherwise PDG MC IDs - if pids - .iter() - .filter(|&pid| EVOL_BASIS_IDS.iter().any(|evol_pid| pid == evol_pid)) - .count() - > 3 - { - "evol".to_owned() - } else { - "pdg_mc_ids".to_owned() - } -} - /// Given `tuples` represting a linear combination of PDG MC IDs, return a PID for the `evol` /// basis. The order of each tuple in `tuples` is not relevant. This function inverts /// [`evol_to_pdg_mc_ids`]. If the inversion is not possible, `None` is returned. 
diff --git a/pineappl_cli/src/evolve.rs b/pineappl_cli/src/evolve.rs index e8726b886..07cd297f2 100644 --- a/pineappl_cli/src/evolve.rs +++ b/pineappl_cli/src/evolve.rs @@ -21,7 +21,7 @@ mod eko { use ndarray::{Array4, Array5, Axis, CowArray, Ix4}; use ndarray_npy::{NpzReader, ReadNpyExt}; use pineappl::evolution::OperatorSliceInfo; - use pineappl::pids; + use pineappl::pids::{self, PidBasis}; use serde::Deserialize; use std::collections::HashMap; use std::ffi::{OsStr, OsString}; @@ -155,7 +155,7 @@ mod eko { Ok(Self::V0 { fac1: metadata.q2_grid, info: OperatorSliceInfo { - lumi_id_types: pids::determine_lumi_id_types(&metadata.inputpids), + pid_basis: PidBasis::guess(&metadata.inputpids), fac0: metadata.q2_ref, pids0: metadata.inputpids, x0: metadata.inputgrid, @@ -222,7 +222,7 @@ mod eko { Ok(Self::V2 { fac1, info: OperatorSliceInfo { - lumi_id_types: pids::determine_lumi_id_types(&pids0), + pid_basis: PidBasis::guess(&pids0), fac0: metadata.mu20, pids0, x0: metadata @@ -290,7 +290,7 @@ mod eko { Ok(Self::V2 { fac1, info: OperatorSliceInfo { - lumi_id_types: pids::determine_lumi_id_types(&pids0), + pid_basis: PidBasis::guess(&pids0), fac0: operator.mu0 * operator.mu0, pids0, x0: metadata @@ -466,7 +466,7 @@ fn evolve_grid( alphas: alphas_table.alphas, xir, xif, - lumi_id_types: info.lumi_id_types, + pid_basis: info.pid_basis, }; #[allow(deprecated)] diff --git a/pineappl_cli/src/import/fktable.rs b/pineappl_cli/src/import/fktable.rs index bd72b4984..375874aa2 100644 --- a/pineappl_cli/src/import/fktable.rs +++ b/pineappl_cli/src/import/fktable.rs @@ -4,6 +4,7 @@ use pineappl::boc::Order; use pineappl::channel; use pineappl::grid::{Convolution, Grid}; use pineappl::import_only_subgrid::ImportOnlySubgridV1; +use pineappl::pids::PidBasis; use pineappl::sparse_array3::SparseArray3; use pineappl::subgrid::SubgridParams; use std::fs::File; @@ -108,9 +109,7 @@ fn read_fktable(reader: impl BufRead, dis_pid: i32) -> Result { ); // explicitly set the evolution basis - 
fktable - .key_values_mut() - .insert("lumi_id_types".to_owned(), "evol".to_owned()); + fktable.set_pid_basis(PidBasis::Evol); // legacy FK-tables only support unpolarized proton PDFs fktable.set_convolution(0, Convolution::UnpolPDF(2212)); diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 008068d9c..5dc255fe7 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -480,7 +480,7 @@ impl PyGrid { alphas: alphas.to_vec().unwrap(), xir: xi.0, xif: xi.1, - lumi_id_types: lumi_id_types, + pid_basis: lumi_id_types.parse().unwrap(), }; let evolved_grid = self diff --git a/pineappl_py/tests/test_fk_table.py b/pineappl_py/tests/test_fk_table.py index 7769dcf7d..a5fb537e4 100644 --- a/pineappl_py/tests/test_fk_table.py +++ b/pineappl_py/tests/test_fk_table.py @@ -10,7 +10,6 @@ def fake_grid(self, bins=None): bin_limits = np.array([1e-7, 1e-3, 1] if bins is None else bins, dtype=float) subgrid_params = pineappl.subgrid.SubgridParams() g = pineappl.grid.Grid.create(lumis, orders, bin_limits, subgrid_params) - g.set_key_value("lumi_id_types", "pdg_mc_ids") return g def test_convolve_with_one(self): From 438eee4e13ac55a720d6aaa9ae01b2d9c7cfe03c Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 2 Jun 2024 11:43:47 +0200 Subject: [PATCH 125/179] Rename `InvalidLumi` to `InvalidChannel` --- CHANGELOG.md | 1 + pineappl/src/fk_table.rs | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 166d2cd54..d3719de1a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 `ParseChannelError`. 
Both structures have been moved to the module `boc` - renamed the macro `lumi_entry` to `channel` - renamed `Grid::set_channels` to `Grid::channels_mut` +- renamed `TryFromGridError::InvalidLumi` to `TryFromGridError::InvalidChannel` ### Removed diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 5245805a5..ee4e50764 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -36,7 +36,7 @@ pub enum TryFromGridError { MultipleScales, /// Error if the channels are not simple. #[error("complicated channel function detected")] - InvalidLumi, + InvalidChannel, /// Error if the order of the grid was not a single one with all zeros in the exponents. #[error("multiple orders detected")] NonTrivialOrder, @@ -402,14 +402,14 @@ impl TryFrom for FkTable { let entry = channel.entry(); if entry.len() != 1 || entry[0].2 != 1.0 { - return Err(TryFromGridError::InvalidLumi); + return Err(TryFromGridError::InvalidChannel); } } if (1..grid.channels().len()) .any(|i| grid.channels()[i..].contains(&grid.channels()[i - 1])) { - return Err(TryFromGridError::InvalidLumi); + return Err(TryFromGridError::InvalidChannel); } if let Some(key_values) = grid.key_values() { From 6cd099a4edfa38bc6ecc8723c9a8f443a2f7bd2f Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 2 Jun 2024 11:44:45 +0200 Subject: [PATCH 126/179] Remove `TryFromGridError::MetadataMissing` --- CHANGELOG.md | 1 + pineappl/src/fk_table.rs | 17 ----------------- 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d3719de1a..27a00605e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 new function `PidBasis::charge_conjugate` - removed `pids::determine_lumi_id_types`; this function has been replaced with the new function `PidBasis::guess` +- removed `TryFromGridError::MetadataMissing` ## [0.7.4] - 23/05/2024 diff --git a/pineappl/src/fk_table.rs 
b/pineappl/src/fk_table.rs index ee4e50764..e48d37bd2 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -40,9 +40,6 @@ pub enum TryFromGridError { /// Error if the order of the grid was not a single one with all zeros in the exponents. #[error("multiple orders detected")] NonTrivialOrder, - /// Error if the certain metadata is missing. - #[error("metadata is missing: expected key `{0}` to have a value")] - MetadataMissing(String), } /// The optimization assumptions for an [`FkTable`], needed for [`FkTable::optimize`]. Since FK @@ -412,20 +409,6 @@ impl TryFrom for FkTable { return Err(TryFromGridError::InvalidChannel); } - if let Some(key_values) = grid.key_values() { - let keys = vec!["lumi_id_types".to_owned()]; - - for key in keys { - if !key_values.contains_key(&key) { - return Err(TryFromGridError::MetadataMissing(key)); - } - } - } else { - return Err(TryFromGridError::MetadataMissing( - "initial_states_1".to_owned(), - )); - } - Ok(Self { grid }) } } From 08ab22120472b9cb05b0c2795ae9e66c234e7832 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 2 Jun 2024 11:56:06 +0200 Subject: [PATCH 127/179] Add item to `CHANGELOG.md` missing from commit 1254a5b --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 27a00605e..2ffcac3de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - renamed the macro `lumi_entry` to `channel` - renamed `Grid::set_channels` to `Grid::channels_mut` - renamed `TryFromGridError::InvalidLumi` to `TryFromGridError::InvalidChannel` +- changed member `lumi_id_types` of `OperatorInfo` and `OperatorSliceInfo` to + `pid_basis`, which is now of type `PidBasis` ### Removed From fd856cb227ad0b7ae4f7803755bb6e09269f1b8a Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 2 Jun 2024 12:41:00 +0200 Subject: [PATCH 128/179] Test deprecated CAPI functions --- 
examples/cpp/Makefile | 4 ++ examples/cpp/deprecated.cpp | 127 ++++++++++++++++++++++++++++++++++++ examples/cpp/output | 26 ++++++++ 3 files changed, 157 insertions(+) create mode 100644 examples/cpp/deprecated.cpp diff --git a/examples/cpp/Makefile b/examples/cpp/Makefile index e2786a32e..61866d496 100644 --- a/examples/cpp/Makefile +++ b/examples/cpp/Makefile @@ -9,6 +9,7 @@ PROGRAMS = \ advanced-convolution \ advanced-filling \ convolve-grid \ + deprecated \ display-channels \ display-orders \ merge-grids \ @@ -28,6 +29,9 @@ advanced-filling: advanced-filling.cpp convolve-grid: convolve-grid.cpp $(CXX) $(CXXFLAGS) $< $(LHAPDF_DEPS) $(PINEAPPL_DEPS) -o $@ +deprecated: deprecated.cpp + $(CXX) $(CXXFLAGS) $< $(LHAPDF_DEPS) $(PINEAPPL_DEPS) -o $@ + display-channels: display-channels.cpp $(CXX) $(CXXFLAGS) $< $(PINEAPPL_DEPS) -o $@ diff --git a/examples/cpp/deprecated.cpp b/examples/cpp/deprecated.cpp new file mode 100644 index 000000000..552cf4990 --- /dev/null +++ b/examples/cpp/deprecated.cpp @@ -0,0 +1,127 @@ +#include +#include + +#include +#include +#include +#include +#include +#include + +int main(int argc, char* argv[]) { + std::string filename = "drell-yan-rap-ll.pineappl.lz4"; + std::string pdfset = "NNPDF31_nlo_as_0118_luxqed"; + + switch (argc) { + case 3: + pdfset = argv[2]; + // fall through + case 2: + filename = argv[1]; + case 1: + break; + + default: + std::cout << "Usage: " << argv[0] << " [grid] [pdf]\n"; + } + + // disable LHAPDF banners to guarantee deterministic output + LHAPDF::setVerbosity(0); + + // read the grid from a file + auto* grid = pineappl_grid_read(filename.c_str()); + + auto* pdf = LHAPDF::mkPDF(pdfset, 0); + + // define callables for the PDFs and alphas + auto xfx1 = [](int32_t id, double x, double q2, void* pdf) { + return static_cast (pdf)->xfxQ2(id, x, q2); + }; + auto xfx2 = [](int32_t id, double x, double q2, void* pdf) { + return static_cast (pdf)->xfxQ2(id, x, q2); + }; + auto alphas = [](double q2, void* pdf) { + return 
static_cast (pdf)->alphasQ2(q2); + }; + + // how many perturbative orders does the grid contain? + std::size_t orders = pineappl_grid_order_count(grid); + + // how many bins does this grid have? + std::size_t bins = pineappl_grid_bin_count(grid); + + auto* lumi = pineappl_grid_lumi(grid); + + // how many channels does the grid have? + std::size_t channels = pineappl_lumi_count(lumi); + + pineappl_lumi_delete(lumi); + + // std::vector doesn't have `.data()` member + std::unique_ptr order_mask(new bool[orders]()); + + // allocate a vector holding the differential cross sections + std::vector dxsec1(bins); + + std::unique_ptr channel_mask(new bool[channels]()); + + // use the variables to select the included orders and channels + order_mask[0] = true; + channel_mask[0] = true; + + // use these variables to perform scale variations + double xir = 1.0; + double xif = 1.0; + + // use `pineappl_grid_convolve_with_one` instead + pineappl_grid_convolute_with_one(grid, 2212, xfx1, alphas, pdf, order_mask.get(), + channel_mask.get(), xir, xif, dxsec1.data()); + + // how does the grid know which PDFs it must be convolved with? This is determined by the + // metadata keys `initial_state_1` and `initial_state_2`, which are by default set to `2212`, + // the PDG MC ID for the proton. Let's change the second value to an antiproton: + pineappl_grid_set_key_value(grid, "initial_state_2", "-2212"); + + std::vector dxsec2(bins); + + // use `pineappl_grid_convolve_with_one` instead + pineappl_grid_convolute_with_one(grid, 2212, xfx1, alphas, pdf, order_mask.get(), + channel_mask.get(), xir, xif, dxsec2.data()); + + // what if we have a collision where we actually need two PDFs? 
Let's simulate the collision of + // protons with deuterons: + pineappl_grid_set_key_value(grid, "initial_state_2", "1000010020"); // 1000010020 = deuteron + + std::vector dxsec3(bins); + + // use `pineappl_grid_convolve_with_two` instead + pineappl_grid_convolute_with_two(grid, 2212, xfx1, 1000010020, xfx2, alphas, pdf, + order_mask.get(), channel_mask.get(), xir, xif, dxsec3.data()); + + std::vector dxsec4(bins); + + // use `pineappl_grid_convolve_with_two` instead + pineappl_grid_convolve_with_two(grid, 2212, xfx1, 1000010020, xfx2, alphas, pdf, nullptr, + nullptr, xir, xif, dxsec4.data()); + + std::vector normalizations(bins); + + // read out the bin normalizations, which is usually the size of each bin + pineappl_grid_bin_normalizations(grid, normalizations.data()); + + // print table header + std::cout << "idx p-p c#0 l#0 p-p~ c#0 l# p-d c#0 l#0 p-d dx\n" + "--- ------------ ----------- ------------- ------------ ------\n"; + + for (std::size_t bin = 0; bin != bins; ++bin) { + // print the bin index + std::cout << std::setw(3) << bin << ' '; + + // print the result together with the normalization + std::cout << std::scientific << dxsec1.at(bin) << ' ' << dxsec2.at(bin) << ' ' + << dxsec3.at(bin) << ' ' << dxsec4.at(bin) << ' ' << std::defaultfloat << std::setw(6) + << normalizations.at(bin) << '\n'; + } + + pineappl_grid_delete(grid); +} diff --git a/examples/cpp/output b/examples/cpp/output index 25f01afcf..dd0c7936b 100644 --- a/examples/cpp/output +++ b/examples/cpp/output @@ -62,6 +62,32 @@ idx left right dsig/dx dx 21 2.1 2.2 3.491788e-02 0.1 22 2.2 2.3 1.967518e-02 0.1 23 2.3 2.4 5.565306e-03 0.1 +idx p-p c#0 l#0 p-p~ c#0 l# p-d c#0 l#0 p-d dx +--- ------------ ----------- ------------- ------------ ------ + 0 5.263109e-01 5.263109e-01 5.263109e-01 5.263109e-01 0.1 + 1 5.254908e-01 5.254908e-01 5.254908e-01 5.254908e-01 0.1 + 2 5.246824e-01 5.246824e-01 5.246824e-01 5.246824e-01 0.1 + 3 5.188340e-01 5.188340e-01 5.188340e-01 5.188340e-01 0.1 + 4 
5.175482e-01 5.175482e-01 5.175482e-01 5.175482e-01 0.1 + 5 5.008841e-01 5.008841e-01 5.008841e-01 5.008841e-01 0.1 + 6 4.905325e-01 4.905325e-01 4.905325e-01 4.905325e-01 0.1 + 7 4.675734e-01 4.675734e-01 4.675734e-01 4.675734e-01 0.1 + 8 4.393159e-01 4.393159e-01 4.393159e-01 4.393159e-01 0.1 + 9 3.992921e-01 3.992921e-01 3.992921e-01 3.992921e-01 0.1 + 10 3.706801e-01 3.706801e-01 3.706801e-01 3.706801e-01 0.1 + 11 3.264717e-01 3.264717e-01 3.264717e-01 3.264717e-01 0.1 + 12 2.849345e-01 2.849345e-01 2.849345e-01 2.849345e-01 0.1 + 13 2.486723e-01 2.486723e-01 2.486723e-01 2.486723e-01 0.1 + 14 2.110419e-01 2.110419e-01 2.110419e-01 2.110419e-01 0.1 + 15 1.797439e-01 1.797439e-01 1.797439e-01 1.797439e-01 0.1 + 16 1.471492e-01 1.471492e-01 1.471492e-01 1.471492e-01 0.1 + 17 1.205566e-01 1.205566e-01 1.205566e-01 1.205566e-01 0.1 + 18 9.491625e-02 9.491625e-02 9.491625e-02 9.491625e-02 0.1 + 19 7.255720e-02 7.255720e-02 7.255720e-02 7.255720e-02 0.1 + 20 5.056967e-02 5.056967e-02 5.056967e-02 5.056967e-02 0.1 + 21 3.491788e-02 3.491788e-02 3.491788e-02 3.491788e-02 0.1 + 22 1.967518e-02 1.967518e-02 1.967518e-02 1.967518e-02 0.1 + 23 5.565306e-03 5.565306e-03 5.565306e-03 5.565306e-03 0.1 0 1 x ( 22, 22) 1 1 x ( 1, -1) + 1 x ( 3, -3) + 1 x ( 5, -5) 0 O(as^0 a^2 lr^0 lf^0) From 6c0acce3cbdac765dcdcab997cfaa30e189696be Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 2 Jun 2024 12:54:47 +0200 Subject: [PATCH 129/179] Fix some clippy warnings --- pineappl/src/evolution.rs | 6 +++--- pineappl/src/grid.rs | 27 +++++++++++++-------------- pineappl/src/lumi.rs | 2 +- pineappl/src/pids.rs | 11 ++++++----- pineappl_cli/src/write.rs | 4 ++-- 5 files changed, 25 insertions(+), 25 deletions(-) diff --git a/pineappl/src/evolution.rs b/pineappl/src/evolution.rs index 689f6b444..dd60e06c0 100644 --- a/pineappl/src/evolution.rs +++ b/pineappl/src/evolution.rs @@ -497,10 +497,10 @@ pub(crate) fn evolve_slice_with_one( })); } - let pid = if grid.convolutions()[0] != 
Convolution::None { - grid.channels()[0].entry()[0].1 - } else { + let pid = if grid.convolutions()[0] == Convolution::None { grid.channels()[0].entry()[0].0 + } else { + grid.channels()[0].entry()[0].1 }; Ok(( diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index ab8feddf7..b29f6d67b 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -244,26 +244,25 @@ pub enum Convolution { impl Convolution { /// Return the convolution if the PID is charged conjugated. - pub fn cc(&self) -> Convolution { + #[must_use] + pub const fn cc(&self) -> Self { match *self { - Convolution::None => Convolution::None, - Convolution::UnpolPDF(pid) => { - Convolution::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)) - } - Convolution::PolPDF(pid) => Convolution::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)), - Convolution::UnpolFF(pid) => Convolution::UnpolFF(pids::charge_conjugate_pdg_pid(pid)), - Convolution::PolFF(pid) => Convolution::PolFF(pids::charge_conjugate_pdg_pid(pid)), + Self::None => Self::None, + Self::UnpolPDF(pid) => Self::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)), + Self::PolPDF(pid) => Self::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)), + Self::UnpolFF(pid) => Self::UnpolFF(pids::charge_conjugate_pdg_pid(pid)), + Self::PolFF(pid) => Self::PolFF(pids::charge_conjugate_pdg_pid(pid)), } } /// Return the PID of the convolution if it has any. 
- pub fn pid(&self) -> Option { + #[must_use] + pub const fn pid(&self) -> Option { match *self { - Convolution::None => None, - Convolution::UnpolPDF(pid) - | Convolution::PolPDF(pid) - | Convolution::UnpolFF(pid) - | Convolution::PolFF(pid) => Some(pid), + Self::None => None, + Self::UnpolPDF(pid) | Self::PolPDF(pid) | Self::UnpolFF(pid) | Self::PolFF(pid) => { + Some(pid) + } } } } diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs index 9b15f44ab..791d3a6d9 100644 --- a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -279,7 +279,7 @@ impl<'a> LumiCache<'a> { /// Return the strong coupling for the renormalization scale set with [`LumiCache::set_grids`], /// in the grid `mu2_grid` at the index `imu2`. - pub fn alphas(&mut self, imu2: usize) -> f64 { + pub fn alphas(&self, imu2: usize) -> f64 { self.alphas_cache[self.imur2[imu2]] } diff --git a/pineappl/src/pids.rs b/pineappl/src/pids.rs index b02189b7c..8e23eaa49 100644 --- a/pineappl/src/pids.rs +++ b/pineappl/src/pids.rs @@ -37,18 +37,19 @@ impl FromStr for PidBasis { impl PidBasis { /// Return the charge-conjugated particle ID of `pid` given in the basis of `self`. The /// returned tuple contains a factor that possibly arises during the charge conjugation. - pub fn charge_conjugate(&self, pid: i32) -> (i32, f64) { + #[must_use] + pub const fn charge_conjugate(&self, pid: i32) -> (i32, f64) { match (*self, pid) { // TODO: in the general case we should allow to return a vector of tuples (Self::Evol, 100 | 103 | 108 | 115 | 124 | 135) => (pid, 1.0), (Self::Evol, 200 | 203 | 208 | 215 | 224 | 235) => (pid, -1.0), - (Self::Evol, _) | (Self::Pdg, _) => (charge_conjugate_pdg_pid(pid), 1.0), + (Self::Evol | Self::Pdg, _) => (charge_conjugate_pdg_pid(pid), 1.0), } } /// Given the particle IDs in `pids`, guess the [`PidBasis`]. 
#[must_use] - pub fn guess(pids: &[i32]) -> PidBasis { + pub fn guess(pids: &[i32]) -> Self { // if we find more than 3 pids that are recognized to be from the evolution basis, declare // it to be the evolution basis (that's a heuristic), otherwise PDG MC IDs if pids @@ -57,9 +58,9 @@ impl PidBasis { .count() > 3 { - PidBasis::Evol + Self::Evol } else { - PidBasis::Pdg + Self::Pdg } } } diff --git a/pineappl_cli/src/write.rs b/pineappl_cli/src/write.rs index 9b7314dd0..449078de6 100644 --- a/pineappl_cli/src/write.rs +++ b/pineappl_cli/src/write.rs @@ -523,10 +523,10 @@ impl Subcommand for Opts { } if cc1 { - grid.set_convolution(0, grid.convolutions()[0].cc()) + grid.set_convolution(0, grid.convolutions()[0].cc()); } if cc2 { - grid.set_convolution(1, grid.convolutions()[1].cc()) + grid.set_convolution(1, grid.convolutions()[1].cc()); } } OpsArg::DedupChannels(ulps) => { From 9d65edf573602287d318f03798b02cab40266322 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 2 Jun 2024 12:55:09 +0200 Subject: [PATCH 130/179] Fix bug in `Convolution::cc` --- pineappl/src/grid.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index b29f6d67b..fd70c7002 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -249,7 +249,7 @@ impl Convolution { match *self { Self::None => Self::None, Self::UnpolPDF(pid) => Self::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)), - Self::PolPDF(pid) => Self::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)), + Self::PolPDF(pid) => Self::PolPDF(pids::charge_conjugate_pdg_pid(pid)), Self::UnpolFF(pid) => Self::UnpolFF(pids::charge_conjugate_pdg_pid(pid)), Self::PolFF(pid) => Self::PolFF(pids::charge_conjugate_pdg_pid(pid)), } From cca611189eb420bb0bfe26b96e3b6bbdec0c5a11 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 2 Jun 2024 12:56:11 +0200 Subject: [PATCH 131/179] Add missing documentation to `Grid::set_pid_basis` --- pineappl/src/grid.rs | 2 +- 1 file
changed, 1 insertion(+), 1 deletion(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index fd70c7002..23e9ac327 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -349,7 +349,7 @@ impl Grid { PidBasis::Pdg } - /// TODO + /// Set the convention by which PIDs of channels are interpreted. pub fn set_pid_basis(&mut self, pid_basis: PidBasis) { match pid_basis { PidBasis::Pdg => self.set_key_value("lumi_id_types", "pdg_mc_ids"), From 17af4ec662980256d825a231b62acb7f941d06bb Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 5 Jun 2024 10:47:34 +0200 Subject: [PATCH 132/179] Replace `subgrid` and `set_subgrid` with `subgrids` and `subgrids_mut` --- CHANGELOG.md | 3 ++ pineappl/src/fk_table.rs | 80 +++++++++++++---------------- pineappl/src/grid.rs | 20 +++----- pineappl/tests/drell_yan_lo.rs | 4 +- pineappl_cli/src/import/applgrid.rs | 8 +-- pineappl_cli/src/import/fastnlo.rs | 42 ++++++++------- pineappl_cli/src/import/fktable.rs | 53 +++++++++---------- pineappl_cli/src/plot.rs | 2 +- pineappl_py/src/grid.rs | 5 +- 9 files changed, 100 insertions(+), 117 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ffcac3de..c47261988 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 similarly named with `convolute` in CAPI - added `PidBasis::charge_conjugate` and `PidBasis::guess` - added `Grid::set_pid_basis` method +- added `Grid::subgrids` and `Grid::subgrids_mut` methods ### Changed @@ -49,6 +50,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - removed `pids::determine_lumi_id_types`; this function has been replaced with the new function `PidBasis::guess` - removed `TryFromGridError::MetadataMissing` +- removed `Grid::subgrid` and `Grid::set_subgrid` methods; these functions have + been replaced with `Grid::subgrids` and `Grid::subgrids_mut` ## [0.7.4] - 23/05/2024 diff --git a/pineappl/src/fk_table.rs 
b/pineappl/src/fk_table.rs index e48d37bd2..5370a67b4 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -158,34 +158,30 @@ impl FkTable { if has_pdf2 { x_grid.len() } else { 1 }, )); - for bin in 0..self.bins() { - for channel in 0..self.grid.channels().len() { - let subgrid = self.grid().subgrid(0, bin, channel); - - let indices1 = if has_pdf1 { - subgrid - .x1_grid() - .iter() - .map(|&s| x_grid.iter().position(|&x| approx_eq!(f64, s, x, ulps = 2))) - .collect::>() - .unwrap() - } else { - vec![0] - }; - let indices2 = if has_pdf2 { - subgrid - .x2_grid() - .iter() - .map(|&s| x_grid.iter().position(|&x| approx_eq!(f64, s, x, ulps = 2))) - .collect::>() - .unwrap() - } else { - vec![0] - }; - - for ((_, ix1, ix2), value) in subgrid.indexed_iter() { - result[[bin, channel, indices1[ix1], indices2[ix2]]] = value; - } + for ((_, bin, channel), subgrid) in self.grid().subgrids().indexed_iter() { + let indices1 = if has_pdf1 { + subgrid + .x1_grid() + .iter() + .map(|&s| x_grid.iter().position(|&x| approx_eq!(f64, s, x, ulps = 2))) + .collect::>() + .unwrap() + } else { + vec![0] + }; + let indices2 = if has_pdf2 { + subgrid + .x2_grid() + .iter() + .map(|&s| x_grid.iter().position(|&x| approx_eq!(f64, s, x, ulps = 2))) + .collect::>() + .unwrap() + } else { + vec![0] + }; + + for ((_, ix1, ix2), value) in subgrid.indexed_iter() { + result[[bin, channel, indices1[ix1], indices2[ix2]]] = value; } } @@ -373,25 +369,21 @@ impl TryFrom for FkTable { return Err(TryFromGridError::NonTrivialOrder); } - for bin in 0..grid.bin_info().bins() { - for channel in 0..grid.channels().len() { - let subgrid = grid.subgrid(0, bin, channel); - - if subgrid.is_empty() { - continue; - } + for subgrid in grid.subgrids() { + if subgrid.is_empty() { + continue; + } - let mu2_grid = subgrid.mu2_grid(); + let mu2_grid = subgrid.mu2_grid(); - if mu2_grid.len() > 1 { - return Err(TryFromGridError::MultipleScales); - } + if mu2_grid.len() > 1 { + return 
Err(TryFromGridError::MultipleScales); + } - if muf2 < 0.0 { - muf2 = mu2_grid[0].fac; - } else if muf2 != mu2_grid[0].fac { - return Err(TryFromGridError::MultipleScales); - } + if muf2 < 0.0 { + muf2 = mu2_grid[0].fac; + } else if muf2 != mu2_grid[0].fac { + return Err(TryFromGridError::MultipleScales); } } diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 23e9ac327..f7301ddce 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -15,7 +15,7 @@ use bitflags::bitflags; use float_cmp::approx_eq; use git_version::git_version; use lz4_flex::frame::{FrameDecoder, FrameEncoder}; -use ndarray::{s, Array3, ArrayView5, Axis, CowArray, Dimension, Ix4}; +use ndarray::{s, Array3, ArrayView3, ArrayView5, ArrayViewMut3, Axis, CowArray, Dimension, Ix4}; use serde::{Deserialize, Serialize, Serializer}; use std::borrow::Cow; use std::collections::{BTreeMap, HashMap}; @@ -982,22 +982,16 @@ impl Grid { &mut self.channels } - /// Returns the subgrid with the specified indices `order`, `bin`, and `channel`. + /// Return all subgrids as an `ArrayView3`. #[must_use] - pub fn subgrid(&self, order: usize, bin: usize, channel: usize) -> &SubgridEnum { - &self.subgrids[[order, bin, channel]] + pub fn subgrids(&self) -> ArrayView3 { + self.subgrids.view() } - /// Returns all subgrids as an `Array3`. + /// Return all subgrids as an `ArrayViewMut3`. #[must_use] - pub const fn subgrids(&self) -> &Array3 { - &self.subgrids - } - - /// Replaces the subgrid for the specified indices `order`, `bin`, and `channel` with - /// `subgrid`. - pub fn set_subgrid(&mut self, order: usize, bin: usize, channel: usize, subgrid: SubgridEnum) { - self.subgrids[[order, bin, channel]] = subgrid; + pub fn subgrids_mut(&mut self) -> ArrayViewMut3 { + self.subgrids.view_mut() } /// Sets a remapper. A remapper can change the dimensions and limits of each bin in this grid. 
diff --git a/pineappl/tests/drell_yan_lo.rs b/pineappl/tests/drell_yan_lo.rs index 6e28bfed9..8ba8dc505 100644 --- a/pineappl/tests/drell_yan_lo.rs +++ b/pineappl/tests/drell_yan_lo.rs @@ -372,8 +372,8 @@ fn perform_grid_tests( // TEST 7b: `optimize` grid.optimize(); - assert_eq!(grid.subgrid(0, 0, 0).x1_grid().as_ref(), x_grid); - assert_eq!(grid.subgrid(0, 0, 0).x2_grid().as_ref(), x_grid); + assert_eq!(grid.subgrids()[[0, 0, 0]].x1_grid().as_ref(), x_grid); + assert_eq!(grid.subgrids()[[0, 0, 0]].x2_grid().as_ref(), x_grid); // TEST 8: `convolve_subgrid` for the optimized subgrids let bins: Vec<_> = (0..grid.bin_info().bins()) diff --git a/pineappl_cli/src/import/applgrid.rs b/pineappl_cli/src/import/applgrid.rs index cc34973ed..356fd2e24 100644 --- a/pineappl_cli/src/import/applgrid.rs +++ b/pineappl_cli/src/import/applgrid.rs @@ -190,18 +190,14 @@ pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, dis_pid: i32) -> Resul } if !array.is_empty() { - pgrid.set_subgrid( - 0, - bin.try_into().unwrap(), - lumi, + pgrid.subgrids_mut()[[0, bin.try_into().unwrap(), lumi]] = ImportOnlySubgridV2::new( array, mu2_values.clone(), x1_values.clone(), x2_values.clone(), ) - .into(), - ); + .into(); } } } diff --git a/pineappl_cli/src/import/fastnlo.rs b/pineappl_cli/src/import/fastnlo.rs index f658cc0bc..d0c066495 100644 --- a/pineappl_cli/src/import/fastnlo.rs +++ b/pineappl_cli/src/import/fastnlo.rs @@ -1,5 +1,6 @@ use anyhow::Result; use itertools::Itertools; +use ndarray::s; use pineappl::bin::BinRemapper; use pineappl::boc::{Channel, Order}; use pineappl::grid::{Convolution, Grid}; @@ -201,18 +202,15 @@ fn convert_coeff_add_fix( } if !array.is_empty() { - grid.set_subgrid( - 0, - obs.try_into().unwrap(), - subproc.try_into().unwrap(), + grid.subgrids_mut() + [[0, obs.try_into().unwrap(), subproc.try_into().unwrap()]] = ImportOnlySubgridV2::new( array, mu2_values, x1_values.clone(), x2_values.clone(), ) - .into(), - ); + .into(); } } } @@ -361,23 +359,23 @@ fn 
convert_coeff_add_flex( } } - for (order, array) in arrays - .into_iter() - .enumerate() - .filter(|(_, array)| !array.is_empty()) + for (subgrid, array) in grid + .subgrids_mut() + .slice_mut(s![.., obs, usize::try_from(subproc).unwrap()]) + .iter_mut() + .zip(arrays.into_iter()) { - grid.set_subgrid( - order, - obs, - subproc.try_into().unwrap(), - ImportOnlySubgridV2::new( - array, - mu2_values.clone(), - x1_values.clone(), - x2_values.clone(), - ) - .into(), - ); + if array.is_empty() { + continue; + } + + *subgrid = ImportOnlySubgridV2::new( + array, + mu2_values.clone(), + x1_values.clone(), + x2_values.clone(), + ) + .into(); } } } diff --git a/pineappl_cli/src/import/fktable.rs b/pineappl_cli/src/import/fktable.rs index 375874aa2..23d3a1e70 100644 --- a/pineappl_cli/src/import/fktable.rs +++ b/pineappl_cli/src/import/fktable.rs @@ -1,5 +1,6 @@ use anyhow::{anyhow, Context, Result}; use flate2::read::GzDecoder; +use ndarray::s; use pineappl::boc::Order; use pineappl::channel; use pineappl::grid::{Convolution, Grid}; @@ -178,19 +179,19 @@ fn read_fktable(reader: impl BufRead, dis_pid: i32) -> Result { if bin > last_bin { let grid = grid.as_mut().unwrap(); - for (lumi, array) in arrays.into_iter().enumerate() { - grid.set_subgrid( - 0, - last_bin, - lumi, - ImportOnlySubgridV1::new( - array, - vec![q0 * q0], - x_grid.clone(), - if hadronic { x_grid.clone() } else { vec![1.0] }, - ) - .into(), - ); + for (subgrid, array) in grid + .subgrids_mut() + .slice_mut(s![0, last_bin, ..]) + .iter_mut() + .zip(arrays.into_iter()) + { + *subgrid = ImportOnlySubgridV1::new( + array, + vec![q0 * q0], + x_grid.clone(), + if hadronic { x_grid.clone() } else { vec![1.0] }, + ) + .into(); } arrays = iter::repeat(SparseArray3::new(1, nx1, nx2)) @@ -234,19 +235,19 @@ fn read_fktable(reader: impl BufRead, dis_pid: i32) -> Result { let mut grid = grid.unwrap(); - for (lumi, array) in arrays.into_iter().enumerate() { - grid.set_subgrid( - 0, - last_bin, - lumi, - 
ImportOnlySubgridV1::new( - array, - vec![q0 * q0], - x_grid.clone(), - if hadronic { x_grid.clone() } else { vec![1.0] }, - ) - .into(), - ); + for (subgrid, array) in grid + .subgrids_mut() + .slice_mut(s![0, last_bin, ..]) + .iter_mut() + .zip(arrays.into_iter()) + { + *subgrid = ImportOnlySubgridV1::new( + array, + vec![q0 * q0], + x_grid.clone(), + if hadronic { x_grid.clone() } else { vec![1.0] }, + ) + .into(); } Ok(grid) diff --git a/pineappl_cli/src/plot.rs b/pineappl_cli/src/plot.rs index e9de5fcfd..53c047bbe 100644 --- a/pineappl_cli/src/plot.rs +++ b/pineappl_cli/src/plot.rs @@ -583,7 +583,7 @@ impl Subcommand for Opts { let res2 = helpers::convolve_subgrid(&grid, &mut pdfset2[0], order, bin, channel, cfg) .sum_axis(Axis(0)); - let subgrid = grid.subgrid(order, bin, channel); + let subgrid = &grid.subgrids()[[order, bin, channel]]; //let q2 = subgrid.q2_grid(); let x1 = subgrid.x1_grid(); let x2 = subgrid.x2_grid(); diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 5dc255fe7..40b121f90 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -271,7 +271,7 @@ impl PyGrid { /// **Usage:** `yadism` pub fn subgrid(&self, order: usize, bin: usize, lumi: usize) -> PySubgridEnum { PySubgridEnum { - subgrid_enum: self.grid.subgrid(order, bin, lumi).clone(), + subgrid_enum: self.grid.subgrids()[[order, bin, lumi]].clone(), } } @@ -279,8 +279,7 @@ impl PyGrid { /// /// **Usage:** `yadism` pub fn set_subgrid(&mut self, order: usize, bin: usize, lumi: usize, subgrid: PySubgridEnum) { - self.grid - .set_subgrid(order, bin, lumi, subgrid.subgrid_enum); + self.grid.subgrids_mut()[[order, bin, lumi]] = subgrid.subgrid_enum; } /// Set the normalizations. 
From d1b18f03d0ba14cd755c93ca05c505c561703e7f Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 5 Jun 2024 11:24:23 +0200 Subject: [PATCH 133/179] Rename `lumi` module to `convolutions` --- CHANGELOG.md | 1 + pineappl/src/{lumi.rs => convolutions.rs} | 0 pineappl/src/fk_table.rs | 2 +- pineappl/src/grid.rs | 2 +- pineappl/src/lib.rs | 2 +- pineappl/tests/drell_yan_lo.rs | 2 +- pineappl_capi/src/lib.rs | 2 +- pineappl_cli/src/helpers.rs | 2 +- pineappl_cli/tests/import.rs | 2 +- pineappl_py/src/fk_table.rs | 2 +- pineappl_py/src/grid.rs | 3 +-- 11 files changed, 10 insertions(+), 10 deletions(-) rename pineappl/src/{lumi.rs => convolutions.rs} (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index c47261988..46ac89044 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - renamed `TryFromGridError::InvalidLumi` to `TryFromGridError::InvalidChannel` - changed member `lumi_id_types` of `OperatorInfo` and `OperatorSliceInfo` to `pid_basis`, which is now of type `PidBasis` +- renamed module `pineappl::lumi` to `pineappl::convolutions` ### Removed diff --git a/pineappl/src/lumi.rs b/pineappl/src/convolutions.rs similarity index 100% rename from pineappl/src/lumi.rs rename to pineappl/src/convolutions.rs diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 5370a67b4..8c2a466cb 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -1,8 +1,8 @@ //! Provides the [`FkTable`] type. 
use super::boc::Order; +use super::convolutions::LumiCache; use super::grid::{Convolution, Grid, GridError}; -use super::lumi::LumiCache; use super::subgrid::Subgrid; use float_cmp::approx_eq; use ndarray::Array4; diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index f7301ddce..9065cff81 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -2,12 +2,12 @@ use super::bin::{BinInfo, BinLimits, BinRemapper}; use super::boc::{Channel, Order}; +use super::convolutions::LumiCache; use super::empty_subgrid::EmptySubgridV1; use super::evolution::{self, AlphasTable, EvolveInfo, OperatorInfo, OperatorSliceInfo}; use super::fk_table::FkTable; use super::import_only_subgrid::ImportOnlySubgridV2; use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; -use super::lumi::LumiCache; use super::ntuple_subgrid::NtupleSubgridV1; use super::pids::{self, PidBasis}; use super::subgrid::{ExtraSubgridParams, Mu2, Subgrid, SubgridEnum, SubgridParams}; diff --git a/pineappl/src/lib.rs b/pineappl/src/lib.rs index 8792fc691..5a0387999 100644 --- a/pineappl/src/lib.rs +++ b/pineappl/src/lib.rs @@ -37,13 +37,13 @@ mod convert; pub mod bin; pub mod boc; +pub mod convolutions; pub mod empty_subgrid; pub mod evolution; pub mod fk_table; pub mod grid; pub mod import_only_subgrid; pub mod lagrange_subgrid; -pub mod lumi; pub mod ntuple_subgrid; pub mod packed_array; pub mod pids; diff --git a/pineappl/tests/drell_yan_lo.rs b/pineappl/tests/drell_yan_lo.rs index 8ba8dc505..0b53bc2b0 100644 --- a/pineappl/tests/drell_yan_lo.rs +++ b/pineappl/tests/drell_yan_lo.rs @@ -5,8 +5,8 @@ use num_complex::Complex; use pineappl::bin::BinRemapper; use pineappl::boc::Order; use pineappl::channel; +use pineappl::convolutions::LumiCache; use pineappl::grid::{Grid, GridOptFlags, Ntuple}; -use pineappl::lumi::LumiCache; use pineappl::subgrid::{ExtraSubgridParams, Subgrid, SubgridEnum, SubgridParams}; use rand::Rng; use rand_pcg::Pcg64; diff --git 
a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index 5132b9384..730303ae3 100644 --- a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -58,8 +58,8 @@ use itertools::izip; use pineappl::bin::BinRemapper; use pineappl::boc::{Channel, Order}; +use pineappl::convolutions::LumiCache; use pineappl::grid::{Grid, GridOptFlags, Ntuple}; -use pineappl::lumi::LumiCache; use pineappl::subgrid::{ExtraSubgridParams, SubgridParams}; use std::collections::HashMap; use std::ffi::{CStr, CString}; diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 9d5713719..7947da344 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -2,8 +2,8 @@ use super::GlobalConfiguration; use anyhow::{ensure, Context, Result}; use lhapdf::{Pdf, PdfSet}; use ndarray::Array3; +use pineappl::convolutions::LumiCache; use pineappl::grid::Grid; -use pineappl::lumi::LumiCache; use prettytable::format::{FormatBuilder, LinePosition, LineSeparator}; use prettytable::Table; use std::fs::{File, OpenOptions}; diff --git a/pineappl_cli/tests/import.rs b/pineappl_cli/tests/import.rs index 5bf26923e..8aea1a335 100644 --- a/pineappl_cli/tests/import.rs +++ b/pineappl_cli/tests/import.rs @@ -836,9 +836,9 @@ fn import_dis_fktable() { fn import_hadronic_fktable() { use float_cmp::assert_approx_eq; use lhapdf::Pdf; + use pineappl::convolutions::LumiCache; use pineappl::fk_table::{FkAssumptions, FkTable}; use pineappl::grid::{Convolution, Grid}; - use pineappl::lumi::LumiCache; use std::fs::File; let output = NamedTempFile::new("converted4.pineappl.lz4").unwrap(); diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index 60abdbe83..9349f1d2a 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -1,6 +1,6 @@ +use pineappl::convolutions::LumiCache; use pineappl::fk_table::{FkAssumptions, FkTable}; use pineappl::grid::Grid; -use pineappl::lumi::LumiCache; use numpy::{IntoPyArray, PyArray1, PyArray4, PyReadonlyArray1}; use 
pyo3::prelude::*; diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 40b121f90..78e74e81a 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -1,9 +1,8 @@ use pineappl::boc::Order; +use pineappl::convolutions::LumiCache; use pineappl::evolution::OperatorInfo; use pineappl::grid::{Grid, Ntuple}; -use pineappl::lumi::LumiCache; - use super::bin::PyBinRemapper; use super::evolution::PyEvolveInfo; use super::fk_table::PyFkTable; From cb005792200c7c030e344c3b60b768c4c96a8532 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 6 Jun 2024 10:05:54 +0200 Subject: [PATCH 134/179] Move `Convolution` to different module and test it --- CHANGELOG.md | 1 + pineappl/src/convolutions.rs | 70 +++++++++++++++++++++++++++++ pineappl/src/evolution.rs | 3 +- pineappl/src/fk_table.rs | 4 +- pineappl/src/grid.rs | 46 +------------------ pineappl_cli/src/export/applgrid.rs | 3 +- pineappl_cli/src/import/applgrid.rs | 3 +- pineappl_cli/src/import/fastnlo.rs | 3 +- pineappl_cli/src/import/fktable.rs | 3 +- pineappl_cli/src/plot.rs | 2 +- pineappl_cli/tests/import.rs | 3 +- 11 files changed, 87 insertions(+), 54 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 46ac89044..c24e4a4bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +- added new type `Convolution` - added new methods `Grid::convolutions` and `Grid::set_convolution` - added the function `pineappl_grid_convolve_with_one` and `pineappl_grid_convolve_with_two` which replace the deprecated function diff --git a/pineappl/src/convolutions.rs b/pineappl/src/convolutions.rs index 791d3a6d9..705c1b685 100644 --- a/pineappl/src/convolutions.rs +++ b/pineappl/src/convolutions.rs @@ -340,3 +340,73 @@ impl<'a> LumiCache<'a> { .collect(); } } + +/// Data type that indentifies different types of convolutions. 
+#[derive(Debug, Eq, PartialEq)] +pub enum Convolution { + // TODO: eventually get rid of this value + /// No convolution. + None, + /// Unpolarized parton distribution function. The integer denotes the type of hadron with a PDG + /// MC ID. + UnpolPDF(i32), + /// Polarized parton distribution function. The integer denotes the type of hadron with a PDG + /// MC ID. + PolPDF(i32), + /// Unpolarized fragmentation function. The integer denotes the type of hadron with a PDG MC + /// ID. + UnpolFF(i32), + /// Polarized fragmentation function. The integer denotes the type of hadron with a PDG MC ID. + PolFF(i32), +} + +impl Convolution { + /// Return the convolution if the PID is charged conjugated. + #[must_use] + pub const fn cc(&self) -> Self { + match *self { + Self::None => Self::None, + Self::UnpolPDF(pid) => Self::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)), + Self::PolPDF(pid) => Self::PolPDF(pids::charge_conjugate_pdg_pid(pid)), + Self::UnpolFF(pid) => Self::UnpolFF(pids::charge_conjugate_pdg_pid(pid)), + Self::PolFF(pid) => Self::PolFF(pids::charge_conjugate_pdg_pid(pid)), + } + } + + /// Return the PID of the convolution if it has any. 
+ #[must_use] + pub const fn pid(&self) -> Option { + match *self { + Self::None => None, + Self::UnpolPDF(pid) | Self::PolPDF(pid) | Self::UnpolFF(pid) | Self::PolFF(pid) => { + Some(pid) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn convolution_cc() { + assert_eq!(Convolution::None.cc(), Convolution::None); + assert_eq!( + Convolution::UnpolPDF(2212).cc(), + Convolution::UnpolPDF(-2212) + ); + assert_eq!(Convolution::PolPDF(2212).cc(), Convolution::PolPDF(-2212)); + assert_eq!(Convolution::UnpolFF(2212).cc(), Convolution::UnpolFF(-2212)); + assert_eq!(Convolution::PolFF(2212).cc(), Convolution::PolFF(-2212)); + } + + #[test] + fn convolution_pid() { + assert_eq!(Convolution::None.pid(), None); + assert_eq!(Convolution::UnpolPDF(2212).pid(), Some(2212)); + assert_eq!(Convolution::PolPDF(2212).pid(), Some(2212)); + assert_eq!(Convolution::UnpolFF(2212).pid(), Some(2212)); + assert_eq!(Convolution::PolFF(2212).pid(), Some(2212)); + } +} diff --git a/pineappl/src/evolution.rs b/pineappl/src/evolution.rs index dd60e06c0..95667057d 100644 --- a/pineappl/src/evolution.rs +++ b/pineappl/src/evolution.rs @@ -2,7 +2,8 @@ use super::boc::{Channel, Order}; use super::channel; -use super::grid::{Convolution, Grid, GridError}; +use super::convolutions::Convolution; +use super::grid::{Grid, GridError}; use super::import_only_subgrid::ImportOnlySubgridV2; use super::pids::PidBasis; use super::sparse_array3::SparseArray3; diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 8c2a466cb..b7d045194 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -1,8 +1,8 @@ //! Provides the [`FkTable`] type. 
use super::boc::Order; -use super::convolutions::LumiCache; -use super::grid::{Convolution, Grid, GridError}; +use super::convolutions::{Convolution, LumiCache}; +use super::grid::{Grid, GridError}; use super::subgrid::Subgrid; use float_cmp::approx_eq; use ndarray::Array4; diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 9065cff81..5d3c2b17b 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -2,7 +2,7 @@ use super::bin::{BinInfo, BinLimits, BinRemapper}; use super::boc::{Channel, Order}; -use super::convolutions::LumiCache; +use super::convolutions::{Convolution, LumiCache}; use super::empty_subgrid::EmptySubgridV1; use super::evolution::{self, AlphasTable, EvolveInfo, OperatorInfo, OperatorSliceInfo}; use super::fk_table::FkTable; @@ -223,50 +223,6 @@ pub struct Grid { more_members: MoreMembers, } -/// Data type that indentifies different types of convolutions. -#[derive(Debug, Eq, PartialEq)] -pub enum Convolution { - // TODO: eventually get rid of this value - /// No convolution. - None, - /// Unpolarized parton distribution function. The integer denotes the type of hadron with a PDG - /// MC ID. - UnpolPDF(i32), - /// Polarized parton distribution function. The integer denotes the type of hadron with a PDG - /// MC ID. - PolPDF(i32), - /// Unpolarized fragmentation function. The integer denotes the type of hadron with a PDG MC - /// ID. - UnpolFF(i32), - /// Polarized fragmentation function. The integer denotes the type of hadron with a PDG MC ID. - PolFF(i32), -} - -impl Convolution { - /// Return the convolution if the PID is charged conjugated. 
- #[must_use] - pub const fn cc(&self) -> Self { - match *self { - Self::None => Self::None, - Self::UnpolPDF(pid) => Self::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)), - Self::PolPDF(pid) => Self::PolPDF(pids::charge_conjugate_pdg_pid(pid)), - Self::UnpolFF(pid) => Self::UnpolFF(pids::charge_conjugate_pdg_pid(pid)), - Self::PolFF(pid) => Self::PolFF(pids::charge_conjugate_pdg_pid(pid)), - } - } - - /// Return the PID of the convolution if it has any. - #[must_use] - pub const fn pid(&self) -> Option { - match *self { - Self::None => None, - Self::UnpolPDF(pid) | Self::PolPDF(pid) | Self::UnpolFF(pid) | Self::PolFF(pid) => { - Some(pid) - } - } - } -} - impl Grid { /// Constructor. #[must_use] diff --git a/pineappl_cli/src/export/applgrid.rs b/pineappl_cli/src/export/applgrid.rs index ec800c56a..869105f08 100644 --- a/pineappl_cli/src/export/applgrid.rs +++ b/pineappl_cli/src/export/applgrid.rs @@ -3,7 +3,8 @@ use cxx::{let_cxx_string, UniquePtr}; use float_cmp::approx_eq; use ndarray::{s, Axis}; use pineappl::boc::Order; -use pineappl::grid::{Convolution, Grid}; +use pineappl::convolutions::Convolution; +use pineappl::grid::Grid; use pineappl::subgrid::{Mu2, Subgrid, SubgridParams}; use pineappl_applgrid::ffi::{self, grid}; use std::borrow::Cow; diff --git a/pineappl_cli/src/import/applgrid.rs b/pineappl_cli/src/import/applgrid.rs index 356fd2e24..76baff863 100644 --- a/pineappl_cli/src/import/applgrid.rs +++ b/pineappl_cli/src/import/applgrid.rs @@ -1,6 +1,7 @@ use anyhow::Result; use pineappl::boc::{Channel, Order}; -use pineappl::grid::{Convolution, Grid}; +use pineappl::convolutions::Convolution; +use pineappl::grid::Grid; use pineappl::import_only_subgrid::ImportOnlySubgridV2; use pineappl::sparse_array3::SparseArray3; use pineappl::subgrid::{Mu2, SubgridParams}; diff --git a/pineappl_cli/src/import/fastnlo.rs b/pineappl_cli/src/import/fastnlo.rs index d0c066495..4e79d3f33 100644 --- a/pineappl_cli/src/import/fastnlo.rs +++ 
b/pineappl_cli/src/import/fastnlo.rs @@ -3,7 +3,8 @@ use itertools::Itertools; use ndarray::s; use pineappl::bin::BinRemapper; use pineappl::boc::{Channel, Order}; -use pineappl::grid::{Convolution, Grid}; +use pineappl::convolutions::Convolution; +use pineappl::grid::Grid; use pineappl::import_only_subgrid::ImportOnlySubgridV2; use pineappl::sparse_array3::SparseArray3; use pineappl::subgrid::{Mu2, SubgridParams}; diff --git a/pineappl_cli/src/import/fktable.rs b/pineappl_cli/src/import/fktable.rs index 23d3a1e70..3fa6f2c28 100644 --- a/pineappl_cli/src/import/fktable.rs +++ b/pineappl_cli/src/import/fktable.rs @@ -3,7 +3,8 @@ use flate2::read::GzDecoder; use ndarray::s; use pineappl::boc::Order; use pineappl::channel; -use pineappl::grid::{Convolution, Grid}; +use pineappl::convolutions::Convolution; +use pineappl::grid::Grid; use pineappl::import_only_subgrid::ImportOnlySubgridV1; use pineappl::pids::PidBasis; use pineappl::sparse_array3::SparseArray3; diff --git a/pineappl_cli/src/plot.rs b/pineappl_cli/src/plot.rs index 53c047bbe..6e988ef26 100644 --- a/pineappl_cli/src/plot.rs +++ b/pineappl_cli/src/plot.rs @@ -6,7 +6,7 @@ use clap::{Parser, ValueHint}; use itertools::Itertools; use ndarray::Axis; use pineappl::boc::Channel; -use pineappl::grid::Convolution; +use pineappl::convolutions::Convolution; use pineappl::subgrid::Subgrid; use rayon::{prelude::*, ThreadPoolBuilder}; use std::fmt::Write; diff --git a/pineappl_cli/tests/import.rs b/pineappl_cli/tests/import.rs index 8aea1a335..f5964780c 100644 --- a/pineappl_cli/tests/import.rs +++ b/pineappl_cli/tests/import.rs @@ -836,9 +836,10 @@ fn import_dis_fktable() { fn import_hadronic_fktable() { use float_cmp::assert_approx_eq; use lhapdf::Pdf; + use pineappl::convolutions::Convolution; use pineappl::convolutions::LumiCache; use pineappl::fk_table::{FkAssumptions, FkTable}; - use pineappl::grid::{Convolution, Grid}; + use pineappl::grid::Grid; use std::fs::File; let output = 
NamedTempFile::new("converted4.pineappl.lz4").unwrap(); From 70f54f245fc1a411ded11959f237dc3ee78ca600 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 6 Jun 2024 10:30:26 +0200 Subject: [PATCH 135/179] Rename `Convolution::cc` to `Convolution::charge_conjugate` --- pineappl/src/convolutions.rs | 23 ++++++++++++++++------- pineappl_cli/src/write.rs | 4 ++-- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/pineappl/src/convolutions.rs b/pineappl/src/convolutions.rs index 705c1b685..d1335e435 100644 --- a/pineappl/src/convolutions.rs +++ b/pineappl/src/convolutions.rs @@ -363,7 +363,7 @@ pub enum Convolution { impl Convolution { /// Return the convolution if the PID is charged conjugated. #[must_use] - pub const fn cc(&self) -> Self { + pub const fn charge_conjugate(&self) -> Self { match *self { Self::None => Self::None, Self::UnpolPDF(pid) => Self::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)), @@ -390,15 +390,24 @@ mod tests { use super::*; #[test] - fn convolution_cc() { - assert_eq!(Convolution::None.cc(), Convolution::None); + fn convolution_charge_conjugate() { + assert_eq!(Convolution::None.charge_conjugate(), Convolution::None); assert_eq!( - Convolution::UnpolPDF(2212).cc(), + Convolution::UnpolPDF(2212).charge_conjugate(), Convolution::UnpolPDF(-2212) ); - assert_eq!(Convolution::PolPDF(2212).cc(), Convolution::PolPDF(-2212)); - assert_eq!(Convolution::UnpolFF(2212).cc(), Convolution::UnpolFF(-2212)); - assert_eq!(Convolution::PolFF(2212).cc(), Convolution::PolFF(-2212)); + assert_eq!( + Convolution::PolPDF(2212).charge_conjugate(), + Convolution::PolPDF(-2212) + ); + assert_eq!( + Convolution::UnpolFF(2212).charge_conjugate(), + Convolution::UnpolFF(-2212) + ); + assert_eq!( + Convolution::PolFF(2212).charge_conjugate(), + Convolution::PolFF(-2212) + ); } #[test] diff --git a/pineappl_cli/src/write.rs b/pineappl_cli/src/write.rs index 449078de6..3de51c3bd 100644 --- a/pineappl_cli/src/write.rs +++ b/pineappl_cli/src/write.rs @@ 
-523,10 +523,10 @@ impl Subcommand for Opts { } if cc1 { - grid.set_convolution(0, grid.convolutions()[0].cc()); + grid.set_convolution(0, grid.convolutions()[0].charge_conjugate()); } if cc2 { - grid.set_convolution(1, grid.convolutions()[1].cc()); + grid.set_convolution(1, grid.convolutions()[1].charge_conjugate()); } } OpsArg::DedupChannels(ulps) => { From a70dd01fcb209bb9ef884824c89b3d1e526e095d Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 7 Jun 2024 11:14:34 +0200 Subject: [PATCH 136/179] Document procedure on how to use test data --- CONTRIBUTING.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 02d70f712..6e98a6ecc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -46,6 +46,16 @@ increasing the MSRV make sure to set it everywhere to the same value: this avoids the clippy warning that a Panic section is missing. Also document this with `// UNWRAP: ...` +### Writing tests that need test data + +- if you write a test that needs test data (grids, EKOs, etc.) store them at + . Ask one of the maintainers + to upload the data for you if you don't have access to this location). Then + add a line to `maintainer/generate-coverage.sh` that downloads the data with + `wget` and a similar line to `.github/workflows/rust.yml` that downloads the + data with `curl`. To make Github refresh the cached test data when running + the CI, increase the integer `XX` in the line `key: test-data-vXX` by one. + ## Git - When you commit, make sure the commit message is written properly. 
This From 31cc7799dccb5887d54ed9cc45e35a8eb57b4d9d Mon Sep 17 00:00:00 2001 From: t7phy Date: Sun, 9 Jun 2024 12:10:00 +0200 Subject: [PATCH 137/179] pdf input to a Vec of strings --- pineappl_cli/src/channels.rs | 4 ++-- pineappl_cli/src/convolve.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pineappl_cli/src/channels.rs b/pineappl_cli/src/channels.rs index d9841f666..341be9dfa 100644 --- a/pineappl_cli/src/channels.rs +++ b/pineappl_cli/src/channels.rs @@ -15,8 +15,8 @@ pub struct Opts { #[arg(value_hint = ValueHint::FilePath)] input: PathBuf, /// LHAPDF id or name of the PDF set. - #[arg(value_parser = helpers::parse_pdfset)] - pdfset: String, + #[arg(num_args = 1, value_delimiter = ',', value_parser = helpers::parse_pdfset)] + pdfset: Vec, /// Show absolute numbers of each contribution. #[arg(long, short)] absolute: bool, diff --git a/pineappl_cli/src/convolve.rs b/pineappl_cli/src/convolve.rs index 71f0225ec..ef37ca6a4 100644 --- a/pineappl_cli/src/convolve.rs +++ b/pineappl_cli/src/convolve.rs @@ -16,7 +16,7 @@ pub struct Opts { input: PathBuf, /// LHAPDF id(s) or name of the PDF set(s). #[arg(required = true, value_parser = helpers::parse_pdfset)] - pdfsets: Vec, + pdfsets: Vec>, /// Selects a subset of bins. #[arg( long, From 5efc0f1df6166a70c7ac99f33100a2740d51ab8c Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 10 Jun 2024 10:30:48 +0200 Subject: [PATCH 138/179] Undo changes from commit 31cc779 --- pineappl_cli/src/channels.rs | 4 ++-- pineappl_cli/src/convolve.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pineappl_cli/src/channels.rs b/pineappl_cli/src/channels.rs index 341be9dfa..d9841f666 100644 --- a/pineappl_cli/src/channels.rs +++ b/pineappl_cli/src/channels.rs @@ -15,8 +15,8 @@ pub struct Opts { #[arg(value_hint = ValueHint::FilePath)] input: PathBuf, /// LHAPDF id or name of the PDF set. 
- #[arg(num_args = 1, value_delimiter = ',', value_parser = helpers::parse_pdfset)] - pdfset: Vec, + #[arg(value_parser = helpers::parse_pdfset)] + pdfset: String, /// Show absolute numbers of each contribution. #[arg(long, short)] absolute: bool, diff --git a/pineappl_cli/src/convolve.rs b/pineappl_cli/src/convolve.rs index ef37ca6a4..71f0225ec 100644 --- a/pineappl_cli/src/convolve.rs +++ b/pineappl_cli/src/convolve.rs @@ -16,7 +16,7 @@ pub struct Opts { input: PathBuf, /// LHAPDF id(s) or name of the PDF set(s). #[arg(required = true, value_parser = helpers::parse_pdfset)] - pdfsets: Vec>, + pdfsets: Vec, /// Selects a subset of bins. #[arg( long, From 0ec739360bc292af8b5bdfe8e617a3514a58cab4 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 10 Jun 2024 10:25:07 +0200 Subject: [PATCH 139/179] Prepare CLI for multiple convolution functions --- pineappl_cli/src/analyze.rs | 5 ++-- pineappl_cli/src/channels.rs | 3 +- pineappl_cli/src/convolve.rs | 5 ++-- pineappl_cli/src/diff.rs | 9 +++--- pineappl_cli/src/evolve.rs | 5 ++-- pineappl_cli/src/export.rs | 3 +- pineappl_cli/src/helpers.rs | 56 ++++++++++++++++++++---------------- pineappl_cli/src/import.rs | 3 +- pineappl_cli/src/orders.rs | 3 +- pineappl_cli/src/plot.rs | 35 ++++++++++++++++------ pineappl_cli/src/pull.rs | 9 +++--- pineappl_cli/src/uncert.rs | 5 ++-- 12 files changed, 88 insertions(+), 53 deletions(-) diff --git a/pineappl_cli/src/analyze.rs b/pineappl_cli/src/analyze.rs index 506767db3..134556b11 100644 --- a/pineappl_cli/src/analyze.rs +++ b/pineappl_cli/src/analyze.rs @@ -6,6 +6,7 @@ use clap::{value_parser, Parser, ValueHint}; use prettytable::{cell, Row}; use std::path::PathBuf; use std::process::ExitCode; +use std::slice; /// Perform various analyses with grids. 
#[derive(Parser)] @@ -87,7 +88,7 @@ impl Subcommand for CkfOpts { lumi_mask[lumi] = true; helpers::convolve( &grid, - &mut pdf, + slice::from_mut(&mut pdf), &[self.order], &[], &lumi_mask, @@ -103,7 +104,7 @@ impl Subcommand for CkfOpts { lumi_mask[lumi] = true; helpers::convolve( &grid, - &mut pdf, + slice::from_mut(&mut pdf), &orders_den, &[], &lumi_mask, diff --git a/pineappl_cli/src/channels.rs b/pineappl_cli/src/channels.rs index d9841f666..4278eb5b6 100644 --- a/pineappl_cli/src/channels.rs +++ b/pineappl_cli/src/channels.rs @@ -7,6 +7,7 @@ use prettytable::{cell, Row}; use std::ops::RangeInclusive; use std::path::PathBuf; use std::process::ExitCode; +use std::slice; /// Shows the contribution for each partonic channel. #[derive(Parser)] @@ -93,7 +94,7 @@ impl Subcommand for Opts { channel_mask[channel] = true; helpers::convolve( &grid, - &mut pdf, + slice::from_mut(&mut pdf), &self.orders, &[], &channel_mask, diff --git a/pineappl_cli/src/convolve.rs b/pineappl_cli/src/convolve.rs index 71f0225ec..91ee8bfe7 100644 --- a/pineappl_cli/src/convolve.rs +++ b/pineappl_cli/src/convolve.rs @@ -6,6 +6,7 @@ use prettytable::{cell, Row}; use std::ops::RangeInclusive; use std::path::PathBuf; use std::process::ExitCode; +use std::slice; /// Convolutes a PineAPPL grid with a PDF set. 
#[derive(Parser)] @@ -54,7 +55,7 @@ impl Subcommand for Opts { let results = helpers::convolve( &grid, - &mut pdf, + slice::from_mut(&mut pdf), &self.orders, &bins, &[], @@ -83,7 +84,7 @@ impl Subcommand for Opts { let mut pdf = helpers::create_pdf(pdfset).unwrap(); helpers::convolve( &grid, - &mut pdf, + slice::from_mut(&mut pdf), &self.orders, &bins, &[], diff --git a/pineappl_cli/src/diff.rs b/pineappl_cli/src/diff.rs index 8757c45d6..7e111e867 100644 --- a/pineappl_cli/src/diff.rs +++ b/pineappl_cli/src/diff.rs @@ -6,6 +6,7 @@ use prettytable::{cell, Row}; use std::collections::HashSet; use std::path::PathBuf; use std::process::ExitCode; +use std::slice; /// Compares the numerical content of two grids with each other. #[derive(Parser)] @@ -149,7 +150,7 @@ impl Subcommand for Opts { let results1 = helpers::convolve( &grid1, - &mut pdf, + slice::from_mut(&mut pdf), &orders1, &[], &[], @@ -159,7 +160,7 @@ impl Subcommand for Opts { ); let results2 = helpers::convolve( &grid2, - &mut pdf, + slice::from_mut(&mut pdf), &orders2, &[], &[], @@ -205,7 +206,7 @@ impl Subcommand for Opts { .map(|&order| { helpers::convolve( &grid1, - &mut pdf, + slice::from_mut(&mut pdf), &[order], &[], &[], @@ -220,7 +221,7 @@ impl Subcommand for Opts { .map(|&order| { helpers::convolve( &grid2, - &mut pdf, + slice::from_mut(&mut pdf), &[order], &[], &[], diff --git a/pineappl_cli/src/evolve.rs b/pineappl_cli/src/evolve.rs index 07cd297f2..71f4e1667 100644 --- a/pineappl_cli/src/evolve.rs +++ b/pineappl_cli/src/evolve.rs @@ -7,6 +7,7 @@ use pineappl::fk_table::FkTable; use pineappl::grid::Grid; use std::path::{Path, PathBuf}; use std::process::ExitCode; +use std::slice; #[cfg(feature = "evolve")] mod eko { @@ -545,7 +546,7 @@ impl Subcommand for Opts { let mut pdf = helpers::create_pdf(&self.pdfset)?; let results = helpers::convolve_scales( &grid, - &mut pdf, + slice::from_mut(&mut pdf), &self.orders, &[], &[], @@ -565,7 +566,7 @@ impl Subcommand for Opts { )?; let evolved_results = 
helpers::convolve_scales( fk_table.grid(), - &mut pdf, + slice::from_mut(&mut pdf), &[], &[], &[], diff --git a/pineappl_cli/src/export.rs b/pineappl_cli/src/export.rs index 7986a6c82..684abb434 100644 --- a/pineappl_cli/src/export.rs +++ b/pineappl_cli/src/export.rs @@ -7,6 +7,7 @@ use pineappl::boc::Order; use pineappl::grid::Grid; use std::path::{Path, PathBuf}; use std::process::ExitCode; +use std::slice; #[cfg(feature = "applgrid")] mod applgrid; @@ -158,7 +159,7 @@ impl Subcommand for Opts { let mut pdf = helpers::create_pdf(&self.pdfset)?; let reference_results = helpers::convolve( &grid, - &mut pdf, + slice::from_mut(&mut pdf), &orders, &[], &[], diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 7947da344..6dd8f6c4f 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -127,7 +127,7 @@ pub enum ConvoluteMode { pub fn convolve_scales( grid: &Grid, - lhapdf: &mut Pdf, + conv_funs: &mut [Pdf], orders: &[(u32, u32)], bins: &[usize], channels: &[bool], @@ -146,29 +146,37 @@ pub fn convolve_scales( }) .collect(); - // if the field 'Particle' is missing we assume it's a proton PDF - let pdf_pdg_id = lhapdf - .set() - .entry("Particle") - .map_or(Ok(2212), |string| string.parse::()) - .unwrap(); - - if cfg.force_positive { - lhapdf.set_force_positive(1); - } - - let x_max = lhapdf.x_max(); - let x_min = lhapdf.x_min(); - let mut pdf = |id, x, q2| { - if !cfg.allow_extrapolation && (x < x_min || x > x_max) { - 0.0 - } else { - lhapdf.xfx_q2(id, x, q2) + let mut results = match conv_funs { + [fun] => { + // if the field 'Particle' is missing we assume it's a proton PDF + let pdf_pdg_id = fun + .set() + .entry("Particle") + .map_or(Ok(2212), |string| string.parse::()) + .unwrap(); + + if cfg.force_positive { + fun.set_force_positive(1); + } + + let x_max = fun.x_max(); + let x_min = fun.x_min(); + let mut alphas = |q2| fun.alphas_q2(q2); + let mut fun = |id, x, q2| { + if !cfg.allow_extrapolation && (x < x_min || x > 
x_max) { + 0.0 + } else { + fun.xfx_q2(id, x, q2) + } + }; + + let mut cache = LumiCache::with_one(pdf_pdg_id, &mut fun, &mut alphas); + + grid.convolve(&mut cache, &orders, bins, channels, scales) } + [_fun0, _fun1] => todo!(), + _ => unimplemented!(), }; - let mut alphas = |q2| lhapdf.alphas_q2(q2); - let mut cache = LumiCache::with_one(pdf_pdg_id, &mut pdf, &mut alphas); - let mut results = grid.convolve(&mut cache, &orders, bins, channels, scales); match mode { ConvoluteMode::Asymmetry => { @@ -212,7 +220,7 @@ pub fn convolve_scales( pub fn convolve( grid: &Grid, - lhapdf: &mut Pdf, + conv_funs: &mut [Pdf], orders: &[(u32, u32)], bins: &[usize], lumis: &[bool], @@ -222,7 +230,7 @@ pub fn convolve( ) -> Vec { convolve_scales( grid, - lhapdf, + conv_funs, orders, bins, lumis, diff --git a/pineappl_cli/src/import.rs b/pineappl_cli/src/import.rs index 727bc718b..db1c8c681 100644 --- a/pineappl_cli/src/import.rs +++ b/pineappl_cli/src/import.rs @@ -6,6 +6,7 @@ use clap::{Parser, ValueHint}; use pineappl::grid::Grid; use std::path::{Path, PathBuf}; use std::process::ExitCode; +use std::slice; #[cfg(feature = "applgrid")] mod applgrid; @@ -277,7 +278,7 @@ impl Subcommand for Opts { let mut pdf = helpers::create_pdf(&self.pdfset)?; let results = helpers::convolve( &grid, - &mut pdf, + slice::from_mut(&mut pdf), &[], &[], &[], diff --git a/pineappl_cli/src/orders.rs b/pineappl_cli/src/orders.rs index 77919d234..ef4840863 100644 --- a/pineappl_cli/src/orders.rs +++ b/pineappl_cli/src/orders.rs @@ -5,6 +5,7 @@ use clap::{Parser, ValueHint}; use prettytable::{cell, Row}; use std::path::PathBuf; use std::process::ExitCode; +use std::slice; /// Shows the predictions for all bin for each order separately. 
#[derive(Parser)] @@ -66,7 +67,7 @@ impl Subcommand for Opts { .map(|order| { helpers::convolve( &grid, - &mut pdf, + slice::from_mut(&mut pdf), &[(order.alphas, order.alpha)], &[], &[], diff --git a/pineappl_cli/src/plot.rs b/pineappl_cli/src/plot.rs index 6e988ef26..8b4d93910 100644 --- a/pineappl_cli/src/plot.rs +++ b/pineappl_cli/src/plot.rs @@ -13,6 +13,7 @@ use std::fmt::Write; use std::num::NonZeroUsize; use std::path::{Path, PathBuf}; use std::process::ExitCode; +use std::slice; use std::thread; /// Creates a matplotlib script plotting the contents of the grid. @@ -219,8 +220,16 @@ impl Subcommand for Opts { })) { let bins: Vec<_> = (slice.0..slice.1).collect(); - let results = - helpers::convolve(&grid, &mut pdf, &[], &bins, &[], self.scales, mode, cfg); + let results = helpers::convolve( + &grid, + slice::from_mut(&mut pdf), + &[], + &bins, + &[], + self.scales, + mode, + cfg, + ); let qcd_results = { let mut orders = grid.orders().to_vec(); @@ -239,7 +248,7 @@ impl Subcommand for Opts { helpers::convolve( &grid, - &mut pdf, + slice::from_mut(&mut pdf), &qcd_orders, &bins, &[], @@ -271,8 +280,16 @@ impl Subcommand for Opts { if self.no_pdf_unc { let mut pdf = helpers::create_pdf(pdfset).unwrap(); - let results = - helpers::convolve(&grid, &mut pdf, &[], &bins, &[], 1, mode, cfg); + let results = helpers::convolve( + &grid, + slice::from_mut(&mut pdf), + &[], + &bins, + &[], + 1, + mode, + cfg, + ); Ok(vec![results; 3]) } else { @@ -284,7 +301,7 @@ impl Subcommand for Opts { .flat_map(|mut pdf| { helpers::convolve( &grid, - &mut pdf, + slice::from_mut(&mut pdf), &[], &bins, &[], @@ -383,7 +400,7 @@ impl Subcommand for Opts { ), helpers::convolve( &grid, - &mut pdf, + slice::from_mut(&mut pdf), &[], &bins, &channel_mask, @@ -527,7 +544,7 @@ impl Subcommand for Opts { .map(|pdf| { let values = helpers::convolve( &grid, - pdf, + slice::from_mut(pdf), &[], &[bin], &[], @@ -544,7 +561,7 @@ impl Subcommand for Opts { .map(|pdf| { let values = helpers::convolve( 
&grid, - pdf, + slice::from_mut(pdf), &[], &[bin], &[], diff --git a/pineappl_cli/src/pull.rs b/pineappl_cli/src/pull.rs index e12cc0ada..ee39bd6e0 100644 --- a/pineappl_cli/src/pull.rs +++ b/pineappl_cli/src/pull.rs @@ -8,6 +8,7 @@ use rayon::{prelude::*, ThreadPoolBuilder}; use std::num::NonZeroUsize; use std::path::PathBuf; use std::process::ExitCode; +use std::slice; use std::thread; // TODO: do we need the CL parameter? @@ -68,7 +69,7 @@ impl Subcommand for Opts { .flat_map(|pdf| { helpers::convolve( &grid, - pdf, + slice::from_mut(pdf), &self.orders, &[], &[], @@ -83,7 +84,7 @@ impl Subcommand for Opts { .flat_map(|pdf| { helpers::convolve( &grid, - pdf, + slice::from_mut(pdf), &self.orders, &[], &[], @@ -149,7 +150,7 @@ impl Subcommand for Opts { channel_mask[channel] = true; match helpers::convolve( &grid, - &mut pdfset[member], + slice::from_mut(&mut pdfset[member]), &self.orders, &[bin], &channel_mask, @@ -174,7 +175,7 @@ impl Subcommand for Opts { channel_mask[channel] = true; match helpers::convolve( &grid, - pdf, + slice::from_mut(pdf), &self.orders, &[bin], &channel_mask, diff --git a/pineappl_cli/src/uncert.rs b/pineappl_cli/src/uncert.rs index aefe6f867..09bec4609 100644 --- a/pineappl_cli/src/uncert.rs +++ b/pineappl_cli/src/uncert.rs @@ -8,6 +8,7 @@ use rayon::{prelude::*, ThreadPoolBuilder}; use std::num::NonZeroUsize; use std::path::PathBuf; use std::process::ExitCode; +use std::slice; use std::thread; #[derive(Args)] @@ -121,7 +122,7 @@ impl Subcommand for Opts { .flat_map(|mut pdf| { helpers::convolve( &grid, - &mut pdf, + slice::from_mut(&mut pdf), &self.orders, &[], &[], @@ -150,7 +151,7 @@ impl Subcommand for Opts { .unwrap_or(1); let scale_results = helpers::convolve( &grid, - &mut helpers::create_pdf(&self.pdfset)?, + slice::from_mut(&mut helpers::create_pdf(&self.pdfset)?), &self.orders, &[], &[], From 9556a504f175f4428af59eb89952daeed47a8715 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 10 Jun 2024 10:43:14 +0200 Subject: 
[PATCH 140/179] Add struct `ConvFun` and use it in `channels` --- pineappl_cli/src/channels.rs | 13 ++++++------ pineappl_cli/src/helpers.rs | 37 ++++++++++++++++++++++++++++++++++ pineappl_cli/tests/channels.rs | 25 ++++++++++++++++++++--- 3 files changed, 65 insertions(+), 10 deletions(-) diff --git a/pineappl_cli/src/channels.rs b/pineappl_cli/src/channels.rs index 4278eb5b6..2bb74c936 100644 --- a/pineappl_cli/src/channels.rs +++ b/pineappl_cli/src/channels.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvoluteMode}; +use super::helpers::{self, ConvFun, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::builder::TypedValueParser; @@ -7,7 +7,6 @@ use prettytable::{cell, Row}; use std::ops::RangeInclusive; use std::path::PathBuf; use std::process::ExitCode; -use std::slice; /// Shows the contribution for each partonic channel. #[derive(Parser)] @@ -15,9 +14,9 @@ pub struct Opts { /// Path to the input grid. #[arg(value_hint = ValueHint::FilePath)] input: PathBuf, - /// LHAPDF id or name of the PDF set. - #[arg(value_parser = helpers::parse_pdfset)] - pdfset: String, + /// LHAPDF ID(s) or name of the PDF(s)/FF(s). + #[arg(num_args = 1, required = true, value_delimiter = ',')] + conv_funs: Vec, /// Show absolute numbers of each contribution. 
#[arg(long, short)] absolute: bool, @@ -66,7 +65,7 @@ pub struct Opts { impl Subcommand for Opts { fn run(&self, cfg: &GlobalConfiguration) -> Result { let grid = helpers::read_grid(&self.input)?; - let mut pdf = helpers::create_pdf(&self.pdfset)?; + let mut conv_funs = helpers::create_conv_funs(&self.conv_funs)?; let mut channels: Vec<_> = self.channels.iter().cloned().flatten().collect(); channels.sort_unstable(); @@ -94,7 +93,7 @@ impl Subcommand for Opts { channel_mask[channel] = true; helpers::convolve( &grid, - slice::from_mut(&mut pdf), + &mut conv_funs, &self.orders, &[], &channel_mask, diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 6dd8f6c4f..8ec30e804 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -6,11 +6,48 @@ use pineappl::convolutions::LumiCache; use pineappl::grid::Grid; use prettytable::format::{FormatBuilder, LinePosition, LineSeparator}; use prettytable::Table; +use std::convert::Infallible; use std::fs::{File, OpenOptions}; use std::iter; use std::ops::RangeInclusive; use std::path::Path; use std::process::ExitCode; +use std::str::FromStr; + +#[derive(Clone)] +pub struct ConvFun { + lhapdf_name: String, + label: String, +} + +impl FromStr for ConvFun { + type Err = Infallible; + + fn from_str(arg: &str) -> std::result::Result { + Ok(arg.split_once('=').map_or_else( + || ConvFun { + lhapdf_name: arg.to_owned(), + label: arg.to_owned(), + }, + |(lhapdf_name, label)| ConvFun { + lhapdf_name: lhapdf_name.to_owned(), + label: label.to_owned(), + }, + )) + } +} + +pub fn create_conv_funs(funs: &[ConvFun]) -> Result> { + Ok(funs + .iter() + .map(|fun| { + fun.lhapdf_name.parse().map_or_else( + |_| Pdf::with_setname_and_nmem(&fun.lhapdf_name), + Pdf::with_lhaid, + ) + }) + .collect::>()?) 
+} pub fn create_pdf(pdf: &str) -> Result { let pdf = pdf.split_once('=').map_or(pdf, |(name, _)| name); diff --git a/pineappl_cli/tests/channels.rs b/pineappl_cli/tests/channels.rs index c6dcee11b..21db2f5f5 100644 --- a/pineappl_cli/tests/channels.rs +++ b/pineappl_cli/tests/channels.rs @@ -2,11 +2,11 @@ use assert_cmd::Command; const HELP_STR: &str = "Shows the contribution for each partonic channel -Usage: pineappl channels [OPTIONS] +Usage: pineappl channels [OPTIONS] ... Arguments: - Path to the input grid - LHAPDF id or name of the PDF set + Path to the input grid + ... LHAPDF ID(s) or name of the PDF(s)/FF(s) Options: -a, --absolute Show absolute numbers of each contribution @@ -132,6 +132,14 @@ const DONT_SORT_STR: &str = "b etal c size c size c size c size c size 7 4 4.5 0 115.88 1 -7.29 2 0.01 3 -8.61 4 0.01 "; +const MISSING_CONV_FUN_STR: &str = "error: the following required arguments were not provided: + ... + +Usage: pineappl channels ... + +For more information, try '--help'. +"; + #[test] fn help() { Command::cargo_bin("pineappl") @@ -156,6 +164,17 @@ fn default() { .stdout(DEFAULT_STR); } +#[test] +fn missing_conv_fun() { + Command::cargo_bin("pineappl") + .unwrap() + .args(["channels", "../test-data/LHCB_WP_7TEV.pineappl.lz4"]) + .assert() + .failure() + .stderr(MISSING_CONV_FUN_STR) + .stdout(""); +} + #[test] fn absolute() { Command::cargo_bin("pineappl") From e9f038938f89fe5ab8e970c36c3c73938fb29666 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 10 Jun 2024 11:15:08 +0200 Subject: [PATCH 141/179] Remove erroneous closing parenthesis --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6e98a6ecc..9c864c73b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -50,7 +50,7 @@ increasing the MSRV make sure to set it everywhere to the same value: - if you write a test that needs test data (grids, EKOs, etc.) store them at . 
Ask one of the maintainers - to upload the data for you if you don't have access to this location). Then + to upload the data for you if you don't have access to this location. Then add a line to `maintainer/generate-coverage.sh` that downloads the data with `wget` and a similar line to `.github/workflows/rust.yml` that downloads the data with `curl`. To make Github refresh the cached test data when running From 98de0bac794300be629b398906e3eae768f25407 Mon Sep 17 00:00:00 2001 From: t7phy Date: Mon, 10 Jun 2024 23:17:33 +0200 Subject: [PATCH 142/179] add two conv funcs support in convolve_scales --- pineappl_cli/src/helpers.rs | 50 ++++++++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 3 deletions(-) diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 8ec30e804..0463bbc13 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -186,7 +186,7 @@ pub fn convolve_scales( let mut results = match conv_funs { [fun] => { // if the field 'Particle' is missing we assume it's a proton PDF - let pdf_pdg_id = fun + let pdg_id = fun .set() .entry("Particle") .map_or(Ok(2212), |string| string.parse::()) @@ -207,11 +207,55 @@ pub fn convolve_scales( } }; - let mut cache = LumiCache::with_one(pdf_pdg_id, &mut fun, &mut alphas); + let mut cache = LumiCache::with_one(pdg_id, &mut fun, &mut alphas); + + grid.convolve(&mut cache, &orders, bins, channels, scales) + } + [fun1, fun2] => { + let pdg_id1 = fun1 + .set() + .entry("Particle") + .map_or(Ok(2212), |string| string.parse::()) + .unwrap(); + + let pdg_id2 = fun2 + .set() + .entry("Particle") + .map_or(Ok(2212), |string| string.parse::()) + .unwrap(); + + if cfg.force_positive { + fun1.set_force_positive(1); + fun2.set_force_positive(1); + } + + let x_max1 = fun1.x_max(); + let x_min1 = fun1.x_min(); + let mut alphas1 = |q2| fun1.alphas_q2(q2); + let mut fun1 = |id, x, q2| { + if !cfg.allow_extrapolation && (x < x_min1 || x > x_max1) { + 0.0 + } else { + 
fun1.xfx_q2(id, x, q2) + } + }; + let x_max2 = fun2.x_max(); + let x_min2 = fun2.x_min(); + // is the following line needed? + // let mut alphas2 = |q2| fun2.alphas_q2(q2); + let mut fun2 = |id, x, q2| { + if !cfg.allow_extrapolation && (x < x_min2 || x > x_max2) { + 0.0 + } else { + fun2.xfx_q2(id, x, q2) + } + }; + + let mut cache = + LumiCache::with_two(pdg_id1, &mut fun1, pdg_id2, &mut fun2, &mut alphas1); grid.convolve(&mut cache, &orders, bins, channels, scales) } - [_fun0, _fun1] => todo!(), _ => unimplemented!(), }; From ed4cbc22256c9bd5c9f94c1a6309336f83991c9d Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 11 Jun 2024 15:19:38 +0200 Subject: [PATCH 143/179] Make `helpers::convolve_scales` a bit more generic --- pineappl_cli/src/helpers.rs | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 0463bbc13..bb1fe3200 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -183,6 +183,10 @@ pub fn convolve_scales( }) .collect(); + for fun in conv_funs.iter_mut() { + fun.set_force_positive(1); + } + let mut results = match conv_funs { [fun] => { // if the field 'Particle' is missing we assume it's a proton PDF @@ -192,10 +196,6 @@ pub fn convolve_scales( .map_or(Ok(2212), |string| string.parse::()) .unwrap(); - if cfg.force_positive { - fun.set_force_positive(1); - } - let x_max = fun.x_max(); let x_min = fun.x_min(); let mut alphas = |q2| fun.alphas_q2(q2); @@ -224,11 +224,6 @@ pub fn convolve_scales( .map_or(Ok(2212), |string| string.parse::()) .unwrap(); - if cfg.force_positive { - fun1.set_force_positive(1); - fun2.set_force_positive(1); - } - let x_max1 = fun1.x_max(); let x_min1 = fun1.x_min(); let mut alphas1 = |q2| fun1.alphas_q2(q2); From 5148a6c8d7dd7785bc4f30f695ab400261b9c3c1 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 11 Jun 2024 15:41:01 +0200 Subject: [PATCH 144/179] Fix bug from commit ed4cbc2 --- 
pineappl_cli/src/helpers.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index bb1fe3200..5b7e68484 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -183,8 +183,10 @@ pub fn convolve_scales( }) .collect(); - for fun in conv_funs.iter_mut() { - fun.set_force_positive(1); + if cfg.force_positive { + for fun in conv_funs.iter_mut() { + fun.set_force_positive(1); + } } let mut results = match conv_funs { From f69ad05deb5c5134ec488979fa44873e0ac0633e Mon Sep 17 00:00:00 2001 From: t7phy Date: Thu, 13 Jun 2024 00:03:31 +0200 Subject: [PATCH 145/179] add flag to choose the pdfset from which alphas is taken --- pineappl_cli/src/lib.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pineappl_cli/src/lib.rs b/pineappl_cli/src/lib.rs index d02e045b9..57d815b85 100644 --- a/pineappl_cli/src/lib.rs +++ b/pineappl_cli/src/lib.rs @@ -38,6 +38,9 @@ pub struct GlobalConfiguration { /// Allow extrapolation of PDFs outside their region of validity. #[arg(long)] pub allow_extrapolation: bool, + /// Choose the PDF/FF set for alpha_s. + #[arg(long)] + pub use_alphas_from: usize } #[enum_dispatch] From cc01da96419cbbbfcd6dbc7a6346d85402d91f20 Mon Sep 17 00:00:00 2001 From: t7phy Date: Thu, 13 Jun 2024 00:04:35 +0200 Subject: [PATCH 146/179] add flag to choose the pdfset from which alphas is taken --- pineappl_cli/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pineappl_cli/src/lib.rs b/pineappl_cli/src/lib.rs index 57d815b85..4980dae44 100644 --- a/pineappl_cli/src/lib.rs +++ b/pineappl_cli/src/lib.rs @@ -40,7 +40,7 @@ pub struct GlobalConfiguration { pub allow_extrapolation: bool, /// Choose the PDF/FF set for alpha_s. 
#[arg(long)] - pub use_alphas_from: usize + pub use_alphas_from: usize, } #[enum_dispatch] From 644677b858615f22afb1fbda2dc00518b818e6c3 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 13 Jun 2024 09:18:36 +0200 Subject: [PATCH 147/179] Add instructions for how to update the container --- CONTRIBUTING.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9c864c73b..8299de98e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -81,3 +81,14 @@ This will take care of almost everything: the C, Python and Rust interfaces and their documentation. After some time also a new [Conda package](https://github.com/conda-forge/pineappl-feedstock) will be generated, for which the pull request will have to be accepted manually though. + +## Updating the CI's container + +To update the software the CI runs with, modify the files in +`maintainer/pineappl-ci`. See `maintainer/README.md` for a description of what +these files do. To generate a new container, you need to manually run the +[Container action] from the branch in which you modified the container files. +After the container has been generated, all following commits in *every* branch +will use the new container. + +[Container action]: https://github.com/NNPDF/pineappl/actions/workflows/container.yml From 756dc97a2dc71d340c860a729c2f58606a24304d Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 13 Jun 2024 09:21:29 +0200 Subject: [PATCH 148/179] Adjust formatting of `CONTRIBUTING.md` a bit --- CONTRIBUTING.md | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8299de98e..f24254d59 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -10,8 +10,9 @@ - Make sure not to use Rust features newer than the specified minimum supported Rust Version (MSRV), which is documented in the [README](README.md). You can use `cargo-msrv` to check the crates. However, the Github CI also checks this. 
-- Make sure to follow the [Rust API - Guidelines](https://rust-lang.github.io/api-guidelines/checklist.html) +- Make sure to follow the [Rust API Guidelines] + +[Rust API Guidelines]: https://rust-lang.github.io/api-guidelines/checklist.html ### Increasing the minimum supported Rust version (MSRV) @@ -74,13 +75,14 @@ In the `maintainers` directory run ./make_release 0.5.4 and replace `0.5.4` with a version string, *not* including `v` at the start. -The version strings must adhere to [Semantic -Versioning](https://semver.org/spec/v2.0.0.html). +The version strings must adhere to [Semantic Versioning]. This will take care of almost everything: the C, Python and Rust interfaces and -their documentation. After some time also a new [Conda -package](https://github.com/conda-forge/pineappl-feedstock) will be generated, -for which the pull request will have to be accepted manually though. +their documentation. After some time also a new [Conda package] will be +generated, for which the pull request will have to be accepted manually though. 
+ +[Semantic Versioning]: https://semver.org/spec/v2.0.0.html +[Conda package]: https://github.com/conda-forge/pineappl-feedstock ## Updating the CI's container From f47b8083a3fc10acc434e07675c1f73b91267769 Mon Sep 17 00:00:00 2001 From: t7phy Date: Thu, 13 Jun 2024 13:46:53 +0200 Subject: [PATCH 149/179] allow users to choose the conv_func for alphas --- pineappl_cli/src/helpers.rs | 20 ++++++++++++++------ pineappl_cli/src/lib.rs | 2 +- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 5b7e68484..4a503f12f 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -228,7 +228,18 @@ pub fn convolve_scales( let x_max1 = fun1.x_max(); let x_min1 = fun1.x_min(); - let mut alphas1 = |q2| fun1.alphas_q2(q2); + let x_max2 = fun2.x_max(); + let x_min2 = fun2.x_min(); + + let mut alphas = |q2| { + if cfg.use_alphas_from == 1 { + fun1.alphas_q2(q2) + } else if cfg.use_alphas_from == 2 { + fun2.alphas_q2(q2) + } else { + panic!("Invalid value for use_alphas_from, please use '1' or '2'") + } + }; let mut fun1 = |id, x, q2| { if !cfg.allow_extrapolation && (x < x_min1 || x > x_max1) { 0.0 @@ -236,10 +247,7 @@ pub fn convolve_scales( fun1.xfx_q2(id, x, q2) } }; - let x_max2 = fun2.x_max(); - let x_min2 = fun2.x_min(); - // is the following line needed? 
- // let mut alphas2 = |q2| fun2.alphas_q2(q2); + let mut fun2 = |id, x, q2| { if !cfg.allow_extrapolation && (x < x_min2 || x > x_max2) { 0.0 @@ -249,7 +257,7 @@ pub fn convolve_scales( }; let mut cache = - LumiCache::with_two(pdg_id1, &mut fun1, pdg_id2, &mut fun2, &mut alphas1); + LumiCache::with_two(pdg_id1, &mut fun1, pdg_id2, &mut fun2, &mut alphas); grid.convolve(&mut cache, &orders, bins, channels, scales) } diff --git a/pineappl_cli/src/lib.rs b/pineappl_cli/src/lib.rs index 4980dae44..9fee723c8 100644 --- a/pineappl_cli/src/lib.rs +++ b/pineappl_cli/src/lib.rs @@ -39,7 +39,7 @@ pub struct GlobalConfiguration { #[arg(long)] pub allow_extrapolation: bool, /// Choose the PDF/FF set for alpha_s. - #[arg(long)] + #[arg(default_value = "1", long)] pub use_alphas_from: usize, } From 044c7741df30145cb859b83878c03c734bba4e26 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 13 Jun 2024 15:29:58 +0200 Subject: [PATCH 150/179] Use zero-based indexing for `use_alphas_from` parameter --- pineappl_cli/src/helpers.rs | 18 ++++++++++-------- pineappl_cli/src/lib.rs | 4 ++-- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 4a503f12f..c8a94043e 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -191,6 +191,9 @@ pub fn convolve_scales( let mut results = match conv_funs { [fun] => { + // there's only one convolution function from which we can use the strong coupling + assert_eq!(cfg.use_alphas_from, 0); + // if the field 'Particle' is missing we assume it's a proton PDF let pdg_id = fun .set() @@ -231,14 +234,13 @@ pub fn convolve_scales( let x_max2 = fun2.x_max(); let x_min2 = fun2.x_min(); - let mut alphas = |q2| { - if cfg.use_alphas_from == 1 { - fun1.alphas_q2(q2) - } else if cfg.use_alphas_from == 2 { - fun2.alphas_q2(q2) - } else { - panic!("Invalid value for use_alphas_from, please use '1' or '2'") - } + let mut alphas = |q2| match cfg.use_alphas_from 
{ + 0 => fun1.alphas_q2(q2), + 1 => fun2.alphas_q2(q2), + _ => panic!( + "expected `use_alphas_from` to be `0` or `1`, is {}", + cfg.use_alphas_from + ), }; let mut fun1 = |id, x, q2| { if !cfg.allow_extrapolation && (x < x_min1 || x > x_max1) { diff --git a/pineappl_cli/src/lib.rs b/pineappl_cli/src/lib.rs index 9fee723c8..fdcabb0dc 100644 --- a/pineappl_cli/src/lib.rs +++ b/pineappl_cli/src/lib.rs @@ -38,8 +38,8 @@ pub struct GlobalConfiguration { /// Allow extrapolation of PDFs outside their region of validity. #[arg(long)] pub allow_extrapolation: bool, - /// Choose the PDF/FF set for alpha_s. - #[arg(default_value = "1", long)] + /// Choose the PDF/FF set for the strong coupling. + #[arg(default_value = "0", long)] pub use_alphas_from: usize, } From be221cdef3cc1ad093bf4a7f61f424a2bb376c38 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sat, 15 Jun 2024 09:39:28 +0200 Subject: [PATCH 151/179] Use new struct `ConvFun` in most subcommands --- pineappl_cli/src/channels.rs | 2 +- pineappl_cli/src/diff.rs | 19 ++++++++-------- pineappl_cli/src/evolve.rs | 21 +++++++++--------- pineappl_cli/src/export.rs | 25 ++++++++++----------- pineappl_cli/src/export/applgrid.rs | 8 +++++-- pineappl_cli/src/helpers.rs | 4 ++-- pineappl_cli/src/import.rs | 34 +++++++++++++++-------------- pineappl_cli/src/import/applgrid.rs | 8 +++++-- pineappl_cli/src/lib.rs | 2 +- pineappl_cli/src/orders.rs | 13 +++++------ pineappl_cli/tests/channels.rs | 2 +- pineappl_cli/tests/diff.rs | 8 +++---- pineappl_cli/tests/evolve.rs | 10 ++++----- pineappl_cli/tests/export.rs | 8 +++---- pineappl_cli/tests/import.rs | 16 +++++++------- pineappl_cli/tests/main.rs | 11 +++++----- pineappl_cli/tests/orders.rs | 6 ++--- 17 files changed, 102 insertions(+), 95 deletions(-) diff --git a/pineappl_cli/src/channels.rs b/pineappl_cli/src/channels.rs index 2bb74c936..eb0598206 100644 --- a/pineappl_cli/src/channels.rs +++ b/pineappl_cli/src/channels.rs @@ -14,7 +14,7 @@ pub struct Opts { /// Path to 
the input grid. #[arg(value_hint = ValueHint::FilePath)] input: PathBuf, - /// LHAPDF ID(s) or name of the PDF(s)/FF(s). + /// LHAPDF ID(s) or name(s) of the PDF(s)/FF(s). #[arg(num_args = 1, required = true, value_delimiter = ',')] conv_funs: Vec, /// Show absolute numbers of each contribution. diff --git a/pineappl_cli/src/diff.rs b/pineappl_cli/src/diff.rs index 7e111e867..f8ca45f3b 100644 --- a/pineappl_cli/src/diff.rs +++ b/pineappl_cli/src/diff.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvoluteMode}; +use super::helpers::{self, ConvFun, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::{bail, Result}; use clap::{Parser, ValueHint}; @@ -6,7 +6,6 @@ use prettytable::{cell, Row}; use std::collections::HashSet; use std::path::PathBuf; use std::process::ExitCode; -use std::slice; /// Compares the numerical content of two grids with each other. #[derive(Parser)] @@ -17,9 +16,9 @@ pub struct Opts { /// Path to the second grid. #[arg(value_hint = ValueHint::FilePath)] input2: PathBuf, - /// LHAPDF id or name of the PDF set. - #[arg(value_parser = helpers::parse_pdfset)] - pdfset: String, + /// LHAPDF ID(s) or name(s) of the PDF(s)/FF(s). + #[arg(num_args = 1, required = true, value_delimiter = ',')] + conv_funs: Vec, /// Ignore differences in the orders and sum them. 
#[arg(long)] ignore_orders: bool, @@ -128,7 +127,7 @@ impl Subcommand for Opts { bail!("channels differ"); } - let mut pdf = helpers::create_pdf(&self.pdfset)?; + let mut conv_funs = helpers::create_conv_funs(&self.conv_funs)?; let mut table = helpers::create_table(); let mut title = Row::empty(); @@ -150,7 +149,7 @@ impl Subcommand for Opts { let results1 = helpers::convolve( &grid1, - slice::from_mut(&mut pdf), + &mut conv_funs, &orders1, &[], &[], @@ -160,7 +159,7 @@ impl Subcommand for Opts { ); let results2 = helpers::convolve( &grid2, - slice::from_mut(&mut pdf), + &mut conv_funs, &orders2, &[], &[], @@ -206,7 +205,7 @@ impl Subcommand for Opts { .map(|&order| { helpers::convolve( &grid1, - slice::from_mut(&mut pdf), + &mut conv_funs, &[order], &[], &[], @@ -221,7 +220,7 @@ impl Subcommand for Opts { .map(|&order| { helpers::convolve( &grid2, - slice::from_mut(&mut pdf), + &mut conv_funs, &[order], &[], &[], diff --git a/pineappl_cli/src/evolve.rs b/pineappl_cli/src/evolve.rs index 71f4e1667..c79021f14 100644 --- a/pineappl_cli/src/evolve.rs +++ b/pineappl_cli/src/evolve.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvoluteMode}; +use super::helpers::{self, ConvFun, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::{anyhow, Result}; use clap::{Parser, ValueHint}; @@ -7,7 +7,6 @@ use pineappl::fk_table::FkTable; use pineappl::grid::Grid; use std::path::{Path, PathBuf}; use std::process::ExitCode; -use std::slice; #[cfg(feature = "evolve")] mod eko { @@ -426,7 +425,7 @@ mod eko { fn evolve_grid( grid: &Grid, eko: &Path, - pdf: &Pdf, + use_alphas_from: &Pdf, orders: &[(u32, u32)], xir: f64, xif: f64, @@ -447,7 +446,7 @@ fn evolve_grid( .collect(); let mut eko_slices = EkoSlices::new(eko)?; - let alphas_table = AlphasTable::from_grid(grid, xir, &|q2| pdf.alphas_q2(q2)); + let alphas_table = AlphasTable::from_grid(grid, xir, &|q2| use_alphas_from.alphas_q2(q2)); if use_old_evolve { if let EkoSlices::V0 { @@ -507,9 +506,9 @@ pub struct 
Opts { /// Path to the converted grid. #[arg(value_hint = ValueHint::FilePath)] output: PathBuf, - /// LHAPDF id or name of the PDF set to check the converted grid with. - #[arg(value_parser = helpers::parse_pdfset)] - pdfset: String, + /// LHAPDF ID(s) or name of the PDF(s)/FF(s). + #[arg(num_args = 1, required = true, value_delimiter = ',')] + conv_funs: Vec, /// Relative threshold between the table and the converted grid when comparison fails. #[arg(default_value = "1e-3", long)] accuracy: f64, @@ -543,10 +542,10 @@ impl Subcommand for Opts { use prettytable::row; let grid = helpers::read_grid(&self.input)?; - let mut pdf = helpers::create_pdf(&self.pdfset)?; + let mut conv_funs = helpers::create_conv_funs(&self.conv_funs)?; let results = helpers::convolve_scales( &grid, - slice::from_mut(&mut pdf), + &mut conv_funs, &self.orders, &[], &[], @@ -558,7 +557,7 @@ impl Subcommand for Opts { let fk_table = evolve_grid( &grid, &self.eko, - &pdf, + &conv_funs[cfg.use_alphas_from], &self.orders, self.xir, self.xif, @@ -566,7 +565,7 @@ impl Subcommand for Opts { )?; let evolved_results = helpers::convolve_scales( fk_table.grid(), - slice::from_mut(&mut pdf), + &mut conv_funs, &[], &[], &[], diff --git a/pineappl_cli/src/export.rs b/pineappl_cli/src/export.rs index 684abb434..bc7d7c9f8 100644 --- a/pineappl_cli/src/export.rs +++ b/pineappl_cli/src/export.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvoluteMode}; +use super::helpers::{self, ConvFun, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::{anyhow, Result}; use clap::builder::{PossibleValuesParser, TypedValueParser}; @@ -7,7 +7,6 @@ use pineappl::boc::Order; use pineappl::grid::Grid; use std::path::{Path, PathBuf}; use std::process::ExitCode; -use std::slice; #[cfg(feature = "applgrid")] mod applgrid; @@ -16,7 +15,7 @@ mod applgrid; fn convert_into_applgrid( output: &Path, grid: &Grid, - pdfset: &str, + conv_funs: &[ConvFun], member: usize, _: usize, discard_non_matching_scales: bool, 
@@ -25,7 +24,7 @@ fn convert_into_applgrid( let (mut applgrid, order_mask) = applgrid::convert_into_applgrid(grid, output, discard_non_matching_scales)?; - let results = applgrid::convolve_applgrid(applgrid.pin_mut(), pdfset, member); + let results = applgrid::convolve_applgrid(applgrid.pin_mut(), conv_funs, member); Ok(("APPLgrid", results, 1, order_mask)) } @@ -34,7 +33,7 @@ fn convert_into_applgrid( fn convert_into_applgrid( _: &Path, _: &Grid, - _: &str, + _: &[ConvFun], _: usize, _: usize, _: bool, @@ -47,7 +46,7 @@ fn convert_into_applgrid( fn convert_into_grid( output: &Path, grid: &Grid, - pdfset: &str, + conv_funs: &[ConvFun], member: usize, scales: usize, discard_non_matching_scales: bool, @@ -57,7 +56,7 @@ fn convert_into_grid( return convert_into_applgrid( output, grid, - pdfset, + conv_funs, member, scales, discard_non_matching_scales, @@ -77,9 +76,9 @@ pub struct Opts { /// Path to the converted grid. #[arg(value_hint = ValueHint::FilePath)] output: PathBuf, - /// LHAPDF id or name of the PDF set to check the converted grid with. - #[arg(value_parser = helpers::parse_pdfset)] - pdfset: String, + /// LHAPDF ID(s) or name of the PDF(s)/FF(s) to check the converted grid with. + #[arg(num_args = 1, required = true, value_delimiter = ',')] + conv_funs: Vec, /// Relative threshold between the table and the converted grid when comparison fails. 
#[arg(default_value = "1e-10", long)] accuracy: f64, @@ -112,7 +111,7 @@ impl Subcommand for Opts { let (grid_type, results, scale_variations, order_mask) = convert_into_grid( &self.output, &grid, - &self.pdfset, + &self.conv_funs, 0, self.scales, self.discard_non_matching_scales, @@ -156,10 +155,10 @@ impl Subcommand for Opts { if results.is_empty() { println!("file was converted, but we cannot check the conversion for this type"); } else { - let mut pdf = helpers::create_pdf(&self.pdfset)?; + let mut conv_funs = helpers::create_conv_funs(&self.conv_funs)?; let reference_results = helpers::convolve( &grid, - slice::from_mut(&mut pdf), + &mut conv_funs, &orders, &[], &[], diff --git a/pineappl_cli/src/export/applgrid.rs b/pineappl_cli/src/export/applgrid.rs index 869105f08..de7bb763b 100644 --- a/pineappl_cli/src/export/applgrid.rs +++ b/pineappl_cli/src/export/applgrid.rs @@ -1,3 +1,4 @@ +use super::helpers::ConvFun; use anyhow::{anyhow, bail, Result}; use cxx::{let_cxx_string, UniquePtr}; use float_cmp::approx_eq; @@ -302,12 +303,15 @@ pub fn convert_into_applgrid( } // TODO: deduplicate this function from import -pub fn convolve_applgrid(grid: Pin<&mut grid>, pdfset: &str, member: usize) -> Vec { +pub fn convolve_applgrid(grid: Pin<&mut grid>, conv_funs: &[ConvFun], member: usize) -> Vec { let nloops = grid.nloops(); + // TODO: add support for convolving an APPLgrid with two functions + assert_eq!(conv_funs.len(), 1); + ffi::grid_convolve( grid, - pdfset, + &conv_funs[0].lhapdf_name, member.try_into().unwrap(), nloops, 1.0, diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index c8a94043e..d08180e1b 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -1,5 +1,5 @@ use super::GlobalConfiguration; -use anyhow::{ensure, Context, Result}; +use anyhow::{anyhow, ensure, Context, Result}; use lhapdf::{Pdf, PdfSet}; use ndarray::Array3; use pineappl::convolutions::LumiCache; @@ -16,7 +16,7 @@ use std::str::FromStr; 
#[derive(Clone)] pub struct ConvFun { - lhapdf_name: String, + pub lhapdf_name: String, label: String, } diff --git a/pineappl_cli/src/import.rs b/pineappl_cli/src/import.rs index db1c8c681..fe5388877 100644 --- a/pineappl_cli/src/import.rs +++ b/pineappl_cli/src/import.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvoluteMode}; +use super::helpers::{self, ConvFun, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::{anyhow, Result}; use clap::builder::{PossibleValuesParser, TypedValueParser}; @@ -6,7 +6,6 @@ use clap::{Parser, ValueHint}; use pineappl::grid::Grid; use std::path::{Path, PathBuf}; use std::process::ExitCode; -use std::slice; #[cfg(feature = "applgrid")] mod applgrid; @@ -19,7 +18,7 @@ mod fktable; fn convert_applgrid( input: &Path, alpha: u32, - pdfset: &str, + conv_funs: &[ConvFun], member: usize, dis_pid: i32, _: usize, @@ -30,7 +29,7 @@ fn convert_applgrid( let mut grid = ffi::make_grid(input.to_str().unwrap())?; let pgrid = applgrid::convert_applgrid(grid.pin_mut(), alpha, dis_pid)?; - let results = applgrid::convolve_applgrid(grid.pin_mut(), pdfset, member); + let results = applgrid::convolve_applgrid(grid.pin_mut(), conv_funs, member); Ok(("APPLgrid", pgrid, results, 1)) } @@ -39,7 +38,7 @@ fn convert_applgrid( fn convert_applgrid( _: &Path, _: u32, - _: &str, + _: &[ConvFun], _: usize, _: i32, _: usize, @@ -53,7 +52,7 @@ fn convert_applgrid( fn convert_fastnlo( input: &Path, alpha: u32, - pdfset: &str, + conv_funs: &[ConvFun], member: usize, dis_pid: i32, scales: usize, @@ -63,9 +62,12 @@ fn convert_fastnlo( use pineappl_fastnlo::ffi; use std::ptr; + // TODO: convert this into an error? 
+ assert_eq!(conv_funs.len(), 1); + let mut file = ffi::make_fastnlo_lhapdf_with_name_file_set( input.to_str().unwrap(), - pdfset, + &conv_funs[0].lhapdf_name, member.try_into().unwrap(), ); @@ -148,7 +150,7 @@ fn convert_fktable(_: &Path, _: i32) -> Result<(&'static str, Grid, Vec, us fn convert_grid( input: &Path, alpha: u32, - pdfset: &str, + conv_funs: &[ConvFun], member: usize, dis_pid: i32, scales: usize, @@ -164,12 +166,12 @@ fn convert_grid( .map_or(false, |ext| ext == "tab")) { return convert_fastnlo( - input, alpha, pdfset, member, dis_pid, scales, fnlo_mur, fnlo_muf, + input, alpha, conv_funs, member, dis_pid, scales, fnlo_mur, fnlo_muf, ); } else if extension == "dat" { return convert_fktable(input, dis_pid); } else if extension == "appl" || extension == "root" { - return convert_applgrid(input, alpha, pdfset, member, dis_pid, scales); + return convert_applgrid(input, alpha, conv_funs, member, dis_pid, scales); } } @@ -211,9 +213,9 @@ pub struct Opts { /// Path to the converted grid. #[arg(value_hint = ValueHint::FilePath)] output: PathBuf, - /// LHAPDF id or name of the PDF set to check the converted grid with. - #[arg(value_parser = helpers::parse_pdfset)] - pdfset: String, + /// LHAPDF ID(s) or name of the PDF(s)/FF(s) to check the converted grid with. + #[arg(num_args = 1, required = true, value_delimiter = ',')] + conv_funs: Vec, /// LO coupling power in alpha. 
#[arg(default_value_t = 0, long)] alpha: u32, @@ -258,7 +260,7 @@ impl Subcommand for Opts { let (grid_type, mut grid, reference_results, scale_variations) = convert_grid( &self.input, self.alpha, - &self.pdfset, + &self.conv_funs, 0, self.dis_pid, self.scales, @@ -275,10 +277,10 @@ impl Subcommand for Opts { if reference_results.is_empty() { println!("file was converted, but we cannot check the conversion for this type"); } else { - let mut pdf = helpers::create_pdf(&self.pdfset)?; + let mut conv_funs = helpers::create_conv_funs(&self.conv_funs)?; let results = helpers::convolve( &grid, - slice::from_mut(&mut pdf), + &mut conv_funs, &[], &[], &[], diff --git a/pineappl_cli/src/import/applgrid.rs b/pineappl_cli/src/import/applgrid.rs index 76baff863..025af06e4 100644 --- a/pineappl_cli/src/import/applgrid.rs +++ b/pineappl_cli/src/import/applgrid.rs @@ -1,3 +1,4 @@ +use super::helpers::ConvFun; use anyhow::Result; use pineappl::boc::{Channel, Order}; use pineappl::convolutions::Convolution; @@ -245,12 +246,15 @@ pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, dis_pid: i32) -> Resul Ok(grid0) } -pub fn convolve_applgrid(grid: Pin<&mut grid>, pdfset: &str, member: usize) -> Vec { +pub fn convolve_applgrid(grid: Pin<&mut grid>, conv_funs: &[ConvFun], member: usize) -> Vec { let nloops = grid.nloops(); + // TODO: add support for convolving an APPLgrid with two functions + assert_eq!(conv_funs.len(), 1); + ffi::grid_convolve( grid, - pdfset, + &conv_funs[0].lhapdf_name, member.try_into().unwrap(), nloops, 1.0, diff --git a/pineappl_cli/src/lib.rs b/pineappl_cli/src/lib.rs index fdcabb0dc..04ee204bf 100644 --- a/pineappl_cli/src/lib.rs +++ b/pineappl_cli/src/lib.rs @@ -39,7 +39,7 @@ pub struct GlobalConfiguration { #[arg(long)] pub allow_extrapolation: bool, /// Choose the PDF/FF set for the strong coupling. 
- #[arg(default_value = "0", long)] + #[arg(default_value = "0", long, value_name = "IDX")] pub use_alphas_from: usize, } diff --git a/pineappl_cli/src/orders.rs b/pineappl_cli/src/orders.rs index ef4840863..d5346e730 100644 --- a/pineappl_cli/src/orders.rs +++ b/pineappl_cli/src/orders.rs @@ -1,11 +1,10 @@ -use super::helpers::{self, ConvoluteMode}; +use super::helpers::{self, ConvFun, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::{Parser, ValueHint}; use prettytable::{cell, Row}; use std::path::PathBuf; use std::process::ExitCode; -use std::slice; /// Shows the predictions for all bin for each order separately. #[derive(Parser)] @@ -13,9 +12,9 @@ pub struct Opts { /// Path to the input grid. #[arg(value_hint = ValueHint::FilePath)] input: PathBuf, - /// LHAPDF id or name of the PDF set. - #[arg(value_parser = helpers::parse_pdfset)] - pdfset: String, + /// LHAPDF ID(s) or name(s) of the PDF(s)/FF(s). + #[arg(num_args = 1, required = true, value_delimiter = ',')] + conv_funs: Vec, /// Show absolute numbers of each perturbative order. #[arg(long, short)] absolute: bool, @@ -43,7 +42,7 @@ pub struct Opts { impl Subcommand for Opts { fn run(&self, cfg: &GlobalConfiguration) -> Result { let grid = helpers::read_grid(&self.input)?; - let mut pdf = helpers::create_pdf(&self.pdfset)?; + let mut conv_funs = helpers::create_conv_funs(&self.conv_funs)?; let mut orders: Vec<_> = grid .orders() @@ -67,7 +66,7 @@ impl Subcommand for Opts { .map(|order| { helpers::convolve( &grid, - slice::from_mut(&mut pdf), + &mut conv_funs, &[(order.alphas, order.alpha)], &[], &[], diff --git a/pineappl_cli/tests/channels.rs b/pineappl_cli/tests/channels.rs index 21db2f5f5..9ef0bfcca 100644 --- a/pineappl_cli/tests/channels.rs +++ b/pineappl_cli/tests/channels.rs @@ -6,7 +6,7 @@ Usage: pineappl channels [OPTIONS] ... Arguments: Path to the input grid - ... LHAPDF ID(s) or name of the PDF(s)/FF(s) + ... 
LHAPDF ID(s) or name(s) of the PDF(s)/FF(s) Options: -a, --absolute Show absolute numbers of each contribution diff --git a/pineappl_cli/tests/diff.rs b/pineappl_cli/tests/diff.rs index cba201448..b8213ba26 100644 --- a/pineappl_cli/tests/diff.rs +++ b/pineappl_cli/tests/diff.rs @@ -3,12 +3,12 @@ use assert_fs::NamedTempFile; const HELP_STR: &str = "Compares the numerical content of two grids with each other -Usage: pineappl diff [OPTIONS] +Usage: pineappl diff [OPTIONS] ... Arguments: - Path to the first grid - Path to the second grid - LHAPDF id or name of the PDF set + Path to the first grid + Path to the second grid + ... LHAPDF ID(s) or name(s) of the PDF(s)/FF(s) Options: --ignore-orders Ignore differences in the orders and sum them diff --git a/pineappl_cli/tests/evolve.rs b/pineappl_cli/tests/evolve.rs index 5173c15b5..03920c708 100644 --- a/pineappl_cli/tests/evolve.rs +++ b/pineappl_cli/tests/evolve.rs @@ -5,13 +5,13 @@ use assert_fs::NamedTempFile; const HELP_STR: &str = "Evolve a grid with an evolution kernel operator to an FK table -Usage: pineappl evolve [OPTIONS] +Usage: pineappl evolve [OPTIONS] ... Arguments: - Path to the input grid - Path to the evolution kernel operator - Path to the converted grid - LHAPDF id or name of the PDF set to check the converted grid with + Path to the input grid + Path to the evolution kernel operator + Path to the converted grid + ... LHAPDF ID(s) or name of the PDF(s)/FF(s) Options: --accuracy Relative threshold between the table and the converted grid when comparison fails [default: 1e-3] diff --git a/pineappl_cli/tests/export.rs b/pineappl_cli/tests/export.rs index 392c3dc06..ec74a1e26 100644 --- a/pineappl_cli/tests/export.rs +++ b/pineappl_cli/tests/export.rs @@ -5,12 +5,12 @@ use assert_fs::NamedTempFile; const HELP_STR: &str = "Converts PineAPPL grids to APPLgrid files -Usage: pineappl export [OPTIONS] +Usage: pineappl export [OPTIONS] ... 
Arguments: - Path to the input grid - Path to the converted grid - LHAPDF id or name of the PDF set to check the converted grid with + Path to the input grid + Path to the converted grid + ... LHAPDF ID(s) or name of the PDF(s)/FF(s) to check the converted grid with Options: --accuracy Relative threshold between the table and the converted grid when comparison fails [default: 1e-10] diff --git a/pineappl_cli/tests/import.rs b/pineappl_cli/tests/import.rs index f5964780c..4d420eb28 100644 --- a/pineappl_cli/tests/import.rs +++ b/pineappl_cli/tests/import.rs @@ -6,12 +6,12 @@ use assert_fs::NamedTempFile; #[cfg(feature = "fastnlo")] const HELP_STR: &str = "Converts APPLgrid/fastNLO/FastKernel files to PineAPPL grids -Usage: pineappl import [OPTIONS] +Usage: pineappl import [OPTIONS] ... Arguments: - Path to the input grid - Path to the converted grid - LHAPDF id or name of the PDF set to check the converted grid with + Path to the input grid + Path to the converted grid + ... LHAPDF ID(s) or name of the PDF(s)/FF(s) to check the converted grid with Options: --alpha LO coupling power in alpha [default: 0] @@ -29,12 +29,12 @@ Options: #[cfg(not(feature = "fastnlo"))] const HELP_STR: &str = "Converts APPLgrid/fastNLO/FastKernel files to PineAPPL grids -Usage: pineappl import [OPTIONS] +Usage: pineappl import [OPTIONS] ... Arguments: - Path to the input grid - Path to the converted grid - LHAPDF id or name of the PDF set to check the converted grid with + Path to the input grid + Path to the converted grid + ... 
LHAPDF ID(s) or name of the PDF(s)/FF(s) to check the converted grid with Options: --alpha LO coupling power in alpha [default: 0] diff --git a/pineappl_cli/tests/main.rs b/pineappl_cli/tests/main.rs index 00d428172..19191f15c 100644 --- a/pineappl_cli/tests/main.rs +++ b/pineappl_cli/tests/main.rs @@ -23,11 +23,12 @@ Commands: write Write a grid modified by various operations Options: - --lhapdf-banner Allow LHAPDF to print banners - --force-positive Forces negative PDF values to zero - --allow-extrapolation Allow extrapolation of PDFs outside their region of validity - -h, --help Print help - -V, --version Print version + --lhapdf-banner Allow LHAPDF to print banners + --force-positive Forces negative PDF values to zero + --allow-extrapolation Allow extrapolation of PDFs outside their region of validity + --use-alphas-from Choose the PDF/FF set for the strong coupling [default: 0] + -h, --help Print help + -V, --version Print version "; #[test] diff --git a/pineappl_cli/tests/orders.rs b/pineappl_cli/tests/orders.rs index a159c3c4c..80ab1f6fe 100644 --- a/pineappl_cli/tests/orders.rs +++ b/pineappl_cli/tests/orders.rs @@ -2,11 +2,11 @@ use assert_cmd::Command; const HELP_STR: &str = "Shows the predictions for all bin for each order separately -Usage: pineappl orders [OPTIONS] +Usage: pineappl orders [OPTIONS] ... Arguments: - Path to the input grid - LHAPDF id or name of the PDF set + Path to the input grid + ... 
LHAPDF ID(s) or name(s) of the PDF(s)/FF(s) Options: -a, --absolute Show absolute numbers of each perturbative order From cfcf02711cebb49028327012a4d31f9a7dfabecc Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 17 Jun 2024 09:02:41 +0200 Subject: [PATCH 152/179] Migrate `analyze` subcommand to use `ConvFun` --- pineappl_cli/src/analyze.rs | 16 ++++++++-------- pineappl_cli/src/helpers.rs | 13 ++++++++++++- pineappl_cli/tests/analyze.rs | 4 ++-- 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/pineappl_cli/src/analyze.rs b/pineappl_cli/src/analyze.rs index 134556b11..62a6c99f1 100644 --- a/pineappl_cli/src/analyze.rs +++ b/pineappl_cli/src/analyze.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvoluteMode}; +use super::helpers::{self, ConvoluteMode, VecConvFun}; use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::builder::TypedValueParser; @@ -6,7 +6,6 @@ use clap::{value_parser, Parser, ValueHint}; use prettytable::{cell, Row}; use std::path::PathBuf; use std::process::ExitCode; -use std::slice; /// Perform various analyses with grids. #[derive(Parser)] @@ -40,9 +39,10 @@ pub struct CkfOpts { /// Path to the input grid. #[arg(value_hint = ValueHint::FilePath)] input: PathBuf, - /// LHAPDF id or name of the PDF set. - #[arg(value_parser = helpers::parse_pdfset)] - pdfset: String, + /// LHAPDF ID(s) or name(s) of the PDF(s)/FF(s). + #[arg(value_parser = helpers::parse_conv_funs)] + // TODO: it would be better to use `Vec`, but this consumes all following arguments + conv_funs: VecConvFun, /// Order defining the K factors. 
#[arg(value_parser = helpers::parse_order)] order: (u32, u32), @@ -66,7 +66,7 @@ pub struct CkfOpts { impl Subcommand for CkfOpts { fn run(&self, cfg: &GlobalConfiguration) -> Result { let grid = helpers::read_grid(&self.input)?; - let mut pdf = helpers::create_pdf(&self.pdfset)?; + let mut conv_funs = helpers::create_conv_funs(&self.conv_funs.0)?; let orders_den = if self.orders_den.is_empty() { grid.orders() @@ -88,7 +88,7 @@ impl Subcommand for CkfOpts { lumi_mask[lumi] = true; helpers::convolve( &grid, - slice::from_mut(&mut pdf), + &mut conv_funs, &[self.order], &[], &lumi_mask, @@ -104,7 +104,7 @@ impl Subcommand for CkfOpts { lumi_mask[lumi] = true; helpers::convolve( &grid, - slice::from_mut(&mut pdf), + &mut conv_funs, &orders_den, &[], &lumi_mask, diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index d08180e1b..b8ac6523e 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -1,5 +1,5 @@ use super::GlobalConfiguration; -use anyhow::{anyhow, ensure, Context, Result}; +use anyhow::{ensure, Context, Result}; use lhapdf::{Pdf, PdfSet}; use ndarray::Array3; use pineappl::convolutions::LumiCache; @@ -37,6 +37,17 @@ impl FromStr for ConvFun { } } +#[derive(Clone)] +pub struct VecConvFun(pub Vec); + +pub fn parse_conv_funs(arg: &str) -> std::result::Result { + Ok(VecConvFun( + arg.split(',') + .map(|conv_fun| ConvFun::from_str(conv_fun).map_err(|err| format!("{err}"))) + .collect::>()?, + )) +} + pub fn create_conv_funs(funs: &[ConvFun]) -> Result> { Ok(funs .iter() diff --git a/pineappl_cli/tests/analyze.rs b/pineappl_cli/tests/analyze.rs index a84718372..8c9454ff2 100644 --- a/pineappl_cli/tests/analyze.rs +++ b/pineappl_cli/tests/analyze.rs @@ -13,11 +13,11 @@ Options: const CKF_HELP_STR: &str = "Compare K-factors with channel K factors (ckf) -Usage: pineappl analyze ckf [OPTIONS] [ORDERS_DEN]... +Usage: pineappl analyze ckf [OPTIONS] [ORDERS_DEN]... 
Arguments: Path to the input grid - LHAPDF id or name of the PDF set + LHAPDF ID(s) or name(s) of the PDF(s)/FF(s) Order defining the K factors [ORDERS_DEN]... Normalizing orders of the K factors From a507af80c939eaf04e213c3fb80d43efcd290dad Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 17 Jun 2024 09:50:25 +0200 Subject: [PATCH 153/179] Replace `ConvFun` with `ConvFuns` --- pineappl_cli/src/analyze.rs | 8 +++--- pineappl_cli/src/channels.rs | 5 ++-- pineappl_cli/src/diff.rs | 5 ++-- pineappl_cli/src/evolve.rs | 5 ++-- pineappl_cli/src/export.rs | 11 ++++---- pineappl_cli/src/export/applgrid.rs | 8 +++--- pineappl_cli/src/helpers.rs | 39 +++++++++++------------------ pineappl_cli/src/import.rs | 19 +++++++------- pineappl_cli/src/import/applgrid.rs | 8 +++--- pineappl_cli/src/orders.rs | 5 ++-- pineappl_cli/tests/channels.rs | 10 ++++---- pineappl_cli/tests/diff.rs | 8 +++--- pineappl_cli/tests/evolve.rs | 10 ++++---- pineappl_cli/tests/export.rs | 8 +++--- pineappl_cli/tests/import.rs | 16 ++++++------ pineappl_cli/tests/orders.rs | 6 ++--- 16 files changed, 76 insertions(+), 95 deletions(-) diff --git a/pineappl_cli/src/analyze.rs b/pineappl_cli/src/analyze.rs index 62a6c99f1..3c644c487 100644 --- a/pineappl_cli/src/analyze.rs +++ b/pineappl_cli/src/analyze.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvoluteMode, VecConvFun}; +use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::builder::TypedValueParser; @@ -40,9 +40,7 @@ pub struct CkfOpts { #[arg(value_hint = ValueHint::FilePath)] input: PathBuf, /// LHAPDF ID(s) or name(s) of the PDF(s)/FF(s). - #[arg(value_parser = helpers::parse_conv_funs)] - // TODO: it would be better to use `Vec`, but this consumes all following arguments - conv_funs: VecConvFun, + conv_funs: ConvFuns, /// Order defining the K factors. 
#[arg(value_parser = helpers::parse_order)] order: (u32, u32), @@ -66,7 +64,7 @@ pub struct CkfOpts { impl Subcommand for CkfOpts { fn run(&self, cfg: &GlobalConfiguration) -> Result { let grid = helpers::read_grid(&self.input)?; - let mut conv_funs = helpers::create_conv_funs(&self.conv_funs.0)?; + let mut conv_funs = helpers::create_conv_funs(&self.conv_funs)?; let orders_den = if self.orders_den.is_empty() { grid.orders() diff --git a/pineappl_cli/src/channels.rs b/pineappl_cli/src/channels.rs index eb0598206..53173a1e6 100644 --- a/pineappl_cli/src/channels.rs +++ b/pineappl_cli/src/channels.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvFun, ConvoluteMode}; +use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::builder::TypedValueParser; @@ -15,8 +15,7 @@ pub struct Opts { #[arg(value_hint = ValueHint::FilePath)] input: PathBuf, /// LHAPDF ID(s) or name(s) of the PDF(s)/FF(s). - #[arg(num_args = 1, required = true, value_delimiter = ',')] - conv_funs: Vec, + conv_funs: ConvFuns, /// Show absolute numbers of each contribution. #[arg(long, short)] absolute: bool, diff --git a/pineappl_cli/src/diff.rs b/pineappl_cli/src/diff.rs index f8ca45f3b..4e6e48585 100644 --- a/pineappl_cli/src/diff.rs +++ b/pineappl_cli/src/diff.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvFun, ConvoluteMode}; +use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::{bail, Result}; use clap::{Parser, ValueHint}; @@ -17,8 +17,7 @@ pub struct Opts { #[arg(value_hint = ValueHint::FilePath)] input2: PathBuf, /// LHAPDF ID(s) or name(s) of the PDF(s)/FF(s). - #[arg(num_args = 1, required = true, value_delimiter = ',')] - conv_funs: Vec, + conv_funs: ConvFuns, /// Ignore differences in the orders and sum them. 
#[arg(long)] ignore_orders: bool, diff --git a/pineappl_cli/src/evolve.rs b/pineappl_cli/src/evolve.rs index c79021f14..b2fff58de 100644 --- a/pineappl_cli/src/evolve.rs +++ b/pineappl_cli/src/evolve.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvFun, ConvoluteMode}; +use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::{anyhow, Result}; use clap::{Parser, ValueHint}; @@ -507,8 +507,7 @@ pub struct Opts { #[arg(value_hint = ValueHint::FilePath)] output: PathBuf, /// LHAPDF ID(s) or name of the PDF(s)/FF(s). - #[arg(num_args = 1, required = true, value_delimiter = ',')] - conv_funs: Vec, + conv_funs: ConvFuns, /// Relative threshold between the table and the converted grid when comparison fails. #[arg(default_value = "1e-3", long)] accuracy: f64, diff --git a/pineappl_cli/src/export.rs b/pineappl_cli/src/export.rs index bc7d7c9f8..fd90b900c 100644 --- a/pineappl_cli/src/export.rs +++ b/pineappl_cli/src/export.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvFun, ConvoluteMode}; +use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::{anyhow, Result}; use clap::builder::{PossibleValuesParser, TypedValueParser}; @@ -15,7 +15,7 @@ mod applgrid; fn convert_into_applgrid( output: &Path, grid: &Grid, - conv_funs: &[ConvFun], + conv_funs: &ConvFuns, member: usize, _: usize, discard_non_matching_scales: bool, @@ -33,7 +33,7 @@ fn convert_into_applgrid( fn convert_into_applgrid( _: &Path, _: &Grid, - _: &[ConvFun], + _: &ConvFuns, _: usize, _: usize, _: bool, @@ -46,7 +46,7 @@ fn convert_into_applgrid( fn convert_into_grid( output: &Path, grid: &Grid, - conv_funs: &[ConvFun], + conv_funs: &ConvFuns, member: usize, scales: usize, discard_non_matching_scales: bool, @@ -77,8 +77,7 @@ pub struct Opts { #[arg(value_hint = ValueHint::FilePath)] output: PathBuf, /// LHAPDF ID(s) or name of the PDF(s)/FF(s) to check the converted grid with. 
- #[arg(num_args = 1, required = true, value_delimiter = ',')] - conv_funs: Vec, + conv_funs: ConvFuns, /// Relative threshold between the table and the converted grid when comparison fails. #[arg(default_value = "1e-10", long)] accuracy: f64, diff --git a/pineappl_cli/src/export/applgrid.rs b/pineappl_cli/src/export/applgrid.rs index de7bb763b..a900060fe 100644 --- a/pineappl_cli/src/export/applgrid.rs +++ b/pineappl_cli/src/export/applgrid.rs @@ -1,4 +1,4 @@ -use super::helpers::ConvFun; +use super::helpers::ConvFuns; use anyhow::{anyhow, bail, Result}; use cxx::{let_cxx_string, UniquePtr}; use float_cmp::approx_eq; @@ -303,15 +303,15 @@ pub fn convert_into_applgrid( } // TODO: deduplicate this function from import -pub fn convolve_applgrid(grid: Pin<&mut grid>, conv_funs: &[ConvFun], member: usize) -> Vec { +pub fn convolve_applgrid(grid: Pin<&mut grid>, conv_funs: &ConvFuns, member: usize) -> Vec { let nloops = grid.nloops(); // TODO: add support for convolving an APPLgrid with two functions - assert_eq!(conv_funs.len(), 1); + assert_eq!(conv_funs.lhapdf_names.len(), 1); ffi::grid_convolve( grid, - &conv_funs[0].lhapdf_name, + &conv_funs.lhapdf_names[0], member.try_into().unwrap(), nloops, 1.0, diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index b8ac6523e..14a3d21e3 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -15,47 +15,36 @@ use std::process::ExitCode; use std::str::FromStr; #[derive(Clone)] -pub struct ConvFun { - pub lhapdf_name: String, - label: String, +pub struct ConvFuns { + pub lhapdf_names: Vec, + pub label: String, } -impl FromStr for ConvFun { +impl FromStr for ConvFuns { type Err = Infallible; fn from_str(arg: &str) -> std::result::Result { Ok(arg.split_once('=').map_or_else( - || ConvFun { - lhapdf_name: arg.to_owned(), + || Self { + lhapdf_names: arg.split(',').map(ToOwned::to_owned).collect(), label: arg.to_owned(), }, - |(lhapdf_name, label)| ConvFun { - lhapdf_name: 
lhapdf_name.to_owned(), + |(lhapdf_names, label)| Self { + lhapdf_names: lhapdf_names.split(',').map(ToOwned::to_owned).collect(), label: label.to_owned(), }, )) } } -#[derive(Clone)] -pub struct VecConvFun(pub Vec); - -pub fn parse_conv_funs(arg: &str) -> std::result::Result { - Ok(VecConvFun( - arg.split(',') - .map(|conv_fun| ConvFun::from_str(conv_fun).map_err(|err| format!("{err}"))) - .collect::>()?, - )) -} - -pub fn create_conv_funs(funs: &[ConvFun]) -> Result> { +pub fn create_conv_funs(funs: &ConvFuns) -> Result> { Ok(funs + .lhapdf_names .iter() - .map(|fun| { - fun.lhapdf_name.parse().map_or_else( - |_| Pdf::with_setname_and_nmem(&fun.lhapdf_name), - Pdf::with_lhaid, - ) + .map(|lhapdf_name| { + lhapdf_name + .parse() + .map_or_else(|_| Pdf::with_setname_and_nmem(lhapdf_name), Pdf::with_lhaid) }) .collect::>()?) } diff --git a/pineappl_cli/src/import.rs b/pineappl_cli/src/import.rs index fe5388877..cfee13b1d 100644 --- a/pineappl_cli/src/import.rs +++ b/pineappl_cli/src/import.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvFun, ConvoluteMode}; +use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::{anyhow, Result}; use clap::builder::{PossibleValuesParser, TypedValueParser}; @@ -18,7 +18,7 @@ mod fktable; fn convert_applgrid( input: &Path, alpha: u32, - conv_funs: &[ConvFun], + conv_funs: &ConvFuns, member: usize, dis_pid: i32, _: usize, @@ -38,7 +38,7 @@ fn convert_applgrid( fn convert_applgrid( _: &Path, _: u32, - _: &[ConvFun], + _: &ConvFuns, _: usize, _: i32, _: usize, @@ -52,7 +52,7 @@ fn convert_applgrid( fn convert_fastnlo( input: &Path, alpha: u32, - conv_funs: &[ConvFun], + conv_funs: &ConvFuns, member: usize, dis_pid: i32, scales: usize, @@ -63,11 +63,11 @@ fn convert_fastnlo( use std::ptr; // TODO: convert this into an error? 
- assert_eq!(conv_funs.len(), 1); + assert_eq!(conv_funs.lhapdf_names.len(), 1); let mut file = ffi::make_fastnlo_lhapdf_with_name_file_set( input.to_str().unwrap(), - &conv_funs[0].lhapdf_name, + &conv_funs.lhapdf_names[0], member.try_into().unwrap(), ); @@ -121,7 +121,7 @@ fn convert_fastnlo( fn convert_fastnlo( _: &Path, _: u32, - _: &str, + _: &ConvFuns, _: usize, _: i32, _: usize, @@ -150,7 +150,7 @@ fn convert_fktable(_: &Path, _: i32) -> Result<(&'static str, Grid, Vec, us fn convert_grid( input: &Path, alpha: u32, - conv_funs: &[ConvFun], + conv_funs: &ConvFuns, member: usize, dis_pid: i32, scales: usize, @@ -214,8 +214,7 @@ pub struct Opts { #[arg(value_hint = ValueHint::FilePath)] output: PathBuf, /// LHAPDF ID(s) or name of the PDF(s)/FF(s) to check the converted grid with. - #[arg(num_args = 1, required = true, value_delimiter = ',')] - conv_funs: Vec, + conv_funs: ConvFuns, /// LO coupling power in alpha. #[arg(default_value_t = 0, long)] alpha: u32, diff --git a/pineappl_cli/src/import/applgrid.rs b/pineappl_cli/src/import/applgrid.rs index 025af06e4..e567c24bf 100644 --- a/pineappl_cli/src/import/applgrid.rs +++ b/pineappl_cli/src/import/applgrid.rs @@ -1,4 +1,4 @@ -use super::helpers::ConvFun; +use super::helpers::ConvFuns; use anyhow::Result; use pineappl::boc::{Channel, Order}; use pineappl::convolutions::Convolution; @@ -246,15 +246,15 @@ pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, dis_pid: i32) -> Resul Ok(grid0) } -pub fn convolve_applgrid(grid: Pin<&mut grid>, conv_funs: &[ConvFun], member: usize) -> Vec { +pub fn convolve_applgrid(grid: Pin<&mut grid>, conv_funs: &ConvFuns, member: usize) -> Vec { let nloops = grid.nloops(); // TODO: add support for convolving an APPLgrid with two functions - assert_eq!(conv_funs.len(), 1); + assert_eq!(conv_funs.lhapdf_names.len(), 1); ffi::grid_convolve( grid, - &conv_funs[0].lhapdf_name, + &conv_funs.lhapdf_names[0], member.try_into().unwrap(), nloops, 1.0, diff --git 
a/pineappl_cli/src/orders.rs b/pineappl_cli/src/orders.rs index d5346e730..8a229251d 100644 --- a/pineappl_cli/src/orders.rs +++ b/pineappl_cli/src/orders.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvFun, ConvoluteMode}; +use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::{Parser, ValueHint}; @@ -13,8 +13,7 @@ pub struct Opts { #[arg(value_hint = ValueHint::FilePath)] input: PathBuf, /// LHAPDF ID(s) or name(s) of the PDF(s)/FF(s). - #[arg(num_args = 1, required = true, value_delimiter = ',')] - conv_funs: Vec, + conv_funs: ConvFuns, /// Show absolute numbers of each perturbative order. #[arg(long, short)] absolute: bool, diff --git a/pineappl_cli/tests/channels.rs b/pineappl_cli/tests/channels.rs index 9ef0bfcca..84e9a25f3 100644 --- a/pineappl_cli/tests/channels.rs +++ b/pineappl_cli/tests/channels.rs @@ -2,11 +2,11 @@ use assert_cmd::Command; const HELP_STR: &str = "Shows the contribution for each partonic channel -Usage: pineappl channels [OPTIONS] ... +Usage: pineappl channels [OPTIONS] Arguments: - Path to the input grid - ... LHAPDF ID(s) or name(s) of the PDF(s)/FF(s) + Path to the input grid + LHAPDF ID(s) or name(s) of the PDF(s)/FF(s) Options: -a, --absolute Show absolute numbers of each contribution @@ -133,9 +133,9 @@ const DONT_SORT_STR: &str = "b etal c size c size c size c size c size "; const MISSING_CONV_FUN_STR: &str = "error: the following required arguments were not provided: - ... + -Usage: pineappl channels ... +Usage: pineappl channels For more information, try '--help'. "; diff --git a/pineappl_cli/tests/diff.rs b/pineappl_cli/tests/diff.rs index b8213ba26..61e139d0b 100644 --- a/pineappl_cli/tests/diff.rs +++ b/pineappl_cli/tests/diff.rs @@ -3,12 +3,12 @@ use assert_fs::NamedTempFile; const HELP_STR: &str = "Compares the numerical content of two grids with each other -Usage: pineappl diff [OPTIONS] ... 
+Usage: pineappl diff [OPTIONS] Arguments: - Path to the first grid - Path to the second grid - ... LHAPDF ID(s) or name(s) of the PDF(s)/FF(s) + Path to the first grid + Path to the second grid + LHAPDF ID(s) or name(s) of the PDF(s)/FF(s) Options: --ignore-orders Ignore differences in the orders and sum them diff --git a/pineappl_cli/tests/evolve.rs b/pineappl_cli/tests/evolve.rs index 03920c708..6b6193c81 100644 --- a/pineappl_cli/tests/evolve.rs +++ b/pineappl_cli/tests/evolve.rs @@ -5,13 +5,13 @@ use assert_fs::NamedTempFile; const HELP_STR: &str = "Evolve a grid with an evolution kernel operator to an FK table -Usage: pineappl evolve [OPTIONS] ... +Usage: pineappl evolve [OPTIONS] Arguments: - Path to the input grid - Path to the evolution kernel operator - Path to the converted grid - ... LHAPDF ID(s) or name of the PDF(s)/FF(s) + Path to the input grid + Path to the evolution kernel operator + Path to the converted grid + LHAPDF ID(s) or name of the PDF(s)/FF(s) Options: --accuracy Relative threshold between the table and the converted grid when comparison fails [default: 1e-3] diff --git a/pineappl_cli/tests/export.rs b/pineappl_cli/tests/export.rs index ec74a1e26..47fe293a7 100644 --- a/pineappl_cli/tests/export.rs +++ b/pineappl_cli/tests/export.rs @@ -5,12 +5,12 @@ use assert_fs::NamedTempFile; const HELP_STR: &str = "Converts PineAPPL grids to APPLgrid files -Usage: pineappl export [OPTIONS] ... +Usage: pineappl export [OPTIONS] Arguments: - Path to the input grid - Path to the converted grid - ... 
LHAPDF ID(s) or name of the PDF(s)/FF(s) to check the converted grid with + Path to the input grid + Path to the converted grid + LHAPDF ID(s) or name of the PDF(s)/FF(s) to check the converted grid with Options: --accuracy Relative threshold between the table and the converted grid when comparison fails [default: 1e-10] diff --git a/pineappl_cli/tests/import.rs b/pineappl_cli/tests/import.rs index 4d420eb28..c66f66c2d 100644 --- a/pineappl_cli/tests/import.rs +++ b/pineappl_cli/tests/import.rs @@ -6,12 +6,12 @@ use assert_fs::NamedTempFile; #[cfg(feature = "fastnlo")] const HELP_STR: &str = "Converts APPLgrid/fastNLO/FastKernel files to PineAPPL grids -Usage: pineappl import [OPTIONS] ... +Usage: pineappl import [OPTIONS] Arguments: - Path to the input grid - Path to the converted grid - ... LHAPDF ID(s) or name of the PDF(s)/FF(s) to check the converted grid with + Path to the input grid + Path to the converted grid + LHAPDF ID(s) or name of the PDF(s)/FF(s) to check the converted grid with Options: --alpha LO coupling power in alpha [default: 0] @@ -29,12 +29,12 @@ Options: #[cfg(not(feature = "fastnlo"))] const HELP_STR: &str = "Converts APPLgrid/fastNLO/FastKernel files to PineAPPL grids -Usage: pineappl import [OPTIONS] ... +Usage: pineappl import [OPTIONS] Arguments: - Path to the input grid - Path to the converted grid - ... LHAPDF ID(s) or name of the PDF(s)/FF(s) to check the converted grid with + Path to the input grid + Path to the converted grid + LHAPDF ID(s) or name of the PDF(s)/FF(s) to check the converted grid with Options: --alpha LO coupling power in alpha [default: 0] diff --git a/pineappl_cli/tests/orders.rs b/pineappl_cli/tests/orders.rs index 80ab1f6fe..4c6c41bf8 100644 --- a/pineappl_cli/tests/orders.rs +++ b/pineappl_cli/tests/orders.rs @@ -2,11 +2,11 @@ use assert_cmd::Command; const HELP_STR: &str = "Shows the predictions for all bin for each order separately -Usage: pineappl orders [OPTIONS] ... 
+Usage: pineappl orders [OPTIONS] Arguments: - Path to the input grid - ... LHAPDF ID(s) or name(s) of the PDF(s)/FF(s) + Path to the input grid + LHAPDF ID(s) or name(s) of the PDF(s)/FF(s) Options: -a, --absolute Show absolute numbers of each perturbative order From 14366184659adeaaac6930abb127e24a9483499a Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 17 Jun 2024 09:51:28 +0200 Subject: [PATCH 154/179] Migrate subcommand `convolve` to `ConvFuns` --- pineappl_cli/src/convolve.rs | 21 ++++++++++----------- pineappl_cli/tests/convolve.rs | 6 +++--- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/pineappl_cli/src/convolve.rs b/pineappl_cli/src/convolve.rs index 91ee8bfe7..5377abc54 100644 --- a/pineappl_cli/src/convolve.rs +++ b/pineappl_cli/src/convolve.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvoluteMode}; +use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::{Parser, ValueHint}; @@ -6,7 +6,6 @@ use prettytable::{cell, Row}; use std::ops::RangeInclusive; use std::path::PathBuf; use std::process::ExitCode; -use std::slice; /// Convolutes a PineAPPL grid with a PDF set. #[derive(Parser)] @@ -16,8 +15,8 @@ pub struct Opts { #[arg(value_hint = ValueHint::FilePath)] input: PathBuf, /// LHAPDF id(s) or name of the PDF set(s). - #[arg(required = true, value_parser = helpers::parse_pdfset)] - pdfsets: Vec, + #[arg(required = true)] + conv_funs: Vec, /// Selects a subset of bins. 
#[arg( long, @@ -50,12 +49,12 @@ pub struct Opts { impl Subcommand for Opts { fn run(&self, cfg: &GlobalConfiguration) -> Result { let grid = helpers::read_grid(&self.input)?; - let mut pdf = helpers::create_pdf(&self.pdfsets[0])?; + let mut conv_funs_0 = helpers::create_conv_funs(&self.conv_funs[0])?; let bins: Vec<_> = self.bins.iter().cloned().flatten().collect(); let results = helpers::convolve( &grid, - slice::from_mut(&mut pdf), + &mut conv_funs_0, &self.orders, &bins, &[], @@ -78,13 +77,13 @@ impl Subcommand for Opts { ); let bin_count = limits.len(); - let other_results: Vec<_> = self.pdfsets[1..] + let other_results: Vec<_> = self.conv_funs[1..] .iter() - .flat_map(|pdfset| { - let mut pdf = helpers::create_pdf(pdfset).unwrap(); + .flat_map(|conv_funs| { + let mut conv_funs = helpers::create_conv_funs(conv_funs).unwrap(); helpers::convolve( &grid, - slice::from_mut(&mut pdf), + &mut conv_funs, &self.orders, &bins, &[], @@ -109,7 +108,7 @@ impl Subcommand for Opts { } title.add_cell(cell!(c->format!("{y_label}\n[{y_unit}]"))); - for other in self.pdfsets[1..].iter().map(|pdf| helpers::pdf_label(pdf)) { + for other in self.conv_funs[1..].iter().map(|conv_funs| &conv_funs.label) { let mut cell = cell!(c->format!("{other}\n[{y_unit}] [%]")); cell.set_hspan(2); title.add_cell(cell); diff --git a/pineappl_cli/tests/convolve.rs b/pineappl_cli/tests/convolve.rs index 9eb93292b..36902e635 100644 --- a/pineappl_cli/tests/convolve.rs +++ b/pineappl_cli/tests/convolve.rs @@ -2,11 +2,11 @@ use assert_cmd::Command; const HELP_STR: &str = "Convolutes a PineAPPL grid with a PDF set -Usage: pineappl convolve [OPTIONS] ... +Usage: pineappl convolve [OPTIONS] ... Arguments: - Path of the input grid - ... LHAPDF id(s) or name of the PDF set(s) + Path of the input grid + ... 
LHAPDF id(s) or name of the PDF set(s) Options: -b, --bins Selects a subset of bins From 699851ce260b7a5cd354e447b4441a134b84f1e0 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 17 Jun 2024 16:07:14 +0200 Subject: [PATCH 155/179] Remove switch `--pdf-with-scale-cov` from `uncert` subcommand --- CHANGELOG.md | 1 + pineappl_cli/src/uncert.rs | 71 +++++++++--------------------------- pineappl_cli/tests/uncert.rs | 53 ++++++--------------------- 3 files changed, 30 insertions(+), 95 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c24e4a4bc..1b09c0dac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - removed `TryFromGridError::MetadataMissing` - removed `Grid::subgrid` and `Grid::set_subgrid` methods; these functions have been replaced with `Grid::subgrids` and `Grid::subgrids_mut` +- removed the switch `--pdf-with-scale-cov` from `pineappl uncert` ## [0.7.4] - 23/05/2024 diff --git a/pineappl_cli/src/uncert.rs b/pineappl_cli/src/uncert.rs index 09bec4609..81d7e25a4 100644 --- a/pineappl_cli/src/uncert.rs +++ b/pineappl_cli/src/uncert.rs @@ -17,16 +17,6 @@ struct Group { /// Calculate the PDF uncertainties. #[arg(long)] pdf: bool, - /// Calculate the combined PDF and scale uncertainty using the covariance method. - #[arg( - default_missing_value = "7", - num_args = 0..=1, - long, - require_equals = true, - value_name = "SCALES", - value_parser = PossibleValuesParser::new(["3", "7", "9"]).try_map(|s| s.parse::()) - )] - pdf_with_scale_cov: Option, /// Show absolute numbers of the scale-varied results. 
#[arg( default_missing_value = "7", @@ -111,7 +101,7 @@ impl Subcommand for Opts { }, ); - let pdf_results = if self.group.pdf || self.group.pdf_with_scale_cov.is_some() { + let pdf_results = if self.group.pdf { ThreadPoolBuilder::new() .num_threads(self.threads) .build_global() @@ -145,7 +135,6 @@ impl Subcommand for Opts { .iter() .chain(self.group.scale_abs.iter()) .chain(self.group.scale_cov.iter()) - .chain(self.group.pdf_with_scale_cov.iter()) .map(|&x| usize::from(x)) .max() .unwrap_or(1); @@ -179,11 +168,6 @@ impl Subcommand for Opts { title.add_cell(cell!(c->"PDF\n[%]").with_hspan(2)); } - if let Some(scales) = self.group.pdf_with_scale_cov { - title.add_cell(cell!(c->"PDF central")); - title.add_cell(cell!(c->format!("PDF w/ {}pt scale (cov)\n[%]", scales)).with_hspan(2)); - } - if let Some(scales) = self.group.scale_abs { for scale in &helpers::SCALES_VECTOR[0..scales.into()] { title.add_cell(cell!(c->format!("(r={},f={})\n[{}]", scale.0, scale.1, y_unit))); @@ -206,24 +190,23 @@ impl Subcommand for Opts { .zip(scale_results.chunks_exact(scales_max)) .enumerate() { - let (pdf_cen, pdf_neg, pdf_pos) = - if self.group.pdf || self.group.pdf_with_scale_cov.is_some() { - let values: Vec<_> = pdf_results - .iter() - .skip(bin) - .step_by(limits.len()) - .copied() - .collect(); - let uncertainty = set.uncertainty(&values, self.cl, false)?; + let (pdf_cen, pdf_neg, pdf_pos) = if self.group.pdf { + let values: Vec<_> = pdf_results + .iter() + .skip(bin) + .step_by(limits.len()) + .copied() + .collect(); + let uncertainty = set.uncertainty(&values, self.cl, false)?; - ( - uncertainty.central, - -100.0 * uncertainty.errminus / uncertainty.central, - 100.0 * uncertainty.errplus / uncertainty.central, - ) - } else { - (0.0, 0.0, 0.0) - }; + ( + uncertainty.central, + -100.0 * uncertainty.errminus / uncertainty.central, + 100.0 * uncertainty.errplus / uncertainty.central, + ) + } else { + (0.0, 0.0, 0.0) + }; let row = table.add_empty_row(); 
row.add_cell(cell!(r->format!("{bin}"))); @@ -240,26 +223,6 @@ impl Subcommand for Opts { row.add_cell(cell!(r->format!("{:.*}", self.digits_rel, pdf_pos))); } - if let Some(scales) = self.group.pdf_with_scale_cov { - let ns = if scales == 3 { 1.0 } else { 2.0 } / f64::from(scales - 1); - let unc = (ns - * scale_res - .iter() - .take(scales.into()) - .skip(1) - .map(|x| (x - scale_res[0]).powi(2)) - .sum::()) - .sqrt(); - let rel_unc = 100.0 * unc / scale_res[0]; - - let total_neg = -(pdf_neg * pdf_neg + rel_unc * rel_unc).sqrt(); - let total_pos = (pdf_pos * pdf_pos + rel_unc * rel_unc).sqrt(); - - row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, pdf_cen))); - row.add_cell(cell!(r->format!("{:.*}", self.digits_rel, total_neg))); - row.add_cell(cell!(r->format!("{:.*}", self.digits_rel, total_pos))); - } - if let Some(scales) = self.group.scale_abs { for result in scale_res.iter().take(scales.into()) { row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, result))); diff --git a/pineappl_cli/tests/uncert.rs b/pineappl_cli/tests/uncert.rs index 43b465385..f0ab1b7d1 100644 --- a/pineappl_cli/tests/uncert.rs +++ b/pineappl_cli/tests/uncert.rs @@ -4,25 +4,24 @@ use std::thread; const HELP_STR: &str = "Calculates scale and PDF uncertainties -Usage: pineappl uncert [OPTIONS] <--pdf|--pdf-with-scale-cov[=]|--scale-abs[=]|--scale-cov[=]|--scale-env[=]> +Usage: pineappl uncert [OPTIONS] <--pdf|--scale-abs[=]|--scale-cov[=]|--scale-env[=]> Arguments: Path to the input grid LHAPDF id or name of the PDF set Options: - --pdf Calculate the PDF uncertainties - --pdf-with-scale-cov[=] Calculate the combined PDF and scale uncertainty using the covariance method [possible values: 3, 7, 9] - --scale-abs[=] Show absolute numbers of the scale-varied results [possible values: 3, 7, 9] - --scale-cov[=] Calculate scale uncertainties using the covariance method [possible values: 3, 7, 9] - --scale-env[=] Calculate the envelope of results where renormalization and factorization 
scales varied [possible values: 3, 7, 9] - --cl Confidence level in per cent, for PDF uncertainties [default: 68.26894921370858] - -i, --integrated Show integrated numbers (without bin widths) instead of differential ones - -o, --orders Select orders manually - --threads Number of threads to utilize [default: {}] - --digits-abs Set the number of fractional digits shown for absolute numbers [default: 7] - --digits-rel Set the number of fractional digits shown for relative numbers [default: 2] - -h, --help Print help + --pdf Calculate the PDF uncertainties + --scale-abs[=] Show absolute numbers of the scale-varied results [possible values: 3, 7, 9] + --scale-cov[=] Calculate scale uncertainties using the covariance method [possible values: 3, 7, 9] + --scale-env[=] Calculate the envelope of results where renormalization and factorization scales varied [possible values: 3, 7, 9] + --cl Confidence level in per cent, for PDF uncertainties [default: 68.26894921370858] + -i, --integrated Show integrated numbers (without bin widths) instead of differential ones + -o, --orders Select orders manually + --threads Number of threads to utilize [default: {}] + --digits-abs Set the number of fractional digits shown for absolute numbers [default: 7] + --digits-rel Set the number of fractional digits shown for relative numbers [default: 2] + -h, --help Print help "; const DEFAULT_STR: &str = "b etal dsig/detal PDF central PDF @@ -143,19 +142,6 @@ const SCALE_ENV_9_STR: &str = "b etal dsig/detal 9pt-svar (env) 7 4 4.5 2.7517266e1 -5.36 5.22 "; -const PDF_WITH_SCALE_COV_STR: &str = "b etal dsig/detal PDF central PDF w/ 7pt scale (cov) - [] [pb] [%] --+----+----+-----------+-----------+-----------+----------- -0 2 2.25 7.5459110e2 7.5461655e2 -3.48 3.48 -1 2.25 2.5 6.9028342e2 6.9027941e2 -3.50 3.50 -2 2.5 2.75 6.0025198e2 6.0022595e2 -3.55 3.55 -3 2.75 3 4.8552235e2 4.8548211e2 -3.56 3.56 -4 3 3.25 3.6195456e2 3.6191001e2 -3.58 3.58 -5 3.25 3.5 2.4586691e2 2.4582640e2 -3.60 3.60 -6 
3.5 4 1.1586851e2 1.1584074e2 -3.63 3.63 -7 4 4.5 2.7517266e1 2.7504644e1 -4.26 4.26 -"; - #[test] fn help() { Command::cargo_bin("pineappl") @@ -314,18 +300,3 @@ fn scale_env_9() { .success() .stdout(SCALE_ENV_9_STR); } - -#[test] -fn pdf_with_scale_cov() { - Command::cargo_bin("pineappl") - .unwrap() - .args([ - "uncert", - "--pdf-with-scale-cov", - "../test-data/LHCB_WP_7TEV.pineappl.lz4", - "NNPDF31_nlo_as_0118_luxqed", - ]) - .assert() - .success() - .stdout(PDF_WITH_SCALE_COV_STR); -} From 5f8e576bff7ef0ea31251ea034de8d76bf430c2a Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 18 Jun 2024 22:45:01 +0200 Subject: [PATCH 156/179] Migrate `uncert` to use `ConvFun` --- CHANGELOG.md | 4 + pineappl_cli/src/uncert.rs | 137 ++++++++++++++++++----------------- pineappl_cli/tests/uncert.rs | 119 +++++++++++++++--------------- 3 files changed, 136 insertions(+), 124 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b09c0dac..38c80703c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - changed member `lumi_id_types` of `OperatorInfo` and `OperatorSliceInfo` to `pid_basis`, which is now of type `PidBasis` - renamed module `pineappl::lumi` to `pineappl::convolutions` +- renamed switch `--pdf` to `--conv-fun` in the subcommand `uncert`. 
This + switch now optionally accepts a list of indices, which determines the + corresponding convolution function (PDF/FF), for which the uncertainty should + calculated ### Removed diff --git a/pineappl_cli/src/uncert.rs b/pineappl_cli/src/uncert.rs index 81d7e25a4..539d75d3f 100644 --- a/pineappl_cli/src/uncert.rs +++ b/pineappl_cli/src/uncert.rs @@ -1,6 +1,6 @@ -use super::helpers::{self, ConvoluteMode}; +use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; -use anyhow::Result; +use anyhow::{Error, Result}; use clap::builder::{PossibleValuesParser, TypedValueParser}; use clap::{Args, Parser, ValueHint}; use prettytable::{cell, Row}; @@ -8,15 +8,21 @@ use rayon::{prelude::*, ThreadPoolBuilder}; use std::num::NonZeroUsize; use std::path::PathBuf; use std::process::ExitCode; -use std::slice; use std::thread; #[derive(Args)] #[group(multiple = true, required = true)] struct Group { - /// Calculate the PDF uncertainties. - #[arg(long)] - pdf: bool, + /// Calculate convolution function uncertainties. + #[arg( + default_missing_value = "0", + num_args = 0..=1, + long, + require_equals = true, + value_delimiter = ',', + value_name = "IDX" + )] + conv_fun: Vec, /// Show absolute numbers of the scale-varied results. #[arg( default_missing_value = "7", @@ -49,18 +55,17 @@ struct Group { scale_env: Option, } -/// Calculates scale and PDF uncertainties. +/// Calculates scale and convolution function uncertainties. #[derive(Parser)] pub struct Opts { /// Path to the input grid. #[arg(value_hint = ValueHint::FilePath)] input: PathBuf, - /// LHAPDF id or name of the PDF set. - #[arg(value_parser = helpers::parse_pdfset)] - pdfset: String, + /// LHAPDF ID(s) or name(s) of the PDF(s)/FF(s). + conv_funs: ConvFuns, #[command(flatten)] group: Group, - /// Confidence level in per cent, for PDF uncertainties. + /// Confidence level in per cent, for convolution function uncertainties. 
#[arg(default_value_t = lhapdf::CL_1_SIGMA, long)] cl: f64, /// Show integrated numbers (without bin widths) instead of differential ones. @@ -89,7 +94,7 @@ pub struct Opts { impl Subcommand for Opts { fn run(&self, cfg: &GlobalConfiguration) -> Result { let grid = helpers::read_grid(&self.input)?; - let (set, _) = helpers::create_pdfset(&self.pdfset)?; + let mut conv_funs = helpers::create_conv_funs(&self.conv_funs)?; let limits = helpers::convolve_limits( &grid, @@ -101,34 +106,53 @@ impl Subcommand for Opts { }, ); - let pdf_results = if self.group.pdf { - ThreadPoolBuilder::new() - .num_threads(self.threads) - .build_global() - .unwrap(); + ThreadPoolBuilder::new() + .num_threads(self.threads) + .build_global() + .unwrap(); + + let conv_fun_results: Vec> = self + .group + .conv_fun + .iter() + .map(|&index| { + let set = conv_funs[index].set(); + let results: Vec<_> = set + .mk_pdfs()? + .into_par_iter() + .map(|fun| { + // TODO: do not create objects that are getting overwritten in any case + let mut conv_funs = helpers::create_conv_funs(&self.conv_funs)?; + conv_funs[index] = fun; - set.mk_pdfs()? 
- .into_par_iter() - .flat_map(|mut pdf| { - helpers::convolve( - &grid, - slice::from_mut(&mut pdf), - &self.orders, - &[], - &[], - 1, - if self.integrated { - ConvoluteMode::Integrated - } else { - ConvoluteMode::Normal - }, - cfg, - ) - }) - .collect() - } else { - vec![] - }; + Ok::<_, Error>(helpers::convolve( + &grid, + &mut conv_funs, + &self.orders, + &[], + &[], + 1, + if self.integrated { + ConvoluteMode::Integrated + } else { + ConvoluteMode::Normal + }, + cfg, + )) + }) + .collect::>()?; + + // transpose results + let results: Vec> = (0..results[0].len()) + .map(|bin| (0..results.len()).map(|pdf| results[pdf][bin]).collect()) + .collect(); + + results + .into_iter() + .map(|values| Ok(set.uncertainty(&values, self.cl, false)?)) + .collect::>() + }) + .collect::>()?; let scales_max = self .group .scale_env @@ -140,7 +164,7 @@ impl Subcommand for Opts { .unwrap_or(1); let scale_results = helpers::convolve( &grid, - slice::from_mut(&mut helpers::create_pdf(&self.pdfset)?), + &mut conv_funs, &self.orders, &[], &[], @@ -163,9 +187,10 @@ impl Subcommand for Opts { } title.add_cell(cell!(c->format!("{y_label}\n[{y_unit}]"))); - if self.group.pdf { - title.add_cell(cell!(c->"PDF central")); - title.add_cell(cell!(c->"PDF\n[%]").with_hspan(2)); + for &index in &self.group.conv_fun { + title.add_cell( + cell!(c->format!("{}", self.conv_funs.lhapdf_names[index])).with_hspan(3), + ); } if let Some(scales) = self.group.scale_abs { @@ -190,24 +215,6 @@ impl Subcommand for Opts { .zip(scale_results.chunks_exact(scales_max)) .enumerate() { - let (pdf_cen, pdf_neg, pdf_pos) = if self.group.pdf { - let values: Vec<_> = pdf_results - .iter() - .skip(bin) - .step_by(limits.len()) - .copied() - .collect(); - let uncertainty = set.uncertainty(&values, self.cl, false)?; - - ( - uncertainty.central, - -100.0 * uncertainty.errminus / uncertainty.central, - 100.0 * uncertainty.errplus / uncertainty.central, - ) - } else { - (0.0, 0.0, 0.0) - }; - let row = 
table.add_empty_row(); row.add_cell(cell!(r->format!("{bin}"))); for (left, right) in left_right_limits { @@ -217,10 +224,10 @@ impl Subcommand for Opts { row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, scale_res[0]))); - if self.group.pdf { - row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, pdf_cen))); - row.add_cell(cell!(r->format!("{:.*}", self.digits_rel, pdf_neg))); - row.add_cell(cell!(r->format!("{:.*}", self.digits_rel, pdf_pos))); + for uncertainty in conv_fun_results.iter().map(|results| &results[bin]) { + row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, uncertainty.central))); + row.add_cell(cell!(r->format!("{:.*}", self.digits_rel, -100.0 * uncertainty.errminus / uncertainty.central))); + row.add_cell(cell!(r->format!("{:.*}", self.digits_rel, 100.0 * uncertainty.errplus / uncertainty.central))); } if let Some(scales) = self.group.scale_abs { diff --git a/pineappl_cli/tests/uncert.rs b/pineappl_cli/tests/uncert.rs index f0ab1b7d1..0492ea5b9 100644 --- a/pineappl_cli/tests/uncert.rs +++ b/pineappl_cli/tests/uncert.rs @@ -2,20 +2,20 @@ use assert_cmd::Command; use std::num::NonZeroUsize; use std::thread; -const HELP_STR: &str = "Calculates scale and PDF uncertainties +const HELP_STR: &str = "Calculates scale and convolution function uncertainties -Usage: pineappl uncert [OPTIONS] <--pdf|--scale-abs[=]|--scale-cov[=]|--scale-env[=]> +Usage: pineappl uncert [OPTIONS] <--conv-fun[=]|--scale-abs[=]|--scale-cov[=]|--scale-env[=]> Arguments: - Path to the input grid - LHAPDF id or name of the PDF set + Path to the input grid + LHAPDF ID(s) or name(s) of the PDF(s)/FF(s) Options: - --pdf Calculate the PDF uncertainties + --conv-fun[=] Calculate convolution function uncertainties --scale-abs[=] Show absolute numbers of the scale-varied results [possible values: 3, 7, 9] --scale-cov[=] Calculate scale uncertainties using the covariance method [possible values: 3, 7, 9] --scale-env[=] Calculate the envelope of results where 
renormalization and factorization scales varied [possible values: 3, 7, 9] - --cl Confidence level in per cent, for PDF uncertainties [default: 68.26894921370858] + --cl Confidence level in per cent, for convolution function uncertainties [default: 68.26894921370858] -i, --integrated Show integrated numbers (without bin widths) instead of differential ones -o, --orders Select orders manually --threads Number of threads to utilize [default: {}] @@ -24,56 +24,57 @@ Options: -h, --help Print help "; -const DEFAULT_STR: &str = "b etal dsig/detal PDF central PDF - [] [pb] [%] --+----+----+-----------+-----------+-----+---- -0 2 2.25 7.5459110e2 7.5461655e2 -1.14 1.14 -1 2.25 2.5 6.9028342e2 6.9027941e2 -1.16 1.16 -2 2.5 2.75 6.0025198e2 6.0022595e2 -1.18 1.18 -3 2.75 3 4.8552235e2 4.8548211e2 -1.22 1.22 -4 3 3.25 3.6195456e2 3.6191001e2 -1.27 1.27 -5 3.25 3.5 2.4586691e2 2.4582640e2 -1.35 1.35 -6 3.5 4 1.1586851e2 1.1584074e2 -1.51 1.51 -7 4 4.5 2.7517266e1 2.7504644e1 -2.77 2.77 +const DEFAULT_STR: &str = + "b etal dsig/detal NNPDF31_nlo_as_0118_luxqed NNPDF40_nnlo_as_01180 + [] [pb] +-+----+----+-----------+-----------+---------+---------+-----------+-------+------- +0 2 2.25 7.7302788e2 7.7302788e2 -0.67 0.67 7.7302788e2 -0.62 0.62 +1 2.25 2.5 7.0634852e2 7.0634851e2 -0.72 0.72 7.0634851e2 -0.62 0.62 +2 2.5 2.75 6.1354750e2 6.1354750e2 -0.78 0.78 6.1354750e2 -0.64 0.64 +3 2.75 3 4.9584391e2 4.9584391e2 -0.86 0.86 4.9584391e2 -0.68 0.68 +4 3 3.25 3.6957893e2 3.6957893e2 -0.97 0.97 3.6957893e2 -0.76 0.76 +5 3.25 3.5 2.5143057e2 2.5143057e2 -1.14 1.14 2.5143057e2 -0.89 0.89 +6 3.5 4 1.1962468e2 1.1962468e2 -1.55 1.55 1.1962468e2 -1.34 1.34 +7 4 4.5 2.9665790e1 2.9665790e1 -2.56 2.56 2.9665789e1 -3.51 3.51 "; -const CL_90_STR: &str = "b etal dsig/detal PDF central PDF - [] [pb] [%] --+----+----+-----------+-----------+-----+---- -0 2 2.25 7.5459110e2 7.5461655e2 -1.87 1.87 -1 2.25 2.5 6.9028342e2 6.9027941e2 -1.90 1.90 -2 2.5 2.75 6.0025198e2 6.0022595e2 -1.95 1.95 -3 
2.75 3 4.8552235e2 4.8548211e2 -2.00 2.00 -4 3 3.25 3.6195456e2 3.6191001e2 -2.08 2.08 -5 3.25 3.5 2.4586691e2 2.4582640e2 -2.22 2.22 -6 3.5 4 1.1586851e2 1.1584074e2 -2.48 2.48 -7 4 4.5 2.7517266e1 2.7504644e1 -4.55 4.55 +const CL_90_STR: &str = "b etal dsig/detal NNPDF31_nlo_as_0118_luxqed + [] [pb] +-+----+----+-----------+-----------+---------+--------- +0 2 2.25 7.5459110e2 7.5461655e2 -1.87 1.87 +1 2.25 2.5 6.9028342e2 6.9027941e2 -1.90 1.90 +2 2.5 2.75 6.0025198e2 6.0022595e2 -1.95 1.95 +3 2.75 3 4.8552235e2 4.8548211e2 -2.00 2.00 +4 3 3.25 3.6195456e2 3.6191001e2 -2.08 2.08 +5 3.25 3.5 2.4586691e2 2.4582640e2 -2.22 2.22 +6 3.5 4 1.1586851e2 1.1584074e2 -2.48 2.48 +7 4 4.5 2.7517266e1 2.7504644e1 -4.55 4.55 "; -const INTEGRATED_STR: &str = "b etal integ PDF central PDF - [] [] [%] --+----+----+-----------+-----------+-----+---- -0 2 2.25 1.8864777e2 1.8865414e2 -1.14 1.14 -1 2.25 2.5 1.7257086e2 1.7256985e2 -1.16 1.16 -2 2.5 2.75 1.5006300e2 1.5005649e2 -1.18 1.18 -3 2.75 3 1.2138059e2 1.2137053e2 -1.22 1.22 -4 3 3.25 9.0488640e1 9.0477502e1 -1.27 1.27 -5 3.25 3.5 6.1466727e1 6.1456599e1 -1.35 1.35 -6 3.5 4 5.7934254e1 5.7920368e1 -1.51 1.51 -7 4 4.5 1.3758633e1 1.3752322e1 -2.77 2.77 +const INTEGRATED_STR: &str = "b etal integ NNPDF31_nlo_as_0118_luxqed + [] [] +-+----+----+-----------+-----------+---------+--------- +0 2 2.25 1.8864777e2 1.8865414e2 -1.14 1.14 +1 2.25 2.5 1.7257086e2 1.7256985e2 -1.16 1.16 +2 2.5 2.75 1.5006300e2 1.5005649e2 -1.18 1.18 +3 2.75 3 1.2138059e2 1.2137053e2 -1.22 1.22 +4 3 3.25 9.0488640e1 9.0477502e1 -1.27 1.27 +5 3.25 3.5 6.1466727e1 6.1456599e1 -1.35 1.35 +6 3.5 4 5.7934254e1 5.7920368e1 -1.51 1.51 +7 4 4.5 1.3758633e1 1.3752322e1 -2.77 2.77 "; -const ORDERS_A2_AS1A2_STR: &str = "b etal dsig/detal PDF central PDF - [] [pb] [%] --+----+----+-----------+-----------+-----+---- -0 2 2.25 7.6246034e2 7.6248591e2 -1.14 1.14 -1 2.25 2.5 6.9684577e2 6.9684166e2 -1.16 1.16 -2 2.5 2.75 6.0548681e2 6.0546059e2 -1.18 1.18 -3 2.75 3 
4.8928139e2 4.8924093e2 -1.22 1.22 -4 3 3.25 3.6454175e2 3.6449702e2 -1.27 1.27 -5 3.25 3.5 2.4754316e2 2.4750254e2 -1.35 1.35 -6 3.5 4 1.1667878e2 1.1665095e2 -1.50 1.50 -7 4 4.5 2.7737493e1 2.7724826e1 -2.77 2.77 +const ORDERS_A2_AS1A2_STR: &str = "b etal dsig/detal NNPDF31_nlo_as_0118_luxqed + [] [pb] +-+----+----+-----------+-----------+---------+--------- +0 2 2.25 7.6246034e2 7.6248591e2 -1.14 1.14 +1 2.25 2.5 6.9684577e2 6.9684166e2 -1.16 1.16 +2 2.5 2.75 6.0548681e2 6.0546059e2 -1.18 1.18 +3 2.75 3 4.8928139e2 4.8924093e2 -1.22 1.22 +4 3 3.25 3.6454175e2 3.6449702e2 -1.27 1.27 +5 3.25 3.5 2.4754316e2 2.4750254e2 -1.35 1.35 +6 3.5 4 1.1667878e2 1.1665095e2 -1.50 1.50 +7 4 4.5 2.7737493e1 2.7724826e1 -2.77 2.77 "; const SCALE_ABS_STR: &str = @@ -160,15 +161,15 @@ fn help() { } #[test] -fn pdf_default() { +fn conv_fun_default() { Command::cargo_bin("pineappl") .unwrap() .args([ "uncert", - "--pdf", + "--conv-fun=0,1", "--threads=1", "../test-data/LHCB_WP_7TEV.pineappl.lz4", - "NNPDF31_nlo_as_0118_luxqed", + "NNPDF31_nlo_as_0118_luxqed,NNPDF40_nnlo_as_01180=NNPDF3.1+NNPDF4.0", ]) .assert() .success() @@ -176,12 +177,12 @@ fn pdf_default() { } #[test] -fn pdf_cl_90() { +fn conv_fun_cl_90() { Command::cargo_bin("pineappl") .unwrap() .args([ "uncert", - "--pdf", + "--conv-fun", "--cl=90", "--threads=1", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -193,12 +194,12 @@ fn pdf_cl_90() { } #[test] -fn pdf_integrated() { +fn conv_fun_integrated() { Command::cargo_bin("pineappl") .unwrap() .args([ "uncert", - "--pdf", + "--conv-fun=0", "--integrated", "--threads=1", "../test-data/LHCB_WP_7TEV.pineappl.lz4", @@ -210,12 +211,12 @@ fn pdf_integrated() { } #[test] -fn pdf_orders_a2_as1a2() { +fn conv_fun_orders_a2_as1a2() { Command::cargo_bin("pineappl") .unwrap() .args([ "uncert", - "--pdf", + "--conv-fun=0", "--orders=a2,as1a2", "--threads=1", "../test-data/LHCB_WP_7TEV.pineappl.lz4", From 0020bdec057e795b6af0cdd5b4e081597bde95c7 Mon Sep 17 00:00:00 2001 From: Christopher 
Schwan Date: Wed, 19 Jun 2024 07:27:16 +0200 Subject: [PATCH 157/179] Fix main test --- pineappl_cli/src/uncert.rs | 2 +- pineappl_cli/tests/main.rs | 2 +- pineappl_cli/tests/uncert.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pineappl_cli/src/uncert.rs b/pineappl_cli/src/uncert.rs index 539d75d3f..fcae668b6 100644 --- a/pineappl_cli/src/uncert.rs +++ b/pineappl_cli/src/uncert.rs @@ -55,7 +55,7 @@ struct Group { scale_env: Option, } -/// Calculates scale and convolution function uncertainties. +/// Calculate scale and convolution function uncertainties. #[derive(Parser)] pub struct Opts { /// Path to the input grid. diff --git a/pineappl_cli/tests/main.rs b/pineappl_cli/tests/main.rs index 19191f15c..0ba04dce5 100644 --- a/pineappl_cli/tests/main.rs +++ b/pineappl_cli/tests/main.rs @@ -19,7 +19,7 @@ Commands: pull Calculates the pull between two different PDF sets read Read out information of a grid subgrids Print information about the internal subgrid types - uncert Calculates scale and PDF uncertainties + uncert Calculate scale and convolution function uncertainties write Write a grid modified by various operations Options: diff --git a/pineappl_cli/tests/uncert.rs b/pineappl_cli/tests/uncert.rs index 0492ea5b9..774751e89 100644 --- a/pineappl_cli/tests/uncert.rs +++ b/pineappl_cli/tests/uncert.rs @@ -2,7 +2,7 @@ use assert_cmd::Command; use std::num::NonZeroUsize; use std::thread; -const HELP_STR: &str = "Calculates scale and convolution function uncertainties +const HELP_STR: &str = "Calculate scale and convolution function uncertainties Usage: pineappl uncert [OPTIONS] <--conv-fun[=]|--scale-abs[=]|--scale-cov[=]|--scale-env[=]> From ab9bd08e4e9aa5d633adbc18e5024d50be1ce6e3 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 21 Jun 2024 14:40:28 +0200 Subject: [PATCH 158/179] Add `members` variable to `ConvFuns` --- pineappl_cli/src/helpers.rs | 71 +++++++++++++++++++++++++++---------- 1 file changed, 53 insertions(+), 18 
deletions(-) diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 14a3d21e3..b8035f49d 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -1,12 +1,11 @@ use super::GlobalConfiguration; -use anyhow::{ensure, Context, Result}; +use anyhow::{ensure, Context, Error, Result}; use lhapdf::{Pdf, PdfSet}; use ndarray::Array3; use pineappl::convolutions::LumiCache; use pineappl::grid::Grid; use prettytable::format::{FormatBuilder, LinePosition, LineSeparator}; use prettytable::Table; -use std::convert::Infallible; use std::fs::{File, OpenOptions}; use std::iter; use std::ops::RangeInclusive; @@ -14,26 +13,34 @@ use std::path::Path; use std::process::ExitCode; use std::str::FromStr; -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq)] pub struct ConvFuns { pub lhapdf_names: Vec, + pub members: Vec>, pub label: String, } impl FromStr for ConvFuns { - type Err = Infallible; + type Err = Error; fn from_str(arg: &str) -> std::result::Result { - Ok(arg.split_once('=').map_or_else( - || Self { - lhapdf_names: arg.split(',').map(ToOwned::to_owned).collect(), - label: arg.to_owned(), - }, - |(lhapdf_names, label)| Self { - lhapdf_names: lhapdf_names.split(',').map(ToOwned::to_owned).collect(), - label: label.to_owned(), - }, - )) + let (names, label) = arg.split_once('=').unwrap_or((arg, arg)); + let (lhapdf_names, members) = names + .split(',') + .map(|fun| { + Ok::<_, Error>(if let Some((name, mem)) = fun.split_once('/') { + (name.to_owned(), Some(mem.parse()?)) + } else { + (fun.to_owned(), None) + }) + }) + .collect::>()?; + + Ok(Self { + lhapdf_names, + members, + label: label.to_owned(), + }) } } @@ -41,10 +48,16 @@ pub fn create_conv_funs(funs: &ConvFuns) -> Result> { Ok(funs .lhapdf_names .iter() - .map(|lhapdf_name| { - lhapdf_name - .parse() - .map_or_else(|_| Pdf::with_setname_and_nmem(lhapdf_name), Pdf::with_lhaid) + .zip(&funs.members) + .map(|(lhapdf_name, member)| { + lhapdf_name.parse().map_or_else( + |_| { + let 
member = member.unwrap_or(0); + // UNWRAP: we don't support sets with more members than `i32` + Pdf::with_setname_and_member(lhapdf_name, member.try_into().unwrap()) + }, + Pdf::with_lhaid, + ) }) .collect::>()?) } @@ -433,3 +446,25 @@ pub fn parse_order(order: &str) -> Result<(u32, u32)> { Ok((alphas, alpha)) } + +#[cfg(test)] +mod test { + use super::ConvFuns; + + #[test] + fn conv_fun_from_str() { + assert_eq!( + "A/2,B/1,C/0,D=X".parse::().unwrap(), + ConvFuns { + lhapdf_names: vec![ + "A".to_owned(), + "B".to_owned(), + "C".to_owned(), + "D".to_owned() + ], + members: vec![Some(2), Some(1), Some(0), None], + label: "X".to_owned() + } + ); + } +} From 4ff0319148c4fb0a9d780fd81f7d20d035949d28 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 21 Jun 2024 14:45:47 +0200 Subject: [PATCH 159/179] Add new helper function `create_conv_funs_for_set` and use it in `uncert` --- pineappl_cli/src/helpers.rs | 35 ++++++++++++++++++++++++++++++++++- pineappl_cli/src/uncert.rs | 13 ++++--------- 2 files changed, 38 insertions(+), 10 deletions(-) diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index b8035f49d..34cab50e5 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -1,5 +1,5 @@ use super::GlobalConfiguration; -use anyhow::{ensure, Context, Error, Result}; +use anyhow::{anyhow, ensure, Context, Error, Result}; use lhapdf::{Pdf, PdfSet}; use ndarray::Array3; use pineappl::convolutions::LumiCache; @@ -62,6 +62,39 @@ pub fn create_conv_funs(funs: &ConvFuns) -> Result> { .collect::>()?) } +pub fn create_conv_funs_for_set( + funs: &ConvFuns, + index_of_set: usize, +) -> Result<(PdfSet, Vec>)> { + let setname = &funs.lhapdf_names[index_of_set]; + let set = setname.parse().map_or_else( + |_| Ok::<_, Error>(PdfSet::new(setname)?), + |lhaid| { + Ok(PdfSet::new( + &lhapdf::lookup_pdf(lhaid) + .map(|(set, _)| set) + .ok_or(anyhow!( + "no convolution function for LHAID = `{lhaid}` found" + ))?, + )?) 
+ }, + )?; + + let conv_funs = set + .mk_pdfs()? + .into_iter() + .map(|conv_fun| { + // TODO: do not create objects that are getting overwritten in any case + let mut conv_funs = create_conv_funs(funs)?; + conv_funs[index_of_set] = conv_fun; + + Ok::<_, Error>(conv_funs) + }) + .collect::>()?; + + Ok((set, conv_funs)) +} + pub fn create_pdf(pdf: &str) -> Result { let pdf = pdf.split_once('=').map_or(pdf, |(name, _)| name); diff --git a/pineappl_cli/src/uncert.rs b/pineappl_cli/src/uncert.rs index fcae668b6..5334a14aa 100644 --- a/pineappl_cli/src/uncert.rs +++ b/pineappl_cli/src/uncert.rs @@ -116,18 +116,13 @@ impl Subcommand for Opts { .conv_fun .iter() .map(|&index| { - let set = conv_funs[index].set(); - let results: Vec<_> = set - .mk_pdfs()? + let (set, funs) = helpers::create_conv_funs_for_set(&self.conv_funs, index)?; + let results: Vec<_> = funs .into_par_iter() - .map(|fun| { - // TODO: do not create objects that are getting overwritten in any case - let mut conv_funs = helpers::create_conv_funs(&self.conv_funs)?; - conv_funs[index] = fun; - + .map(|mut funs| { Ok::<_, Error>(helpers::convolve( &grid, - &mut conv_funs, + &mut funs, &self.orders, &[], &[], From a29639bd476971710672ec83fd6239832f532c9e Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 21 Jun 2024 14:54:48 +0200 Subject: [PATCH 160/179] Migrate `pull` subcommand to use `ConvFuns` --- pineappl_cli/src/pull.rs | 96 ++++++++++++++++++++------------------ pineappl_cli/tests/pull.rs | 9 ++-- 2 files changed, 56 insertions(+), 49 deletions(-) diff --git a/pineappl_cli/src/pull.rs b/pineappl_cli/src/pull.rs index ee39bd6e0..99b2c64f8 100644 --- a/pineappl_cli/src/pull.rs +++ b/pineappl_cli/src/pull.rs @@ -1,6 +1,6 @@ -use super::helpers::{self, ConvoluteMode}; +use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; -use anyhow::Result; +use anyhow::{Error, Result}; use clap::{Parser, ValueHint}; use lhapdf::{Pdf, PdfSet}; use 
prettytable::{cell, Row}; @@ -8,7 +8,6 @@ use rayon::{prelude::*, ThreadPoolBuilder}; use std::num::NonZeroUsize; use std::path::PathBuf; use std::process::ExitCode; -use std::slice; use std::thread; // TODO: do we need the CL parameter? @@ -19,12 +18,13 @@ pub struct Opts { /// Path to the input grid. #[arg(value_hint = ValueHint::FilePath)] input: PathBuf, - /// LHAPDF id or name of the first PDF set. - #[arg(value_parser = helpers::parse_pdfset)] - pdfset1: String, - /// LHAPDF id or name of the second PDF set. - #[arg(value_parser = helpers::parse_pdfset)] - pdfset2: String, + /// LHAPDF ID(s) or name(s) of the first PDF(s)/FF(s). + conv_funs1: ConvFuns, + /// LHAPDF ID(s) or name(s) of the second PDF(s)/FF(s). + conv_funs2: ConvFuns, + /// Index for the convolution function for which the pull should be calculated. + #[arg(default_value = "0", long, value_name = "IDX")] + pull_from: usize, /// Confidence level in per cent. #[arg(default_value_t = lhapdf::CL_1_SIGMA, long)] cl: f64, @@ -52,10 +52,10 @@ impl Subcommand for Opts { fn run(&self, cfg: &GlobalConfiguration) -> Result { let grid = helpers::read_grid(&self.input)?; - let (set1, member1) = helpers::create_pdfset(&self.pdfset1)?; - let (set2, member2) = helpers::create_pdfset(&self.pdfset2)?; - let mut pdfset1 = set1.mk_pdfs()?; - let mut pdfset2 = set2.mk_pdfs()?; + let (set1, mut conv_funs1) = + helpers::create_conv_funs_for_set(&self.conv_funs1, self.pull_from)?; + let (set2, mut conv_funs2) = + helpers::create_conv_funs_for_set(&self.conv_funs2, self.pull_from)?; ThreadPoolBuilder::new() .num_threads(self.threads) @@ -64,35 +64,41 @@ impl Subcommand for Opts { let limit = grid.channels().len().min(self.limit); let bin_limits = helpers::convolve_limits(&grid, &[], ConvoluteMode::Normal); - let results1: Vec<_> = pdfset1 + let results1: Vec<_> = conv_funs1 .par_iter_mut() - .flat_map(|pdf| { - helpers::convolve( + .map(|mut funs| { + Ok::<_, Error>(helpers::convolve( &grid, - slice::from_mut(pdf), + 
&mut funs, &self.orders, &[], &[], 1, ConvoluteMode::Normal, cfg, - ) + )) }) + .collect::>()?; + let results1: Vec> = (0..results1[0].len()) + .map(|bin| (0..results1.len()).map(|pdf| results1[pdf][bin]).collect()) .collect(); - let results2: Vec<_> = pdfset2 + let results2: Vec<_> = conv_funs2 .par_iter_mut() - .flat_map(|pdf| { - helpers::convolve( + .map(|mut funs| { + Ok::<_, Error>(helpers::convolve( &grid, - slice::from_mut(pdf), + &mut funs, &self.orders, &[], &[], 1, ConvoluteMode::Normal, cfg, - ) + )) }) + .collect::>()?; + let results2: Vec> = (0..results2[0].len()) + .map(|bin| (0..results2.len()).map(|pdf| results2[pdf][bin]).collect()) .collect(); let mut title = Row::empty(); @@ -113,24 +119,16 @@ impl Subcommand for Opts { for (bin, limits) in bin_limits.iter().enumerate() { let (total, unc1, unc2) = { - let values1: Vec<_> = results1 - .iter() - .skip(bin) - .step_by(bin_limits.len()) - .copied() - .collect(); - let values2: Vec<_> = results2 - .iter() - .skip(bin) - .step_by(bin_limits.len()) - .copied() - .collect(); - let uncertainty1 = set1.uncertainty(&values1, self.cl, false)?; - let uncertainty2 = set2.uncertainty(&values2, self.cl, false)?; + let values1 = &results1[bin]; + let values2 = &results2[bin]; + let uncertainty1 = set1.uncertainty(values1, self.cl, false)?; + let uncertainty2 = set2.uncertainty(values2, self.cl, false)?; // if requested use the given member instead of the central value - let diff = member2.map_or(uncertainty2.central, |member| values2[member]) - - member1.map_or(uncertainty1.central, |member| values1[member]); + let diff = self.conv_funs2.members[self.pull_from] + .map_or(uncertainty2.central, |member| values2[member]) + - self.conv_funs1.members[self.pull_from] + .map_or(uncertainty1.central, |member| values1[member]); // use the uncertainties in the direction in which they point to each other let (unc1, unc2) = if diff > 0.0 { @@ -142,7 +140,7 @@ impl Subcommand for Opts { }; let channel_results = - |member: 
Option, pdfset: &mut Vec, set: &PdfSet| -> Vec { + |member: Option, pdfset: &mut [Vec], set: &PdfSet| -> Vec { if let Some(member) = member { (0..grid.channels().len()) .map(|channel| { @@ -150,7 +148,7 @@ impl Subcommand for Opts { channel_mask[channel] = true; match helpers::convolve( &grid, - slice::from_mut(&mut pdfset[member]), + &mut pdfset[member], &self.orders, &[bin], &channel_mask, @@ -168,14 +166,14 @@ impl Subcommand for Opts { } else { let results: Vec<_> = pdfset .iter_mut() - .flat_map(|pdf| { + .flat_map(|mut pdf| { (0..grid.channels().len()) .map(|channel| { let mut channel_mask = vec![false; grid.channels().len()]; channel_mask[channel] = true; match helpers::convolve( &grid, - slice::from_mut(pdf), + &mut pdf, &self.orders, &[bin], &channel_mask, @@ -210,8 +208,16 @@ impl Subcommand for Opts { let mut pull_tuples = if self.limit == 0 { vec![] } else { - let channel_results1 = channel_results(member1, &mut pdfset1, &set1); - let channel_results2 = channel_results(member2, &mut pdfset2, &set2); + let channel_results1 = channel_results( + self.conv_funs1.members[self.pull_from], + &mut conv_funs1, + &set1, + ); + let channel_results2 = channel_results( + self.conv_funs2.members[self.pull_from], + &mut conv_funs2, + &set2, + ); let pull_tuples: Vec<_> = channel_results2 .iter() diff --git a/pineappl_cli/tests/pull.rs b/pineappl_cli/tests/pull.rs index 4cc2f686b..2cbb506f0 100644 --- a/pineappl_cli/tests/pull.rs +++ b/pineappl_cli/tests/pull.rs @@ -4,14 +4,15 @@ use std::thread; const HELP_STR: &str = "Calculates the pull between two different PDF sets -Usage: pineappl pull [OPTIONS] +Usage: pineappl pull [OPTIONS] Arguments: - Path to the input grid - LHAPDF id or name of the first PDF set - LHAPDF id or name of the second PDF set + Path to the input grid + LHAPDF ID(s) or name(s) of the first PDF(s)/FF(s) + LHAPDF ID(s) or name(s) of the second PDF(s)/FF(s) Options: + --pull-from Index for the convolution function for which the pull should be 
calculated [default: 0] --cl Confidence level in per cent [default: 68.26894921370858] -l, --limit The maximum number of channels displayed [default: 10] -o, --orders Select orders manually From da7b9481fce62ed7f38e10fbe9784303c18f0b1e Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 21 Jun 2024 15:17:37 +0200 Subject: [PATCH 161/179] Change help text of `--pull-from` in `pull` --- pineappl_cli/src/pull.rs | 2 +- pineappl_cli/tests/pull.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pineappl_cli/src/pull.rs b/pineappl_cli/src/pull.rs index 99b2c64f8..7669e8465 100644 --- a/pineappl_cli/src/pull.rs +++ b/pineappl_cli/src/pull.rs @@ -22,7 +22,7 @@ pub struct Opts { conv_funs1: ConvFuns, /// LHAPDF ID(s) or name(s) of the second PDF(s)/FF(s). conv_funs2: ConvFuns, - /// Index for the convolution function for which the pull should be calculated. + /// Index of the convolution functions for which the pull should be calculated. #[arg(default_value = "0", long, value_name = "IDX")] pull_from: usize, /// Confidence level in per cent. 
diff --git a/pineappl_cli/tests/pull.rs b/pineappl_cli/tests/pull.rs index 2cbb506f0..5668a8f03 100644 --- a/pineappl_cli/tests/pull.rs +++ b/pineappl_cli/tests/pull.rs @@ -12,7 +12,7 @@ Arguments: LHAPDF ID(s) or name(s) of the second PDF(s)/FF(s) Options: - --pull-from Index for the convolution function for which the pull should be calculated [default: 0] + --pull-from Index of the convolution functions for which the pull should be calculated [default: 0] --cl Confidence level in per cent [default: 68.26894921370858] -l, --limit The maximum number of channels displayed [default: 10] -o, --orders Select orders manually From 8e0b172052e3bc9972655b1030073699eb3ee3d2 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 21 Jun 2024 15:36:33 +0200 Subject: [PATCH 162/179] Try to fix compilation error in CI --- pineappl_cli/src/helpers.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 34cab50e5..4197f19db 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -34,7 +34,9 @@ impl FromStr for ConvFuns { (fun.to_owned(), None) }) }) - .collect::>()?; + .collect::, _>>()? 
+ .into_iter() + .unzip(); Ok(Self { lhapdf_names, From 162e05a847c28835ca4bb25f7586d4557b5dadcb Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 24 Jun 2024 09:35:53 +0200 Subject: [PATCH 163/179] Update description of metadata --- docs/metadata.md | 42 ++++++++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/docs/metadata.md b/docs/metadata.md index e1eca4ccc..dbe7a5e94 100644 --- a/docs/metadata.md +++ b/docs/metadata.md @@ -1,12 +1,10 @@ -Metadata -======== +# Metadata -Metadata is stored in PineAPPL grids in the form of key–value pairs, which can +Metadata is stored in PineAPPL grids in the form of key--value pairs, which can be read indiviually with `pineappl read --get `, and `pineappl read ---show ` shows all key–value pairs. +--show ` shows all key--value pairs. -Known keys ----------- +## Known keys - `arxiv` - `description` @@ -15,33 +13,45 @@ Known keys - `results` - `results_pdf` -CLI-recognized keys -------------------- +## CLI-recognized keys The following keys are used in the CLI when printing numbers resulting from convolutions: - `x1_label`: label of the first dimension for every bin. If the bins have more - than one dimensions, keys with higher indices like `x2_label` and `x3_label` + than one dimension, keys with higher indices like `x2_label` and `x3_label` are used. - `x1_unit`: the physical unit for the first dimension for every bin. If the bins have more than one dimension, keys with higher indices like `x2_unit` and `x3_unit` are used. - `y_label`: label of the quantities stored in this grid. -- `y_unit`: physical unit of quantities stored in this grid. +- `y_unit`: physical unit(s) of the quantities stored in this grid. If + differential cross sections are stored, also the unit of the dividing + observable must be given, for instance a cross sections differential in an + invariant mass could have the units `fb/GeV`. 
-For each missing key a default value will be used +In all values avoid long strings and try to avoid using spaces, for instance +use `d2sig/dy/dMll` -Physical units --------------- +For each missing key a default value will be used. + +### Recommended values + + + +### Physical units Recognized units are: - `pb`, `fb`: picobarn, femtobarn - `GeV`: gigaelectronvolt -`pineappl plot ...`-recognized keys ------------------------------------ +Ratios are denoted using `/`, and powers are denoted using `^2` + +## `pineappl plot ...`-recognized keys + +These should contain the equivalent of `x1_label` with (La)TeX commands. If +symbols from math mode are used, they must be enclosed with `$`. -- `x1_label_tex`: +- `x1_label_tex` - `y_label_tex` From 3c529003a799ccb98f2541888a5f2fadf0dc5e33 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 24 Jun 2024 09:36:28 +0200 Subject: [PATCH 164/179] Remove unused types from fastNLO interface --- pineappl_fastnlo/src/lib.rs | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/pineappl_fastnlo/src/lib.rs b/pineappl_fastnlo/src/lib.rs index 2a3546d4e..e72b0d152 100644 --- a/pineappl_fastnlo/src/lib.rs +++ b/pineappl_fastnlo/src/lib.rs @@ -50,12 +50,6 @@ pub mod ffi { #[namespace = "fastNLO"] type EScaleFunctionalForm; - - #[namespace = "fastNLO"] - type ESMCalculation; - - #[namespace = "fastNLO"] - type ESMOrder; } unsafe extern "C++" { @@ -140,18 +134,6 @@ pub mod ffi { fn GetPDFPDG(&self, _: i32) -> i32; } - unsafe extern "C++" { - include!("fastnlotk/fastNLOCoeffData.h"); - - type fastNLOCoeffData; - } - - unsafe extern "C++" { - include!("fastnlotk/fastNLOCoeffMult.h"); - - type fastNLOCoeffMult; - } - unsafe extern "C++" { include!("pineappl_fastnlo/src/fastnlo.hpp"); From 8efeeffac3f010fd85fa2a4d5b1e599bbbc871b8 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 24 Jun 2024 12:34:50 +0200 Subject: [PATCH 165/179] Do not link `LHAPDF` but depend on crate --- Cargo.lock | 1 + 
pineappl_applgrid/Cargo.toml | 1 + pineappl_applgrid/build.rs | 13 +-- pineappl_applgrid/src/applgrid.cpp | 96 ++++++------------- pineappl_applgrid/src/applgrid.hpp | 7 +- .../src/{calculation.hpp => helpers.hpp} | 3 + pineappl_applgrid/src/lib.rs | 73 +++++++++++++- pineappl_cli/src/export.rs | 26 ++--- pineappl_cli/src/export/applgrid.rs | 13 +-- pineappl_cli/src/import.rs | 16 ++-- pineappl_cli/src/import/applgrid.rs | 13 +-- 11 files changed, 130 insertions(+), 132 deletions(-) rename pineappl_applgrid/src/{calculation.hpp => helpers.hpp} (80%) diff --git a/Cargo.lock b/Cargo.lock index 47e8aa2aa..bfa916d22 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1255,6 +1255,7 @@ dependencies = [ "cc", "cxx", "cxx-build", + "managed-lhapdf", "pkg-config", ] diff --git a/pineappl_applgrid/Cargo.toml b/pineappl_applgrid/Cargo.toml index 6eb5f4164..5b83e6d47 100644 --- a/pineappl_applgrid/Cargo.toml +++ b/pineappl_applgrid/Cargo.toml @@ -17,6 +17,7 @@ workspace = true [dependencies] cxx = "1.0.65" +lhapdf = { package = "managed-lhapdf", version = "0.3.0" } [build-dependencies] cc = "1.0.49" diff --git a/pineappl_applgrid/build.rs b/pineappl_applgrid/build.rs index 15af1894a..95964c509 100644 --- a/pineappl_applgrid/build.rs +++ b/pineappl_applgrid/build.rs @@ -135,22 +135,11 @@ fn main() { println!("cargo:rerun-if-env-changed=APPL_IGRID_DIR"); - let lhapdf = Config::new().atleast_version("6").probe("lhapdf").unwrap(); - - for lib_path in lhapdf.link_paths { - println!("cargo:rustc-link-search={}", lib_path.to_str().unwrap()); - } - - for lib in lhapdf.libs { - println!("cargo:rustc-link-lib={link_modifier}{lib}"); - } - conditional_std( cxx_build::bridge("src/lib.rs") .file("src/applgrid.cpp") .includes(&include_dirs) - .include(appl_igrid_dir) - .includes(lhapdf.include_paths), + .include(appl_igrid_dir), std, ) .compile("appl-bridge"); diff --git a/pineappl_applgrid/src/applgrid.cpp b/pineappl_applgrid/src/applgrid.cpp index 36ac9b2a9..de97efaf8 100644 --- 
a/pineappl_applgrid/src/applgrid.cpp +++ b/pineappl_applgrid/src/applgrid.cpp @@ -6,11 +6,12 @@ #include #include #include -#include #include #include -std::unique_ptr pdf; +void* user_data = nullptr; +rust::Fn* xfx = nullptr; +rust::Fn* alphas = nullptr; template rust::Vec std_vector_to_rust_vec(std::vector vector) @@ -21,66 +22,6 @@ rust::Vec std_vector_to_rust_vec(std::vector vector) return result; } -enum flavour_map_index : std::size_t -{ - anti_top, // -6: anti-top - anti_bottom, // -5: anti-bottom - anti_charm, // -4: anti-charm - anti_strange, // -3: anti-strange - anti_up, // -2: anti-up - anti_down, // -1: anti-down - gluon, // 21: gluon - down, // 1: down - up, // 2: up - strange, // 3: strange - charm, // 4: charm - bottom, // 5: bottom - top, // 6: top - photon, // 22: photon -}; - -std::array flavour_map = { - true, // -6: anti-top - true, // -5: anti-bottom - true, // -4: anti-charm - true, // -3: anti-strange - true, // -2: anti-up - true, // -1: anti-down - true, // 21: gluon - true, // 1: down - true, // 2: up - true, // 3: strange - true, // 4: charm - true, // 5: bottom - true, // 6: top - true, // 22: photon -}; - -constexpr int index_to_pdg_id(std::size_t index) -{ - return (index == gluon) ? 21 : ((index == photon) ? 
22 : (static_cast (index) - 6)); -} - -void xfx(double const& x, double const& q, double* xfx) -{ - for (std::size_t i = 0; i != flavour_map.size(); ++i) - { - if (flavour_map.at(i)) - { - xfx[i] = pdf->xfxQ(index_to_pdg_id(i), std::fmin(x, 1.0), q); - } - else - { - xfx[i] = 0.0; - } - } -} - -double as(double const& q) -{ - return pdf->alphasQ(q); -} - std::unique_ptr make_grid(rust::Str filename) { std::string name(filename.begin(), filename.end()); @@ -187,18 +128,37 @@ rust::Vec grid_combine(appl::grid const& grid) return std_vector_to_rust_vec(grid.combine()); } -rust::Vec grid_convolve( +rust::Vec grid_convolve_with_one( appl::grid& grid, - rust::Str pdfset, - int member, + rust::Fn xfx, + rust::Fn alphas, + void* user_data, int nloops, double rscale, double fscale, double escale ) { - pdf.reset(LHAPDF::mkPDF(std::string(pdfset.begin(), pdfset.end()), member)); - - auto const results = grid.vconvolute(xfx, as, nloops, rscale, fscale, escale); + // TODO: using global variables isn't thread-safe + ::user_data = user_data; + ::xfx = &xfx; + ::alphas = &alphas; + + auto const results = grid.vconvolute( + [](double const& x, double const& q2, double* results) { + (*::xfx)(x, q2, results, ::user_data); + }, + [](double const& q2) { + return (*::alphas)(q2, ::user_data); + }, + nloops, + rscale, + fscale, + escale + ); + + ::user_data = nullptr; + ::xfx = nullptr; + ::alphas = nullptr; return std_vector_to_rust_vec(results); } diff --git a/pineappl_applgrid/src/applgrid.hpp b/pineappl_applgrid/src/applgrid.hpp index fd2e4d9b0..6a401cbf3 100644 --- a/pineappl_applgrid/src/applgrid.hpp +++ b/pineappl_applgrid/src/applgrid.hpp @@ -58,10 +58,11 @@ std::unique_ptr make_lumi_pdf(rust::Str s, rust::Slice comb rust::Vec grid_combine(appl::grid const& grid); -rust::Vec grid_convolve( +rust::Vec grid_convolve_with_one( appl::grid& grid, - rust::Str pdfset, - int member, + rust::Fn xfx, + rust::Fn alphas, + void* user_data, int nloops, double rscale, double fscale, diff --git 
a/pineappl_applgrid/src/calculation.hpp b/pineappl_applgrid/src/helpers.hpp similarity index 80% rename from pineappl_applgrid/src/calculation.hpp rename to pineappl_applgrid/src/helpers.hpp index c2e724d6c..6c8e8d0b7 100644 --- a/pineappl_applgrid/src/calculation.hpp +++ b/pineappl_applgrid/src/helpers.hpp @@ -6,4 +6,7 @@ // hack to make declaration of `CALCULATION` in Rust an element of the class `grid` using grid_CALCULATION = appl::grid::CALCULATION; +// enable passing void pointers +using c_void = void; + #endif diff --git a/pineappl_applgrid/src/lib.rs b/pineappl_applgrid/src/lib.rs index ab8f28216..e476ec28c 100644 --- a/pineappl_applgrid/src/lib.rs +++ b/pineappl_applgrid/src/lib.rs @@ -10,6 +10,14 @@ #![allow(clippy::too_many_arguments)] #![allow(missing_docs)] +use lhapdf::Pdf; +use std::mem; +use std::pin::Pin; +use std::slice; +use std::sync::{Mutex, OnceLock}; + +static MUTEX: OnceLock> = OnceLock::new(); + #[cxx::bridge] pub mod ffi { #[repr(u32)] @@ -21,9 +29,14 @@ pub mod ffi { unsafe extern "C++" { // this header is needed to make the enum a member of the class `appl::grid` - include!("pineappl_applgrid/src/calculation.hpp"); + include!("pineappl_applgrid/src/helpers.hpp"); type grid_CALCULATION; + + // TODO: `std::ffi::c_void` isn't support by cxx, see: + // https://github.com/dtolnay/cxx/issues/1049, though there is a PR: + // https://github.com/dtolnay/cxx/pull/1204 + type c_void; } #[namespace = "appl"] @@ -111,10 +124,12 @@ pub mod ffi { fn make_lumi_pdf(_: &str, _: &[i32]) -> UniquePtr; fn grid_combine(_: &grid) -> Vec; - fn grid_convolve( + + unsafe fn grid_convolve_with_one( _: Pin<&mut grid>, - _: &str, - _: i32, + _: unsafe fn(&f64, &f64, *mut f64, *mut c_void), + _: unsafe fn(&f64, *mut c_void) -> f64, + _: *mut c_void, _: i32, _: f64, _: f64, @@ -132,3 +147,53 @@ pub mod ffi { fn igrid_weightgrid(_: Pin<&mut igrid>, _: usize) -> Pin<&mut SparseMatrix3d>; } } + +pub fn grid_convolve_with_one( + grid: Pin<&mut ffi::grid>, + pdf: &mut Pdf, 
+ nloops: i32, + rscale: f64, + fscale: f64, + escale: f64, +) -> Vec { + let xfx = |x: &f64, q: &f64, results: *mut f64, pdf: *mut ffi::c_void| { + let pdf = unsafe { &mut *pdf.cast::() }; + let results = unsafe { slice::from_raw_parts_mut(results, 14) }; + for (pid, result) in [-6, -5, -4, -3, -2, -1, 21, 1, 2, 3, 4, 5, 6, 22] + .into_iter() + .zip(results.iter_mut()) + { + // some grids have x-nodes at slightly larger values than `1.0`; in that cases these + // are numerical problems which we 'fix' by evaluating at exactly `1.0` instead + *result = pdf.xfx_q2(pid, x.min(1.0), *q * *q); + } + }; + + let alphas = |q: &f64, pdf: *mut ffi::c_void| -> f64 { + let pdf = unsafe { &mut *pdf.cast::() }; + pdf.alphas_q2(*q * *q) + }; + + let lock = MUTEX + .get_or_init(|| Mutex::new(())) + .lock() + // UNWRAP: if this fails there's an unexpected bug somewhere + .unwrap_or_else(|_| unreachable!()); + + let results = unsafe { + ffi::grid_convolve_with_one( + grid, + xfx, + alphas, + (pdf as *mut Pdf).cast::(), + nloops, + rscale, + fscale, + escale, + ) + }; + + mem::drop(lock); + + results +} diff --git a/pineappl_cli/src/export.rs b/pineappl_cli/src/export.rs index 7986a6c82..c2ece1d50 100644 --- a/pineappl_cli/src/export.rs +++ b/pineappl_cli/src/export.rs @@ -3,6 +3,7 @@ use super::{GlobalConfiguration, Subcommand}; use anyhow::{anyhow, Result}; use clap::builder::{PossibleValuesParser, TypedValueParser}; use clap::{Parser, ValueHint}; +use lhapdf::Pdf; use pineappl::boc::Order; use pineappl::grid::Grid; use std::path::{Path, PathBuf}; @@ -15,8 +16,7 @@ mod applgrid; fn convert_into_applgrid( output: &Path, grid: &Grid, - pdfset: &str, - member: usize, + pdf: &mut Pdf, _: usize, discard_non_matching_scales: bool, ) -> Result<(&'static str, Vec, usize, Vec)> { @@ -24,7 +24,7 @@ fn convert_into_applgrid( let (mut applgrid, order_mask) = applgrid::convert_into_applgrid(grid, output, discard_non_matching_scales)?; - let results = 
applgrid::convolve_applgrid(applgrid.pin_mut(), pdfset, member); + let results = applgrid::convolve_applgrid(applgrid.pin_mut(), pdf); Ok(("APPLgrid", results, 1, order_mask)) } @@ -33,8 +33,7 @@ fn convert_into_applgrid( fn convert_into_applgrid( _: &Path, _: &Grid, - _: &str, - _: usize, + _: &mut Pdf, _: usize, _: bool, ) -> Result<(&'static str, Vec, usize, Vec)> { @@ -46,21 +45,13 @@ fn convert_into_applgrid( fn convert_into_grid( output: &Path, grid: &Grid, - pdfset: &str, - member: usize, + pdf: &mut Pdf, scales: usize, discard_non_matching_scales: bool, ) -> Result<(&'static str, Vec, usize, Vec)> { if let Some(extension) = output.extension() { if extension == "appl" || extension == "root" { - return convert_into_applgrid( - output, - grid, - pdfset, - member, - scales, - discard_non_matching_scales, - ); + return convert_into_applgrid(output, grid, pdf, scales, discard_non_matching_scales); } } @@ -106,13 +97,13 @@ impl Subcommand for Opts { use prettytable::{cell, row}; let grid = helpers::read_grid(&self.input)?; + let mut pdf = helpers::create_pdf(&self.pdfset)?; // TODO: figure out `member` from `self.pdfset` let (grid_type, results, scale_variations, order_mask) = convert_into_grid( &self.output, &grid, - &self.pdfset, - 0, + &mut pdf, self.scales, self.discard_non_matching_scales, )?; @@ -155,7 +146,6 @@ impl Subcommand for Opts { if results.is_empty() { println!("file was converted, but we cannot check the conversion for this type"); } else { - let mut pdf = helpers::create_pdf(&self.pdfset)?; let reference_results = helpers::convolve( &grid, &mut pdf, diff --git a/pineappl_cli/src/export/applgrid.rs b/pineappl_cli/src/export/applgrid.rs index 869105f08..28cef6f34 100644 --- a/pineappl_cli/src/export/applgrid.rs +++ b/pineappl_cli/src/export/applgrid.rs @@ -1,6 +1,7 @@ use anyhow::{anyhow, bail, Result}; use cxx::{let_cxx_string, UniquePtr}; use float_cmp::approx_eq; +use lhapdf::Pdf; use ndarray::{s, Axis}; use pineappl::boc::Order; use 
pineappl::convolutions::Convolution; @@ -302,16 +303,8 @@ pub fn convert_into_applgrid( } // TODO: deduplicate this function from import -pub fn convolve_applgrid(grid: Pin<&mut grid>, pdfset: &str, member: usize) -> Vec { +pub fn convolve_applgrid(grid: Pin<&mut grid>, pdf: &mut Pdf) -> Vec { let nloops = grid.nloops(); - ffi::grid_convolve( - grid, - pdfset, - member.try_into().unwrap(), - nloops, - 1.0, - 1.0, - 1.0, - ) + pineappl_applgrid::grid_convolve_with_one(grid, pdf, nloops, 1.0, 1.0, 1.0) } diff --git a/pineappl_cli/src/import.rs b/pineappl_cli/src/import.rs index 727bc718b..858bc44a5 100644 --- a/pineappl_cli/src/import.rs +++ b/pineappl_cli/src/import.rs @@ -3,6 +3,7 @@ use super::{GlobalConfiguration, Subcommand}; use anyhow::{anyhow, Result}; use clap::builder::{PossibleValuesParser, TypedValueParser}; use clap::{Parser, ValueHint}; +use lhapdf::Pdf; use pineappl::grid::Grid; use std::path::{Path, PathBuf}; use std::process::ExitCode; @@ -18,8 +19,7 @@ mod fktable; fn convert_applgrid( input: &Path, alpha: u32, - pdfset: &str, - member: usize, + pdf: &mut Pdf, dis_pid: i32, _: usize, ) -> Result<(&'static str, Grid, Vec, usize)> { @@ -29,7 +29,7 @@ fn convert_applgrid( let mut grid = ffi::make_grid(input.to_str().unwrap())?; let pgrid = applgrid::convert_applgrid(grid.pin_mut(), alpha, dis_pid)?; - let results = applgrid::convolve_applgrid(grid.pin_mut(), pdfset, member); + let results = applgrid::convolve_applgrid(grid.pin_mut(), pdf); Ok(("APPLgrid", pgrid, results, 1)) } @@ -38,8 +38,7 @@ fn convert_applgrid( fn convert_applgrid( _: &Path, _: u32, - _: &str, - _: usize, + _: &mut Pdf, _: i32, _: usize, ) -> Result<(&'static str, Grid, Vec, usize)> { @@ -147,6 +146,7 @@ fn convert_fktable(_: &Path, _: i32) -> Result<(&'static str, Grid, Vec, us fn convert_grid( input: &Path, alpha: u32, + pdf: &mut Pdf, pdfset: &str, member: usize, dis_pid: i32, @@ -168,7 +168,7 @@ fn convert_grid( } else if extension == "dat" { return convert_fktable(input, 
dis_pid); } else if extension == "appl" || extension == "root" { - return convert_applgrid(input, alpha, pdfset, member, dis_pid, scales); + return convert_applgrid(input, alpha, pdf, dis_pid, scales); } } @@ -253,10 +253,13 @@ impl Subcommand for Opts { fn run(&self, cfg: &GlobalConfiguration) -> Result { use prettytable::{cell, row}; + let mut pdf = helpers::create_pdf(&self.pdfset)?; + // TODO: figure out `member` from `self.pdfset` let (grid_type, mut grid, reference_results, scale_variations) = convert_grid( &self.input, self.alpha, + &mut pdf, &self.pdfset, 0, self.dis_pid, @@ -274,7 +277,6 @@ impl Subcommand for Opts { if reference_results.is_empty() { println!("file was converted, but we cannot check the conversion for this type"); } else { - let mut pdf = helpers::create_pdf(&self.pdfset)?; let results = helpers::convolve( &grid, &mut pdf, diff --git a/pineappl_cli/src/import/applgrid.rs b/pineappl_cli/src/import/applgrid.rs index 76baff863..34996f7c6 100644 --- a/pineappl_cli/src/import/applgrid.rs +++ b/pineappl_cli/src/import/applgrid.rs @@ -1,4 +1,5 @@ use anyhow::Result; +use lhapdf::Pdf; use pineappl::boc::{Channel, Order}; use pineappl::convolutions::Convolution; use pineappl::grid::Grid; @@ -245,16 +246,8 @@ pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, dis_pid: i32) -> Resul Ok(grid0) } -pub fn convolve_applgrid(grid: Pin<&mut grid>, pdfset: &str, member: usize) -> Vec { +pub fn convolve_applgrid(grid: Pin<&mut grid>, pdf: &mut Pdf) -> Vec { let nloops = grid.nloops(); - ffi::grid_convolve( - grid, - pdfset, - member.try_into().unwrap(), - nloops, - 1.0, - 1.0, - 1.0, - ) + pineappl_applgrid::grid_convolve_with_one(grid, pdf, nloops, 1.0, 1.0, 1.0) } From 83f746d3f92141522ab24bdb1702813a67a688d0 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 24 Jun 2024 12:35:24 +0200 Subject: [PATCH 166/179] Fix formatting of documentation --- pineappl_capi/src/lib.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 
deletions(-) diff --git a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index 730303ae3..38c45d2d7 100644 --- a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -645,18 +645,18 @@ pub unsafe extern "C" fn pineappl_grid_order_count(grid: *const Grid) -> usize { /// Creates a new and empty grid. The creation requires four different sets of parameters: /// - The luminosity function `lumi`: A pointer to the luminosity function that specifies how the -/// cross section should be reconstructed. +/// cross section should be reconstructed. /// - Order specification `orders` and `order_params`. Each `PineAPPL` grid contains a number of -/// different perturbative orders, specified by `orders`. The array `order_params` stores the -/// exponent of each perturbative order and must contain 4 integers denoting the exponent of the -/// string coupling, of the electromagnetic coupling, of the logarithm of the renormalization -/// scale, and finally of the logarithm of the factorization scale. +/// different perturbative orders, specified by `orders`. The array `order_params` stores the +/// exponent of each perturbative order and must contain 4 integers denoting the exponent of the +/// string coupling, of the electromagnetic coupling, of the logarithm of the renormalization +/// scale, and finally of the logarithm of the factorization scale. /// - The observable definition `bins` and `bin_limits`. Each `PineAPPL` grid can store observables -/// from a one-dimensional distribution. To this end `bins` specifies how many observables are -/// stored and `bin_limits` must contain `bins + 1` entries denoting the left and right limit for -/// each bin. +/// from a one-dimensional distribution. To this end `bins` specifies how many observables are +/// stored and `bin_limits` must contain `bins + 1` entries denoting the left and right limit for +/// each bin. 
/// - More (optional) information can be given in a key-value storage `key_vals`, which might be -/// a null pointer, to signal there are no further parameters that need to be set. +/// a null pointer, to signal there are no further parameters that need to be set. /// /// # Safety /// From 29cd6a9474f807c45c2063e71a5889feeb88ae0a Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 25 Jun 2024 09:40:15 +0200 Subject: [PATCH 167/179] Link `lhapdf` statically --- pineappl_applgrid/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pineappl_applgrid/Cargo.toml b/pineappl_applgrid/Cargo.toml index 5b83e6d47..e4af8f163 100644 --- a/pineappl_applgrid/Cargo.toml +++ b/pineappl_applgrid/Cargo.toml @@ -25,4 +25,4 @@ cxx-build = "1.0.65" pkg-config = "0.3.26" [features] -static = [] +static = ["lhapdf/static"] From 8652b8d1438ac978a7387491f9bb47eb2b5483b3 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 25 Jun 2024 09:43:23 +0200 Subject: [PATCH 168/179] Print more information about the linking process --- .github/workflows/release.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 300c489f1..d51fe79f4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -128,12 +128,14 @@ jobs: - uses: actions/checkout@v3 - name: Compile binary run: | - cargo install --all-features --locked --path pineappl_cli --root=prefix --target=${{ matrix.target }} + cargo install --all-features --locked --path pineappl_cli --root=prefix --target=${{ matrix.target }} --verbose # build manpages mkdir -p prefix/share/man/man1 cargo xtask install-manpages prefix/share/man/man1 cd prefix tar czf ../pineappl_cli-${{ matrix.target }}.tar.gz . 
+ # print the dynamically linked libraries + ldd prefix/bin/pineappl - name: Upload artifact # upload-artifact@v4 uses a newer version of Node that's incompatible with our container's GLIBC uses: actions/upload-artifact@v3 From 8f485f2321e66a4cc81d7f92d710691d9e230037 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 25 Jun 2024 09:55:39 +0200 Subject: [PATCH 169/179] Fix wrong path in release workflow --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d51fe79f4..20029c6d6 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -135,7 +135,7 @@ jobs: cd prefix tar czf ../pineappl_cli-${{ matrix.target }}.tar.gz . # print the dynamically linked libraries - ldd prefix/bin/pineappl + ldd bin/pineappl - name: Upload artifact # upload-artifact@v4 uses a newer version of Node that's incompatible with our container's GLIBC uses: actions/upload-artifact@v3 From 5f9189893c2e554e0fa1e64bb4b11fc670bab5b4 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 26 Jun 2024 11:11:16 +0200 Subject: [PATCH 170/179] Bump version of `managed-lhapdf` to 0.3.2 --- Cargo.lock | 4 ++-- pineappl/Cargo.toml | 2 +- pineappl_applgrid/Cargo.toml | 2 +- pineappl_cli/Cargo.toml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bfa916d22..cf9c98177 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -929,9 +929,9 @@ dependencies = [ [[package]] name = "managed-lhapdf" -version = "0.3.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "188f1953ebeb0286d1d129c075e43d61f592e8c8e555aa631a92164c44384964" +checksum = "9c513a8fe45b2e88dfd5c73b87ac20979b4bc5af8ca6acd4aff7082d79195be0" dependencies = [ "anyhow", "cxx", diff --git a/pineappl/Cargo.toml b/pineappl/Cargo.toml index b2e9eee25..c7c1afa3c 100644 --- a/pineappl/Cargo.toml +++ b/pineappl/Cargo.toml @@ 
-32,7 +32,7 @@ thiserror = "1.0.30" [dev-dependencies] anyhow = "1.0.48" -lhapdf = { package = "managed-lhapdf", version = "0.3.0" } +lhapdf = { package = "managed-lhapdf", version = "0.3.2" } num-complex = "0.4.4" rand = { default-features = false, version = "0.8.4" } rand_pcg = { default-features = false, version = "0.3.1" } diff --git a/pineappl_applgrid/Cargo.toml b/pineappl_applgrid/Cargo.toml index e4af8f163..fde6ef3a1 100644 --- a/pineappl_applgrid/Cargo.toml +++ b/pineappl_applgrid/Cargo.toml @@ -17,7 +17,7 @@ workspace = true [dependencies] cxx = "1.0.65" -lhapdf = { package = "managed-lhapdf", version = "0.3.0" } +lhapdf = { package = "managed-lhapdf", version = "0.3.2" } [build-dependencies] cc = "1.0.49" diff --git a/pineappl_cli/Cargo.toml b/pineappl_cli/Cargo.toml index 7440fb640..dbd3c8393 100644 --- a/pineappl_cli/Cargo.toml +++ b/pineappl_cli/Cargo.toml @@ -26,7 +26,7 @@ flate2 = { optional = true, version = "1.0.22" } float-cmp = "0.9.0" git-version = "0.3.5" itertools = "0.10.1" -lhapdf = { package = "managed-lhapdf", version = "0.3.0" } +lhapdf = { package = "managed-lhapdf", version = "0.3.2" } lz4_flex = { optional = true, version = "0.9.2" } ndarray = "0.15.4" ndarray-npy = { optional = true, version = "0.8.1" } From ed74eca5eb41c4f06add7b60e495a5ded5138581 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 26 Jun 2024 17:02:20 +0200 Subject: [PATCH 171/179] Fix zlib linking --- pineappl_applgrid/build.rs | 9 ++++++++- pineappl_fastnlo/build.rs | 11 ----------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/pineappl_applgrid/build.rs b/pineappl_applgrid/build.rs index 95964c509..7719d3e7c 100644 --- a/pineappl_applgrid/build.rs +++ b/pineappl_applgrid/build.rs @@ -99,7 +99,14 @@ fn main() { let link_modifier = if cfg!(feature = "static") { // for some reason `libz.a` isn't found, although `libz.so` is - for link_path in Config::new().probe("zlib").unwrap().link_paths { + let zlib_link_paths = Config::new() + 
.cargo_metadata(false) + .statik(true) + .probe("zlib") + .unwrap() + .link_paths; + + for link_path in zlib_link_paths { println!("cargo:rustc-link-search={}", link_path.to_str().unwrap()); } diff --git a/pineappl_fastnlo/build.rs b/pineappl_fastnlo/build.rs index a4aed63de..2520cda98 100644 --- a/pineappl_fastnlo/build.rs +++ b/pineappl_fastnlo/build.rs @@ -46,17 +46,6 @@ fn main() { .unwrap(); let link_modifier = if cfg!(feature = "static") { - let zlib = Config::new().probe("zlib").unwrap(); - - // for some reason `libz.a` isn't found, although `libz.so` is - for link_path in zlib.link_paths { - println!("cargo:rustc-link-search={}", link_path.to_str().unwrap()); - } - - for lib in zlib.libs { - println!("cargo:rustc-link-lib=static={lib}"); - } - "static=" } else { "" From 3965a9deda11b0cb6f61d3c5036d9903fbe45f3d Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 26 Jun 2024 17:02:53 +0200 Subject: [PATCH 172/179] Do not link `pineappl_fastnlo` against LHAPDF --- pineappl_fastnlo/build.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pineappl_fastnlo/build.rs b/pineappl_fastnlo/build.rs index 2520cda98..0eadd2b81 100644 --- a/pineappl_fastnlo/build.rs +++ b/pineappl_fastnlo/build.rs @@ -53,12 +53,18 @@ fn main() { println!("cargo:rustc-link-lib={link_modifier}fastnlotoolkit"); - let lhapdf = Config::new().atleast_version("6").probe("lhapdf").unwrap(); + let lhapdf_include_paths = Config::new() + .atleast_version("6") + .statik(cfg!(feature = "static")) + .cargo_metadata(false) + .probe("lhapdf") + .unwrap() + .include_paths; cxx_build::bridge("src/lib.rs") .file("src/fastnlo.cpp") .include(fnlo_include_path.trim()) - .includes(lhapdf.include_paths) + .includes(lhapdf_include_paths) .std("c++11") // apparently not supported by MSVC, but fastNLO probably can't be compiled on Windows .compile("fnlo-bridge"); From 787c244c51ffd3a9e7681a866d6f0759e08c5ff5 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 26 Jun 
2024 17:20:49 +0200 Subject: [PATCH 173/179] Increase verbosity of CLI build on Linux --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 20029c6d6..8fa0463cd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -128,7 +128,7 @@ jobs: - uses: actions/checkout@v3 - name: Compile binary run: | - cargo install --all-features --locked --path pineappl_cli --root=prefix --target=${{ matrix.target }} --verbose + cargo install --all-features --locked --path pineappl_cli --root=prefix --target=${{ matrix.target }} -vv # build manpages mkdir -p prefix/share/man/man1 cargo xtask install-manpages prefix/share/man/man1 From fc5bbf93094f9f8fa6633fdd6caac752270fad2f Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 26 Jun 2024 17:28:56 +0200 Subject: [PATCH 174/179] Add another static feature flag --- pineappl/Cargo.toml | 3 +++ pineappl_cli/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/pineappl/Cargo.toml b/pineappl/Cargo.toml index c7c1afa3c..c279937e9 100644 --- a/pineappl/Cargo.toml +++ b/pineappl/Cargo.toml @@ -38,3 +38,6 @@ rand = { default-features = false, version = "0.8.4" } rand_pcg = { default-features = false, version = "0.3.1" } serde_yaml = "0.9.13" ndarray-npy = "0.8.1" + +[features] +static = ["lhapdf/static"] diff --git a/pineappl_cli/Cargo.toml b/pineappl_cli/Cargo.toml index dbd3c8393..461729365 100644 --- a/pineappl_cli/Cargo.toml +++ b/pineappl_cli/Cargo.toml @@ -56,4 +56,4 @@ applgrid = ["dep:cxx", "dep:pineappl_applgrid"] evolve = ["dep:base64", "dep:either", "dep:tar", "dep:lz4_flex", "dep:ndarray-npy", "dep:serde", "dep:serde_yaml"] fastnlo = ["dep:pineappl_fastnlo"] fktable = ["dep:flate2", "dep:tar"] -static = ["lhapdf/static", "pineappl_applgrid?/static", "pineappl_fastnlo?/static"] +static = ["lhapdf/static", "pineappl/static", "pineappl_applgrid?/static", 
"pineappl_fastnlo?/static"] From f564ac5f01af7ea3add2962a48c76f404ca070af Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 27 Jun 2024 08:51:11 +0200 Subject: [PATCH 175/179] Bump version of `managed-lhapdf` to fix static linking --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cf9c98177..d70e32868 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -929,9 +929,9 @@ dependencies = [ [[package]] name = "managed-lhapdf" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c513a8fe45b2e88dfd5c73b87ac20979b4bc5af8ca6acd4aff7082d79195be0" +checksum = "f930f0fd8c87a299efd84e910c2c0db3cd04fcbfe2952ccdced47dd830dc017c" dependencies = [ "anyhow", "cxx", From a981f7e2241410c1c73bc495aebdda3c7a64e306 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 27 Jun 2024 10:30:15 +0200 Subject: [PATCH 176/179] Add missing units for PDF uncertainties --- pineappl_cli/src/uncert.rs | 4 +++- pineappl_cli/tests/uncert.rs | 28 ++++++++++++++-------------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/pineappl_cli/src/uncert.rs b/pineappl_cli/src/uncert.rs index 5334a14aa..fe1e79715 100644 --- a/pineappl_cli/src/uncert.rs +++ b/pineappl_cli/src/uncert.rs @@ -184,7 +184,9 @@ impl Subcommand for Opts { for &index in &self.group.conv_fun { title.add_cell( - cell!(c->format!("{}", self.conv_funs.lhapdf_names[index])).with_hspan(3), + // TODO: fix alignment for second title row + cell!(c->format!("{}\n[{y_unit}] [%] [%]", self.conv_funs.lhapdf_names[index])) + .with_hspan(3), ); } diff --git a/pineappl_cli/tests/uncert.rs b/pineappl_cli/tests/uncert.rs index 774751e89..c2e1da4d2 100644 --- a/pineappl_cli/tests/uncert.rs +++ b/pineappl_cli/tests/uncert.rs @@ -25,21 +25,21 @@ Options: "; const DEFAULT_STR: &str = - "b etal dsig/detal NNPDF31_nlo_as_0118_luxqed NNPDF40_nnlo_as_01180 - [] [pb] 
--+----+----+-----------+-----------+---------+---------+-----------+-------+------- -0 2 2.25 7.7302788e2 7.7302788e2 -0.67 0.67 7.7302788e2 -0.62 0.62 -1 2.25 2.5 7.0634852e2 7.0634851e2 -0.72 0.72 7.0634851e2 -0.62 0.62 -2 2.5 2.75 6.1354750e2 6.1354750e2 -0.78 0.78 6.1354750e2 -0.64 0.64 -3 2.75 3 4.9584391e2 4.9584391e2 -0.86 0.86 4.9584391e2 -0.68 0.68 -4 3 3.25 3.6957893e2 3.6957893e2 -0.97 0.97 3.6957893e2 -0.76 0.76 -5 3.25 3.5 2.5143057e2 2.5143057e2 -1.14 1.14 2.5143057e2 -0.89 0.89 -6 3.5 4 1.1962468e2 1.1962468e2 -1.55 1.55 1.1962468e2 -1.34 1.34 -7 4 4.5 2.9665790e1 2.9665790e1 -2.56 2.56 2.9665789e1 -3.51 3.51 + "b etal dsig/detal NNPDF31_nlo_as_0118_luxqed NNPDF40_nnlo_as_01180 + [] [pb] [pb] [%] [%] [pb] [%] [%] +-+----+----+-----------+-----------+---------+---------+-----------+--------+-------- +0 2 2.25 7.7302788e2 7.7302788e2 -0.67 0.67 7.7302788e2 -0.62 0.62 +1 2.25 2.5 7.0634852e2 7.0634851e2 -0.72 0.72 7.0634851e2 -0.62 0.62 +2 2.5 2.75 6.1354750e2 6.1354750e2 -0.78 0.78 6.1354750e2 -0.64 0.64 +3 2.75 3 4.9584391e2 4.9584391e2 -0.86 0.86 4.9584391e2 -0.68 0.68 +4 3 3.25 3.6957893e2 3.6957893e2 -0.97 0.97 3.6957893e2 -0.76 0.76 +5 3.25 3.5 2.5143057e2 2.5143057e2 -1.14 1.14 2.5143057e2 -0.89 0.89 +6 3.5 4 1.1962468e2 1.1962468e2 -1.55 1.55 1.1962468e2 -1.34 1.34 +7 4 4.5 2.9665790e1 2.9665790e1 -2.56 2.56 2.9665789e1 -3.51 3.51 "; const CL_90_STR: &str = "b etal dsig/detal NNPDF31_nlo_as_0118_luxqed - [] [pb] + [] [pb] [pb] [%] [%] -+----+----+-----------+-----------+---------+--------- 0 2 2.25 7.5459110e2 7.5461655e2 -1.87 1.87 1 2.25 2.5 6.9028342e2 6.9027941e2 -1.90 1.90 @@ -52,7 +52,7 @@ const CL_90_STR: &str = "b etal dsig/detal NNPDF31_nlo_as_0118_luxqed "; const INTEGRATED_STR: &str = "b etal integ NNPDF31_nlo_as_0118_luxqed - [] [] + [] [] [] [%] [%] -+----+----+-----------+-----------+---------+--------- 0 2 2.25 1.8864777e2 1.8865414e2 -1.14 1.14 1 2.25 2.5 1.7257086e2 1.7256985e2 -1.16 1.16 @@ -65,7 +65,7 @@ const INTEGRATED_STR: 
&str = "b etal integ NNPDF31_nlo_as_0118_luxq "; const ORDERS_A2_AS1A2_STR: &str = "b etal dsig/detal NNPDF31_nlo_as_0118_luxqed - [] [pb] + [] [pb] [pb] [%] [%] -+----+----+-----------+-----------+---------+--------- 0 2 2.25 7.6246034e2 7.6248591e2 -1.14 1.14 1 2.25 2.5 6.9684577e2 6.9684166e2 -1.16 1.16 From 1d4af9614b9338d477ab56a673b31a2685bc1940 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 27 Jun 2024 10:52:45 +0200 Subject: [PATCH 177/179] Migrate parts of `plot` to `ConvFuns` --- CHANGELOG.md | 3 ++ pineappl_cli/src/plot.py | 10 ++--- pineappl_cli/src/plot.rs | 78 +++++++++++++++++++++++--------------- pineappl_cli/tests/plot.rs | 11 +++--- 4 files changed, 62 insertions(+), 40 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 38c80703c..e63a8eceb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - added `PidBasis::charge_conjugate` and `PidBasis::guess` - added `Grid::set_pid_basis` method - added `Grid::subgrids` and `Grid::subgrids_mut` methods +- added new switch `conv_fun_uncert_from` to subcommand `plot` to allow + choosing with convolution function uncertainty should be plotted ### Changed @@ -43,6 +45,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 switch now optionally accepts a list of indices, which determines the corresponding convolution function (PDF/FF), for which the uncertainty should calculated +- renamed `no_pdf_unc` to `no_conv_fun_unc` in subcommand `plot` ### Removed diff --git a/pineappl_cli/src/plot.py b/pineappl_cli/src/plot.py index 09f4d67ce..01eb8ed27 100644 --- a/pineappl_cli/src/plot.py +++ b/pineappl_cli/src/plot.py @@ -69,11 +69,11 @@ def main(): {inte}plot_int, {nint}plot_abs, {nint}plot_rel_ewonoff, - {pdfs}plot_abs_pdfs, - {pdfs}plot_ratio_pdf, - {pdfs}plot_double_ratio_pdf, - {pdfs}plot_rel_pdfunc, - {pdfs}plot_rel_pdfpull, + {nconvs}plot_abs_pdfs, + 
{nconvs}plot_ratio_pdf, + {nconvs}plot_double_ratio_pdf, + {nconvs}plot_rel_pdfunc, + {nconvs}plot_rel_pdfpull, ] mpl.rcParams.update(stylesheet) diff --git a/pineappl_cli/src/plot.rs b/pineappl_cli/src/plot.rs index 8b4d93910..2bc9b5a83 100644 --- a/pineappl_cli/src/plot.rs +++ b/pineappl_cli/src/plot.rs @@ -1,4 +1,4 @@ -use super::helpers::{self, ConvoluteMode}; +use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::builder::{PossibleValuesParser, TypedValueParser}; @@ -23,8 +23,11 @@ pub struct Opts { #[arg(value_hint = ValueHint::FilePath)] input: PathBuf, /// LHAPDF id(s) or name of the PDF set(s). - #[arg(required = true, value_parser = helpers::parse_pdfset)] - pdfsets: Vec, + #[arg(required = true)] + conv_funs: Vec, + /// Choose for which convolution function the uncertainty should be calculated. + #[arg(default_value = "0", long, value_name = "IDX")] + conv_fun_uncert_from: usize, /// Set the number of scale variations. #[arg( default_value_t = 7, @@ -50,7 +53,7 @@ pub struct Opts { threads: usize, /// Disable the (time-consuming) calculation of PDF uncertainties. 
#[arg(long)] - no_pdf_unc: bool, + no_conv_fun_unc: bool, } fn map_format_join(slice: &[f64]) -> String { @@ -129,11 +132,11 @@ fn map_format_channels(channels: &[(String, Vec)]) -> String { .join(",\n") } -fn format_pdf_results(pdf_uncertainties: &[Vec>], pdfsets: &[String]) -> String { +fn format_pdf_results(pdf_uncertainties: &[Vec>], conv_funs: &[ConvFuns]) -> String { pdf_uncertainties .iter() - .zip(pdfsets.iter()) - .map(|(values, pdfset)| { + .zip(conv_funs.iter().map(|fun| &fun.label)) + .map(|(values, label)| { format!( " ( \"{}\", @@ -141,7 +144,7 @@ fn format_pdf_results(pdf_uncertainties: &[Vec>], pdfsets: &[String]) - np.array([{}]), np.array([{}]), ),", - helpers::pdf_label(pdfset).replace('_', r"\_"), + label.replace('_', r"\_"), map_format_e_join_repeat_last(&values[0]), map_format_e_join_repeat_last(&values[1]), map_format_e_join_repeat_last(&values[2]), @@ -192,7 +195,7 @@ impl Subcommand for Opts { }; let grid = helpers::read_grid(&self.input)?; - let mut pdf = helpers::create_pdf(&self.pdfsets[0])?; + let mut conv_funs = helpers::create_conv_funs(&self.conv_funs[0])?; let slices = grid.bin_info().slices(); let mut data_string = String::new(); @@ -222,7 +225,7 @@ impl Subcommand for Opts { let results = helpers::convolve( &grid, - slice::from_mut(&mut pdf), + &mut conv_funs, &[], &bins, &[], @@ -248,7 +251,7 @@ impl Subcommand for Opts { helpers::convolve( &grid, - slice::from_mut(&mut pdf), + &mut conv_funs, &qcd_orders, &bins, &[], @@ -273,16 +276,16 @@ impl Subcommand for Opts { .map(|limits| 0.5 * (limits[0] + limits[1])) .collect(); - let pdf_uncertainties: Vec>> = self - .pdfsets + let conv_fun_uncertainties: Vec>> = self + .conv_funs .par_iter() - .map(|pdfset| { - if self.no_pdf_unc { - let mut pdf = helpers::create_pdf(pdfset).unwrap(); + .map(|conv_funs| { + if self.no_conv_fun_unc { + let mut conv_funs = helpers::create_conv_funs(conv_funs)?; let results = helpers::convolve( &grid, - slice::from_mut(&mut pdf), + &mut conv_funs, &[], 
&bins, &[], @@ -293,15 +296,17 @@ impl Subcommand for Opts { Ok(vec![results; 3]) } else { - let (set, member) = helpers::create_pdfset(pdfset).unwrap(); + let (set, funs) = helpers::create_conv_funs_for_set( + conv_funs, + self.conv_fun_uncert_from, + )?; - let pdf_results: Vec<_> = set - .mk_pdfs()? + let pdf_results: Vec<_> = funs .into_par_iter() - .flat_map(|mut pdf| { + .flat_map(|mut funs| { helpers::convolve( &grid, - slice::from_mut(&mut pdf), + &mut funs, &[], &bins, &[], @@ -329,7 +334,8 @@ impl Subcommand for Opts { let uncertainty = set.uncertainty(&values, lhapdf::CL_1_SIGMA, false).unwrap(); central.push( - member.map_or(uncertainty.central, |member| values[member]), + conv_funs.members[self.conv_fun_uncert_from] + .map_or(uncertainty.central, |member| values[member]), ); min.push(uncertainty.central - uncertainty.errminus); max.push(uncertainty.central + uncertainty.errplus); @@ -400,7 +406,7 @@ impl Subcommand for Opts { ), helpers::convolve( &grid, - slice::from_mut(&mut pdf), + &mut conv_funs, &[], &bins, &channel_mask, @@ -442,7 +448,7 @@ impl Subcommand for Opts { }},", slice_label = label, mid = map_format_join(&mid), - pdf_results = format_pdf_results(&pdf_uncertainties, &self.pdfsets), + pdf_results = format_pdf_results(&conv_fun_uncertainties, &self.conv_funs), qcd_y = map_format_e_join_repeat_last(&qcd_central), qcd_min = map_format_e_join_repeat_last(&qcd_min), qcd_max = map_format_e_join_repeat_last(&qcd_max), @@ -506,13 +512,13 @@ impl Subcommand for Opts { let ylog = xlog; let title = key_values.get("description").map_or("", String::as_str); let bins = grid.bin_info().bins(); - let pdfs = self.pdfsets.len(); + let nconvs = self.conv_funs.len(); print!( include_str!("plot.py"), inte = if bins == 1 { "" } else { "# " }, nint = if bins == 1 { "# " } else { "" }, - pdfs = if pdfs == 1 || bins == 1 { "# " } else { "" }, + nconvs = if nconvs == 1 || bins == 1 { "# " } else { "" }, xlabel = xlabel, ylabel = ylabel, xlog = xlog, @@ -523,7 
+529,19 @@ impl Subcommand for Opts { metadata = format_metadata(&vector), ); } else { - let (pdfset1, pdfset2) = self.pdfsets.iter().collect_tuple().unwrap(); + let (pdfset1, pdfset2) = self + .conv_funs + .iter() + .map(|fun| { + assert_eq!(fun.lhapdf_names.len(), 1); + if let Some(member) = fun.members[0] { + format!("{}/{member}", fun.lhapdf_names[0]) + } else { + fun.lhapdf_names[0].clone() + } + }) + .collect_tuple() + .unwrap(); let (order, bin, channel) = self .subgrid_pull .iter() @@ -534,8 +552,8 @@ impl Subcommand for Opts { let cl = lhapdf::CL_1_SIGMA; let grid = helpers::read_grid(&self.input)?; - let (set1, member1) = helpers::create_pdfset(pdfset1)?; - let (set2, member2) = helpers::create_pdfset(pdfset2)?; + let (set1, member1) = helpers::create_pdfset(&pdfset1)?; + let (set2, member2) = helpers::create_pdfset(&pdfset2)?; let mut pdfset1 = set1.mk_pdfs()?; let mut pdfset2 = set2.mk_pdfs()?; diff --git a/pineappl_cli/tests/plot.rs b/pineappl_cli/tests/plot.rs index 2f2d4355a..a93ac23cb 100644 --- a/pineappl_cli/tests/plot.rs +++ b/pineappl_cli/tests/plot.rs @@ -4,18 +4,19 @@ use std::thread; const HELP_STR: &str = "Creates a matplotlib script plotting the contents of the grid -Usage: pineappl plot [OPTIONS] ... +Usage: pineappl plot [OPTIONS] ... Arguments: - Path to the input grid - ... LHAPDF id(s) or name of the PDF set(s) + Path to the input grid + ... 
LHAPDF id(s) or name of the PDF set(s) Options: + --conv-fun-uncert-from Choose for which convolution function the uncertainty should be calculated [default: 0] -s, --scales Set the number of scale variations [default: 7] [possible values: 1, 3, 7, 9] --subgrid-pull Show the pull for a specific grid three-dimensionally --asymmetry Plot the asymmetry --threads Number of threads to utilize [default: {}] - --no-pdf-unc Disable the (time-consuming) calculation of PDF uncertainties + --no-conv-fun-unc Disable the (time-consuming) calculation of PDF uncertainties -h, --help Print help "; @@ -1471,7 +1472,7 @@ fn drell_yan_mass_slices() { .unwrap() .args([ "plot", - "--no-pdf-unc", + "--no-conv-fun-unc", "--threads=1", "../test-data/NNPDF_DY_14TEV_BSM_AFB.pineappl.lz4", "NNPDF40_nnlo_as_01180", From c59f38ee23e50e66dad1733a15b3c89852c1e3e2 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 27 Jun 2024 11:13:23 +0200 Subject: [PATCH 178/179] Remove obsolete helper functions --- pineappl_cli/src/helpers.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 4197f19db..102d03e4e 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -97,14 +97,6 @@ pub fn create_conv_funs_for_set( Ok((set, conv_funs)) } -pub fn create_pdf(pdf: &str) -> Result { - let pdf = pdf.split_once('=').map_or(pdf, |(name, _)| name); - - Ok(pdf - .parse() - .map_or_else(|_| Pdf::with_setname_and_nmem(pdf), Pdf::with_lhaid)?) 
-} - pub fn create_pdfset(pdfset: &str) -> Result<(PdfSet, Option)> { let pdfset = pdfset.split_once('=').map_or(pdfset, |(name, _)| name); let (pdfset, member) = pdfset @@ -122,10 +114,6 @@ pub fn create_pdfset(pdfset: &str) -> Result<(PdfSet, Option)> { )) } -pub fn pdf_label(pdf: &str) -> &str { - pdf.split_once('=').map_or(pdf, |(_, label)| label) -} - pub fn read_grid(input: &Path) -> Result { Grid::read(File::open(input).context(format!("unable to open '{}'", input.display()))?) .context(format!("unable to read '{}'", input.display())) @@ -425,11 +413,6 @@ pub fn convolve_subgrid( grid.convolve_subgrid(&mut cache, order, bin, lumi, 1.0, 1.0) } -pub fn parse_pdfset(argument: &str) -> std::result::Result { - // TODO: figure out how to validate `argument` with `managed-lhapdf` - Ok(argument.to_owned()) -} - pub fn parse_integer_range(range: &str) -> Result> { if let Some(at) = range.find('-') { let (left, right) = range.split_at(at); From ddbf227937af035574372d6a76a34c990dc71d93 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 27 Jun 2024 14:38:42 +0200 Subject: [PATCH 179/179] Migrate the remaining parts of `plot` to `ConvFuns` --- pineappl_cli/src/helpers.rs | 115 ++++++++++++++++++++++++------------ pineappl_cli/src/plot.rs | 63 +++++++++++--------- 2 files changed, 113 insertions(+), 65 deletions(-) diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 102d03e4e..d1871f193 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -97,23 +97,6 @@ pub fn create_conv_funs_for_set( Ok((set, conv_funs)) } -pub fn create_pdfset(pdfset: &str) -> Result<(PdfSet, Option)> { - let pdfset = pdfset.split_once('=').map_or(pdfset, |(name, _)| name); - let (pdfset, member) = pdfset - .rsplit_once('/') - .map_or((pdfset, None), |(set, member)| { - (set, Some(member.parse::().unwrap())) - }); - - Ok(( - PdfSet::new(&pdfset.parse().map_or_else( - |_| pdfset.to_owned(), - |lhaid| lhapdf::lookup_pdf(lhaid).unwrap().0, - 
))?, - member, - )) -} - pub fn read_grid(input: &Path) -> Result { Grid::read(File::open(input).context(format!("unable to open '{}'", input.display()))?) .context(format!("unable to read '{}'", input.display())) @@ -381,36 +364,94 @@ pub fn convolve_limits(grid: &Grid, bins: &[usize], mode: ConvoluteMode) -> Vec< pub fn convolve_subgrid( grid: &Grid, - lhapdf: &mut Pdf, + conv_funs: &mut [Pdf], order: usize, bin: usize, lumi: usize, cfg: &GlobalConfiguration, ) -> Array3 { - // if the field 'Particle' is missing we assume it's a proton PDF - let pdf_pdg_id = lhapdf - .set() - .entry("Particle") - .map_or(Ok(2212), |string| string.parse::()) - .unwrap(); - if cfg.force_positive { - lhapdf.set_force_positive(1); + for fun in conv_funs.iter_mut() { + fun.set_force_positive(1); + } } - let x_max = lhapdf.x_max(); - let x_min = lhapdf.x_min(); - let mut pdf = |id, x, q2| { - if !cfg.allow_extrapolation && (x < x_min || x > x_max) { - 0.0 - } else { - lhapdf.xfx_q2(id, x, q2) + match conv_funs { + [fun] => { + // there's only one convolution function from which we can use the strong coupling + assert_eq!(cfg.use_alphas_from, 0); + + // if the field 'Particle' is missing we assume it's a proton PDF + let pdg_id = fun + .set() + .entry("Particle") + .map_or(Ok(2212), |string| string.parse::()) + .unwrap(); + + let x_max = fun.x_max(); + let x_min = fun.x_min(); + let mut alphas = |q2| fun.alphas_q2(q2); + let mut fun = |id, x, q2| { + if !cfg.allow_extrapolation && (x < x_min || x > x_max) { + 0.0 + } else { + fun.xfx_q2(id, x, q2) + } + }; + + let mut cache = LumiCache::with_one(pdg_id, &mut fun, &mut alphas); + + grid.convolve_subgrid(&mut cache, order, bin, lumi, 1.0, 1.0) } - }; - let mut alphas = |q2| lhapdf.alphas_q2(q2); - let mut cache = LumiCache::with_one(pdf_pdg_id, &mut pdf, &mut alphas); + [fun1, fun2] => { + let pdg_id1 = fun1 + .set() + .entry("Particle") + .map_or(Ok(2212), |string| string.parse::()) + .unwrap(); + + let pdg_id2 = fun2 + .set() + 
.entry("Particle") + .map_or(Ok(2212), |string| string.parse::()) + .unwrap(); + + let x_max1 = fun1.x_max(); + let x_min1 = fun1.x_min(); + let x_max2 = fun2.x_max(); + let x_min2 = fun2.x_min(); + + let mut alphas = |q2| match cfg.use_alphas_from { + 0 => fun1.alphas_q2(q2), + 1 => fun2.alphas_q2(q2), + _ => panic!( + "expected `use_alphas_from` to be `0` or `1`, is {}", + cfg.use_alphas_from + ), + }; + let mut fun1 = |id, x, q2| { + if !cfg.allow_extrapolation && (x < x_min1 || x > x_max1) { + 0.0 + } else { + fun1.xfx_q2(id, x, q2) + } + }; + + let mut fun2 = |id, x, q2| { + if !cfg.allow_extrapolation && (x < x_min2 || x > x_max2) { + 0.0 + } else { + fun2.xfx_q2(id, x, q2) + } + }; + + let mut cache = + LumiCache::with_two(pdg_id1, &mut fun1, pdg_id2, &mut fun2, &mut alphas); - grid.convolve_subgrid(&mut cache, order, bin, lumi, 1.0, 1.0) + grid.convolve_subgrid(&mut cache, order, bin, lumi, 1.0, 1.0) + } + _ => unimplemented!(), + } } pub fn parse_integer_range(range: &str) -> Result> { diff --git a/pineappl_cli/src/plot.rs b/pineappl_cli/src/plot.rs index 2bc9b5a83..87acbe1bb 100644 --- a/pineappl_cli/src/plot.rs +++ b/pineappl_cli/src/plot.rs @@ -13,7 +13,6 @@ use std::fmt::Write; use std::num::NonZeroUsize; use std::path::{Path, PathBuf}; use std::process::ExitCode; -use std::slice; use std::thread; /// Creates a matplotlib script plotting the contents of the grid. 
@@ -529,19 +528,13 @@ impl Subcommand for Opts { metadata = format_metadata(&vector), ); } else { - let (pdfset1, pdfset2) = self - .conv_funs - .iter() - .map(|fun| { - assert_eq!(fun.lhapdf_names.len(), 1); - if let Some(member) = fun.members[0] { - format!("{}/{member}", fun.lhapdf_names[0]) - } else { - fun.lhapdf_names[0].clone() - } - }) - .collect_tuple() - .unwrap(); + // TODO: enforce two arguments with clap + assert_eq!(self.conv_funs.len(), 2); + + let (set1, mut conv_funs1) = + helpers::create_conv_funs_for_set(&self.conv_funs[0], self.conv_fun_uncert_from)?; + let (set2, mut conv_funs2) = + helpers::create_conv_funs_for_set(&self.conv_funs[1], self.conv_fun_uncert_from)?; let (order, bin, channel) = self .subgrid_pull .iter() @@ -552,17 +545,15 @@ impl Subcommand for Opts { let cl = lhapdf::CL_1_SIGMA; let grid = helpers::read_grid(&self.input)?; - let (set1, member1) = helpers::create_pdfset(&pdfset1)?; - let (set2, member2) = helpers::create_pdfset(&pdfset2)?; - let mut pdfset1 = set1.mk_pdfs()?; - let mut pdfset2 = set2.mk_pdfs()?; + let member1 = self.conv_funs[0].members[self.conv_fun_uncert_from]; + let member2 = self.conv_funs[1].members[self.conv_fun_uncert_from]; - let values1: Vec<_> = pdfset1 + let values1: Vec<_> = conv_funs1 .par_iter_mut() - .map(|pdf| { + .map(|conv_funs| { let values = helpers::convolve( &grid, - slice::from_mut(pdf), + conv_funs, &[], &[bin], &[], @@ -574,12 +565,12 @@ impl Subcommand for Opts { values[0] }) .collect(); - let values2: Vec<_> = pdfset2 + let values2: Vec<_> = conv_funs2 .par_iter_mut() - .map(|pdf| { + .map(|conv_funs| { let values = helpers::convolve( &grid, - slice::from_mut(pdf), + conv_funs, &[], &[bin], &[], @@ -613,10 +604,26 @@ impl Subcommand for Opts { unc1.hypot(unc2) }; - let res1 = helpers::convolve_subgrid(&grid, &mut pdfset1[0], order, bin, channel, cfg) - .sum_axis(Axis(0)); - let res2 = helpers::convolve_subgrid(&grid, &mut pdfset2[0], order, bin, channel, cfg) - .sum_axis(Axis(0)); + // 
TODO: if no member is given, the zeroth is used, but we should show the averaged + // result of all members instead + let res1 = helpers::convolve_subgrid( + &grid, + &mut conv_funs1[member1.unwrap_or(0)], + order, + bin, + channel, + cfg, + ) + .sum_axis(Axis(0)); + let res2 = helpers::convolve_subgrid( + &grid, + &mut conv_funs2[member2.unwrap_or(0)], + order, + bin, + channel, + cfg, + ) + .sum_axis(Axis(0)); let subgrid = &grid.subgrids()[[order, bin, channel]]; //let q2 = subgrid.q2_grid();