diff --git a/n3fit/src/evolven3fit/eko_utils.py b/n3fit/src/evolven3fit/eko_utils.py
index b528a89bcb..357a045d84 100644
--- a/n3fit/src/evolven3fit/eko_utils.py
+++ b/n3fit/src/evolven3fit/eko_utils.py
@@ -7,7 +7,6 @@
 from eko.io import runcards
 from eko.matchings import Atlas, nf_default
 from eko.quantities.heavy_quarks import MatchingScales
-from validphys.loader import Loader

 from . import utils
@@ -22,7 +21,6 @@
 }

 EVOLVEN3FIT_CONFIGS_DEFAULTS_EXA = {
-    "ev_op_iterations": 30,
     "ev_op_max_order": (1, 0),
     "evolution_method": "iterate-exact",
     "inversion_method": "exact",
@@ -33,7 +31,7 @@


 def construct_eko_cards(
-    theoryID,
+    nnpdf_theory,
     q_fin,
     q_points,
     x_grid,
@@ -43,21 +41,21 @@ def construct_eko_cards(
 ):
     """
     Return the theory and operator cards used to construct the eko.
-    theoryID is the ID of the theory for which we are computing the theory and operator card.
+    nnpdf_theory is an NNPDF theory card for which we are computing the operator card and the eko.
     q_fin is the final point of the q grid while q_points is the number of points of the grid.
     x_grid is the x grid to be used.
     op_card_dict and theory_card_dict are optional updates that can be provided respectively to the
     operator card and to the theory card.
     """
-    theory, thresholds = load_theory(theoryID, theory_card_dict)
+    theory, thresholds = load_theory(nnpdf_theory, theory_card_dict)

     mu0 = theory["Q0"]
-    # Set nf_0 according to the fitting scale unless set explicitly
-    if "nf0" not in theory:
-        theory["nf0"] = find_nf(mu0, theory, thresholds)
+    # eko needs a value for Qedref and for max nf alphas
+    theory["Qedref"] = theory["Qref"]
+    theory["MaxNfAs"] = theory["MaxNfPdf"]

-    # The Legacy function is able to construct a theory card for eko starting from an NNPDF theory
+    # The Legacy function is able to construct a theory card for eko starting from a NNPDF theory
     legacy_class = runcards.Legacy(theory, {})
     theory_card = legacy_class.new_theory
@@ -106,7 +104,7 @@ def construct_eko_cards(


 def construct_eko_photon_cards(
-    theoryID,
+    nnpdf_theory,
     q_fin,
     x_grid,
     q_gamma,
@@ -115,28 +113,24 @@ def construct_eko_photon_cards(
 ):
     """
     Return the theory and operator cards used to construct the eko_photon.
-    theoryID is the ID of the theory for which we are computing the theory and operator card.
+    nnpdf_theory is an NNPDF theory card for which we are computing the operator card and the eko.
     q_fin is the final point of the q grid while q_points is the number of points of the grid.
     x_grid is the x grid to be used.
     op_card_dict and theory_card_dict are optional updates that can be provided respectively to the
     operator card and to the theory card.
""" - theory, thresholds = load_theory(theoryID, theory_card_dict) + theory, _ = load_theory(nnpdf_theory, theory_card_dict) # if is eko_photon then mu0 = q_gamma mu0 = float(q_gamma) - # Set nf_0 according to mu0 unless set explicitly - if "nf0" not in theory: - theory["nf0"] = find_nf(mu0, theory, thresholds) - - # The Legacy function is able to construct a theory card for eko starting from an NNPDF theory + # The Legacy function is able to construct a theory card for eko starting from a NNPDF theory legacy_class = runcards.Legacy(theory, {}) theory_card = legacy_class.new_theory - q_fin = float(theory["Q0"]) - - nf_fin = find_nf(q_fin, theory, thresholds) + # The photon needs to be evolved down to Q0 + q_fin = theory["Q0"] + nf_fin = theory["nf0"] # construct mugrid mugrid = [(q_fin, nf_fin)] @@ -147,12 +141,12 @@ def construct_eko_photon_cards( return theory_card, op_card -def load_theory(theoryID, theory_card_dict): +def load_theory(nnpdf_theory, theory_card_dict): """loads and returns the theory dictionary and the thresholds""" if theory_card_dict is None: theory_card_dict = {} # theory_card construction - theory = Loader().check_theoryID(theoryID).get_description() + theory = dict(nnpdf_theory) theory.pop("FNS") theory.update(theory_card_dict) @@ -173,7 +167,9 @@ def load_theory(theoryID, theory_card_dict): def build_opcard(op_card_dict, theory, x_grid, mu0, mugrid): - """builds the opcard""" + """Build the operator card. + The user provided options should be given as part of ``op_card_dict`` + """ if op_card_dict is None: op_card_dict = {} @@ -182,12 +178,17 @@ def build_opcard(op_card_dict, theory, x_grid, mu0, mugrid): op_card.update({"mu0": mu0, "mugrid": mugrid}) op_card["xgrid"] = x_grid - # Specific defaults for evolven3fit evolution - if theory["ModEv"] == "TRN": - op_card["configs"].update(EVOLVEN3FIT_CONFIGS_DEFAULTS_TRN) - if theory["ModEv"] == "EXA": - op_card["configs"].update(EVOLVEN3FIT_CONFIGS_DEFAULTS_EXA) - # User can still change the configs via op_card_dict + + # Specify the evolution options and defaults differently from TRN / EXA + configs = op_card["configs"] + if theory.get("ModEv") == "TRN": + configs.update(EVOLVEN3FIT_CONFIGS_DEFAULTS_TRN) + elif theory.get("ModEv") == "EXA": + # Set the default from the theory card unless it was given in the input + op_card_dict.setdefault("ev_op_iterations", theory.get("IterEv")) + + configs.update(EVOLVEN3FIT_CONFIGS_DEFAULTS_EXA) + configs["ev_op_iterations"] = op_card_dict["ev_op_iterations"] # Note that every entry that is not a dictionary should not be # touched by the user and indeed an user cannot touch them @@ -197,30 +198,5 @@ def build_opcard(op_card_dict, theory, x_grid, mu0, mugrid): elif key in op_card_dict: _logger.warning("Entry %s is not a dictionary and will be ignored", key) - # if no -e was given, take ev_op_iterations from EVOLVEN3FIT_CONFIGS_DEFAULTS_{TRN,EXA} - if op_card['configs']['ev_op_iterations'] is None: - if theory["ModEv"] == "TRN": - op_card['configs']['ev_op_iterations'] = EVOLVEN3FIT_CONFIGS_DEFAULTS_TRN[ - "ev_op_iterations" - ] - if theory["ModEv"] == "EXA": - op_card['configs']['ev_op_iterations'] = EVOLVEN3FIT_CONFIGS_DEFAULTS_EXA[ - "ev_op_iterations" - ] - op_card = runcards.OperatorCard.from_dict(op_card) - return op_card - - -def find_nf(mu, theory, thresholds): - """compute nf for a given mu""" - if mu < theory["mc"] * thresholds["c"]: - nf = 3 - elif mu < theory["mb"] * thresholds["b"]: - nf = 4 - elif mu < theory["mt"] * thresholds["t"]: - nf = 5 - else: - nf = 6 - return nf 
diff --git a/n3fit/src/n3fit/scripts/evolven3fit.py b/n3fit/src/n3fit/scripts/evolven3fit.py
index 7bf102b0f1..7d826182c4 100644
--- a/n3fit/src/n3fit/scripts/evolven3fit.py
+++ b/n3fit/src/n3fit/scripts/evolven3fit.py
@@ -1,6 +1,7 @@
 """
 This module contains the CLI for evolven3fit
 """
+
 from argparse import ArgumentParser
 import logging
 import pathlib
@@ -11,6 +12,7 @@
 from eko.runner.managed import solve

 from n3fit.io.writer import XGRID
+from validphys.loader import FallbackLoader

 _logger = logging.getLogger(__name__)
@@ -118,7 +120,7 @@ def main():
         "--ev-op-iterations",
         type=int,
         default=None,
-        help="ev_op_iterations for the EXA theory",
+        help="ev_op_iterations for the EXA theory. Overrides the setting given in the theory card.",
     )
     parser.add_argument(
         "--use-fhmruvv",
@@ -156,6 +158,12 @@ def main():
             args.force,
         )
     else:
+        # If we are in the business of producing an eko, do some checks before starting:
+        # 1. load the nnpdf theory early to check for inconsistent options and theory problems
+        nnpdf_theory = FallbackLoader().check_theoryID(args.theoryID).get_description()
+        if nnpdf_theory.get("ModEv") == "TRN" and args.ev_op_iterations is not None:
+            raise ValueError("ev_op_iterations is not accepted with the ModEv=TRN solution")
+
         stdout_log = logging.StreamHandler(sys.stdout)
         stdout_log.setLevel(evolve.LOGGING_SETTINGS["level"])
         stdout_log.setFormatter(evolve.LOGGING_SETTINGS["formatter"])
@@ -178,7 +186,7 @@ def main():
         x_grid = np.geomspace(args.x_grid_ini, 1.0, args.x_grid_points)
         if args.actions == "produce_eko":
             tcard, opcard = eko_utils.construct_eko_cards(
-                args.theoryID,
+                nnpdf_theory,
                 args.q_fin,
                 args.q_points,
                 x_grid,
@@ -188,7 +196,7 @@ def main():
             )
         elif args.actions == "produce_eko_photon":
             tcard, opcard = eko_utils.construct_eko_photon_cards(
-                args.theoryID, args.q_fin, x_grid, args.q_gamma, op_card_info, theory_card_info
+                nnpdf_theory, args.q_fin, x_grid, args.q_gamma, op_card_info, theory_card_info
             )
         solve(tcard, opcard, args.dump)
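For reviewers, a hedged usage sketch of the updated API: the caller now loads the NNPDF theory once and passes the dictionary itself instead of a bare theory ID. The import path and the `op_card_dict` / `theory_card_dict` keyword names follow the docstring above and should be treated as assumptions, not confirmed signatures.

```python
from evolven3fit import eko_utils
from validphys.loader import FallbackLoader

# Load the theory card once (theory 399 is the one used by the test suite below)
nnpdf_theory = FallbackLoader().check_theoryID(399).get_description()

t_card, op_card = eko_utils.construct_eko_cards(
    nnpdf_theory,  # the theory dictionary, no longer a bare theory ID
    q_fin=100,
    q_points=5,
    x_grid=[1.0e-3, 0.1, 1.0],
    op_card_dict={},
    theory_card_dict={"Comments": "Test"},
)
```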
diff --git a/n3fit/src/n3fit/tests/conftest.py b/n3fit/src/n3fit/tests/conftest.py
index c2acb69852..91489169b1 100644
--- a/n3fit/src/n3fit/tests/conftest.py
+++ b/n3fit/src/n3fit/tests/conftest.py
@@ -1,9 +1,22 @@
 """
     Add markers for pytest
 """
+
 import sys
+
 import pytest

+from validphys.loader import FallbackLoader
+
+THEORYID = 399
+
+
+@pytest.fixture(scope='module')
+def nnpdf_theory_card():
+    """Return a theory card already loaded as a dictionary"""
+    th = FallbackLoader().check_theoryID(THEORYID)
+    return th.get_description()
+

 def pytest_runtest_setup(item):
     ALL = {"darwin", "linux"}
diff --git a/n3fit/src/n3fit/tests/test_evolven3fit.py b/n3fit/src/n3fit/tests/test_evolven3fit.py
index 955fec3071..52799829f6 100644
--- a/n3fit/src/n3fit/tests/test_evolven3fit.py
+++ b/n3fit/src/n3fit/tests/test_evolven3fit.py
@@ -118,16 +118,15 @@ def test_utils():
     assert ID == 162


-def test_eko_utils(tmp_path):
+def test_eko_utils(tmp_path, nnpdf_theory_card):
     # Testing construct eko cards
-    theoryID = 162
     q_fin = 100
     q_points = 5
     x_grid = [1.0e-3, 0.1, 1.0]
     pto = 2
     comments = "Test"
     t_card, op_card = eko_utils.construct_eko_cards(
-        theoryID,
+        nnpdf_theory_card,
         q_fin,
         q_points,
         x_grid,
@@ -140,7 +139,7 @@ def test_eko_utils(tmp_path):
         t_card_dict["order"][0] == pto + 1
     )  # This is due to a different convention in eko orders due to QED
     np.testing.assert_allclose(op_card_dict["xgrid"], x_grid)
-    # In theory 162 the charm threshold is at 1.51
+    # In theory 399 the charm threshold is at 1.51
     # and we should find two entries, one for nf=3 and another one for nf=4
     np.testing.assert_allclose(op_card_dict["mugrid"][0], (1.51, 3))
     np.testing.assert_allclose(op_card_dict["mugrid"][1], (1.51, 4))
diff --git a/nnpdf_data/nnpdf_data/theory.py b/nnpdf_data/nnpdf_data/theory.py
new file mode 100644
index 0000000000..76f7287ca5
--- /dev/null
+++ b/nnpdf_data/nnpdf_data/theory.py
@@ -0,0 +1,148 @@
+"""
+This module provides a unique source and definition for all the possible parameters
+that a theory card can contain.
+"""
+
+import dataclasses
+from functools import lru_cache
+import logging
+from typing import Literal, Optional
+
+from .utils import parse_yaml_inp
+
+DEPRECATED_KEYS = ["MaxNfAs", "SxRes", "SxOrd", "EScaleVar", "Qedref", "global_nx"]
+
+log = logging.getLogger(__name__)
+
+
+class TheoryCardError(Exception):
+    pass
+
+
+class TheoryNotFoundInDatabase(FileNotFoundError):
+    pass
+
+
+@dataclasses.dataclass(frozen=True)
+class TheoryCard:
+    ID: int  # ID number of the theory
+    PTO: Literal[0, 1, 2, 3]  # Perturbative order (0 = LO, 1 = NLO, 2 = NNLO ...)
+    FNS: str  # Flavor number scheme (e.g. FONLL-C)
+    DAMP: int  # Whether a damping function is applied or not for FONLL
+    IC: int  # 0 = perturbative charm only, 1 = intrinsic charm allowed
+    ModEv: Literal["EXA", "TRN"]  # DGLAP evolution solution method (e.g. EXA or TRN)
+    XIR: float  # Renormalization scale over the hard scattering scale ratio
+    XIF: float  # Factorization scale over the hard scattering scale ratio
+    NfFF: int  # Number of active flavors, only for FFNS or FFN0 schemes
+    QED: int  # Max order of alpha_qed in the evolution
+    Q0: float  # [GeV] Parametrization scale for the fit (and the photon)
+    mc: float  # [GeV] charm mass
+    Qmc: float  # [GeV] MSbar mass reference scale of the charm
+    kcThr: float  # Threshold ratio of the charm
+    mb: float  # [GeV] bottom mass
+    Qmb: float  # [GeV] MSbar mass reference scale of the bottom
+    kbThr: float  # Threshold ratio of the bottom
+    mt: float  # [GeV] top mass
+    Qmt: float  # [GeV] MSbar mass reference scale of the top
+    ktThr: float  # Threshold ratio of the top
+    # CKM matrix elements (running on the columns first, i.e. CKM_11 is CKM[0] and CKM_12 is CKM[1] and so on)
+    CKM: list[float]
+    MZ: float  # [GeV] Mass of Z
+    MW: float  # [GeV] Mass of W
+    GF: float  # Fermi constant
+    SIN2TW: float
+    TMC: int  # Include target mass corrections: 0 = disabled, 1 = leading twist, 2 = higher twist approximated, 3 = higher twist exact
+    MP: float  # [GeV] Mass of the proton
+    Comments: str  # Comments on the theory
+    # Definition of the reference scale for alpha_s and alpha_qed
+    alphas: float  # Value of alpha_s at the scale Qref
+    alphaqed: float  # Value of alpha QED at the scale Qref
+    Qref: float  # [GeV] Reference scale for alphas and alphaqed
+    nfref: Optional[int] = None  # nf at Qref (its default depends on Qref)
+    MaxNfPdf: Optional[int] = 5  # Used by pineko and the photon module to define the thresholds
+    ## Fit theory parameters default
+    # Number of active flavors at the parametrization scale Q0, its default depends on Q0
+    nf0: Optional[int] = None
+    ## Evolution parameters
+    # Heavy quark mass scheme, POLE for pole masses (default), MSBAR for running masses
+    HQ: Optional[Literal["POLE", "MSBAR"]] = "POLE"
+    # Iterations for the evolution of the PDF. Defaults to 60 when ModEv = EXA
+    IterEv: Optional[int] = None
+    ModSV: Optional[str] = None  # Scale variations method in EKO (expanded or exponentiated)
+    DAMPPOWERc: Optional[int] = None  # Power of the damping factor in FONLL for the c
+    DAMPPOWERb: Optional[int] = None  # Power of the damping factor in FONLL for the b
+    ## N3LO parameters
+    # N3LO anomalous dimension variations
+    n3lo_ad_variation: Optional[list] = dataclasses.field(default_factory=lambda: 7 * [0])
+    # N3LO coefficient functions variation: -1 = lower bound, 0 = central, 1 = upper bound
+    n3lo_cf_variation: Optional[int] = 0
+    # N3LO splitting functions approximation: if True use the FHMRUVV parametrization, otherwise use the EKO parametrization.
+    use_fhmruvv: Optional[bool] = False
+    ###### Keys for compatibility with old NNPDF theories: their values will be dropped immediately after reading to avoid problems,
+    ###### they will be set to ``None`` immediately after loading the theory
+    MaxNfAs: Optional[int] = None
+    SxRes: Optional[int] = None
+    SxOrd: Optional[str] = None
+    EScaleVar: Optional[int] = None
+    Qedref: Optional[float] = None
+    global_nx: Optional[int] = None
+
+    def __post_init__(self):
+        self._set_defaults()
+        self._apply_checks()
+
+        for key in DEPRECATED_KEYS:
+            object.__setattr__(self, key, None)
+
+    def _apply_checks(self):
+        """Apply complex checks to the input keys after parsing is complete"""
+        if self.Qedref is not None and self.QED != 0:
+            # Check that nobody is trying to use this with a wrong Qedref!
+            if self.Qedref != self.Qref:
+                self._raise_or_warn(
+                    f"Trying to use {self.ID} with {self.Qedref} != {self.Qref}. This is not supported!"
+                )
+
+    def find_nf(self, mu):
+        """Given a value of q, find the corresponding number of flavours
+        for this theory."""
+        if mu < self.mc * self.kcThr:
+            return 3
+        elif mu < self.mb * self.kbThr:
+            return 4
+        elif mu < self.mt * self.ktThr:
+            return 5
+        return 6
+
+    def _set_defaults(self):
+        """Function to be called by __post_init__ to set defaults that might depend
+        on other variables"""
+        if self.ModEv == "EXA" and self.IterEv is None:
+            object.__setattr__(self, "IterEv", 60)
+        if self.ModEv == "TRN" and self.IterEv is not None:
+            raise TheoryCardError(
+                f"IterEv should not be given when ModEv=TRN, received {self.IterEv}"
+            )
+        if self.nf0 is None:
+            object.__setattr__(self, "nf0", self.find_nf(self.Q0))
+        if self.nfref is None:
+            object.__setattr__(self, "nfref", self.find_nf(self.Qref))
+
+    def _raise_or_warn(self, msg):
+        """Raise an error for new theories and a warning for old ones"""
+        if self.ID >= 600:
+            raise TheoryCardError(msg)
+        log.warning(msg)
+
+    def asdict(self):
+        return dataclasses.asdict(self)
+
+
+@lru_cache
+def parse_theory_card(theory_card):
+    """Read the theory card using validobj parsing
+    and return it as a ``TheoryCard`` object
+    """
+    if theory_card.exists():
+        return parse_yaml_inp(theory_card, TheoryCard)
+    raise TheoryNotFoundInDatabase(f"Theory card {theory_card} not found")
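A standalone illustration of `TheoryCard.find_nf` and the `nf0` default set in `_set_defaults`. The charm threshold (mc * kcThr = 1.51) and Q0 = 1.65 are taken from the 41_000_000 card in this patch; the bottom and top thresholds below are placeholders, not values from the patch.

```python
# Self-contained sketch of the nf-counting used for the nf0/nfref defaults.
def find_nf(mu, charm=1.51, bottom=4.9, top=173.0):
    """Return the number of active flavours at the scale mu (thresholds in GeV)."""
    if mu < charm:
        return 3
    elif mu < bottom:
        return 4
    elif mu < top:
        return 5
    return 6


assert find_nf(1.0) == 3   # below the charm threshold
assert find_nf(1.65) == 4  # so nf0 defaults to 4 at the parametrization scale Q0 = 1.65
```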
diff --git a/nnpdf_data/nnpdf_data/theory_cards/522.yaml b/nnpdf_data/nnpdf_data/theory_cards/41000000.yaml
similarity index 64%
rename from nnpdf_data/nnpdf_data/theory_cards/522.yaml
rename to nnpdf_data/nnpdf_data/theory_cards/41000000.yaml
index c8b2ed9942..33b29c4ad4 100644
--- a/nnpdf_data/nnpdf_data/theory_cards/522.yaml
+++ b/nnpdf_data/nnpdf_data/theory_cards/41000000.yaml
@@ -1,22 +1,16 @@
-ID: 522
+ID: 41_000_000
+Comments: NNPDF4.1 baseline
 PTO: 2
 FNS: FONLL-C
 DAMP: 0
 IC: 1
+Q0: 1.65
 ModEv: EXA
+IterEv: 60
 XIR: 1.0
 XIF: 1.0
 NfFF: 5
-MaxNfAs: 5
-MaxNfPdf: 5
-Q0: 1.65
-alphas: 0.118
-Qref: 91.2
-QED: 2
-alphaqed: 0.007496252
-Qedref: 1.777
-SxRes: 0
-SxOrd: LL
+QED: 0
 HQ: POLE
 mc: 1.51
 Qmc: 1.51
@@ -43,6 +37,6 @@ GF: 1.1663787e-05
 SIN2TW: 0.23126
 TMC: 1
 MP: 0.938
-Comments: NNPDF4.0 QED nnlo exact evolution fixed alphaem, photon generated at 100GeV
-global_nx: 0
-EScaleVar: 1
+Qref: 91.2
+alphas: 0.118
+alphaqed: 0.0077553
diff --git a/nnpdf_data/nnpdf_data/theorydbutils.py b/nnpdf_data/nnpdf_data/theorydbutils.py
index defc35577f..bc92575a22 100644
--- a/nnpdf_data/nnpdf_data/theorydbutils.py
+++ b/nnpdf_data/nnpdf_data/theorydbutils.py
@@ -5,58 +5,15 @@
 low level utilities for querying the theory database file and representing the data
 as a python object.
 """
-from dataclasses import asdict, dataclass
 from functools import lru_cache
 from pathlib import Path

 import pandas as pd

+from .theory import TheoryCard
 from .utils import parse_yaml_inp


-@dataclass(frozen=True)
-class _TheoryCard:
-    ID: int
-    PTO: int
-    FNS: str
-    DAMP: int
-    IC: int
-    ModEv: str
-    XIR: float
-    XIF: float
-    NfFF: int
-    MaxNfAs: int
-    MaxNfPdf: int
-    Q0: float
-    alphas: float
-    Qref: float
-    QED: int
-    alphaqed: float
-    Qedref: float
-    SxRes: int
-    SxOrd: str
-    HQ: str
-    mc: float
-    Qmc: float
-    kcThr: float
-    mb: float
-    Qmb: float
-    kbThr: float
-    mt: float
-    Qmt: float
-    ktThr: float
-    CKM: list[float]
-    MZ: float
-    MW: float
-    GF: float
-    SIN2TW: float
-    TMC: int
-    MP: float
-    Comments: str
-    global_nx: int
-    EScaleVar: int
-
-
 class TheoryNotFoundInDatabase(Exception):
     pass
@@ -67,7 +24,8 @@ def parse_theory_card(theory_card):
     Returns the theory as a dictionary
     """
     if theory_card.exists():
-        return asdict(parse_yaml_inp(theory_card, _TheoryCard))
+        tcard = parse_yaml_inp(theory_card, TheoryCard)
+        return tcard.asdict()
     raise TheoryNotFoundInDatabase(f"Theory card {theory_card} not found")
diff --git a/nnpdf_data/nnpdf_data/utils.py b/nnpdf_data/nnpdf_data/utils.py
index 2fe7c15c4c..33987134bd 100644
--- a/nnpdf_data/nnpdf_data/utils.py
+++ b/nnpdf_data/nnpdf_data/utils.py
@@ -3,6 +3,11 @@
 import ruamel.yaml as yaml
 from validobj import ValidationError, parse_input

+try:
+    Loader = yaml.CLoader
+except AttributeError:
+    Loader = yaml.Loader
+

 def parse_yaml_inp(input_yaml, spec):
     """
@@ -12,12 +17,14 @@ def parse_yaml_inp(input_yaml, spec):
     https://validobj.readthedocs.io/en/latest/examples.html#yaml-line-numbers
     """
     input_yaml = pathlib.Path(input_yaml)
-    inp = yaml.round_trip_load(input_yaml.open("r", encoding="utf-8"))
+    inp = yaml.load(input_yaml.read_text(encoding="utf-8"), Loader=Loader)
     try:
         return parse_input(inp, spec)
     except ValidationError as e:
         current_exc = e
-        current_inp = inp
+        # In order to provide more complete error information, use round_trip_load
+        # to read the .yaml file again (instead of using the CLoader)
+        current_inp = yaml.round_trip_load(input_yaml.open("r", encoding="utf-8"))
         error_text_lines = []
         while current_exc:
             if hasattr(current_exc, 'wrong_field'):
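A small sketch of the loader-selection pattern adopted in `nnpdf_data.utils`: prefer the C-accelerated loader when `ruamel.yaml` ships it, and fall back to the pure-Python one otherwise (error reporting still re-reads the file with `round_trip_load` to recover line numbers). The inline YAML string below is only an example input.

```python
import ruamel.yaml as yaml

# Same try/except as in the patch: CLoader is only present when the C extension is built.
try:
    Loader = yaml.CLoader
except AttributeError:
    Loader = yaml.Loader

# Fast path used for the initial parse of a theory card.
data = yaml.load("PTO: 2\nModEv: EXA\n", Loader=Loader)
assert data["PTO"] == 2 and data["ModEv"] == "EXA"
```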
diff --git a/validphys2/src/validphys/photon/alpha.py b/validphys2/src/validphys/photon/alpha.py
index 574f12d67c..f05d770c08 100644
--- a/validphys2/src/validphys/photon/alpha.py
+++ b/validphys2/src/validphys/photon/alpha.py
@@ -1,4 +1,5 @@
 """Module that implements the alpha running"""
+
 import numpy as np
 from scipy.integrate import solve_ivp

@@ -10,6 +11,7 @@

 class Alpha:
     """Class that solves the RGE for alpha_qed"""
+
     def __init__(self, theory, q2max):
         self.theory = theory
         self.alpha_em_ref = theory["alphaqed"]
@@ -20,11 +22,11 @@ def __init__(self, theory, q2max):
         self.thresh_c = self.theory["kcThr"] * self.theory["mc"]
         self.thresh_b = self.theory["kbThr"] * self.theory["mb"]
         self.thresh_t = self.theory["ktThr"] * self.theory["mt"]
-        if self.theory["MaxNfAs"] <= 5:
+        if self.theory["MaxNfPdf"] <= 5:
             self.thresh_t = np.inf
-        if self.theory["MaxNfAs"] <= 4:
+        if self.theory["MaxNfPdf"] <= 4:
             self.thresh_b = np.inf
-        if self.theory["MaxNfAs"] <= 3:
+        if self.theory["MaxNfPdf"] <= 3:
             self.thresh_c = np.inf

         if self.theory["ModEv"] == "TRN":
@@ -145,7 +147,7 @@ def alphaem_fixed_flavor_exa(self, q, alphaem_ref, qref, nf, nl):
         # at LO in QED the TRN solution is exact
         if self.qed_order == 1:
             return self.alphaem_fixed_flavor_trn(q, alphaem_ref, qref, nf, nl)
-
+
         u = 2 * np.log(q / qref)

         # solve RGE
@@ -214,7 +216,7 @@ def compute_betas(self, regions):
         for nf, nl in regions:
             betas_qed[(nf, nl)] = [
                 beta.beta_qed_aem2(nf, nl) / (4 * np.pi),
-                beta.beta_qed_aem3(nf, nl) / (4 * np.pi) ** 2 if self.theory['QED'] == 2 else 0.,
+                beta.beta_qed_aem3(nf, nl) / (4 * np.pi) ** 2 if self.theory['QED'] == 2 else 0.0,
             ]
         return betas_qed
@@ -249,6 +251,7 @@ def find_region(self, q):
 def _rge(_t, alpha_t, beta_qed_vec):
     """RGEs for the running of alphaem"""
     rge_qed = -(alpha_t**2) * (
-        beta_qed_vec[0] + np.sum([alpha_t ** (k + 1) * beta for k, beta in enumerate(beta_qed_vec[1:])])
+        beta_qed_vec[0]
+        + np.sum([alpha_t ** (k + 1) * beta for k, beta in enumerate(beta_qed_vec[1:])])
     )
     return rge_qed
diff --git a/validphys2/src/validphys/photon/compute.py b/validphys2/src/validphys/photon/compute.py
index 094c317753..265a698d2c 100644
--- a/validphys2/src/validphys/photon/compute.py
+++ b/validphys2/src/validphys/photon/compute.py
@@ -1,4 +1,5 @@
 """Module that calls fiatlux to add the photon PDF."""
+
 import logging
 import tempfile

@@ -55,11 +56,9 @@ def __init__(self, theoryid, lux_params, replicas):
         theory = theoryid.get_description()
         fiatlux_runcard = FIATLUX_DEFAULT
-        fiatlux_runcard["qed_running"] = bool(np.isclose(theory["Qedref"], theory["Qref"]))
-        # cast explicitly from np.bool_ to bool otherwise problems in dumping it
-        # TODO: for the time being, we trigger alphaem running if Qedref=Qref.
-        # This is going to be changed in favor of a bool em_running
-        # in the runcard
+        # TODO: for the time being, Qedref=Qref and so alphaem running will always trigger
+        # This may be changed in the future in favor of a bool em_running in the runcard
+        fiatlux_runcard["qed_running"] = True
         fiatlux_runcard["mproton"] = float(theory["MP"])

         # precision on final integration of double integral
diff --git a/validphys2/src/validphys/tests/photon/test_alpha.py b/validphys2/src/validphys/tests/photon/test_alpha.py
index 7db54055b6..118ee2499b 100644
--- a/validphys2/src/validphys/tests/photon/test_alpha.py
+++ b/validphys2/src/validphys/tests/photon/test_alpha.py
@@ -34,7 +34,7 @@ def test_set_thresholds_alpha_em():

     # test all the thresholds
     np.testing.assert_almost_equal(alpha.alpha_em_ref, theory["alphaqed"])
-    np.testing.assert_almost_equal(alpha.thresh[(5, 3)], theory["Qedref"])
+    np.testing.assert_almost_equal(alpha.thresh[(5, 3)], theory["Qref"])
     np.testing.assert_almost_equal(alpha.thresh[(4, 3)], theory["mb"])
     np.testing.assert_almost_equal(alpha.thresh[(4, 2)], constants.MTAU)
     np.testing.assert_almost_equal(alpha.thresh[(3, 2)], theory["mc"])
@@ -46,7 +46,7 @@ def test_set_thresholds_alpha_em():
     np.testing.assert_almost_equal(alpha.alphaem_thresh[(5, 3)], theory["alphaqed"])
     np.testing.assert_almost_equal(
         alpha.alphaem_thresh[(4, 3)],
-        alpha.alphaem_fixed_flavor(theory["mb"], alpha_ref, theory["Qedref"], 5, 3),
+        alpha.alphaem_fixed_flavor(theory["mb"], alpha_ref, theory["Qref"], 5, 3),
     )
     np.testing.assert_almost_equal(
         alpha.alphaem_thresh[(4, 2)],
@@ -102,7 +102,7 @@ def test_couplings_exa():
             alphaem=theory["alphaqed"],
             scale=theory["Qref"],
             num_flavs_ref=None,
-            max_num_flavs=theory["MaxNfAs"],
+            max_num_flavs=theory["MaxNfPdf"],
             em_running=True,
         )
     )
@@ -138,7 +138,7 @@ def test_couplings_exa():
         np.testing.assert_allclose(
             alpha.alpha_em(q), eko_alpha.a_em(q**2) * 4 * np.pi, rtol=5e-6
         )
-    for nf in range(3, theory["MaxNfAs"]):
+    for nf in range(3, theory["MaxNfPdf"]):
         np.testing.assert_allclose(
             alpha.alphaem_thresh[(nf, 2 if nf == 3 else 3)],
             eko_alpha.a_em(mass_list[nf - 3] ** 2, nf) * 4 * np.pi,
diff --git a/validphys2/src/validphys/tests/photon/test_compute.py b/validphys2/src/validphys/tests/photon/test_compute.py
index 64e3cda263..dd846bdcf2 100644
--- a/validphys2/src/validphys/tests/photon/test_compute.py
+++ b/validphys2/src/validphys/tests/photon/test_compute.py
@@ -10,8 +10,8 @@
 from validphys.core import PDF as PDFset
 from validphys.loader import FallbackLoader
 from validphys.photon import structure_functions as sf
-from validphys.photon.compute import FIATLUX_DEFAULT, Photon
 from validphys.photon.alpha import Alpha
+from validphys.photon.compute import FIATLUX_DEFAULT, Photon

 from ..conftest import PDF, THEORY_QED
@@ -70,7 +70,7 @@ def test_photon():
     # runcard
     fiatlux_default = FIATLUX_DEFAULT.copy()
     fiatlux_default['mproton'] = theory['MP']
-    fiatlux_default["qed_running"] = bool(np.isclose(theory["Qedref"], theory["Qref"]))
+    fiatlux_default["qed_running"] = False
     fiatlux_default["q2_max"] = float(f2.q2_max)
     fiatlux_default["eps_base"] = fiatlux_runcard["eps_base"]
diff --git a/validphys2/src/validphys/tests/test_theorydbutils.py b/validphys2/src/validphys/tests/test_theorydbutils.py
index c812508445..a72d86900d 100644
--- a/validphys2/src/validphys/tests/test_theorydbutils.py
+++ b/validphys2/src/validphys/tests/test_theorydbutils.py
@@ -1,4 +1,6 @@
 import pytest
+from ruamel import yaml
+from validobj import ValidationError

 from nnpdf_data.theorydbutils import TheoryNotFoundInDatabase, fetch_all, fetch_theory
 from validphys.api import API
@@ -25,3 +27,26 @@ def test_vp_theoryinfo_tables():
     assert all_tables.loc[53, 'Comments'] == 'NNPDF3.1 NNLO central'
     tb53 = API.theory_info_table(theory_db_id=53)
     assert tb53.loc['Comments'].iloc[0] == 'NNPDF3.1 NNLO central'
+
+
+def _dump_and_check_error(tdict, tmp, bad_number=999):
+    """Dump theory dict to a file and load expecting an error"""
+    tdict["ID"] = bad_number
+    ofile = tmp / f"{bad_number}.yaml"
+    yaml.dump(tdict, ofile.open("w"))
+    with pytest.raises(ValidationError):
+        fetch_theory(tmp, bad_number)
+
+
+def test_fetch_with_errors(tmp):
+    """Test some of the errors that theories can raise"""
+    # Get a good theory and copy it before doing evil to it
+    th_good = fetch_theory(DBPATH, 700)
+    # Wrong PTO
+    bad_pto = dict(th_good)
+    bad_pto["PTO"] = 7
+    _dump_and_check_error(bad_pto, tmp)
+    # Wrong ModEv
+    bad_ev = dict(th_good)
+    bad_ev["ModEv"] = "WRONG"
+    _dump_and_check_error(bad_ev, tmp)
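Finally, a standalone illustration of the policy encoded in `TheoryCard._raise_or_warn` above: legacy theories (ID < 600) only log a warning when an inconsistency such as `Qedref != Qref` is found, while new theories raise. The function below is a re-implementation for the sake of the example, not the class method itself.

```python
import logging

log = logging.getLogger(__name__)


class TheoryCardError(Exception):
    pass


def raise_or_warn(theory_id: int, msg: str) -> None:
    """Fatal for new theory IDs, only a warning for legacy ones."""
    if theory_id >= 600:
        raise TheoryCardError(msg)
    log.warning(msg)


raise_or_warn(522, "Trying to use 522 with Qedref != Qref")  # legacy theory: warning only
# raise_or_warn(41_000_000, "...")  # a new-style ID would raise TheoryCardError
```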