From ecc631c57b9e0f0318748a6a9abcccc81224adad Mon Sep 17 00:00:00 2001
From: Timo Reents <77727843+t-reents@users.noreply.github.com>
Date: Thu, 11 Apr 2024 15:10:05 +0200
Subject: [PATCH 1/2] Add `init_walltime` input to parallelize WorkChains (#62)

The `HpParallelizeQpointsWorkChain` and the `HpParallelizeAtomsWorkChain`
had a hard-coded walltime of 3600 seconds for the `HpBaseWorkChain` used
for the initialization. This might cause problems on certain HPC
partitions (e.g. debug, test, ...) where the maximum walltime can be
lower than 1 hour. To make this more flexible, a new **non-db**
`init_walltime` input is added to both WorkChains, with a default of
1 hour.
---
 .../workflows/hp/parallelize_atoms.py   | 6 +++++-
 .../workflows/hp/parallelize_qpoints.py | 6 +++++-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/src/aiida_quantumespresso_hp/workflows/hp/parallelize_atoms.py b/src/aiida_quantumespresso_hp/workflows/hp/parallelize_atoms.py
index d6fb0cb..40921e4 100644
--- a/src/aiida_quantumespresso_hp/workflows/hp/parallelize_atoms.py
+++ b/src/aiida_quantumespresso_hp/workflows/hp/parallelize_atoms.py
@@ -24,6 +24,10 @@ def define(cls, spec):
         spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization', 'clean_workdir'))
         spec.input('parallelize_qpoints', valid_type=orm.Bool, default=lambda: orm.Bool(False))
         spec.input('max_concurrent_base_workchains', valid_type=orm.Int, required=False)
+        spec.input(
+            'init_walltime', valid_type=int, default=3600, non_db=True,
+            help='The walltime of the initialization `HpBaseWorkChain` in seconds (default: 3600).'
+        )
         spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False),
             help='If `True`, work directories of all called calculation will be cleaned at the end of execution.')
         spec.outline(
@@ -56,7 +60,7 @@ def run_init(self):
         inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain))
         inputs.only_initialization = orm.Bool(True)
         inputs.clean_workdir = self.inputs.clean_workdir
-        inputs.hp.metadata.options.max_wallclock_seconds = 3600  # 1 hour is more than enough
+        inputs.hp.metadata.options.max_wallclock_seconds = self.inputs.init_walltime
         inputs.metadata.call_link_label = 'initialization'
 
         node = self.submit(HpBaseWorkChain, **inputs)
diff --git a/src/aiida_quantumespresso_hp/workflows/hp/parallelize_qpoints.py b/src/aiida_quantumespresso_hp/workflows/hp/parallelize_qpoints.py
index 993ed3b..e6315b6 100644
--- a/src/aiida_quantumespresso_hp/workflows/hp/parallelize_qpoints.py
+++ b/src/aiida_quantumespresso_hp/workflows/hp/parallelize_qpoints.py
@@ -30,6 +30,10 @@ def define(cls, spec):
         super().define(spec)
         spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization', 'clean_workdir'))
         spec.input('max_concurrent_base_workchains', valid_type=orm.Int, required=False)
+        spec.input(
+            'init_walltime', valid_type=int, default=3600, non_db=True,
+            help='The walltime of the initialization `HpBaseWorkChain` in seconds (default: 3600).'
+        )
         spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False),
             help='If `True`, work directories of all called calculation will be cleaned at the end of execution.')
         spec.outline(
@@ -63,7 +67,7 @@ def run_init(self):
 
         inputs.hp.parameters = orm.Dict(parameters)
         inputs.clean_workdir = self.inputs.clean_workdir
-        inputs.hp.metadata.options.max_wallclock_seconds = 3600  # 1 hour is more than enough
+        inputs.hp.metadata.options.max_wallclock_seconds = self.inputs.init_walltime
         inputs.metadata.call_link_label = 'initialization'
 
         node = self.submit(HpBaseWorkChain, **inputs)
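For reference, the new `init_walltime` input is a plain Python integer (non-db), so it can be set directly when building the inputs. A minimal sketch of overriding it, assuming the plugin's registered entry point name and an already-configured `hp@localhost` code (both are placeholders here, and the remaining required `hp` inputs are omitted):

    # Sketch: cap the initialization HpBaseWorkChain at 10 minutes instead of 1 hour.
    from aiida import load_profile, orm
    from aiida.plugins import WorkflowFactory

    load_profile()

    # Entry point name assumed for illustration.
    HpParallelizeAtomsWorkChain = WorkflowFactory('quantumespresso.hp.parallelize_atoms')

    builder = HpParallelizeAtomsWorkChain.get_builder()
    builder.hp.code = orm.load_code('hp@localhost')  # placeholder code label
    builder.init_walltime = 600  # plain int; stored as a non-db input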
From 4c8036efa3d265a85b423aea5f00a44e4aca01ad Mon Sep 17 00:00:00 2001
From: Timo Reents <77727843+t-reents@users.noreply.github.com>
Date: Thu, 11 Apr 2024 17:06:47 +0200
Subject: [PATCH 2/2] Relabel `HubbardStructure` if relaxations are skipped (#63)

Fixes #61

The `ctx.current_hubbard_structure` is not relabeled in the
`SelfConsistentHubbardWorkChain` when `skip_relax_iterations != 0`. This
can cause problems in the subsequent `PwRelaxWorkChains` if new types are
introduced. Therefore, the relabeling step is moved to a separate method
that is called in all cases.

Co-authored-by: Lorenzo <79980269+bastonero@users.noreply.github.com>
---
 .../workflows/hubbard.py        | 33 +++++++++--------
 tests/workflows/test_hubbard.py | 35 +++++++++++++++++++
 2 files changed, 54 insertions(+), 14 deletions(-)

diff --git a/src/aiida_quantumespresso_hp/workflows/hubbard.py b/src/aiida_quantumespresso_hp/workflows/hubbard.py
index 7673964..d517163 100644
--- a/src/aiida_quantumespresso_hp/workflows/hubbard.py
+++ b/src/aiida_quantumespresso_hp/workflows/hubbard.py
@@ -421,6 +421,22 @@ def get_pseudos(self) -> dict:
 
         return results
 
+    def relabel_hubbard_structure(self, workchain) -> None:
+        """Relabel the Hubbard structure if new types have been detected."""
+        from aiida_quantumespresso.utils.hubbard import is_intersite_hubbard
+
+        if not is_intersite_hubbard(workchain.outputs.hubbard_structure.hubbard):
+            for site in workchain.outputs.hubbard.dict.sites:
+                if not site['type'] == site['new_type']:
+                    result = structure_relabel_kinds(
+                        self.ctx.current_hubbard_structure, workchain.outputs.hubbard, self.ctx.current_magnetic_moments
+                    )
+                    self.ctx.current_hubbard_structure = result['hubbard_structure']
+                    if self.ctx.current_magnetic_moments is not None:
+                        self.ctx.current_magnetic_moments = result['starting_magnetization']
+                    self.report('new types have been detected: relabeling the structure.')
+                    return
+
     def run_relax(self):
         """Run the PwRelaxWorkChain to run a relax PwCalculation."""
         inputs = self.get_inputs(PwRelaxWorkChain, 'relax')
@@ -578,14 +594,14 @@ def inspect_hp(self):
 
         if not self.should_check_convergence():
             self.ctx.current_hubbard_structure = workchain.outputs.hubbard_structure
+            self.relabel_hubbard_structure(workchain)
+
         if not self.inputs.meta_convergence:
             self.report('meta convergence is switched off, so not checking convergence of Hubbard parameters.')
             self.ctx.is_converged = True
 
     def check_convergence(self):
         """Check the convergence of the Hubbard parameters."""
-        from aiida_quantumespresso.utils.hubbard import is_intersite_hubbard
-
         workchain = self.ctx.workchains_hp[-1]
 
         # We store in memory the parameters before relabelling to make the comparison easier.
@@ -601,18 +617,7 @@ def check_convergence(self):
 
         # We check if new types were created, in which case we relabel the `HubbardStructureData`
         self.ctx.current_hubbard_structure = workchain.outputs.hubbard_structure
-
-        if not is_intersite_hubbard(workchain.outputs.hubbard_structure.hubbard):
-            for site in workchain.outputs.hubbard.dict.sites:
-                if not site['type'] == site['new_type']:
-                    self.report('new types have been detected: relabeling the structure and starting new iteration.')
-                    result = structure_relabel_kinds(
-                        self.ctx.current_hubbard_structure, workchain.outputs.hubbard, self.ctx.current_magnetic_moments
-                    )
-                    self.ctx.current_hubbard_structure = result['hubbard_structure']
-                    if self.ctx.current_magnetic_moments is not None:
-                        self.ctx.current_magnetic_moments = result['starting_magnetization']
-                    break
+        self.relabel_hubbard_structure(workchain)
 
         if not len(ref_params) == len(new_params):
             self.report('The new and old Hubbard parameters have different lenghts. Assuming to be at the first cycle.')
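To see the relabeling trigger in isolation: the per-site check compares the `type` and `new_type` fields that `hp.x` reports for each Hubbard site, and relabeling happens as soon as any of them differ. A standalone sketch with made-up site data (illustrative only, not real `hp.x` output):

    # Illustrative data only: each entry records a site's kind name before and
    # after the hp.x calculation.
    sites = [
        {'type': 'Co', 'new_type': 'Co'},   # kind unchanged
        {'type': 'Co', 'new_type': 'Co0'},  # kind was split: relabeling is needed
    ]

    # Mirrors the loop in `relabel_hubbard_structure` above: relabel once any
    # site's kind name has changed.
    needs_relabel = any(site['type'] != site['new_type'] for site in sites)
    print(needs_relabel)  # True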
diff --git a/tests/workflows/test_hubbard.py b/tests/workflows/test_hubbard.py
index 2a06584..c24802c 100644
--- a/tests/workflows/test_hubbard.py
+++ b/tests/workflows/test_hubbard.py
@@ -227,6 +227,41 @@ def test_skip_relax_iterations(generate_workchain_hubbard, generate_inputs_hubba
         assert process.should_check_convergence()
 
 
+@pytest.mark.usefixtures('aiida_profile')
+def test_skip_relax_iterations_relabeling(
+    generate_workchain_hubbard, generate_inputs_hubbard, generate_hp_workchain_node, generate_hubbard_structure
+):
+    """Test `SelfConsistentHubbardWorkChain` when skipping the first relax iterations and relabeling is needed."""
+    from aiida.orm import Bool, Int
+
+    inputs = generate_inputs_hubbard()
+    inputs['skip_relax_iterations'] = Int(1)
+    inputs['meta_convergence'] = Bool(True)
+    process = generate_workchain_hubbard(inputs=inputs)
+    process.setup()
+
+    current_hubbard_structure = generate_hubbard_structure(u_value=1, only_u=True)
+    process.current_hubbard_structure = current_hubbard_structure
+    # 1
+    process.update_iteration()
+    assert process.ctx.skip_relax_iterations == 1
+    assert process.ctx.iteration == 1
+    assert not process.should_run_relax()
+    assert not process.should_check_convergence()
+    process.ctx.workchains_hp = [generate_hp_workchain_node(relabel=True, u_value=1, only_u=True)]
+    process.inspect_hp()
+    assert process.ctx.current_hubbard_structure.get_kind_names(
+    ) != process.ctx.workchains_hp[-1].outputs.hubbard_structure.get_kind_names()
+    # 2
+    process.update_iteration()
+    assert process.should_run_relax()
+    assert process.should_check_convergence()
+    # 3
+    process.update_iteration()
+    assert process.should_run_relax()
+    assert process.should_check_convergence()
+
+
 @pytest.mark.usefixtures('aiida_profile')
 def test_relax_frequency(generate_workchain_hubbard, generate_inputs_hubbard):
     """Test `SelfConsistentHubbardWorkChain` when `relax_frequency` is different from 1."""
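As a closing illustration of the behaviour exercised by the new test, `skip_relax_iterations` and `meta_convergence` are ordinary inputs of the `SelfConsistentHubbardWorkChain`, so a user would reach the fixed code path roughly as follows (the entry point name is assumed, and the required structure, code, and pseudopotential inputs are omitted):

    # Sketch: skip the relaxation during the first self-consistent iteration.
    from aiida import orm
    from aiida.plugins import WorkflowFactory

    # Entry point name assumed for illustration.
    SelfConsistentHubbardWorkChain = WorkflowFactory('quantumespresso.hp.hubbard')

    builder = SelfConsistentHubbardWorkChain.get_builder()
    builder.skip_relax_iterations = orm.Int(1)  # no PwRelaxWorkChain in iteration 1
    builder.meta_convergence = orm.Bool(True)   # iterate until the Hubbard parameters converge

    # With the fix above, kinds split by hp.x during the skipped iteration are
    # relabeled before the first PwRelaxWorkChain runs.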