diff --git a/.github/workflows/ci-code.yml b/.github/workflows/ci-code.yml index dfa3cc787c..a8d0cb9a08 100644 --- a/.github/workflows/ci-code.yml +++ b/.github/workflows/ci-code.yml @@ -104,7 +104,7 @@ jobs: AIIDA_WARN_v3: 1 # Python 3.12 has a performance regression when running with code coverage # so run code coverage only for python 3.9. - run: pytest -v --db-backend psql -m 'not nightly' tests/ ${{ matrix.python-version == '3.9' && '--cov aiida' || '' }} + run: pytest --db-backend psql -m 'not nightly' tests/ ${{ matrix.python-version == '3.9' && '--cov aiida' || '' }} - name: Upload coverage report if: matrix.python-version == 3.9 && github.repository == 'aiidateam/aiida-core' diff --git a/.github/workflows/ci-style.yml b/.github/workflows/ci-style.yml deleted file mode 100644 index 1f4b549ad2..0000000000 --- a/.github/workflows/ci-style.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: ci-style - -on: - push: - branches-ignore: [gh-pages] - pull_request: - branches-ignore: [gh-pages] - -env: - FORCE_COLOR: 1 - -jobs: - - pre-commit: - - runs-on: ubuntu-latest - timeout-minutes: 30 - - steps: - - uses: actions/checkout@v4 - - - name: Install python dependencies - uses: ./.github/actions/install-aiida-core - with: - python-version: '3.11' - extras: '[pre-commit]' - from-requirements: 'false' - - - name: Run pre-commit - run: pre-commit run --all-files || ( git status --short ; git diff ; exit 1 ) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 9217e534f8..84ed617125 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -129,7 +129,7 @@ jobs: id: tests env: AIIDA_WARN_v3: 0 - run: pytest -sv --db-backend sqlite -m 'requires_rmq' tests/ + run: pytest -s --db-backend sqlite -m 'requires_rmq' tests/ - name: Slack notification # Always run this step (otherwise it would be skipped if any of the previous steps fail) but only if the diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 524a625afc..c47595baee 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -67,7 +67,7 @@ jobs: uses: ./.github/actions/install-aiida-core - name: Run sub-set of test suite - run: pytest -sv -m requires_rmq --db-backend=sqlite tests/ + run: pytest -s -m requires_rmq --db-backend=sqlite tests/ publish: diff --git a/.github/workflows/test-install.yml b/.github/workflows/test-install.yml index 75371449bb..d315a50691 100644 --- a/.github/workflows/test-install.yml +++ b/.github/workflows/test-install.yml @@ -229,7 +229,7 @@ jobs: env: AIIDA_TEST_PROFILE: test_aiida AIIDA_WARN_v3: 1 - run: pytest -v --db-backend psql tests -m 'not nightly' tests/ + run: pytest --db-backend psql tests -m 'not nightly' tests/ - name: Freeze test environment run: pip freeze | sed '1d' | tee requirements-py-${{ matrix.python-version }}.txt diff --git a/.github/workflows/tests_nightly.sh b/.github/workflows/tests_nightly.sh index dbb1b92a8a..2712a5124e 100755 --- a/.github/workflows/tests_nightly.sh +++ b/.github/workflows/tests_nightly.sh @@ -13,4 +13,4 @@ verdi -p test_aiida run ${SYSTEM_TESTS}/test_containerized_code.py bash ${SYSTEM_TESTS}/test_polish_workchains.sh verdi daemon stop -AIIDA_TEST_PROFILE=test_aiida pytest -v --db-backend psql -m nightly tests/ +AIIDA_TEST_PROFILE=test_aiida pytest --db-backend psql -m nightly tests/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9dd65e8d71..9aa6f910dc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -84,6 +84,7 @@ repos: .docker/.*| 
            docs/.*|
            utils/.*|
+            tests/.*|

            src/aiida/calculations/arithmetic/add.py|
            src/aiida/calculations/diff_tutorial/calculations.py|
@@ -186,17 +187,6 @@ repos:
            src/aiida/transports/plugins/local.py|
            src/aiida/transports/plugins/ssh.py|
            src/aiida/workflows/arithmetic/multiply_add.py|
-
-            tests/conftest.py|
-            tests/repository/conftest.py|
-            tests/repository/test_repository.py|
-            tests/sphinxext/sources/workchain/conf.py|
-            tests/sphinxext/sources/workchain_broken/conf.py|
-            tests/storage/psql_dos/migrations/conftest.py|
-            tests/storage/psql_dos/migrations/django_branch/test_0026_0027_traj_data.py|
-            tests/test_calculation_node.py|
-            tests/test_nodes.py|
-
          )$

  - id: dm-generate-all
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ea3c843d8a..04327d7105 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,18 @@
 # Changelog

+## v2.6.3 - 2024-11-06
+
+### Fixes
+- CLI: Fix exception for `verdi plugin list` (#6560) [[c3b10b7]](https://github.com/aiidateam/aiida-core/commit/c3b10b759a9cd062800ef120591d5c7fd0ae4ee7)
+- `DirectScheduler`: Ensure killing child processes (#6572) [[fddffca]](https://github.com/aiidateam/aiida-core/commit/fddffca67b4f7e3b76b19df7db8e1511c449d2d9)
+- Engine: Fix state change broadcast before process node is updated (#6580) [[867353c]](https://github.com/aiidateam/aiida-core/commit/867353c415c61d94a2427d5225dd5224a1b95fb9)
+
+### Devops
+- Docker: Replace sleep with `s6-notifyoncheck` (#6475) [[9579378b]](https://github.com/aiidateam/aiida-core/commit/9579378ba063237baa5b73380eb8e9f0a28529ee)
+- Fix failing Docker CI by using a more reasonable grep regex to parse the Python version (#6581) [[332a4a91]](https://github.com/aiidateam/aiida-core/commit/332a4a915771afedcb144463b012558e4669e529)
+- DevOps: Fix JSON query for reading the Docker names to filter out fields not starting with `aiida` (#6573) [[e1467edc]](https://github.com/aiidateam/aiida-core/commit/e1467edca902867e53605e0e60b67f8767bf8d3e)
+
+
 ## v2.6.2 - 2024-08-07

 ### Fixes
@@ -31,7 +44,7 @@

 ## v2.6.1 - 2024-07-01

-### Fixes:
+### Fixes

 - Fixtures: Make `pgtest` truly an optional dependency [[9fe8fd2e0]](https://github.com/aiidateam/aiida-core/commit/9fe8fd2e0b88e746ee2156eccb71b7adbab6b2c5)

diff --git a/pyproject.toml b/pyproject.toml
index 31ecb8442b..35f2f23b7f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -255,6 +255,7 @@ runaiida = 'aiida.cmdline.commands.cmd_run:run'
 verdi = 'aiida.cmdline.commands.cmd_verdi:verdi'

 [project.urls]
+Changelog = 'https://github.com/aiidateam/aiida-core/blob/main/CHANGELOG.md'
 Documentation = 'https://aiida.readthedocs.io'
 Home = 'http://www.aiida.net/'
 Source = 'https://github.com/aiidateam/aiida-core'
diff --git a/src/aiida/cmdline/utils/ascii_vis.py b/src/aiida/cmdline/utils/ascii_vis.py
index f42317e7a8..502abf3bcf 100644
--- a/src/aiida/cmdline/utils/ascii_vis.py
+++ b/src/aiida/cmdline/utils/ascii_vis.py
@@ -29,7 +29,7 @@ def calc_info(node, call_link_label: bool = False) -> str:
         raise TypeError(f'Unknown type: {type(node)}')

     process_label = node.process_label
-    process_state = node.process_state.value.capitalize()
+    process_state = 'None' if node.process_state is None else node.process_state.value.capitalize()
     exit_status = node.exit_status

     if call_link_label and (caller := node.caller):
diff --git a/src/aiida/orm/nodes/data/list.py b/src/aiida/orm/nodes/data/list.py
index fc39dd1acd..d2c0857b35 100644
--- a/src/aiida/orm/nodes/data/list.py
+++ b/src/aiida/orm/nodes/data/list.py
@@ -9,6 +9,7 @@
 """`Data` sub class to represent a list."""

 from collections.abc import MutableSequence
+from typing import Any from .base import to_aiida_type from .data import Data @@ -81,15 +82,15 @@ def remove(self, value): self.set_list(data) return item - def pop(self, **kwargs): # type: ignore[override] + def pop(self, index: int = -1) -> Any: """Remove and return item at index (default last).""" data = self.get_list() - item = data.pop(**kwargs) + item = data.pop(index) if not self._using_list_reference(): self.set_list(data) return item - def index(self, value): # type: ignore[override] + def index(self, value: Any, start: int = 0, stop: int = 0) -> int: """Return first index of value..""" return self.get_list().index(value) diff --git a/src/aiida/tools/pytest_fixtures/daemon.py b/src/aiida/tools/pytest_fixtures/daemon.py index 2b74e4ce77..89ef02d841 100644 --- a/src/aiida/tools/pytest_fixtures/daemon.py +++ b/src/aiida/tools/pytest_fixtures/daemon.py @@ -116,7 +116,7 @@ def test(submit_and_await): from aiida.engine import ProcessState def factory( - submittable: type[Process] | ProcessBuilder | ProcessNode | t.Any, + submittable: type[Process] | ProcessBuilder | ProcessNode, state: ProcessState = ProcessState.FINISHED, timeout: int = 20, **kwargs, diff --git a/tests/cmdline/utils/test_ascii_vis.py b/tests/cmdline/utils/test_ascii_vis.py new file mode 100644 index 0000000000..9fb6d26423 --- /dev/null +++ b/tests/cmdline/utils/test_ascii_vis.py @@ -0,0 +1,20 @@ +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Tests for the :mod:`aiida.cmdline.utils.ascii_vis` module.""" + +from aiida.orm.nodes.process.process import ProcessNode + + +def test_build_call_graph(): + from aiida.cmdline.utils.ascii_vis import build_call_graph + + node = ProcessNode() + + call_graph = build_call_graph(node) + assert call_graph == 'None None' diff --git a/tests/orm/test_querybuilder.py b/tests/orm/test_querybuilder.py index 1aca33467c..c434c93411 100644 --- a/tests/orm/test_querybuilder.py +++ b/tests/orm/test_querybuilder.py @@ -9,6 +9,7 @@ """Tests for the QueryBuilder.""" import copy +import json import uuid import warnings from collections import defaultdict @@ -1703,3 +1704,163 @@ def test_statistics_default_class(self, aiida_localhost): # data are correct res = next(iter(qb.dict()[0].values())) assert res == expected_dict + + +class TestJsonFilters: + @pytest.mark.parametrize( + 'data,filters,is_match', + ( + # contains different types of element + ({'arr': [1, '2', None]}, {'attributes.arr': {'contains': [1]}}, True), + ({'arr': [1, '2', None]}, {'attributes.arr': {'contains': ['2']}}, True), + ({'arr': [1, '2', None]}, {'attributes.arr': {'contains': [None]}}, True), + # contains multiple elements of various types + ({'arr': [1, '2', None]}, {'attributes.arr': {'contains': [1, None]}}, True), + # contains non-exist elements + ({'arr': [1, '2', None]}, {'attributes.arr': {'contains': [114514]}}, False), + # contains empty set + ({'arr': [1, '2', None]}, {'attributes.arr': {'contains': []}}, True), + ({'arr': []}, {'attributes.arr': {'contains': []}}, True), + # nested arrays + ({'arr': [[1, 0], [0, 2]]}, {'attributes.arr': {'contains': [[1, 0]]}}, True), + ({'arr': [[2, 3], [0, 1], 
[]]}, {'attributes.arr': {'contains': [[1, 0]]}}, True), + ({'arr': [[2, 3], [1]]}, {'attributes.arr': {'contains': [[4]]}}, False), + ({'arr': [[1, 0], [0, 2]]}, {'attributes.arr': {'contains': [[3]]}}, False), + ({'arr': [[1, 0], [0, 2]]}, {'attributes.arr': {'contains': [3]}}, False), + ({'arr': [[1, 0], [0, 2]]}, {'attributes.arr': {'contains': [[2]]}}, True), + ({'arr': [[1, 0], [0, 2]]}, {'attributes.arr': {'contains': [2]}}, False), + ({'arr': [[1, 0], [0, 2], 3]}, {'attributes.arr': {'contains': [[3]]}}, False), + ({'arr': [[1, 0], [0, 2], 3]}, {'attributes.arr': {'contains': [3]}}, True), + # negations + ({'arr': [1, '2', None]}, {'attributes.arr': {'!contains': [1]}}, False), + ({'arr': [1, '2', None]}, {'attributes.arr': {'!contains': []}}, False), + ({'arr': [1, '2', None]}, {'attributes.arr': {'!contains': [114514]}}, True), + ({'arr': [1, '2', None]}, {'attributes.arr': {'!contains': [1, 114514]}}, True), + # TODO: these pass, but why? are these behaviors expected? + # non-exist `attr_key`s + ({'foo': []}, {'attributes.arr': {'contains': []}}, False), + ({'foo': []}, {'attributes.arr': {'!contains': []}}, False), + ), + ids=json.dumps, + ) + @pytest.mark.usefixtures('aiida_profile_clean') + @pytest.mark.requires_psql + def test_json_filters_contains_arrays(self, data, filters, is_match): + """Test QueryBuilder filter `contains` for JSON array fields""" + orm.Dict(data).store() + qb = orm.QueryBuilder().append(orm.Dict, filters=filters) + assert qb.count() in {0, 1} + found = qb.count() == 1 + assert found == is_match + + @pytest.mark.parametrize( + 'data,filters,is_match', + ( + # contains different types of values + ( + { + 'dict': { + 'k1': 1, + 'k2': '2', + 'k3': None, + } + }, + {'attributes.dict': {'contains': {'k1': 1}}}, + True, + ), + ( + { + 'dict': { + 'k1': 1, + 'k2': '2', + 'k3': None, + } + }, + {'attributes.dict': {'contains': {'k1': 1, 'k2': '2'}}}, + True, + ), + ( + { + 'dict': { + 'k1': 1, + 'k2': '2', + 'k3': None, + } + }, + {'attributes.dict': {'contains': {'k3': None}}}, + True, + ), + # contains empty set + ( + { + 'dict': { + 'k1': 1, + 'k2': '2', + 'k3': None, + } + }, + {'attributes.dict': {'contains': {}}}, + True, + ), + # doesn't contain non-exist entries + ( + { + 'dict': { + 'k1': 1, + 'k2': '2', + 'k3': None, + } + }, + {'attributes.dict': {'contains': {'k1': 1, 'k': 'v'}}}, + False, + ), + # negations + ( + { + 'dict': { + 'k1': 1, + 'k2': '2', + 'k3': None, + } + }, + {'attributes.dict': {'!contains': {'k1': 1}}}, + False, + ), + ( + { + 'dict': { + 'k1': 1, + 'k2': '2', + 'k3': None, + } + }, + {'attributes.dict': {'!contains': {'k1': 1, 'k': 'v'}}}, + True, + ), + ( + { + 'dict': { + 'k1': 1, + 'k2': '2', + 'k3': None, + } + }, + {'attributes.dict': {'!contains': {}}}, + False, + ), + # TODO: these pass, but why? are these behaviors expected? 
+            # non-exist `attr_key`s
+            ({'map': {}}, {'attributes.dict': {'contains': {}}}, False),
+            ({'map': {}}, {'attributes.dict': {'!contains': {}}}, False),
+        ),
+        ids=json.dumps,
+    )
+    @pytest.mark.usefixtures('aiida_profile_clean')
+    @pytest.mark.requires_psql
+    def test_json_filters_contains_object(self, data, filters, is_match):
+        """Test QueryBuilder filter `contains` for JSON object fields"""
+        orm.Dict(data).store()
+        qb = orm.QueryBuilder().append(orm.Dict, filters=filters)
+        assert qb.count() in {0, 1}
+        found = qb.count() == 1
+        assert found == is_match
diff --git a/utils/patch-release.sh b/utils/patch-release.sh
new file mode 100755
index 0000000000..a9d67bee79
--- /dev/null
+++ b/utils/patch-release.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+# Script: patch-release.sh
+# Description:
+#   Cherry-picks a list of commits, amends each with the original commit hash for tracking,
+#   and generates a Markdown summary of the short GitHub commit messages with links to each original commit.
+#
+# Usage:
+#   ./patch-release.sh <commit-hash-1> <commit-hash-2> ...
+#
+# Example:
+#   ./patch-release.sh abc1234 def5678
+
+set -e
+
+# Check that at least one commit hash is provided
+if [ "$#" -lt 1 ]; then
+    echo "Usage: $0 <commit-hash-1> <commit-hash-2> ..."
+    echo "Example: $0 abc1234 def5678"
+    exit 1
+fi
+
+GITHUB_REPO="aiidateam/aiida-core"
+
+# Create an array to store commit summaries
+declare -a commit_summaries=()
+
+# Loop through each commit hash
+for commit in "$@"; do
+    # Cherry-pick the commit
+    if git cherry-pick "$commit"; then
+        # If cherry-pick succeeds, get the full commit message and the original hashes
+        commit_message=$(git log -1 --pretty=format:"%B" HEAD)
+        original_short_hash=$(git log -1 --pretty=format:"%h" "$commit")
+        original_long_hash=$(git rev-parse "$original_short_hash")
+
+        # Amend the cherry-picked commit to include the original commit ID for tracking
+        git commit --amend -m "$commit_message" -m "Cherry-pick: $original_long_hash"
+
+        # Format the output as a Markdown list item and add to the array
+        short_commit_message=$(git log -1 --pretty=format:"%s" HEAD)
+        cherry_picked_hash=$(git log -1 --pretty=format:"%h" HEAD)
+        commit_summaries+=("- $short_commit_message [[${commit}]](https://github.com/$GITHUB_REPO/commit/${original_long_hash})")
+    else
+        echo "Failed to cherry-pick commit $commit"
+        # Abort the cherry-pick in case of conflict
+        git cherry-pick --abort
+        exit 1
+    fi
+done
+
+# Print the summary
+echo -e "\n### Cherry-Picked Commits Summary:\n"
+for summary in "${commit_summaries[@]}"; do
+    echo "$summary"
+done
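
Note on the new `contains` filter (illustrative, not part of the patch): the `TestJsonFilters` cases above translate directly into `QueryBuilder` calls. The sketch below assumes a configured AiiDA profile and, per the `requires_psql` marker, the PostgreSQL storage backend; the stored data and filters are taken from the test parametrization.

```python
# Minimal sketch, assuming a configured AiiDA profile with a PostgreSQL backend.
from aiida import load_profile, orm

load_profile()

# Store a Dict node whose `arr` attribute is a JSON array, as in the tests.
orm.Dict({'arr': [1, '2', None]}).store()

# `contains` matches nodes whose array holds every listed element.
qb = orm.QueryBuilder().append(
    orm.Dict, filters={'attributes.arr': {'contains': [1, None]}}
)
print(qb.count())  # 1 on a clean profile containing only the node above

# `!contains` negates the filter.
qb = orm.QueryBuilder().append(
    orm.Dict, filters={'attributes.arr': {'!contains': [114514]}}
)
print(qb.count())  # also 1: the stored array does not contain 114514
```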