diff --git a/.circleci/get_data.py b/.circleci/get_data.py old mode 100644 new mode 100755 index 4742d1e6a..9f9a702ac --- a/.circleci/get_data.py +++ b/.circleci/get_data.py @@ -1,10 +1,11 @@ #!/usr/bin/env python3 """Download test data.""" + import sys from xcp_d.tests.utils import download_test_data -if __name__ == "__main__": +if __name__ == '__main__': data_dir = sys.argv[1] dset = sys.argv[2] download_test_data(dset, data_dir) diff --git a/.codespellrc b/.codespellrc index 93eaad563..131c65212 100644 --- a/.codespellrc +++ b/.codespellrc @@ -1,6 +1,8 @@ [codespell] -skip = .git,*.pdf,*.svg,*.html,dataset_description.json +skip = .git,*.pdf,*.svg,*.html,dataset_description.json,*.gii # te - TE # Weill - name # reson - Reson. abbreviation in citation -ignore-words-list = te,weill,reson +# DNE: acronym for does not exist +# fo: acronym for file object +ignore-words-list = te,weill,reson,DNE,fo diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 569bfdeb4..35b44734a 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,4 +1,4 @@ -name: Lint code +name: Contribution checks on: push: @@ -12,26 +12,27 @@ defaults: run: shell: bash -jobs: - stable: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: ['ubuntu-latest'] - python-version: ["3.10"] +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read +jobs: + style: + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - name: Display Python version - run: python -c "import sys; print(sys.version)" - - name: Install flake8 and related packages - run: python -m pip install \ - flake8 flake8-absolute-import flake8-black flake8-docstrings \ - flake8-isort flake8-pyproject flake8-unused-arguments \ - flake8-use-fstring pep8-naming - - name: Check xcp_d - run: python -m flake8 xcp_d + - run: pipx run ruff check . + - run: pipx run ruff format --diff . + + codespell: + name: Check for spelling errors + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Codespell + uses: codespell-project/actions-codespell@v2 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 38ff4eaf6..d4ad38431 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,16 +1,21 @@ +exclude: ".*/data/.*" repos: -- repo: https://github.com/pre-commit/pre-commit-hooks + - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: check-yaml - - id: check-added-large-files -- repo: https://github.com/psf/black - rev: 23.1.0 + - id: trailing-whitespace + exclude: '.*\.svg' + - id: end-of-file-fixer + exclude: '.*\.svg' + - id: check-yaml + - id: check-json + - id: check-toml + - id: check-added-large-files + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.6.2 hooks: - - id: black -- repo: https://github.com/pycqa/isort - rev: 5.12.0 - hooks: - - id: isort + - id: ruff + args: [ --fix ] + - id: ruff-format + - id: ruff + args: [ --select, ISC001, --fix ] diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 7cadb06e4..f816e1c9f 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -6,7 +6,7 @@ experience for everyone. 
Although no list can hope to be all-encompassing, we explicitly honor diversity in age, body size, disability, ethnicity, gender identity and expression, level of experience, -native language, education, socio-economic status, nationality, personal appearance, race, religion, +native language, education, socioeconomic status, nationality, personal appearance, race, religion, or sexual identity and orientation. ## Our Standards diff --git a/docs/changes.md b/docs/changes.md index 723d96ae0..cb4f8c4b6 100644 --- a/docs/changes.md +++ b/docs/changes.md @@ -152,7 +152,7 @@ This is a patch release that fixes two important bugs. The two bugs are: (1) band-pass filter values were not respected in versions 0.7.1 - 0.7.3 (they were hardcoded to 0.01 - 0.1) and (2) when processing CIFTI files, parcellated ReHo values in TSVs were not correct, due to a problem with how we were reconstructing CIFTI ReHo files. -The dense CIFTI files should still be useable though. +The dense CIFTI files should still be usable though. ### 🎉 Exciting New Features diff --git a/docs/conf.py b/docs/conf.py index ce3b3a215..c60065d7d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # xcp_d documentation build configuration file, created by # sphinx-quickstart on Mon May 9 09:04:25 2016. @@ -23,76 +22,75 @@ # " for paragraphs import os import sys -from datetime import datetime import xcp_d # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.append(os.path.abspath("sphinxext")) -sys.path.insert(0, os.path.abspath("../xcp_d")) +sys.path.append(os.path.abspath('sphinxext')) +sys.path.insert(0, os.path.abspath('../xcp_d')) from github_link import make_linkcode_resolve # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "4.2.0" +needs_sphinx = '4.2.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ # "matplotlib.sphinxext.plot_directive", - "nipype.sphinxext.apidoc", + 'nipype.sphinxext.apidoc', # "nipype.sphinxext.documenter", - "nipype.sphinxext.plot_workflow", - "recommonmark", - "sphinx.ext.autodoc", - "sphinx.ext.coverage", - "sphinx.ext.doctest", + 'nipype.sphinxext.plot_workflow', + 'recommonmark', + 'sphinx.ext.autodoc', + 'sphinx.ext.coverage', + 'sphinx.ext.doctest', # "sphinx.ext.graphviz", # "sphinx.ext.inheritance_diagram", - "sphinx.ext.intersphinx", - "sphinx.ext.linkcode", - "sphinx.ext.mathjax", + 'sphinx.ext.intersphinx', + 'sphinx.ext.linkcode', + 'sphinx.ext.mathjax', # "sphinx.ext.todo", - "sphinx_markdown_tables", - "sphinxarg.ext", # argparse extension - "sphinxcontrib.apidoc", - "sphinxcontrib.bibtex", + 'sphinx_markdown_tables', + 'sphinxarg.ext', # argparse extension + 'sphinxcontrib.apidoc', + 'sphinxcontrib.bibtex', ] # Mock modules in autodoc: autodoc_mock_imports = [ - "numpy", - "matplotlib", - "pandas", - "nilearn", - "seaborn", + 'numpy', + 'matplotlib', + 'pandas', + 'nilearn', + 'seaborn', ] # NOTE: Not in qsiprep # autosectionlabel_prefix_document = True # Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] +templates_path = ['_templates'] # The suffix(es) of source filenames. 
# You can specify multiple suffixes as a list of string: -source_suffix = [".rst", ".md"] +source_suffix = ['.rst', '.md'] # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = "index" +master_doc = 'index' # General information about the project. -project = "xcp_d" -author = "xcp_d team" -copyright = f"2021-{datetime.now().year}, {author}" +project = 'xcp_d' +author = 'xcp_d team' +copyright = '2021-, {author}' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -108,7 +106,7 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = "en" +language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: @@ -119,7 +117,7 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The reST default role (used for this markup: `text`) to use for all # documents. @@ -137,7 +135,7 @@ # show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = "default" +pygments_style = 'default' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] @@ -172,26 +170,26 @@ # https://github.com/sphinx-contrib/napoleon/pull/10 is merged. napoleon_use_param = False napoleon_custom_sections = [ - ("Inputs", "Parameters"), - ("Outputs", "Parameters"), - ("Attributes", "Parameters"), - ("Mandatory Inputs", "Parameters"), - ("Optional Inputs", "Parameters"), - ("License", "License"), + ('Inputs', 'Parameters'), + ('Outputs', 'Parameters'), + ('Attributes', 'Parameters'), + ('Mandatory Inputs', 'Parameters'), + ('Optional Inputs', 'Parameters'), + ('License', 'License'), ] # -- Extension configuration ------------------------------------------------- -apidoc_module_dir = "../xcp_d" -apidoc_output_dir = "api" -apidoc_excluded_paths = ["conftest.py", "*/tests/*", "tests/*", "data/*"] +apidoc_module_dir = '../xcp_d' +apidoc_output_dir = 'api' +apidoc_excluded_paths = ['conftest.py', '*/tests/*', 'tests/*', 'data/*'] apidoc_separate_modules = True -apidoc_extra_args = ["--module-first", "-d 1", "-T"] +apidoc_extra_args = ['--module-first', '-d 1', '-T'] # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = "sphinx_rtd_theme" +html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -220,7 +218,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] +html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied @@ -285,7 +283,7 @@ # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. 
-htmlhelp_basename = "xcp_d_doc" +htmlhelp_basename = 'xcp_d_doc' # -- Options for LaTeX output --------------------------------------------- @@ -304,7 +302,7 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, "xcp_d.tex", "xcp_d Documentation", author, "manual"), + (master_doc, 'xcp_d.tex', 'xcp_d Documentation', author, 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -332,7 +330,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "xcp_d", "xcp_d Documentation", [author], 1)] +man_pages = [(master_doc, 'xcp_d', 'xcp_d Documentation', [author], 1)] # If true, show URL addresses after external links. # man_show_urls = False @@ -346,12 +344,12 @@ texinfo_documents = [ ( master_doc, - "xcp_d", - "xcp_d Documentation", + 'xcp_d', + 'xcp_d Documentation', author, - "Azeez Adebimpe and Team", - "One line description of project.", - "Miscellaneous", + 'Azeez Adebimpe and Team', + 'One line description of project.', + 'Miscellaneous', ), ] @@ -369,47 +367,46 @@ # The following is used by sphinx.ext.linkcode to provide links to github linkcode_resolve = make_linkcode_resolve( - "xcp_d", + 'xcp_d', ( - "https://github.com/pennlinc/xcp_d/blob/" - "{revision}/{package}/{path}#L{lineno}" # noqa: FS003 + 'https://github.com/pennlinc/xcp_d/blob/' '{revision}/{package}/{path}#L{lineno}' # noqa: FS003 ), ) # ----------------------------------------------------------------------------- # intersphinx # ----------------------------------------------------------------------------- -_python_version_str = f"{sys.version_info.major}.{sys.version_info.minor}" -_python_doc_base = f"https://docs.python.org/{_python_version_str}" +_python_version_str = f'{sys.version_info.major}.{sys.version_info.minor}' +_python_doc_base = f'https://docs.python.org/{_python_version_str}' intersphinx_mapping = { - "python": (_python_doc_base, None), - "numpy": ("https://numpy.org/doc/stable/", None), - "scipy": ( - "https://docs.scipy.org/doc/scipy/reference", - (None, "./_intersphinx/scipy-objects.inv"), + 'python': (_python_doc_base, None), + 'numpy': ('https://numpy.org/doc/stable/', None), + 'scipy': ( + 'https://docs.scipy.org/doc/scipy/reference', + (None, './_intersphinx/scipy-objects.inv'), ), - "matplotlib": ( - "https://matplotlib.org/stable/", - (None, "https://matplotlib.org/stable/objects.inv"), + 'matplotlib': ( + 'https://matplotlib.org/stable/', + (None, 'https://matplotlib.org/stable/objects.inv'), ), - "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), - "nibabel": ("https://nipy.org/nibabel/", None), - "nilearn": ("http://nilearn.github.io/stable/", None), - "nipype": ("https://nipype.readthedocs.io/en/latest/", None), + 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None), + 'nibabel': ('https://nipy.org/nibabel/', None), + 'nilearn': ('http://nilearn.github.io/stable/', None), + 'nipype': ('https://nipype.readthedocs.io/en/latest/', None), } -suppress_warnings = ["image.nonlocal_uri"] +suppress_warnings = ['image.nonlocal_uri'] # ----------------------------------------------------------------------------- # sphinxcontrib-bibtex # ----------------------------------------------------------------------------- -bibtex_bibfiles = ["../xcp_d/data/boilerplate.bib"] -bibtex_style = "unsrt" -bibtex_reference_style = "author_year" -bibtex_footbibliography_header = "" 
+bibtex_bibfiles = ['../xcp_d/data/boilerplate.bib'] +bibtex_style = 'unsrt' +bibtex_reference_style = 'author_year' +bibtex_footbibliography_header = '' def setup(app): """Add extra formatting files.""" - app.add_css_file("theme_overrides.css") + app.add_css_file('theme_overrides.css') # We need this for the boilerplate script - app.add_js_file("https://cdn.rawgit.com/chrisfilo/zenodo.js/v0.1/zenodo.js") + app.add_js_file('https://cdn.rawgit.com/chrisfilo/zenodo.js/v0.1/zenodo.js') diff --git a/docs/sphinxext/github_link.py b/docs/sphinxext/github_link.py index 47ff2a76f..fabba0ab5 100644 --- a/docs/sphinxext/github_link.py +++ b/docs/sphinxext/github_link.py @@ -1,13 +1,14 @@ """ -This script comes from scikit-learn: +This vendored script comes from scikit-learn: https://github.com/scikit-learn/scikit-learn/blob/master/doc/sphinxext/github_link.py """ -from operator import attrgetter + import inspect -import subprocess import os +import subprocess import sys from functools import partial +from operator import attrgetter REVISION_CMD = 'git rev-parse --short HEAD' @@ -44,12 +45,13 @@ def _linkcode_resolve(domain, info, package, url_fmt, revision): return class_name = info['fullname'].split('.')[0] - if type(class_name) != str: - # Python 2 only - class_name = class_name.encode('utf-8') module = __import__(info['module'], fromlist=[class_name]) obj = attrgetter(info['fullname'])(module) + # Unwrap the object to get the correct source + # file in case that is wrapped by a decorator + obj = inspect.unwrap(obj) + try: fn = inspect.getsourcefile(obj) except Exception: @@ -62,14 +64,12 @@ def _linkcode_resolve(domain, info, package, url_fmt, revision): if not fn: return - fn = os.path.relpath(fn, - start=os.path.dirname(__import__(package).__file__)) + fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__)) try: lineno = inspect.getsourcelines(obj)[1] except Exception: lineno = '' - return url_fmt.format(revision=revision, package=package, - path=fn, lineno=lineno) + return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno) def make_linkcode_resolve(package, url_fmt): @@ -84,5 +84,4 @@ def make_linkcode_resolve(package, url_fmt): '{path}#L{lineno}') """ revision = _get_git_revision() - return partial(_linkcode_resolve, revision=revision, package=package, - url_fmt=url_fmt) + return partial(_linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt) diff --git a/docs/workflows.rst b/docs/workflows.rst index 749841d51..45f3111f0 100644 --- a/docs/workflows.rst +++ b/docs/workflows.rst @@ -641,7 +641,7 @@ Dummy scan removal [OPTIONAL] :class:`~xcp_d.interfaces.censoring.RemoveDummyVolumes` XCP-D allows the first *N* volumes to be removed before processing. -These volumes are usually refered to as dummy volumes. +These volumes are usually referred to as dummy volumes. Most default scanning sequences include dummy volumes that are not reconstructed. However, some users still prefer to remove the first few reconstructed volumes. @@ -924,7 +924,7 @@ Quality control The quality control (QC) in ``XCP-D`` estimates the quality of BOLD data before and after regression and also estimates BOLD-T1w coregistration and BOLD-Template normalization -qualites. +qualities. The QC metrics include the following: a. 
Motion parameters summary: mean FD, mean and maximum RMS diff --git a/pyproject.toml b/pyproject.toml index b33753b4a..43379ff45 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,17 +69,12 @@ doc = [ "sphinxcontrib-bibtex", "svgutils", ] +dev = [ + "ruff ~= 0.4.3", + "pre-commit", +] tests = [ "coverage", - "flake8", - "flake8-absolute-import", - "flake8-black", - "flake8-docstrings", - "flake8-isort", - "flake8-pyproject", - "flake8-unused-arguments", - "flake8-use-fstring", - "pep8-naming", "pytest", "pytest-cov", "pytest-xdist", @@ -91,7 +86,7 @@ maint = [ ] # Aliases -all = ["xcp_d[doc,maint,tests]"] +all = ["xcp_d[dev,doc,maint,tests]"] [project.scripts] xcp_d = "xcp_d.cli.run:main" @@ -126,51 +121,57 @@ version-file = "xcp_d/_version.py" # Developer tool configurations # +# Disable black [tool.black] +exclude = ".*" + +[tool.ruff] line-length = 99 -target-version = ['py38'] -include = '\.pyi?$' -exclude = ''' -( - /( - \.eggs # exclude a few common directories in the - | \.git # root of the project - | \.github - | \.hg - | \.pytest_cache - | _build - | build - | dist - )/ - | versioneer.py - | xcp_d/_version.py -) -''' - -[tool.isort] -profile = "black" -multi_line_output = 3 -src_paths = ["isort", "test"] -known_local_folder = ["xcp_d"] - -[tool.flake8] -max-line-length = "99" -doctests = "False" -exclude = [ - "*build/", - "xcp_d/_version.py", - "xcp_d/_warnings.py", - "xcp_d/config.py", - "xcp_d/data/", - "xcp_d/tests/", - "xcp_d/utils/sentry.py", + +[tool.ruff.lint] +extend-select = [ + "F", + "E", + "W", + "I", + "UP", + "YTT", + "S", + "BLE", + "B", + "A", + # "CPY", + "C4", + "DTZ", + "T10", + # "EM", + "EXE", + "FA", + "ISC", + "ICN", + "PT", + "Q", ] -ignore = ["D107", "E203", "E402", "E722", "W503", "N803", "N806", "N815"] -per-file-ignores = [ - "**/__init__.py : F401", - "docs/conf.py : E265", +ignore = [ + "S101", # Ignore use of assert for now + "S105", + "S311", # We are not using random for cryptographic purposes + "ISC001", + "S603", ] +[tool.ruff.lint.flake8-quotes] +inline-quotes = "single" + +[tool.ruff.lint.extend-per-file-ignores] +"*/test_*.py" = ["S101"] +"xcp_d/utils/debug.py" = ["A002", "T100"] +"docs/conf.py" = ["A001"] +"docs/sphinxext/github_link.py" = ["BLE001"] + +[tool.ruff.format] +quote-style = "single" + [tool.pytest.ini_options] addopts = '-m "not integration"' markers = [ diff --git a/tox.ini b/tox.ini new file mode 100644 index 000000000..fa08ff939 --- /dev/null +++ b/tox.ini @@ -0,0 +1,102 @@ +[tox] +requires = + tox>=4 +envlist = + py3{10,11,12}-latest + py310-min + py3{10,11,12}-pre +skip_missing_interpreters = true + +# Configuration that allows us to split tests across GitHub runners effectively +[gh-actions] +python = + 3.10: py310 + 3.11: py311 + 3.12: py312 + +[gh-actions:env] +DEPENDS = + min: min + latest: latest + pre: pre + +[testenv] +description = Pytest with coverage +labels = test +pip_pre = + pre: true +pass_env = + # getpass.getuser() sources for Windows: + LOGNAME + USER + LNAME + USERNAME + # Pass user color preferences through + PY_COLORS + FORCE_COLOR + NO_COLOR + CLICOLOR + CLICOLOR_FORCE + PYTHON_GIL +extras = test +setenv = + pre: PIP_EXTRA_INDEX_URL=https://pypi.anaconda.org/scientific-python-nightly-wheels/simple + +commands = + pytest --cov-report term-missing --durations=20 --durations-min=1.0 {posargs:-n auto} + +[testenv:style] +description = Check our style guide +labels = check +deps = + ruff +skip_install = true +commands = + ruff check --diff + ruff format --diff + +[testenv:style-fix] +description = 
Auto-apply style guide to the extent possible +labels = pre-release +deps = + ruff +skip_install = true +commands = + ruff check --fix + ruff format + ruff check --select ISC001 + +[testenv:spellcheck] +description = Check spelling +labels = check +deps = + codespell[toml] +skip_install = true +commands = + codespell . {posargs} + +[testenv:build{,-strict}] +labels = + check + pre-release +deps = + build + twine +skip_install = true +set_env = + # Ignore specific known warnings: + # https://github.com/pypa/pip/issues/11684 + # https://github.com/pypa/pip/issues/12243 + strict: PYTHONWARNINGS=error,once:pkg_resources is deprecated as an API.:DeprecationWarning:pip._internal.metadata.importlib._envs,once:Unimplemented abstract methods {'locate_file'}:DeprecationWarning:pip._internal.metadata.importlib._dists +commands = + python -m build + python -m twine check dist/* + +[testenv:publish] +depends = build +labels = release +deps = + twine +skip_install = true +commands = + python -m twine upload dist/* diff --git a/xcp_d/__about__.py b/xcp_d/__about__.py old mode 100755 new mode 100644 index 32912a1f3..3cea2d156 --- a/xcp_d/__about__.py +++ b/xcp_d/__about__.py @@ -1,17 +1,18 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Base module variables.""" + try: from xcp_d._version import __version__ except ImportError: - __version__ = "0+unknown" + __version__ = '0+unknown' -__packagename__ = "xcp_d" -__copyright__ = "Copyright 2020, PennLINC and DCAN labs" +__packagename__ = 'xcp_d' +__copyright__ = 'Copyright 2020, PennLINC and DCAN labs' __credits__ = ( - "Contributors: please check the ``.zenodo.json`` file at the top-level folder" - "of the repository" + 'Contributors: please check the ``.zenodo.json`` file at the top-level folder' + 'of the repository' ) -__url__ = "https://github.com/PennLINC/xcp_d" +__url__ = 'https://github.com/PennLINC/xcp_d' -DOWNLOAD_URL = f"https://github.com/PennLINC/{__packagename__}/archive/{__version__}.tar.gz" +DOWNLOAD_URL = f'https://github.com/PennLINC/{__packagename__}/archive/{__version__}.tar.gz' diff --git a/xcp_d/__init__.py b/xcp_d/__init__.py index 32d068ad9..11cb08eb9 100755 --- a/xcp_d/__init__.py +++ b/xcp_d/__init__.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """XCP-D : A Robust Postprocessing Pipeline of fMRI data. @@ -12,15 +11,15 @@ from xcp_d.__about__ import __copyright__, __credits__, __packagename__, __version__ __all__ = [ - "__copyright__", - "__credits__", - "__packagename__", - "__version__", + '__copyright__', + '__credits__', + '__packagename__', + '__version__', ] # cmp is not used by fmriprep, so ignore nipype-generated warnings -warnings.filterwarnings("ignore", r"cmp not installed") -warnings.filterwarnings("ignore", r"This has not been fully tested. Please report any failures.") -warnings.filterwarnings("ignore", r"can't resolve package from __spec__ or __package__") -warnings.simplefilter("ignore", DeprecationWarning) -warnings.simplefilter("ignore", ResourceWarning) +warnings.filterwarnings('ignore', r'cmp not installed') +warnings.filterwarnings('ignore', r'This has not been fully tested. 
Please report any failures.') +warnings.filterwarnings('ignore', r"can't resolve package from __spec__ or __package__") +warnings.simplefilter('ignore', DeprecationWarning) +warnings.simplefilter('ignore', ResourceWarning) diff --git a/xcp_d/_warnings.py b/xcp_d/_warnings.py index 88bdad5d4..ef4677f04 100644 --- a/xcp_d/_warnings.py +++ b/xcp_d/_warnings.py @@ -3,7 +3,7 @@ import logging import warnings -_wlog = logging.getLogger("py.warnings") +_wlog = logging.getLogger('py.warnings') _wlog.addHandler(logging.NullHandler()) @@ -11,9 +11,9 @@ def _warn(message, category=None, stacklevel=1, source=None): """Redefine the warning function.""" if category is not None: category = type(category).__name__ - category = category.replace("type", "WARNING") + category = category.replace('type', 'WARNING') - logging.getLogger("py.warnings").warning(f"{category or 'WARNING'}: {message}") + logging.getLogger('py.warnings').warning(f"{category or 'WARNING'}: {message}") def _showwarning(message, category, filename, lineno, file=None, line=None): diff --git a/xcp_d/cli/combineqc.py b/xcp_d/cli/combineqc.py old mode 100644 new mode 100755 index 8854bfce5..e6dd087d5 --- a/xcp_d/cli/combineqc.py +++ b/xcp_d/cli/combineqc.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """Aggregate qc of all the subjects.""" + import os from argparse import ArgumentParser, RawTextHelpFormatter from pathlib import Path @@ -13,16 +13,16 @@ def get_parser(): parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) parser.add_argument( - "xcpd_dir", - action="store", + 'xcpd_dir', + action='store', type=Path, - help="xcp_d output dir", + help='xcp_d output dir', ) parser.add_argument( - "output_prefix", - action="store", + 'output_prefix', + action='store', type=str, - help="output prefix for group", + help='output prefix for group', ) return parser @@ -33,18 +33,18 @@ def main(args=None): opts = get_parser().parse_args(args) xcpd_dir = os.path.abspath(opts.xcpd_dir) - outputfile = os.path.join(os.getcwd(), f"{opts.output_prefix}_allsubjects_qc.tsv") + outputfile = os.path.join(os.getcwd(), f'{opts.output_prefix}_allsubjects_qc.tsv') qc_files = [] for dirpath, _, filenames in os.walk(xcpd_dir): for filename in filenames: - if filename.endswith("_desc-linc_qc.tsv"): + if filename.endswith('_desc-linc_qc.tsv'): qc_files.append(os.path.join(dirpath, filename)) dfs = [pd.read_table(qc_file) for qc_file in qc_files] df = pd.concat(dfs, axis=0) - df.to_csv(outputfile, index=False, sep="\t") + df.to_csv(outputfile, index=False, sep='\t') -if __name__ == "__main__": - raise RuntimeError("this should be run after XCP-D;\nrun XCP-D first") +if __name__ == '__main__': + raise RuntimeError('this should be run after XCP-D;\nrun XCP-D first') diff --git a/xcp_d/cli/parser.py b/xcp_d/cli/parser.py index 1c97449e7..9e4e64e15 100644 --- a/xcp_d/cli/parser.py +++ b/xcp_d/cli/parser.py @@ -1,10 +1,9 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- """The XCP-D preprocessing worklow. 
XCP-D preprocessing workflow ============================ """ + import os import sys @@ -24,13 +23,13 @@ def _build_parser(): from xcp_d.cli.version import check_latest, is_flagged from xcp_d.utils.atlas import select_atlases - verstr = f"XCP-D v{config.environment.version}" + verstr = f'XCP-D v{config.environment.version}' currentv = Version(config.environment.version) is_release = not any((currentv.is_devrelease, currentv.is_prerelease, currentv.is_postrelease)) parser = ArgumentParser( - description=f"XCP-D: Postprocessing Workflow of fMRI Data v{config.environment.version}", - epilog="See https://xcp-d.readthedocs.io/en/latest/workflows.html", + description=f'XCP-D: Postprocessing Workflow of fMRI Data v{config.environment.version}', + epilog='See https://xcp-d.readthedocs.io/en/latest/workflows.html', formatter_class=ArgumentDefaultsHelpFormatter, ) PathExists = partial(parser_utils._path_exists, parser=parser) @@ -40,84 +39,84 @@ def _build_parser(): # important parameters required parser.add_argument( - "fmri_dir", - action="store", + 'fmri_dir', + action='store', type=PathExists, help=( - "The root folder of fMRI preprocessing derivatives. " + 'The root folder of fMRI preprocessing derivatives. ' "For example, '/path/to/dset/derivatives/fmriprep'." ), ) parser.add_argument( - "output_dir", - action="store", + 'output_dir', + action='store', type=Path, help=( - "The output path for XCP-D derivatives. " + 'The output path for XCP-D derivatives. ' "For example, '/path/to/dset/derivatives/xcp_d'. " "As of version 0.7.0, 'xcp_d' will not be appended to the output directory." ), ) parser.add_argument( - "analysis_level", - action="store", - choices=["participant"], + 'analysis_level', + action='store', + choices=['participant'], help="The analysis level for XCP-D. Must be specified as 'participant'.", ) # Required "mode" argument - required = parser.add_argument_group("required arguments") + required = parser.add_argument_group('required arguments') required.add_argument( - "--mode", - dest="mode", - action="store", - choices=["abcd", "hbcd", "linc", "none"], + '--mode', + dest='mode', + action='store', + choices=['abcd', 'hbcd', 'linc', 'none'], required=True, help=( - "The mode of operation for XCP-D. " - "The mode sets several parameters, with values specific to different pipelines. " - "For more information, see the documentation at " - "https://xcp-d.readthedocs.io/en/latest/workflows.html#modes" + 'The mode of operation for XCP-D. ' + 'The mode sets several parameters, with values specific to different pipelines. ' + 'For more information, see the documentation at ' + 'https://xcp-d.readthedocs.io/en/latest/workflows.html#modes' ), ) # optional arguments - parser.add_argument("--version", action="version", version=verstr) + parser.add_argument('--version', action='version', version=verstr) - g_bids = parser.add_argument_group("Options for filtering BIDS queries") + g_bids = parser.add_argument_group('Options for filtering BIDS queries') g_bids.add_argument( - "--participant-label", - "--participant_label", - dest="participant_label", - action="store", - nargs="+", + '--participant-label', + '--participant_label', + dest='participant_label', + action='store', + nargs='+', help=( - "A space-delimited list of participant identifiers, or a single identifier. " + 'A space-delimited list of participant identifiers, or a single identifier. ' "The 'sub-' prefix can be removed." 
), ) g_bids.add_argument( - "-t", - "--task-id", - "--task_id", - dest="task_id", - action="store", + '-t', + '--task-id', + '--task_id', + dest='task_id', + action='store', help=( - "The name of a specific task to postprocess. " - "By default, all tasks will be postprocessed. " - "If you want to select more than one task to postprocess (but not all of them), " - "you can either run XCP-D with the --task-id parameter, separately for each task, " - "or you can use the --bids-filter-file to specify the tasks to postprocess." + 'The name of a specific task to postprocess. ' + 'By default, all tasks will be postprocessed. ' + 'If you want to select more than one task to postprocess (but not all of them), ' + 'you can either run XCP-D with the --task-id parameter, separately for each task, ' + 'or you can use the --bids-filter-file to specify the tasks to postprocess.' ), ) g_bids.add_argument( - "--bids-filter-file", - "--bids_filter_file", - dest="bids_filters", - action="store", + '--bids-filter-file', + '--bids_filter_file', + dest='bids_filters', + action='store', type=BIDSFilter, default=None, - metavar="FILE", + metavar='FILE', help=( "A JSON file describing custom BIDS input filters using PyBIDS. " "For further details, please check out " @@ -127,204 +126,204 @@ def _build_parser(): ), ) g_bids.add_argument( - "-d", - "--datasets", + '-d', + '--datasets', action=parser_utils.ToDict, - metavar="PACKAGE=PATH", + metavar='PACKAGE=PATH', type=str, - nargs="+", + nargs='+', help=( - "Search PATH(s) for derivatives or atlas datasets. " - "These may be provided as named folders " - "(e.g., `--datasets smriprep=/path/to/smriprep`)." + 'Search PATH(s) for derivatives or atlas datasets. ' + 'These may be provided as named folders ' + '(e.g., `--datasets smriprep=/path/to/smriprep`).' ), ) g_bids.add_argument( - "--bids-database-dir", - metavar="PATH", + '--bids-database-dir', + metavar='PATH', type=Path, help=( - "Path to a PyBIDS database folder, for faster indexing " - "(especially useful for large datasets). " - "Will be created if not present." + 'Path to a PyBIDS database folder, for faster indexing ' + '(especially useful for large datasets). ' + 'Will be created if not present.' 
), ) - g_perfm = parser.add_argument_group("Options for resource management") + g_perfm = parser.add_argument_group('Options for resource management') g_perfm.add_argument( - "--nprocs", - "--nthreads", - "--n-cpus", - "--n_cpus", - dest="nprocs", - action="store", + '--nprocs', + '--nthreads', + '--n-cpus', + '--n_cpus', + dest='nprocs', + action='store', type=int, default=2, - help="Maximum number of threads across all processes.", + help='Maximum number of threads across all processes.', ) g_perfm.add_argument( - "--omp-nthreads", - "--omp_nthreads", - dest="omp_nthreads", - action="store", + '--omp-nthreads', + '--omp_nthreads', + dest='omp_nthreads', + action='store', type=int, default=1, - help="Maximum number of threads per process.", + help='Maximum number of threads per process.', ) g_perfm.add_argument( - "--mem-gb", - "--mem_gb", - dest="memory_gb", - action="store", + '--mem-gb', + '--mem_gb', + dest='memory_gb', + action='store', type=int, - help="Upper bound memory limit, in gigabytes, for XCP-D processes.", + help='Upper bound memory limit, in gigabytes, for XCP-D processes.', ) g_perfm.add_argument( - "--low-mem", - dest="low_mem", - action="store_true", - help="Attempt to reduce memory usage (will increase disk usage in working directory).", + '--low-mem', + dest='low_mem', + action='store_true', + help='Attempt to reduce memory usage (will increase disk usage in working directory).', ) g_perfm.add_argument( - "--use-plugin", - "--use_plugin", - "--nipype-plugin-file", - "--nipype_plugin_file", - dest="use_plugin", - action="store", + '--use-plugin', + '--use_plugin', + '--nipype-plugin-file', + '--nipype_plugin_file', + dest='use_plugin', + action='store', default=None, type=IsFile, help=( - "Nipype plugin configuration file. " - "For more information, see https://nipype.readthedocs.io/en/0.11.0/users/plugins.html." + 'Nipype plugin configuration file. ' + 'For more information, see https://nipype.readthedocs.io/en/0.11.0/users/plugins.html.' ), ) g_perfm.add_argument( - "-v", - "--verbose", - dest="verbose_count", - action="count", + '-v', + '--verbose', + dest='verbose_count', + action='count', default=0, - help="Increases log verbosity for each occurence. Debug level is '-vvv'.", + help="Increases log verbosity for each occurrence. Debug level is '-vvv'.", ) - g_outputoption = parser.add_argument_group("Input flags") + g_outputoption = parser.add_argument_group('Input flags') g_outputoption.add_argument( - "--input-type", - "--input_type", - dest="input_type", + '--input-type', + '--input_type', + dest='input_type', required=False, - default="auto", - choices=["fmriprep", "dcan", "hcp", "nibabies", "ukb"], + default='auto', + choices=['fmriprep', 'dcan', 'hcp', 'nibabies', 'ukb'], help=( - "The pipeline used to generate the preprocessed derivatives. " + 'The pipeline used to generate the preprocessed derivatives. ' "The default pipeline is 'fmriprep'. " "The 'dcan', 'hcp', 'nibabies', and 'ukb' pipelines are also supported. " "'nibabies' assumes the same structure as 'fmriprep'." ), ) g_outputoption.add_argument( - "--file-format", - dest="file_format", - action="store", - default="auto", - choices=["auto", "cifti", "nifti"], + '--file-format', + dest='file_format', + action='store', + default='auto', + choices=['auto', 'cifti', 'nifti'], help=( - "The file format of the input data. " + 'The file format of the input data. ' "If 'auto', the file format will be inferred from the processing mode. " "If 'cifti', the input data are assumed to be in CIFTI format. 
" "If 'nifti', the input data are assumed to be in NIfTI format." ), ) - g_param = parser.add_argument_group("Postprocessing parameters") + g_param = parser.add_argument_group('Postprocessing parameters') g_param.add_argument( - "--dummy-scans", - "--dummy_scans", - dest="dummy_scans", + '--dummy-scans', + '--dummy_scans', + dest='dummy_scans', default=0, type=parser_utils._int_or_auto, - metavar="{{auto,INT}}", + metavar='{{auto,INT}}', help=( - "Number of volumes to remove from the beginning of each run. " + 'Number of volumes to remove from the beginning of each run. ' "If set to 'auto', XCP-D will extract non-steady-state volume indices from the " "preprocessing derivatives' confounds file." ), ) g_param.add_argument( - "--despike", - dest="despike", - nargs="?", + '--despike', + dest='despike', + nargs='?', const=None, - default="auto", - choices=["y", "n"], + default='auto', + choices=['y', 'n'], action=parser_utils.YesNoAction, help=( - "Despike the BOLD data before postprocessing. " + 'Despike the BOLD data before postprocessing. ' "If not defined, the despike option will be inferred from the 'mode'. " - "If defined without an argument, despiking will be enabled. " - "If defined with an argument (y or n), the value of the argument will be used. " + 'If defined without an argument, despiking will be enabled. ' + 'If defined with an argument (y or n), the value of the argument will be used. ' "'y' enables despiking. 'n' disables despiking." ), ) g_param.add_argument( - "-p", - "--nuisance-regressors", - "--nuisance_regressors", - dest="confounds_config", + '-p', + '--nuisance-regressors', + '--nuisance_regressors', + dest='confounds_config', required=False, action=parser_utils.ConfoundsAction, - default="auto", + default='auto', help=( - "Nuisance parameters to be selected. " + 'Nuisance parameters to be selected. ' "This may be a string indicating one of XCP-D's built-in nuisance regression " "strategies (e.g., '36P') or a path to a YAML file formatted according to XCP-D's " - "confound configuration file format. " + 'confound configuration file format. ' "Descriptions of each of the built-in options are included in XCP-D's documentation. " ), ) g_param.add_argument( - "--smoothing", - dest="smoothing", + '--smoothing', + dest='smoothing', default=6, - action="store", + action='store', type=float, help=( - "FWHM, in millimeters, of the Gaussian smoothing kernel to apply to the denoised BOLD " - "data. " - "Set to 0 to disable smoothing." + 'FWHM, in millimeters, of the Gaussian smoothing kernel to apply to the denoised BOLD ' + 'data. ' + 'Set to 0 to disable smoothing.' ), ) g_param.add_argument( - "-m", - "--combine-runs", - "--combine_runs", - dest="combine_runs", - nargs="?", + '-m', + '--combine-runs', + '--combine_runs', + dest='combine_runs', + nargs='?', const=None, - default="auto", - choices=["y", "n"], + default='auto', + choices=['y', 'n'], action=parser_utils.YesNoAction, - help="After denoising, concatenate each derivative from each task across runs.", + help='After denoising, concatenate each derivative from each task across runs.', ) g_motion_filter = parser.add_argument_group( - title="Motion filtering parameters", + title='Motion filtering parameters', description=( - "These parameters enable and control a filter that will be applied to motion " - "parameters. " - "Motion parameters may be contaminated by non-motion noise, and applying a filter " - "may reduce the impact of that contamination." 
+ 'These parameters enable and control a filter that will be applied to motion ' + 'parameters. ' + 'Motion parameters may be contaminated by non-motion noise, and applying a filter ' + 'may reduce the impact of that contamination.' ), ) g_motion_filter.add_argument( - "--motion-filter-type", - "--motion_filter_type", - dest="motion_filter_type", - action="store", + '--motion-filter-type', + '--motion_filter_type', + dest='motion_filter_type', + action='store', type=str, default=None, - choices=["lp", "notch", "none"], + choices=['lp', 'notch', 'none'], help="""\ Type of filter to use for removing respiratory artifact from motion regressors. If not set, no filter will be applied. @@ -336,12 +335,12 @@ def _build_parser(): """, ) g_motion_filter.add_argument( - "--band-stop-min", - "--band_stop_min", - dest="band_stop_min", + '--band-stop-min', + '--band_stop_min', + dest='band_stop_min', default=None, type=float, - metavar="BPM", + metavar='BPM', help="""\ Lower frequency for the motion parameter filter, in breaths-per-minute (bpm). Motion filtering is only performed if ``motion-filter-type`` is not None. @@ -354,12 +353,12 @@ def _build_parser(): """, ) g_motion_filter.add_argument( - "--band-stop-max", - "--band_stop_max", - dest="band_stop_max", + '--band-stop-max', + '--band_stop_max', + dest='band_stop_max', default=None, type=float, - metavar="BPM", + metavar='BPM', help="""\ Upper frequency for the band-stop motion filter, in breaths-per-minute (bpm). Motion filtering is only performed if ``motion-filter-type`` is not None. @@ -368,56 +367,56 @@ def _build_parser(): """, ) g_motion_filter.add_argument( - "--motion-filter-order", - "--motion_filter_order", - dest="motion_filter_order", + '--motion-filter-order', + '--motion_filter_order', + dest='motion_filter_order', default=4, type=int, help=( - "Number of filter coefficients for the motion parameter filter. " + 'Number of filter coefficients for the motion parameter filter. ' "If the motion filter type is 'lp', the order is divided by 2 as filtfilt applies " - "the filter twice. " + 'the filter twice. ' "If the motion filter type is 'notch', the order is divided by 4 as iirnotch is a " - "second-order filter and filtfilt applies the filter twice. " + 'second-order filter and filtfilt applies the filter twice. ' "Make sure to set this parameter to a multiple of 2 if you choose the 'lp' filter and " "a multiple of 4 if you choose the 'notch' filter." ), ) - g_censor = parser.add_argument_group("Censoring and scrubbing options") + g_censor = parser.add_argument_group('Censoring and scrubbing options') g_censor.add_argument( - "-r", - "--head-radius", - "--head_radius", - dest="head_radius", + '-r', + '--head-radius', + '--head_radius', + dest='head_radius', default=50, type=parser_utils._float_or_auto, help=( - "Head radius used to calculate framewise displacement, in mm. " - "The default value is 50 mm, which is recommended for adults. " - "For infants, we recommend a value of 35 mm. " + 'Head radius used to calculate framewise displacement, in mm. ' + 'The default value is 50 mm, which is recommended for adults. ' + 'For infants, we recommend a value of 35 mm. ' "A value of 'auto' is also supported, in which case the brain radius is " - "estimated from the preprocessed brain mask by treating the mask as a sphere." + 'estimated from the preprocessed brain mask by treating the mask as a sphere.' 
), ) g_censor.add_argument( - "-f", - "--fd-thresh", - "--fd_thresh", - dest="fd_thresh", - default="auto", + '-f', + '--fd-thresh', + '--fd_thresh', + dest='fd_thresh', + default='auto', type=parser_utils._float_or_auto, help=( - "Framewise displacement threshold for censoring. " - "Any volumes with an FD value greater than the threshold will be removed from the " - "denoised BOLD data. " - "A threshold of <=0 will disable censoring completely." + 'Framewise displacement threshold for censoring. ' + 'Any volumes with an FD value greater than the threshold will be removed from the ' + 'denoised BOLD data. ' + 'A threshold of <=0 will disable censoring completely.' ), ) g_censor.add_argument( - "--min-time", - "--min_time", - dest="min_time", + '--min-time', + '--min_time', + dest='min_time', required=False, default=240, type=float, @@ -433,123 +432,123 @@ def _build_parser(): """, ) g_censor.add_argument( - "--output-type", - dest="output_type", - default="auto", - action="store", - choices=["auto", "censored", "interpolated"], + '--output-type', + dest='output_type', + default='auto', + action='store', + choices=['auto', 'censored', 'interpolated'], help=( - "The type of output to generate. " + 'The type of output to generate. ' "If 'auto', the output type will be inferred from the processing mode. " "If 'censored', the BOLD outputs (dense and parcellated time series) will be " - "censored. " + 'censored. ' "If 'interpolated', the BOLD outputs (dense and parcellated time series) will be " - "interpolated." + 'interpolated.' ), ) g_temporal_filter = parser.add_argument_group( - title="Data filtering parameters", + title='Data filtering parameters', description=( - "These parameters determine whether a bandpass filter will be applied to the BOLD " - "data, after the censoring, denoising, and interpolation steps of the pipeline, " - "but before recensoring." + 'These parameters determine whether a bandpass filter will be applied to the BOLD ' + 'data, after the censoring, denoising, and interpolation steps of the pipeline, ' + 'but before recensoring.' ), ) g_temporal_filter.add_argument( - "--disable-bandpass-filter", - "--disable_bandpass_filter", - dest="bandpass_filter", - action="store_false", + '--disable-bandpass-filter', + '--disable_bandpass_filter', + dest='bandpass_filter', + action='store_false', help=( - "Disable bandpass filtering. " - "If bandpass filtering is disabled, then ALFF derivatives will not be calculated." + 'Disable bandpass filtering. ' + 'If bandpass filtering is disabled, then ALFF derivatives will not be calculated.' ), ) g_temporal_filter.add_argument( - "--lower-bpf", - "--lower_bpf", - action="store", + '--lower-bpf', + '--lower_bpf', + action='store', default=0.01, - dest="high_pass", + dest='high_pass', type=float, help=( - "Lower cut-off frequency (Hz) for the Butterworth bandpass filter to be applied to " - "the denoised BOLD data. Set to 0.0 or negative to disable high-pass filtering. " - "See Satterthwaite et al. (2013)." + 'Lower cut-off frequency (Hz) for the Butterworth bandpass filter to be applied to ' + 'the denoised BOLD data. Set to 0.0 or negative to disable high-pass filtering. ' + 'See Satterthwaite et al. (2013).' ), ) g_temporal_filter.add_argument( - "--upper-bpf", - "--upper_bpf", - action="store", + '--upper-bpf', + '--upper_bpf', + action='store', default=0.08, - dest="low_pass", + dest='low_pass', type=float, help=( - "Upper cut-off frequency (Hz) for the Butterworth bandpass filter to be applied to " - "the denoised BOLD data. 
Set to 0.0 or negative to disable low-pass filtering. " - "See Satterthwaite et al. (2013)." + 'Upper cut-off frequency (Hz) for the Butterworth bandpass filter to be applied to ' + 'the denoised BOLD data. Set to 0.0 or negative to disable low-pass filtering. ' + 'See Satterthwaite et al. (2013).' ), ) g_temporal_filter.add_argument( - "--bpf-order", - "--bpf_order", - dest="bpf_order", - action="store", + '--bpf-order', + '--bpf_order', + dest='bpf_order', + action='store', default=2, type=int, - help="Number of filter coefficients for the Butterworth bandpass filter.", + help='Number of filter coefficients for the Butterworth bandpass filter.', ) - g_parcellation = parser.add_argument_group("Parcellation options") + g_parcellation = parser.add_argument_group('Parcellation options') g_atlases = g_parcellation.add_mutually_exclusive_group(required=False) - all_atlases = select_atlases(atlases=None, subset="all") + all_atlases = select_atlases(atlases=None, subset='all') g_atlases.add_argument( - "--atlases", - action="store", - nargs="+", - metavar="ATLAS", + '--atlases', + action='store', + nargs='+', + metavar='ATLAS', default=all_atlases, - dest="atlases", + dest='atlases', help=( - "Selection of atlases to apply to the data. " + 'Selection of atlases to apply to the data. ' "All of XCP-D's built-in atlases are used by default." ), ) g_atlases.add_argument( - "--skip-parcellation", - "--skip_parcellation", - action="store_const", + '--skip-parcellation', + '--skip_parcellation', + action='store_const', const=[], - dest="atlases", - help="Skip parcellation and correlation steps.", + dest='atlases', + help='Skip parcellation and correlation steps.', ) g_parcellation.add_argument( - "--min-coverage", - "--min_coverage", - dest="min_coverage", + '--min-coverage', + '--min_coverage', + dest='min_coverage', required=False, default=0.5, type=parser_utils._restricted_float, help=( - "Coverage threshold to apply to parcels in each atlas. " - "Any parcels with lower coverage than the threshold will be replaced with NaNs. " - "Must be a value between zero and one, indicating proportion of the parcel. " - "Default is 0.5." + 'Coverage threshold to apply to parcels in each atlas. ' + 'Any parcels with lower coverage than the threshold will be replaced with NaNs. ' + 'Must be a value between zero and one, indicating proportion of the parcel. ' + 'Default is 0.5.' ), ) - g_dcan = parser.add_argument_group("abcd/hbcd mode options") + g_dcan = parser.add_argument_group('abcd/hbcd mode options') g_dcan.add_argument( - "--create-matrices", - "--create_matrices", - dest="dcan_correlation_lengths", + '--create-matrices', + '--create_matrices', + dest='dcan_correlation_lengths', required=False, default=None, - nargs="+", + nargs='+', type=parser_utils._float_or_auto_or_none, help="""\ If used, this parameter will produce correlation matrices limited to each requested amount of time. @@ -563,23 +562,23 @@ def _build_parser(): """, ) g_dcan.add_argument( - "--random-seed", - "--random_seed", - dest="random_seed", + '--random-seed', + '--random_seed', + dest='random_seed', default=None, type=int, - metavar="_RANDOM_SEED", + metavar='_RANDOM_SEED', help="Initialize the random seed for the '--create-matrices' option.", ) g_dcan.add_argument( - "--linc-qc", - "--linc_qc", - nargs="?", + '--linc-qc', + '--linc_qc', + nargs='?', const=None, - default="auto", - choices=["y", "n"], + default='auto', + choices=['y', 'n'], action=parser_utils.YesNoAction, - dest="linc_qc", + dest='linc_qc', help="""\ Run LINC QC. 
@@ -587,16 +586,16 @@ def _build_parser(): """, ) - g_linc = parser.add_argument_group("linc mode options") + g_linc = parser.add_argument_group('linc mode options') g_linc.add_argument( - "--abcc-qc", - "--abcc_qc", - nargs="?", + '--abcc-qc', + '--abcc_qc', + nargs='?', const=None, - default="auto", - choices=["y", "n"], + default='auto', + choices=['y', 'n'], action=parser_utils.YesNoAction, - dest="abcc_qc", + dest='abcc_qc', help="""\ Run ABCC QC. @@ -605,134 +604,134 @@ def _build_parser(): """, ) - g_other = parser.add_argument_group("Other options") + g_other = parser.add_argument_group('Other options') g_other.add_argument( - "--aggregate-session-reports", - dest="aggr_ses_reports", - action="store", + '--aggregate-session-reports', + dest='aggr_ses_reports', + action='store', type=PositiveInt, default=4, help=( "Maximum number of sessions aggregated in one subject's visual report. " - "If exceeded, visual reports are split by session." + 'If exceeded, visual reports are split by session.' ), ) g_other.add_argument( - "-w", - "--work-dir", - "--work_dir", - dest="work_dir", - action="store", + '-w', + '--work-dir', + '--work_dir', + dest='work_dir', + action='store', type=Path, - default=Path("working_dir"), - help="Path to working directory, where intermediate results should be stored.", + default=Path('working_dir'), + help='Path to working directory, where intermediate results should be stored.', ) g_other.add_argument( - "--clean-workdir", - "--clean_workdir", - dest="clean_workdir", - action="store_true", + '--clean-workdir', + '--clean_workdir', + dest='clean_workdir', + action='store_true', default=False, help=( - "Clears working directory of contents. " - "Use of this flag is not recommended when running concurrent processes of XCP-D." + 'Clears working directory of contents. ' + 'Use of this flag is not recommended when running concurrent processes of XCP-D.' ), ) g_other.add_argument( - "--resource-monitor", - "--resource_monitor", - dest="resource_monitor", - action="store_true", + '--resource-monitor', + '--resource_monitor', + dest='resource_monitor', + action='store_true', default=False, help="Enable Nipype's resource monitoring to keep track of memory and CPU usage.", ) g_other.add_argument( - "--config-file", - "--config_file", - dest="config_file", - action="store", - metavar="FILE", + '--config-file', + '--config_file', + dest='config_file', + action='store', + metavar='FILE', help=( - "Use pre-generated configuration file. " - "Values in file will be overridden by command-line arguments." + 'Use pre-generated configuration file. ' + 'Values in file will be overridden by command-line arguments.' 
), ) g_other.add_argument( - "--write-graph", - dest="write_graph", - action="store_true", + '--write-graph', + dest='write_graph', + action='store_true', default=False, - help="Write workflow graph.", + help='Write workflow graph.', ) g_other.add_argument( - "--stop-on-first-crash", - dest="stop_on_first_crash", - action="store_true", + '--stop-on-first-crash', + dest='stop_on_first_crash', + action='store_true', default=False, - help="Force stopping on first crash, even if a work directory was specified.", + help='Force stopping on first crash, even if a work directory was specified.', ) g_other.add_argument( - "--notrack", - dest="notrack", - action="store_true", + '--notrack', + dest='notrack', + action='store_true', default=False, - help="Opt out of sending tracking information.", + help='Opt out of sending tracking information.', ) g_other.add_argument( - "--debug", - dest="debug", - action="store", - nargs="+", - choices=config.DEBUG_MODES + ("all",), + '--debug', + dest='debug', + action='store', + nargs='+', + choices=config.DEBUG_MODES + ('all',), help="Debug mode(s) to enable. 'all' is alias for all available modes.", ) g_other.add_argument( - "--fs-license-file", - dest="fs_license_file", - metavar="FILE", + '--fs-license-file', + dest='fs_license_file', + metavar='FILE', type=PathExists, help=( - "Path to FreeSurfer license key file. Get it (for free) by registering " - "at https://surfer.nmr.mgh.harvard.edu/registration.html. " - "This is not currently required, but may be in the future." + 'Path to FreeSurfer license key file. Get it (for free) by registering ' + 'at https://surfer.nmr.mgh.harvard.edu/registration.html. ' + 'This is not currently required, but may be in the future.' ), ) g_other.add_argument( - "--md-only-boilerplate", - dest="md_only_boilerplate", - action="store_true", + '--md-only-boilerplate', + dest='md_only_boilerplate', + action='store_true', default=False, - help="Skip generation of HTML and LaTeX formatted citation with pandoc", + help='Skip generation of HTML and LaTeX formatted citation with pandoc', ) g_other.add_argument( - "--boilerplate-only", - "--boilerplate_only", - dest="boilerplate_only", - action="store_true", + '--boilerplate-only', + '--boilerplate_only', + dest='boilerplate_only', + action='store_true', default=False, - help="generate boilerplate only", + help='generate boilerplate only', ) g_other.add_argument( - "--reports-only", - dest="reports_only", - action="store_true", + '--reports-only', + dest='reports_only', + action='store_true', default=False, help=( "only generate reports, don't run workflows. This will only rerun report " - "aggregation, not reportlet generation for specific nodes." + 'aggregation, not reportlet generation for specific nodes.' 
), ) - g_experimental = parser.add_argument_group("Experimental options") + g_experimental = parser.add_argument_group('Experimental options') g_experimental.add_argument( - "--warp-surfaces-native2std", - "--warp_surfaces_native2std", - dest="process_surfaces", - nargs="?", + '--warp-surfaces-native2std', + '--warp_surfaces_native2std', + dest='process_surfaces', + nargs='?', const=None, - default="auto", - choices=["y", "n"], + default='auto', + choices=['y', 'n'], action=parser_utils.YesNoAction, help="""\ If used, a workflow will be run to warp native-space (``fsnative``) reconstructed cortical @@ -756,7 +755,7 @@ def _build_parser(): _blist = is_flagged() if _blist[0]: - _reason = _blist[1] or "unknown" + _reason = _blist[1] or 'unknown' print( f"""\ WARNING: Version {config.environment.version} of XCP-D (current) has been FLAGGED @@ -776,9 +775,9 @@ def parse_args(args=None, namespace=None): parser = _build_parser() opts = parser.parse_args(args, namespace) if opts.config_file: - skip = {} if opts.reports_only else {"execution": ("run_uuid",)} + skip = {} if opts.reports_only else {'execution': ('run_uuid',)} config.load(opts.config_file, skip=skip, init=False) - config.loggers.cli.info(f"Loaded previous configuration file {opts.config_file}") + config.loggers.cli.info(f'Loaded previous configuration file {opts.config_file}') opts = _validate_parameters(opts=opts, build_log=config.loggers.cli, parser=parser) @@ -786,22 +785,22 @@ def parse_args(args=None, namespace=None): if opts.clean_workdir and opts.work_dir.exists(): from niworkflows.utils.misc import clean_directory - config.loggers.cli.info(f"Clearing previous XCP-D working directory: {opts.work_dir}") + config.loggers.cli.info(f'Clearing previous XCP-D working directory: {opts.work_dir}') if not clean_directory(opts.work_dir): config.loggers.cli.warning( - f"Could not clear all contents of working directory: {opts.work_dir}" + f'Could not clear all contents of working directory: {opts.work_dir}' ) # First check that fmriprep_dir looks like a BIDS folder - if opts.input_type in ("dcan", "hcp", "ukb"): - if opts.input_type == "dcan": + if opts.input_type in ('dcan', 'hcp', 'ukb'): + if opts.input_type == 'dcan': from xcp_d.ingression.abcdbids import convert_dcan2bids as convert_to_bids - elif opts.input_type == "hcp": + elif opts.input_type == 'hcp': from xcp_d.ingression.hcpya import convert_hcp2bids as convert_to_bids - elif opts.input_type == "ukb": + elif opts.input_type == 'ukb': from xcp_d.ingression.ukbiobank import convert_ukb2bids as convert_to_bids - converted_fmri_dir = opts.work_dir / f"dset_bids/derivatives/{opts.input_type}" + converted_fmri_dir = opts.work_dir / f'dset_bids/derivatives/{opts.input_type}' converted_fmri_dir.mkdir(exist_ok=True, parents=True) convert_to_bids( @@ -811,19 +810,19 @@ def parse_args(args=None, namespace=None): ) opts.fmri_dir = converted_fmri_dir - assert converted_fmri_dir.exists(), f"Conversion to BIDS failed: {converted_fmri_dir}" + assert converted_fmri_dir.exists(), f'Conversion to BIDS failed: {converted_fmri_dir}' - if not os.path.isfile(os.path.join(opts.fmri_dir, "dataset_description.json")): + if not os.path.isfile(os.path.join(opts.fmri_dir, 'dataset_description.json')): config.loggers.cli.error( - "No dataset_description.json file found in input directory. " + 'No dataset_description.json file found in input directory. ' "Make sure to point to the specific pipeline's derivatives folder. " "For example, use '/dset/derivatives/fmriprep', not /dset/derivatives'." 
) config.execution.log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG)) - config.from_dict(vars(opts), init=["nipype"]) + config.from_dict(vars(opts), init=['nipype']) assert config.execution.fmri_dir.exists(), ( - f"Conversion to BIDS failed: {config.execution.fmri_dir}", + f'Conversion to BIDS failed: {config.execution.fmri_dir}', ) # Retrieve logging level @@ -834,14 +833,14 @@ def parse_args(args=None, namespace=None): import yaml with open(opts.use_plugin) as f: - plugin_settings = yaml.load(f, Loader=yaml.FullLoader) + plugin_settings = yaml.safe_load(f) - _plugin = plugin_settings.get("plugin") + _plugin = plugin_settings.get('plugin') if _plugin: config.nipype.plugin = _plugin - config.nipype.plugin_args = plugin_settings.get("plugin_args", {}) + config.nipype.plugin_args = plugin_settings.get('plugin_args', {}) config.nipype.nprocs = opts.nprocs or config.nipype.plugin_args.get( - "n_procs", config.nipype.nprocs + 'n_procs', config.nipype.nprocs ) # Resource management options @@ -849,8 +848,8 @@ def parse_args(args=None, namespace=None): # This may need to be revisited if people try to use batch plugins if 1 < config.nipype.nprocs < config.nipype.omp_nthreads: build_log.warning( - f"Per-process threads (--omp-nthreads={config.nipype.omp_nthreads}) exceed " - f"total threads (--nthreads/--n_cpus={config.nipype.nprocs})" + f'Per-process threads (--omp-nthreads={config.nipype.omp_nthreads}) exceed ' + f'total threads (--nthreads/--n_cpus={config.nipype.nprocs})' ) fmri_dir = config.execution.fmri_dir @@ -866,20 +865,20 @@ def parse_args(args=None, namespace=None): # Ensure input and output folders are not the same if output_dir == fmri_dir: - rec_path = fmri_dir / "derivatives" / f"xcp_d-{version.split('+')[0]}" + rec_path = fmri_dir / 'derivatives' / f"xcp_d-{version.split('+')[0]}" parser.error( - "The selected output folder is the same as the input BIDS folder. " - f"Please modify the output path (suggestion: {rec_path})." + 'The selected output folder is the same as the input BIDS folder. ' + f'Please modify the output path (suggestion: {rec_path}).' ) if fmri_dir in work_dir.parents: parser.error( - "The selected working directory is a subdirectory of the input BIDS folder. " - "Please modify the output path." + 'The selected working directory is a subdirectory of the input BIDS folder. ' + 'Please modify the output path.' ) # Setup directories - config.execution.log_dir = config.execution.output_dir / "logs" + config.execution.log_dir = config.execution.output_dir / 'logs' # Check and create output and working directories config.execution.log_dir.mkdir(exist_ok=True, parents=True) output_dir.mkdir(exist_ok=True, parents=True) @@ -920,165 +919,165 @@ def _validate_parameters(opts, build_log, parser): if opts.fs_license_file is not None: opts.fs_license_file = opts.fs_license_file.resolve() if opts.fs_license_file.is_file(): - os.environ["FS_LICENSE"] = str(opts.fs_license_file) + os.environ['FS_LICENSE'] = str(opts.fs_license_file) else: - build_log.warning(f"Freesurfer license DNE: {opts.fs_license_file}.") + build_log.warning(f'Freesurfer license DNE: {opts.fs_license_file}.') else: - fs_license_file = os.environ.get("FS_LICENSE", "/opt/freesurfer/license.txt") + fs_license_file = os.environ.get('FS_LICENSE', '/opt/freesurfer/license.txt') if not Path(fs_license_file).is_file(): build_log.warning( - "A valid FreeSurfer license file is recommended. " + 'A valid FreeSurfer license file is recommended. 
' "Set the FS_LICENSE environment variable or use the '--fs-license-file' flag." ) else: - os.environ["FS_LICENSE"] = str(fs_license_file) + os.environ['FS_LICENSE'] = str(fs_license_file) # Check parameter value types/valid values - assert opts.abcc_qc in (True, False, "auto") - assert opts.combine_runs in (True, False, "auto") - assert opts.despike in (True, False, "auto") - assert opts.file_format in ("nifti", "cifti", "auto") - assert opts.linc_qc in (True, False, "auto") - assert opts.mode in ("abcd", "hbcd", "linc", "none"), f"Unsupported mode '{opts.mode}'." - assert opts.output_type in ("censored", "interpolated", "auto") - assert opts.process_surfaces in (True, False, "auto") + assert opts.abcc_qc in (True, False, 'auto') + assert opts.combine_runs in (True, False, 'auto') + assert opts.despike in (True, False, 'auto') + assert opts.file_format in ('nifti', 'cifti', 'auto') + assert opts.linc_qc in (True, False, 'auto') + assert opts.mode in ('abcd', 'hbcd', 'linc', 'none'), f"Unsupported mode '{opts.mode}'." + assert opts.output_type in ('censored', 'interpolated', 'auto') + assert opts.process_surfaces in (True, False, 'auto') # Add internal atlas datasets to the list of datasets opts.datasets = opts.datasets or {} if opts.atlases: - if "xcpdatlases" not in opts.datasets: - opts.datasets["xcpdatlases"] = load_data("atlases") + if 'xcpdatlases' not in opts.datasets: + opts.datasets['xcpdatlases'] = load_data('atlases') - if any(atlas.startswith("4S") for atlas in opts.atlases): - if "xcpd4s" not in opts.datasets: - opts.datasets["xcpd4s"] = Path("/AtlasPack") + if any(atlas.startswith('4S') for atlas in opts.atlases): + if 'xcpd4s' not in opts.datasets: + opts.datasets['xcpd4s'] = Path('/AtlasPack') # Check parameters based on the mode - if opts.mode == "abcd": - opts.abcc_qc = True if (opts.abcc_qc == "auto") else opts.abcc_qc - opts.combine_runs = True if (opts.combine_runs == "auto") else opts.combine_runs + if opts.mode == 'abcd': + opts.abcc_qc = True if (opts.abcc_qc == 'auto') else opts.abcc_qc + opts.combine_runs = True if (opts.combine_runs == 'auto') else opts.combine_runs opts.confounds_config = ( - "36P" if (opts.confounds_config == "auto") else opts.confounds_config + '36P' if (opts.confounds_config == 'auto') else opts.confounds_config ) opts.dcan_correlation_lengths = ( [] if opts.dcan_correlation_lengths is None else opts.dcan_correlation_lengths ) - opts.despike = True if (opts.despike == "auto") else opts.despike - opts.fd_thresh = 0.3 if (opts.fd_thresh == "auto") else opts.fd_thresh - opts.file_format = "cifti" if (opts.file_format == "auto") else opts.file_format - opts.input_type = "fmriprep" if opts.input_type == "auto" else opts.input_type - opts.linc_qc = True if (opts.linc_qc == "auto") else opts.linc_qc + opts.despike = True if (opts.despike == 'auto') else opts.despike + opts.fd_thresh = 0.3 if (opts.fd_thresh == 'auto') else opts.fd_thresh + opts.file_format = 'cifti' if (opts.file_format == 'auto') else opts.file_format + opts.input_type = 'fmriprep' if opts.input_type == 'auto' else opts.input_type + opts.linc_qc = True if (opts.linc_qc == 'auto') else opts.linc_qc if opts.motion_filter_type is None: error_messages.append(f"'--motion-filter-type' is required for '{opts.mode}' mode.") - opts.output_correlations = True if "all" in opts.dcan_correlation_lengths else False - if opts.output_type == "censored": + opts.output_correlations = True if 'all' in opts.dcan_correlation_lengths else False + if opts.output_type == 'censored': 
error_messages.append(f"'--output-type' cannot be 'censored' for '{opts.mode}' mode.") - opts.output_type = "interpolated" - opts.confounds_config = "36P" if opts.confounds_config == "auto" else opts.confounds_config + opts.output_type = 'interpolated' + opts.confounds_config = '36P' if opts.confounds_config == 'auto' else opts.confounds_config opts.process_surfaces = ( - True if (opts.process_surfaces == "auto") else opts.process_surfaces + True if (opts.process_surfaces == 'auto') else opts.process_surfaces ) # Remove "all" from the list of correlation lengths - opts.dcan_correlation_lengths = [c for c in opts.dcan_correlation_lengths if c != "all"] - elif opts.mode == "hbcd": - opts.abcc_qc = True if (opts.abcc_qc == "auto") else opts.abcc_qc - opts.combine_runs = True if (opts.combine_runs == "auto") else opts.combine_runs + opts.dcan_correlation_lengths = [c for c in opts.dcan_correlation_lengths if c != 'all'] + elif opts.mode == 'hbcd': + opts.abcc_qc = True if (opts.abcc_qc == 'auto') else opts.abcc_qc + opts.combine_runs = True if (opts.combine_runs == 'auto') else opts.combine_runs opts.confounds_config = ( - "36P" if (opts.confounds_config == "auto") else opts.confounds_config + '36P' if (opts.confounds_config == 'auto') else opts.confounds_config ) opts.dcan_correlation_lengths = ( [] if opts.dcan_correlation_lengths is None else opts.dcan_correlation_lengths ) - opts.despike = True if (opts.despike == "auto") else opts.despike - opts.fd_thresh = 0.3 if (opts.fd_thresh == "auto") else opts.fd_thresh - opts.file_format = "cifti" if (opts.file_format == "auto") else opts.file_format - opts.input_type = "nibabies" if opts.input_type == "auto" else opts.input_type - opts.linc_qc = True if (opts.linc_qc == "auto") else opts.linc_qc + opts.despike = True if (opts.despike == 'auto') else opts.despike + opts.fd_thresh = 0.3 if (opts.fd_thresh == 'auto') else opts.fd_thresh + opts.file_format = 'cifti' if (opts.file_format == 'auto') else opts.file_format + opts.input_type = 'nibabies' if opts.input_type == 'auto' else opts.input_type + opts.linc_qc = True if (opts.linc_qc == 'auto') else opts.linc_qc if opts.motion_filter_type is None: error_messages.append(f"'--motion-filter-type' is required for '{opts.mode}' mode.") - opts.output_correlations = True if "all" in opts.dcan_correlation_lengths else False - if opts.output_type == "censored": + opts.output_correlations = True if 'all' in opts.dcan_correlation_lengths else False + if opts.output_type == 'censored': error_messages.append(f"'--output-type' cannot be 'censored' for '{opts.mode}' mode.") - opts.output_type = "interpolated" - opts.confounds_config = "36P" if opts.confounds_config == "auto" else opts.confounds_config + opts.output_type = 'interpolated' + opts.confounds_config = '36P' if opts.confounds_config == 'auto' else opts.confounds_config opts.process_surfaces = ( - True if (opts.process_surfaces == "auto") else opts.process_surfaces + True if (opts.process_surfaces == 'auto') else opts.process_surfaces ) # Remove "all" from the list of correlation lengths - opts.dcan_correlation_lengths = [c for c in opts.dcan_correlation_lengths if c != "all"] - elif opts.mode == "linc": - opts.abcc_qc = False if (opts.abcc_qc == "auto") else opts.abcc_qc - opts.combine_runs = False if opts.combine_runs == "auto" else opts.combine_runs + opts.dcan_correlation_lengths = [c for c in opts.dcan_correlation_lengths if c != 'all'] + elif opts.mode == 'linc': + opts.abcc_qc = False if (opts.abcc_qc == 'auto') else opts.abcc_qc + 
opts.combine_runs = False if opts.combine_runs == 'auto' else opts.combine_runs opts.confounds_config = ( - "36P" if (opts.confounds_config == "auto") else opts.confounds_config + '36P' if (opts.confounds_config == 'auto') else opts.confounds_config ) - opts.despike = True if (opts.despike == "auto") else opts.despike - opts.fd_thresh = 0 if (opts.fd_thresh == "auto") else opts.fd_thresh - opts.file_format = "cifti" if (opts.file_format == "auto") else opts.file_format - opts.input_type = "fmriprep" if opts.input_type == "auto" else opts.input_type - opts.linc_qc = True if (opts.linc_qc == "auto") else opts.linc_qc + opts.despike = True if (opts.despike == 'auto') else opts.despike + opts.fd_thresh = 0 if (opts.fd_thresh == 'auto') else opts.fd_thresh + opts.file_format = 'cifti' if (opts.file_format == 'auto') else opts.file_format + opts.input_type = 'fmriprep' if opts.input_type == 'auto' else opts.input_type + opts.linc_qc = True if (opts.linc_qc == 'auto') else opts.linc_qc opts.output_correlations = True - if opts.output_type == "interpolated": + if opts.output_type == 'interpolated': error_messages.append( f"'--output-type' cannot be 'interpolated' for '{opts.mode}' mode." ) - opts.output_type = "censored" - opts.confounds_config = "36P" if opts.confounds_config == "auto" else opts.confounds_config - opts.process_surfaces = False if opts.process_surfaces == "auto" else opts.process_surfaces + opts.output_type = 'censored' + opts.confounds_config = '36P' if opts.confounds_config == 'auto' else opts.confounds_config + opts.process_surfaces = False if opts.process_surfaces == 'auto' else opts.process_surfaces if opts.dcan_correlation_lengths is not None: error_messages.append(f"'--create-matrices' is not supported for '{opts.mode}' mode.") - elif opts.mode == "none": - if opts.abcc_qc == "auto": + elif opts.mode == 'none': + if opts.abcc_qc == 'auto': error_messages.append("'--abcc-qc' (y or n) is required for 'none' mode.") - if opts.combine_runs == "auto": + if opts.combine_runs == 'auto': error_messages.append("'--combine-runs' (y or n) is required for 'none' mode.") - if opts.confounds_config == "auto": + if opts.confounds_config == 'auto': error_messages.append("'--nuisance-regressors' is required for 'none' mode.") opts.dcan_correlation_lengths = ( [] if opts.dcan_correlation_lengths is None else opts.dcan_correlation_lengths ) - if opts.despike == "auto": + if opts.despike == 'auto': error_messages.append("'--despike' (y or n) is required for 'none' mode.") - if opts.fd_thresh == "auto": + if opts.fd_thresh == 'auto': error_messages.append("'--fd-thresh' is required for 'none' mode.") opts.fd_thresh = 0 # just to satisfy later checks, not to actually use - if opts.file_format == "auto": + if opts.file_format == 'auto': error_messages.append("'--file-format' is required for 'none' mode.") - if opts.input_type == "auto": + if opts.input_type == 'auto': error_messages.append("'--input-type' is required for 'none' mode.") - if opts.linc_qc == "auto": + if opts.linc_qc == 'auto': error_messages.append("'--linc-qc' (y or n) is required for 'none' mode.") if opts.motion_filter_type is None: error_messages.append("'--motion-filter-type' is required for 'none' mode.") - opts.output_correlations = True if "all" in opts.dcan_correlation_lengths else False + opts.output_correlations = True if 'all' in opts.dcan_correlation_lengths else False - if opts.output_type == "auto": + if opts.output_type == 'auto': error_messages.append("'--output-type' is required for 'none' mode.") - if 
opts.process_surfaces == "auto": + if opts.process_surfaces == 'auto': error_messages.append( "'--warp-surfaces-native2std' (y or n) is required for 'none' mode." ) # Remove "all" from the list of correlation lengths - opts.dcan_correlation_lengths = [c for c in opts.dcan_correlation_lengths if c != "all"] + opts.dcan_correlation_lengths = [c for c in opts.dcan_correlation_lengths if c != 'all'] # Load the confound configuration file - if opts.confounds_config == "none": + if opts.confounds_config == 'none': opts.confounds_config = None elif isinstance(opts.confounds_config, str): # A builtin confound config - opts.confounds_config = load_data.readable(f"nuisance/{opts.confounds_config}.yml") + opts.confounds_config = load_data.readable(f'nuisance/{opts.confounds_config}.yml') else: # An external confound config opts.confounds_config = Path(opts.confounds_config).resolve() @@ -1096,24 +1095,24 @@ def _validate_parameters(opts, build_log, parser): f"'--upper-bpf' ({opts.low_pass})." ) elif not opts.bandpass_filter: - build_log.warning("Bandpass filtering is disabled. ALFF outputs will not be generated.") + build_log.warning('Bandpass filtering is disabled. ALFF outputs will not be generated.') # Scrubbing parameters if opts.fd_thresh <= 0 and opts.min_time > 0: - ignored_params = "\n\t".join(["--min-time"]) + ignored_params = '\n\t'.join(['--min-time']) build_log.warning( - "Framewise displacement-based scrubbing is disabled. " - f"The following parameters will have no effect:\n\t{ignored_params}" + 'Framewise displacement-based scrubbing is disabled. ' + f'The following parameters will have no effect:\n\t{ignored_params}' ) opts.min_time = 0 - opts.output_interpolated = True if opts.output_type == "interpolated" else False + opts.output_interpolated = True if opts.output_type == 'interpolated' else False # Motion filtering parameters - if opts.motion_filter_type == "none": + if opts.motion_filter_type == 'none': opts.motion_filter_type = None - if opts.motion_filter_type == "notch": + if opts.motion_filter_type == 'notch': if not (opts.band_stop_min and opts.band_stop_max): error_messages.append( "Please set both '--band-stop-min' and '--band-stop-max' if you want to apply " @@ -1128,10 +1127,10 @@ def _validate_parameters(opts, build_log, parser): build_log.warning( f"Either '--band-stop-min' ({opts.band_stop_min}) or " f"'--band-stop-max' ({opts.band_stop_max}) is suspiciously low. " - "Please remember that these values should be in breaths-per-minute." + 'Please remember that these values should be in breaths-per-minute.' ) - elif opts.motion_filter_type == "lp": + elif opts.motion_filter_type == 'lp': if not opts.band_stop_min: error_messages.append( "Please set '--band-stop-min' if you want to apply the 'lp' motion filter." @@ -1139,7 +1138,7 @@ def _validate_parameters(opts, build_log, parser): elif opts.band_stop_min < 1: build_log.warning( f"'--band-stop-min' ({opts.band_stop_max}) is suspiciously low. " - "Please remember that this value should be in breaths-per-minute." + 'Please remember that this value should be in breaths-per-minute.' ) if opts.band_stop_max: @@ -1148,44 +1147,44 @@ def _validate_parameters(opts, build_log, parser): elif opts.band_stop_min or opts.band_stop_max: build_log.warning( "'--band-stop-min' and '--band-stop-max' are ignored if '--motion-filter-type' " - "is not set." + 'is not set.' 
) # Parcellation parameters if not opts.atlases and opts.min_coverage != 0.5: build_log.warning( - "When no atlases are selected or parcellation is explicitly skipped " + 'When no atlases are selected or parcellation is explicitly skipped ' "('--skip-parcellation'), '--min-coverage' will have no effect." ) # Some parameters are automatically set depending on the input type. - if opts.input_type == "ukb": - if opts.file_format == "cifti": + if opts.input_type == 'ukb': + if opts.file_format == 'cifti': error_messages.append( "In order to process UK Biobank data, the file format must be set to 'nifti'." ) if opts.process_surfaces: error_messages.append( - "--warp-surfaces-native2std is not supported for UK Biobank data." + '--warp-surfaces-native2std is not supported for UK Biobank data.' ) - for cifti_only_atlas in ["MIDB", "MyersLabonte"]: - if (cifti_only_atlas in opts.atlases) and (opts.file_format == "nifti"): + for cifti_only_atlas in ['MIDB', 'MyersLabonte']: + if (cifti_only_atlas in opts.atlases) and (opts.file_format == 'nifti'): build_log.warning( f"Atlas '{cifti_only_atlas}' requires CIFTI processing. Skipping atlas." ) opts.atlases = [atlas for atlas in opts.atlases if atlas != cifti_only_atlas] # process_surfaces and nifti processing are incompatible. - if opts.process_surfaces and (opts.file_format == "nifti"): + if opts.process_surfaces and (opts.file_format == 'nifti'): error_messages.append( - "In order to perform surface normalization (--warp-surfaces-native2std), " - "you must enable cifti processing (--file-format cifti)." + 'In order to perform surface normalization (--warp-surfaces-native2std), ' + 'you must enable cifti processing (--file-format cifti).' ) if error_messages: - error_message_str = "Errors detected in parameter parsing:\n\t- " + "\n\t- ".join( + error_message_str = 'Errors detected in parameter parsing:\n\t- ' + '\n\t- '.join( error_messages ) parser.error(error_message_str) diff --git a/xcp_d/cli/parser_utils.py b/xcp_d/cli/parser_utils.py index 3e8f168c1..a11dd2f55 100644 --- a/xcp_d/cli/parser_utils.py +++ b/xcp_d/cli/parser_utils.py @@ -6,61 +6,61 @@ from argparse import Action from pathlib import Path -warnings.filterwarnings("ignore") +warnings.filterwarnings('ignore') -logging.addLevelName(25, "IMPORTANT") # Add a new level between INFO and WARNING -logging.addLevelName(15, "VERBOSE") # Add a new level between INFO and DEBUG -logger = logging.getLogger("cli") +logging.addLevelName(25, 'IMPORTANT') # Add a new level between INFO and WARNING +logging.addLevelName(15, 'VERBOSE') # Add a new level between INFO and DEBUG +logger = logging.getLogger('cli') def _int_or_auto(string, is_parser=True): """Check if argument is an integer >= 0 or the string "auto".""" - if string == "auto": + if string == 'auto': return string error = argparse.ArgumentTypeError if is_parser else ValueError try: intarg = int(string) - except ValueError: + except ValueError as exc: msg = "Argument must be a nonnegative integer or 'auto'." - raise error(msg) + raise error(msg) from exc if intarg < 0: - raise error("Int argument must be nonnegative.") + raise error('Int argument must be nonnegative.') return intarg def _float_or_auto(string, is_parser=True): """Check if argument is a float >= 0 or the string "auto".""" - if string == "auto": + if string == 'auto': return string error = argparse.ArgumentTypeError if is_parser else ValueError try: floatarg = float(string) - except ValueError: + except ValueError as exc: msg = "Argument must be a nonnegative float or 'auto'." 
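# A minimal, self-contained sketch of the exception-chaining pattern adopted for these
# validators: re-raising with ``raise ... from exc`` keeps the original ValueError as the
# __cause__ in the traceback instead of discarding it. The validator name below is
# hypothetical and only illustrates the pattern.
import argparse


def _nonnegative_float_or_auto(string):
    """Return 'auto' unchanged, or the value parsed as a nonnegative float."""
    if string == 'auto':
        return string

    try:
        value = float(string)
    except ValueError as exc:
        # Chain the original conversion error so it stays visible in the traceback.
        raise argparse.ArgumentTypeError(
            "Argument must be a nonnegative float or 'auto'."
        ) from exc

    if value < 0:
        raise argparse.ArgumentTypeError('Float argument must be nonnegative.')

    return value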
- raise error(msg) + raise error(msg) from exc if floatarg < 0: - raise error("Float argument must be nonnegative.") + raise error('Float argument must be nonnegative.') return floatarg def _float_or_auto_or_none(string, is_parser=True): """Check if argument is a float >= 0 or the strings "all" or "none".""" - if string in ("all", "none"): + if string in ('all', 'none'): return string error = argparse.ArgumentTypeError if is_parser else ValueError try: floatarg = float(string) - except ValueError: + except ValueError as exc: msg = f"Argument must be a nonnegative float, 'all', or 'none', not '{string}'." - raise error(msg) + raise error(msg) from exc if floatarg < 0: - raise error("Float argument must be nonnegative.") + raise error('Float argument must be nonnegative.') return floatarg @@ -68,11 +68,11 @@ def _restricted_float(x): """From https://stackoverflow.com/a/12117065/2589328.""" try: x = float(x) - except ValueError: - raise argparse.ArgumentTypeError(f"{x} not a floating-point literal") + except ValueError as exc: + raise argparse.ArgumentTypeError(f'{x} not a floating-point literal') from exc if x < 0.0 or x > 1.0: - raise argparse.ArgumentTypeError(f"{x} not in range [0.0, 1.0]") + raise argparse.ArgumentTypeError(f'{x} not in range [0.0, 1.0]') return x @@ -80,7 +80,7 @@ def _restricted_float(x): def _path_exists(path, parser): """Ensure a given path exists.""" if path is None or not Path(path).exists(): - raise parser.error(f"Path does not exist: <{path}>.") + raise parser.error(f'Path does not exist: <{path}>.') return Path(path).absolute() @@ -88,7 +88,7 @@ def _is_file(path, parser): """Ensure a given path exists and it is a file.""" path = _path_exists(path, parser) if not path.is_file(): - raise parser.error(f"Path should point to a file (or symlink of file): <{path}>.") + raise parser.error(f'Path should point to a file (or symlink of file): <{path}>.') return path @@ -97,7 +97,7 @@ def _process_value(value): if value is None: return bids.layout.Query.NONE - elif value == "*": + elif value == '*': return bids.layout.Query.ANY else: return value @@ -120,10 +120,10 @@ def _bids_filter(value, parser): if Path(value).exists(): try: return loads(Path(value).read_text(), object_hook=_filter_pybids_none_any) - except JSONDecodeError: - raise parser.error(f"JSON syntax error in: <{value}>.") + except JSONDecodeError as exc: + raise parser.error(f'JSON syntax error in: <{value}>.') from exc else: - raise parser.error(f"Path does not exist: <{value}>.") + raise parser.error(f'Path does not exist: <{value}>.') def _min_one(value, parser): @@ -139,7 +139,7 @@ class YesNoAction(Action): def __call__(self, parser, namespace, values, option_string=None): # noqa: U100 """Call the argument.""" - lookup = {"y": True, "n": False, None: True, "auto": "auto"} + lookup = {'y': True, 'n': False, None: True, 'auto': 'auto'} if values not in lookup: raise parser.error(f"Invalid value '{values}' for {self.dest}") @@ -154,15 +154,15 @@ def __call__(self, parser, namespace, values, option_string=None): # noqa: U100 d = {} for spec in values: try: - name, loc = spec.split("=") + name, loc = spec.split('=') loc = Path(loc) except ValueError: loc = Path(spec) name = loc.name if name in d: - raise parser.error(f"Received duplicate derivative name: {name}") - elif name == "preprocessed": + raise parser.error(f'Received duplicate derivative name: {name}') + elif name == 'preprocessed': raise parser.error("The 'preprocessed' derivative is reserved for internal use.") d[name] = loc @@ -175,21 +175,21 @@ 
class ConfoundsAction(Action): def __call__(self, parser, namespace, values, option_string=None): # noqa: U100 """Call the argument.""" builtins = [ - "auto", - "27P", - "36P", - "24P", - "acompcor", - "aroma", - "acompcor_gsr", - "aroma_gsr", - "none", - "gsr_only", + 'auto', + '27P', + '36P', + '24P', + 'acompcor', + 'aroma', + 'acompcor_gsr', + 'aroma_gsr', + 'none', + 'gsr_only', ] if values in builtins: setattr(namespace, self.dest, values) else: if not Path(values).exists(): - raise parser.error(f"Nuisance configuration does not exist: <{values}>.") + raise parser.error(f'Nuisance configuration does not exist: <{values}>.') setattr(namespace, self.dest, Path(values)) diff --git a/xcp_d/cli/run.py b/xcp_d/cli/run.py index 9b01df468..5e6eed0bd 100644 --- a/xcp_d/cli/run.py +++ b/xcp_d/cli/run.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- """The XCP-D postprocessing worklow. XCP-D postprocessing workflow @@ -26,11 +24,11 @@ def main(): parse_args(args=sys.argv[1:]) - if "pdb" in config.execution.debug: + if 'pdb' in config.execution.debug: from xcp_d.utils.debug import setup_exceptionhook setup_exceptionhook() - config.nipype.plugin = "Linear" + config.nipype.plugin = 'Linear' sentry_sdk = None if not config.execution.notrack and not config.execution.debug: @@ -43,14 +41,14 @@ def main(): # CRITICAL Save the config to a file. This is necessary because the execution graph # is built as a separate process to keep the memory footprint low. The most # straightforward way to communicate with the child process is via the filesystem. - config_file = config.execution.work_dir / config.execution.run_uuid / "config.toml" + config_file = config.execution.work_dir / config.execution.run_uuid / 'config.toml' config_file.parent.mkdir(exist_ok=True, parents=True) config.to_filename(config_file) # CRITICAL Call build_workflow(config_file, retval) in a subprocess. # Because Python on Linux does not ever free virtual memory (VM), running the # workflow construction jailed within a process preempts excessive VM buildup. - if "pdb" not in config.execution.debug: + if 'pdb' not in config.execution.debug: with Manager() as mgr: retval = mgr.dict() p = Process(target=build_workflow, args=(str(config_file), retval)) @@ -59,13 +57,13 @@ def main(): retval = dict(retval.items()) # Convert to base dictionary if p.exitcode: - retval["return_code"] = p.exitcode + retval['return_code'] = p.exitcode else: retval = build_workflow(str(config_file), {}) - exitcode = retval.get("return_code", 0) - xcpd_wf = retval.get("workflow", None) + exitcode = retval.get('return_code', 0) + xcpd_wf = retval.get('workflow', None) # CRITICAL Load the config from the file. 
This is necessary because the ``build_workflow`` # function executed constrained in a process may change the config (and thus the global @@ -76,7 +74,7 @@ def main(): sys.exit(int(exitcode > 0)) if xcpd_wf and config.execution.write_graph: - xcpd_wf.write_graph(graph2use="colored", format="svg", simple_form=True) + xcpd_wf.write_graph(graph2use='colored', format='svg', simple_form=True) exitcode = exitcode or (xcpd_wf is None) * EX_SOFTWARE if exitcode != 0: @@ -99,17 +97,17 @@ def main(): # Sentry tracking if sentry_sdk is not None: with sentry_sdk.configure_scope() as scope: - scope.set_tag("run_uuid", config.execution.run_uuid) - scope.set_tag("npart", len(config.execution.participant_label)) + scope.set_tag('run_uuid', config.execution.run_uuid) + scope.set_tag('npart', len(config.execution.participant_label)) - sentry_sdk.add_breadcrumb(message="XCP-D started", level="info") - sentry_sdk.capture_message("XCP-D started", level="info") + sentry_sdk.add_breadcrumb(message='XCP-D started', level='info') + sentry_sdk.capture_message('XCP-D started', level='info') config.loggers.workflow.log( 15, - "\n".join(["XCP-D config:"] + [f"\t\t{s}" for s in config.dumps().splitlines()]), + '\n'.join(['XCP-D config:'] + [f'\t\t{s}' for s in config.dumps().splitlines()]), ) - config.loggers.workflow.log(25, "XCP-D started!") + config.loggers.workflow.log(25, 'XCP-D started!') errno = 1 # Default is error exit unless otherwise set try: xcpd_wf.run(**config.nipype.get_plugin()) @@ -118,40 +116,40 @@ def main(): from xcp_d.utils.sentry import process_crashfile crashfolders = [ - config.execution.output_dir / f"sub-{s}" / "log" / config.execution.run_uuid + config.execution.output_dir / f'sub-{s}' / 'log' / config.execution.run_uuid for s in config.execution.participant_label ] for crashfolder in crashfolders: - for crashfile in crashfolder.glob("crash*.*"): + for crashfile in crashfolder.glob('crash*.*'): process_crashfile(crashfile) - if sentry_sdk is not None and "Workflow did not execute cleanly" not in str(e): + if sentry_sdk is not None and 'Workflow did not execute cleanly' not in str(e): sentry_sdk.capture_exception(e) - config.loggers.workflow.critical("XCP-D failed: %s", e) + config.loggers.workflow.critical('XCP-D failed: %s', e) raise else: - config.loggers.workflow.log(25, "XCP-D finished successfully!") + config.loggers.workflow.log(25, 'XCP-D finished successfully!') if sentry_sdk is not None: - success_message = "XCP-D finished without errors" - sentry_sdk.add_breadcrumb(message=success_message, level="info") - sentry_sdk.capture_message(success_message, level="info") + success_message = 'XCP-D finished without errors' + sentry_sdk.add_breadcrumb(message=success_message, level='info') + sentry_sdk.capture_message(success_message, level='info') # Bother users with the boilerplate only iff the workflow went okay. 
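# A standalone sketch of the pattern used in main() above: build an expensive object in a
# child process and hand results back through a Manager dict, so the parent keeps a small
# memory footprint. ``make_graph`` is a stand-in for the real workflow builder.
from multiprocessing import Manager, Process


def make_graph(n_nodes, retval):
    """Pretend to build a large object and report a summary back to the parent."""
    retval['n_nodes'] = n_nodes
    retval['return_code'] = 0


if __name__ == '__main__':
    with Manager() as mgr:
        retval = mgr.dict()
        proc = Process(target=make_graph, args=(100, retval))
        proc.start()
        proc.join()
        retval = dict(retval.items())  # copy out before the Manager shuts down
        if proc.exitcode:  # a non-zero exit code overrides whatever the child reported
            retval['return_code'] = proc.exitcode
    print(retval)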
- boiler_file = config.execution.output_dir / "logs" / "CITATION.md" + boiler_file = config.execution.output_dir / 'logs' / 'CITATION.md' if boiler_file.exists(): if config.environment.exec_env in ( - "apptainer", - "docker", + 'apptainer', + 'docker', ): - boiler_file = Path("") / boiler_file.relative_to( + boiler_file = Path('') / boiler_file.relative_to( config.execution.output_dir ) config.loggers.workflow.log( 25, - "Works derived from this XCP-D execution should include the " - f"boilerplate text found in {boiler_file}.", + 'Works derived from this XCP-D execution should include the ' + f'boilerplate text found in {boiler_file}.', ) errno = 0 @@ -168,11 +166,11 @@ def main(): ) if config.execution.atlases: - write_atlas_dataset_description(config.execution.output_dir / "atlases") + write_atlas_dataset_description(config.execution.output_dir / 'atlases') # Generate reports phase session_list = ( - config.execution.get().get("bids_filters", {}).get("bold", {}).get("session") + config.execution.get().get('bids_filters', {}).get('bold', {}).get('session') ) # Generate reports phase @@ -186,18 +184,18 @@ def main(): if failed_reports: msg = ( - "Report generation was not successful for the following participants " + 'Report generation was not successful for the following participants ' f': {", ".join(failed_reports)}.' ) config.loggers.cli.error(msg) if sentry_sdk is not None: - sentry_sdk.capture_message(msg, level="error") + sentry_sdk.capture_message(msg, level='error') sys.exit(int((errno + len(failed_reports)) > 0)) -if __name__ == "__main__": +if __name__ == '__main__': raise RuntimeError( - "xcp_d/cli/run.py should not be run directly;\n" - "Please use the `xcp_d` command-line interface." + 'xcp_d/cli/run.py should not be run directly;\n' + 'Please use the `xcp_d` command-line interface.' ) diff --git a/xcp_d/cli/version.py b/xcp_d/cli/version.py index 974fed896..a8d032766 100644 --- a/xcp_d/cli/version.py +++ b/xcp_d/cli/version.py @@ -1,8 +1,29 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: +# +# Copyright The NiPreps Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# We support and encourage derived works from this project, please read +# about our expectations at +# +# https://www.nipreps.org/community/licensing/ +# """Version CLI helpers.""" -from datetime import datetime +from contextlib import suppress +from datetime import datetime, timezone from pathlib import Path import requests @@ -10,7 +31,7 @@ from xcp_d import __version__ RELEASE_EXPIRY_DAYS = 14 -DATE_FMT = "%Y%m%d" +DATE_FMT = '%Y%m%d' def check_latest(): @@ -20,7 +41,8 @@ def check_latest(): latest = None date = None outdated = None - cachefile = Path.home() / ".cache" / "xcp_d" / "latest" + now = datetime.now(tz=timezone.utc) + cachefile = Path.home() / '.cache' / 'xcp_d' / 'latest' try: cachefile.parent.mkdir(parents=True, exist_ok=True) except OSError: @@ -28,27 +50,26 @@ def check_latest(): if cachefile and cachefile.exists(): try: - latest, date = cachefile.read_text().split("|") - except Exception: + latest, date = cachefile.read_text().split('|') + except Exception: # noqa: S110, BLE001 pass else: try: latest = Version(latest) - date = datetime.strptime(date, DATE_FMT) + date = datetime.strptime(date, DATE_FMT).astimezone(timezone.utc) except (InvalidVersion, ValueError): latest = None else: - if abs((datetime.now() - date).days) > RELEASE_EXPIRY_DAYS: + if abs((now - date).days) > RELEASE_EXPIRY_DAYS: outdated = True if latest is None or outdated is True: - try: - response = requests.get(url="https://pypi.org/pypi/alprep/json", timeout=1.0) - except Exception: - response = None + response = None + with suppress(Exception): + response = requests.get(url='https://pypi.org/pypi/xcp_d/json', timeout=1.0) if response and response.status_code == 200: - versions = [Version(rel) for rel in response.json()["releases"].keys()] + versions = [Version(rel) for rel in response.json()['releases'].keys()] versions = [rel for rel in versions if not rel.is_prerelease] if versions: latest = sorted(versions)[-1] @@ -56,10 +77,8 @@ def check_latest(): latest = None if cachefile is not None and latest is not None: - try: - cachefile.write_text("|".join((latest, datetime.now().strftime(DATE_FMT)))) - except Exception: - pass + with suppress(OSError): + cachefile.write_text(f'{latest}|{now.strftime(DATE_FMT)}') return latest @@ -67,18 +86,17 @@ def check_latest(): def is_flagged(): """Check whether current version is flagged.""" # https://raw.githubusercontent.com/pennlinc/xcp_d/main/.versions.json - flagged = tuple() - try: + flagged = () + response = None + with suppress(Exception): response = requests.get( url="""\ https://raw.githubusercontent.com/pennlinc/xcp_d/main/.versions.json""", timeout=1.0, ) - except Exception: - response = None if response and response.status_code == 200: - flagged = response.json().get("flagged", {}) or {} + flagged = response.json().get('flagged', {}) or {} if __version__ in flagged: return True, flagged[__version__] diff --git a/xcp_d/cli/workflow.py b/xcp_d/cli/workflow.py index 6e92b220d..750d32a07 100644 --- a/xcp_d/cli/workflow.py +++ b/xcp_d/cli/workflow.py @@ -23,30 +23,30 @@ def build_workflow(config_file, retval): version = config.environment.version - retval["return_code"] = 1 - retval["workflow"] = None + retval['return_code'] = 1 + retval['workflow'] = None - banner = [f"Running XCP-D version {version}"] - notice_path = data.load.readable("NOTICE") + banner = [f'Running XCP-D version {version}'] + notice_path = data.load.readable('NOTICE') if notice_path.exists(): - banner[0] += "\n" + banner[0] += '\n' banner += [f"License NOTICE {'#' * 50}"] - banner 
+= [f"XCP-D {version}"] + banner += [f'XCP-D {version}'] banner += notice_path.read_text().splitlines(keepends=False)[1:] - banner += ["#" * len(banner[1])] + banner += ['#' * len(banner[1])] build_log.log(25, f"\n{' ' * 9}".join(banner)) # warn if older results exist: check for dataset_description.json in output folder msg = check_pipeline_version( - "XCP-D", + 'XCP-D', version, - config.execution.output_dir / "dataset_description.json", + config.execution.output_dir / 'dataset_description.json', ) if msg is not None: build_log.warning(msg) # Please note this is the input folder's dataset_description.json - dset_desc_path = config.execution.fmri_dir / "dataset_description.json" + dset_desc_path = config.execution.fmri_dir / 'dataset_description.json' if dset_desc_path.exists(): from hashlib import sha256 @@ -61,9 +61,9 @@ def build_workflow(config_file, retval): # Called with reports only if config.execution.reports_only: - build_log.log(25, "Running --reports-only on participants %s", ", ".join(subject_list)) + build_log.log(25, 'Running --reports-only on participants %s', ', '.join(subject_list)) session_list = ( - config.execution.bids_filters.get("bold", {}).get("session") + config.execution.bids_filters.get('bold', {}).get('session') if config.execution.bids_filters else None ) @@ -77,44 +77,44 @@ def build_workflow(config_file, retval): ) if failed_reports: config.loggers.cli.error( - "Report generation was not successful for the following participants : %s.", - ", ".join(failed_reports), + 'Report generation was not successful for the following participants : %s.', + ', '.join(failed_reports), ) - retval["return_code"] = len(failed_reports) + retval['return_code'] = len(failed_reports) return retval # Build main workflow init_msg = [ "Building XCP-D's workflow:", - f"Preprocessing derivatives path: {config.execution.fmri_dir}.", - f"Participant list: {subject_list}.", - f"Run identifier: {config.execution.run_uuid}.", + f'Preprocessing derivatives path: {config.execution.fmri_dir}.', + f'Participant list: {subject_list}.', + f'Run identifier: {config.execution.run_uuid}.', ] if config.execution.datasets: - init_msg += [f"Searching for derivatives and atlases: {config.execution.datasets}."] + init_msg += [f'Searching for derivatives and atlases: {config.execution.datasets}.'] build_log.log(25, f"\n{' ' * 11}* ".join(init_msg)) - retval["workflow"] = init_xcpd_wf() + retval['workflow'] = init_xcpd_wf() # Check workflow for missing commands - missing = check_deps(retval["workflow"]) + missing = check_deps(retval['workflow']) if missing: build_log.critical( - "Cannot run XCP-D. Missing dependencies:%s", - "\n\t* ".join([""] + [f"{cmd} (Interface: {iface})" for iface, cmd in missing]), + 'Cannot run XCP-D. Missing dependencies:%s', + '\n\t* '.join([''] + [f'{cmd} (Interface: {iface})' for iface, cmd in missing]), ) - retval["return_code"] = 127 # 127 == command not found. + retval['return_code'] = 127 # 127 == command not found. 
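# An illustrative sketch of the dependency check summarized above: verify that required
# command-line tools are on PATH and map "anything missing" to exit status 127 (command
# not found). The command list here is hypothetical; the real check walks the workflow's
# interfaces to collect the commands it needs.
import shutil


def find_missing_commands(commands):
    """Return the subset of commands that cannot be found on PATH."""
    return sorted(cmd for cmd in commands if shutil.which(cmd) is None)


missing = find_missing_commands(['pandoc', 'wb_command'])
return_code = 127 if missing else 0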
return retval config.to_filename(config_file) build_log.info( - "XCP-D workflow graph with %d nodes built successfully.", - len(retval["workflow"]._get_all_nodes()), + 'XCP-D workflow graph with %d nodes built successfully.', + len(retval['workflow']._get_all_nodes()), ) - retval["return_code"] = 0 + retval['return_code'] = 0 return retval @@ -123,9 +123,9 @@ def build_boilerplate(config_file, workflow): from xcp_d import config config.load(config_file) - logs_path = config.execution.output_dir / "logs" + logs_path = config.execution.output_dir / 'logs' boilerplate = workflow.visit_desc() - citation_files = {ext: logs_path / f"CITATION.{ext}" for ext in ("bib", "tex", "md", "html")} + citation_files = {ext: logs_path / f'CITATION.{ext}' for ext in ('bib', 'tex', 'md', 'html')} if boilerplate: # To please git-annex users and also to guarantee consistency @@ -137,9 +137,9 @@ def build_boilerplate(config_file, workflow): except FileNotFoundError: pass - citation_files["md"].write_text(boilerplate) + citation_files['md'].write_text(boilerplate) - if not config.execution.md_only_boilerplate and citation_files["md"].exists(): + if not config.execution.md_only_boilerplate and citation_files['md'].exists(): from shutil import copyfile from subprocess import CalledProcessError, TimeoutExpired, check_call @@ -147,40 +147,40 @@ def build_boilerplate(config_file, workflow): # Generate HTML file resolving citations cmd = [ - "pandoc", - "-s", - "--bibliography", - str(load_data("boilerplate.bib")), - "--filter", - "pandoc-citeproc", - "--metadata", + 'pandoc', + '-s', + '--bibliography', + str(load_data('boilerplate.bib')), + '--filter', + 'pandoc-citeproc', + '--metadata', 'pagetitle="XCP-D citation boilerplate"', - str(citation_files["md"]), - "-o", - str(citation_files["html"]), + str(citation_files['md']), + '-o', + str(citation_files['html']), ] - config.loggers.cli.info("Generating an HTML version of the citation boilerplate...") + config.loggers.cli.info('Generating an HTML version of the citation boilerplate...') try: check_call(cmd, timeout=10) except (FileNotFoundError, CalledProcessError, TimeoutExpired): - config.loggers.cli.warning("Could not generate CITATION.html file:\n%s", " ".join(cmd)) + config.loggers.cli.warning('Could not generate CITATION.html file:\n%s', ' '.join(cmd)) # Generate LaTex file resolving citations cmd = [ - "pandoc", - "-s", - "--bibliography", - str(load_data("boilerplate.bib")), - "--natbib", - str(citation_files["md"]), - "-o", - str(citation_files["tex"]), + 'pandoc', + '-s', + '--bibliography', + str(load_data('boilerplate.bib')), + '--natbib', + str(citation_files['md']), + '-o', + str(citation_files['tex']), ] - config.loggers.cli.info("Generating a LaTeX version of the citation boilerplate...") + config.loggers.cli.info('Generating a LaTeX version of the citation boilerplate...') try: check_call(cmd, timeout=10) except (FileNotFoundError, CalledProcessError, TimeoutExpired): - config.loggers.cli.warning("Could not generate CITATION.tex file:\n%s", " ".join(cmd)) + config.loggers.cli.warning('Could not generate CITATION.tex file:\n%s', ' '.join(cmd)) else: - copyfile(load_data("boilerplate.bib"), citation_files["bib"]) + copyfile(load_data('boilerplate.bib'), citation_files['bib']) diff --git a/xcp_d/config.py b/xcp_d/config.py index f02f926c1..4ddaf2f7f 100644 --- a/xcp_d/config.py +++ b/xcp_d/config.py @@ -87,20 +87,21 @@ :py:class:`~bids.layout.BIDSLayout`, etc.) 
""" + import os from multiprocessing import set_start_method from templateflow.conf import TF_LAYOUT # Disable NiPype etelemetry always -_disable_et = bool(os.getenv("NO_ET") is not None or os.getenv("NIPYPE_NO_ET") is not None) -os.environ["NIPYPE_NO_ET"] = "1" -os.environ["NO_ET"] = "1" +_disable_et = bool(os.getenv('NO_ET') is not None or os.getenv('NIPYPE_NO_ET') is not None) +os.environ['NIPYPE_NO_ET'] = '1' +os.environ['NO_ET'] = '1' -CONFIG_FILENAME = "xcp_d.toml" +CONFIG_FILENAME = 'xcp_d.toml' try: - set_start_method("forkserver") + set_start_method('forkserver') except RuntimeError: pass # context has been already set finally: @@ -118,29 +119,29 @@ from xcp_d import __version__ from xcp_d.data import load as load_data -if not hasattr(sys, "_is_pytest_session"): +if not hasattr(sys, '_is_pytest_session'): sys._is_pytest_session = False # Trick to avoid sklearn's FutureWarnings # Disable all warnings in main and children processes only on production versions -if ("RUNNING_PYTEST" not in os.environ) and not any( +if ('RUNNING_PYTEST' not in os.environ) and not any( ( - "+" in __version__, - __version__.endswith(".dirty"), - os.getenv("XCP-D_DEV", "0").lower() in ("1", "on", "true", "y", "yes"), + '+' in __version__, + __version__.endswith('.dirty'), + os.getenv('XCP-D_DEV', '0').lower() in ('1', 'on', 'true', 'y', 'yes'), ) ): from xcp_d._warnings import logging - os.environ["PYTHONWARNINGS"] = "ignore" -elif os.getenv("XCP-D_WARNINGS", "0").lower() in ("1", "on", "true", "y", "yes"): + os.environ['PYTHONWARNINGS'] = 'ignore' +elif os.getenv('XCP-D_WARNINGS', '0').lower() in ('1', 'on', 'true', 'y', 'yes'): # allow disabling warnings on development versions # https://github.com/nipreps/fmriprep/pull/2080#discussion_r409118765 from xcp_d._warnings import logging else: import logging -logging.addLevelName(25, "IMPORTANT") # Add a new level between INFO and WARNING -logging.addLevelName(15, "VERBOSE") # Add a new level between INFO and DEBUG +logging.addLevelName(25, 'IMPORTANT') # Add a new level between INFO and WARNING +logging.addLevelName(15, 'VERBOSE') # Add a new level between INFO and DEBUG DEFAULT_MEMORY_MIN_GB = 0.01 @@ -154,70 +155,70 @@ from requests import get as _get_url with suppress((ConnectionError, ReadTimeout)): - _get_url("https://rig.mit.edu/et/projects/nipy/nipype", timeout=0.05) + _get_url('https://rig.mit.edu/et/projects/nipy/nipype', timeout=0.05) # Execution environment _exec_env = os.name _docker_ver = None # special variable set in the container -if os.getenv("IS_DOCKER_8395080871"): - _exec_env = "apptainer" - _cgroup = Path("/proc/1/cgroup") - if _cgroup.exists() and "docker" in _cgroup.read_text(): - _docker_ver = os.getenv("DOCKER_VERSION_8395080871") - _exec_env = "xcp_d-docker" if _docker_ver else "docker" +if os.getenv('IS_DOCKER_8395080871'): + _exec_env = 'apptainer' + _cgroup = Path('/proc/1/cgroup') + if _cgroup.exists() and 'docker' in _cgroup.read_text(): + _docker_ver = os.getenv('DOCKER_VERSION_8395080871') + _exec_env = 'xcp_d-docker' if _docker_ver else 'docker' del _cgroup -_fs_license = os.getenv("FS_LICENSE") -if not _fs_license and os.getenv("FREESURFER_HOME"): - _fs_home = os.getenv("FREESURFER_HOME") - if _fs_home and (Path(_fs_home) / "license.txt").is_file(): - _fs_license = str(Path(_fs_home) / "license.txt") +_fs_license = os.getenv('FS_LICENSE') +if not _fs_license and os.getenv('FREESURFER_HOME'): + _fs_home = os.getenv('FREESURFER_HOME') + if _fs_home and (Path(_fs_home) / 'license.txt').is_file(): + _fs_license = 
str(Path(_fs_home) / 'license.txt') del _fs_home _templateflow_home = Path( - os.getenv("TEMPLATEFLOW_HOME", os.path.join(os.getenv("HOME"), ".cache", "templateflow")) + os.getenv('TEMPLATEFLOW_HOME', os.path.join(os.getenv('HOME'), '.cache', 'templateflow')) ) try: from psutil import virtual_memory _free_mem_at_start = round(virtual_memory().available / 1024**3, 1) -except Exception: +except ImportError: _free_mem_at_start = None -_oc_limit = "n/a" -_oc_policy = "n/a" +_oc_limit = 'n/a' +_oc_policy = 'n/a' try: # Memory policy may have a large effect on types of errors experienced - _proc_oc_path = Path("/proc/sys/vm/overcommit_memory") + _proc_oc_path = Path('/proc/sys/vm/overcommit_memory') if _proc_oc_path.exists(): - _oc_policy = {"0": "heuristic", "1": "always", "2": "never"}.get( - _proc_oc_path.read_text().strip(), "unknown" + _oc_policy = {'0': 'heuristic', '1': 'always', '2': 'never'}.get( + _proc_oc_path.read_text().strip(), 'unknown' ) - if _oc_policy != "never": - _proc_oc_kbytes = Path("/proc/sys/vm/overcommit_kbytes") + if _oc_policy != 'never': + _proc_oc_kbytes = Path('/proc/sys/vm/overcommit_kbytes') if _proc_oc_kbytes.exists(): _oc_limit = _proc_oc_kbytes.read_text().strip() - if _oc_limit in ("0", "n/a") and Path("/proc/sys/vm/overcommit_ratio").exists(): + if _oc_limit in ('0', 'n/a') and Path('/proc/sys/vm/overcommit_ratio').exists(): _oc_limit = f"{Path('/proc/sys/vm/overcommit_ratio').read_text().strip()}%" -except Exception: +except Exception: # noqa: S110, BLE001 pass # Debug modes are names that influence the exposure of internal details to # the user, either through additional derivatives or increased verbosity -DEBUG_MODES = ("pdb",) +DEBUG_MODES = ('pdb',) class _Config: """An abstract class forbidding instantiation.""" - _paths = tuple() + _paths = () def __init__(self): """Avert instantiation.""" - raise RuntimeError("Configuration type is not instantiable.") + raise RuntimeError('Configuration type is not instantiable.') @classmethod def load(cls, settings, init=True, ignore=None): @@ -227,7 +228,7 @@ def load(cls, settings, init=True, ignore=None): if k in ignore or v is None: continue if k in cls._paths: - if isinstance(v, (list, tuple)): + if isinstance(v, list | tuple): setattr(cls, k, [Path(val).absolute() for val in v]) elif isinstance(v, dict): setattr(cls, k, {key: Path(val).absolute() for key, val in v.items()}) @@ -249,19 +250,19 @@ def get(cls): out = {} for k, v in cls.__dict__.items(): - if k.startswith("_") or v is None: + if k.startswith('_') or v is None: continue if callable(getattr(cls, k)): continue if k in cls._paths: - if isinstance(v, (list, tuple)): + if isinstance(v, list | tuple): v = [str(val) for val in v] elif isinstance(v, dict): v = {key: str(val) for key, val in v.items()} else: v = str(v) if isinstance(v, SpatialReferences): - v = " ".join(str(s) for s in v.references) or None + v = ' '.join(str(s) for s in v.references) or None if isinstance(v, Reference): v = str(v) or None out[k] = v @@ -304,7 +305,7 @@ class environment(_Config): class nipype(_Config): """Nipype settings.""" - crashfile_format = "txt" + crashfile_format = 'txt' """The file format for crashfiles, either text or pickle.""" get_linked_libs = False """Run NiPype's tool to enlist linked libraries for every interface.""" @@ -314,11 +315,11 @@ class nipype(_Config): """Number of processes (compute tasks) that can be run in parallel (multiprocessing only).""" omp_nthreads = None """Number of CPUs a single process can access for multithreaded execution.""" - 
plugin = "MultiProc" + plugin = 'MultiProc' """NiPype's execution plugin.""" plugin_args = { - "maxtasksperchild": 1, - "raise_insufficient": False, + 'maxtasksperchild': 1, + 'raise_insufficient': False, } """Settings for NiPype's execution plugin.""" resource_monitor = False @@ -330,13 +331,13 @@ class nipype(_Config): def get_plugin(cls): """Format a dictionary for Nipype consumption.""" out = { - "plugin": cls.plugin, - "plugin_args": cls.plugin_args, + 'plugin': cls.plugin, + 'plugin_args': cls.plugin_args, } - if cls.plugin in ("MultiProc", "LegacyMultiProc"): - out["plugin_args"]["n_procs"] = int(cls.nprocs) + if cls.plugin in ('MultiProc', 'LegacyMultiProc'): + out['plugin_args']['n_procs'] = int(cls.nprocs) if cls.memory_gb: - out["plugin_args"]["memory_gb"] = float(cls.memory_gb) + out['plugin_args']['memory_gb'] = float(cls.memory_gb) return out @classmethod @@ -348,10 +349,10 @@ def init(cls): if cls.resource_monitor: ncfg.update_config( { - "monitoring": { - "enabled": cls.resource_monitor, - "sample_frequency": "0.5", - "summary_append": True, + 'monitoring': { + 'enabled': cls.resource_monitor, + 'sample_frequency': '0.5', + 'summary_append': True, } } ) @@ -360,12 +361,12 @@ def init(cls): # Nipype config (logs and execution) ncfg.update_config( { - "execution": { - "crashdump_dir": str(execution.log_dir), - "crashfile_format": cls.crashfile_format, - "get_linked_libs": cls.get_linked_libs, - "stop_on_first_crash": cls.stop_on_first_crash, - "check_version": False, # disable future telemetry + 'execution': { + 'crashdump_dir': str(execution.log_dir), + 'crashfile_format': cls.crashfile_format, + 'get_linked_libs': cls.get_linked_libs, + 'stop_on_first_crash': cls.stop_on_first_crash, + 'check_version': False, # disable future telemetry } } ) @@ -423,7 +424,7 @@ class execution(_Config): """Select a particular task from all available in the dataset.""" templateflow_home = _templateflow_home """The root folder of the TemplateFlow client.""" - work_dir = Path("work").absolute() + work_dir = Path('work').absolute() """Path to a working directory where intermediate results will be available.""" write_graph = None """Write out the computational graph corresponding to the planned preprocessing.""" @@ -433,24 +434,24 @@ class execution(_Config): _layout = None _paths = ( - "fmri_dir", - "datasets", - "bids_database_dir", - "fs_license_file", - "layout", - "log_dir", - "output_dir", - "templateflow_home", - "work_dir", - "dataset_links", - "confounds_config", + 'fmri_dir', + 'datasets', + 'bids_database_dir', + 'fs_license_file', + 'layout', + 'log_dir', + 'output_dir', + 'templateflow_home', + 'work_dir', + 'dataset_links', + 'confounds_config', ) @classmethod def init(cls): """Create a new BIDS Layout accessible with :attr:`~execution.layout`.""" if cls.fs_license_file and Path(cls.fs_license_file).is_file(): - os.environ["FS_LICENSE"] = str(cls.fs_license_file) + os.environ['FS_LICENSE'] = str(cls.fs_license_file) if cls._layout is None: import re @@ -458,39 +459,37 @@ def init(cls): from bids.layout import BIDSLayout from bids.layout.index import BIDSLayoutIndexer - _db_path = cls.bids_database_dir or (cls.work_dir / cls.run_uuid / "bids_db") + _db_path = cls.bids_database_dir or (cls.work_dir / cls.run_uuid / 'bids_db') _db_path.mkdir(exist_ok=True, parents=True) # Recommended after PyBIDS 12.1 ignore_patterns = [ - "code", - "stimuli", - "models", - re.compile(r"\/\.\w+|^\.\w+"), # hidden files + 'code', + 'stimuli', + 'models', + re.compile(r'\/\.\w+|^\.\w+'), # hidden files 
re.compile( - ( - r"sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/" - r"(beh|dwi|eeg|ieeg|meg|perf|pet|physio)" - ) + r'sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/' + r'(beh|dwi|eeg|ieeg|meg|perf|pet|physio)' ), ] if cls.participant_label and cls.bids_database_dir is None: # Ignore any subjects who aren't the requested ones. ignore_patterns.append( - re.compile(r"sub-(?!(" + "|".join(cls.participant_label) + r")(\b|_))") + re.compile(r'sub-(?!(' + '|'.join(cls.participant_label) + r')(\b|_))') ) _indexer = BIDSLayoutIndexer( validate=False, ignore=ignore_patterns, ) - xcp_d_config = str(load_data("xcp_d_bids_config2.json")) + xcp_d_config = str(load_data('xcp_d_bids_config2.json')) cls._layout = BIDSLayout( str(cls.fmri_dir), database_path=_db_path, reset_database=cls.bids_database_dir is None, indexer=_indexer, - config=["bids", "derivatives", xcp_d_config], + config=['bids', 'derivatives', xcp_d_config], ) cls.bids_database_dir = _db_path @@ -506,7 +505,7 @@ def _process_value(value): else: return ( getattr(Query, value[7:-4]) - if not isinstance(value, Query) and "Query" in value + if not isinstance(value, Query) and 'Query' in value else value ) @@ -517,21 +516,21 @@ def _process_value(value): if cls.task_id: cls.bids_filters = cls.bids_filters or {} - cls.bids_filters["bold"] = cls.bids_filters.get("bold", {}) - cls.bids_filters["bold"]["task"] = cls.task_id + cls.bids_filters['bold'] = cls.bids_filters.get('bold', {}) + cls.bids_filters['bold']['task'] = cls.task_id dataset_links = { - "preprocessed": cls.fmri_dir, - "templateflow": Path(TF_LAYOUT.root), + 'preprocessed': cls.fmri_dir, + 'templateflow': Path(TF_LAYOUT.root), } if cls.atlases: - dataset_links["atlas"] = cls.output_dir / "atlases" + dataset_links['atlas'] = cls.output_dir / 'atlases' for dset_name, dset_path in cls.datasets.items(): dataset_links[dset_name] = dset_path cls.dataset_links = dataset_links - if "all" in cls.debug: + if 'all' in cls.debug: cls.debug = list(DEBUG_MODES) @@ -608,18 +607,18 @@ def init(cls): class loggers: """Keep loggers easily accessible (see :py:func:`init`).""" - _fmt = "%(asctime)s,%(msecs)d %(name)-2s %(levelname)-2s:\n\t %(message)s" - _datefmt = "%y%m%d-%H:%M:%S" + _fmt = '%(asctime)s,%(msecs)d %(name)-2s %(levelname)-2s:\n\t %(message)s' + _datefmt = '%y%m%d-%H:%M:%S' default = logging.getLogger() """The root logger.""" - cli = logging.getLogger("cli") + cli = logging.getLogger('cli') """Command-line interface logging.""" - workflow = logging.getLogger("nipype.workflow") + workflow = logging.getLogger('nipype.workflow') """NiPype's workflow logger.""" - interface = logging.getLogger("nipype.interface") + interface = logging.getLogger('nipype.interface') """NiPype's interface logger.""" - utils = logging.getLogger("nipype.utils") + utils = logging.getLogger('nipype.utils') """NiPype's utils logger.""" @classmethod @@ -645,7 +644,7 @@ def init(cls): cls.workflow.setLevel(execution.log_level) cls.utils.setLevel(execution.log_level) ncfg.update_config( - {"logging": {"log_directory": str(execution.log_dir), "log_to_file": True}} + {'logging': {'log_directory': str(execution.log_dir), 'log_to_file': True}} ) @@ -685,10 +684,10 @@ def from_dict(settings, init=True, ignore=None): def initialize(x): return init if init in (True, False) else x in init - nipype.load(settings, init=initialize("nipype"), ignore=ignore) - execution.load(settings, init=initialize("execution"), ignore=ignore) - workflow.load(settings, init=initialize("workflow"), ignore=ignore) - seeds.load(settings, init=initialize("seeds"), 
ignore=ignore) + nipype.load(settings, init=initialize('nipype'), ignore=ignore) + execution.load(settings, init=initialize('execution'), ignore=ignore) + workflow.load(settings, init=initialize('workflow'), ignore=ignore) + seeds.load(settings, init=initialize('seeds'), ignore=ignore) loggers.init() @@ -716,7 +715,7 @@ def initialize(x): filename = Path(filename) settings = loads(filename.read_text()) for sectionname, configs in settings.items(): - if sectionname != "environment": + if sectionname != 'environment': section = getattr(sys.modules[__name__], sectionname) ignore = skip.get(sectionname) section.load(configs, ignore=ignore, init=initialize(sectionname)) @@ -725,17 +724,17 @@ def initialize(x): def get(flat=False): """Get config as a dict.""" settings = { - "environment": environment.get(), - "execution": execution.get(), - "workflow": workflow.get(), - "nipype": nipype.get(), - "seeds": seeds.get(), + 'environment': environment.get(), + 'execution': execution.get(), + 'workflow': workflow.get(), + 'nipype': nipype.get(), + 'seeds': seeds.get(), } if not flat: return settings return { - ".".join((section, k)): v + '.'.join((section, k)): v for section, configs in settings.items() for k, v in configs.items() } diff --git a/xcp_d/data/boilerplate.bib b/xcp_d/data/boilerplate.bib index 40d1e8b12..ba9591d6b 100644 --- a/xcp_d/data/boilerplate.bib +++ b/xcp_d/data/boilerplate.bib @@ -264,7 +264,6 @@ @article{Gordon_2014 issn = {1047-3211}, url = {https://doi.org/10.1093/cercor/bhu239}, doi = {10.1093/cercor/bhu239}, - abstract = {The cortical surface is organized into a large number of cortical areas; however, these areas have not been comprehensively mapped in the human. Abrupt transitions in resting-state functional connectivity (RSFC) patterns can noninvasively identify locations of putative borders between cortical areas (RSFC-boundary mapping; Cohen et al. 2008). Here we describe a technique for using RSFC-boundary maps to define parcels that represent putative cortical areas. These parcels had highly homogenous RSFC patterns, indicating that they contained one unique RSFC signal; furthermore, the parcels were much more homogenous than a null model matched for parcel size when tested in two separate datasets. Several alternative parcellation schemes were tested this way, and no other parcellation was as homogenous as or had as large a difference compared with its null model. The boundary map-derived parcellation contained parcels that overlapped with architectonic mapping of areas 17, 2, 3, and 4. These parcels had a network structure similar to the known network structure of the brain, and their connectivity patterns were reliable across individual subjects. These observations suggest that RSFC-boundary map-derived parcels provide information about the location and extent of human cortical areas. A parcellation generated using this method is available at http://www.nil.wustl.edu/labs/petersen/Resources.html.}, number = {1}, urldate = {2021-07-23}, journal = {Cerebral Cortex}, diff --git a/xcp_d/data/executive_summary_templates/brainsprite.js b/xcp_d/data/executive_summary_templates/brainsprite.js index 0381882c0..475e8447b 100644 --- a/xcp_d/data/executive_summary_templates/brainsprite.js +++ b/xcp_d/data/executive_summary_templates/brainsprite.js @@ -1,7 +1,7 @@ // The rest of this is scripts for brainsprite. Since brainsprite is no // longer supported, and since this is working, leave as is! 
-// This contant is only needed once (thank goodness) and does not need -// any values inserted. Just include it in the HTML whereever you have +// This constant is only needed once (thank goodness) and does not need +// any values inserted. Just include it in the HTML wherever you have // your scripts. function brainsprite(params) { @@ -100,7 +100,7 @@ function brainsprite(params) { //*************// brain.planes = {}; // A series of canvas to represent the sprites along the three possible - // plane X: sagital; + // plane X: sagittal; brain.planes.canvasX = document.createElement('canvas'); brain.planes.contextX = brain.planes.canvasX.getContext('2d'); @@ -319,7 +319,7 @@ function brainsprite(params) { // Now draw the slice switch(type) { case 'X': - // Draw a sagital slice + // Draw a sagittal slice pos.XW = ((brain.numSlice.X)%brain.nbCol); pos.XH = (brain.numSlice.X-pos.XW)/brain.nbCol; // Set fill color for the slice diff --git a/xcp_d/ingression/__init__.py b/xcp_d/ingression/__init__.py index 5cd7f1b3e..7793038a6 100644 --- a/xcp_d/ingression/__init__.py +++ b/xcp_d/ingression/__init__.py @@ -3,8 +3,8 @@ from xcp_d.ingression import abcdbids, hcpya, ukbiobank, utils __all__ = [ - "abcdbids", - "hcpya", - "ukbiobank", - "utils", + 'abcdbids', + 'hcpya', + 'ukbiobank', + 'utils', ] diff --git a/xcp_d/ingression/abcdbids.py b/xcp_d/ingression/abcdbids.py index 0b2b5d38d..a43f1e282 100644 --- a/xcp_d/ingression/abcdbids.py +++ b/xcp_d/ingression/abcdbids.py @@ -5,6 +5,7 @@ These functions are specifically designed to work with abcd-hcp-pipeline version 0.1.3. https://github.com/DCAN-Labs/abcd-hcp-pipeline/releases/tag/v0.1.3 """ + import glob import os import re @@ -25,7 +26,7 @@ ) from xcp_d.utils.filemanip import ensure_list -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') def convert_dcan2bids(in_dir, out_dir, participant_ids=None): @@ -52,24 +53,24 @@ def convert_dcan2bids(in_dir, out_dir, participant_ids=None): Since the T1w is in standard space already, we use identity transforms instead of the individual transforms available in the DCAN derivatives. 
""" - LOGGER.warning("convert_dcan2bids is an experimental function.") + LOGGER.warning('convert_dcan2bids is an experimental function.') in_dir = os.path.abspath(in_dir) out_dir = os.path.abspath(out_dir) if participant_ids is None: - subject_folders = sorted(glob.glob(os.path.join(in_dir, "sub*"))) + subject_folders = sorted(glob.glob(os.path.join(in_dir, 'sub*'))) subject_folders = [ subject_folder for subject_folder in subject_folders if os.path.isdir(subject_folder) ] participant_ids = [os.path.basename(subject_folder) for subject_folder in subject_folders] if not participant_ids: - raise ValueError(f"No subject found in {in_dir}") + raise ValueError(f'No subject found in {in_dir}') else: participant_ids = ensure_list(participant_ids) for subject_id in participant_ids: - LOGGER.info(f"Processing {subject_id}") + LOGGER.info(f'Processing {subject_id}') convert_dcan_to_bids_single_subject( in_dir=in_dir, out_dir=out_dir, @@ -135,32 +136,32 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): └── wm_2mm__mask_eroded.nii.gz """ assert isinstance(in_dir, str) - assert os.path.isdir(in_dir), f"Folder DNE: {in_dir}" + assert os.path.isdir(in_dir), f'Folder DNE: {in_dir}' assert isinstance(out_dir, str) assert isinstance(sub_ent, str) - sub_id = sub_ent.replace("sub-", "") + sub_id = sub_ent.replace('sub-', '') # Reset the subject entity in case the sub- prefix wasn't included originally. - sub_ent = f"sub-{sub_id}" + sub_ent = f'sub-{sub_id}' - VOLSPACE = "MNI152NLin6Asym" - volspace_ent = f"space-{VOLSPACE}" - RES_ENT = "res-2" + VOLSPACE = 'MNI152NLin6Asym' + volspace_ent = f'space-{VOLSPACE}' + RES_ENT = 'res-2' subject_dir_bids = os.path.join(out_dir, sub_ent) os.makedirs(subject_dir_bids, exist_ok=True) # get session ids - session_folders = sorted(glob.glob(os.path.join(in_dir, sub_ent, "s*"))) + session_folders = sorted(glob.glob(os.path.join(in_dir, sub_ent, 's*'))) ses_entities = [ os.path.basename(ses_dir) for ses_dir in session_folders if os.path.isdir(ses_dir) ] if not ses_entities: - raise FileNotFoundError(f"No session volumes found in {os.path.join(in_dir, sub_ent)}") + raise FileNotFoundError(f'No session volumes found in {os.path.join(in_dir, sub_ent)}') - dataset_description_fmriprep = os.path.join(out_dir, "dataset_description.json") + dataset_description_fmriprep = os.path.join(out_dir, 'dataset_description.json') if os.path.isfile(dataset_description_fmriprep): - LOGGER.info("Converted dataset folder already exists. Skipping conversion.") + LOGGER.info('Converted dataset folder already exists. Skipping conversion.') return # A dictionary of mappings from HCP derivatives to fMRIPrep derivatives. @@ -168,19 +169,19 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): copy_dictionary = {} # The identity xform is used in place of any actual ones. 
- identity_xfm = str(load_data("transform/itkIdentityTransform.txt")) + identity_xfm = str(load_data('transform/itkIdentityTransform.txt')) copy_dictionary[identity_xfm] = [] morph_dict_all_ses = {} for ses_ent in ses_entities: - LOGGER.info(f"Processing {ses_ent}") - subses_ents = f"{sub_ent}_{ses_ent}" + LOGGER.info(f'Processing {ses_ent}') + subses_ents = f'{sub_ent}_{ses_ent}' session_dir_fmriprep = os.path.join(subject_dir_bids, ses_ent) - anat_dir_orig = os.path.join(in_dir, sub_ent, ses_ent, "files", "MNINonLinear") - anat_dir_bids = os.path.join(session_dir_fmriprep, "anat") - func_dir_orig = os.path.join(anat_dir_orig, "Results") - func_dir_bids = os.path.join(session_dir_fmriprep, "func") - work_dir = os.path.join(subject_dir_bids, "work") + anat_dir_orig = os.path.join(in_dir, sub_ent, ses_ent, 'files', 'MNINonLinear') + anat_dir_bids = os.path.join(session_dir_fmriprep, 'anat') + func_dir_orig = os.path.join(anat_dir_orig, 'Results') + func_dir_bids = os.path.join(session_dir_fmriprep, 'func') + work_dir = os.path.join(subject_dir_bids, 'work') os.makedirs(anat_dir_bids, exist_ok=True) os.makedirs(func_dir_bids, exist_ok=True) @@ -189,18 +190,18 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): # Create identity-based transforms t1w_to_template_fmriprep = os.path.join( anat_dir_bids, - f"{subses_ents}_from-T1w_to-{VOLSPACE}_mode-image_xfm.txt", + f'{subses_ents}_from-T1w_to-{VOLSPACE}_mode-image_xfm.txt', ) copy_dictionary[identity_xfm].append(t1w_to_template_fmriprep) template_to_t1w_fmriprep = os.path.join( anat_dir_bids, - f"{subses_ents}_from-{VOLSPACE}_to-T1w_mode-image_xfm.txt", + f'{subses_ents}_from-{VOLSPACE}_to-T1w_mode-image_xfm.txt', ) copy_dictionary[identity_xfm].append(template_to_t1w_fmriprep) # Collect anatomical files to copy - base_anatomical_ents = f"{subses_ents}_{volspace_ent}_{RES_ENT}" + base_anatomical_ents = f'{subses_ents}_{volspace_ent}_{RES_ENT}' anat_dict = collect_anatomical_files( anat_dir_orig, anat_dir_bids, @@ -215,82 +216,82 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): # Convert morphometry files morphometry_dict = collect_morphs(anat_dir_orig, anat_dir_bids, sub_id, subses_ents) morph_dict_all_ses = {**morph_dict_all_ses, **morphometry_dict} - LOGGER.info("Finished collecting anatomical files") + LOGGER.info('Finished collecting anatomical files') # Get masks to be used to extract confounds - wm_mask = os.path.join(anat_dir_orig, f"wm_2mm_{sub_id}_mask_eroded.nii.gz") - csf_mask = os.path.join(anat_dir_orig, f"vent_2mm_{sub_id}_mask_eroded.nii.gz") + wm_mask = os.path.join(anat_dir_orig, f'wm_2mm_{sub_id}_mask_eroded.nii.gz') + csf_mask = os.path.join(anat_dir_orig, f'vent_2mm_{sub_id}_mask_eroded.nii.gz') # Collect functional files to copy - task_dirs_orig = sorted(glob.glob(os.path.join(func_dir_orig, f"{ses_ent}_task-*"))) + task_dirs_orig = sorted(glob.glob(os.path.join(func_dir_orig, f'{ses_ent}_task-*'))) task_names = [os.path.basename(f) for f in task_dirs_orig if os.path.isdir(f)] for base_task_name in task_names: - LOGGER.info(f"Processing {base_task_name}") + LOGGER.info(f'Processing {base_task_name}') # Names seem to follow ses-X_task-Y_run-Z format. found_task_info = re.findall( - r".*_task-([0-9a-zA-Z]+[a-zA-Z]+)_run-(\d+)", + r'.*_task-([0-9a-zA-Z]+[a-zA-Z]+)_run-(\d+)', base_task_name, ) if len(found_task_info) != 1: LOGGER.warning( - f"Task name and run number could not be inferred for {base_task_name}. " - "Skipping." 
+ f'Task name and run number could not be inferred for {base_task_name}. ' + 'Skipping.' ) continue task_id, run_id = found_task_info[0] - task_ent = f"task-{task_id}" - run_ent = f"run-{run_id}" + task_ent = f'task-{task_id}' + run_ent = f'run-{run_id}' task_dir_orig = os.path.join(func_dir_orig, base_task_name) - func_prefix = f"{subses_ents}_{task_ent}_{run_ent}" + func_prefix = f'{subses_ents}_{task_ent}_{run_ent}' # Find original task files - sbref_orig = os.path.join(task_dir_orig, f"{base_task_name}_SBRef.nii.gz") + sbref_orig = os.path.join(task_dir_orig, f'{base_task_name}_SBRef.nii.gz') boldref_fmriprep = os.path.join( func_dir_bids, - f"{func_prefix}_{volspace_ent}_{RES_ENT}_boldref.nii.gz", + f'{func_prefix}_{volspace_ent}_{RES_ENT}_boldref.nii.gz', ) copy_dictionary[sbref_orig] = [boldref_fmriprep] - bold_nifti_orig = os.path.join(task_dir_orig, f"{base_task_name}.nii.gz") + bold_nifti_orig = os.path.join(task_dir_orig, f'{base_task_name}.nii.gz') bold_nifti_fmriprep = os.path.join( func_dir_bids, - f"{func_prefix}_{volspace_ent}_{RES_ENT}_desc-preproc_bold.nii.gz", + f'{func_prefix}_{volspace_ent}_{RES_ENT}_desc-preproc_bold.nii.gz', ) copy_dictionary[bold_nifti_orig] = [bold_nifti_fmriprep] - bold_cifti_orig = os.path.join(task_dir_orig, f"{base_task_name}_Atlas.dtseries.nii") + bold_cifti_orig = os.path.join(task_dir_orig, f'{base_task_name}_Atlas.dtseries.nii') bold_cifti_fmriprep = os.path.join( func_dir_bids, - f"{func_prefix}_space-fsLR_den-91k_bold.dtseries.nii", + f'{func_prefix}_space-fsLR_den-91k_bold.dtseries.nii', ) copy_dictionary[bold_cifti_orig] = [bold_cifti_fmriprep] # Extract metadata for JSON files bold_metadata = { - "RepetitionTime": float(nb.load(bold_nifti_orig).header.get_zooms()[-1]), - "TaskName": task_id, + 'RepetitionTime': float(nb.load(bold_nifti_orig).header.get_zooms()[-1]), + 'TaskName': task_id, } bold_nifti_json_fmriprep = os.path.join( func_dir_bids, - f"{func_prefix}_{volspace_ent}_{RES_ENT}_desc-preproc_bold.json", + f'{func_prefix}_{volspace_ent}_{RES_ENT}_desc-preproc_bold.json', ) write_json(bold_metadata, bold_nifti_json_fmriprep) bold_metadata.update( { - "grayordinates": "91k", - "space": "HCP grayordinates", - "surface": "fsLR", - "surface_density": "32k", - "volume": "MNI152NLin6Asym", + 'grayordinates': '91k', + 'space': 'HCP grayordinates', + 'surface': 'fsLR', + 'surface_density': '32k', + 'volume': 'MNI152NLin6Asym', }, ) bold_cifti_json_fmriprep = os.path.join( func_dir_bids, - f"{func_prefix}_space-fsLR_den-91k_bold.dtseries.json", + f'{func_prefix}_space-fsLR_den-91k_bold.dtseries.json', ) write_json(bold_metadata, bold_cifti_json_fmriprep) @@ -302,20 +303,20 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): work_dir=work_dir, bold_file=bold_nifti_orig, # This file is the anatomical brain mask downsampled to 2 mm3. 
- brainmask_file=os.path.join(task_dir_orig, "brainmask_fs.2.0.nii.gz"), + brainmask_file=os.path.join(task_dir_orig, 'brainmask_fs.2.0.nii.gz'), csf_mask_file=csf_mask, wm_mask_file=wm_mask, ) # Make figures - figdir = os.path.join(subject_dir_bids, "figures") + figdir = os.path.join(subject_dir_bids, 'figures') os.makedirs(figdir, exist_ok=True) bbref_fig_fmriprep = os.path.join( figdir, - f"{func_prefix}_desc-bbregister_bold.svg", + f'{func_prefix}_desc-bbregister_bold.svg', ) - t1w = os.path.join(anat_dir_orig, "T1w.nii.gz") - ribbon = os.path.join(anat_dir_orig, "ribbon.nii.gz") + t1w = os.path.join(anat_dir_orig, 'T1w.nii.gz') + ribbon = os.path.join(anat_dir_orig, 'ribbon.nii.gz') bbref_fig_fmriprep = plot_bbreg( fixed_image=t1w, moving_image=sbref_orig, @@ -323,25 +324,25 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): contour=ribbon, ) - LOGGER.info(f"Finished {base_task_name}") + LOGGER.info(f'Finished {base_task_name}') - LOGGER.info("Finished collecting functional files") + LOGGER.info('Finished collecting functional files') # Copy ABCD files to fMRIPrep folder - LOGGER.info("Copying files") + LOGGER.info('Copying files') copy_files_in_dict(copy_dictionary) - LOGGER.info("Finished copying files") + LOGGER.info('Finished copying files') # Write the dataset description out last dataset_description_dict = { - "Name": "ABCD-DCAN", - "BIDSVersion": "1.9.0", - "DatasetType": "derivative", - "GeneratedBy": [ + 'Name': 'ABCD-DCAN', + 'BIDSVersion': '1.9.0', + 'DatasetType': 'derivative', + 'GeneratedBy': [ { - "Name": "DCAN", - "Version": "0.0.4", - "CodeURL": "https://github.com/DCAN-Labs/abcd-hcp-pipeline", + 'Name': 'DCAN', + 'Version': '0.0.4', + 'CodeURL': 'https://github.com/DCAN-Labs/abcd-hcp-pipeline', }, ], } @@ -357,7 +358,7 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): scans_dict[item] = key scans_tuple = tuple(scans_dict.items()) - scans_df = pd.DataFrame(scans_tuple, columns=["filename", "source_file"]) - scans_tsv = os.path.join(subject_dir_bids, f"{subses_ents}_scans.tsv") - scans_df.to_csv(scans_tsv, sep="\t", index=False) - LOGGER.info("Conversion completed") + scans_df = pd.DataFrame(scans_tuple, columns=['filename', 'source_file']) + scans_tsv = os.path.join(subject_dir_bids, f'{subses_ents}_scans.tsv') + scans_df.to_csv(scans_tsv, sep='\t', index=False) + LOGGER.info('Conversion completed') diff --git a/xcp_d/ingression/hcpya.py b/xcp_d/ingression/hcpya.py index dbf7862f1..0b329f262 100644 --- a/xcp_d/ingression/hcpya.py +++ b/xcp_d/ingression/hcpya.py @@ -6,6 +6,7 @@ Because HCP-YA doesn't really version their processing pipeline and derivatives, we have to pin to download periods. """ + import glob import os import re @@ -26,7 +27,7 @@ ) from xcp_d.utils.filemanip import ensure_list -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') def convert_hcp2bids(in_dir, out_dir, participant_ids=None): @@ -53,34 +54,34 @@ def convert_hcp2bids(in_dir, out_dir, participant_ids=None): Since the T1w is in standard space already, we use identity transforms instead of the individual transforms available in the DCAN derivatives. 
""" - LOGGER.warning("convert_hcp2bids is an experimental function.") + LOGGER.warning('convert_hcp2bids is an experimental function.') in_dir = os.path.abspath(in_dir) out_dir = os.path.abspath(out_dir) # a list of folders that are not subject identifiers EXCLUDE_LIST = [ - "BiasField", - "Native", - "ROIs", - "Results", - "T1w", - "T1w_restore", - "T1w_restore_brain", - "T2w", - "T2w_restore", - "T2w_restore_brain", - "aparc", - "aparc.a2009s+aseg", - "brainmask_fs", - "fsaverage_LR32k", - "ribbon", - "wmparc", - "xfms", + 'BiasField', + 'Native', + 'ROIs', + 'Results', + 'T1w', + 'T1w_restore', + 'T1w_restore_brain', + 'T2w', + 'T2w_restore', + 'T2w_restore_brain', + 'aparc', + 'aparc.a2009s+aseg', + 'brainmask_fs', + 'fsaverage_LR32k', + 'ribbon', + 'wmparc', + 'xfms', ] if participant_ids is None: subject_folders = sorted( - glob.glob(os.path.join(in_dir, "*", "*", "*", "*R.pial.32k_fs_LR.surf.gii")) + glob.glob(os.path.join(in_dir, '*', '*', '*', '*R.pial.32k_fs_LR.surf.gii')) ) subject_folders = [ subject_folder for subject_folder in subject_folders if os.path.exists(subject_folder) @@ -88,20 +89,20 @@ def convert_hcp2bids(in_dir, out_dir, participant_ids=None): participant_ids = [os.path.basename(subject_folder) for subject_folder in subject_folders] all_subject_ids = [] for subject_id in participant_ids: - subject_id = subject_id.split(".")[0] + subject_id = subject_id.split('.')[0] if subject_id not in all_subject_ids and subject_id not in EXCLUDE_LIST: - all_subject_ids.append(f"sub-{subject_id}") + all_subject_ids.append(f'sub-{subject_id}') participant_ids = all_subject_ids if len(participant_ids) == 0: - raise ValueError(f"No subject found in {in_dir}") + raise ValueError(f'No subject found in {in_dir}') else: participant_ids = ensure_list(participant_ids) for subject_id in participant_ids: - LOGGER.info(f"Converting {subject_id}") + LOGGER.info(f'Converting {subject_id}') convert_hcp_to_bids_single_subject( in_dir=in_dir, out_dir=out_dir, @@ -164,30 +165,30 @@ def convert_hcp_to_bids_single_subject(in_dir, out_dir, sub_ent): └── ribbon.nii.gz """ assert isinstance(in_dir, str) - assert os.path.isdir(in_dir), f"Folder DNE: {in_dir}" + assert os.path.isdir(in_dir), f'Folder DNE: {in_dir}' assert isinstance(out_dir, str) assert isinstance(sub_ent, str) - sub_id = sub_ent.replace("sub-", "") + sub_id = sub_ent.replace('sub-', '') # Reset the subject entity in case the sub- prefix wasn't included originally. 
- sub_ent = f"sub-{sub_id}" + sub_ent = f'sub-{sub_id}' subses_ents = sub_ent - VOLSPACE = "MNI152NLin6Asym" - volspace_ent = f"space-{VOLSPACE}" - RES_ENT = "res-2" + VOLSPACE = 'MNI152NLin6Asym' + volspace_ent = f'space-{VOLSPACE}' + RES_ENT = 'res-2' - anat_dir_orig = os.path.join(in_dir, sub_id, "MNINonLinear") - func_dir_orig = os.path.join(anat_dir_orig, "Results") + anat_dir_orig = os.path.join(in_dir, sub_id, 'MNINonLinear') + func_dir_orig = os.path.join(anat_dir_orig, 'Results') subject_dir_bids = os.path.join(out_dir, sub_ent) - anat_dir_bids = os.path.join(subject_dir_bids, "anat") - func_dir_bids = os.path.join(subject_dir_bids, "func") - work_dir = os.path.join(subject_dir_bids, "work") + anat_dir_bids = os.path.join(subject_dir_bids, 'anat') + func_dir_bids = os.path.join(subject_dir_bids, 'func') + work_dir = os.path.join(subject_dir_bids, 'work') - dataset_description_fmriprep = os.path.join(out_dir, "dataset_description.json") + dataset_description_fmriprep = os.path.join(out_dir, 'dataset_description.json') if os.path.isfile(dataset_description_fmriprep): - LOGGER.info("Converted dataset already exists. Skipping conversion.") + LOGGER.info('Converted dataset already exists. Skipping conversion.') return os.makedirs(anat_dir_bids, exist_ok=True) @@ -195,31 +196,31 @@ def convert_hcp_to_bids_single_subject(in_dir, out_dir, sub_ent): os.makedirs(work_dir, exist_ok=True) # Get masks to be used to extract confounds - csf_mask = str(load_data(f"masks/{volspace_ent}_{RES_ENT}_label-CSF_mask.nii.gz")) - wm_mask = str(load_data(f"masks/{volspace_ent}_{RES_ENT}_label-WM_mask.nii.gz")) + csf_mask = str(load_data(f'masks/{volspace_ent}_{RES_ENT}_label-CSF_mask.nii.gz')) + wm_mask = str(load_data(f'masks/{volspace_ent}_{RES_ENT}_label-WM_mask.nii.gz')) # A dictionary of mappings from HCP derivatives to fMRIPrep derivatives. # Values will be lists, to allow one-to-many mappings. copy_dictionary = {} # The identity xform is used in place of any actual ones. 
- identity_xfm = str(load_data("transform/itkIdentityTransform.txt")) + identity_xfm = str(load_data('transform/itkIdentityTransform.txt')) copy_dictionary[identity_xfm] = [] t1w_to_template_fmriprep = os.path.join( anat_dir_bids, - f"{subses_ents}_from-T1w_to-{VOLSPACE}_mode-image_xfm.txt", + f'{subses_ents}_from-T1w_to-{VOLSPACE}_mode-image_xfm.txt', ) copy_dictionary[identity_xfm].append(t1w_to_template_fmriprep) template_to_t1w_fmriprep = os.path.join( anat_dir_bids, - f"{subses_ents}_from-{VOLSPACE}_to-T1w_mode-image_xfm.txt", + f'{subses_ents}_from-{VOLSPACE}_to-T1w_mode-image_xfm.txt', ) copy_dictionary[identity_xfm].append(template_to_t1w_fmriprep) # Collect anatomical files to copy - base_anatomical_ents = f"{subses_ents}_{volspace_ent}_{RES_ENT}" + base_anatomical_ents = f'{subses_ents}_{volspace_ent}_{RES_ENT}' anat_dict = collect_anatomical_files(anat_dir_orig, anat_dir_bids, base_anatomical_ents) copy_dictionary = {**copy_dictionary, **anat_dict} @@ -229,19 +230,19 @@ def convert_hcp_to_bids_single_subject(in_dir, out_dir, sub_ent): # Convert morphometry files morphometry_dict = collect_morphs(anat_dir_orig, anat_dir_bids, sub_id, subses_ents) - LOGGER.info("Finished collecting anatomical files") + LOGGER.info('Finished collecting anatomical files') # Collect functional files to copy - task_dirs_orig = sorted(glob.glob(os.path.join(func_dir_orig, "*"))) + task_dirs_orig = sorted(glob.glob(os.path.join(func_dir_orig, '*'))) task_names = [ - os.path.basename(f) for f in task_dirs_orig if f.endswith("RL") or f.endswith("LR") + os.path.basename(f) for f in task_dirs_orig if f.endswith('RL') or f.endswith('LR') ] for base_task_name in task_names: - LOGGER.info(f"Processing {base_task_name}") + LOGGER.info(f'Processing {base_task_name}') # NOTE: What is the first element in the folder name? 
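HCP-YA run folders in the Results directory follow a prefix_TASKRUN_direction pattern, which the split-and-match logic below decomposes into a task name, run number, and phase-encoding direction. A small sketch with a hypothetical folder name:

    import re

    base_task_name = 'rfMRI_REST1_LR'  # hypothetical HCP-YA Results folder name
    _, base_task_id, dir_id = base_task_name.split('_')
    match = re.match(r'([A-Za-z0-9]+[a-zA-Z]+)(\d+)$', base_task_id)
    if match:
        task_id, run_id = match.group(1).lower(), int(match.group(2))
    else:
        # No trailing run number; assume a single run.
        task_id, run_id = base_task_id.lower(), 1
    print(task_id, run_id, dir_id)  # rest 1 LR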
- _, base_task_id, dir_id = base_task_name.split("_") - match = re.match(r"([A-Za-z0-9]+[a-zA-Z]+)(\d+)$", base_task_id) + _, base_task_id, dir_id = base_task_name.split('_') + match = re.match(r'([A-Za-z0-9]+[a-zA-Z]+)(\d+)$', base_task_id) if match: task_id = match.group(1).lower() run_id = int(match.group(2)) @@ -249,61 +250,61 @@ def convert_hcp_to_bids_single_subject(in_dir, out_dir, sub_ent): task_id = base_task_id.lower() run_id = 1 - task_ent = f"task-{task_id}" - run_ent = f"run-{run_id}" - dir_ent = f"dir-{dir_id}" + task_ent = f'task-{task_id}' + run_ent = f'run-{run_id}' + dir_ent = f'dir-{dir_id}' task_dir_orig = os.path.join(func_dir_orig, base_task_name) - func_prefix = f"{subses_ents}_{task_ent}_{dir_ent}_{run_ent}" + func_prefix = f'{subses_ents}_{task_ent}_{dir_ent}_{run_ent}' # Find original task files - sbref_orig = os.path.join(task_dir_orig, "SBRef_dc.nii.gz") + sbref_orig = os.path.join(task_dir_orig, 'SBRef_dc.nii.gz') boldref_fmriprep = os.path.join( func_dir_bids, - f"{func_prefix}_{volspace_ent}_{RES_ENT}_boldref.nii.gz", + f'{func_prefix}_{volspace_ent}_{RES_ENT}_boldref.nii.gz', ) copy_dictionary[sbref_orig] = [boldref_fmriprep] - bold_nifti_orig = os.path.join(task_dir_orig, f"{base_task_name}.nii.gz") + bold_nifti_orig = os.path.join(task_dir_orig, f'{base_task_name}.nii.gz') bold_nifti_fmriprep = os.path.join( func_dir_bids, - f"{func_prefix}_{volspace_ent}_{RES_ENT}_desc-preproc_bold.nii.gz", + f'{func_prefix}_{volspace_ent}_{RES_ENT}_desc-preproc_bold.nii.gz', ) copy_dictionary[bold_nifti_orig] = [bold_nifti_fmriprep] bold_cifti_orig = os.path.join( task_dir_orig, - f"{base_task_name}_Atlas_MSMAll.dtseries.nii", + f'{base_task_name}_Atlas_MSMAll.dtseries.nii', ) bold_cifti_fmriprep = os.path.join( func_dir_bids, - f"{func_prefix}_space-fsLR_den-91k_bold.dtseries.nii", + f'{func_prefix}_space-fsLR_den-91k_bold.dtseries.nii', ) copy_dictionary[bold_cifti_orig] = [bold_cifti_fmriprep] # Extract metadata for JSON files bold_metadata = { - "RepetitionTime": float(nb.load(bold_nifti_orig).header.get_zooms()[-1]), - "TaskName": task_id, + 'RepetitionTime': float(nb.load(bold_nifti_orig).header.get_zooms()[-1]), + 'TaskName': task_id, } bold_nifti_json_fmriprep = os.path.join( func_dir_bids, - f"{func_prefix}_{volspace_ent}_{RES_ENT}_desc-preproc_bold.json", + f'{func_prefix}_{volspace_ent}_{RES_ENT}_desc-preproc_bold.json', ) write_json(bold_metadata, bold_nifti_json_fmriprep) bold_metadata.update( { - "grayordinates": "91k", - "space": "HCP grayordinates", - "surface": "fsLR", - "surface_density": "32k", - "volume": "MNI152NLin6Asym", + 'grayordinates': '91k', + 'space': 'HCP grayordinates', + 'surface': 'fsLR', + 'surface_density': '32k', + 'volume': 'MNI152NLin6Asym', }, ) bold_cifti_json_fmriprep = os.path.join( func_dir_bids, - f"{func_prefix}_space-fsLR_den-91k_bold.dtseries.json", + f'{func_prefix}_space-fsLR_den-91k_bold.dtseries.json', ) write_json(bold_metadata, bold_cifti_json_fmriprep) @@ -314,20 +315,20 @@ def convert_hcp_to_bids_single_subject(in_dir, out_dir, sub_ent): prefix=func_prefix, work_dir=work_dir, bold_file=bold_nifti_orig, - brainmask_file=os.path.join(task_dir_orig, "brainmask_fs.2.nii.gz"), + brainmask_file=os.path.join(task_dir_orig, 'brainmask_fs.2.nii.gz'), csf_mask_file=csf_mask, wm_mask_file=wm_mask, ) # Make figures - figdir = os.path.join(subject_dir_bids, "figures") + figdir = os.path.join(subject_dir_bids, 'figures') os.makedirs(figdir, exist_ok=True) bbref_fig_fmriprep = os.path.join( figdir, - 
f"{func_prefix}_desc-bbregister_bold.svg", + f'{func_prefix}_desc-bbregister_bold.svg', ) - t1w = os.path.join(anat_dir_orig, "T1w.nii.gz") - ribbon = os.path.join(anat_dir_orig, "ribbon.nii.gz") + t1w = os.path.join(anat_dir_orig, 'T1w.nii.gz') + ribbon = os.path.join(anat_dir_orig, 'ribbon.nii.gz') bbref_fig_fmriprep = plot_bbreg( fixed_image=t1w, moving_image=sbref_orig, @@ -335,25 +336,25 @@ def convert_hcp_to_bids_single_subject(in_dir, out_dir, sub_ent): contour=ribbon, ) - LOGGER.info(f"Finished {base_task_name}") + LOGGER.info(f'Finished {base_task_name}') - LOGGER.info("Finished collecting functional files") + LOGGER.info('Finished collecting functional files') # Copy HCP files to fMRIPrep folder - LOGGER.info("Copying files") + LOGGER.info('Copying files') copy_files_in_dict(copy_dictionary) - LOGGER.info("Finished copying files") + LOGGER.info('Finished copying files') # Write the dataset description out last dataset_description_dict = { - "Name": "HCP", - "BIDSVersion": "1.9.0", - "DatasetType": "derivative", - "GeneratedBy": [ + 'Name': 'HCP', + 'BIDSVersion': '1.9.0', + 'DatasetType': 'derivative', + 'GeneratedBy': [ { - "Name": "HCP", - "Version": "unknown", - "CodeURL": "https://github.com/Washington-University/HCPpipelines", + 'Name': 'HCP', + 'Version': 'unknown', + 'CodeURL': 'https://github.com/Washington-University/HCPpipelines', }, ], } @@ -369,7 +370,7 @@ def convert_hcp_to_bids_single_subject(in_dir, out_dir, sub_ent): scans_dict[item] = key scans_tuple = tuple(scans_dict.items()) - scans_df = pd.DataFrame(scans_tuple, columns=["filename", "source_file"]) - scans_tsv = os.path.join(subject_dir_bids, f"{subses_ents}_scans.tsv") - scans_df.to_csv(scans_tsv, sep="\t", index=False) - LOGGER.info("Conversion completed") + scans_df = pd.DataFrame(scans_tuple, columns=['filename', 'source_file']) + scans_tsv = os.path.join(subject_dir_bids, f'{subses_ents}_scans.tsv') + scans_df.to_csv(scans_tsv, sep='\t', index=False) + LOGGER.info('Conversion completed') diff --git a/xcp_d/ingression/ukbiobank.py b/xcp_d/ingression/ukbiobank.py index 7a6d58b41..56bd17be0 100644 --- a/xcp_d/ingression/ukbiobank.py +++ b/xcp_d/ingression/ukbiobank.py @@ -16,10 +16,10 @@ ) from xcp_d.utils.filemanip import ensure_list -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') -def convert_ukb2bids(in_dir, out_dir, participant_ids=None, bids_filters={}): +def convert_ukb2bids(in_dir, out_dir, participant_ids=None, bids_filters=None): """Convert UK Biobank derivatives to BIDS-compliant derivatives. Parameters @@ -45,17 +45,18 @@ def convert_ukb2bids(in_dir, out_dir, participant_ids=None, bids_filters={}): ----- Since the T1w is in standard space already, we use identity transforms. 
""" - LOGGER.warning("convert_ukb2bids is an experimental function.") + LOGGER.warning('convert_ukb2bids is an experimental function.') in_dir = os.path.abspath(in_dir) out_dir = os.path.abspath(out_dir) + bids_filters = bids_filters or {} if participant_ids is None: - subject_folders = sorted(glob.glob(os.path.join(in_dir, "*_*_2_0"))) + subject_folders = sorted(glob.glob(os.path.join(in_dir, '*_*_2_0'))) subject_folders = [ subject_folder for subject_folder in subject_folders if os.path.isdir(subject_folder) ] participant_ids = [ - os.path.basename(subject_folder).split("_")[0] for subject_folder in subject_folders + os.path.basename(subject_folder).split('_')[0] for subject_folder in subject_folders ] all_subject_ids = [] for subject_id in participant_ids: @@ -65,21 +66,21 @@ def convert_ukb2bids(in_dir, out_dir, participant_ids=None, bids_filters={}): participant_ids = all_subject_ids if len(participant_ids) == 0: - raise ValueError(f"No subject found in {in_dir}") + raise ValueError(f'No subject found in {in_dir}') else: participant_ids = ensure_list(participant_ids) for subject_id in participant_ids: - LOGGER.info(f"Converting {subject_id}") - session_ids = ensure_list(bids_filters.get("bold", {}).get("session", "*")) + LOGGER.info(f'Converting {subject_id}') + session_ids = ensure_list(bids_filters.get('bold', {}).get('session', '*')) subject_dirs = [] for session_id in session_ids: - subject_dir = sorted(glob.glob(os.path.join(in_dir, f"{subject_id}_{session_id}_2_0"))) + subject_dir = sorted(glob.glob(os.path.join(in_dir, f'{subject_id}_{session_id}_2_0'))) subject_dirs += subject_dir for subject_dir in subject_dirs: - session_id = os.path.basename(subject_dir).split("_")[1] + session_id = os.path.basename(subject_dir).split('_')[1] convert_ukb_to_bids_single_subject( in_dir=subject_dirs[0], out_dir=out_dir, @@ -130,31 +131,31 @@ def convert_ukb_to_bids_single_subject(in_dir, out_dir, sub_id, ses_id): └── T1_brain_to_MNI.nii.gz """ assert isinstance(in_dir, str) - assert os.path.isdir(in_dir), f"Folder DNE: {in_dir}" + assert os.path.isdir(in_dir), f'Folder DNE: {in_dir}' assert isinstance(out_dir, str) assert isinstance(sub_id, str) assert isinstance(ses_id, str) - subses_ents = f"sub-{sub_id}_ses-{ses_id}" - - task_dir_orig = os.path.join(in_dir, "fMRI", "rfMRI.ica") - bold_file = os.path.join(task_dir_orig, "filtered_func_data_clean.nii.gz") - assert os.path.isfile(bold_file), f"File DNE: {bold_file}" - bold_json = os.path.join(in_dir, "fMRI", "rfMRI.json") - assert os.path.isfile(bold_json), f"File DNE: {bold_json}" - boldref_file = os.path.join(task_dir_orig, "example_func.nii.gz") - assert os.path.isfile(boldref_file), f"File DNE: {boldref_file}" - brainmask_file = os.path.join(task_dir_orig, "mask.nii.gz") - assert os.path.isfile(brainmask_file), f"File DNE: {brainmask_file}" - t1w = os.path.join(in_dir, "T1", "T1_brain_to_MNI.nii.gz") - assert os.path.isfile(t1w), f"File DNE: {t1w}" - warp_file = os.path.join(task_dir_orig, "reg", "example_func2standard_warp.nii.gz") - assert os.path.isfile(warp_file), f"File DNE: {warp_file}" - - func_prefix = f"sub-{sub_id}_ses-{ses_id}_task-rest" - subject_dir_bids = os.path.join(out_dir, f"sub-{sub_id}", f"ses-{ses_id}") - anat_dir_bids = os.path.join(subject_dir_bids, "anat") - func_dir_bids = os.path.join(subject_dir_bids, "func") - work_dir = os.path.join(subject_dir_bids, "work") + subses_ents = f'sub-{sub_id}_ses-{ses_id}' + + task_dir_orig = os.path.join(in_dir, 'fMRI', 'rfMRI.ica') + bold_file = os.path.join(task_dir_orig, 
'filtered_func_data_clean.nii.gz') + assert os.path.isfile(bold_file), f'File DNE: {bold_file}' + bold_json = os.path.join(in_dir, 'fMRI', 'rfMRI.json') + assert os.path.isfile(bold_json), f'File DNE: {bold_json}' + boldref_file = os.path.join(task_dir_orig, 'example_func.nii.gz') + assert os.path.isfile(boldref_file), f'File DNE: {boldref_file}' + brainmask_file = os.path.join(task_dir_orig, 'mask.nii.gz') + assert os.path.isfile(brainmask_file), f'File DNE: {brainmask_file}' + t1w = os.path.join(in_dir, 'T1', 'T1_brain_to_MNI.nii.gz') + assert os.path.isfile(t1w), f'File DNE: {t1w}' + warp_file = os.path.join(task_dir_orig, 'reg', 'example_func2standard_warp.nii.gz') + assert os.path.isfile(warp_file), f'File DNE: {warp_file}' + + func_prefix = f'sub-{sub_id}_ses-{ses_id}_task-rest' + subject_dir_bids = os.path.join(out_dir, f'sub-{sub_id}', f'ses-{ses_id}') + anat_dir_bids = os.path.join(subject_dir_bids, 'anat') + func_dir_bids = os.path.join(subject_dir_bids, 'func') + work_dir = os.path.join(subject_dir_bids, 'work') os.makedirs(anat_dir_bids, exist_ok=True) os.makedirs(func_dir_bids, exist_ok=True) os.makedirs(work_dir, exist_ok=True) @@ -168,24 +169,24 @@ def convert_ukb_to_bids_single_subject(in_dir, out_dir, sub_id, ses_id): brainmask_file=brainmask_file, ) - dataset_description_fmriprep = os.path.join(out_dir, "dataset_description.json") + dataset_description_fmriprep = os.path.join(out_dir, 'dataset_description.json') if os.path.isfile(dataset_description_fmriprep): - LOGGER.info("Converted dataset already exists. Skipping conversion.") + LOGGER.info('Converted dataset already exists. Skipping conversion.') return - VOLSPACE = "MNI152NLin6Asym" + VOLSPACE = 'MNI152NLin6Asym' # Warp BOLD, T1w, and brainmask to MNI152NLin6Asym # We use FSL's MNI152NLin6Asym 2 mm3 template instead of TemplateFlow's version, # because FSL uses LAS+ orientation, while TemplateFlow uses RAS+. 
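The comment above keeps the FSL template because its on-disk orientation (LAS+) differs from TemplateFlow's (RAS+). Orientation can be checked directly from an image's affine with nibabel; a small sketch, with a hypothetical file path:

    import nibabel as nb

    img = nb.load('MNI152_T1_2mm.nii.gz')  # hypothetical path to the template being checked
    print(nb.aff2axcodes(img.affine))      # e.g. ('L', 'A', 'S') vs. ('R', 'A', 'S')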
- template_file = str(load_data("MNI152_T1_2mm.nii.gz")) + template_file = str(load_data('MNI152_T1_2mm.nii.gz')) copy_dictionary = {} warp_bold_to_std = ApplyWarp( - interp="spline", - output_type="NIFTI_GZ", + interp='spline', + output_type='NIFTI_GZ', ref_file=template_file, in_file=bold_file, field_file=warp_file, @@ -194,32 +195,32 @@ def convert_ukb_to_bids_single_subject(in_dir, out_dir, sub_id, ses_id): warp_bold_to_std_results = warp_bold_to_std.run(cwd=work_dir) bold_nifti_fmriprep = os.path.join( func_dir_bids, - f"{func_prefix}_space-{VOLSPACE}_desc-preproc_bold.nii.gz", + f'{func_prefix}_space-{VOLSPACE}_desc-preproc_bold.nii.gz', ) copy_dictionary[warp_bold_to_std_results.outputs.out_file] = [bold_nifti_fmriprep] # Extract metadata for JSON file - with open(bold_json, "r") as fo: + with open(bold_json) as fo: bold_metadata = json.load(fo) # Keep only the relevant fields keep_keys = [ - "FlipAngle", - "EchoTime", - "Manufacturer", - "ManufacturersModelName", - "EffectiveEchoSpacing", - "RepetitionTime", - "PhaseEncodingDirection", + 'FlipAngle', + 'EchoTime', + 'Manufacturer', + 'ManufacturersModelName', + 'EffectiveEchoSpacing', + 'RepetitionTime', + 'PhaseEncodingDirection', ] bold_metadata = {k: bold_metadata[k] for k in keep_keys if k in bold_metadata} - bold_metadata["TaskName"] = "resting state" - bold_nifti_json_fmriprep = bold_nifti_fmriprep.replace(".nii.gz", ".json") + bold_metadata['TaskName'] = 'resting state' + bold_nifti_json_fmriprep = bold_nifti_fmriprep.replace('.nii.gz', '.json') write_json(bold_metadata, bold_nifti_json_fmriprep) warp_brainmask_to_std = ApplyWarp( - interp="nn", - output_type="NIFTI_GZ", + interp='nn', + output_type='NIFTI_GZ', ref_file=template_file, in_file=brainmask_file, field_file=warp_file, @@ -228,21 +229,21 @@ def convert_ukb_to_bids_single_subject(in_dir, out_dir, sub_id, ses_id): copy_dictionary[warp_brainmask_to_std_results.outputs.out_file] = [ os.path.join( func_dir_bids, - f"{func_prefix}_space-{VOLSPACE}_desc-brain_mask.nii.gz", + f'{func_prefix}_space-{VOLSPACE}_desc-brain_mask.nii.gz', ) ] # Use the brain mask as the anatomical brain mask too. copy_dictionary[warp_brainmask_to_std_results.outputs.out_file].append( os.path.join( anat_dir_bids, - f"{subses_ents}_space-{VOLSPACE}_desc-brain_mask.nii.gz", + f'{subses_ents}_space-{VOLSPACE}_desc-brain_mask.nii.gz', ) ) # Warp the reference file to MNI space. warp_boldref_to_std = ApplyWarp( - interp="spline", - output_type="NIFTI_GZ", + interp='spline', + output_type='NIFTI_GZ', ref_file=template_file, in_file=boldref_file, field_file=warp_file, @@ -250,54 +251,54 @@ def convert_ukb_to_bids_single_subject(in_dir, out_dir, sub_id, ses_id): warp_boldref_to_std_results = warp_boldref_to_std.run(cwd=work_dir) boldref_nifti_fmriprep = os.path.join( func_dir_bids, - f"{func_prefix}_space-{VOLSPACE}_boldref.nii.gz", + f'{func_prefix}_space-{VOLSPACE}_boldref.nii.gz', ) copy_dictionary[warp_boldref_to_std_results.outputs.out_file] = [boldref_nifti_fmriprep] # The MNI-space anatomical image. copy_dictionary[t1w] = [ - os.path.join(anat_dir_bids, f"{subses_ents}_space-{VOLSPACE}_desc-preproc_T1w.nii.gz") + os.path.join(anat_dir_bids, f'{subses_ents}_space-{VOLSPACE}_desc-preproc_T1w.nii.gz') ] # The identity xform is used in place of any actual ones. 
- identity_xfm = str(load_data("transform/itkIdentityTransform.txt")) + identity_xfm = str(load_data('transform/itkIdentityTransform.txt')) copy_dictionary[identity_xfm] = [] t1w_to_template_fmriprep = os.path.join( anat_dir_bids, - f"{subses_ents}_from-T1w_to-{VOLSPACE}_mode-image_xfm.txt", + f'{subses_ents}_from-T1w_to-{VOLSPACE}_mode-image_xfm.txt', ) copy_dictionary[identity_xfm].append(t1w_to_template_fmriprep) template_to_t1w_fmriprep = os.path.join( anat_dir_bids, - f"{subses_ents}_from-{VOLSPACE}_to-T1w_mode-image_xfm.txt", + f'{subses_ents}_from-{VOLSPACE}_to-T1w_mode-image_xfm.txt', ) copy_dictionary[identity_xfm].append(template_to_t1w_fmriprep) - LOGGER.info("Finished collecting functional files") + LOGGER.info('Finished collecting functional files') # Copy UK Biobank files to fMRIPrep folder - LOGGER.info("Copying files") + LOGGER.info('Copying files') copy_files_in_dict(copy_dictionary) - LOGGER.info("Finished copying files") + LOGGER.info('Finished copying files') # Write the dataset description out last dataset_description_dict = { - "Name": "UK Biobank", - "BIDSVersion": "1.9.0", - "DatasetType": "derivative", - "GeneratedBy": [ + 'Name': 'UK Biobank', + 'BIDSVersion': '1.9.0', + 'DatasetType': 'derivative', + 'GeneratedBy': [ { - "Name": "UK Biobank", - "Version": "unknown", - "CodeURL": "https://github.com/ucam-department-of-psychiatry/UKB", + 'Name': 'UK Biobank', + 'Version': 'unknown', + 'CodeURL': 'https://github.com/ucam-department-of-psychiatry/UKB', }, ], } if not os.path.isfile(dataset_description_fmriprep): - LOGGER.info(f"Writing dataset description to {dataset_description_fmriprep}") + LOGGER.info(f'Writing dataset description to {dataset_description_fmriprep}') write_json(dataset_description_dict, dataset_description_fmriprep) # Write out the mapping from UK Biobank to fMRIPrep @@ -307,7 +308,7 @@ def convert_ukb_to_bids_single_subject(in_dir, out_dir, sub_id, ses_id): scans_dict[item] = key scans_tuple = tuple(scans_dict.items()) - scans_df = pd.DataFrame(scans_tuple, columns=["filename", "source_file"]) - scans_tsv = os.path.join(subject_dir_bids, f"{subses_ents}_scans.tsv") - scans_df.to_csv(scans_tsv, sep="\t", index=False) - LOGGER.info("Conversion completed") + scans_df = pd.DataFrame(scans_tuple, columns=['filename', 'source_file']) + scans_tsv = os.path.join(subject_dir_bids, f'{subses_ents}_scans.tsv') + scans_df.to_csv(scans_tsv, sep='\t', index=False) + LOGGER.info('Conversion completed') diff --git a/xcp_d/ingression/utils.py b/xcp_d/ingression/utils.py index 89d33eb39..cd2320c38 100644 --- a/xcp_d/ingression/utils.py +++ b/xcp_d/ingression/utils.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Functions to support ingression of non-BIDS preprocessing derivatives.""" + import json import os @@ -11,26 +12,26 @@ from xcp_d.interfaces.workbench import CiftiCreateDenseScalar -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') def collect_anatomical_files(anat_dir_orig, anat_dir_bids, base_anatomical_ents): """Collect anatomical files from ABCD or HCP-YA derivatives.""" ANAT_DICT = { # XXX: Why have T1w here and T1w_restore for HCP? 
- "T1w.nii.gz": "desc-preproc_T1w.nii.gz", - "brainmask_fs.nii.gz": "desc-brain_mask.nii.gz", - "ribbon.nii.gz": "desc-ribbon_T1w.nii.gz", + 'T1w.nii.gz': 'desc-preproc_T1w.nii.gz', + 'brainmask_fs.nii.gz': 'desc-brain_mask.nii.gz', + 'ribbon.nii.gz': 'desc-ribbon_T1w.nii.gz', } copy_dictionary = {} for in_str, out_str in ANAT_DICT.items(): anat_orig = os.path.join(anat_dir_orig, in_str) - anat_fmriprep = os.path.join(anat_dir_bids, f"{base_anatomical_ents}_{out_str}") + anat_fmriprep = os.path.join(anat_dir_bids, f'{base_anatomical_ents}_{out_str}') if os.path.isfile(anat_orig): copy_dictionary[anat_orig] = [anat_fmriprep] else: - LOGGER.warning(f"File DNE: {anat_orig}") + LOGGER.warning(f'File DNE: {anat_orig}') return copy_dictionary @@ -38,25 +39,25 @@ def collect_anatomical_files(anat_dir_orig, anat_dir_bids, base_anatomical_ents) def collect_meshes(anat_dir_orig, anat_dir_bids, sub_id, subses_ents): """Collect mesh files from ABCD or HCP-YA derivatives.""" SURFACE_DICT = { - "{hemi}.pial.32k_fs_LR.surf.gii": "hemi-{hemi}_pial.surf.gii", - "{hemi}.white.32k_fs_LR.surf.gii": "hemi-{hemi}_smoothwm.surf.gii", + '{hemi}.pial.32k_fs_LR.surf.gii': 'hemi-{hemi}_pial.surf.gii', + '{hemi}.white.32k_fs_LR.surf.gii': 'hemi-{hemi}_smoothwm.surf.gii', } - fsaverage_dir_orig = os.path.join(anat_dir_orig, "fsaverage_LR32k") + fsaverage_dir_orig = os.path.join(anat_dir_orig, 'fsaverage_LR32k') copy_dictionary = {} for in_str, out_str in SURFACE_DICT.items(): - for hemi in ["L", "R"]: + for hemi in ['L', 'R']: hemi_in_str = in_str.format(hemi=hemi) hemi_out_str = out_str.format(hemi=hemi) - surf_orig = os.path.join(fsaverage_dir_orig, f"{sub_id}.{hemi_in_str}") + surf_orig = os.path.join(fsaverage_dir_orig, f'{sub_id}.{hemi_in_str}') surf_fmriprep = os.path.join( anat_dir_bids, - f"{subses_ents}_space-fsLR_den-32k_{hemi_out_str}", + f'{subses_ents}_space-fsLR_den-32k_{hemi_out_str}', ) if os.path.isfile(surf_orig): copy_dictionary[surf_orig] = [surf_fmriprep] else: - LOGGER.warning(f"File DNE: {surf_orig}") + LOGGER.warning(f'File DNE: {surf_orig}') return copy_dictionary @@ -64,26 +65,26 @@ def collect_meshes(anat_dir_orig, anat_dir_bids, sub_id, subses_ents): def collect_morphs(anat_dir_orig, anat_dir_bids, sub_id, subses_ents): """Collect and convert morphometry files to CIFTIs.""" SURFACE_DICT = { - "thickness.32k_fs_LR.shape.gii": "thickness", - "corrThickness.32k_fs_LR.shape.gii": "desc-corrected_thickness", - "curvature.32k_fs_LR.shape.gii": "curv", - "sulc.32k_fs_LR.shape.gii": "sulc", - "MyelinMap.32k_fs_LR.func.gii": "myelinw", - "SmoothedMyelinMap.32k_fs_LR.func.gii": "desc-smoothed_myelinw", + 'thickness.32k_fs_LR.shape.gii': 'thickness', + 'corrThickness.32k_fs_LR.shape.gii': 'desc-corrected_thickness', + 'curvature.32k_fs_LR.shape.gii': 'curv', + 'sulc.32k_fs_LR.shape.gii': 'sulc', + 'MyelinMap.32k_fs_LR.func.gii': 'myelinw', + 'SmoothedMyelinMap.32k_fs_LR.func.gii': 'desc-smoothed_myelinw', } - fsaverage_dir_orig = os.path.join(anat_dir_orig, "fsaverage_LR32k") + fsaverage_dir_orig = os.path.join(anat_dir_orig, 'fsaverage_LR32k') mapping_dictionary = {} for in_str, out_str in SURFACE_DICT.items(): - lh_file = os.path.join(fsaverage_dir_orig, f"{sub_id}.L.{in_str}") - rh_file = os.path.join(fsaverage_dir_orig, f"{sub_id}.R.{in_str}") + lh_file = os.path.join(fsaverage_dir_orig, f'{sub_id}.L.{in_str}') + rh_file = os.path.join(fsaverage_dir_orig, f'{sub_id}.R.{in_str}') out_file = os.path.join( anat_dir_bids, - f"{subses_ents}_space-fsLR_den-91k_{out_str}.dscalar.nii", + 
f'{subses_ents}_space-fsLR_den-91k_{out_str}.dscalar.nii', ) if not os.path.isfile(lh_file) or not os.path.isfile(rh_file): - LOGGER.warning(f"File(s) DNE:\n\t{lh_file}\n\t{rh_file}") + LOGGER.warning(f'File(s) DNE:\n\t{lh_file}\n\t{rh_file}') continue interface = CiftiCreateDenseScalar( @@ -132,34 +133,34 @@ def collect_hcp_confounds( """ import pandas as pd - mvreg_file = os.path.join(task_dir_orig, "Movement_Regressors.txt") + mvreg_file = os.path.join(task_dir_orig, 'Movement_Regressors.txt') assert os.path.isfile(mvreg_file) - rmsd_file = os.path.join(task_dir_orig, "Movement_AbsoluteRMS.txt") + rmsd_file = os.path.join(task_dir_orig, 'Movement_AbsoluteRMS.txt') assert os.path.isfile(rmsd_file) - mvreg = pd.read_csv(mvreg_file, header=None, delimiter=r"\s+") + mvreg = pd.read_csv(mvreg_file, header=None, delimiter=r'\s+') # Only use the first six columns mvreg = mvreg.iloc[:, 0:6] - mvreg.columns = ["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"] + mvreg.columns = ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z'] # convert rotations from degrees to radians - rot_columns = [c for c in mvreg.columns if c.startswith("rot")] + rot_columns = [c for c in mvreg.columns if c.startswith('rot')] for col in rot_columns: mvreg[col] = mvreg[col] * np.pi / 180 # get derivatives of motion columns columns = mvreg.columns.tolist() for col in columns: - mvreg[f"{col}_derivative1"] = mvreg[col].diff() + mvreg[f'{col}_derivative1'] = mvreg[col].diff() # get powers columns = mvreg.columns.tolist() for col in columns: - mvreg[f"{col}_power2"] = mvreg[col] ** 2 + mvreg[f'{col}_power2'] = mvreg[col] ** 2 # Use dummy column for framewise displacement, which will be recalculated by XCP-D. - mvreg["framewise_displacement"] = 0 + mvreg['framewise_displacement'] = 0 # use masks: brain, csf, and wm mask to extract timeseries mean_gs = extract_mean_signal( @@ -180,22 +181,22 @@ def collect_hcp_confounds( rmsd = np.loadtxt(rmsd_file) brainreg = pd.DataFrame( - {"global_signal": mean_gs, "white_matter": mean_wm, "csf": mean_csf, "rmsd": rmsd} + {'global_signal': mean_gs, 'white_matter': mean_wm, 'csf': mean_csf, 'rmsd': rmsd} ) # get derivatives and powers - brainreg["global_signal_derivative1"] = brainreg["global_signal"].diff() - brainreg["white_matter_derivative1"] = brainreg["white_matter"].diff() - brainreg["csf_derivative1"] = brainreg["csf"].diff() + brainreg['global_signal_derivative1'] = brainreg['global_signal'].diff() + brainreg['white_matter_derivative1'] = brainreg['white_matter'].diff() + brainreg['csf_derivative1'] = brainreg['csf'].diff() - brainreg["global_signal_derivative1_power2"] = brainreg["global_signal_derivative1"] ** 2 - brainreg["global_signal_power2"] = brainreg["global_signal"] ** 2 + brainreg['global_signal_derivative1_power2'] = brainreg['global_signal_derivative1'] ** 2 + brainreg['global_signal_power2'] = brainreg['global_signal'] ** 2 - brainreg["white_matter_derivative1_power2"] = brainreg["white_matter_derivative1"] ** 2 - brainreg["white_matter_power2"] = brainreg["white_matter"] ** 2 + brainreg['white_matter_derivative1_power2'] = brainreg['white_matter_derivative1'] ** 2 + brainreg['white_matter_power2'] = brainreg['white_matter'] ** 2 - brainreg["csf_derivative1_power2"] = brainreg["csf_derivative1"] ** 2 - brainreg["csf_power2"] = brainreg["csf"] ** 2 + brainreg['csf_derivative1_power2'] = brainreg['csf_derivative1'] ** 2 + brainreg['csf_power2'] = brainreg['csf'] ** 2 # Merge the two DataFrames confounds_df = pd.concat([mvreg, brainreg], axis=1) @@ 
-203,15 +204,15 @@ def collect_hcp_confounds( # write out the confounds regressors_tsv_fmriprep = os.path.join( out_dir, - f"{prefix}_desc-confounds_timeseries.tsv", + f'{prefix}_desc-confounds_timeseries.tsv', ) - confounds_df.to_csv(regressors_tsv_fmriprep, sep="\t", na_rep="n/a", index=False) + confounds_df.to_csv(regressors_tsv_fmriprep, sep='\t', na_rep='n/a', index=False) regressors_json_fmriprep = os.path.join( out_dir, - f"{prefix}_desc-confounds_timeseries.json", + f'{prefix}_desc-confounds_timeseries.json', ) - confounds_dict = {col: {"Description": ""} for col in confounds_df.columns} + confounds_dict = {col: {'Description': ''} for col in confounds_df.columns} write_json(confounds_dict, regressors_json_fmriprep) @@ -246,79 +247,79 @@ def collect_ukbiobank_confounds( import pandas as pd # Find necessary files - par_file = os.path.join(task_dir_orig, "mc", "prefiltered_func_data_mcf.par") - assert os.path.isfile(par_file), os.listdir(os.path.join(task_dir_orig, "mc")) - rmsd_file = os.path.join(task_dir_orig, "mc", "prefiltered_func_data_mcf_abs.rms") + par_file = os.path.join(task_dir_orig, 'mc', 'prefiltered_func_data_mcf.par') + assert os.path.isfile(par_file), os.listdir(os.path.join(task_dir_orig, 'mc')) + rmsd_file = os.path.join(task_dir_orig, 'mc', 'prefiltered_func_data_mcf_abs.rms') assert os.path.isfile(rmsd_file) tmpdir = os.path.join(work_dir, prefix) os.makedirs(tmpdir, exist_ok=True) # Collect motion confounds and their expansions - normalize_motion = NormalizeMotionParams(format="FSL", in_file=par_file) + normalize_motion = NormalizeMotionParams(format='FSL', in_file=par_file) normalize_motion_results = normalize_motion.run(cwd=tmpdir) motion_data = np.loadtxt(normalize_motion_results.outputs.out_file) confounds_df = pd.DataFrame( data=motion_data, - columns=["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"], + columns=['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z'], ) columns = confounds_df.columns.tolist() for col in columns: - new_col = f"{col}_derivative1" + new_col = f'{col}_derivative1' confounds_df[new_col] = confounds_df[col].diff() columns = confounds_df.columns.tolist() for col in columns: - new_col = f"{col}_power2" + new_col = f'{col}_power2' confounds_df[new_col] = confounds_df[col] ** 2 # Use dummy column for framewise displacement, which will be recalculated by XCP-D. - confounds_df["framewise_displacement"] = 0 + confounds_df['framewise_displacement'] = 0 # Add RMS rmsd = np.loadtxt(rmsd_file) - confounds_df["rmsd"] = rmsd + confounds_df['rmsd'] = rmsd # Collect global signal (the primary regressor used for denoising UKB data, # since the data are already denoised). 
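The confound expansion used in these converters (a temporal derivative of each column, then the square of every column including the derivatives) can be sketched with a toy DataFrame; the column names and values are illustrative:

    import pandas as pd

    confounds_df = pd.DataFrame({'trans_x': [0.0, 0.1, 0.3], 'rot_x': [0.00, 0.01, 0.02]})
    for col in confounds_df.columns.tolist():
        confounds_df[f'{col}_derivative1'] = confounds_df[col].diff()
    for col in confounds_df.columns.tolist():
        confounds_df[f'{col}_power2'] = confounds_df[col] ** 2
    # Eight columns result: the originals, their derivatives, and the squares of all four.
    print(confounds_df.columns.tolist())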
- confounds_df["global_signal"] = extract_mean_signal( + confounds_df['global_signal'] = extract_mean_signal( mask=brainmask_file, nifti=bold_file, work_dir=work_dir, ) # get derivatives and powers - confounds_df["global_signal_derivative1"] = confounds_df["global_signal"].diff() - confounds_df["global_signal_derivative1_power2"] = ( - confounds_df["global_signal_derivative1"] ** 2 + confounds_df['global_signal_derivative1'] = confounds_df['global_signal'].diff() + confounds_df['global_signal_derivative1_power2'] = ( + confounds_df['global_signal_derivative1'] ** 2 ) - confounds_df["global_signal_power2"] = confounds_df["global_signal"] ** 2 + confounds_df['global_signal_power2'] = confounds_df['global_signal'] ** 2 # write out the confounds regressors_tsv_fmriprep = os.path.join( out_dir, - f"{prefix}_desc-confounds_timeseries.tsv", + f'{prefix}_desc-confounds_timeseries.tsv', ) - confounds_df.to_csv(regressors_tsv_fmriprep, sep="\t", na_rep="n/a", index=False) + confounds_df.to_csv(regressors_tsv_fmriprep, sep='\t', na_rep='n/a', index=False) regressors_json_fmriprep = os.path.join( out_dir, - f"{prefix}_desc-confounds_timeseries.json", + f'{prefix}_desc-confounds_timeseries.json', ) - confounds_dict = {col: {"Description": ""} for col in confounds_df.columns} + confounds_dict = {col: {'Description': ''} for col in confounds_df.columns} write_json(confounds_dict, regressors_json_fmriprep) def extract_mean_signal(mask, nifti, work_dir): """Extract mean signal within mask from NIFTI.""" - assert os.path.isfile(mask), f"File DNE: {mask}" - assert os.path.isfile(nifti), f"File DNE: {nifti}" + assert os.path.isfile(mask), f'File DNE: {mask}' + assert os.path.isfile(nifti), f'File DNE: {nifti}' masker = maskers.NiftiMasker(mask_img=mask, memory=work_dir, memory_level=5) signals = masker.fit_transform(nifti) return np.mean(signals, axis=1) -def plot_bbreg(fixed_image, moving_image, contour, out_file="report.svg"): +def plot_bbreg(fixed_image, moving_image, contour, out_file='report.svg'): """Plot bbref_fig_fmriprep results.""" import numpy as np from niworkflows.viz.utils import compose_view, cuts_from_bbox, plot_registration @@ -326,7 +327,7 @@ def plot_bbreg(fixed_image, moving_image, contour, out_file="report.svg"): fixed_image_nii = image.load_img(fixed_image) moving_image_nii = image.load_img(moving_image) moving_image_nii = image.resample_img( - moving_image_nii, target_affine=np.eye(3), interpolation="nearest" + moving_image_nii, target_affine=np.eye(3), interpolation='nearest' ) contour_nii = image.load_img(contour) if contour is not None else None @@ -341,21 +342,21 @@ def plot_bbreg(fixed_image, moving_image, contour, out_file="report.svg"): compose_view( plot_registration( fixed_image_nii, - "fixed-image", + 'fixed-image', estimate_brightness=True, cuts=cuts, - label="fixed", + label='fixed', contour=contour_nii, - compress="auto", + compress='auto', ), plot_registration( moving_image_nii, - "moving-image", + 'moving-image', estimate_brightness=True, cuts=cuts, - label="moving", + label='moving', contour=contour_nii, - compress="auto", + compress='auto', ), out_file=out_file, ) @@ -367,11 +368,11 @@ def copy_files_in_dict(copy_dictionary): for file_orig, files_fmriprep in copy_dictionary.items(): if not isinstance(files_fmriprep, list): raise ValueError( - f"Entry for {file_orig} should be a list, but is a {type(files_fmriprep)}" + f'Entry for {file_orig} should be a list, but is a {type(files_fmriprep)}' ) if len(files_fmriprep) > 1: - LOGGER.warning(f"File used for more than one 
output: {file_orig}") + LOGGER.warning(f'File used for more than one output: {file_orig}') for file_fmriprep in files_fmriprep: copy_file(file_orig, file_fmriprep) @@ -392,7 +393,7 @@ def copy_file(src, dst): def write_json(data, outfile): """Write dictionary to JSON file.""" - with open(outfile, "w") as f: + with open(outfile, 'w') as f: json.dump(data, f, sort_keys=True, indent=4) return outfile diff --git a/xcp_d/interfaces/__init__.py b/xcp_d/interfaces/__init__.py index 5515ba1a3..64797ab98 100644 --- a/xcp_d/interfaces/__init__.py +++ b/xcp_d/interfaces/__init__.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Initialize interfaces.""" + from xcp_d.interfaces import ( ants, bids, @@ -16,15 +17,15 @@ ) __all__ = [ - "ants", - "bids", - "c3", - "connectivity", - "execsummary", - "nilearn", - "plotting", - "censoring", - "report", - "restingstate", - "workbench", + 'ants', + 'bids', + 'c3', + 'connectivity', + 'execsummary', + 'nilearn', + 'plotting', + 'censoring', + 'report', + 'restingstate', + 'workbench', ] diff --git a/xcp_d/interfaces/ants.py b/xcp_d/interfaces/ants.py index ccd6352af..fbf38b366 100644 --- a/xcp_d/interfaces/ants.py +++ b/xcp_d/interfaces/ants.py @@ -21,16 +21,16 @@ from xcp_d.utils.filemanip import fname_presuffix -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') class _ConvertTransformFileInputSpec(CommandLineInputSpec): - dimension = traits.Enum(3, 2, usedefault=True, argstr="%d", position=0) - in_transform = traits.File(exists=True, argstr="%s", mandatory=True, position=1) + dimension = traits.Enum(3, 2, usedefault=True, argstr='%d', position=0) + in_transform = traits.File(exists=True, argstr='%s', mandatory=True, position=1) out_transform = traits.File( - argstr="%s", - name_source="in_transform", - name_template="%s.txt", + argstr='%s', + name_source='in_transform', + name_template='%s.txt', keep_extension=False, position=2, exists=False, @@ -51,7 +51,7 @@ class ConvertTransformFile(CommandLine): text-formatted transform file. """ - _cmd = "ConvertTransformFile" + _cmd = 'ConvertTransformFile' input_spec = _ConvertTransformFileInputSpec output_spec = _ConvertTransformFileOutputSpec @@ -60,46 +60,46 @@ class _CompositeTransformUtilInputSpec(ANTSCommandInputSpec): """Input specification for CompositeTransformUtil.""" process = traits.Enum( - "assemble", - "disassemble", - argstr="--%s", + 'assemble', + 'disassemble', + argstr='--%s', position=1, usedefault=True, - desc="What to do with the transform inputs (assemble or disassemble)", + desc='What to do with the transform inputs (assemble or disassemble)', ) inverse = traits.Bool( False, usedefault=True, - desc="Whether to invert the order of the transform components. Not used by the command.", + desc='Whether to invert the order of the transform components. 
Not used by the command.', ) out_file = File( exists=False, - argstr="%s", + argstr='%s', position=2, - desc="Output file path (only used for disassembly).", + desc='Output file path (only used for disassembly).', ) in_file = InputMultiPath( File(exists=True), mandatory=True, - argstr="%s...", + argstr='%s...', position=3, - desc="Input transform file(s)", + desc='Input transform file(s)', ) output_prefix = Str( - "transform", + 'transform', usedefault=True, - argstr="%s", + argstr='%s', position=4, - desc="A prefix that is prepended to all output files (only used for assembly).", + desc='A prefix that is prepended to all output files (only used for assembly).', ) class _CompositeTransformUtilOutputSpec(TraitedSpec): """Output specification for CompositeTransformUtil.""" - affine_transform = File(desc="Affine transform component") - displacement_field = File(desc="Displacement field component") - out_file = File(desc="Compound transformation file") + affine_transform = File(desc='Affine transform component') + displacement_field = File(desc='Displacement field component') + out_file = File(desc='Compound transformation file') class CompositeTransformUtil(ANTSCommand): @@ -129,7 +129,7 @@ class CompositeTransformUtil(ANTSCommand): >>> tran.run() # doctest: +SKIP """ - _cmd = "CompositeTransformUtil" + _cmd = 'CompositeTransformUtil' input_spec = _CompositeTransformUtilInputSpec output_spec = _CompositeTransformUtilOutputSpec @@ -142,34 +142,34 @@ def _num_threads_update(self): pass def _format_arg(self, name, spec, value): - if name == "output_prefix" and self.inputs.process == "assemble": - return "" - if name == "out_file" and self.inputs.process == "disassemble": - return "" - return super(CompositeTransformUtil, self)._format_arg(name, spec, value) + if name == 'output_prefix' and self.inputs.process == 'assemble': + return '' + if name == 'out_file' and self.inputs.process == 'disassemble': + return '' + return super()._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() if self.inputs.inverse: - if self.inputs.process == "disassemble": - outputs["affine_transform"] = os.path.abspath( - f"{self.inputs.output_prefix}_01_AffineTransform.mat" + if self.inputs.process == 'disassemble': + outputs['affine_transform'] = os.path.abspath( + f'{self.inputs.output_prefix}_01_AffineTransform.mat' ) - outputs["displacement_field"] = os.path.abspath( - f"{self.inputs.output_prefix}_00_DisplacementFieldTransform.nii.gz" + outputs['displacement_field'] = os.path.abspath( + f'{self.inputs.output_prefix}_00_DisplacementFieldTransform.nii.gz' ) - elif self.inputs.process == "assemble": - outputs["out_file"] = os.path.abspath(self.inputs.out_file) + elif self.inputs.process == 'assemble': + outputs['out_file'] = os.path.abspath(self.inputs.out_file) else: - if self.inputs.process == "disassemble": - outputs["affine_transform"] = os.path.abspath( - f"{self.inputs.output_prefix}_00_AffineTransform.mat" + if self.inputs.process == 'disassemble': + outputs['affine_transform'] = os.path.abspath( + f'{self.inputs.output_prefix}_00_AffineTransform.mat' ) - outputs["displacement_field"] = os.path.abspath( - f"{self.inputs.output_prefix}_01_DisplacementFieldTransform.nii.gz" + outputs['displacement_field'] = os.path.abspath( + f'{self.inputs.output_prefix}_01_DisplacementFieldTransform.nii.gz' ) - elif self.inputs.process == "assemble": - outputs["out_file"] = os.path.abspath(self.inputs.out_file) + elif self.inputs.process == 'assemble': + outputs['out_file'] = 
os.path.abspath(self.inputs.out_file) return outputs @@ -177,17 +177,17 @@ def _list_outputs(self): class _ApplyTransformsInputSpec(_FixTraitApplyTransformsInputSpec): # Nipype's version doesn't have GenericLabel interpolation = traits.Enum( - "Linear", - "NearestNeighbor", - "CosineWindowedSinc", - "WelchWindowedSinc", - "HammingWindowedSinc", - "LanczosWindowedSinc", - "MultiLabel", - "Gaussian", - "BSpline", - "GenericLabel", - argstr="%s", + 'Linear', + 'NearestNeighbor', + 'CosineWindowedSinc', + 'WelchWindowedSinc', + 'HammingWindowedSinc', + 'LanczosWindowedSinc', + 'MultiLabel', + 'Gaussian', + 'BSpline', + 'GenericLabel', + argstr='%s', usedefault=True, ) @@ -208,10 +208,10 @@ def _run_interface(self, runtime): if not isdefined(self.inputs.output_image): self.inputs.output_image = fname_presuffix( self.inputs.input_image, - suffix="_trans.nii.gz", + suffix='_trans.nii.gz', newpath=runtime.cwd, use_ext=False, ) - runtime = super(ApplyTransforms, self)._run_interface(runtime) + runtime = super()._run_interface(runtime) return runtime diff --git a/xcp_d/interfaces/bids.py b/xcp_d/interfaces/bids.py index ee5ef316e..f4fcd7923 100644 --- a/xcp_d/interfaces/bids.py +++ b/xcp_d/interfaces/bids.py @@ -24,18 +24,18 @@ from xcp_d.utils.bids import _get_bidsuris, get_entity # NOTE: Modified for xcpd's purposes -xcp_d_spec = loads(load_data("xcp_d_bids_config.json").read_text()) -bids_config = Config.load("bids") -deriv_config = Config.load("derivatives") +xcp_d_spec = loads(load_data('xcp_d_bids_config.json').read_text()) +bids_config = Config.load('bids') +deriv_config = Config.load('derivatives') -xcp_d_entities = {v["name"]: v["pattern"] for v in xcp_d_spec["entities"]} +xcp_d_entities = {v['name']: v['pattern'] for v in xcp_d_spec['entities']} merged_entities = {**bids_config.entities, **deriv_config.entities} merged_entities = {k: v.pattern for k, v in merged_entities.items()} merged_entities = {**merged_entities, **xcp_d_entities} -merged_entities = [{"name": k, "pattern": v} for k, v in merged_entities.items()] -config_entities = frozenset({e["name"] for e in merged_entities}) +merged_entities = [{'name': k, 'pattern': v} for k, v in merged_entities.items()] +config_entities = frozenset({e['name'] for e in merged_entities}) -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') class DerivativesDataSink(BaseDerivativesDataSink): @@ -44,40 +44,40 @@ class DerivativesDataSink(BaseDerivativesDataSink): A child class of the niworkflows DerivativesDataSink, using xcp_d's configuration files. 
""" - out_path_base = "" + out_path_base = '' _allowed_entities = set(config_entities) _config_entities = config_entities _config_entities_dict = merged_entities - _file_patterns = xcp_d_spec["default_path_patterns"] + _file_patterns = xcp_d_spec['default_path_patterns'] class _CollectRegistrationFilesInputSpec(BaseInterfaceInputSpec): software = traits.Enum( - "FreeSurfer", - "MCRIBS", + 'FreeSurfer', + 'MCRIBS', required=True, - desc="The software used for segmentation.", + desc='The software used for segmentation.', ) hemisphere = traits.Enum( - "L", - "R", + 'L', + 'R', required=True, - desc="The hemisphere being used.", + desc='The hemisphere being used.', ) class _CollectRegistrationFilesOutputSpec(TraitedSpec): source_sphere = File( exists=True, - desc="Source-space sphere (namely, fsaverage).", + desc='Source-space sphere (namely, fsaverage).', ) target_sphere = File( exists=True, - desc="Target-space sphere (fsLR for FreeSurfer, dhcpAsym for MCRIBS).", + desc='Target-space sphere (fsLR for FreeSurfer, dhcpAsym for MCRIBS).', ) sphere_to_sphere = File( exists=True, - desc="Warp file going from source space to target space.", + desc='Warp file going from source space to target space.', ) @@ -94,76 +94,76 @@ def _run_interface(self, runtime): hemisphere = self.inputs.hemisphere - if self.inputs.software == "FreeSurfer": + if self.inputs.software == 'FreeSurfer': # Load the fsaverage-164k sphere # FreeSurfer: tpl-fsaverage_hemi-?_den-164k_sphere.surf.gii - self._results["source_sphere"] = str( + self._results['source_sphere'] = str( get_template( - template="fsaverage", + template='fsaverage', space=None, hemi=hemisphere, - density="164k", + density='164k', desc=None, - suffix="sphere", + suffix='sphere', ) ) # TODO: Collect from templateflow once it's uploaded. # FreeSurfer: fs_?/fs_?-to-fs_LR_fsaverage.?_LR.spherical_std.164k_fs_?.surf.gii # Should be tpl-fsLR_hemi-?_space-fsaverage_den-164k_sphere.surf.gii on TemplateFlow - self._results["sphere_to_sphere"] = str( + self._results['sphere_to_sphere'] = str( load_data( - f"standard_mesh_atlases/fs_{hemisphere}/" - f"fs_{hemisphere}-to-fs_LR_fsaverage.{hemisphere}_LR.spherical_std." - f"164k_fs_{hemisphere}.surf.gii" + f'standard_mesh_atlases/fs_{hemisphere}/' + f'fs_{hemisphere}-to-fs_LR_fsaverage.{hemisphere}_LR.spherical_std.' 
+ f'164k_fs_{hemisphere}.surf.gii' ) ) # FreeSurfer: tpl-fsLR_hemi-?_den-32k_sphere.surf.gii - self._results["target_sphere"] = str( + self._results['target_sphere'] = str( get_template( - template="fsLR", + template='fsLR', space=None, hemi=hemisphere, - density="32k", + density='32k', desc=None, - suffix="sphere", + suffix='sphere', ) ) - elif self.inputs.software == "MCRIBS": - self._results["source_sphere"] = str( + elif self.inputs.software == 'MCRIBS': + self._results['source_sphere'] = str( get_template( - template="fsaverage", + template='fsaverage', space=None, hemi=hemisphere, - density="41k", + density='41k', desc=None, - suffix="sphere", + suffix='sphere', ), ) - self._results["sphere_to_sphere"] = str( + self._results['sphere_to_sphere'] = str( get_template( - template="dhcpAsym", - cohort="42", - space="fsaverage", + template='dhcpAsym', + cohort='42', + space='fsaverage', hemi=hemisphere, - density="41k", - desc="reg", - suffix="sphere", + density='41k', + desc='reg', + suffix='sphere', ), ) - self._results["target_sphere"] = str( + self._results['target_sphere'] = str( get_template( - template="dhcpAsym", - cohort="42", + template='dhcpAsym', + cohort='42', space=None, hemi=hemisphere, - density="32k", + density='32k', desc=None, - suffix="sphere", + suffix='sphere', ), ) @@ -177,27 +177,27 @@ class _CopyAtlasInputSpec(BaseInterfaceInputSpec): ) in_file = File( exists=True, - desc="The atlas file to copy.", + desc='The atlas file to copy.', mandatory=True, ) meta_dict = traits.Either( traits.Dict(), None, - desc="The atlas metadata dictionary.", + desc='The atlas metadata dictionary.', mandatory=False, ) output_dir = Directory( exists=True, - desc="The output directory.", + desc='The output directory.', mandatory=True, ) atlas = traits.Str( - desc="The atlas name.", + desc='The atlas name.', mandatory=True, ) Sources = traits.List( traits.Str, - desc="List of sources for the atlas.", + desc='List of sources for the atlas.', mandatory=False, ) @@ -205,7 +205,7 @@ class _CopyAtlasInputSpec(BaseInterfaceInputSpec): class _CopyAtlasOutputSpec(TraitedSpec): out_file = File( exists=True, - desc="The copied atlas file.", + desc='The copied atlas file.', ) @@ -252,30 +252,30 @@ def _run_interface(self, runtime): atlas = self.inputs.atlas Sources = self.inputs.Sources - atlas_out_dir = os.path.join(output_dir, f"atlases/atlas-{atlas}") + atlas_out_dir = os.path.join(output_dir, f'atlases/atlas-{atlas}') - if in_file.endswith(".tsv"): - out_basename = f"atlas-{atlas}_dseg" - extension = ".tsv" + if in_file.endswith('.tsv'): + out_basename = f'atlas-{atlas}_dseg' + extension = '.tsv' else: - extension = ".nii.gz" if name_source.endswith(".nii.gz") else ".dlabel.nii" - space = get_entity(name_source, "space") - res = get_entity(name_source, "res") - den = get_entity(name_source, "den") - cohort = get_entity(name_source, "cohort") - - cohort_str = f"_cohort-{cohort}" if cohort else "" - res_str = f"_res-{res}" if res else "" - den_str = f"_den-{den}" if den else "" - if extension == ".dlabel.nii": - out_basename = f"atlas-{atlas}_space-{space}{den_str}{cohort_str}_dseg" - elif extension == ".nii.gz": - out_basename = f"atlas-{atlas}_space-{space}{res_str}{cohort_str}_dseg" + extension = '.nii.gz' if name_source.endswith('.nii.gz') else '.dlabel.nii' + space = get_entity(name_source, 'space') + res = get_entity(name_source, 'res') + den = get_entity(name_source, 'den') + cohort = get_entity(name_source, 'cohort') + + cohort_str = f'_cohort-{cohort}' if cohort else '' + res_str = 
f'_res-{res}' if res else '' + den_str = f'_den-{den}' if den else '' + if extension == '.dlabel.nii': + out_basename = f'atlas-{atlas}_space-{space}{den_str}{cohort_str}_dseg' + elif extension == '.nii.gz': + out_basename = f'atlas-{atlas}_space-{space}{res_str}{cohort_str}_dseg' os.makedirs(atlas_out_dir, exist_ok=True) - out_file = os.path.join(atlas_out_dir, f"{out_basename}{extension}") + out_file = os.path.join(atlas_out_dir, f'{out_basename}{extension}') - if out_file.endswith(".nii.gz") and os.path.isfile(out_file): + if out_file.endswith('.nii.gz') and os.path.isfile(out_file): # Check that native-resolution atlas doesn't have a different resolution from the last # run's atlas. old_img = nb.load(out_file) @@ -283,7 +283,7 @@ def _run_interface(self, runtime): if not np.allclose(old_img.affine, new_img.affine): raise ValueError( f"Existing '{atlas}' atlas affine ({out_file}) is different from the input " - f"file affine ({in_file})." + f'file affine ({in_file}).' ) # Don't copy the file if it exists, to prevent any race conditions between parallel @@ -293,35 +293,35 @@ def _run_interface(self, runtime): # Only write out a sidecar if metadata are provided if meta_dict or Sources: - meta_file = os.path.join(atlas_out_dir, f"{out_basename}.json") + meta_file = os.path.join(atlas_out_dir, f'{out_basename}.json') meta_dict = meta_dict or {} meta_dict = meta_dict.copy() if Sources: - meta_dict["Sources"] = meta_dict.get("Sources", []) + Sources + meta_dict['Sources'] = meta_dict.get('Sources', []) + Sources - with open(meta_file, "w") as fo: + with open(meta_file, 'w') as fo: dump(meta_dict, fo, sort_keys=True, indent=4) - self._results["out_file"] = out_file + self._results['out_file'] = out_file return runtime class _BIDSURIInputSpec(DynamicTraitedSpec): - dataset_links = traits.Dict(mandatory=True, desc="Dataset links") - out_dir = traits.Str(mandatory=True, desc="Output directory") - metadata = traits.Dict(desc="Metadata dictionary") + dataset_links = traits.Dict(mandatory=True, desc='Dataset links') + out_dir = traits.Str(mandatory=True, desc='Output directory') + metadata = traits.Dict(desc='Metadata dictionary') field = traits.Str( - "Sources", + 'Sources', usedefault=True, - desc="Field to use for BIDS URIs in metadata dict", + desc='Field to use for BIDS URIs in metadata dict', ) class _BIDSURIOutputSpec(TraitedSpec): out = traits.List( traits.Str, - desc="BIDS URI(s) for file", + desc='BIDS URI(s) for file', ) metadata = traits.Dict( desc="Dictionary with 'Sources' field.", @@ -341,20 +341,20 @@ def __init__(self, numinputs=0, **inputs): super().__init__(**inputs) self._numinputs = numinputs if numinputs >= 1: - input_names = [f"in{i + 1}" for i in range(numinputs)] + input_names = [f'in{i + 1}' for i in range(numinputs)] else: input_names = [] add_traits(self.inputs, input_names) def _run_interface(self, runtime): - inputs = [getattr(self.inputs, f"in{i + 1}") for i in range(self._numinputs)] + inputs = [getattr(self.inputs, f'in{i + 1}') for i in range(self._numinputs)] uris = _get_bidsuris(inputs, self.inputs.dataset_links, self.inputs.out_dir) - self._results["out"] = uris + self._results['out'] = uris # Add the URIs to the metadata dictionary. 
metadata = self.inputs.metadata or {} metadata = metadata.copy() metadata[self.inputs.field] = metadata.get(self.inputs.field, []) + uris - self._results["metadata"] = metadata + self._results['metadata'] = metadata return runtime diff --git a/xcp_d/interfaces/c3.py b/xcp_d/interfaces/c3.py index 47bfd2aed..eed01ed5f 100644 --- a/xcp_d/interfaces/c3.py +++ b/xcp_d/interfaces/c3.py @@ -1,5 +1,5 @@ -# -*- coding: utf-8 -*- """Convert3D is a command-line tool for converting 3D images between common file formats.""" + import logging import os from glob import glob @@ -17,7 +17,7 @@ from xcp_d.utils.filemanip import split_filename -iflogger = logging.getLogger("interface") +iflogger = logging.getLogger('interface') class _C3dInputSpec(CommandLineInputSpec): @@ -26,98 +26,98 @@ class _C3dInputSpec(CommandLineInputSpec): in_file = InputMultiPath( File(exists=True), position=1, - argstr="%s", + argstr='%s', mandatory=True, - desc="Input file (wildcard and multiple are supported).", + desc='Input file (wildcard and multiple are supported).', ) out_file = File( exists=False, - argstr="-o %s", + argstr='-o %s', position=-1, - xor=["out_files"], - desc="Output file of last image on the stack.", + xor=['out_files'], + desc='Output file of last image on the stack.', ) out_files = InputMultiPath( File(exists=False), - argstr="-oo %s", - xor=["out_file"], + argstr='-oo %s', + xor=['out_file'], position=-1, desc=( - "Write all images on the convert3d stack as multiple files." - " Supports both list of output files or a pattern for the output" - " filenames (using %d substituion)." + 'Write all images on the convert3d stack as multiple files.' + ' Supports both list of output files or a pattern for the output' + ' filenames (using %d substitution).' ), ) pix_type = traits.Enum( - "float", - "char", - "uchar", - "short", - "ushort", - "int", - "uint", - "double", - argstr="-type %s", + 'float', + 'char', + 'uchar', + 'short', + 'ushort', + 'int', + 'uint', + 'double', + argstr='-type %s', desc=( - "Specifies the pixel type for the output image. By default," - " images are written in floating point (float) format" + 'Specifies the pixel type for the output image. By default,' + ' images are written in floating point (float) format' ), ) scale = traits.Either( traits.Int(), traits.Float(), - argstr="-scale %s", + argstr='-scale %s', desc=( - "Multiplies the intensity of each voxel in the last image on the" - " stack by the given factor." + 'Multiplies the intensity of each voxel in the last image on the' + ' stack by the given factor.' ), ) shift = traits.Either( traits.Int(), traits.Float(), - argstr="-shift %s", - desc="Adds the given constant to every voxel.", + argstr='-shift %s', + desc='Adds the given constant to every voxel.', ) interp = traits.Enum( - "Linear", - "NearestNeighbor", - "Cubic", - "Sinc", - "Gaussian", - argstr="-interpolation %s", + 'Linear', + 'NearestNeighbor', + 'Cubic', + 'Sinc', + 'Gaussian', + argstr='-interpolation %s', desc=( - "Specifies the interpolation used with -resample and other" - " commands. Default is Linear." + 'Specifies the interpolation used with -resample and other' + ' commands. Default is Linear.' ), ) resample = traits.Str( - argstr="-resample %s", + argstr='-resample %s', desc=( - "Resamples the image, keeping the bounding box the same, but" - " changing the number of voxels in the image. The dimensions can be" - " specified as a percentage, for example to double the number of voxels" - " in each direction. 
The -interpolation flag affects how sampling is" - " performed." + 'Resamples the image, keeping the bounding box the same, but' + ' changing the number of voxels in the image. The dimensions can be' + ' specified as a percentage, for example to double the number of voxels' + ' in each direction. The -interpolation flag affects how sampling is' + ' performed.' ), ) smooth = traits.Str( - argstr="-smooth %s", + argstr='-smooth %s', desc=( - "Applies Gaussian smoothing to the image. The parameter vector" - " specifies the standard deviation of the Gaussian kernel." + 'Applies Gaussian smoothing to the image. The parameter vector' + ' specifies the standard deviation of the Gaussian kernel.' ), ) multicomp_split = traits.Bool( False, usedefault=True, - argstr="-mcs", + argstr='-mcs', position=0, - desc="Enable reading of multi-component images.", + desc='Enable reading of multi-component images.', ) is_4d = traits.Bool( False, usedefault=True, - desc=("Changes command to support 4D file operations (default is false)."), + desc=('Changes command to support 4D file operations (default is false).'), ) @@ -156,16 +156,16 @@ class C3d(CommandLine): input_spec = _C3dInputSpec output_spec = _C3dOutputSpec - _cmd = "c3d" + _cmd = 'c3d' def __init__(self, **inputs): - super(C3d, self).__init__(**inputs) - self.inputs.on_trait_change(self._is_4d, "is_4d") + super().__init__(**inputs) + self.inputs.on_trait_change(self._is_4d, 'is_4d') if self.inputs.is_4d: self._is_4d() def _is_4d(self): - self._cmd = "c4d" if self.inputs.is_4d else "c3d" + self._cmd = 'c4d' if self.inputs.is_4d else 'c3d' def _run_interface(self, runtime): cmd = self._cmd @@ -173,27 +173,27 @@ def _run_interface(self, runtime): # Convert3d does not want to override file, by default # so we define a new output file self._gen_outfile() - runtime = super(C3d, self)._run_interface(runtime) + runtime = super()._run_interface(runtime) self._cmd = cmd return runtime def _gen_outfile(self): # if many infiles, raise exception - if (len(self.inputs.in_file) > 1) or ("*" in self.inputs.in_file[0]): + if (len(self.inputs.in_file) > 1) or ('*' in self.inputs.in_file[0]): raise AttributeError( - "Multiple in_files found - specify either `out_file` or `out_files`." + 'Multiple in_files found - specify either `out_file` or `out_files`.' 
) _, fn, ext = split_filename(self.inputs.in_file[0]) - self.inputs.out_file = fn + "_generated" + ext + self.inputs.out_file = fn + '_generated' + ext # if generated file will overwrite, raise error if os.path.exists(os.path.abspath(self.inputs.out_file)): - raise IOError("File already found - to overwrite, use `out_file`.") - iflogger.info("Generating `out_file`.") + raise OSError('File already found - to overwrite, use `out_file`.') + iflogger.info('Generating `out_file`.') def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.out_file): - outputs["out_files"] = os.path.abspath(self.inputs.out_file) + outputs['out_files'] = os.path.abspath(self.inputs.out_file) if isdefined(self.inputs.out_files): if len(self.inputs.out_files) == 1: _out_files = glob(os.path.abspath(self.inputs.out_files[0])) @@ -203,6 +203,6 @@ def _list_outputs(self): for f in self.inputs.out_files if os.path.exists(os.path.abspath(f)) ] - outputs["out_files"] = _out_files + outputs['out_files'] = _out_files return outputs diff --git a/xcp_d/interfaces/censoring.py b/xcp_d/interfaces/censoring.py index 92d54a0ad..c619a3696 100644 --- a/xcp_d/interfaces/censoring.py +++ b/xcp_d/interfaces/censoring.py @@ -20,18 +20,18 @@ from xcp_d.utils.filemanip import fname_presuffix from xcp_d.utils.modified_data import _drop_dummy_scans, compute_fd -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') class _RemoveDummyVolumesInputSpec(BaseInterfaceInputSpec): - bold_file = File(exists=True, mandatory=True, desc="Either cifti or nifti ") + bold_file = File(exists=True, mandatory=True, desc='Either cifti or nifti ') dummy_scans = traits.Either( traits.Int, - "auto", + 'auto', mandatory=True, desc=( - "Number of volumes to drop from the beginning, " - "calculated in an earlier workflow from dummy_scans." + 'Number of volumes to drop from the beginning, ' + 'calculated in an earlier workflow from dummy_scans.' ), ) confounds_tsv = traits.Either( @@ -39,54 +39,54 @@ class _RemoveDummyVolumesInputSpec(BaseInterfaceInputSpec): None, mandatory=False, desc=( - "TSV file with selected confounds for denoising. " - "May be None if denoising is disabled." + 'TSV file with selected confounds for denoising. ' + 'May be None if denoising is disabled.' ), ) confounds_images = traits.Either( traits.List(File(exists=True)), None, mandatory=False, - desc="List of images with confounds. May be None if denoising is disabled.", + desc='List of images with confounds. May be None if denoising is disabled.', ) motion_file = File( exists=True, mandatory=True, - desc="TSV file with motion regressors. Used for motion-based censoring.", + desc='TSV file with motion regressors. Used for motion-based censoring.', ) temporal_mask = File( exists=True, mandatory=True, - desc="Temporal mask file.", + desc='Temporal mask file.', ) class _RemoveDummyVolumesOutputSpec(TraitedSpec): bold_file_dropped_TR = File( exists=True, - desc="bold or cifti with volumes dropped", + desc='bold or cifti with volumes dropped', ) - dummy_scans = traits.Int(desc="Number of volumes dropped.") + dummy_scans = traits.Int(desc='Number of volumes dropped.') confounds_tsv_dropped_TR = traits.Either( File(exists=True), None, desc=( - "TSV file with selected confounds for denoising. " - "May be None if denoising is disabled." + 'TSV file with selected confounds for denoising. ' + 'May be None if denoising is disabled.' 
), ) confounds_images_dropped_TR = traits.Either( traits.List(File(exists=True)), None, - desc="List of images with confounds. May be None if denoising is disabled.", + desc='List of images with confounds. May be None if denoising is disabled.', ) motion_file_dropped_TR = File( exists=True, - desc="TSV file with motion parameters.", + desc='TSV file with motion parameters.', ) temporal_mask_dropped_TR = File( exists=True, - desc="Temporal mask file.", + desc='Temporal mask file.', ) @@ -106,85 +106,85 @@ def _run_interface(self, runtime): confounds_file=self.inputs.motion_file, ) - self._results["dummy_scans"] = dummy_scans + self._results['dummy_scans'] = dummy_scans # Check if we need to do anything if dummy_scans == 0: # write the output out - self._results["bold_file_dropped_TR"] = self.inputs.bold_file - self._results["confounds_tsv_dropped_TR"] = self.inputs.confounds_tsv - self._results["confounds_images_dropped_TR"] = self.inputs.confounds_images - self._results["motion_file_dropped_TR"] = self.inputs.motion_file - self._results["temporal_mask_dropped_TR"] = self.inputs.temporal_mask + self._results['bold_file_dropped_TR'] = self.inputs.bold_file + self._results['confounds_tsv_dropped_TR'] = self.inputs.confounds_tsv + self._results['confounds_images_dropped_TR'] = self.inputs.confounds_images + self._results['motion_file_dropped_TR'] = self.inputs.motion_file + self._results['temporal_mask_dropped_TR'] = self.inputs.temporal_mask if ( isdefined(self.inputs.confounds_images) and self.inputs.confounds_images is not None ): - self._results["confounds_images_dropped_TR"] = self.inputs.confounds_images + self._results['confounds_images_dropped_TR'] = self.inputs.confounds_images if isdefined(self.inputs.confounds_tsv) and self.inputs.confounds_tsv is not None: - self._results["confounds_tsv_dropped_TR"] = self.inputs.confounds_tsv + self._results['confounds_tsv_dropped_TR'] = self.inputs.confounds_tsv return runtime # get the file names to output to - self._results["bold_file_dropped_TR"] = fname_presuffix( + self._results['bold_file_dropped_TR'] = fname_presuffix( self.inputs.bold_file, newpath=runtime.cwd, - suffix="_dropped", + suffix='_dropped', use_ext=True, ) - self._results["motion_file_dropped_TR"] = fname_presuffix( + self._results['motion_file_dropped_TR'] = fname_presuffix( self.inputs.motion_file, - suffix="_motion_dropped.tsv", + suffix='_motion_dropped.tsv', newpath=os.getcwd(), use_ext=False, ) - self._results["temporal_mask_dropped_TR"] = fname_presuffix( + self._results['temporal_mask_dropped_TR'] = fname_presuffix( self.inputs.bold_file, - suffix="_tmask_dropped.tsv", + suffix='_tmask_dropped.tsv', newpath=os.getcwd(), use_ext=False, ) if isdefined(self.inputs.confounds_tsv) and self.inputs.confounds_tsv is not None: - self._results["confounds_tsv_dropped_TR"] = fname_presuffix( + self._results['confounds_tsv_dropped_TR'] = fname_presuffix( self.inputs.bold_file, - suffix="_confounds_dropped.tsv", + suffix='_confounds_dropped.tsv', newpath=os.getcwd(), use_ext=False, ) confounds_df = pd.read_table(self.inputs.confounds_tsv) confounds_df_dropped = confounds_df.drop(np.arange(dummy_scans)) confounds_df_dropped.to_csv( - self._results["confounds_tsv_dropped_TR"], - sep="\t", + self._results['confounds_tsv_dropped_TR'], + sep='\t', index=False, ) if isdefined(self.inputs.confounds_images) and self.inputs.confounds_images is not None: - self._results["confounds_images_dropped_TR"] = [] + self._results['confounds_images_dropped_TR'] = [] for i_file, confound_file in 
enumerate(self.inputs.confounds_images): confound_file_dropped = fname_presuffix( confound_file, - suffix=f"_conf{i_file}_dropped", + suffix=f'_conf{i_file}_dropped', newpath=os.getcwd(), use_ext=True, ) dropped_confounds_image = _drop_dummy_scans(confound_file, dummy_scans=dummy_scans) dropped_confounds_image.to_filename(confound_file_dropped) - self._results["confounds_images_dropped_TR"].append(confound_file_dropped) + self._results['confounds_images_dropped_TR'].append(confound_file_dropped) # Remove the dummy volumes dropped_image = _drop_dummy_scans(self.inputs.bold_file, dummy_scans=dummy_scans) - dropped_image.to_filename(self._results["bold_file_dropped_TR"]) + dropped_image.to_filename(self._results['bold_file_dropped_TR']) # Drop the first N rows from the motion file motion_df = pd.read_table(self.inputs.motion_file) motion_df_dropped = motion_df.drop(np.arange(dummy_scans)) motion_df_dropped.to_csv( - self._results["motion_file_dropped_TR"], - sep="\t", + self._results['motion_file_dropped_TR'], + sep='\t', index=False, ) @@ -192,8 +192,8 @@ def _run_interface(self, runtime): censoring_df = pd.read_table(self.inputs.temporal_mask) censoring_df_dropped = censoring_df.drop(np.arange(dummy_scans)) censoring_df_dropped.to_csv( - self._results["temporal_mask_dropped_TR"], - sep="\t", + self._results['temporal_mask_dropped_TR'], + sep='\t', index=False, ) @@ -204,28 +204,28 @@ class _CensorInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, - desc="BOLD file after denoising, interpolation, and filtering", + desc='BOLD file after denoising, interpolation, and filtering', ) temporal_mask = File( exists=True, mandatory=True, desc=( - "Temporal mask; all motion outlier volumes set to 1. " + 'Temporal mask; all motion outlier volumes set to 1. ' "This is a TSV file with one column: 'framewise_displacement'." ), ) column = traits.Str( - "framewise_displacement", + 'framewise_displacement', usedefault=True, mandatory=False, - desc="Column name in the temporal mask to use for censoring.", + desc='Column name in the temporal mask to use for censoring.', ) class _CensorOutputSpec(TraitedSpec): out_file = File( exists=True, - desc="Censored BOLD file", + desc='Censored BOLD file', ) @@ -248,19 +248,19 @@ def _run_interface(self, runtime): if self.inputs.column not in censoring_df.columns: raise ValueError( f"Column '{self.inputs.column}' not found in temporal mask file " - f"({self.inputs.temporal_mask})." + f'({self.inputs.temporal_mask}).' ) # Drop the high-motion volumes, because the image is already censored - if self.inputs.column != "framewise_displacement": - censoring_df = censoring_df.loc[censoring_df["framewise_displacement"] == 0] + if self.inputs.column != 'framewise_displacement': + censoring_df = censoring_df.loc[censoring_df['framewise_displacement'] == 0] censoring_df.reset_index(drop=True, inplace=True) retain_idx = censoring_df.loc[censoring_df[self.inputs.column] == 0].index.values motion_outliers = censoring_df.loc[censoring_df[self.inputs.column] != 0].index.values if motion_outliers.size == 0: # No censoring needed - self._results["out_file"] = self.inputs.in_file + self._results['out_file'] = self.inputs.in_file return runtime # Read in other files @@ -271,8 +271,8 @@ def _run_interface(self, runtime): if is_nifti: if censoring_df.shape[0] != img.shape[3]: raise ValueError( - f"Number of volumes in the temporal mask ({censoring_df.shape[0]}) " - f"does not match the NIfTI ({img.shape[3]})." 
+ f'Number of volumes in the temporal mask ({censoring_df.shape[0]}) ' + f'does not match the NIfTI ({img.shape[3]}).' ) data_censored = data[:, :, :, retain_idx] @@ -285,13 +285,13 @@ def _run_interface(self, runtime): else: if censoring_df.shape[0] != img.shape[0]: raise ValueError( - f"Number of volumes in the temporal mask ({censoring_df.shape[0]}) " - f"does not match the CIFTI ({img.shape[0]})." + f'Number of volumes in the temporal mask ({censoring_df.shape[0]}) ' + f'does not match the CIFTI ({img.shape[0]}).' ) data_censored = data[retain_idx, :] - time_axis, brain_model_axis = [img.header.get_axis(i) for i in range(img.ndim)] + time_axis, brain_model_axis = (img.header.get_axis(i) for i in range(img.ndim)) new_total_volumes = data_censored.shape[0] censored_time_axis = time_axis[:new_total_volumes] # Note: not an error. A time axis cannot be accessed with irregularly spaced values. @@ -307,14 +307,14 @@ def _run_interface(self, runtime): ) # get the output - self._results["out_file"] = fname_presuffix( + self._results['out_file'] = fname_presuffix( self.inputs.in_file, - suffix="_censored", + suffix='_censored', newpath=runtime.cwd, use_ext=True, ) - img_censored.to_filename(self._results["out_file"]) + img_censored.to_filename(self._results['out_file']) return runtime @@ -323,34 +323,34 @@ class _RandomCensorInputSpec(BaseInterfaceInputSpec): exists=True, mandatory=True, desc=( - "Temporal mask; all motion outlier volumes set to 1. " + 'Temporal mask; all motion outlier volumes set to 1. ' "This is a TSV file with one column: 'framewise_displacement'." ), ) temporal_mask_metadata = traits.Dict( - desc="Metadata associated with the temporal_mask output.", + desc='Metadata associated with the temporal_mask output.', ) exact_scans = traits.List( traits.Int, mandatory=True, - desc="Numbers of scans to retain. If None, no additional censoring will be performed.", + desc='Numbers of scans to retain. 
If None, no additional censoring will be performed.', ) random_seed = traits.Either( None, traits.Int, usedefault=True, mandatory=False, - desc="Random seed.", + desc='Random seed.', ) class _RandomCensorOutputSpec(TraitedSpec): temporal_mask = File( exists=True, - desc="Temporal mask file.", + desc='Temporal mask file.', ) temporal_mask_metadata = traits.Dict( - desc="Metadata associated with the temporal_mask output.", + desc='Metadata associated with the temporal_mask output.', ) @@ -366,59 +366,59 @@ def _run_interface(self, runtime): temporal_mask_metadata = self.inputs.temporal_mask_metadata.copy() if not self.inputs.exact_scans: - self._results["temporal_mask"] = self.inputs.temporal_mask - self._results["temporal_mask_metadata"] = temporal_mask_metadata + self._results['temporal_mask'] = self.inputs.temporal_mask + self._results['temporal_mask_metadata'] = temporal_mask_metadata return runtime - self._results["temporal_mask"] = fname_presuffix( + self._results['temporal_mask'] = fname_presuffix( self.inputs.temporal_mask, - suffix="_random", + suffix='_random', newpath=runtime.cwd, use_ext=True, ) rng = np.random.default_rng(self.inputs.random_seed) - low_motion_idx = censoring_df.loc[censoring_df["framewise_displacement"] != 1].index.values + low_motion_idx = censoring_df.loc[censoring_df['framewise_displacement'] != 1].index.values for exact_scan in self.inputs.exact_scans: random_censor = rng.choice(low_motion_idx, size=exact_scan, replace=False) - column_name = f"exact_{exact_scan}" + column_name = f'exact_{exact_scan}' censoring_df[column_name] = 0 censoring_df.loc[low_motion_idx, column_name] = 1 censoring_df.loc[random_censor, column_name] = 0 temporal_mask_metadata[column_name] = { - "Description": ( - f"Randomly selected low-motion volumes to retain exactly {exact_scan} " - "volumes." + 'Description': ( + f'Randomly selected low-motion volumes to retain exactly {exact_scan} ' + 'volumes.' ), - "Levels": { - "0": "Retained or high-motion volume", - "1": "Randomly censored volume", + 'Levels': { + '0': 'Retained or high-motion volume', + '1': 'Randomly censored volume', }, } - censoring_df.to_csv(self._results["temporal_mask"], sep="\t", index=False) - self._results["temporal_mask_metadata"] = temporal_mask_metadata + censoring_df.to_csv(self._results['temporal_mask'], sep='\t', index=False) + self._results['temporal_mask_metadata'] = temporal_mask_metadata return runtime class _ProcessMotionInputSpec(BaseInterfaceInputSpec): - TR = traits.Float(mandatory=True, desc="Repetition time in seconds") + TR = traits.Float(mandatory=True, desc='Repetition time in seconds') fd_thresh = traits.Float( 0.3, mandatory=False, usedefault=True, - desc="Framewise displacement threshold. All values above this will be dropped.", + desc='Framewise displacement threshold. 
All values above this will be dropped.', ) - head_radius = traits.Float(50, mandatory=False, usedefault=True, desc="Head radius in mm") + head_radius = traits.Float(50, mandatory=False, usedefault=True, desc='Head radius in mm') motion_file = File( exists=True, mandatory=True, - desc="fMRIPrep confounds tsv.", + desc='fMRIPrep confounds tsv.', ) motion_json = File( exists=True, mandatory=True, - desc="fMRIPrep confounds json.", + desc='fMRIPrep confounds json.', ) motion_filter_type = traits.Either( None, @@ -434,31 +434,31 @@ class _ProcessMotionInputSpec(BaseInterfaceInputSpec): None, traits.Float, mandatory=True, - desc="Lower frequency for the band-stop motion filter, in breaths-per-minute (bpm).", + desc='Lower frequency for the band-stop motion filter, in breaths-per-minute (bpm).', ) band_stop_max = traits.Either( None, traits.Float, mandatory=True, - desc="Upper frequency for the band-stop motion filter, in breaths-per-minute (bpm).", + desc='Upper frequency for the band-stop motion filter, in breaths-per-minute (bpm).', ) class _ProcessMotionOutputSpec(TraitedSpec): motion_file = File( exists=True, - desc="The filtered motion parameters.", + desc='The filtered motion parameters.', ) - motion_metadata = traits.Dict(desc="Metadata associated with the motion_file output.") + motion_metadata = traits.Dict(desc='Metadata associated with the motion_file output.') temporal_mask = File( exists=True, desc=( - "Temporal mask; all values above fd_thresh set to 1. " + 'Temporal mask; all values above fd_thresh set to 1. ' "This is a TSV file with one column: 'framewise_displacement'." ), ) temporal_mask_metadata = traits.Dict( - desc="Metadata associated with the temporal_mask output.", + desc='Metadata associated with the temporal_mask output.', ) @@ -469,7 +469,7 @@ class ProcessMotion(SimpleInterface): output_spec = _ProcessMotionOutputSpec def _run_interface(self, runtime): - with open(self.inputs.motion_json, "r") as f: + with open(self.inputs.motion_json) as f: motion_metadata = json.load(f) band_stop_min_adjusted, band_stop_max_adjusted, _ = _modify_motion_filter( @@ -489,104 +489,104 @@ def _run_interface(self, runtime): ) # Add in framewise displacement - motion_df["framewise_displacement"] = compute_fd( + motion_df['framewise_displacement'] = compute_fd( confound=motion_df, head_radius=self.inputs.head_radius, filtered=False, ) - fd_timeseries = motion_df["framewise_displacement"].to_numpy() - motion_metadata["framewise_displacement"] = { - "Description": "Framewise displacement calculated according to Power et al. (2012).", - "HeadRadius": self.inputs.head_radius, - "Units": "mm", + fd_timeseries = motion_df['framewise_displacement'].to_numpy() + motion_metadata['framewise_displacement'] = { + 'Description': 'Framewise displacement calculated according to Power et al. 
(2012).', + 'HeadRadius': self.inputs.head_radius, + 'Units': 'mm', } if self.inputs.motion_filter_type: - motion_df["framewise_displacement_filtered"] = compute_fd( + motion_df['framewise_displacement_filtered'] = compute_fd( confound=motion_df, head_radius=self.inputs.head_radius, filtered=True, ) - fd_timeseries = motion_df["framewise_displacement_filtered"].to_numpy() + fd_timeseries = motion_df['framewise_displacement_filtered'].to_numpy() # Compile motion metadata from confounds metadata, adding in filtering info # First drop any columns that are not motion parameters orig_cols = list(motion_metadata.keys()) orig_cols = [c for c in orig_cols if c[0] == c[0].lower()] - cols_to_drop = sorted(list(set(orig_cols) - set(motion_df.columns.tolist()))) + cols_to_drop = sorted(set(orig_cols) - set(motion_df.columns.tolist())) motion_metadata = {k: v for k, v in motion_metadata.items() if k not in cols_to_drop} for col in motion_df.columns.tolist(): col_metadata = motion_metadata.get(col, {}) - if col.endswith("_filtered") and col[:-9] in motion_metadata: + if col.endswith('_filtered') and col[:-9] in motion_metadata: col_metadata = motion_metadata[col[:-9]] - if self.inputs.motion_filter_type == "lp" and col.endswith("_filtered"): - filters = col_metadata.get("SoftwareFilters", {}) - filters["Butterworth low-pass filter"] = { - "cutoff": band_stop_min_adjusted / 60, - "order": int(np.floor(self.inputs.motion_filter_order / 2)), - "cutoff units": "Hz", - "function": "scipy.signal.filtfilt", + if self.inputs.motion_filter_type == 'lp' and col.endswith('_filtered'): + filters = col_metadata.get('SoftwareFilters', {}) + filters['Butterworth low-pass filter'] = { + 'cutoff': band_stop_min_adjusted / 60, + 'order': int(np.floor(self.inputs.motion_filter_order / 2)), + 'cutoff units': 'Hz', + 'function': 'scipy.signal.filtfilt', } - col_metadata["SoftwareFilters"] = filters + col_metadata['SoftwareFilters'] = filters - elif self.inputs.motion_filter_type == "notch" and col.endswith("_filtered"): - filters = col_metadata.get("SoftwareFilters", {}) - filters["IIR notch digital filter"] = { - "cutoff": [ + elif self.inputs.motion_filter_type == 'notch' and col.endswith('_filtered'): + filters = col_metadata.get('SoftwareFilters', {}) + filters['IIR notch digital filter'] = { + 'cutoff': [ band_stop_max_adjusted / 60, band_stop_min_adjusted / 60, ], - "order": int(np.floor(self.inputs.motion_filter_order / 4)), - "cutoff units": "Hz", - "function": "scipy.signal.filtfilt", + 'order': int(np.floor(self.inputs.motion_filter_order / 4)), + 'cutoff units': 'Hz', + 'function': 'scipy.signal.filtfilt', } - col_metadata["SoftwareFilters"] = filters + col_metadata['SoftwareFilters'] = filters motion_metadata[col] = col_metadata - self._results["motion_metadata"] = motion_metadata + self._results['motion_metadata'] = motion_metadata # Store the filtered motion parameters - self._results["motion_file"] = fname_presuffix( + self._results['motion_file'] = fname_presuffix( self.inputs.motion_file, - suffix="_motion", + suffix='_motion', newpath=runtime.cwd, use_ext=True, ) - motion_df.to_csv(self._results["motion_file"], sep="\t", index=False) + motion_df.to_csv(self._results['motion_file'], sep='\t', index=False) # Generate temporal mask with all timepoints have FD over threshold set to 1. outlier_mask = np.zeros(len(fd_timeseries), dtype=int) if self.inputs.fd_thresh > 0: outlier_mask[fd_timeseries > self.inputs.fd_thresh] = 1 else: - LOGGER.info(f"FD threshold set to {self.inputs.fd_thresh}. 
Censoring is disabled.") + LOGGER.info(f'FD threshold set to {self.inputs.fd_thresh}. Censoring is disabled.') - self._results["temporal_mask"] = fname_presuffix( - "desc-fd_outliers.tsv", + self._results['temporal_mask'] = fname_presuffix( + 'desc-fd_outliers.tsv', newpath=runtime.cwd, use_ext=True, ) - outliers_df = pd.DataFrame(data=outlier_mask, columns=["framewise_displacement"]) + outliers_df = pd.DataFrame(data=outlier_mask, columns=['framewise_displacement']) outliers_df.to_csv( - self._results["temporal_mask"], + self._results['temporal_mask'], index=False, header=True, - sep="\t", + sep='\t', ) outliers_metadata = { - "framewise_displacement": { - "Description": "Outlier time series based on framewise displacement.", - "Levels": { - "0": "Non-outlier volume", - "1": "Outlier volume", + 'framewise_displacement': { + 'Description': 'Outlier time series based on framewise displacement.', + 'Levels': { + '0': 'Non-outlier volume', + '1': 'Outlier volume', }, - "Threshold": self.inputs.fd_thresh, + 'Threshold': self.inputs.fd_thresh, } } - self._results["temporal_mask_metadata"] = outliers_metadata + self._results['temporal_mask_metadata'] = outliers_metadata return runtime @@ -595,29 +595,29 @@ class _GenerateConfoundsInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, - desc="Preprocessed BOLD file", + desc='Preprocessed BOLD file', ) - TR = traits.Float(mandatory=True, desc="Repetition time in seconds") + TR = traits.Float(mandatory=True, desc='Repetition time in seconds') confounds_files = traits.Dict( mandatory=True, desc=( - "Dictionary of confound names and paths to corresponding files. " + 'Dictionary of confound names and paths to corresponding files. ' "Keys are confound names, values are dictionaries with keys 'file' and 'metadata'." ), ) confounds_config = traits.Dict( mandatory=True, - desc="Configuration file for confounds.", + desc='Configuration file for confounds.', ) dataset_links = traits.Dict( mandatory=True, - desc="Dataset links for the XCP-D run.", + desc='Dataset links for the XCP-D run.', ) out_dir = traits.Str( mandatory=True, desc=( - "Output directory for the XCP-D run. " - "Not used to write out any files- just used for dataset links." + 'Output directory for the XCP-D run. ' + 'Not used to write out any files- just used for dataset links.' 
), ) motion_filter_type = traits.Either( @@ -634,26 +634,26 @@ class _GenerateConfoundsInputSpec(BaseInterfaceInputSpec): None, traits.Float, mandatory=True, - desc="Lower frequency for the band-stop motion filter, in breaths-per-minute (bpm).", + desc='Lower frequency for the band-stop motion filter, in breaths-per-minute (bpm).', ) band_stop_max = traits.Either( None, traits.Float, mandatory=True, - desc="Upper frequency for the band-stop motion filter, in breaths-per-minute (bpm).", + desc='Upper frequency for the band-stop motion filter, in breaths-per-minute (bpm).', ) class _GenerateConfoundsOutputSpec(TraitedSpec): confounds_tsv = File( exists=True, - desc="The aggregated confounds in a tabular file.", + desc='The aggregated confounds in a tabular file.', ) confounds_images = traits.List( File(exists=True), - desc="The aggregated confounds in image files.", + desc='The aggregated confounds in image files.', ) - confounds_metadata = traits.Dict(desc="Metadata associated with the confounds output.") + confounds_metadata = traits.Dict(desc='Metadata associated with the confounds output.') class GenerateConfounds(SimpleInterface): @@ -712,22 +712,22 @@ def _run_interface(self, runtime): confound_files = [] confounds_image_names = [] for confound_name, confound_info in self.inputs.confounds_files.items(): - confound_file = confound_info["file"] + confound_file = confound_info['file'] confound_files.append(confound_file) - confound_metadata = confound_info["metadata"] - confound_params = self.inputs.confounds_config["confounds"][confound_name] - if "columns" in confound_params: # Tabular confounds + confound_metadata = confound_info['metadata'] + confound_params = self.inputs.confounds_config['confounds'][confound_name] + if 'columns' in confound_params: # Tabular confounds confound_df = pd.read_table(confound_file) if confound_df.shape[0] != n_volumes: raise ValueError( - f"Number of volumes in confounds file ({confound_df.shape[0]}) " - f"does not match number of volumes in the fMRI data ({n_volumes})." + f'Number of volumes in confounds file ({confound_df.shape[0]}) ' + f'does not match number of volumes in the fMRI data ({n_volumes}).' ) available_columns = confound_df.columns.tolist() - required_columns = confound_params["columns"] + required_columns = confound_params['columns'] for column in required_columns: - if column.startswith("^"): + if column.startswith('^'): # Regular expression found_columns = [ col_name @@ -742,8 +742,8 @@ def _run_interface(self, runtime): for found_column in found_columns: if found_column in new_confound_df: raise ValueError( - f"Duplicate column name ({found_column}) in confounds " - "configuration." + f'Duplicate column name ({found_column}) in confounds ' + 'configuration.' ) new_confound_df[found_column] = confound_df[found_column] @@ -754,7 +754,7 @@ def _run_interface(self, runtime): confounds_metadata[found_column] = confounds_metadata.get( found_column, {} ) - confounds_metadata[found_column]["Sources"] = _get_bidsuris( + confounds_metadata[found_column]['Sources'] = _get_bidsuris( in_files=[confound_file], dataset_links=self.inputs.dataset_links, out_dir=self.inputs.out_dir, @@ -765,7 +765,7 @@ def _run_interface(self, runtime): if column in new_confound_df: raise ValueError( - f"Duplicate column name ({column}) in confounds configuration." + f'Duplicate column name ({column}) in confounds configuration.' 
) new_confound_df[column] = confound_df[column] @@ -774,7 +774,7 @@ def _run_interface(self, runtime): new_confound_df.fillna({column: 0}, inplace=True) confounds_metadata[column] = confounds_metadata.get(column, {}) - confounds_metadata[column]["Sources"] = _get_bidsuris( + confounds_metadata[column]['Sources'] = _get_bidsuris( in_files=[confound_file], dataset_links=self.inputs.dataset_links, out_dir=self.inputs.out_dir, @@ -789,8 +789,8 @@ def _run_interface(self, runtime): if n_volumes_check != n_volumes: raise ValueError( - f"Number of volumes in confounds image ({n_volumes_check}) " - f"does not match number of volumes in the fMRI data ({n_volumes})." + f'Number of volumes in confounds image ({n_volumes_check}) ' + f'does not match number of volumes in the fMRI data ({n_volumes}).' ) confounds_images.append(confound_file) @@ -799,18 +799,18 @@ def _run_interface(self, runtime): # Collect image metadata new_confound_df.loc[:, confound_name] = np.nan # fill with NaNs as a placeholder confounds_metadata[confound_name] = confound_metadata - confounds_metadata[confound_name]["Sources"] = _get_bidsuris( + confounds_metadata[confound_name]['Sources'] = _get_bidsuris( in_files=[confound_file], dataset_links=self.inputs.dataset_links, out_dir=self.inputs.out_dir, ) - confounds_metadata[confound_name]["Description"] = ( - "A placeholder column representing a voxel-wise confound. " - "The actual confound data are stored in an imaging file." + confounds_metadata[confound_name]['Description'] = ( + 'A placeholder column representing a voxel-wise confound. ' + 'The actual confound data are stored in an imaging file.' ) # This actually gets overwritten in init_postproc_derivatives_wf. - confounds_metadata["Sources"] = _get_bidsuris( + confounds_metadata['Sources'] = _get_bidsuris( in_files=confound_files, dataset_links=self.inputs.dataset_links, out_dir=self.inputs.out_dir, @@ -823,7 +823,7 @@ def _run_interface(self, runtime): # 3. Calculate the Volterra expansion of the filtered parameters # 4. For each selected motion confound, remove that column and replace with the # filtered version. Include `_filtered` in the new column name. - motion_params = ["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"] + motion_params = ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z'] motion_based_params = [ c for c in new_confound_df.columns if any(c.startswith(p) for p in motion_params) ] @@ -860,12 +860,12 @@ def _run_interface(self, runtime): ] if motion_unfiltered: raise ValueError( - f"Motion-based regressors {motion_unfiltered} were not filtered." + f'Motion-based regressors {motion_unfiltered} were not filtered.' 
) # Select the relevant filtered motion parameter columns motion_df = motion_df[overlapping_columns] - motion_df.columns = [f"{c}_filtered" for c in motion_df.columns] + motion_df.columns = [f'{c}_filtered' for c in motion_df.columns] # Replace the original motion columns with the filtered versions new_confound_df.drop(columns=overlapping_columns, inplace=True) @@ -875,51 +875,51 @@ def _run_interface(self, runtime): for column in overlapping_columns: col_metadata = confounds_metadata[column] - if self.inputs.motion_filter_type == "lp": - filters = col_metadata.get("SoftwareFilters", {}) - filters["Butterworth low-pass filter"] = { - "cutoff": band_stop_min_adjusted / 60, - "order": int(np.floor(self.inputs.motion_filter_order / 2)), - "cutoff units": "Hz", - "function": "scipy.signal.filtfilt", + if self.inputs.motion_filter_type == 'lp': + filters = col_metadata.get('SoftwareFilters', {}) + filters['Butterworth low-pass filter'] = { + 'cutoff': band_stop_min_adjusted / 60, + 'order': int(np.floor(self.inputs.motion_filter_order / 2)), + 'cutoff units': 'Hz', + 'function': 'scipy.signal.filtfilt', } - col_metadata["SoftwareFilters"] = filters + col_metadata['SoftwareFilters'] = filters - elif self.inputs.motion_filter_type == "notch": - filters = col_metadata.get("SoftwareFilters", {}) - filters["IIR notch digital filter"] = { - "cutoff": [ + elif self.inputs.motion_filter_type == 'notch': + filters = col_metadata.get('SoftwareFilters', {}) + filters['IIR notch digital filter'] = { + 'cutoff': [ band_stop_max_adjusted / 60, band_stop_min_adjusted / 60, ], - "order": int(np.floor(self.inputs.motion_filter_order / 4)), - "cutoff units": "Hz", - "function": "scipy.signal.filtfilt", + 'order': int(np.floor(self.inputs.motion_filter_order / 4)), + 'cutoff units': 'Hz', + 'function': 'scipy.signal.filtfilt', } - col_metadata["SoftwareFilters"] = filters + col_metadata['SoftwareFilters'] = filters - confounds_metadata[f"{column}_filtered"] = col_metadata + confounds_metadata[f'{column}_filtered'] = col_metadata confounds_metadata.pop(column, None) # Orthogonalize nuisance regressors w.r.t. any signal regressors - signal_regressors = [c for c in new_confound_df.columns if c.startswith("signal__")] + signal_regressors = [c for c in new_confound_df.columns if c.startswith('signal__')] if signal_regressors: LOGGER.warning( "Signal regressors detected. " "Orthogonalizing nuisance regressors w.r.t. the following signal regressors: " f"{', '.join(signal_regressors)}" ) - noise_regressors = [c for c in new_confound_df.columns if not c.startswith("signal__")] + noise_regressors = [c for c in new_confound_df.columns if not c.startswith('signal__')] - orth_cols = [f"{c}_orth" for c in noise_regressors] + orth_cols = [f'{c}_orth' for c in noise_regressors] orth_confounds_df = pd.DataFrame( index=new_confound_df.index, columns=orth_cols, ) if confounds_image_names: raise NotImplementedError( - "Orthogonalization of confounds with respect to signal regressors is not yet " - "supported for voxelwise confounds." + 'Orthogonalization of confounds with respect to signal regressors is not yet ' + 'supported for voxelwise confounds.' 
) else: # Do the orthogonalization @@ -943,19 +943,19 @@ def _run_interface(self, runtime): col_metadata = {} if col in confounds_metadata.keys(): col_metadata = confounds_metadata.pop(col) - if "Description" in col_metadata.keys(): + if 'Description' in col_metadata.keys(): desc_str = f"{col_metadata['Description']} {desc_str}" - col_metadata["Description"] = desc_str - confounds_metadata[f"{col}_orth"] = col_metadata + col_metadata['Description'] = desc_str + confounds_metadata[f'{col}_orth'] = col_metadata - self._results["confounds_tsv"] = fname_presuffix( - "desc-confounds_timeseries.tsv", + self._results['confounds_tsv'] = fname_presuffix( + 'desc-confounds_timeseries.tsv', newpath=runtime.cwd, use_ext=True, ) - new_confound_df.to_csv(self._results["confounds_tsv"], sep="\t", index=False) + new_confound_df.to_csv(self._results['confounds_tsv'], sep='\t', index=False) - self._results["confounds_images"] = confounds_images - self._results["confounds_metadata"] = confounds_metadata + self._results['confounds_images'] = confounds_images + self._results['confounds_metadata'] = confounds_metadata return runtime diff --git a/xcp_d/interfaces/concatenation.py b/xcp_d/interfaces/concatenation.py index e563bbb82..eaf805821 100644 --- a/xcp_d/interfaces/concatenation.py +++ b/xcp_d/interfaces/concatenation.py @@ -18,20 +18,20 @@ from xcp_d.utils.concatenation import concatenate_niimgs, concatenate_tsvs -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') class _CleanNameSourceInputSpec(BaseInterfaceInputSpec): name_source = traits.List( File(exists=True), mandatory=True, - desc="Name source files.", + desc='Name source files.', ) class _CleanNameSourceOutputSpec(TraitedSpec): name_source = traits.Str( - desc="Name source", + desc='Name source', ) @@ -45,11 +45,11 @@ def _run_interface(self, runtime): # Grab the first file and use that. name_source = self.inputs.name_source[0] # Remove the run entity. - cleaned_name_source = re.sub("_run-[0-9]+_", "_", name_source) + cleaned_name_source = re.sub('_run-[0-9]+_', '_', name_source) # Remove the dir entity. 
- cleaned_name_source = re.sub("_dir-[a-zA-Z0-9]+_", "_", cleaned_name_source) + cleaned_name_source = re.sub('_dir-[a-zA-Z0-9]+_', '_', cleaned_name_source) - self._results["name_source"] = cleaned_name_source + self._results['name_source'] = cleaned_name_source return runtime @@ -60,7 +60,7 @@ class _FilterOutFailedRunsInputSpec(BaseInterfaceInputSpec): Undefined, ), mandatory=True, - desc="Preprocessed BOLD files, after dummy volume removal.", + desc='Preprocessed BOLD files, after dummy volume removal.', ) motion_file = traits.List( traits.Either( @@ -68,7 +68,7 @@ class _FilterOutFailedRunsInputSpec(BaseInterfaceInputSpec): Undefined, ), mandatory=True, - desc="TSV files with fMRIPrep confounds for individual BOLD runs.", + desc='TSV files with fMRIPrep confounds for individual BOLD runs.', ) temporal_mask = traits.List( traits.Either( @@ -76,7 +76,7 @@ class _FilterOutFailedRunsInputSpec(BaseInterfaceInputSpec): Undefined, ), mandatory=True, - desc="TSV files with high-motion outliers indexed.", + desc='TSV files with high-motion outliers indexed.', ) denoised_bold = traits.List( traits.Either( @@ -84,7 +84,7 @@ class _FilterOutFailedRunsInputSpec(BaseInterfaceInputSpec): Undefined, ), mandatory=True, - desc="Denoised BOLD data.", + desc='Denoised BOLD data.', ) denoised_interpolated_bold = traits.List( traits.Either( @@ -92,7 +92,7 @@ class _FilterOutFailedRunsInputSpec(BaseInterfaceInputSpec): Undefined, ), mandatory=True, - desc="Denoised BOLD data.", + desc='Denoised BOLD data.', ) censored_denoised_bold = traits.List( traits.Either( @@ -100,34 +100,34 @@ class _FilterOutFailedRunsInputSpec(BaseInterfaceInputSpec): Undefined, ), mandatory=True, - desc="Denoised BOLD data.", + desc='Denoised BOLD data.', ) smoothed_denoised_bold = traits.Either( traits.List(File(exists=True)), Undefined, - desc="Smoothed, denoised BOLD data. Only set if smoothing was done in postprocessing", + desc='Smoothed, denoised BOLD data. Only set if smoothing was done in postprocessing', ) bold_mask = traits.Either( traits.List(File(exists=True)), Undefined, - desc="BOLD-based brain mask file. Only used for NIFTI processing.", + desc='BOLD-based brain mask file. Only used for NIFTI processing.', ) boldref = traits.Either( traits.List(File(exists=True)), Undefined, - desc="BOLD reference files. Only used for NIFTI processing.", + desc='BOLD reference files. Only used for NIFTI processing.', ) timeseries = traits.List( traits.List(File(exists=True)), mandatory=True, - desc="List of lists of parcellated time series TSV files.", + desc='List of lists of parcellated time series TSV files.', ) timeseries_ciftis = traits.Either( traits.List(traits.List(File(exists=True))), Undefined, desc=( - "List of lists of parcellated time series CIFTI files. " - "Only defined for CIFTI processing." + 'List of lists of parcellated time series CIFTI files. ' + 'Only defined for CIFTI processing.' 
), ) @@ -135,58 +135,58 @@ class _FilterOutFailedRunsInputSpec(BaseInterfaceInputSpec): class _FilterOutFailedRunsOutputSpec(TraitedSpec): preprocessed_bold = traits.List( File(exists=True), - desc="Preprocessed BOLD files, after dummy volume removal.", + desc='Preprocessed BOLD files, after dummy volume removal.', ) motion_file = traits.List( File(exists=True), - desc="fMRIPrep confounds files, after dummy volume removal.", + desc='fMRIPrep confounds files, after dummy volume removal.', ) temporal_mask = traits.List( traits.Either( File(exists=True), Undefined, ), - desc="TSV files with high-motion outliers indexed.", + desc='TSV files with high-motion outliers indexed.', ) denoised_bold = traits.List( File(exists=True), - desc="Denoised BOLD data.", + desc='Denoised BOLD data.', ) denoised_interpolated_bold = traits.List( File(exists=True), - desc="Denoised BOLD data.", + desc='Denoised BOLD data.', ) censored_denoised_bold = traits.List( File(exists=True), - desc="Denoised BOLD data.", + desc='Denoised BOLD data.', ) smoothed_denoised_bold = traits.List( traits.Either( File(exists=True), Undefined, ), - desc="Smoothed, denoised BOLD data.", + desc='Smoothed, denoised BOLD data.', ) bold_mask = traits.List( traits.Either( File(exists=True), Undefined, ), - desc="Smoothed, denoised BOLD data.", + desc='Smoothed, denoised BOLD data.', ) boldref = traits.List( traits.Either( File(exists=True), Undefined, ), - desc="Smoothed, denoised BOLD data.", + desc='Smoothed, denoised BOLD data.', ) timeseries = traits.List( traits.Either( traits.List(File(exists=True)), Undefined, ), - desc="List of lists of parcellated time series TSV files.", + desc='List of lists of parcellated time series TSV files.', ) timeseries_ciftis = traits.List( traits.Either( @@ -194,8 +194,8 @@ class _FilterOutFailedRunsOutputSpec(TraitedSpec): Undefined, ), desc=( - "List of lists of parcellated time series CIFTI files. " - "Only defined for CIFTI processing." + 'List of lists of parcellated time series CIFTI files. ' + 'Only defined for CIFTI processing.' 
), ) @@ -209,30 +209,30 @@ class FilterOutFailedRuns(SimpleInterface): def _run_interface(self, runtime): denoised_bold = self.inputs.denoised_bold inputs_to_filter = { - "preprocessed_bold": self.inputs.preprocessed_bold, - "motion_file": self.inputs.motion_file, - "temporal_mask": self.inputs.temporal_mask, - "denoised_interpolated_bold": self.inputs.denoised_interpolated_bold, - "censored_denoised_bold": self.inputs.censored_denoised_bold, - "smoothed_denoised_bold": self.inputs.smoothed_denoised_bold, - "bold_mask": self.inputs.bold_mask, - "boldref": self.inputs.boldref, - "timeseries": self.inputs.timeseries, - "timeseries_ciftis": self.inputs.timeseries_ciftis, + 'preprocessed_bold': self.inputs.preprocessed_bold, + 'motion_file': self.inputs.motion_file, + 'temporal_mask': self.inputs.temporal_mask, + 'denoised_interpolated_bold': self.inputs.denoised_interpolated_bold, + 'censored_denoised_bold': self.inputs.censored_denoised_bold, + 'smoothed_denoised_bold': self.inputs.smoothed_denoised_bold, + 'bold_mask': self.inputs.bold_mask, + 'boldref': self.inputs.boldref, + 'timeseries': self.inputs.timeseries, + 'timeseries_ciftis': self.inputs.timeseries_ciftis, } n_runs = len(denoised_bold) successful_runs = [i for i, f in enumerate(denoised_bold) if isdefined(f)] if len(successful_runs) < n_runs: - LOGGER.warning(f"Of {n_runs} runs, only runs {successful_runs} were successful.") + LOGGER.warning(f'Of {n_runs} runs, only runs {successful_runs} were successful.') - self._results["denoised_bold"] = [denoised_bold[i] for i in successful_runs] + self._results['denoised_bold'] = [denoised_bold[i] for i in successful_runs] for input_name, input_list in inputs_to_filter.items(): if len(input_list) != n_runs: LOGGER.warning( - f"{input_name} has {len(input_list)} elements, not {n_runs}. Ignoring." + f'{input_name} has {len(input_list)} elements, not {n_runs}. Ignoring.' ) input_list = [Undefined for _ in range(n_runs)] @@ -245,48 +245,48 @@ class _ConcatenateInputsInputSpec(BaseInterfaceInputSpec): preprocessed_bold = traits.List( File(exists=True), mandatory=True, - desc="Preprocessed BOLD files, after dummy volume removal.", + desc='Preprocessed BOLD files, after dummy volume removal.', ) motion_file = traits.List( File(exists=True), mandatory=True, - desc="TSV files with fMRIPrep confounds for individual BOLD runs.", + desc='TSV files with fMRIPrep confounds for individual BOLD runs.', ) temporal_mask = traits.List( traits.Either( File(exists=True), Undefined, ), - desc="TSV files with high-motion outliers indexed.", + desc='TSV files with high-motion outliers indexed.', ) denoised_bold = traits.List( File(exists=True), mandatory=True, - desc="Denoised BOLD data.", + desc='Denoised BOLD data.', ) denoised_interpolated_bold = traits.List( File(exists=True), mandatory=True, - desc="Denoised BOLD data.", + desc='Denoised BOLD data.', ) censored_denoised_bold = traits.List( File(exists=True), mandatory=True, - desc="Denoised BOLD data.", + desc='Denoised BOLD data.', ) smoothed_denoised_bold = traits.List( traits.Either( File(exists=True), Undefined, ), - desc="Smoothed, denoised BOLD data. Optional.", + desc='Smoothed, denoised BOLD data. 
Optional.', ) timeseries = traits.List( traits.Either( traits.List(File(exists=True)), Undefined, ), - desc="List of lists of parcellated time series TSV files.", + desc='List of lists of parcellated time series TSV files.', ) timeseries_ciftis = traits.List( traits.Either( @@ -294,8 +294,8 @@ class _ConcatenateInputsInputSpec(BaseInterfaceInputSpec): Undefined, ), desc=( - "List of lists of parcellated time series CIFTI files. " - "Only defined for CIFTI processing." + 'List of lists of parcellated time series CIFTI files. ' + 'Only defined for CIFTI processing.' ), ) @@ -303,49 +303,49 @@ class _ConcatenateInputsInputSpec(BaseInterfaceInputSpec): class _ConcatenateInputsOutputSpec(TraitedSpec): preprocessed_bold = File( exists=True, - desc="Concatenated preprocessed BOLD file.", + desc='Concatenated preprocessed BOLD file.', ) motion_file = File( exists=True, - desc="Concatenated TSV file with fMRIPrep confounds.", + desc='Concatenated TSV file with fMRIPrep confounds.', ) temporal_mask = traits.Either( File(exists=True), Undefined, - desc="Concatenated TSV file with high-motion outliers indexed.", + desc='Concatenated TSV file with high-motion outliers indexed.', ) denoised_bold = File( exists=True, - desc="Concatenated denoised BOLD data.", + desc='Concatenated denoised BOLD data.', ) denoised_interpolated_bold = File( exists=True, - desc="Concatenated denoised BOLD data.", + desc='Concatenated denoised BOLD data.', ) censored_denoised_bold = File( exists=True, - desc="Concatenated denoised BOLD data.", + desc='Concatenated denoised BOLD data.', ) smoothed_denoised_bold = traits.Either( File(exists=True), Undefined, - desc="Concatenated, smoothed, denoised BOLD data. Optional.", + desc='Concatenated, smoothed, denoised BOLD data. Optional.', ) timeseries = traits.List( File(exists=True), - desc="Concatenated list of parcellated time series TSV files.", + desc='Concatenated list of parcellated time series TSV files.', ) timeseries_ciftis = traits.Either( traits.List(File(exists=True)), Undefined, desc=( - "Concatenated list of parcellated time series CIFTI files. " - "Only defined for CIFTI processing." + 'Concatenated list of parcellated time series CIFTI files. ' + 'Only defined for CIFTI processing.' 
), ) run_index = traits.List( traits.Int(), - desc="Index of join points between the *uncensored* runs.", + desc='Index of join points between the *uncensored* runs.', ) @@ -357,15 +357,15 @@ class ConcatenateInputs(SimpleInterface): def _run_interface(self, runtime): merge_inputs = { - "preprocessed_bold": self.inputs.preprocessed_bold, - "denoised_bold": self.inputs.denoised_bold, - "denoised_interpolated_bold": self.inputs.denoised_interpolated_bold, - "censored_denoised_bold": self.inputs.censored_denoised_bold, - "smoothed_denoised_bold": self.inputs.smoothed_denoised_bold, - "timeseries_ciftis": self.inputs.timeseries_ciftis, - "motion_file": self.inputs.motion_file, - "temporal_mask": self.inputs.temporal_mask, - "timeseries": self.inputs.timeseries, + 'preprocessed_bold': self.inputs.preprocessed_bold, + 'denoised_bold': self.inputs.denoised_bold, + 'denoised_interpolated_bold': self.inputs.denoised_interpolated_bold, + 'censored_denoised_bold': self.inputs.censored_denoised_bold, + 'smoothed_denoised_bold': self.inputs.smoothed_denoised_bold, + 'timeseries_ciftis': self.inputs.timeseries_ciftis, + 'motion_file': self.inputs.motion_file, + 'temporal_mask': self.inputs.temporal_mask, + 'timeseries': self.inputs.timeseries, } run_index, n_volumes = [], 0 @@ -373,17 +373,17 @@ def _run_interface(self, runtime): n_volumes = n_volumes + pd.read_table(run_motion).shape[0] run_index.append(n_volumes) - self._results["run_index"] = run_index + self._results['run_index'] = run_index for name, run_files in merge_inputs.items(): - LOGGER.info(f"Concatenating {name}") + LOGGER.info(f'Concatenating {name}') if len(run_files) == 0 or any(not isdefined(f) for f in run_files): - LOGGER.warning(f"No {name} files found") + LOGGER.warning(f'No {name} files found') self._results[name] = Undefined continue elif isinstance(run_files[0], list) and not isdefined(run_files[0][0]): - LOGGER.warning(f"No {name} files found") + LOGGER.warning(f'No {name} files found') self._results[name] = Undefined continue @@ -394,27 +394,27 @@ def _run_interface(self, runtime): ) out_files = [] for i_atlas, parc_files in enumerate(transposed_run_files): - extension = ".".join(os.path.basename(parc_files[0]).split(".")[1:]) - out_file = os.path.join(runtime.cwd, f"{name}_{i_atlas}.{extension}") - if out_file.endswith(".tsv"): + extension = '.'.join(os.path.basename(parc_files[0]).split('.')[1:]) + out_file = os.path.join(runtime.cwd, f'{name}_{i_atlas}.{extension}') + if out_file.endswith('.tsv'): concatenate_tsvs(parc_files, out_file=out_file) else: concatenate_niimgs(parc_files, out_file=out_file) - assert os.path.isfile(out_file), f"Output file {out_file} not created." + assert os.path.isfile(out_file), f'Output file {out_file} not created.' out_files.append(out_file) self._results[name] = out_files else: # Files are a single list of paths. - extension = ".".join(os.path.basename(run_files[0]).split(".")[1:]) - out_file = os.path.join(runtime.cwd, f"{name}.{extension}") - if out_file.endswith(".tsv"): + extension = '.'.join(os.path.basename(run_files[0]).split('.')[1:]) + out_file = os.path.join(runtime.cwd, f'{name}.{extension}') + if out_file.endswith('.tsv'): concatenate_tsvs(run_files, out_file=out_file) else: concatenate_niimgs(run_files, out_file=out_file) - assert os.path.isfile(out_file), f"Output file {out_file} not created." + assert os.path.isfile(out_file), f'Output file {out_file} not created.' 
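Editor's note: the run_index bookkeeping above is a cumulative count of volumes per run, read from each run's motion TSV, which later marks the join points in the concatenated series. A rough sketch of that logic (file paths are hypothetical):

import pandas as pd

motion_files = ['run-1_motion.tsv', 'run-2_motion.tsv']  # hypothetical per-run motion TSVs
run_index, n_volumes = [], 0
for run_motion in motion_files:
    n_volumes += pd.read_table(run_motion).shape[0]  # one row per volume
    run_index.append(n_volumes)
# runs of 200 and 180 volumes would give run_index == [200, 380]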
self._results[name] = out_file return runtime diff --git a/xcp_d/interfaces/connectivity.py b/xcp_d/interfaces/connectivity.py index 5bce9ec4b..97be1483b 100644 --- a/xcp_d/interfaces/connectivity.py +++ b/xcp_d/interfaces/connectivity.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Handling functional connectivity.""" + import gc import matplotlib.pyplot as plt @@ -22,29 +23,29 @@ from xcp_d.utils.filemanip import fname_presuffix from xcp_d.utils.write_save import write_ndata -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') class _NiftiParcellateInputSpec(BaseInterfaceInputSpec): - filtered_file = File(exists=True, mandatory=True, desc="filtered file") - mask = File(exists=True, mandatory=True, desc="brain mask file") - atlas = File(exists=True, mandatory=True, desc="atlas file") - atlas_labels = File(exists=True, mandatory=True, desc="atlas labels file") + filtered_file = File(exists=True, mandatory=True, desc='filtered file') + mask = File(exists=True, mandatory=True, desc='brain mask file') + atlas = File(exists=True, mandatory=True, desc='atlas file') + atlas_labels = File(exists=True, mandatory=True, desc='atlas labels file') min_coverage = traits.Float( 0.5, usedefault=True, desc=( - "Coverage threshold to apply to parcels. " - "Any parcels with lower coverage than the threshold will be replaced with NaNs. " - "Must be a value between zero and one. " - "Default is 0.5." + 'Coverage threshold to apply to parcels. ' + 'Any parcels with lower coverage than the threshold will be replaced with NaNs. ' + 'Must be a value between zero and one. ' + 'Default is 0.5.' ), ) class _NiftiParcellateOutputSpec(TraitedSpec): - coverage = File(exists=True, desc="Parcel-wise coverage file.") - timeseries = File(exists=True, desc="Parcellated time series file.") + coverage = File(exists=True, desc='Parcel-wise coverage file.') + timeseries = File(exists=True, desc='Parcellated time series file.') class NiftiParcellate(SimpleInterface): @@ -63,14 +64,14 @@ def _run_interface(self, runtime): atlas = self.inputs.atlas min_coverage = self.inputs.min_coverage - node_labels_df = pd.read_table(self.inputs.atlas_labels, index_col="index") + node_labels_df = pd.read_table(self.inputs.atlas_labels, index_col='index') # Fix any nonsequential values or mismatch between atlas and DataFrame. atlas_img, node_labels_df = _sanitize_nifti_atlas(atlas, node_labels_df) - node_labels = node_labels_df["label"].tolist() + node_labels = node_labels_df['label'].tolist() # prepend "background" to node labels to satisfy NiftiLabelsMasker # The background "label" won't be present in the output timeseries. 
- masker_labels = ["background"] + node_labels + masker_labels = ['background'] + node_labels # Before anything, we need to measure coverage atlas_img_bin = nb.Nifti1Image( @@ -86,7 +87,7 @@ def _run_interface(self, runtime): mask_img=mask, smoothing_fwhm=None, standardize=False, - strategy="sum", + strategy='sum', resampling_target=None, # they should be in the same space/resolution already ) sum_masker_unmasked = NiftiLabelsMasker( @@ -95,7 +96,7 @@ def _run_interface(self, runtime): background_label=0, smoothing_fwhm=None, standardize=False, - strategy="sum", + strategy='sum', resampling_target=None, # they should be in the same space/resolution already ) n_voxels_in_masked_parcels = sum_masker_masked.fit_transform(atlas_img_bin) @@ -117,24 +118,24 @@ def _run_interface(self, runtime): if n_found_nodes != n_nodes: LOGGER.warning( - f"{n_nodes - n_found_nodes}/{n_nodes} of parcels not found in atlas file." + f'{n_nodes - n_found_nodes}/{n_nodes} of parcels not found in atlas file.' ) if n_bad_nodes: - LOGGER.warning(f"{n_bad_nodes}/{n_nodes} of parcels have 0% coverage.") + LOGGER.warning(f'{n_bad_nodes}/{n_nodes} of parcels have 0% coverage.') if n_poor_parcels: LOGGER.warning( - f"{n_poor_parcels}/{n_nodes} of parcels have <50% coverage. " + f'{n_poor_parcels}/{n_nodes} of parcels have <50% coverage. ' "These parcels' time series will be replaced with zeros." ) if n_partial_parcels: LOGGER.warning( - f"{n_partial_parcels}/{n_nodes} of parcels have at least one uncovered " - "voxel, but have enough good voxels to be useable. " + f'{n_partial_parcels}/{n_nodes} of parcels have at least one uncovered ' + 'voxel, but have enough good voxels to be usable. ' "The bad voxels will be ignored and the parcels' time series will be " - "calculated from the remaining voxels." + 'calculated from the remaining voxels.' ) masker = NiftiLabelsMasker( @@ -158,7 +159,7 @@ def _run_interface(self, runtime): timeseries_arr[:, coverage_thresholded] = np.nan # Region indices in the atlas may not be sequential, so we map them to sequential ints. - seq_mapper = {idx: i for i, idx in enumerate(node_labels_df["sanitized_index"].tolist())} + seq_mapper = {idx: i for i, idx in enumerate(node_labels_df['sanitized_index'].tolist())} if n_found_nodes != n_nodes: # parcels lost by warping/downsampling atlas # Fill in any missing nodes in the timeseries array with NaNs. @@ -186,45 +187,45 @@ def _run_interface(self, runtime): gc.collect() # The time series file is tab-delimited, with node names included in the first row. 
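Editor's note: the coverage checks above reduce to one ratio per parcel: the number of atlas voxels that survive the brain mask divided by the total number of atlas voxels in that parcel, computed with nilearn's NiftiLabelsMasker and strategy='sum'. A simplified sketch, assuming a recent nilearn and that atlas_img, atlas_img_bin (the binarized atlas), and mask are already defined:

from nilearn.maskers import NiftiLabelsMasker

sum_masker_masked = NiftiLabelsMasker(
    labels_img=atlas_img, background_label=0, mask_img=mask,
    strategy='sum', resampling_target=None,
)
sum_masker_unmasked = NiftiLabelsMasker(
    labels_img=atlas_img, background_label=0,
    strategy='sum', resampling_target=None,
)
n_voxels_in_masked_parcels = sum_masker_masked.fit_transform(atlas_img_bin)
n_voxels_in_parcels = sum_masker_unmasked.fit_transform(atlas_img_bin)
parcel_coverage = (n_voxels_in_masked_parcels / n_voxels_in_parcels).squeeze()  # 1.0 = fully covered parcel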
- self._results["timeseries"] = fname_presuffix( - "timeseries.tsv", + self._results['timeseries'] = fname_presuffix( + 'timeseries.tsv', newpath=runtime.cwd, use_ext=True, ) timeseries_df = pd.DataFrame(data=timeseries_arr, columns=node_labels) - timeseries_df.to_csv(self._results["timeseries"], sep="\t", na_rep="n/a", index=False) + timeseries_df.to_csv(self._results['timeseries'], sep='\t', na_rep='n/a', index=False) # Save out the coverage tsv coverage_df = pd.DataFrame( data=parcel_coverage.astype(np.float32), index=node_labels, - columns=["coverage"], + columns=['coverage'], ) - self._results["coverage"] = fname_presuffix( - "coverage.tsv", + self._results['coverage'] = fname_presuffix( + 'coverage.tsv', newpath=runtime.cwd, use_ext=True, ) - coverage_df.to_csv(self._results["coverage"], sep="\t", na_rep="n/a", index_label="Node") + coverage_df.to_csv(self._results['coverage'], sep='\t', na_rep='n/a', index_label='Node') return runtime class _TSVConnectInputSpec(BaseInterfaceInputSpec): - timeseries = File(exists=True, desc="Parcellated time series TSV file.") + timeseries = File(exists=True, desc='Parcellated time series TSV file.') temporal_mask = File( exists=True, mandatory=False, - desc="Temporal mask, after dummy scan removal.", + desc='Temporal mask, after dummy scan removal.', ) class _TSVConnectOutputSpec(TraitedSpec): - correlations = File(exists=True, desc="Correlation matrix file.") + correlations = File(exists=True, desc='Correlation matrix file.') correlations_exact = traits.Either( None, traits.List(File(exists=True)), - desc="Correlation matrix files limited to an exact number of volumes.", + desc='Correlation matrix files limited to an exact number of volumes.', ) @@ -238,13 +239,13 @@ def correlate_timeseries(timeseries, temporal_mask): # Determine if the time series is censored if censoring_df.shape[0] == timeseries_df.shape[0]: # The time series is not censored - timeseries_df = timeseries_df.loc[censoring_df["framewise_displacement"] == 0] + timeseries_df = timeseries_df.loc[censoring_df['framewise_displacement'] == 0] timeseries_df.reset_index(drop=True, inplace=True) # Now create correlation matrices limited to exact scan numbers - censored_censoring_df = censoring_df.loc[censoring_df["framewise_displacement"] == 0] + censored_censoring_df = censoring_df.loc[censoring_df['framewise_displacement'] == 0] censored_censoring_df.reset_index(drop=True, inplace=True) - exact_columns = [c for c in censoring_df.columns if c.startswith("exact_")] + exact_columns = [c for c in censoring_df.columns if c.startswith('exact_')] for exact_column in exact_columns: exact_timeseries_df = timeseries_df.loc[censored_censoring_df[exact_column] == 0] exact_correlations_df = exact_timeseries_df.corr() @@ -273,38 +274,38 @@ def _run_interface(self, runtime): temporal_mask=self.inputs.temporal_mask, ) - self._results["correlations"] = fname_presuffix( - "correlations.tsv", + self._results['correlations'] = fname_presuffix( + 'correlations.tsv', newpath=runtime.cwd, use_ext=True, ) correlations_df.to_csv( - self._results["correlations"], - sep="\t", - na_rep="n/a", - index_label="Node", + self._results['correlations'], + sep='\t', + na_rep='n/a', + index_label='Node', ) del correlations_df gc.collect() if not self.inputs.temporal_mask: - self._results["correlations_exact"] = None + self._results['correlations_exact'] = None return runtime - self._results["correlations_exact"] = [] + self._results['correlations_exact'] = [] for exact_column, exact_correlations_df in 
correlations_exact.items(): exact_correlations_file = fname_presuffix( - f"correlations_{exact_column}.tsv", + f'correlations_{exact_column}.tsv', newpath=runtime.cwd, use_ext=True, ) exact_correlations_df.to_csv( exact_correlations_file, - sep="\t", - na_rep="n/a", - index_label="Node", + sep='\t', + na_rep='n/a', + index_label='Node', ) - self._results["correlations_exact"].append(exact_correlations_file) + self._results['correlations_exact'].append(exact_correlations_file) return runtime @@ -313,18 +314,18 @@ class _ConnectPlotInputSpec(BaseInterfaceInputSpec): atlases = InputMultiObject( traits.Str, mandatory=True, - desc="List of atlases. Aligned with the list of time series in time_series_tsv.", + desc='List of atlases. Aligned with the list of time series in time_series_tsv.', ) atlas_tsvs = InputMultiObject( traits.Str, mandatory=True, - desc="The dseg.tsv associated with each atlas.", + desc='The dseg.tsv associated with each atlas.', ) correlations_tsv = InputMultiObject( File(exists=True), mandatory=True, desc=( - "List of TSV file with correlation matrices. " + 'List of TSV file with correlation matrices. ' "Aligned with the list of atlases in 'atlases'." ), ) @@ -334,7 +335,7 @@ class _ConnectPlotOutputSpec(TraitedSpec): connectplot = File( exists=True, mandatory=True, - desc="Path to SVG file with four correlation heat maps.", + desc='Path to SVG file with four correlation heat maps.', ) @@ -355,7 +356,7 @@ def plot_matrix(self, corr_mat, network_labels, ax): if label not in unique_labels: unique_labels.append(label) - mapper = {label: f"{i:03d}_{label}" for i, label in enumerate(unique_labels)} + mapper = {label: f'{i:03d}_{label}' for i, label in enumerate(unique_labels)} mapped_network_labels = [mapper[label] for label in network_labels] community_order = np.argsort(mapped_network_labels) @@ -365,7 +366,7 @@ def plot_matrix(self, corr_mat, network_labels, ax): # Get the community name associated with each network labels = np.array(network_labels)[community_order] - unique_labels = sorted(list(set(labels))) + unique_labels = sorted(set(labels)) unique_labels = [] for label in labels: if label not in unique_labels: @@ -390,12 +391,12 @@ def plot_matrix(self, corr_mat, network_labels, ax): np.fill_diagonal(corr_mat, 0) # Plot the correlation matrix - ax.imshow(corr_mat, vmin=-1, vmax=1, cmap="seismic") + ax.imshow(corr_mat, vmin=-1, vmax=1, cmap='seismic') # Add lines separating networks for idx in break_idx[1:-1]: - ax.axes.axvline(idx, color="black") - ax.axes.axhline(idx, color="black") + ax.axes.axvline(idx, color='black') + ax.axes.axhline(idx, color='black') # Add network names ax.axes.set_yticks(label_idx) @@ -407,22 +408,22 @@ def plot_matrix(self, corr_mat, network_labels, ax): def _run_interface(self, runtime): priority_list = [ - "MIDB", - "MyersLabonte", - "4S156Parcels", - "4S456Parcels", - "Gordon", - "Glasser", - "Tian", - "HCP", - "4S256Parcels", - "4S356Parcels", - "4S556Parcels", - "4S656Parcels", - "4S756Parcels", - "4S856Parcels", - "4S956Parcels", - "4S1056Parcels", + 'MIDB', + 'MyersLabonte', + '4S156Parcels', + '4S456Parcels', + 'Gordon', + 'Glasser', + 'Tian', + 'HCP', + '4S256Parcels', + '4S356Parcels', + '4S556Parcels', + '4S656Parcels', + '4S756Parcels', + '4S856Parcels', + '4S956Parcels', + '4S1056Parcels', ] external_atlases = [a for a in self.inputs.atlases if a not in priority_list] priority_list += external_atlases @@ -437,7 +438,7 @@ def _run_interface(self, runtime): break COMMUNITY_LOOKUP = { - "Glasser": "community_yeo", + 'Glasser': 
'community_yeo', } if len(selected_atlases) == 4: @@ -457,22 +458,22 @@ def _run_interface(self, runtime): sel_ax_idx = ax_idx[i_ax] - column_name = COMMUNITY_LOOKUP.get(atlas, "network_label") + column_name = COMMUNITY_LOOKUP.get(atlas, 'network_label') dseg_df = pd.read_table(dseg_file) - corrs_df = pd.read_table(atlas_file, index_col="Node") + corrs_df = pd.read_table(atlas_file, index_col='Node') - if atlas.startswith("4S"): + if atlas.startswith('4S'): atlas_mapper = { - "CIT168Subcortical": "Subcortical", - "ThalamusHCP": "Thalamus", - "SubcorticalHCP": "Subcortical", + 'CIT168Subcortical': 'Subcortical', + 'ThalamusHCP': 'Thalamus', + 'SubcorticalHCP': 'Subcortical', } - network_labels = dseg_df[column_name].fillna(dseg_df["atlas_name"]).tolist() + network_labels = dseg_df[column_name].fillna(dseg_df['atlas_name']).tolist() network_labels = [atlas_mapper.get(network, network) for network in network_labels] elif column_name in dseg_df.columns: - network_labels = dseg_df[column_name].fillna("None").tolist() + network_labels = dseg_df[column_name].fillna('None').tolist() else: - network_labels = ["None"] * dseg_df.shape[0] + network_labels = ['None'] * dseg_df.shape[0] ax = axes[sel_ax_idx] ax = self.plot_matrix( @@ -482,20 +483,20 @@ def _run_interface(self, runtime): ) ax.set_title( atlas, - fontdict={"weight": "normal", "size": 20}, + fontdict={'weight': 'normal', 'size': 20}, ) fig.tight_layout() # Write the results out - self._results["connectplot"] = fname_presuffix( - "connectivityplot", - suffix="_matrixplot.svg", + self._results['connectplot'] = fname_presuffix( + 'connectivityplot', + suffix='_matrixplot.svg', newpath=runtime.cwd, use_ext=False, ) - fig.savefig(self._results["connectplot"], bbox_inches="tight", pad_inches=None) + fig.savefig(self._results['connectplot'], bbox_inches='tight', pad_inches=None) plt.close(fig) return runtime @@ -516,11 +517,11 @@ def _sanitize_nifti_atlas(atlas, df): found_values = np.unique(atlas_data) found_values = found_values[found_values != 0] # drop the background value if not np.all(np.isin(found_values, expected_values)): - raise ValueError("Atlas file contains values that are not present in the DataFrame.") + raise ValueError('Atlas file contains values that are not present in the DataFrame.') # Map the labels in the DataFrame to sequential values. label_mapper = {value: i + 1 for i, value in enumerate(expected_values)} - df["sanitized_index"] = [label_mapper[i] for i in df.index.values] + df['sanitized_index'] = [label_mapper[i] for i in df.index.values] # Map the values in the atlas image to sequential values. 
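Editor's note: the sanitization step above re-labels the atlas so that whatever integer indices it uses become consecutive values matching the rows of the labels DataFrame. A compact sketch with toy arrays (not real atlas data):

import numpy as np

atlas_data = np.array([0, 10, 10, 25, 40])  # toy voxel labels; 0 is background
expected_values = [10, 25, 40]              # index values listed in the atlas labels TSV
label_mapper = {value: i + 1 for i, value in enumerate(expected_values)}
new_atlas_data = np.zeros(atlas_data.shape, dtype=np.int16)
for old_value, new_value in label_mapper.items():
    new_atlas_data[atlas_data == old_value] = new_value
print(new_atlas_data)  # [0 1 1 2 3]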
new_atlas_data = np.zeros(atlas_data.shape, dtype=np.int16) @@ -536,13 +537,13 @@ class _CiftiToTSVInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, - desc="Parcellated CIFTI file to extract into a TSV.", + desc='Parcellated CIFTI file to extract into a TSV.', ) - atlas_labels = File(exists=True, mandatory=True, desc="atlas labels file") + atlas_labels = File(exists=True, mandatory=True, desc='atlas labels file') class _CiftiToTSVOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Parcellated data TSV file.") + out_file = File(exists=True, desc='Parcellated data TSV file.') class CiftiToTSV(SimpleInterface): @@ -555,35 +556,41 @@ def _run_interface(self, runtime): in_file = self.inputs.in_file atlas_labels = self.inputs.atlas_labels - assert in_file.endswith((".ptseries.nii", ".pscalar.nii", ".pconn.nii")), in_file + assert in_file.endswith(('.ptseries.nii', '.pscalar.nii', '.pconn.nii')), in_file img = nb.load(in_file) - node_labels_df = pd.read_table(atlas_labels, index_col="index") + node_labels_df = pd.read_table(atlas_labels, index_col='index') node_labels_df.sort_index(inplace=True) # ensure index is in order # Explicitly remove label corresponding to background (index=0), if present. if 0 in node_labels_df.index: LOGGER.warning( - "Index value of 0 found in atlas labels file. " - "Will assume this describes the background and ignore it." + 'Index value of 0 found in atlas labels file. ' + 'Will assume this describes the background and ignore it.' ) node_labels_df = node_labels_df.drop(index=[0]) - if "cifti_label" in node_labels_df.columns: - parcel_label_mapper = dict(zip(node_labels_df["cifti_label"], node_labels_df["label"])) - elif "label_7network" in node_labels_df.columns: - node_labels_df["cifti_label"] = node_labels_df["label_7network"].fillna( - node_labels_df["label"] + if 'cifti_label' in node_labels_df.columns: + parcel_label_mapper = dict( + zip(node_labels_df['cifti_label'], node_labels_df['label'], strict=False) + ) + elif 'label_7network' in node_labels_df.columns: + node_labels_df['cifti_label'] = node_labels_df['label_7network'].fillna( + node_labels_df['label'] + ) + parcel_label_mapper = dict( + zip(node_labels_df['cifti_label'], node_labels_df['label'], strict=False) ) - parcel_label_mapper = dict(zip(node_labels_df["cifti_label"], node_labels_df["label"])) else: LOGGER.warning( "No 'cifti_label' column found in atlas labels file. " - "Assuming labels in TSV exactly match node names in CIFTI atlas." + 'Assuming labels in TSV exactly match node names in CIFTI atlas.' + ) + parcel_label_mapper = dict( + zip(node_labels_df['label'], node_labels_df['label'], strict=False) ) - parcel_label_mapper = dict(zip(node_labels_df["label"], node_labels_df["label"])) - if in_file.endswith(".pconn.nii"): + if in_file.endswith('.pconn.nii'): ax0 = img.header.get_axis(0) ax1 = img.header.get_axis(1) ax0_labels = ax0.name @@ -617,11 +624,11 @@ def _run_interface(self, runtime): if missing_index_values: raise ValueError( - f"Missing CIFTI labels in atlas labels DataFrame: {missing_index_values}" + f'Missing CIFTI labels in atlas labels DataFrame: {missing_index_values}' ) if missing_dict_values: - raise ValueError(f"Missing atlas labels in CIFTI file: {missing_dict_values}") + raise ValueError(f'Missing atlas labels in CIFTI file: {missing_dict_values}') # Replace the index values with the corresponding dictionary values. 
df.index = [parcel_label_mapper[i] for i in df.index] @@ -640,26 +647,26 @@ def _run_interface(self, runtime): if missing_columns: raise ValueError( - f"Missing CIFTI labels in atlas labels DataFrame: {missing_columns}" + f'Missing CIFTI labels in atlas labels DataFrame: {missing_columns}' ) if missing_dict_values: - raise ValueError(f"Missing atlas labels in CIFTI file: {missing_dict_values}") + raise ValueError(f'Missing atlas labels in CIFTI file: {missing_dict_values}') # Replace the column names with the corresponding dictionary values. df.columns = [parcel_label_mapper[i] for i in df.columns] # Save out the TSV - self._results["out_file"] = fname_presuffix( - "extracted.tsv", + self._results['out_file'] = fname_presuffix( + 'extracted.tsv', newpath=runtime.cwd, use_ext=True, ) - if in_file.endswith(".pconn.nii"): - df.to_csv(self._results["out_file"], sep="\t", na_rep="n/a", index_label="Node") + if in_file.endswith('.pconn.nii'): + df.to_csv(self._results['out_file'], sep='\t', na_rep='n/a', index_label='Node') else: - df.to_csv(self._results["out_file"], sep="\t", na_rep="n/a", index=False) + df.to_csv(self._results['out_file'], sep='\t', na_rep='n/a', index=False) return runtime @@ -668,17 +675,17 @@ class _CiftiMaskInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, - desc="CIFTI file to mask.", + desc='CIFTI file to mask.', ) mask = File( exists=True, mandatory=True, - desc="Mask pscalar or dscalar to apply to in_file.", + desc='Mask pscalar or dscalar to apply to in_file.', ) class _CiftiMaskOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Masked CIFTI file.") + out_file = File(exists=True, desc='Masked CIFTI file.') class CiftiMask(SimpleInterface): @@ -695,19 +702,19 @@ def _run_interface(self, runtime): in_file = self.inputs.in_file mask = self.inputs.mask - supported_extensions = (".ptseries.nii", ".pscalar.nii", ".dtseries.nii", ".dscalar.nii") + supported_extensions = ('.ptseries.nii', '.pscalar.nii', '.dtseries.nii', '.dscalar.nii') if not in_file.endswith(supported_extensions): raise ValueError(f"Unsupported CIFTI extension for 'in_file': {in_file}") - if not mask.endswith((".pscalar.nii", ".dscalar.nii")): + if not mask.endswith(('.pscalar.nii', '.dscalar.nii')): raise ValueError(f"Unsupported CIFTI extension for 'mask': {mask}") in_img = nb.load(in_file) mask_img = nb.load(mask) if in_img.shape[1] != mask_img.shape[1]: raise ValueError( - "CIFTI files have different number of parcels/vertices. " - f"{in_file} ({in_img.shape}) vs {mask} ({mask_img.shape})" + 'CIFTI files have different number of parcels/vertices. 
' + f'{in_file} ({in_img.shape}) vs {mask} ({mask_img.shape})' ) in_data = in_img.get_fdata() @@ -716,13 +723,13 @@ def _run_interface(self, runtime): in_data[:, ~mask_data] = np.nan # Save out the TSV - self._results["out_file"] = fname_presuffix( + self._results['out_file'] = fname_presuffix( self.inputs.in_file, - prefix="masked_", + prefix='masked_', newpath=runtime.cwd, use_ext=True, ) - write_ndata(in_data.T, template=in_file, filename=self._results["out_file"]) + write_ndata(in_data.T, template=in_file, filename=self._results['out_file']) return runtime @@ -731,12 +738,12 @@ class _CiftiVertexMaskInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, - desc="CIFTI file to mask.", + desc='CIFTI file to mask.', ) class _CiftiVertexMaskOutputSpec(TraitedSpec): - mask_file = File(exists=True, desc="CIFTI mask.") + mask_file = File(exists=True, desc='CIFTI mask.') class CiftiVertexMask(SimpleInterface): @@ -760,12 +767,12 @@ def _run_interface(self, runtime): vertex_weights_arr = np.all(~np.isnan(data_arr), axis=0).astype(int) # Save out the TSV - self._results["mask_file"] = fname_presuffix( + self._results['mask_file'] = fname_presuffix( self.inputs.in_file, - suffix=".dscalar.nii", + suffix='.dscalar.nii', newpath=runtime.cwd, use_ext=False, ) - write_ndata(vertex_weights_arr, template=data_file, filename=self._results["mask_file"]) + write_ndata(vertex_weights_arr, template=data_file, filename=self._results['mask_file']) return runtime diff --git a/xcp_d/interfaces/execsummary.py b/xcp_d/interfaces/execsummary.py index a3212ba68..c92e4bdc8 100644 --- a/xcp_d/interfaces/execsummary.py +++ b/xcp_d/interfaces/execsummary.py @@ -1,5 +1,5 @@ -#! /usr/bin/env python """Classes for building an executive summary file.""" + import os import re from pathlib import Path @@ -23,7 +23,7 @@ from xcp_d.utils.filemanip import fname_presuffix -class ExecutiveSummary(object): +class ExecutiveSummary: """A class to build an executive summary. Parameters @@ -44,7 +44,7 @@ def __init__(self, xcpd_path, subject_id, session_id=None): else: self.session_id = None - self.layout = BIDSLayout(xcpd_path, config="figures", validate=False) + self.layout = BIDSLayout(xcpd_path, config='figures', validate=False) def write_html(self, document, filename): """Write an html document to a filename. @@ -56,11 +56,11 @@ def write_html(self, document, filename): filename : :obj:`str` name of html file. 
""" - soup = BeautifulSoup(document, features="lxml") + soup = BeautifulSoup(document, features='lxml') html = soup.prettify() # prettify the html filepath = os.path.join(self.xcpd_path, filename) - with open(filepath, "w") as fo: + with open(filepath, 'w') as fo: fo.write(html) def _get_bids_file(self, query): @@ -69,91 +69,91 @@ def _get_bids_file(self, query): found_file = files[0].path found_file = os.path.relpath(found_file, self.xcpd_path) else: - found_file = "None" + found_file = 'None' return found_file def collect_inputs(self): """Collect inputs.""" ANAT_SLICEWISE_PNG_DESCS = [ - "AxialBasalGangliaPutamen", - "AxialInferiorTemporalCerebellum", - "AxialSuperiorFrontal", - "CoronalCaudateAmygdala", - "CoronalOrbitoFrontal", - "CoronalPosteriorParietalLingual", - "SagittalCorpusCallosum", - "SagittalInsulaFrontoTemporal", - "SagittalInsulaTemporalHippocampalSulcus", + 'AxialBasalGangliaPutamen', + 'AxialInferiorTemporalCerebellum', + 'AxialSuperiorFrontal', + 'CoronalCaudateAmygdala', + 'CoronalOrbitoFrontal', + 'CoronalPosteriorParietalLingual', + 'SagittalCorpusCallosum', + 'SagittalInsulaFrontoTemporal', + 'SagittalInsulaTemporalHippocampalSulcus', ] ANAT_REGISTRATION_DESCS = [ - "AtlasOnAnat", - "AnatOnAtlas", + 'AtlasOnAnat', + 'AnatOnAtlas', # "AtlasOnSubcorticals", # "SubcorticalsOnAtlas", ] ANAT_REGISTRATION_TITLES = [ - "Atlas On {modality}", # noqa: FS003 - "{modality} On Atlas", # noqa: FS003 + 'Atlas On {modality}', # noqa: FS003 + '{modality} On Atlas', # noqa: FS003 # "Atlas On {modality} Subcorticals", # noqa: FS003 # "{modality} Subcorticals On Atlas", # noqa: FS003 ] TASK_REGISTRATION_DESCS = [ - "TaskOnT1w", - "T1wOnTask", - "TaskOnT2w", - "T2wOnTask", + 'TaskOnT1w', + 'T1wOnTask', + 'TaskOnT2w', + 'T2wOnTask', ] TASK_REGISTRATION_TITLES = [ - "Task On T1w", - "T1w On Task", - "Task On T2w", - "T2w On Task", + 'Task On T1w', + 'T1w On Task', + 'Task On T2w', + 'T2w On Task', ] ORDERING = [ - "session", - "task", - "acquisition", - "ceagent", - "reconstruction", - "direction", - "run", - "echo", + 'session', + 'task', + 'acquisition', + 'ceagent', + 'reconstruction', + 'direction', + 'run', + 'echo', ] query = { - "subject": self.subject_id, + 'subject': self.subject_id, } structural_files = {} - for modality in ["T1w", "T2w"]: + for modality in ['T1w', 'T2w']: structural_files[modality] = {} - query["suffix"] = modality + query['suffix'] = modality # Get mosaic file for brainsprite. - query["desc"] = "mosaic" - query["extension"] = ".png" + query['desc'] = 'mosaic' + query['extension'] = '.png' mosaic = self._get_bids_file(query) - structural_files[modality]["mosaic"] = mosaic + structural_files[modality]['mosaic'] = mosaic # Get slicewise PNG files for brainsprite. - structural_files[modality]["slices"] = [] + structural_files[modality]['slices'] = [] for slicewise_png_desc in ANAT_SLICEWISE_PNG_DESCS: - query["desc"] = slicewise_png_desc + query['desc'] = slicewise_png_desc slicewise_pngs = self._get_bids_file(query) - structural_files[modality]["slices"].append(slicewise_pngs) + structural_files[modality]['slices'].append(slicewise_pngs) # Get structural registration files. 
- structural_files[modality]["registration_files"] = [] - structural_files[modality]["registration_titles"] = [ + structural_files[modality]['registration_files'] = [] + structural_files[modality]['registration_titles'] = [ title.format(modality=modality) for title in ANAT_REGISTRATION_TITLES ] for registration_desc in ANAT_REGISTRATION_DESCS: - query["desc"] = registration_desc + query['desc'] = registration_desc found_file = self._get_bids_file(query) - structural_files[modality]["registration_files"].append(found_file) + structural_files[modality]['registration_files'].append(found_file) self.structural_files_ = structural_files @@ -161,26 +161,26 @@ def collect_inputs(self): concatenated_rest_files = {} query = { - "subject": self.subject_id, - "task": "rest", - "run": Query.NONE, - "desc": "preprocESQC", - "suffix": "bold", - "extension": ".svg", + 'subject': self.subject_id, + 'task': 'rest', + 'run': Query.NONE, + 'desc': 'preprocESQC', + 'suffix': 'bold', + 'extension': '.svg', } - concatenated_rest_files["preproc_carpet"] = self._get_bids_file(query) + concatenated_rest_files['preproc_carpet'] = self._get_bids_file(query) - query["desc"] = "postprocESQC" - concatenated_rest_files["postproc_carpet"] = self._get_bids_file(query) + query['desc'] = 'postprocESQC' + concatenated_rest_files['postproc_carpet'] = self._get_bids_file(query) self.concatenated_rest_files_ = concatenated_rest_files # Determine the unique entity-sets for the task data. postproc_files = self.layout.get( subject=self.subject_id, - datatype="func", - suffix="bold", - extension=[".dtseries.nii", ".nii.gz"], + datatype='func', + suffix='bold', + extension=['.dtseries.nii', '.nii.gz'], ) unique_entity_sets = [] for postproc_file in postproc_files: @@ -192,7 +192,7 @@ def collect_inputs(self): # Unique dictionary filter in list # Using map() + set() + items() + sorted() + tuple() unique_entity_sets = list( - map(dict, set(tuple(sorted(sub.items())) for sub in unique_entity_sets)) + map(dict, {tuple(sorted(sub.items())) for sub in unique_entity_sets}) ) task_entity_sets = [] for entity_set in unique_entity_sets: @@ -207,13 +207,13 @@ def collect_inputs(self): # Remove concatenated resting-state scans # (there must also be at least one resting-state scan with run or direction) - mask_not_nan = (task_entity_sets["task"] == "rest") & task_entity_sets[ - ["direction", "run"] + mask_not_nan = (task_entity_sets['task'] == 'rest') & task_entity_sets[ + ['direction', 'run'] ].notna().any(axis=1) # Create a mask for rows where 'run' is 'rest' and 'direction' and 'run' are NaN - mask_nan = (task_entity_sets["task"] == "rest") & task_entity_sets[ - ["direction", "run"] + mask_nan = (task_entity_sets['task'] == 'rest') & task_entity_sets[ + ['direction', 'run'] ].isna().all(axis=1) # If there are rows where 'run' is 'rest', and 'direction' and 'run' are not NaN, @@ -227,14 +227,14 @@ def collect_inputs(self): # Extract entities with variability # This lets us name the sections based on multiple entities (not just task and run) nunique = task_entity_sets.nunique() - nunique.loc["task"] = 2 # ensure we keep task - nunique.loc["run"] = 2 # ensure we keep run + nunique.loc['task'] = 2 # ensure we keep task + nunique.loc['run'] = 2 # ensure we keep run cols_to_drop = nunique[nunique == 1].index task_entity_namer = task_entity_sets.drop(cols_to_drop, axis=1) # Convert back to dictionary - task_entity_sets = task_entity_sets.to_dict(orient="records") - task_entity_namer = task_entity_namer.to_dict(orient="records") + task_entity_sets = 
task_entity_sets.to_dict(orient='records') + task_entity_namer = task_entity_namer.to_dict(orient='records') task_files = [] @@ -250,38 +250,38 @@ def collect_inputs(self): temp_dict[k] = v # String used for subsection headers - task_file_figures["key"] = " ".join([f"{k}-{v}" for k, v in temp_dict.items()]) + task_file_figures['key'] = ' '.join([f'{k}-{v}' for k, v in temp_dict.items()]) query = { - "subject": self.subject_id, - "desc": "preprocESQC", - "suffix": "bold", - "extension": [".svg", ".png"], + 'subject': self.subject_id, + 'desc': 'preprocESQC', + 'suffix': 'bold', + 'extension': ['.svg', '.png'], **task_entity_set, } - task_file_figures["preproc_carpet"] = self._get_bids_file(query) + task_file_figures['preproc_carpet'] = self._get_bids_file(query) - query["desc"] = "postprocESQC" - task_file_figures["postproc_carpet"] = self._get_bids_file(query) + query['desc'] = 'postprocESQC' + task_file_figures['postproc_carpet'] = self._get_bids_file(query) - query["desc"] = "boldref" - task_file_figures["reference"] = self._get_bids_file(query) + query['desc'] = 'boldref' + task_file_figures['reference'] = self._get_bids_file(query) - query["desc"] = "mean" - task_file_figures["bold"] = self._get_bids_file(query) + query['desc'] = 'mean' + task_file_figures['bold'] = self._get_bids_file(query) - task_file_figures["registration_files"] = [] - task_file_figures["registration_titles"] = TASK_REGISTRATION_TITLES + task_file_figures['registration_files'] = [] + task_file_figures['registration_titles'] = TASK_REGISTRATION_TITLES for registration_desc in TASK_REGISTRATION_DESCS: - query["desc"] = registration_desc + query['desc'] = registration_desc found_file = self._get_bids_file(query) - task_file_figures["registration_files"].append(found_file) + task_file_figures['registration_files'].append(found_file) # If there no mean BOLD figure, then the "run" was made by the concatenation workflow. # Skip the concatenated resting-state scan, since it has its own section. - if query["task"] == "rest" and not task_file_figures["bold"]: + if query['task'] == 'rest' and not task_file_figures['bold']: continue task_files.append(task_file_figures) @@ -290,42 +290,42 @@ def collect_inputs(self): def generate_report(self, out_file=None): """Generate the report.""" - logs_path = Path(self.xcpd_path) / "logs" + logs_path = Path(self.xcpd_path) / 'logs' if out_file is None: if self.session_id: - out_file = f"sub-{self.subject_id}_ses-{self.session_id}_executive_summary.html" + out_file = f'sub-{self.subject_id}_ses-{self.session_id}_executive_summary.html' else: - out_file = f"sub-{self.subject_id}_executive_summary.html" + out_file = f'sub-{self.subject_id}_executive_summary.html' out_file = os.path.join(self.xcpd_path, out_file) boilerplate = [] boiler_idx = 0 - if (logs_path / "CITATION.html").exists(): + if (logs_path / 'CITATION.html').exists(): text = ( - re.compile("(.*?)", re.DOTALL | re.IGNORECASE) - .findall((logs_path / "CITATION.html").read_text())[0] + re.compile('(.*?)', re.DOTALL | re.IGNORECASE) + .findall((logs_path / 'CITATION.html').read_text())[0] .strip() ) - boilerplate.append((boiler_idx, "HTML", f'
{text}'))
+            boilerplate.append((boiler_idx, 'HTML', f'{text}'))
             boiler_idx += 1

-        if (logs_path / "CITATION.md").exists():
-            text = (logs_path / "CITATION.md").read_text()
-            boilerplate.append((boiler_idx, "Markdown", f"{text}\n"))
+        if (logs_path / 'CITATION.md').exists():
+            text = (logs_path / 'CITATION.md').read_text()
+            boilerplate.append((boiler_idx, 'Markdown', f'{text}\n'))
             boiler_idx += 1

-        if (logs_path / "CITATION.tex").exists():
+        if (logs_path / 'CITATION.tex').exists():
             text = (
-                re.compile(r"\\begin{document}(.*?)\\end{document}", re.DOTALL | re.IGNORECASE)
-                .findall((logs_path / "CITATION.tex").read_text())[0]
+                re.compile(r'\\begin{document}(.*?)\\end{document}', re.DOTALL | re.IGNORECASE)
+                .findall((logs_path / 'CITATION.tex').read_text())[0]
                 .strip()
             )
             boilerplate.append(
                 (
                     boiler_idx,
-                    "LaTeX",
+                    'LaTeX',
                     f"""{text}

Bibliography

{load_data("boilerplate.bib").read_text()}
@@ -337,17 +337,17 @@ def generate_report(self, out_file=None): def include_file(name): return Markup(loader.get_source(environment, name)[0]) - template_folder = str(load_data("executive_summary_templates/")) + template_folder = str(load_data('executive_summary_templates/')) loader = FileSystemLoader(template_folder) - environment = Environment(loader=loader) - environment.filters["basename"] = os.path.basename - environment.globals["include_file"] = include_file + environment = Environment(loader=loader, autoescape=True) + environment.filters['basename'] = os.path.basename + environment.globals['include_file'] = include_file - template = environment.get_template("executive_summary.html.jinja") + template = environment.get_template('executive_summary.html.jinja') html = template.render( - subject=f"sub-{self.subject_id}", - session=f"ses-{self.session_id}" if self.session_id else None, + subject=f'sub-{self.subject_id}', + session=f'ses-{self.session_id}' if self.session_id else None, structural_files=self.structural_files_, concatenated_rest_files=self.concatenated_rest_files_, task_files=self.task_files_, @@ -362,13 +362,13 @@ class _FormatForBrainSwipesInputSpec(BaseInterfaceInputSpec): File(exists=True), desc=( "Figure files. Must be the derivative's filename, " - "not the file from the working directory." + 'not the file from the working directory.' ), ) class _FormatForBrainSwipesOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Reformatted png file.") + out_file = File(exists=True, desc='Reformatted png file.') class FormatForBrainSwipes(SimpleInterface): @@ -384,7 +384,7 @@ class FormatForBrainSwipes(SimpleInterface): def _run_interface(self, runtime): input_files = self.inputs.in_files - assert len(input_files) == 9, "There must be 9 input files." + assert len(input_files) == 9, 'There must be 9 input files.' 
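Editor's note: FormatForBrainSwipes, continued below, stitches nine registration panels into a 3x3 mosaic, pads each row to a common width, and min-max rescales intensities into the 8-bit range before saving a PNG. The rescaling step in isolation (synthetic array, not real image data):

import numpy as np

x = np.random.rand(64, 192, 3) * 100.0  # synthetic mosaic of stacked rows
new_x = ((x - x.min()) * (1 / (x.max() - x.min()) * 255)).astype('uint8')
assert new_x.min() == 0 and new_x.max() == 255  # full 8-bit range after rescaling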
idx = [[0, 1, 2], [3, 4, 5], [6, 7, 8]] widths, rows = [], [] for i_row in range(3): @@ -403,20 +403,20 @@ def _run_interface(self, runtime): pad = max_width - width prepad = pad // 2 postpad = pad - prepad - rows[i_row] = np.pad(row, ((0, 0), (prepad, postpad), (0, 0)), mode="constant") + rows[i_row] = np.pad(row, ((0, 0), (prepad, postpad), (0, 0)), mode='constant') x = np.concatenate(rows, axis=0) - new_x = ((x - x.min()) * (1 / (x.max() - x.min()) * 255)).astype("uint8") + new_x = ((x - x.min()) * (1 / (x.max() - x.min()) * 255)).astype('uint8') new_im = Image.fromarray(np.uint8(new_x)) output_file = fname_presuffix( input_files[0], newpath=runtime.cwd, - suffix="_reformatted.png", + suffix='_reformatted.png', use_ext=False, ) # all images should have the a .png extension new_im.save(output_file) - self._results["out_file"] = output_file + self._results['out_file'] = output_file return runtime diff --git a/xcp_d/interfaces/nilearn.py b/xcp_d/interfaces/nilearn.py index a6d9b9745..e634c4a81 100644 --- a/xcp_d/interfaces/nilearn.py +++ b/xcp_d/interfaces/nilearn.py @@ -22,25 +22,25 @@ class _IndexImageInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, - desc="A 4D image to index.", + desc='A 4D image to index.', ) index = traits.Int( 0, usedefault=True, - desc="Volume index to select from in_file.", + desc='Volume index to select from in_file.', ) out_file = File( - "img_3d.nii.gz", + 'img_3d.nii.gz', usedefault=True, exists=False, - desc="The name of the indexed file.", + desc='The name of the indexed file.', ) class _IndexImageOutputSpec(TraitedSpec): out_file = File( exists=True, - desc="Concatenated output file.", + desc='Concatenated output file.', ) @@ -54,8 +54,8 @@ def _run_interface(self, runtime): from nilearn.image import index_img img_3d = index_img(self.inputs.in_file, self.inputs.index) - self._results["out_file"] = os.path.join(runtime.cwd, self.inputs.out_file) - img_3d.to_filename(self._results["out_file"]) + self._results['out_file'] = os.path.join(runtime.cwd, self.inputs.out_file) + img_3d.to_filename(self._results['out_file']) return runtime @@ -64,20 +64,20 @@ class _MergeInputSpec(BaseInterfaceInputSpec): in_files = InputMultiPath( File(exists=True), mandatory=True, - desc="A list of images to concatenate.", + desc='A list of images to concatenate.', ) out_file = File( - "concat_4d.nii.gz", + 'concat_4d.nii.gz', usedefault=True, exists=False, - desc="The name of the concatenated file to write out. concat_4d.nii.gz by default.", + desc='The name of the concatenated file to write out. concat_4d.nii.gz by default.', ) class _MergeOutputSpec(TraitedSpec): out_file = File( exists=True, - desc="Concatenated output file.", + desc='Concatenated output file.', ) @@ -91,8 +91,8 @@ def _run_interface(self, runtime): from nilearn.image import concat_imgs img_4d = concat_imgs(self.inputs.in_files) - self._results["out_file"] = os.path.join(runtime.cwd, self.inputs.out_file) - img_4d.to_filename(self._results["out_file"]) + self._results['out_file'] = os.path.join(runtime.cwd, self.inputs.out_file) + img_4d.to_filename(self._results['out_file']) return runtime @@ -101,7 +101,7 @@ class _SmoothInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, - desc="An image to smooth.", + desc='An image to smooth.', ) fwhm = traits.Either( traits.Float(), @@ -111,22 +111,22 @@ class _SmoothInputSpec(BaseInterfaceInputSpec): maxlen=3, ), desc=( - "Full width at half maximum. 
" - "Smoothing strength, as a full-width at half maximum, in millimeters." + 'Full width at half maximum. ' + 'Smoothing strength, as a full-width at half maximum, in millimeters.' ), ) out_file = File( - "smooth_img.nii.gz", + 'smooth_img.nii.gz', usedefault=True, exists=False, - desc="The name of the smoothed file to write out. smooth_img.nii.gz by default.", + desc='The name of the smoothed file to write out. smooth_img.nii.gz by default.', ) class _SmoothOutputSpec(TraitedSpec): out_file = File( exists=True, - desc="Smoothed output file.", + desc='Smoothed output file.', ) @@ -140,8 +140,8 @@ def _run_interface(self, runtime): from nilearn.image import smooth_img img_smoothed = smooth_img(self.inputs.in_file, fwhm=self.inputs.fwhm) - self._results["out_file"] = os.path.join(runtime.cwd, self.inputs.out_file) - img_smoothed.to_filename(self._results["out_file"]) + self._results['out_file'] = os.path.join(runtime.cwd, self.inputs.out_file) + img_smoothed.to_filename(self._results['out_file']) return runtime @@ -150,24 +150,24 @@ class _BinaryMathInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, - desc="An image to do math on.", + desc='An image to do math on.', ) expression = traits.String( mandatory=True, desc="A mathematical expression to apply to the image. Must have 'img' in it.", ) out_file = File( - "out_img.nii.gz", + 'out_img.nii.gz', usedefault=True, exists=False, - desc="The name of the mathified file to write out. out_img.nii.gz by default.", + desc='The name of the mathified file to write out. out_img.nii.gz by default.', ) class _BinaryMathOutputSpec(TraitedSpec): out_file = File( exists=True, - desc="Mathified output file.", + desc='Mathified output file.', ) @@ -181,8 +181,8 @@ def _run_interface(self, runtime): from nilearn.image import math_img img_mathed = math_img(self.inputs.expression, img=self.inputs.in_file) - self._results["out_file"] = os.path.join(runtime.cwd, self.inputs.out_file) - img_mathed.to_filename(self._results["out_file"]) + self._results['out_file'] = os.path.join(runtime.cwd, self.inputs.out_file) + img_mathed.to_filename(self._results['out_file']) return runtime @@ -191,25 +191,25 @@ class _ResampleToImageInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, - desc="An image to average over time.", + desc='An image to average over time.', ) target_file = File( exists=True, mandatory=True, - desc="", + desc='', ) out_file = File( - "out_img.nii.gz", + 'out_img.nii.gz', usedefault=True, exists=False, - desc="The name of the resampled file to write out. out_img.nii.gz by default.", + desc='The name of the resampled file to write out. 
out_img.nii.gz by default.', ) class _ResampleToImageOutputSpec(TraitedSpec): out_file = File( exists=True, - desc="Resampled output file.", + desc='Resampled output file.', ) @@ -228,10 +228,10 @@ def _run_interface(self, runtime): resampled_img = resample_to_img( source_img=self.inputs.in_file, target_img=self.inputs.target_file, - interpolation="continuous", + interpolation='continuous', ) - self._results["out_file"] = os.path.join(runtime.cwd, self.inputs.out_file) - resampled_img.to_filename(self._results["out_file"]) + self._results['out_file'] = os.path.join(runtime.cwd, self.inputs.out_file) + resampled_img.to_filename(self._results['out_file']) class _DenoiseImageInputSpec(BaseInterfaceInputSpec): @@ -241,29 +241,29 @@ class _DenoiseImageInputSpec(BaseInterfaceInputSpec): exists=True, mandatory=True, desc=( - "Preprocessed BOLD data, after dummy volume removal, " - "but without any additional censoring." + 'Preprocessed BOLD data, after dummy volume removal, ' + 'but without any additional censoring.' ), ) confounds_tsv = traits.Either( File(exists=True), None, - desc="A tab-delimited file containing the confounds to remove from the BOLD data.", + desc='A tab-delimited file containing the confounds to remove from the BOLD data.', ) confounds_images = traits.List( File(exists=True), - desc="A list of 4D images containing voxelwise confounds.", + desc='A list of 4D images containing voxelwise confounds.', ) temporal_mask = File( exists=True, mandatory=True, - desc="The tab-delimited high-motion outliers file.", + desc='The tab-delimited high-motion outliers file.', ) - TR = traits.Float(mandatory=True, desc="Repetition time") - bandpass_filter = traits.Bool(mandatory=True, desc="To apply bandpass or not") - low_pass = traits.Float(mandatory=True, desc="Lowpass filter in Hz") - high_pass = traits.Float(mandatory=True, desc="Highpass filter in Hz") - filter_order = traits.Int(mandatory=True, desc="Filter order") + TR = traits.Float(mandatory=True, desc='Repetition time') + bandpass_filter = traits.Bool(mandatory=True, desc='To apply bandpass or not') + low_pass = traits.Float(mandatory=True, desc='Lowpass filter in Hz') + high_pass = traits.Float(mandatory=True, desc='Highpass filter in Hz') + filter_order = traits.Int(mandatory=True, desc='Filter order') class _DenoiseImageOutputSpec(TraitedSpec): @@ -272,8 +272,8 @@ class _DenoiseImageOutputSpec(TraitedSpec): denoised_interpolated_bold = File( exists=True, desc=( - "The result of denoising the censored preprocessed BOLD data, " - "followed by cubic spline interpolation and band-pass filtering." + 'The result of denoising the censored preprocessed BOLD data, ' + 'followed by cubic spline interpolation and band-pass filtering.' ), ) @@ -303,24 +303,24 @@ def _run_interface(self, runtime): censoring_df = pd.read_table(self.inputs.temporal_mask) if censoring_df.shape[0] != n_volumes: raise ValueError( - f"Temporal mask file has {censoring_df.shape[0]} rows, " - f"but BOLD data has {n_volumes} volumes." + f'Temporal mask file has {censoring_df.shape[0]} rows, ' + f'but BOLD data has {n_volumes} volumes.' ) # Invert temporal mask, so low-motion volumes are True and high-motion volumes are False. 
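Editor's note: as the comment above states, the temporal mask stores 1 for censored (high-motion) volumes and 0 for retained ones, so the boolean sample mask that nilearn expects is simply the negation of that column. A toy sketch:

import pandas as pd

censoring_df = pd.DataFrame({'framewise_displacement': [0, 0, 1, 0, 1]})  # 1 = censored volume
sample_mask = ~censoring_df['framewise_displacement'].to_numpy().astype(bool)
print(sample_mask)  # [ True  True False  True False]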
- sample_mask = ~censoring_df["framewise_displacement"].to_numpy().astype(bool) + sample_mask = ~censoring_df['framewise_displacement'].to_numpy().astype(bool) confounds_df = None if self.inputs.confounds_tsv: confounds_df = pd.read_table(self.inputs.confounds_tsv) if confounds_df.shape[0] != n_volumes: raise ValueError( - f"Confounds file has {confounds_df.shape[0]} rows, " - f"but BOLD data has {n_volumes} volumes." + f'Confounds file has {confounds_df.shape[0]} rows, ' + f'but BOLD data has {n_volumes} volumes.' ) # Drop all-NaN columns representing voxel-wise confounds - confounds_df = confounds_df.dropna(axis=1, how="all") + confounds_df = confounds_df.dropna(axis=1, how='all') voxelwise_confounds = None if self.inputs.confounds_images: @@ -339,14 +339,14 @@ def _run_interface(self, runtime): # Transpose from TxS (nilearn order) to SxT (xcpd order) denoised_interpolated_bold = denoised_interpolated_bold.T - self._results["denoised_interpolated_bold"] = os.path.join( + self._results['denoised_interpolated_bold'] = os.path.join( runtime.cwd, - "filtered_denoised.dtseries.nii", + 'filtered_denoised.dtseries.nii', ) write_ndata( denoised_interpolated_bold, template=self.inputs.preprocessed_bold, - filename=self._results["denoised_interpolated_bold"], + filename=self._results['denoised_interpolated_bold'], TR=self.inputs.TR, ) @@ -357,7 +357,7 @@ class _DenoiseNiftiInputSpec(_DenoiseImageInputSpec): mask = File( exists=True, mandatory=True, - desc="A binary brain mask.", + desc='A binary brain mask.', ) @@ -387,24 +387,24 @@ def _run_interface(self, runtime): censoring_df = pd.read_table(self.inputs.temporal_mask) if censoring_df.shape[0] != n_volumes: raise ValueError( - f"Temporal mask file has {censoring_df.shape[0]} rows, " - f"but BOLD data has {n_volumes} volumes." + f'Temporal mask file has {censoring_df.shape[0]} rows, ' + f'but BOLD data has {n_volumes} volumes.' ) # Invert temporal mask, so low-motion volumes are True and high-motion volumes are False. - sample_mask = ~censoring_df["framewise_displacement"].to_numpy().astype(bool) + sample_mask = ~censoring_df['framewise_displacement'].to_numpy().astype(bool) confounds_df = None if self.inputs.confounds_tsv: confounds_df = pd.read_table(self.inputs.confounds_tsv) if confounds_df.shape[0] != n_volumes: raise ValueError( - f"Confounds file has {confounds_df.shape[0]} rows, " - f"but BOLD data has {n_volumes} volumes." + f'Confounds file has {confounds_df.shape[0]} rows, ' + f'but BOLD data has {n_volumes} volumes.' 
) # Drop all-NaN columns representing voxel-wise confounds - confounds_df = confounds_df.dropna(axis=1, how="all") + confounds_df = confounds_df.dropna(axis=1, how='all') voxelwise_confounds = None if self.inputs.confounds_images: @@ -423,9 +423,9 @@ def _run_interface(self, runtime): TR=self.inputs.TR, ) - self._results["denoised_interpolated_bold"] = os.path.join( + self._results['denoised_interpolated_bold'] = os.path.join( runtime.cwd, - "filtered_denoised.nii.gz", + 'filtered_denoised.nii.gz', ) filtered_denoised_img = masking.unmask( X=denoised_interpolated_bold, @@ -436,6 +436,6 @@ def _run_interface(self, runtime): pixdim = list(filtered_denoised_img.header.get_zooms()) pixdim[3] = self.inputs.TR filtered_denoised_img.header.set_zooms(pixdim) - filtered_denoised_img.to_filename(self._results["denoised_interpolated_bold"]) + filtered_denoised_img.to_filename(self._results['denoised_interpolated_bold']) return runtime diff --git a/xcp_d/interfaces/plotting.py b/xcp_d/interfaces/plotting.py index 1a2492576..b61b34ddf 100644 --- a/xcp_d/interfaces/plotting.py +++ b/xcp_d/interfaces/plotting.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Plotting interfaces.""" + import os import matplotlib.pyplot as plt @@ -33,25 +34,25 @@ from xcp_d.utils.qcmetrics import compute_dvars from xcp_d.utils.write_save import read_ndata -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') class _CensoringPlotInputSpec(BaseInterfaceInputSpec): - motion_file = File(exists=True, mandatory=True, desc="fMRIPrep confounds file.") + motion_file = File(exists=True, mandatory=True, desc='fMRIPrep confounds file.') temporal_mask = File( exists=True, mandatory=True, - desc="Temporal mask after dummy scan removal.", + desc='Temporal mask after dummy scan removal.', ) - dummy_scans = traits.Int(mandatory=True, desc="Dummy time to drop") - TR = traits.Float(mandatory=True, desc="Repetition Time") - head_radius = traits.Float(mandatory=True, desc="Head radius for FD calculation") + dummy_scans = traits.Int(mandatory=True, desc='Dummy time to drop') + TR = traits.Float(mandatory=True, desc='Repetition Time') + head_radius = traits.Float(mandatory=True, desc='Head radius for FD calculation') motion_filter_type = traits.Either(None, traits.Str, mandatory=True) - fd_thresh = traits.Float(mandatory=True, desc="Framewise displacement threshold.") + fd_thresh = traits.Float(mandatory=True, desc='Framewise displacement threshold.') class _CensoringPlotOutputSpec(TraitedSpec): - out_file = File(exists=True, mandatory=True, desc="Censoring plot.") + out_file = File(exists=True, mandatory=True, desc='Censoring plot.') class CensoringPlot(SimpleInterface): @@ -68,26 +69,26 @@ class CensoringPlot(SimpleInterface): def _run_interface(self, runtime): # Load confound matrix and load motion with motion filtering motion_df = pd.read_table(self.inputs.motion_file) - preproc_fd_timeseries = motion_df["framewise_displacement"].values + preproc_fd_timeseries = motion_df['framewise_displacement'].values # Load temporal mask censoring_df = pd.read_table(self.inputs.temporal_mask) # The number of colors in the palette depends on whether there are random censors or not - palette = sns.color_palette("colorblind", 4 + censoring_df.shape[1]) + palette = sns.color_palette('colorblind', 4 + censoring_df.shape[1]) time_array = np.arange(preproc_fd_timeseries.size) * self.inputs.TR - with sns.axes_style("whitegrid"): + with 
sns.axes_style('whitegrid'): fig, ax = plt.subplots(figsize=(8, 4)) ax.plot( time_array, preproc_fd_timeseries, - label="Raw Framewise Displacement", + label='Raw Framewise Displacement', color=palette[0], ) - ax.axhline(self.inputs.fd_thresh, label="Outlier Threshold", color="salmon", alpha=0.5) + ax.axhline(self.inputs.fd_thresh, label='Outlier Threshold', color='salmon', alpha=0.5) dummy_scans = self.inputs.dummy_scans # This check is necessary, because init_prepare_confounds_wf connects dummy_scans from the @@ -99,7 +100,7 @@ def _run_interface(self, runtime): ax.axvspan( 0, dummy_scans * self.inputs.TR, - label="Dummy Volumes", + label='Dummy Volumes', alpha=0.5, color=palette[1], ) @@ -109,12 +110,12 @@ def _run_interface(self, runtime): # Compute filtered framewise displacement to plot censoring if self.inputs.motion_filter_type: - filtered_fd_timeseries = motion_df["framewise_displacement_filtered"].values + filtered_fd_timeseries = motion_df['framewise_displacement_filtered'].values ax.plot( time_array, filtered_fd_timeseries, - label="Filtered Framewise Displacement", + label='Filtered Framewise Displacement', color=palette[2], ) else: @@ -139,7 +140,7 @@ def _run_interface(self, runtime): # Plot randomly censored volumes as well # These vertical lines start at the top and only go 20% of the way down the plot. # They are plotted in non-overlapping segments. - exact_columns = [col for col in censoring_df.columns if col.startswith("exact_")] + exact_columns = [col for col in censoring_df.columns if col.startswith('exact_')] vline_ymax = 1 for i_col, exact_col in enumerate(exact_columns): tmask_arr = censoring_df[exact_col].values @@ -148,7 +149,7 @@ def _run_interface(self, runtime): vline_ymin = vline_ymax - vline_yspan for j_idx, idx in enumerate(tmask_idx): - label = f"Randomly Censored Volumes {exact_col}" if j_idx == 0 else "" + label = f'Randomly Censored Volumes {exact_col}' if j_idx == 0 else '' ax.axvline( idx * self.inputs.TR, ymin=vline_ymin, @@ -161,11 +162,11 @@ def _run_interface(self, runtime): vline_ymax = vline_ymin # Plot motion-censored volumes as vertical lines - tmask_arr = censoring_df["framewise_displacement"].values + tmask_arr = censoring_df['framewise_displacement'].values assert preproc_fd_timeseries.size == tmask_arr.size tmask_idx = np.where(tmask_arr)[0] for i_idx, idx in enumerate(tmask_idx): - label = "Motion-Censored Volumes" if i_idx == 0 else "" + label = 'Motion-Censored Volumes' if i_idx == 0 else '' ax.axvline( idx * self.inputs.TR, label=label, @@ -173,19 +174,19 @@ def _run_interface(self, runtime): alpha=0.5, ) - ax.set_xlabel("Time (seconds)", fontsize=10) - ax.set_ylabel("Movement (millimeters)", fontsize=10) + ax.set_xlabel('Time (seconds)', fontsize=10) + ax.set_ylabel('Movement (millimeters)', fontsize=10) ax.legend(fontsize=10) fig.tight_layout() - self._results["out_file"] = fname_presuffix( - "censoring", - suffix="_motion.svg", + self._results['out_file'] = fname_presuffix( + 'censoring', + suffix='_motion.svg', newpath=runtime.cwd, use_ext=False, ) - fig.savefig(self._results["out_file"]) + fig.savefig(self._results['out_file']) plt.close(fig) return runtime @@ -194,39 +195,39 @@ class _QCPlotsInputSpec(BaseInterfaceInputSpec): bold_file = File( exists=True, mandatory=True, - desc="Preprocessed BOLD file, after dummy scan removal. Used in carpet plot.", + desc='Preprocessed BOLD file, after dummy scan removal. 
Used in carpet plot.', ) temporal_mask = traits.Either( File(exists=True), Undefined, - desc="Temporal mask", + desc='Temporal mask', ) motion_file = File( exists=True, mandatory=True, - desc="fMRIPrep confounds file, after dummy scans removal", + desc='fMRIPrep confounds file, after dummy scans removal', ) cleaned_file = File( exists=True, mandatory=True, - desc="Processed file, after denoising and censoring.", + desc='Processed file, after denoising and censoring.', ) - TR = traits.Float(mandatory=True, desc="Repetition Time") - head_radius = traits.Float(mandatory=True, desc="Head radius for FD calculation") + TR = traits.Float(mandatory=True, desc='Repetition Time') + head_radius = traits.Float(mandatory=True, desc='Head radius for FD calculation') mask_file = traits.Either( None, File(exists=True), mandatory=True, - desc="Mask file from nifti. May be None, for CIFTI processing.", + desc='Mask file from nifti. May be None, for CIFTI processing.', ) # Inputs used only for nifti data - seg_file = File(exists=True, mandatory=False, desc="Seg file for nifti") + seg_file = File(exists=True, mandatory=False, desc='Seg file for nifti') class _QCPlotsOutputSpec(TraitedSpec): - raw_qcplot = File(exists=True, desc="qc plot before regression") - clean_qcplot = File(exists=True, desc="qc plot after regression") + raw_qcplot = File(exists=True, desc='qc plot before regression') + clean_qcplot = File(exists=True, desc='qc plot after regression') class QCPlots(SimpleInterface): @@ -256,15 +257,15 @@ class QCPlots(SimpleInterface): def _run_interface(self, runtime): # Load confound matrix and load motion without motion filtering motion_df = pd.read_table(self.inputs.motion_file) - if "framewise_displacement_filtered" in motion_df.columns: - preproc_fd_timeseries = motion_df["framewise_displacement_filtered"].values + if 'framewise_displacement_filtered' in motion_df.columns: + preproc_fd_timeseries = motion_df['framewise_displacement_filtered'].values else: - preproc_fd_timeseries = motion_df["framewise_displacement"].values + preproc_fd_timeseries = motion_df['framewise_displacement'].values # Determine number of dummy volumes and load temporal mask if isdefined(self.inputs.temporal_mask): censoring_df = pd.read_table(self.inputs.temporal_mask) - tmask_arr = censoring_df["framewise_displacement"].values + tmask_arr = censoring_df['framewise_displacement'].values else: tmask_arr = np.zeros(preproc_fd_timeseries.size, dtype=int) @@ -272,15 +273,15 @@ def _run_interface(self, runtime): postproc_fd_timeseries = preproc_fd_timeseries[tmask_arr == 0] # get QC plot names - self._results["raw_qcplot"] = fname_presuffix( - "preprocess", - suffix="_raw_qcplot.svg", + self._results['raw_qcplot'] = fname_presuffix( + 'preprocess', + suffix='_raw_qcplot.svg', newpath=runtime.cwd, use_ext=False, ) - self._results["clean_qcplot"] = fname_presuffix( - "postprocess", - suffix="_clean_qcplot.svg", + self._results['clean_qcplot'] = fname_presuffix( + 'postprocess', + suffix='_clean_qcplot.svg', newpath=runtime.cwd, use_ext=False, ) @@ -299,12 +300,12 @@ def _run_interface(self, runtime): )[1] if preproc_fd_timeseries.size != dvars_before_processing.size: raise ValueError( - f"FD {preproc_fd_timeseries.size} != DVARS {dvars_before_processing.size}\n" + f'FD {preproc_fd_timeseries.size} != DVARS {dvars_before_processing.size}\n' ) preproc_confounds = pd.DataFrame( { - "FD": preproc_fd_timeseries, - "DVARS": dvars_before_processing, + 'FD': preproc_fd_timeseries, + 'DVARS': dvars_before_processing, } ) @@ -316,15 +317,15 
@@ def _run_interface(self, runtime): ).plot(labelsize=8) preproc_fig.savefig( - self._results["raw_qcplot"], - bbox_inches="tight", + self._results['raw_qcplot'], + bbox_inches='tight', ) plt.close(preproc_fig) postproc_confounds = pd.DataFrame( { - "FD": postproc_fd_timeseries, - "DVARS": dvars_after_processing, + 'FD': postproc_fd_timeseries, + 'DVARS': dvars_after_processing, } ) @@ -336,8 +337,8 @@ def _run_interface(self, runtime): ).plot(labelsize=8) postproc_fig.savefig( - self._results["clean_qcplot"], - bbox_inches="tight", + self._results['clean_qcplot'], + bbox_inches='tight', ) plt.close(postproc_fig) @@ -349,54 +350,54 @@ class _QCPlotsESInputSpec(BaseInterfaceInputSpec): exists=True, mandatory=True, desc=( - "Preprocessed BOLD file, after mean-centering and detrending " - "*using only the low-motion volumes*." + 'Preprocessed BOLD file, after mean-centering and detrending ' + '*using only the low-motion volumes*.' ), ) denoised_interpolated_bold = File( exists=True, mandatory=True, - desc="Data after filtering, interpolation, etc. This is not plotted.", + desc='Data after filtering, interpolation, etc. This is not plotted.', ) motion_file = File( exists=True, mandatory=True, - desc="TSV file with filtered motion parameters.", + desc='TSV file with filtered motion parameters.', ) temporal_mask = traits.Either( File(exists=True), Undefined, - desc="TSV file with temporal mask.", + desc='TSV file with temporal mask.', ) - TR = traits.Float(1, usedefault=True, desc="Repetition time") + TR = traits.Float(1, usedefault=True, desc='Repetition time') standardize = traits.Bool( mandatory=True, desc=( - "Whether to standardize the data or not. " - "If False, then the preferred DCAN version of the plot will be generated, " - "where the BOLD data are not rescaled, and the carpet plot has color limits from " - "the 2.5th percentile to the 97.5th percentile. " - "If True, then the BOLD data will be z-scored and the color limits will be -2 and 2." + 'Whether to standardize the data or not. ' + 'If False, then the preferred DCAN version of the plot will be generated, ' + 'where the BOLD data are not rescaled, and the carpet plot has color limits from ' + 'the 2.5th percentile to the 97.5th percentile. ' + 'If True, then the BOLD data will be z-scored and the color limits will be -2 and 2.' ), ) # Optional inputs - mask = File(exists=True, mandatory=False, desc="Bold mask") - seg_data = File(exists=True, mandatory=False, desc="Segmentation file") + mask = File(exists=True, mandatory=False, desc='Bold mask') + seg_data = File(exists=True, mandatory=False, desc='Segmentation file') run_index = traits.Either( traits.List(traits.Int()), Undefined, mandatory=False, desc=( - "An index indicating splits between runs, for concatenated data. " - "If not Undefined, this should be a list of integers, indicating the volumes." + 'An index indicating splits between runs, for concatenated data. ' + 'If not Undefined, this should be a list of integers, indicating the volumes.' 
), ) class _QCPlotsESOutputSpec(TraitedSpec): - before_process = File(exists=True, mandatory=True, desc=".SVG file before processing") - after_process = File(exists=True, mandatory=True, desc=".SVG file after processing") + before_process = File(exists=True, mandatory=True, desc='.SVG file before processing') + after_process = File(exists=True, mandatory=True, desc='.SVG file after processing') class QCPlotsES(SimpleInterface): @@ -416,15 +417,15 @@ class QCPlotsES(SimpleInterface): def _run_interface(self, runtime): preprocessed_figure = fname_presuffix( - "carpetplot_before_", - suffix="file.svg", + 'carpetplot_before_', + suffix='file.svg', newpath=runtime.cwd, use_ext=False, ) denoised_figure = fname_presuffix( - "carpetplot_after_", - suffix="file.svg", + 'carpetplot_after_', + suffix='file.svg', newpath=runtime.cwd, use_ext=False, ) @@ -438,7 +439,7 @@ def _run_interface(self, runtime): run_index = self.inputs.run_index run_index = np.array(run_index) if isdefined(run_index) else None - self._results["before_process"], self._results["after_process"] = plot_fmri_es( + self._results['before_process'], self._results['after_process'] = plot_fmri_es( preprocessed_bold=self.inputs.preprocessed_bold, denoised_interpolated_bold=self.inputs.denoised_interpolated_bold, TR=self.inputs.TR, @@ -457,11 +458,11 @@ def _run_interface(self, runtime): class _AnatomicalPlotInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="plot image") + in_file = File(exists=True, mandatory=True, desc='plot image') class _AnatomicalPlotOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="out image") + out_file = File(exists=True, desc='out image') class AnatomicalPlot(SimpleInterface): @@ -471,8 +472,8 @@ class AnatomicalPlot(SimpleInterface): output_spec = _AnatomicalPlotOutputSpec def _run_interface(self, runtime): - self._results["out_file"] = fname_presuffix( - self.inputs.in_file, suffix="_file.svg", newpath=runtime.cwd, use_ext=False + self._results['out_file'] = fname_presuffix( + self.inputs.in_file, suffix='_file.svg', newpath=runtime.cwd, use_ext=False ) img = nb.load(self.inputs.in_file) arr = img.get_fdata() @@ -487,7 +488,7 @@ def _run_interface(self, runtime): cut_coords=[0, 0, 0], annotate=False, ) - fig.savefig(self._results["out_file"], bbox_inches="tight", pad_inches=None) + fig.savefig(self._results['out_file'], bbox_inches='tight', pad_inches=None) plt.close(fig) return runtime @@ -495,49 +496,49 @@ def _run_interface(self, runtime): class _SlicesDirInputSpec(FSLCommandInputSpec): is_pairs = traits.Bool( - argstr="-o", + argstr='-o', position=0, - desc="filelist is pairs ( ) of images", + desc='filelist is pairs ( ) of images', ) outline_image = File( exists=True, - argstr="-p %s", + argstr='-p %s', position=1, - desc="use as red-outline image on top of all images in ", + desc='use as red-outline image on top of all images in ', ) edge_threshold = traits.Float( - argstr="-e %.03f", + argstr='-e %.03f', position=2, desc=( - "use the specified threshold for edges (if >0 use this proportion of max-min, " - "if <0, use the absolute value)" + 'use the specified threshold for edges (if >0 use this proportion of max-min, ' + 'if <0, use the absolute value)' ), ) output_odd_axials = traits.Bool( - argstr="-S", + argstr='-S', position=3, - desc="output every second axial slice rather than just 9 ortho slices", + desc='output every second axial slice rather than just 9 ortho slices', ) in_files = InputMultiPath( File(exists=True), - argstr="%s", + argstr='%s', 
mandatory=True, position=-1, - desc="List of files to process.", + desc='List of files to process.', ) out_extension = traits.Enum( - (".gif", ".png", ".svg"), + ('.gif', '.png', '.svg'), usedefault=True, - desc="Convenience parameter to let xcp_d select the extension.", + desc='Convenience parameter to let xcp_d select the extension.', ) class _SlicesDirOutputSpec(TraitedSpec): - out_dir = Directory(exists=True, desc="Output directory.") - out_files = OutputMultiPath(File(exists=True), desc="Concatenated PNG files.") - slicewise_files = OutputMultiPath(File(exists=True), desc="List of generated PNG files.") + out_dir = Directory(exists=True, desc='Output directory.') + out_files = OutputMultiPath(File(exists=True), desc='Concatenated PNG files.') + slicewise_files = OutputMultiPath(File(exists=True), desc='List of generated PNG files.') class SlicesDir(FSLCommand): @@ -553,7 +554,7 @@ class SlicesDir(FSLCommand): -S : output every second axial slice rather than just 9 ortho slices """ - _cmd = "slicesdir" + _cmd = 'slicesdir' input_spec = _SlicesDirInputSpec output_spec = _SlicesDirOutputSpec @@ -572,32 +573,32 @@ def _list_outputs(self): """ outputs = self._outputs().get() - out_dir = os.path.abspath(os.path.join(os.getcwd(), "slicesdir")) - outputs["out_dir"] = out_dir - outputs["out_files"] = [ + out_dir = os.path.abspath(os.path.join(os.getcwd(), 'slicesdir')) + outputs['out_dir'] = out_dir + outputs['out_files'] = [ self._gen_fname( - basename=f.replace(os.sep, "_"), + basename=f.replace(os.sep, '_'), cwd=out_dir, ext=self.inputs.out_extension, ) for f in self.inputs.in_files ] temp_files = [ - "grota.png", - "grotb.png", - "grotc.png", - "grotd.png", - "grote.png", - "grotf.png", - "grotg.png", - "groth.png", - "groti.png", + 'grota.png', + 'grotb.png', + 'grotc.png', + 'grotd.png', + 'grote.png', + 'grotf.png', + 'grotg.png', + 'groth.png', + 'groti.png', ] - outputs["slicewise_files"] = [os.path.join(out_dir, f) for f in temp_files] + outputs['slicewise_files'] = [os.path.join(out_dir, f) for f in temp_files] return outputs def _gen_filename(self, name): - if name == "out_files": + if name == 'out_files': return self._list_outputs()[name] return None @@ -607,15 +608,15 @@ class _PNGAppendInputSpec(FSLCommandInputSpec): in_files = InputMultiPath( File(exists=True), mandatory=True, - argstr="%s", + argstr='%s', position=0, - desc="List of files to process.", + desc='List of files to process.', ) - out_file = File(exists=False, mandatory=True, argstr="%s", position=1, desc="Output file.") + out_file = File(exists=False, mandatory=True, argstr='%s', position=1, desc='Output file.') class _PNGAppendOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Output file.") + out_file = File(exists=True, desc='Output file.') class PNGAppend(FSLCommand): @@ -633,22 +634,22 @@ class PNGAppend(FSLCommand): note that files with .gif extension will be input/output in GIF format """ - _cmd = "pngappend" + _cmd = 'pngappend' input_spec = _PNGAppendInputSpec output_spec = _PNGAppendOutputSpec def _format_arg(self, name, spec, value): - if name == "in_files": + if name == 'in_files': if isinstance(value, str): value = [value] - return " + ".join(value) + return ' + '.join(value) - return super(PNGAppend, self)._format_arg(name, spec, value) + return super()._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() - outputs["out_file"] = os.path.abspath(self.inputs.out_file) + outputs['out_file'] = os.path.abspath(self.inputs.out_file) return outputs @@ -656,7 
+657,7 @@ class _PlotCiftiParcellationInputSpec(BaseInterfaceInputSpec): in_files = traits.List( File(exists=True), mandatory=True, - desc="CIFTI files to plot.", + desc='CIFTI files to plot.', ) cortical_atlases = traits.List( traits.Str, @@ -666,48 +667,48 @@ class _PlotCiftiParcellationInputSpec(BaseInterfaceInputSpec): labels = traits.List( traits.Str, mandatory=True, - desc="Labels for the CIFTI files.", + desc='Labels for the CIFTI files.', ) out_file = File( - "plot.svg", + 'plot.svg', exists=False, mandatory=False, - desc="Output file.", + desc='Output file.', usedefault=True, ) vmin = traits.Float( 0, mandatory=False, usedefault=True, - desc="Minimum value for the colormap.", + desc='Minimum value for the colormap.', ) vmax = traits.Float( 0, mandatory=False, usedefault=True, - desc="Maximum value for the colormap.", + desc='Maximum value for the colormap.', ) base_desc = traits.Str( - "", + '', mandatory=False, usedefault=True, - desc="Base description for the output file.", + desc='Base description for the output file.', ) lh_underlay = File( exists=True, mandatory=False, - desc="Left hemisphere underlay.", + desc='Left hemisphere underlay.', ) rh_underlay = File( exists=True, mandatory=False, - desc="Right hemisphere underlay.", + desc='Right hemisphere underlay.', ) class _PlotCiftiParcellationOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Output file.") - desc = traits.Str(desc="Description of the output file.") + out_file = File(exists=True, desc='Output file.') + desc = traits.Str(desc='Description of the output file.') class PlotCiftiParcellation(SimpleInterface): @@ -721,27 +722,27 @@ def _run_interface(self, runtime): assert len(self.inputs.cortical_atlases) > 0 if not (isdefined(self.inputs.lh_underlay) and isdefined(self.inputs.rh_underlay)): - self._results["desc"] = f"{self.inputs.base_desc}ParcellatedStandard" + self._results['desc'] = f'{self.inputs.base_desc}ParcellatedStandard' rh = str( get_template( - template="fsLR", - hemi="R", - density="32k", - suffix="midthickness", - extension=".surf.gii", + template='fsLR', + hemi='R', + density='32k', + suffix='midthickness', + extension='.surf.gii', ) ) lh = str( get_template( - template="fsLR", - hemi="L", - density="32k", - suffix="midthickness", - extension=".surf.gii", + template='fsLR', + hemi='L', + density='32k', + suffix='midthickness', + extension='.surf.gii', ) ) else: - self._results["desc"] = f"{self.inputs.base_desc}ParcellatedSubject" + self._results['desc'] = f'{self.inputs.base_desc}ParcellatedSubject' rh = self.inputs.rh_underlay lh = self.inputs.lh_underlay @@ -798,7 +799,7 @@ def _run_interface(self, runtime): # Create 4 Axes (2 rows, 2 columns) from the subplot gs_inner = GridSpecFromSubplotSpec(2, 2, subplot_spec=subplot_gridspec) inner_subplots = [ - fig.add_subplot(gs_inner[i, j], projection="3d") + fig.add_subplot(gs_inner[i, j], projection='3d') for i in range(2) for j in range(2) ] @@ -809,12 +810,12 @@ def _run_interface(self, runtime): lh_surf_data = surf_data_from_cifti( img_data, img_axes[1], - "CIFTI_STRUCTURE_CORTEX_LEFT", + 'CIFTI_STRUCTURE_CORTEX_LEFT', ) rh_surf_data = surf_data_from_cifti( img_data, img_axes[1], - "CIFTI_STRUCTURE_CORTEX_RIGHT", + 'CIFTI_STRUCTURE_CORTEX_RIGHT', ) plot_surf_stat_map( @@ -823,10 +824,10 @@ def _run_interface(self, runtime): threshold=threshold, vmin=vmin, vmax=vmax, - hemi="left", - view="lateral", - engine="matplotlib", - cmap="cool", + hemi='left', + view='lateral', + engine='matplotlib', + cmap='cool', colorbar=False, 
axes=inner_subplots[0], figure=fig, @@ -837,10 +838,10 @@ def _run_interface(self, runtime): threshold=threshold, vmin=vmin, vmax=vmax, - hemi="right", - view="lateral", - engine="matplotlib", - cmap="cool", + hemi='right', + view='lateral', + engine='matplotlib', + cmap='cool', colorbar=False, axes=inner_subplots[1], figure=fig, @@ -851,10 +852,10 @@ def _run_interface(self, runtime): threshold=threshold, vmin=vmin, vmax=vmax, - hemi="left", - view="medial", - engine="matplotlib", - cmap="cool", + hemi='left', + view='medial', + engine='matplotlib', + cmap='cool', colorbar=False, axes=inner_subplots[2], figure=fig, @@ -865,10 +866,10 @@ def _run_interface(self, runtime): threshold=threshold, vmin=vmin, vmax=vmax, - hemi="right", - view="medial", - engine="matplotlib", - cmap="cool", + hemi='right', + view='medial', + engine='matplotlib', + cmap='cool', colorbar=False, axes=inner_subplots[3], figure=fig, @@ -878,24 +879,24 @@ def _run_interface(self, runtime): ax.set_rasterized(True) # Create a ScalarMappable with the "cool" colormap and the specified vmin and vmax - sm = ScalarMappable(cmap="cool", norm=Normalize(vmin=vmin, vmax=vmax)) + sm = ScalarMappable(cmap='cool', norm=Normalize(vmin=vmin, vmax=vmax)) for colorbar_gridspec in cbar_gs_list: colorbar_ax = fig.add_subplot(colorbar_gridspec) # Add a colorbar to colorbar_ax using the ScalarMappable fig.colorbar(sm, cax=colorbar_ax) - self._results["out_file"] = fname_presuffix( + self._results['out_file'] = fname_presuffix( cortical_files[0], - suffix="_file.svg", + suffix='_file.svg', newpath=runtime.cwd, use_ext=False, ) fig.savefig( - self._results["out_file"], - bbox_inches="tight", + self._results['out_file'], + bbox_inches='tight', pad_inches=None, - format="svg", + format='svg', ) plt.close(fig) @@ -906,29 +907,29 @@ class _PlotDenseCiftiInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, - desc="CIFTI file to plot.", + desc='CIFTI file to plot.', ) base_desc = traits.Str( - "", + '', mandatory=False, usedefault=True, - desc="Base description for the output file.", + desc='Base description for the output file.', ) lh_underlay = File( exists=True, mandatory=False, - desc="Left hemisphere underlay.", + desc='Left hemisphere underlay.', ) rh_underlay = File( exists=True, mandatory=False, - desc="Right hemisphere underlay.", + desc='Right hemisphere underlay.', ) class _PlotDenseCiftiOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Output file.") - desc = traits.Str(desc="Description of the output file.") + out_file = File(exists=True, desc='Output file.') + desc = traits.Str(desc='Description of the output file.') class PlotDenseCifti(SimpleInterface): @@ -939,27 +940,27 @@ class PlotDenseCifti(SimpleInterface): def _run_interface(self, runtime): if not (isdefined(self.inputs.lh_underlay) and isdefined(self.inputs.rh_underlay)): - self._results["desc"] = f"{self.inputs.base_desc}SurfaceStandard" + self._results['desc'] = f'{self.inputs.base_desc}SurfaceStandard' rh = str( get_template( - template="fsLR", - hemi="R", - density="32k", - suffix="midthickness", - extension=".surf.gii", + template='fsLR', + hemi='R', + density='32k', + suffix='midthickness', + extension='.surf.gii', ) ) lh = str( get_template( - template="fsLR", - hemi="L", - density="32k", - suffix="midthickness", - extension=".surf.gii", + template='fsLR', + hemi='L', + density='32k', + suffix='midthickness', + extension='.surf.gii', ) ) else: - self._results["desc"] = f"{self.inputs.base_desc}SurfaceSubject" + 
self._results['desc'] = f'{self.inputs.base_desc}SurfaceSubject' rh = self.inputs.rh_underlay lh = self.inputs.lh_underlay @@ -981,18 +982,18 @@ def _run_interface(self, runtime): # Create 4 Axes (2 rows, 2 columns) from the subplot gs_inner = GridSpecFromSubplotSpec(2, 2, subplot_spec=subplot_gridspec) inner_subplots = [ - fig.add_subplot(gs_inner[i, j], projection="3d") for i in range(2) for j in range(2) + fig.add_subplot(gs_inner[i, j], projection='3d') for i in range(2) for j in range(2) ] lh_surf_data = surf_data_from_cifti( cifti_data, cifti_axes[1], - "CIFTI_STRUCTURE_CORTEX_LEFT", + 'CIFTI_STRUCTURE_CORTEX_LEFT', ) rh_surf_data = surf_data_from_cifti( cifti_data, cifti_axes[1], - "CIFTI_STRUCTURE_CORTEX_RIGHT", + 'CIFTI_STRUCTURE_CORTEX_RIGHT', ) vmax = np.nanmax([np.nanmax(lh_surf_data), np.nanmax(rh_surf_data)]) @@ -1003,10 +1004,10 @@ def _run_interface(self, runtime): lh_surf_data, vmin=vmin, vmax=vmax, - hemi="left", - view="lateral", - engine="matplotlib", - cmap="cool", + hemi='left', + view='lateral', + engine='matplotlib', + cmap='cool', colorbar=False, axes=inner_subplots[0], figure=fig, @@ -1016,10 +1017,10 @@ def _run_interface(self, runtime): rh_surf_data, vmin=vmin, vmax=vmax, - hemi="right", - view="lateral", - engine="matplotlib", - cmap="cool", + hemi='right', + view='lateral', + engine='matplotlib', + cmap='cool', colorbar=False, axes=inner_subplots[1], figure=fig, @@ -1029,10 +1030,10 @@ def _run_interface(self, runtime): lh_surf_data, vmin=vmin, vmax=vmax, - hemi="left", - view="medial", - engine="matplotlib", - cmap="cool", + hemi='left', + view='medial', + engine='matplotlib', + cmap='cool', colorbar=False, axes=inner_subplots[2], figure=fig, @@ -1042,40 +1043,40 @@ def _run_interface(self, runtime): rh_surf_data, vmin=vmin, vmax=vmax, - hemi="right", - view="medial", - engine="matplotlib", - cmap="cool", + hemi='right', + view='medial', + engine='matplotlib', + cmap='cool', colorbar=False, axes=inner_subplots[3], figure=fig, ) - inner_subplots[0].set_title("Left Hemisphere", fontsize=10) - inner_subplots[1].set_title("Right Hemisphere", fontsize=10) + inner_subplots[0].set_title('Left Hemisphere', fontsize=10) + inner_subplots[1].set_title('Right Hemisphere', fontsize=10) for ax in inner_subplots: ax.set_rasterized(True) # Create a ScalarMappable with the "cool" colormap and the specified vmin and vmax - sm = ScalarMappable(cmap="cool", norm=Normalize(vmin=vmin, vmax=vmax)) + sm = ScalarMappable(cmap='cool', norm=Normalize(vmin=vmin, vmax=vmax)) colorbar_ax = fig.add_subplot(colorbar_gridspec) # Add a colorbar to colorbar_ax using the ScalarMappable fig.colorbar(sm, cax=colorbar_ax) - self._results["out_file"] = fname_presuffix( + self._results['out_file'] = fname_presuffix( self.inputs.in_file, - suffix="_file.svg", + suffix='_file.svg', newpath=runtime.cwd, use_ext=False, ) fig.tight_layout() fig.savefig( - self._results["out_file"], - bbox_inches="tight", + self._results['out_file'], + bbox_inches='tight', pad_inches=None, - format="svg", + format='svg', ) plt.close(fig) @@ -1086,17 +1087,17 @@ class _PlotNiftiInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, - desc="CIFTI file to plot.", + desc='CIFTI file to plot.', ) name_source = File( exists=False, mandatory=True, - desc="File to use as the name source.", + desc='File to use as the name source.', ) class _PlotNiftiOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Output file.") + out_file = File(exists=True, desc='Output file.') class 
PlotNifti(SimpleInterface): @@ -1108,25 +1109,25 @@ class PlotNifti(SimpleInterface): def _run_interface(self, runtime): from bids.layout import parse_file_entities - ENTITIES_TO_USE = ["cohort", "den", "res"] + ENTITIES_TO_USE = ['cohort', 'den', 'res'] # templateflow uses the full entity names in its BIDSLayout config, # so we need to map the abbreviated names used by xcpd and pybids to the full ones. - ENTITY_NAMES_MAPPER = {"den": "density", "res": "resolution"} - space = parse_file_entities(self.inputs.name_source)["space"] + ENTITY_NAMES_MAPPER = {'den': 'density', 'res': 'resolution'} + space = parse_file_entities(self.inputs.name_source)['space'] file_entities = parse_file_entities(self.inputs.name_source) entities_to_use = {f: file_entities[f] for f in file_entities if f in ENTITIES_TO_USE} entities_to_use = {ENTITY_NAMES_MAPPER.get(k, k): v for k, v in entities_to_use.items()} - template_file = get_template(template=space, **entities_to_use, suffix="T1w", desc=None) + template_file = get_template(template=space, **entities_to_use, suffix='T1w', desc=None) if isinstance(template_file, list): template_file = template_file[0] template = str(template_file) - self._results["out_file"] = fname_presuffix( + self._results['out_file'] = fname_presuffix( self.inputs.in_file, - suffix="_plot.svg", + suffix='_plot.svg', newpath=runtime.cwd, use_ext=False, ) @@ -1134,9 +1135,9 @@ def _run_interface(self, runtime): plot_stat_map( self.inputs.in_file, bg_img=template, - display_mode="mosaic", + display_mode='mosaic', cut_coords=8, colorbar=True, - output_file=self._results["out_file"], + output_file=self._results['out_file'], ) return runtime diff --git a/xcp_d/interfaces/report.py b/xcp_d/interfaces/report.py index 5ca4ba02f..c2923a008 100644 --- a/xcp_d/interfaces/report.py +++ b/xcp_d/interfaces/report.py @@ -52,7 +52,7 @@ class _SummaryInterfaceOutputSpec(TraitedSpec): """Output specification for SummaryInterface.""" - out_report = File(exists=True, desc="HTML segment containing summary") + out_report = File(exists=True, desc='HTML segment containing summary') class SummaryInterface(SimpleInterface): @@ -66,10 +66,10 @@ class SummaryInterface(SimpleInterface): def _run_interface(self, runtime): # Open a file to write information to segment = self._generate_segment() - file_name = os.path.join(runtime.cwd, "report.html") - with open(file_name, "w") as file_object: + file_name = os.path.join(runtime.cwd, 'report.html') + with open(file_name, 'w') as file_object: file_object.write(segment) - self._results["out_report"] = file_name + self._results['out_report'] = file_name return runtime def _generate_segment(self): @@ -79,14 +79,14 @@ def _generate_segment(self): class _SubjectSummaryInputSpec(BaseInterfaceInputSpec): """Input specification for SubjectSummaryInterface.""" - subject_id = Str(desc="Subject ID") + subject_id = Str(desc='Subject ID') # A list of files or a list of lists of files? 
bold = InputMultiObject( traits.Either( File(exists=True), traits.List(File(exists=True)), ), - desc="BOLD or CIFTI functional series", + desc='BOLD or CIFTI functional series', ) @@ -95,7 +95,7 @@ class _SubjectSummaryOutputSpec(_SummaryInterfaceOutputSpec): # This exists to ensure that the summary is run prior to the first ReconAll # call, allowing a determination whether there is a pre-existing directory - subject_id = Str(desc="Subject ID") + subject_id = Str(desc='Subject ID') class SubjectSummary(SummaryInterface): @@ -106,8 +106,8 @@ class SubjectSummary(SummaryInterface): def _run_interface(self, runtime): if isdefined(self.inputs.subject_id): - self._results["subject_id"] = self.inputs.subject_id - return super(SubjectSummary, self)._run_interface(runtime) + self._results['subject_id'] = self.inputs.subject_id + return super()._run_interface(runtime) def _generate_segment(self): # Add list of tasks with number of runs @@ -125,15 +125,15 @@ class _FunctionalSummaryInputSpec(BaseInterfaceInputSpec): exists=False, mandatory=True, desc=( - "CIFTI or NIfTI BOLD file. " - "This file does not need to exist, " - "because this input is just used for extracting filename information." + 'CIFTI or NIfTI BOLD file. ' + 'This file does not need to exist, ' + 'because this input is just used for extracting filename information.' ), ) - qc_file = traits.File(exists=True, mandatory=True, desc="qc file") + qc_file = traits.File(exists=True, mandatory=True, desc='qc file') TR = traits.Float( mandatory=True, - desc="Repetition time", + desc='Repetition time', ) @@ -144,11 +144,11 @@ class FunctionalSummary(SummaryInterface): # Get information from the QC file and return it def _generate_segment(self): - space = get_entity(self.inputs.bold_file, "space") + space = get_entity(self.inputs.bold_file, 'space') qcfile = pd.read_table(self.inputs.qc_file) - mean_fd = str(round(qcfile["mean_fd"][0], 4)) - mean_relative_rms = str(round(qcfile["mean_relative_rms"][0], 4)) - max_relative_rms = str(round(qcfile["max_relative_rms"][0], 4)) + mean_fd = str(round(qcfile['mean_fd'][0], 4)) + mean_relative_rms = str(round(qcfile['mean_relative_rms'][0], 4)) + max_relative_rms = str(round(qcfile['max_relative_rms'][0], 4)) dvars = ( f"{round(qcfile['mean_dvars_initial'][0], 4)}, " f"{round(qcfile['mean_dvars_final'][0], 4)}" @@ -157,7 +157,7 @@ def _generate_segment(self): f"{round(qcfile['fd_dvars_correlation_initial'][0], 4)}, " f"{round(qcfile['fd_dvars_correlation_final'][0], 4)}" ) - num_vols_censored = str(round(qcfile["num_censored_volumes"][0], 4)) + num_vols_censored = str(round(qcfile['num_censored_volumes'][0], 4)) return QC_TEMPLATE.format( space=space, @@ -174,8 +174,8 @@ def _generate_segment(self): class _AboutSummaryInputSpec(BaseInterfaceInputSpec): """Input specification for AboutSummary.""" - version = Str(desc="xcp_d version") - command = Str(desc="xcp_d command") + version = Str(desc='xcp_d version') + command = Str(desc='xcp_d command') # Date not included - update timestamp only if version or command changes @@ -188,5 +188,5 @@ def _generate_segment(self): return ABOUT_TEMPLATE.format( version=self.inputs.version, command=self.inputs.command, - date=time.strftime("%Y-%m-%d %H:%M:%S %z"), + date=time.strftime('%Y-%m-%d %H:%M:%S %z'), ) diff --git a/xcp_d/interfaces/restingstate.py b/xcp_d/interfaces/restingstate.py index 011f5bfdf..83ef883ae 100644 --- a/xcp_d/interfaces/restingstate.py +++ b/xcp_d/interfaces/restingstate.py @@ -5,6 +5,7 @@ .. 
testsetup:: """ + import os import shutil @@ -26,18 +27,18 @@ from xcp_d.utils.restingstate import compute_2d_reho, mesh_adjacency from xcp_d.utils.write_save import read_gii, read_ndata, write_gii, write_ndata -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') # compute 2D reho class _SurfaceReHoInputSpec(BaseInterfaceInputSpec): - surf_bold = File(exists=True, mandatory=True, desc="left or right hemisphere gii ") + surf_bold = File(exists=True, mandatory=True, desc='left or right hemisphere gii ') # TODO: Change to Enum - surf_hemi = traits.Str(mandatory=True, desc="L or R ") + surf_hemi = traits.Str(mandatory=True, desc='L or R ') class _SurfaceReHoOutputSpec(TraitedSpec): - surf_gii = File(exists=True, mandatory=True, desc=" lh hemisphere reho") + surf_gii = File(exists=True, mandatory=True, desc=' lh hemisphere reho') class SurfaceReHo(SimpleInterface): @@ -72,13 +73,13 @@ def _run_interface(self, runtime): reho_surf = compute_2d_reho(datat=data_matrix, adjacency_matrix=mesh_matrix) # Write the output out - self._results["surf_gii"] = fname_presuffix( - self.inputs.surf_bold, suffix=".shape.gii", newpath=runtime.cwd, use_ext=False + self._results['surf_gii'] = fname_presuffix( + self.inputs.surf_bold, suffix='.shape.gii', newpath=runtime.cwd, use_ext=False ) write_gii( datat=reho_surf, template=self.inputs.surf_bold, - filename=self._results["surf_gii"], + filename=self._results['surf_gii'], hemi=self.inputs.surf_hemi, ) @@ -86,37 +87,37 @@ def _run_interface(self, runtime): class _ComputeALFFInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="nifti, cifti or gifti") - TR = traits.Float(mandatory=True, desc="repetition time") + in_file = File(exists=True, mandatory=True, desc='nifti, cifti or gifti') + TR = traits.Float(mandatory=True, desc='repetition time') low_pass = traits.Float( mandatory=True, - desc="low_pass filter in Hz", + desc='low_pass filter in Hz', ) high_pass = traits.Float( mandatory=True, - desc="high_pass filter in Hz", + desc='high_pass filter in Hz', ) mask = File( exists=True, mandatory=False, - desc=" brain mask for nifti file", + desc=' brain mask for nifti file', ) temporal_mask = traits.Either( File(exists=True), Undefined, mandatory=False, - desc="Temporal mask.", + desc='Temporal mask.', ) n_threads = traits.Int( 1, usedefault=True, - desc="number of threads to use", + desc='number of threads to use', nohash=True, ) class _ComputeALFFOutputSpec(TraitedSpec): - alff = File(exists=True, mandatory=True, desc=" alff") + alff = File(exists=True, mandatory=True, desc=' alff') class ComputeALFF(SimpleInterface): @@ -163,8 +164,8 @@ def _run_interface(self, runtime): if isinstance(temporal_mask, str) and os.path.isfile(temporal_mask): censoring_df = pd.read_table(temporal_mask) # Invert the temporal mask to make retained volumes 1s and dropped volumes 0s. 
- sample_mask = ~censoring_df["framewise_displacement"].values.astype(bool) - assert sample_mask.size == n_volumes, f"{sample_mask.size} != {n_volumes}" + sample_mask = ~censoring_df['framewise_displacement'].values.astype(bool) + assert sample_mask.size == n_volumes, f'{sample_mask.size} != {n_volumes}' alff_mat = np.zeros(n_voxels) with Pool(processes=self.inputs.n_threads) as pool: @@ -187,12 +188,12 @@ def _run_interface(self, runtime): alff_mat = alff_mat[:, None] # Write out the data - if self.inputs.in_file.endswith(".dtseries.nii"): - suffix = "_alff.dscalar.nii" - elif self.inputs.in_file.endswith(".nii.gz"): - suffix = "_alff.nii.gz" + if self.inputs.in_file.endswith('.dtseries.nii'): + suffix = '_alff.dscalar.nii' + elif self.inputs.in_file.endswith('.nii.gz'): + suffix = '_alff.nii.gz' - self._results["alff"] = fname_presuffix( + self._results['alff'] = fname_presuffix( self.inputs.in_file, suffix=suffix, newpath=runtime.cwd, @@ -201,7 +202,7 @@ def _run_interface(self, runtime): write_ndata( data_matrix=alff_mat, template=self.inputs.in_file, - filename=self._results["alff"], + filename=self._results['alff'], mask=self.inputs.mask, ) return runtime @@ -225,33 +226,33 @@ class ReHoNamePatch(SimpleInterface): >>> res = reho.run() # doctest: +SKIP """ - _cmd = "3dReHo" + _cmd = '3dReHo' input_spec = ReHoInputSpec output_spec = ReHoOutputSpec def _run_interface(self, runtime): - out_file = os.path.join(runtime.cwd, "reho.nii.gz") + out_file = os.path.join(runtime.cwd, 'reho.nii.gz') - in_file = os.path.join(runtime.cwd, "inset.nii.gz") + in_file = os.path.join(runtime.cwd, 'inset.nii.gz') shutil.copyfile(self.inputs.in_file, in_file) if traits_extension.isdefined(self.inputs.mask_file): - mask_file = os.path.join(runtime.cwd, "mask.nii.gz") + mask_file = os.path.join(runtime.cwd, 'mask.nii.gz') shutil.copyfile(self.inputs.mask_file, mask_file) - mask_cmd = f"-mask {mask_file}" + mask_cmd = f'-mask {mask_file}' else: - mask_cmd = "" + mask_cmd = '' - os.system(f"3dReHo -inset {in_file} {mask_cmd} -nneigh 27 -prefix {out_file}") - self._results["out_file"] = out_file + os.system(f'3dReHo -inset {in_file} {mask_cmd} -nneigh 27 -prefix {out_file}') # noqa: S605 + self._results['out_file'] = out_file class _DespikePatchInputSpec(DespikeInputSpec): out_file = File( mandatory=False, genfile=True, - desc="output image file name", - argstr="-prefix %s", + desc='output image file name', + argstr='-prefix %s', ) @@ -274,12 +275,12 @@ class DespikePatch(Despike): input_spec = _DespikePatchInputSpec def _gen_filename(self, name): - if name == "out_file": - return "inset.nii.gz" + if name == 'out_file': + return 'inset.nii.gz' else: return None def _list_outputs(self): outputs = self.output_spec().get() - outputs["out_file"] = os.path.abspath(self._gen_filename("out_file")) + outputs['out_file'] = os.path.abspath(self._gen_filename('out_file')) return outputs diff --git a/xcp_d/interfaces/utils.py b/xcp_d/interfaces/utils.py index d2c05e581..388857a62 100644 --- a/xcp_d/interfaces/utils.py +++ b/xcp_d/interfaces/utils.py @@ -24,42 +24,42 @@ from xcp_d.utils.qcmetrics import compute_dvars, compute_registration_qc from xcp_d.utils.write_save import read_ndata -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') class _ConvertTo32InputSpec(BaseInterfaceInputSpec): bold_file = traits.Either( None, File(exists=True), - desc="BOLD file", + desc='BOLD file', mandatory=False, usedefault=True, ) boldref = traits.Either( None, File(exists=True), - desc="BOLD 
reference file", + desc='BOLD reference file', mandatory=False, usedefault=True, ) bold_mask = traits.Either( None, File(exists=True), - desc="BOLD mask file", + desc='BOLD mask file', mandatory=False, usedefault=True, ) t1w = traits.Either( None, File(exists=True), - desc="T1-weighted anatomical file", + desc='T1-weighted anatomical file', mandatory=False, usedefault=True, ) t2w = traits.Either( None, File(exists=True), - desc="T2-weighted anatomical file", + desc='T2-weighted anatomical file', mandatory=False, usedefault=True, ) @@ -69,31 +69,31 @@ class _ConvertTo32OutputSpec(TraitedSpec): bold_file = traits.Either( None, File(exists=True), - desc="BOLD file", + desc='BOLD file', mandatory=False, ) boldref = traits.Either( None, File(exists=True), - desc="BOLD reference file", + desc='BOLD reference file', mandatory=False, ) bold_mask = traits.Either( None, File(exists=True), - desc="BOLD mask file", + desc='BOLD mask file', mandatory=False, ) t1w = traits.Either( None, File(exists=True), - desc="T1-weighted anatomical file", + desc='T1-weighted anatomical file', mandatory=False, ) t2w = traits.Either( None, File(exists=True), - desc="T2-weighted anatomical file", + desc='T2-weighted anatomical file', mandatory=False, ) @@ -105,11 +105,11 @@ class ConvertTo32(SimpleInterface): output_spec = _ConvertTo32OutputSpec def _run_interface(self, runtime): - self._results["bold_file"] = downcast_to_32(self.inputs.bold_file) - self._results["boldref"] = downcast_to_32(self.inputs.boldref) - self._results["bold_mask"] = downcast_to_32(self.inputs.bold_mask) - self._results["t1w"] = downcast_to_32(self.inputs.t1w) - self._results["t2w"] = downcast_to_32(self.inputs.t2w) + self._results['bold_file'] = downcast_to_32(self.inputs.bold_file) + self._results['boldref'] = downcast_to_32(self.inputs.boldref) + self._results['bold_mask'] = downcast_to_32(self.inputs.bold_mask) + self._results['t1w'] = downcast_to_32(self.inputs.t1w) + self._results['t2w'] = downcast_to_32(self.inputs.t2w) return runtime @@ -122,14 +122,14 @@ class _FilterUndefinedInputSpec(BaseInterfaceInputSpec): Undefined, ), mandatory=True, - desc="List of objects to filter.", + desc='List of objects to filter.', ) class _FilterUndefinedOutputSpec(TraitedSpec): outlist = OutputMultiObject( traits.Str, - desc="Filtered list of objects.", + desc='Filtered list of objects.', ) @@ -145,7 +145,7 @@ def _run_interface(self, runtime): for item in inlist: if item is not None and traits_extension.isdefined(item): outlist.append(item) - self._results["outlist"] = outlist + self._results['outlist'] = outlist return runtime @@ -154,44 +154,44 @@ class _LINCQCInputSpec(BaseInterfaceInputSpec): exists=False, mandatory=True, desc=( - "Preprocessed BOLD file. Used to find files. " - "In the case of the concatenation workflow, " - "this may be a nonexistent file " - "(i.e., the preprocessed BOLD file, with the run entity removed)." + 'Preprocessed BOLD file. Used to find files. ' + 'In the case of the concatenation workflow, ' + 'this may be a nonexistent file ' + '(i.e., the preprocessed BOLD file, with the run entity removed).' ), ) bold_file = File( exists=True, mandatory=True, - desc="Preprocessed BOLD file, after dummy scan removal. Used in carpet plot.", + desc='Preprocessed BOLD file, after dummy scan removal. 
Used in carpet plot.', ) - dummy_scans = traits.Int(mandatory=True, desc="Dummy time to drop") + dummy_scans = traits.Int(mandatory=True, desc='Dummy time to drop') temporal_mask = traits.Either( File(exists=True), Undefined, - desc="Temporal mask", + desc='Temporal mask', ) motion_file = File( exists=True, mandatory=True, - desc="fMRIPrep confounds file, after dummy scans removal", + desc='fMRIPrep confounds file, after dummy scans removal', ) cleaned_file = File( exists=True, mandatory=True, - desc="Processed file, after denoising and censoring.", + desc='Processed file, after denoising and censoring.', ) - TR = traits.Float(mandatory=True, desc="Repetition time, in seconds.") - head_radius = traits.Float(mandatory=True, desc="Head radius for FD calculation, in mm.") + TR = traits.Float(mandatory=True, desc='Repetition time, in seconds.') + head_radius = traits.Float(mandatory=True, desc='Head radius for FD calculation, in mm.') bold_mask_inputspace = traits.Either( None, File(exists=True), mandatory=True, desc=( - "Mask file from NIfTI. May be None, for CIFTI processing. " - "The mask is in the same space as the BOLD data, which may not be the same as the " - "bold_mask_stdspace file. " - "Used to load the masked BOLD data. Not used for QC metrics." + 'Mask file from NIfTI. May be None, for CIFTI processing. ' + 'The mask is in the same space as the BOLD data, which may not be the same as the ' + 'bold_mask_stdspace file. ' + 'Used to load the masked BOLD data. Not used for QC metrics.' ), ) @@ -200,8 +200,8 @@ class _LINCQCInputSpec(BaseInterfaceInputSpec): exists=True, mandatory=False, desc=( - "Anatomically-derived brain mask in anatomical space. " - "Used to calculate coregistration QC metrics." + 'Anatomically-derived brain mask in anatomical space. ' + 'Used to calculate coregistration QC metrics.' ), ) template_mask = File( @@ -209,31 +209,31 @@ class _LINCQCInputSpec(BaseInterfaceInputSpec): mandatory=False, desc=( "Template's official brain mask. " - "This matches the space of bold_mask_stdspace, " - "but does not necessarily match the space of bold_mask_inputspace. " - "Used to calculate normalization QC metrics." + 'This matches the space of bold_mask_stdspace, ' + 'but does not necessarily match the space of bold_mask_inputspace. ' + 'Used to calculate normalization QC metrics.' ), ) bold_mask_anatspace = File( exists=True, mandatory=False, - desc="BOLD mask in anatomical space. Used to calculate coregistration QC metrics.", + desc='BOLD mask in anatomical space. Used to calculate coregistration QC metrics.', ) bold_mask_stdspace = File( exists=True, mandatory=False, desc=( - "BOLD mask in template space. " - "This matches the space of template_mask, " - "but does not necessarily match the space of bold_mask_inputspace. " - "Used to calculate normalization QC metrics." + 'BOLD mask in template space. ' + 'This matches the space of template_mask, ' + 'but does not necessarily match the space of bold_mask_inputspace. ' + 'Used to calculate normalization QC metrics.' 
), ) class _LINCQCOutputSpec(TraitedSpec): - qc_file = File(exists=True, desc="QC TSV file.") - qc_metadata = File(exists=True, desc="Sidecar JSON for QC TSV file.") + qc_file = File(exists=True, desc='QC TSV file.') + qc_metadata = File(exists=True, desc='Sidecar JSON for QC TSV file.') class LINCQC(SimpleInterface): @@ -245,14 +245,14 @@ class LINCQC(SimpleInterface): def _run_interface(self, runtime): # Load confound matrix and load motion without motion filtering motion_df = pd.read_table(self.inputs.motion_file) - preproc_fd = motion_df["framewise_displacement"].to_numpy() - rmsd = motion_df["rmsd"].to_numpy() + preproc_fd = motion_df['framewise_displacement'].to_numpy() + rmsd = motion_df['rmsd'].to_numpy() # Determine number of dummy volumes and load temporal mask dummy_scans = self.inputs.dummy_scans if isdefined(self.inputs.temporal_mask): censoring_df = pd.read_table(self.inputs.temporal_mask) - tmask_arr = censoring_df["framewise_displacement"].values + tmask_arr = censoring_df['framewise_displacement'].values else: tmask_arr = np.zeros(preproc_fd.size, dtype=int) @@ -276,17 +276,17 @@ def _run_interface(self, runtime): ), )[1] if preproc_fd.size != dvars_before_processing.size: - raise ValueError(f"FD {preproc_fd.size} != DVARS {dvars_before_processing.size}\n") + raise ValueError(f'FD {preproc_fd.size} != DVARS {dvars_before_processing.size}\n') # Get the different components in the bold file name # eg: ['sub-colornest001', 'ses-1'], etc. _, bold_file_name = os.path.split(self.inputs.name_source) - bold_file_name_components = bold_file_name.split("_") + bold_file_name_components = bold_file_name.split('_') # Fill out dictionary with entities from filename qc_values_dict = {} for entity in bold_file_name_components[:-1]: - qc_values_dict[entity.split("-")[0]] = entity.split("-")[1] + qc_values_dict[entity.split('-')[0]] = entity.split('-')[1] # Calculate QC measures mean_fd = np.mean(preproc_fd) @@ -301,111 +301,111 @@ def _run_interface(self, runtime): # A summary of all the values qc_values_dict.update( { - "mean_fd": [mean_fd], - "mean_fd_post_censoring": [mean_fd_post_censoring], - "mean_relative_rms": [mean_relative_rms], - "max_relative_rms": [rmsd_max_value], - "mean_dvars_initial": [mean_dvars_before_processing], - "mean_dvars_final": [mean_dvars_after_processing], - "num_dummy_volumes": [dummy_scans], - "num_censored_volumes": [num_censored_volumes], - "num_retained_volumes": [num_retained_volumes], - "fd_dvars_correlation_initial": [fd_dvars_correlation_initial], - "fd_dvars_correlation_final": [fd_dvars_correlation_final], + 'mean_fd': [mean_fd], + 'mean_fd_post_censoring': [mean_fd_post_censoring], + 'mean_relative_rms': [mean_relative_rms], + 'max_relative_rms': [rmsd_max_value], + 'mean_dvars_initial': [mean_dvars_before_processing], + 'mean_dvars_final': [mean_dvars_after_processing], + 'num_dummy_volumes': [dummy_scans], + 'num_censored_volumes': [num_censored_volumes], + 'num_retained_volumes': [num_retained_volumes], + 'fd_dvars_correlation_initial': [fd_dvars_correlation_initial], + 'fd_dvars_correlation_final': [fd_dvars_correlation_final], } ) qc_metadata = { - "mean_fd": { - "LongName": "Mean Framewise Displacement", - "Description": ( - "Average framewise displacement without any motion parameter filtering. " - "This value includes high-motion outliers, but not dummy volumes. " - "FD is calculated according to the Power definition." 
+ 'mean_fd': { + 'LongName': 'Mean Framewise Displacement', + 'Description': ( + 'Average framewise displacement without any motion parameter filtering. ' + 'This value includes high-motion outliers, but not dummy volumes. ' + 'FD is calculated according to the Power definition.' ), - "Units": "mm", - "Term URL": "https://doi.org/10.1016/j.neuroimage.2011.10.018", + 'Units': 'mm', + 'Term URL': 'https://doi.org/10.1016/j.neuroimage.2011.10.018', }, - "mean_fd_post_censoring": { - "LongName": "Mean Framewise Displacement After Censoring", - "Description": ( - "Average framewise displacement without any motion parameter filtering. " - "This value does not include high-motion outliers or dummy volumes. " - "FD is calculated according to the Power definition." + 'mean_fd_post_censoring': { + 'LongName': 'Mean Framewise Displacement After Censoring', + 'Description': ( + 'Average framewise displacement without any motion parameter filtering. ' + 'This value does not include high-motion outliers or dummy volumes. ' + 'FD is calculated according to the Power definition.' ), - "Units": "mm", - "Term URL": "https://doi.org/10.1016/j.neuroimage.2011.10.018", + 'Units': 'mm', + 'Term URL': 'https://doi.org/10.1016/j.neuroimage.2011.10.018', }, - "mean_relative_rms": { - "LongName": "Mean Relative Root Mean Squared", - "Description": ( - "Average relative root mean squared calculated from motion parameters, " - "after removal of dummy volumes and high-motion outliers. " + 'mean_relative_rms': { + 'LongName': 'Mean Relative Root Mean Squared', + 'Description': ( + 'Average relative root mean squared calculated from motion parameters, ' + 'after removal of dummy volumes and high-motion outliers. ' "Relative in this case means 'relative to the previous scan'." ), - "Units": "arbitrary", + 'Units': 'arbitrary', }, - "max_relative_rms": { - "LongName": "Maximum Relative Root Mean Squared", - "Description": ( - "Maximum relative root mean squared calculated from motion parameters, " - "after removal of dummy volumes and high-motion outliers. " + 'max_relative_rms': { + 'LongName': 'Maximum Relative Root Mean Squared', + 'Description': ( + 'Maximum relative root mean squared calculated from motion parameters, ' + 'after removal of dummy volumes and high-motion outliers. ' "Relative in this case means 'relative to the previous scan'." ), - "Units": "arbitrary", + 'Units': 'arbitrary', }, - "mean_dvars_initial": { - "LongName": "Mean DVARS Before Postprocessing", - "Description": ( - "Average DVARS (temporal derivative of root mean squared variance over " - "voxels) calculated from the preprocessed BOLD file, after dummy scan removal." + 'mean_dvars_initial': { + 'LongName': 'Mean DVARS Before Postprocessing', + 'Description': ( + 'Average DVARS (temporal derivative of root mean squared variance over ' + 'voxels) calculated from the preprocessed BOLD file, after dummy scan removal.' ), - "TermURL": "https://doi.org/10.1016/j.neuroimage.2011.02.073", + 'TermURL': 'https://doi.org/10.1016/j.neuroimage.2011.02.073', }, - "mean_dvars_final": { - "LongName": "Mean DVARS After Postprocessing", - "Description": ( - "Average DVARS (temporal derivative of root mean squared variance over " - "voxels) calculated from the denoised BOLD file." + 'mean_dvars_final': { + 'LongName': 'Mean DVARS After Postprocessing', + 'Description': ( + 'Average DVARS (temporal derivative of root mean squared variance over ' + 'voxels) calculated from the denoised BOLD file.' 
), - "TermURL": "https://doi.org/10.1016/j.neuroimage.2011.02.073", + 'TermURL': 'https://doi.org/10.1016/j.neuroimage.2011.02.073', }, - "num_dummy_volumes": { - "LongName": "Number of Dummy Volumes", - "Description": ( - "The number of non-steady state volumes removed from the time series by XCP-D." + 'num_dummy_volumes': { + 'LongName': 'Number of Dummy Volumes', + 'Description': ( + 'The number of non-steady state volumes removed from the time series by XCP-D.' ), }, - "num_censored_volumes": { - "LongName": "Number of Censored Volumes", - "Description": ( - "The number of high-motion outlier volumes censored by XCP-D. " - "This does not include dummy volumes." + 'num_censored_volumes': { + 'LongName': 'Number of Censored Volumes', + 'Description': ( + 'The number of high-motion outlier volumes censored by XCP-D. ' + 'This does not include dummy volumes.' ), }, - "num_retained_volumes": { - "LongName": "Number of Retained Volumes", - "Description": ( - "The number of volumes retained in the denoised dataset. " - "This does not include dummy volumes or high-motion outliers." + 'num_retained_volumes': { + 'LongName': 'Number of Retained Volumes', + 'Description': ( + 'The number of volumes retained in the denoised dataset. ' + 'This does not include dummy volumes or high-motion outliers.' ), }, - "fd_dvars_correlation_initial": { - "LongName": "FD-DVARS Correlation Before Postprocessing", - "Description": ( - "The Pearson correlation coefficient between framewise displacement and DVARS " - "(temporal derivative of root mean squared variance over voxels), " - "after removal of dummy volumes, but before removal of high-motion outliers." + 'fd_dvars_correlation_initial': { + 'LongName': 'FD-DVARS Correlation Before Postprocessing', + 'Description': ( + 'The Pearson correlation coefficient between framewise displacement and DVARS ' + '(temporal derivative of root mean squared variance over voxels), ' + 'after removal of dummy volumes, but before removal of high-motion outliers.' ), }, - "fd_dvars_correlation_final": { - "LongName": "FD-DVARS Correlation After Postprocessing", - "Description": ( - "The Pearson correlation coefficient between framewise displacement and DVARS " - "(temporal derivative of root mean squared variance over voxels), " - "after postprocessing. " - "The FD time series is unfiltered, but censored. " - "The DVARS time series is calculated from the denoised BOLD data." + 'fd_dvars_correlation_final': { + 'LongName': 'FD-DVARS Correlation After Postprocessing', + 'Description': ( + 'The Pearson correlation coefficient between framewise displacement and DVARS ' + '(temporal derivative of root mean squared variance over voxels), ' + 'after postprocessing. ' + 'The FD time series is unfiltered, but censored. ' + 'The DVARS time series is calculated from the denoised BOLD data.' 
), }, } @@ -423,22 +423,22 @@ def _run_interface(self, runtime): # Convert dictionary to df and write out the qc file df = pd.DataFrame(qc_values_dict) - self._results["qc_file"] = fname_presuffix( + self._results['qc_file'] = fname_presuffix( self.inputs.cleaned_file, - suffix="qc_bold.tsv", + suffix='qc_bold.tsv', newpath=runtime.cwd, use_ext=False, ) - df.to_csv(self._results["qc_file"], index=False, header=True, sep="\t") + df.to_csv(self._results['qc_file'], index=False, header=True, sep='\t') # Write out the metadata file - self._results["qc_metadata"] = fname_presuffix( + self._results['qc_metadata'] = fname_presuffix( self.inputs.cleaned_file, - suffix="qc_bold.json", + suffix='qc_bold.json', newpath=runtime.cwd, use_ext=False, ) - with open(self._results["qc_metadata"], "w") as fo: + with open(self._results['qc_metadata'], 'w') as fo: json.dump(qc_metadata, fo, indent=4, sort_keys=True) return runtime @@ -448,13 +448,13 @@ class _ABCCQCInputSpec(BaseInterfaceInputSpec): motion_file = File( exists=True, mandatory=True, - desc="", + desc='', ) - TR = traits.Float(mandatory=True, desc="Repetition Time") + TR = traits.Float(mandatory=True, desc='Repetition Time') class _ABCCQCOutputSpec(TraitedSpec): - qc_file = File(exists=True, desc="ABCC QC HDF5 file.") + qc_file = File(exists=True, desc='ABCC QC HDF5 file.') class ABCCQC(SimpleInterface): @@ -486,58 +486,58 @@ class ABCCQC(SimpleInterface): def _run_interface(self, runtime): TR = self.inputs.TR - self._results["qc_file"] = fname_presuffix( + self._results['qc_file'] = fname_presuffix( self.inputs.motion_file, - suffix="qc_bold.hdf5", + suffix='qc_bold.hdf5', newpath=runtime.cwd, use_ext=False, ) # Load filtered framewise_displacement values from file motion_df = pd.read_table(self.inputs.motion_file) - if "framewise_displacement_filtered" in motion_df.columns: - fd = motion_df["framewise_displacement_filtered"].values + if 'framewise_displacement_filtered' in motion_df.columns: + fd = motion_df['framewise_displacement_filtered'].values else: - fd = motion_df["framewise_displacement"].values + fd = motion_df['framewise_displacement'].values - with h5py.File(self._results["qc_file"], "w") as dcan: + with h5py.File(self._results['qc_file'], 'w') as dcan: for thresh in np.linspace(0, 1, 101): thresh = np.around(thresh, 2) dcan.create_dataset( - f"/dcan_motion/fd_{thresh}/skip", + f'/dcan_motion/fd_{thresh}/skip', data=0, - dtype="float", + dtype='float', ) dcan.create_dataset( - f"/dcan_motion/fd_{thresh}/binary_mask", + f'/dcan_motion/fd_{thresh}/binary_mask', data=(fd > thresh).astype(int), - dtype="float", + dtype='float', ) dcan.create_dataset( - f"/dcan_motion/fd_{thresh}/threshold", + f'/dcan_motion/fd_{thresh}/threshold', data=thresh, - dtype="float", + dtype='float', ) dcan.create_dataset( - f"/dcan_motion/fd_{thresh}/total_frame_count", + f'/dcan_motion/fd_{thresh}/total_frame_count', data=len(fd), - dtype="float", + dtype='float', ) dcan.create_dataset( - f"/dcan_motion/fd_{thresh}/remaining_total_frame_count", + f'/dcan_motion/fd_{thresh}/remaining_total_frame_count', data=len(fd[fd <= thresh]), - dtype="float", + dtype='float', ) dcan.create_dataset( - f"/dcan_motion/fd_{thresh}/remaining_seconds", + f'/dcan_motion/fd_{thresh}/remaining_seconds', data=len(fd[fd <= thresh]) * TR, - dtype="float", + dtype='float', ) dcan.create_dataset( - f"/dcan_motion/fd_{thresh}/remaining_frame_mean_FD", + f'/dcan_motion/fd_{thresh}/remaining_frame_mean_FD', data=(fd[fd <= thresh]).mean(), - dtype="float", + dtype='float', ) return 
runtime diff --git a/xcp_d/interfaces/workbench.py b/xcp_d/interfaces/workbench.py index 557c5773a..761e70d03 100644 --- a/xcp_d/interfaces/workbench.py +++ b/xcp_d/interfaces/workbench.py @@ -18,11 +18,11 @@ from xcp_d.utils.filemanip import fname_presuffix, split_filename from xcp_d.utils.write_save import get_cifti_intents -iflogger = logging.getLogger("nipype.interface") +iflogger = logging.getLogger('nipype.interface') class _WBCommandInputSpec(CommandLineInputSpec): - num_threads = traits.Int(1, usedefault=True, nohash=True, desc="set number of threads") + num_threads = traits.Int(1, usedefault=True, nohash=True, desc='set number of threads') class WBCommand(WBCommandBase): @@ -43,20 +43,20 @@ def num_threads(self, value): def __init__(self, **inputs): super().__init__(**inputs) - if hasattr(self.inputs, "num_threads"): - self.inputs.on_trait_change(self._nthreads_update, "num_threads") + if hasattr(self.inputs, 'num_threads'): + self.inputs.on_trait_change(self._nthreads_update, 'num_threads') def _nthreads_update(self): """Update environment with new number of threads.""" - self.inputs.environ["OMP_NUM_THREADS"] = str(self.inputs.num_threads) + self.inputs.environ['OMP_NUM_THREADS'] = str(self.inputs.num_threads) class _FixCiftiIntentInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="CIFTI file to check.") + in_file = File(exists=True, mandatory=True, desc='CIFTI file to check.') class _FixCiftiIntentOutputSpec(TraitedSpec): - out_file = File(exists=True, mandatory=True, desc="Fixed CIFTI file.") + out_file = File(exists=True, mandatory=True, desc='Fixed CIFTI file.') class FixCiftiIntent(SimpleInterface): @@ -91,7 +91,7 @@ def _run_interface(self, runtime): if img.nifti_header.get_intent()[0] != target_intent: out_file = fname_presuffix( self.inputs.in_file, - suffix="_modified", + suffix='_modified', newpath=runtime.cwd, use_ext=True, ) @@ -99,7 +99,7 @@ def _run_interface(self, runtime): img.nifti_header.set_intent(target_intent) img.to_filename(out_file) - self._results["out_file"] = out_file + self._results['out_file'] = out_file return runtime @@ -108,27 +108,27 @@ class _ConvertAffineInputSpec(_WBCommandInputSpec): fromwhat = traits.Str( mandatory=True, - argstr="-from-%s", + argstr='-from-%s', position=0, - desc="world, itk, or flirt", + desc='world, itk, or flirt', ) in_file = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=1, - desc="The input file", + desc='The input file', ) towhat = traits.Str( mandatory=True, - argstr="-to-%s", + argstr='-to-%s', position=2, - desc="world, itk, or flirt", + desc='world, itk, or flirt', ) out_file = File( - argstr="%s", - name_source="in_file", - name_template="%s_world.nii.gz", + argstr='%s', + name_source='in_file', + name_template='%s_world.nii.gz', keep_extension=False, position=3, ) @@ -137,7 +137,7 @@ class _ConvertAffineInputSpec(_WBCommandInputSpec): class _ConvertAffineOutputSpec(TraitedSpec): """Output specification for ConvertAffine.""" - out_file = File(exists=True, desc="output file") + out_file = File(exists=True, desc='output file') class ConvertAffine(WBCommand): @@ -145,7 +145,7 @@ class ConvertAffine(WBCommand): input_spec = _ConvertAffineInputSpec output_spec = _ConvertAffineOutputSpec - _cmd = "wb_command -convert-affine" + _cmd = 'wb_command -convert-affine' class _ApplyAffineInputSpec(_WBCommandInputSpec): @@ -154,23 +154,23 @@ class _ApplyAffineInputSpec(_WBCommandInputSpec): in_file = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', 
position=0, - desc="The input file", + desc='The input file', ) affine = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=1, - desc="The affine file", + desc='The affine file', ) out_file = File( - argstr="%s", - name_source="in_file", - name_template="MNIAffine_%s.gii", + argstr='%s', + name_source='in_file', + name_template='MNIAffine_%s.gii', keep_extension=True, - extensions=[".surf.gii", ".shape.gii"], + extensions=['.surf.gii', '.shape.gii'], position=2, ) @@ -178,7 +178,7 @@ class _ApplyAffineInputSpec(_WBCommandInputSpec): class _ApplyAffineOutputSpec(TraitedSpec): """Output specification for ApplyAffine.""" - out_file = File(exists=True, desc="output file") + out_file = File(exists=True, desc='output file') class ApplyAffine(WBCommand): @@ -204,7 +204,7 @@ class ApplyAffine(WBCommand): input_spec = _ApplyAffineInputSpec output_spec = _ApplyAffineOutputSpec - _cmd = "wb_command -surface-apply-affine" + _cmd = 'wb_command -surface-apply-affine' class _ApplyWarpfieldInputSpec(_WBCommandInputSpec): @@ -213,35 +213,35 @@ class _ApplyWarpfieldInputSpec(_WBCommandInputSpec): in_file = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=0, - desc="The input file", + desc='The input file', ) warpfield = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=1, - desc="The warpfield file", + desc='The warpfield file', ) out_file = File( - argstr="%s", - name_source="in_file", - name_template="MNIwarped_%s.gii", - extensions=[".surf.gii", ".shape.gii"], + argstr='%s', + name_source='in_file', + name_template='MNIwarped_%s.gii', + extensions=['.surf.gii', '.shape.gii'], position=2, ) forward_warp = File( - argstr="-fnirt %s", + argstr='-fnirt %s', position=3, - desc="fnirt forward warpfield", + desc='fnirt forward warpfield', ) class _ApplyWarpfieldOutputSpec(TraitedSpec): """Output specification for ApplyWarpfield.""" - out_file = File(exists=True, desc="output file") + out_file = File(exists=True, desc='output file') class ApplyWarpfield(WBCommand): @@ -268,7 +268,7 @@ class ApplyWarpfield(WBCommand): input_spec = _ApplyWarpfieldInputSpec output_spec = _ApplyWarpfieldOutputSpec - _cmd = "wb_command -surface-apply-warpfield" + _cmd = 'wb_command -surface-apply-warpfield' class _SurfaceSphereProjectUnprojectInputSpec(_WBCommandInputSpec): @@ -277,38 +277,38 @@ class _SurfaceSphereProjectUnprojectInputSpec(_WBCommandInputSpec): in_file = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=0, - desc="a sphere with the desired output mesh", + desc='a sphere with the desired output mesh', ) sphere_project_to = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=1, - desc="a sphere that aligns with sphere-in", + desc='a sphere that aligns with sphere-in', ) sphere_unproject_from = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=2, - desc="deformed to the desired output space", + desc='deformed to the desired output space', ) out_file = File( - name_source="in_file", - name_template="%s_deformed.surf.gii", + name_source='in_file', + name_template='%s_deformed.surf.gii', keep_extension=False, - argstr="%s", + argstr='%s', position=3, - desc="The sphere output file", + desc='The sphere output file', ) class _SurfaceSphereProjectUnprojectOutputSpec(TraitedSpec): """Input specification for SurfaceSphereProjectUnproject.""" - out_file = File(exists=True, desc="output file") + out_file = File(exists=True, desc='output file') class 
SurfaceSphereProjectUnproject(WBCommand): @@ -359,11 +359,11 @@ class SurfaceSphereProjectUnproject(WBCommand): input_spec = _SurfaceSphereProjectUnprojectInputSpec output_spec = _SurfaceSphereProjectUnprojectOutputSpec - _cmd = "wb_command -surface-sphere-project-unproject" + _cmd = 'wb_command -surface-sphere-project-unproject' class _ChangeXfmTypeInputSpec(_WBCommandInputSpec): - in_transform = File(exists=True, argstr="%s", mandatory=True, position=0) + in_transform = File(exists=True, argstr='%s', mandatory=True, position=0) class _ChangeXfmTypeOutputSpec(TraitedSpec): @@ -379,15 +379,15 @@ class ChangeXfmType(SimpleInterface): def _run_interface(self, runtime): with open(self.inputs.in_transform) as f: lines = f.readlines() - listcomp = [line.replace("AffineTransform", "MatrixOffsetTransformBase") for line in lines] + listcomp = [line.replace('AffineTransform', 'MatrixOffsetTransformBase') for line in lines] outfile = fname_presuffix( self.inputs.in_transform, - suffix="_MatrixOffsetTransformBase", + suffix='_MatrixOffsetTransformBase', newpath=runtime.cwd, ) - with open(outfile, "w") as write_file: - write_file.write("".join(listcomp)) - self._results["out_transform"] = outfile + with open(outfile, 'w') as write_file: + write_file.write(''.join(listcomp)) + self._results['out_transform'] = outfile return runtime @@ -397,31 +397,31 @@ class _SurfaceAverageInputSpec(_WBCommandInputSpec): surface_in1 = File( exists=True, mandatory=True, - argstr="-surf %s", + argstr='-surf %s', position=1, - desc="specify a surface to include in the average", + desc='specify a surface to include in the average', ) surface_in2 = File( exists=True, mandatory=True, - argstr="-surf %s", + argstr='-surf %s', position=2, - desc="specify a surface to include in the average", + desc='specify a surface to include in the average', ) out_file = File( - name_source="surface_in1", + name_source='surface_in1', keep_extension=False, - name_template="%s-avg.surf.gii", - argstr="%s", + name_template='%s-avg.surf.gii', + argstr='%s', position=0, - desc="output - the output averaged surface", + desc='output - the output averaged surface', ) class _SurfaceAverageOutputSpec(TraitedSpec): """Output specification for SurfaceAverage.""" - out_file = File(exists=True, desc="output file") + out_file = File(exists=True, desc='output file') class SurfaceAverage(WBCommand): @@ -452,7 +452,7 @@ class SurfaceAverage(WBCommand): input_spec = _SurfaceAverageInputSpec output_spec = _SurfaceAverageOutputSpec - _cmd = "wb_command -surface-average" + _cmd = 'wb_command -surface-average' class _SurfaceGenerateInflatedInputSpec(_WBCommandInputSpec): @@ -461,39 +461,39 @@ class _SurfaceGenerateInflatedInputSpec(_WBCommandInputSpec): anatomical_surface_in = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=0, - desc="the anatomical surface", + desc='the anatomical surface', ) inflated_out_file = File( - name_source="anatomical_surface_in", + name_source='anatomical_surface_in', keep_extension=False, - name_template="%s-hcpinflated.surf.gii", - argstr="%s", + name_template='%s-hcpinflated.surf.gii', + argstr='%s', position=1, - desc="output - the output inflated surface", + desc='output - the output inflated surface', ) very_inflated_out_file = File( - name_source="anatomical_surface_in", + name_source='anatomical_surface_in', keep_extension=False, - name_template="%s-hcpveryinflated.surf.gii", - argstr="%s", + name_template='%s-hcpveryinflated.surf.gii', + argstr='%s', position=2, - desc="output - the output very 
inflated surface", + desc='output - the output very inflated surface', ) iterations_scale_value = traits.Float( mandatory=False, - argstr="-iterations-scale %f", + argstr='-iterations-scale %f', position=3, - desc="iterations-scale value", + desc='iterations-scale value', ) class _SurfaceGenerateInflatedOutputSpec(TraitedSpec): """Output specification for SurfaceGenerateInflated.""" - inflated_out_file = File(exists=True, desc="inflated output file") - very_inflated_out_file = File(exists=True, desc="very inflated output file") + inflated_out_file = File(exists=True, desc='inflated output file') + very_inflated_out_file = File(exists=True, desc='very inflated output file') class SurfaceGenerateInflated(WBCommand): @@ -518,7 +518,7 @@ class SurfaceGenerateInflated(WBCommand): input_spec = _SurfaceGenerateInflatedInputSpec output_spec = _SurfaceGenerateInflatedOutputSpec - _cmd = "wb_command -surface-generate-inflated" + _cmd = 'wb_command -surface-generate-inflated' class _CiftiParcellateWorkbenchInputSpec(_WBCommandInputSpec): @@ -527,116 +527,116 @@ class _CiftiParcellateWorkbenchInputSpec(_WBCommandInputSpec): in_file = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=0, - desc="The cifti file to parcellate", + desc='The cifti file to parcellate', ) atlas_label = File( mandatory=True, - argstr="%s", + argstr='%s', position=1, - desc="A cifti label file to use for the parcellation", + desc='A cifti label file to use for the parcellation', ) direction = traits.Enum( - "ROW", - "COLUMN", + 'ROW', + 'COLUMN', mandatory=True, - argstr="%s", + argstr='%s', position=2, - desc="Which mapping to parcellate (integer, ROW, or COLUMN)", + desc='Which mapping to parcellate (integer, ROW, or COLUMN)', ) out_file = File( - name_source=["in_file"], - name_template="parcellated_%s.ptseries.nii", + name_source=['in_file'], + name_template='parcellated_%s.ptseries.nii', keep_extension=False, - argstr="%s", + argstr='%s', position=3, - desc="Output cifti file", + desc='Output cifti file', ) # NOTE: These are not organized well. # -spatial-weights should appear before any in this group. 
spatial_weights = traits.Str( - argstr="-spatial-weights", + argstr='-spatial-weights', position=4, - desc="Use voxel volume and either vertex areas or metric files as weights", + desc='Use voxel volume and either vertex areas or metric files as weights', ) left_area_surf = File( exists=True, position=5, - argstr="-left-area-surface %s", - desc="Specify the left surface to use", + argstr='-left-area-surface %s', + desc='Specify the left surface to use', ) right_area_surf = File( exists=True, position=6, - argstr="-right-area-surface %s", - desc="Specify the right surface to use", + argstr='-right-area-surface %s', + desc='Specify the right surface to use', ) cerebellum_area_surf = File( exists=True, position=7, - argstr="-cerebellum-area-surf %s", - desc="specify the cerebellum surface to use", + argstr='-cerebellum-area-surf %s', + desc='specify the cerebellum surface to use', ) left_area_metric = File( exists=True, position=8, - argstr="-left-area-metric %s", - desc="Specify the left surface metric to use", + argstr='-left-area-metric %s', + desc='Specify the left surface metric to use', ) right_area_metric = File( exists=True, position=9, - argstr="-right-area-metric %s", - desc="Specify the right surface metric to use", + argstr='-right-area-metric %s', + desc='Specify the right surface metric to use', ) cerebellum_area_metric = File( exists=True, position=10, - argstr="-cerebellum-area-metric %s", - desc="specify the cerebellum surface metricto use", + argstr='-cerebellum-area-metric %s', + desc='specify the cerebellum surface metricto use', ) cifti_weights = File( exists=True, position=11, - argstr="-cifti-weights %s", - desc="Use a cifti file containing weights", + argstr='-cifti-weights %s', + desc='Use a cifti file containing weights', ) cor_method = traits.Enum( - "MEAN", - "MAX", - "MIN", - "INDEXMAX", - "INDEXMIN", - "SUM", - "PRODUCT", - "STDEV", - "SAMPSTDEV", - "VARIANCE", - "TSNR", - "COV", - "L2NORM", - "MEDIAN", - "MODE", - "COUNT_NONZERO", + 'MEAN', + 'MAX', + 'MIN', + 'INDEXMAX', + 'INDEXMIN', + 'SUM', + 'PRODUCT', + 'STDEV', + 'SAMPSTDEV', + 'VARIANCE', + 'TSNR', + 'COV', + 'L2NORM', + 'MEDIAN', + 'MODE', + 'COUNT_NONZERO', position=12, usedefault=True, - argstr="-method %s", - desc="Specify method of parcellation (default MEAN, or MODE if label data)", + argstr='-method %s', + desc='Specify method of parcellation (default MEAN, or MODE if label data)', ) only_numeric = traits.Bool( position=13, - argstr="-only-numeric", - desc="Exclude non-numeric values", + argstr='-only-numeric', + desc='Exclude non-numeric values', ) class _CiftiParcellateWorkbenchOutputSpec(TraitedSpec): """Output specification for the CiftiParcellateWorkbench command.""" - out_file = File(exists=True, desc="output CIFTI file") + out_file = File(exists=True, desc='output CIFTI file') class CiftiParcellateWorkbench(WBCommand): @@ -660,7 +660,7 @@ class CiftiParcellateWorkbench(WBCommand): input_spec = _CiftiParcellateWorkbenchInputSpec output_spec = _CiftiParcellateWorkbenchOutputSpec - _cmd = "wb_command -cifti-parcellate" + _cmd = 'wb_command -cifti-parcellate' class _CiftiSurfaceResampleInputSpec(_WBCommandInputSpec): @@ -680,52 +680,52 @@ class _CiftiSurfaceResampleInputSpec(_WBCommandInputSpec): in_file = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=0, - desc="the surface file to resample", + desc='the surface file to resample', ) current_sphere = File( exists=True, position=1, - argstr="%s", - desc="a sphere surface with the mesh that the input surface is 
currently on", + argstr='%s', + desc='a sphere surface with the mesh that the input surface is currently on', ) new_sphere = File( exists=True, position=2, - argstr="%s", + argstr='%s', desc=( - "a sphere surface that is in register with " - "and has the desired output mesh" + 'a sphere surface that is in register with ' + 'and has the desired output mesh' ), ) method = traits.Enum( - "BARYCENTRIC", - "ADAP_BARY_AREA", - argstr="%s", + 'BARYCENTRIC', + 'ADAP_BARY_AREA', + argstr='%s', position=3, desc=( - "the method name. " - "The BARYCENTRIC method is generally recommended for anatomical surfaces, " - "in order to minimize smoothing." + 'the method name. ' + 'The BARYCENTRIC method is generally recommended for anatomical surfaces, ' + 'in order to minimize smoothing.' ), usedefault=True, ) out_file = File( - name_source=["in_file"], - name_template="resampled_%s.gii", + name_source=['in_file'], + name_template='resampled_%s.gii', keep_extension=True, - extensions=[".shape.gii", ".surf.gii"], - argstr="%s", + extensions=['.shape.gii', '.surf.gii'], + argstr='%s', position=4, - desc="The output surface file.", + desc='The output surface file.', ) class _CiftiSurfaceResampleOutputSpec(TraitedSpec): """Output specification for the CiftiSurfaceResample command.""" - out_file = File(exists=True, desc="output gifti file") + out_file = File(exists=True, desc='output gifti file') class CiftiSurfaceResample(WBCommand): @@ -736,7 +736,7 @@ class CiftiSurfaceResample(WBCommand): input_spec = _CiftiSurfaceResampleInputSpec output_spec = _CiftiSurfaceResampleOutputSpec - _cmd = "wb_command -surface-resample" + _cmd = 'wb_command -surface-resample' class _CiftiSeparateMetricInputSpec(_WBCommandInputSpec): @@ -745,39 +745,39 @@ class _CiftiSeparateMetricInputSpec(_WBCommandInputSpec): in_file = File( exists=True, mandatory=True, - argstr="%s ", + argstr='%s ', position=0, - desc="The input dense series", + desc='The input dense series', ) direction = traits.Enum( - "ROW", - "COLUMN", + 'ROW', + 'COLUMN', mandatory=True, - argstr="%s ", + argstr='%s ', position=1, - desc="which dimension to smooth along, ROW or COLUMN", + desc='which dimension to smooth along, ROW or COLUMN', ) metric = traits.Str( mandatory=True, - argstr=" -metric %s ", + argstr=' -metric %s ', position=2, - desc="which of the structure eg CORTEX_LEFT CORTEX_RIGHT" - "check https://www.humanconnectome.org/software/workbench-command/-cifti-separate ", + desc='which of the structure eg CORTEX_LEFT CORTEX_RIGHT' + 'check https://www.humanconnectome.org/software/workbench-command/-cifti-separate ', ) out_file = File( - name_source=["in_file"], - name_template="correlation_matrix_%s.func.gii", + name_source=['in_file'], + name_template='correlation_matrix_%s.func.gii', keep_extension=True, - argstr=" %s", + argstr=' %s', position=3, - desc="The gifti output, either left and right", + desc='The gifti output, either left and right', ) class _CiftiSeparateMetricOutputSpec(TraitedSpec): """Output specification for the CiftiSeparateMetric command.""" - out_file = File(exists=True, desc="output CIFTI file") + out_file = File(exists=True, desc='output CIFTI file') class CiftiSeparateMetric(WBCommand): @@ -801,7 +801,7 @@ class CiftiSeparateMetric(WBCommand): input_spec = _CiftiSeparateMetricInputSpec output_spec = _CiftiSeparateMetricOutputSpec - _cmd = "wb_command -cifti-separate " + _cmd = 'wb_command -cifti-separate ' class _CiftiSeparateVolumeAllInputSpec(_WBCommandInputSpec): @@ -810,41 +810,41 @@ class 
_CiftiSeparateVolumeAllInputSpec(_WBCommandInputSpec): in_file = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=0, - desc="The input dense series", + desc='The input dense series', ) direction = traits.Enum( - "ROW", - "COLUMN", + 'ROW', + 'COLUMN', mandatory=True, - argstr="%s", + argstr='%s', position=1, - desc="which dimension to smooth along, ROW or COLUMN", + desc='which dimension to smooth along, ROW or COLUMN', ) out_file = File( - name_source=["in_file"], - name_template="%s_volumetric_data.nii.gz", + name_source=['in_file'], + name_template='%s_volumetric_data.nii.gz', keep_extension=False, - argstr="-volume-all %s -crop", + argstr='-volume-all %s -crop', position=2, - desc="The NIFTI output.", + desc='The NIFTI output.', ) label_file = File( - name_source=["in_file"], - name_template="%s_labels.nii.gz", + name_source=['in_file'], + name_template='%s_labels.nii.gz', keep_extension=False, - argstr="-label %s", + argstr='-label %s', position=3, - desc="A discrete segmentation NIFTI output.", + desc='A discrete segmentation NIFTI output.', ) class _CiftiSeparateVolumeAllOutputSpec(TraitedSpec): """Output specification for the CiftiSeparateVolumeAll command.""" - label_file = File(exists=True, desc="NIFTI file with labels.") - out_file = File(exists=True, desc="NIFTI file with volumetric data.") + label_file = File(exists=True, desc='NIFTI file with labels.') + out_file = File(exists=True, desc='NIFTI file with volumetric data.') class CiftiSeparateVolumeAll(WBCommand): @@ -869,7 +869,7 @@ class CiftiSeparateVolumeAll(WBCommand): input_spec = _CiftiSeparateVolumeAllInputSpec output_spec = _CiftiSeparateVolumeAllOutputSpec - _cmd = "wb_command -cifti-separate " + _cmd = 'wb_command -cifti-separate ' class _CiftiCreateDenseScalarInputSpec(_WBCommandInputSpec): @@ -879,44 +879,44 @@ class _CiftiCreateDenseScalarInputSpec(_WBCommandInputSpec): exists=False, mandatory=False, genfile=True, - argstr="%s", + argstr='%s', position=0, - desc="The CIFTI output.", + desc='The CIFTI output.', ) left_metric = File( exists=True, mandatory=False, - argstr="-left-metric %s", + argstr='-left-metric %s', position=1, - desc="The input surface data from the left hemisphere.", + desc='The input surface data from the left hemisphere.', ) right_metric = File( exists=True, mandatory=False, - argstr="-right-metric %s", + argstr='-right-metric %s', position=2, - desc="The input surface data from the right hemisphere.", + desc='The input surface data from the right hemisphere.', ) volume_data = File( exists=True, mandatory=False, - argstr="-volume %s", + argstr='-volume %s', position=3, - desc="The input volumetric data.", + desc='The input volumetric data.', ) structure_label_volume = File( exists=True, mandatory=False, - argstr="%s", + argstr='%s', position=4, - desc="A label file indicating the structure of each voxel in volume_data.", + desc='A label file indicating the structure of each voxel in volume_data.', ) class _CiftiCreateDenseScalarOutputSpec(TraitedSpec): """Output specification for the CiftiCreateDenseScalar command.""" - out_file = File(exists=True, desc="output CIFTI file") + out_file = File(exists=True, desc='output CIFTI file') class CiftiCreateDenseScalar(WBCommand): @@ -943,10 +943,10 @@ class CiftiCreateDenseScalar(WBCommand): input_spec = _CiftiCreateDenseScalarInputSpec output_spec = _CiftiCreateDenseScalarOutputSpec - _cmd = "wb_command -cifti-create-dense-scalar" + _cmd = 'wb_command -cifti-create-dense-scalar' def _gen_filename(self, name): - if name != 
"out_file": + if name != 'out_file': return None if isdefined(self.inputs.out_file): @@ -956,11 +956,11 @@ def _gen_filename(self, name): else: _, fname, _ = split_filename(self.inputs.left_metric) - return f"{fname}_converted.dscalar.nii" + return f'{fname}_converted.dscalar.nii' def _list_outputs(self): outputs = self.output_spec().get() - outputs["out_file"] = os.path.abspath(self._gen_filename("out_file")) + outputs['out_file'] = os.path.abspath(self._gen_filename('out_file')) return outputs @@ -968,7 +968,7 @@ class _ShowSceneInputSpec(_WBCommandInputSpec): scene_file = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=0, ) scene_name_or_number = traits.Either( @@ -976,33 +976,33 @@ class _ShowSceneInputSpec(_WBCommandInputSpec): traits.Str, mandatory=True, position=1, - argstr="%s", - desc="name or number (starting at one) of the scene in the scene file", + argstr='%s', + desc='name or number (starting at one) of the scene in the scene file', ) out_file = File( exists=False, mandatory=False, - argstr="%s", + argstr='%s', genfile=True, position=2, - desc="output image file name", + desc='output image file name', ) image_width = traits.Int( mandatory=True, - argstr="%s", + argstr='%s', position=3, - desc="width of output image(s), in pixels", + desc='width of output image(s), in pixels', ) image_height = traits.Int( mandatory=True, - argstr="%s", + argstr='%s', position=4, - desc="height of output image(s), in pixels", + desc='height of output image(s), in pixels', ) class _ShowSceneOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="output image file name") + out_file = File(exists=True, desc='output image file name') class ShowScene(WBCommand): @@ -1069,22 +1069,22 @@ class ShowScene(WBCommand): input_spec = _ShowSceneInputSpec output_spec = _ShowSceneOutputSpec - _cmd = "wb_command -show-scene" + _cmd = 'wb_command -show-scene' def _list_outputs(self): outputs = self.output_spec().get() - outputs["out_file"] = os.path.abspath(self._gen_outfilename()) + outputs['out_file'] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): - return self._gen_outfilename() if name == "out_file" else None + return self._gen_outfilename() if name == 'out_file' else None def _gen_outfilename(self): frame_number = self.inputs.scene_name_or_number return ( - f"frame_{frame_number:06g}.png" + f'frame_{frame_number:06g}.png' if isinstance(frame_number, int) - else f"frame_{frame_number}.png" + else f'frame_{frame_number}.png' ) @@ -1092,40 +1092,40 @@ class _CiftiConvertInputSpec(_WBCommandInputSpec): """Input specification for the CiftiConvert command.""" target = traits.Enum( - "from", - "to", + 'from', + 'to', mandatory=True, position=0, - argstr="-%s-nifti", - desc="Convert either to or from nifti.", + argstr='-%s-nifti', + desc='Convert either to or from nifti.', ) in_file = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=1, - desc="The input file.", + desc='The input file.', ) cifti_template = File( exists=True, mandatory=False, - argstr="%s", + argstr='%s', position=2, - desc="A cifti file with the dimension(s) and mapping(s) that should be used.", + desc='A cifti file with the dimension(s) and mapping(s) that should be used.', ) TR = traits.Float( mandatory=False, - desc="Repetition time in seconds. Used to reset timepoints.", + desc='Repetition time in seconds. 
Used to reset timepoints.', position=4, - argstr="-reset-timepoints %s 0", + argstr='-reset-timepoints %s 0', ) out_file = File( exists=True, mandatory=False, genfile=True, - argstr="%s", + argstr='%s', position=3, - desc="The output file.", + desc='The output file.', ) @@ -1134,7 +1134,7 @@ class _CiftiConvertOutputSpec(TraitedSpec): out_file = File( exists=True, - desc="The output file.", + desc='The output file.', ) @@ -1153,20 +1153,20 @@ class CiftiConvert(WBCommand): input_spec = _CiftiConvertInputSpec output_spec = _CiftiConvertOutputSpec - _cmd = "wb_command -cifti-convert" + _cmd = 'wb_command -cifti-convert' def _gen_filename(self, name): - if name != "out_file": + if name != 'out_file': return None _, fname, ext = split_filename(self.inputs.in_file) # if we want to support other cifti outputs, we'll need to change this. - ext = ".dtseries.nii" if self.inputs.target == "from" else ".nii.gz" - return f"{fname}_converted{ext}" + ext = '.dtseries.nii' if self.inputs.target == 'from' else '.nii.gz' + return f'{fname}_converted{ext}' def _list_outputs(self): outputs = self.output_spec().get() - outputs["out_file"] = os.path.abspath(self._gen_filename("out_file")) + outputs['out_file'] = os.path.abspath(self._gen_filename('out_file')) return outputs @@ -1176,60 +1176,60 @@ class _CiftiCreateDenseFromTemplateInputSpec(_WBCommandInputSpec): template_cifti = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=0, - desc="File to match brainordinates of.", + desc='File to match brainordinates of.', ) out_file = File( exists=False, mandatory=False, genfile=True, - argstr="%s", + argstr='%s', position=1, - desc="The output cifti file.", + desc='The output cifti file.', ) volume_all = File( exists=True, mandatory=False, - argstr="-volume-all %s", + argstr='-volume-all %s', position=2, - desc="Use input data from volume files. Input volume file.", + desc='Use input data from volume files. Input volume file.', ) from_cropped = traits.Bool( False, usedefault=True, mandatory=False, - argstr="-from-cropped", + argstr='-from-cropped', position=3, - desc="Use input data from cropped volume files.", + desc='Use input data from cropped volume files.', ) left_metric = File( exists=True, mandatory=False, - argstr="-metric CORTEX_LEFT %s", + argstr='-metric CORTEX_LEFT %s', position=4, - desc="Use input data from surface files. Input surface file.", + desc='Use input data from surface files. Input surface file.', ) right_metric = File( exists=True, mandatory=False, - argstr="-metric CORTEX_RIGHT %s", + argstr='-metric CORTEX_RIGHT %s', position=5, - desc="Use input data from surface files. Input surface file.", + desc='Use input data from surface files. Input surface file.', ) label = File( exists=True, mandatory=False, - argstr="-cifti %s", + argstr='-cifti %s', position=6, - desc="Use input data from surface label files. Input label file.", + desc='Use input data from surface label files. 
Input label file.', ) class _CiftiCreateDenseFromTemplateOutputSpec(TraitedSpec): """Output specification for the CiftiCreateDenseFromTemplate command.""" - out_file = File(exists=True, desc="output CIFTI file") + out_file = File(exists=True, desc='output CIFTI file') class CiftiCreateDenseFromTemplate(WBCommand): @@ -1268,10 +1268,10 @@ class CiftiCreateDenseFromTemplate(WBCommand): input_spec = _CiftiCreateDenseFromTemplateInputSpec output_spec = _CiftiCreateDenseFromTemplateOutputSpec - _cmd = "wb_command -cifti-create-dense-from-template" + _cmd = 'wb_command -cifti-create-dense-from-template' def _gen_filename(self, name): - if name != "out_file": + if name != 'out_file': return None if isdefined(self.inputs.out_file): @@ -1281,11 +1281,11 @@ def _gen_filename(self, name): else: _, fname, _ = split_filename(self.inputs.template_cifti) - return f"{fname}_converted.dscalar.nii" + return f'{fname}_converted.dscalar.nii' def _list_outputs(self): outputs = self.output_spec().get() - outputs["out_file"] = os.path.abspath(self._gen_filename("out_file")) + outputs['out_file'] = os.path.abspath(self._gen_filename('out_file')) return outputs @@ -1295,37 +1295,37 @@ class _CiftiMathInputSpec(_WBCommandInputSpec): data = File( exists=True, mandatory=True, - argstr="-var data %s", + argstr='-var data %s', position=2, - desc="First data file to use in the math operation", + desc='First data file to use in the math operation', ) mask = File( exists=True, mandatory=False, - argstr="-var mask %s -select 1 1", + argstr='-var mask %s -select 1 1', position=3, - desc="Second data file to use in the math operation", + desc='Second data file to use in the math operation', ) expression = traits.Str( mandatory=True, argstr='"%s"', position=0, - desc="Math expression", + desc='Math expression', ) out_file = File( - name_source=["data"], - name_template="mathed_%s.nii", + name_source=['data'], + name_template='mathed_%s.nii', keep_extension=True, - argstr="%s", + argstr='%s', position=1, - desc="Output cifti file", + desc='Output cifti file', ) class _CiftiMathOutputSpec(TraitedSpec): """Output specification for the CiftiMath command.""" - out_file = File(exists=True, desc="output CIFTI file") + out_file = File(exists=True, desc='output CIFTI file') class CiftiMath(WBCommand): @@ -1347,7 +1347,7 @@ class CiftiMath(WBCommand): input_spec = _CiftiMathInputSpec output_spec = _CiftiMathOutputSpec - _cmd = "wb_command -cifti-math" + _cmd = 'wb_command -cifti-math' class _CiftiCorrelationInputSpec(_WBCommandInputSpec): @@ -1356,24 +1356,24 @@ class _CiftiCorrelationInputSpec(_WBCommandInputSpec): in_file = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=0, - desc="Input file to correlate", + desc='Input file to correlate', ) out_file = File( - name_source=["in_file"], - name_template="corr_%s.pconn.nii", + name_source=['in_file'], + name_template='corr_%s.pconn.nii', keep_extension=False, - argstr="%s", + argstr='%s', position=1, - desc="Output cifti file", + desc='Output cifti file', ) class _CiftiCorrelationOutputSpec(TraitedSpec): """Output specification for the CiftiCorrelation command.""" - out_file = File(exists=True, desc="output CIFTI file") + out_file = File(exists=True, desc='output CIFTI file') class CiftiCorrelation(WBCommand): @@ -1394,112 +1394,112 @@ class CiftiCorrelation(WBCommand): input_spec = _CiftiCorrelationInputSpec output_spec = _CiftiCorrelationOutputSpec - _cmd = "wb_command -cifti-correlation" + _cmd = 'wb_command -cifti-correlation' class 
_CiftiSmoothInputSpec(_WBCommandInputSpec): in_file = File( exists=True, mandatory=True, - argstr="%s", + argstr='%s', position=0, - desc="The input CIFTI file", + desc='The input CIFTI file', ) sigma_surf = traits.Float( mandatory=True, - argstr="%s", + argstr='%s', position=1, - desc="the sigma for the gaussian surface smoothing kernel, in mm", + desc='the sigma for the gaussian surface smoothing kernel, in mm', ) sigma_vol = traits.Float( mandatory=True, - argstr="%s", + argstr='%s', position=2, - desc="the sigma for the gaussian volume smoothing kernel, in mm", + desc='the sigma for the gaussian volume smoothing kernel, in mm', ) direction = traits.Enum( - "ROW", - "COLUMN", + 'ROW', + 'COLUMN', mandatory=True, - argstr="%s", + argstr='%s', position=3, - desc="which dimension to smooth along, ROW or COLUMN", + desc='which dimension to smooth along, ROW or COLUMN', ) out_file = File( - name_source=["in_file"], - name_template="smoothed_%s.nii", + name_source=['in_file'], + name_template='smoothed_%s.nii', keep_extension=True, - argstr="%s", + argstr='%s', position=4, - desc="The output CIFTI", + desc='The output CIFTI', ) left_surf = File( exists=True, mandatory=True, position=5, - argstr="-left-surface %s", - desc="Specify the left surface to use", + argstr='-left-surface %s', + desc='Specify the left surface to use', ) left_corrected_areas = File( exists=True, position=6, - argstr="-left-corrected-areas %s", - desc="vertex areas (as a metric) to use instead of computing them from " - "the left surface.", + argstr='-left-corrected-areas %s', + desc='vertex areas (as a metric) to use instead of computing them from ' + 'the left surface.', ) right_surf = File( exists=True, mandatory=True, position=7, - argstr="-right-surface %s", - desc="Specify the right surface to use", + argstr='-right-surface %s', + desc='Specify the right surface to use', ) right_corrected_areas = File( exists=True, position=8, - argstr="-right-corrected-areas %s", - desc="vertex areas (as a metric) to use instead of computing them from " - "the right surface", + argstr='-right-corrected-areas %s', + desc='vertex areas (as a metric) to use instead of computing them from ' + 'the right surface', ) cerebellum_surf = File( exists=True, position=9, - argstr="-cerebellum-surface %s", - desc="specify the cerebellum surface to use", + argstr='-cerebellum-surface %s', + desc='specify the cerebellum surface to use', ) cerebellum_corrected_areas = File( exists=True, position=10, - requires=["cerebellum_surf"], - argstr="cerebellum-corrected-areas %s", - desc="vertex areas (as a metric) to use instead of computing them from " - "the cerebellum surface", + requires=['cerebellum_surf'], + argstr='cerebellum-corrected-areas %s', + desc='vertex areas (as a metric) to use instead of computing them from ' + 'the cerebellum surface', ) cifti_roi = File( exists=True, position=11, - argstr="-cifti-roi %s", - desc="CIFTI file for ROI smoothing", + argstr='-cifti-roi %s', + desc='CIFTI file for ROI smoothing', ) fix_zeros_vol = traits.Bool( position=12, - argstr="-fix-zeros-volume", - desc="treat values of zero in the volume as missing data", + argstr='-fix-zeros-volume', + desc='treat values of zero in the volume as missing data', ) fix_zeros_surf = traits.Bool( position=13, - argstr="-fix-zeros-surface", - desc="treat values of zero on the surface as missing data", + argstr='-fix-zeros-surface', + desc='treat values of zero on the surface as missing data', ) merged_volume = traits.Bool( position=14, - argstr="-merged-volume", - 
desc="smooth across subcortical structure boundaries", + argstr='-merged-volume', + desc='smooth across subcortical structure boundaries', ) class _CiftiSmoothOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="output CIFTI file") + out_file = File(exists=True, desc='output CIFTI file') class CiftiSmooth(WBCommand): @@ -1540,4 +1540,4 @@ class CiftiSmooth(WBCommand): input_spec = _CiftiSmoothInputSpec output_spec = _CiftiSmoothOutputSpec - _cmd = "wb_command -cifti-smoothing" + _cmd = 'wb_command -cifti-smoothing' diff --git a/xcp_d/reports/core.py b/xcp_d/reports/core.py index caa16a7ce..2908a1816 100644 --- a/xcp_d/reports/core.py +++ b/xcp_d/reports/core.py @@ -18,9 +18,9 @@ def run_reports( subject_label, run_uuid, bootstrap_file=None, - out_filename="report.html", + out_filename='report.html', reportlets_dir=None, - errorname="report.err", + errorname='report.err', **entities, ): """Run the reports.""" @@ -44,7 +44,7 @@ def run_reports( import traceback # Store the list of subjects for which report generation failed - traceback.print_exception(*sys.exc_info(), file=str(Path(output_dir) / "logs" / errorname)) + traceback.print_exception(*sys.exc_info(), file=str(Path(output_dir) / 'logs' / errorname)) return subject_label return None @@ -62,7 +62,7 @@ def generate_reports( """Generate reports for a list of subjects.""" reportlets_dir = None if work_dir is not None: - reportlets_dir = Path(work_dir) / "reportlets" + reportlets_dir = Path(work_dir) / 'reportlets' if isinstance(subject_list, str): subject_list = [subject_list] @@ -76,15 +76,15 @@ def generate_reports( if bootstrap_file is not None: # If a config file is precised, we do not override it - html_report = "report.html" + html_report = 'report.html' elif n_ses <= config.execution.aggr_ses_reports: # If there are only a few session for this subject, # we aggregate them in a single visual report. - bootstrap_file = data.load("reports-spec.yml") - html_report = "report.html" + bootstrap_file = data.load('reports-spec.yml') + html_report = 'report.html' else: # Beyond a threshold, we separate the anatomical report from the functional. 
- bootstrap_file = data.load("reports-spec-anat.yml") + bootstrap_file = data.load('reports-spec-anat.yml') html_report = f'sub-{subject_label.lstrip("sub-")}_anat.html' report_error = run_reports( @@ -94,7 +94,7 @@ def generate_reports( bootstrap_file=bootstrap_file, out_filename=html_report, reportlets_dir=reportlets_dir, - errorname=f"report-{run_uuid}-{subject_label}.err", + errorname=f'report-{run_uuid}-{subject_label}.err', subject=subject_label, ) # If the report generation failed, append the subject label for which it failed @@ -106,16 +106,16 @@ def generate_reports( # we separate the functional reports per session if session_list is None: all_filters = config.execution.bids_filters or {} - filters = all_filters.get("bold", {}) + filters = all_filters.get('bold', {}) session_list = config.execution.layout.get_sessions( subject=subject_label, **filters ) # Drop ses- prefixes - session_list = [ses[4:] if ses.startswith("ses-") else ses for ses in session_list] + session_list = [ses[4:] if ses.startswith('ses-') else ses for ses in session_list] for session_label in session_list: - bootstrap_file = data.load("reports-spec-func.yml") + bootstrap_file = data.load('reports-spec-func.yml') html_report = f'sub-{subject_label.lstrip("sub-")}_ses-{session_label}_func.html' report_error = run_reports( @@ -125,7 +125,7 @@ def generate_reports( bootstrap_file=bootstrap_file, out_filename=html_report, reportlets_dir=reportlets_dir, - errorname=f"report-{run_uuid}-{subject_label}-func.err", + errorname=f'report-{run_uuid}-{subject_label}-func.err', subject=subject_label, session=session_label, ) @@ -134,24 +134,24 @@ def generate_reports( errors.append(report_error) if errors: - error_list = ", ".join( - f"{subid} ({err})" for subid, err in zip(subject_list, errors) if err + error_list = ', '.join( + f'{subid} ({err})' for subid, err in zip(subject_list, errors, strict=False) if err ) config.loggers.cli.error( - "Processing did not finish successfully. Errors occurred while processing " - "data from participants: %s. Check the HTML reports for details.", + 'Processing did not finish successfully. Errors occurred while processing ' + 'data from participants: %s. 
Check the HTML reports for details.', error_list, ) elif abcc_qc: - config.loggers.cli.info("Generating executive summary.") + config.loggers.cli.info('Generating executive summary.') if session_list is None: all_filters = config.execution.bids_filters or {} - filters = all_filters.get("bold", {}) + filters = all_filters.get('bold', {}) session_list = config.execution.layout.get_sessions(subject=subject_label, **filters) # Drop ses- prefixes - session_list = [ses[4:] if ses.startswith("ses-") else ses for ses in session_list] + session_list = [ses[4:] if ses.startswith('ses-') else ses for ses in session_list] if not session_list: session_list = [None] @@ -164,6 +164,6 @@ def generate_reports( exsumm.collect_inputs() exsumm.generate_report() - config.loggers.cli.info("Reports generated successfully") + config.loggers.cli.info('Reports generated successfully') return errors diff --git a/xcp_d/tests/conftest.py b/xcp_d/tests/conftest.py index 42f71290f..d038020fb 100644 --- a/xcp_d/tests/conftest.py +++ b/xcp_d/tests/conftest.py @@ -8,209 +8,209 @@ def pytest_addoption(parser): """Collect pytest parameters for running tests.""" parser.addoption( - "--working_dir", - action="store", + '--working_dir', + action='store', default=( - "/usr/local/miniconda/lib/python3.10/site-packages/xcp_d/xcp_d/tests/data/test_data/" - "run_pytests/work" + '/usr/local/miniconda/lib/python3.10/site-packages/xcp_d/xcp_d/tests/data/test_data/' + 'run_pytests/work' ), ) parser.addoption( - "--data_dir", - action="store", + '--data_dir', + action='store', default=( - "/usr/local/miniconda/lib/python3.10/site-packages/xcp_d/xcp_d/tests/data/test_data" + '/usr/local/miniconda/lib/python3.10/site-packages/xcp_d/xcp_d/tests/data/test_data' ), ) parser.addoption( - "--output_dir", - action="store", + '--output_dir', + action='store', default=( - "/usr/local/miniconda/lib/python3.10/site-packages/xcp_d/xcp_d/tests/data/test_data/" - "run_pytests/out" + '/usr/local/miniconda/lib/python3.10/site-packages/xcp_d/xcp_d/tests/data/test_data/' + 'run_pytests/out' ), ) # Set up the commandline options as fixtures -@pytest.fixture(scope="session") +@pytest.fixture(scope='session') def data_dir(request): """Grab data directory.""" - return request.config.getoption("--data_dir") + return request.config.getoption('--data_dir') -@pytest.fixture(scope="session") +@pytest.fixture(scope='session') def working_dir(request): """Grab working directory.""" - workdir = request.config.getoption("--working_dir") + workdir = request.config.getoption('--working_dir') os.makedirs(workdir, exist_ok=True) return workdir -@pytest.fixture(scope="session") +@pytest.fixture(scope='session') def output_dir(request): """Grab output directory.""" - outdir = request.config.getoption("--output_dir") + outdir = request.config.getoption('--output_dir') os.makedirs(outdir, exist_ok=True) return outdir -@pytest.fixture(scope="session") +@pytest.fixture(scope='session') def datasets(data_dir): """Locate downloaded datasets.""" dsets = {} - dsets["ds001419"] = os.path.join(data_dir, "ds001419", "ds001419") - dsets["ds001419-aroma"] = os.path.join(data_dir, "ds001419-aroma", "ds001419-aroma") - dsets["pnc"] = os.path.join(data_dir, "pnc") - dsets["nibabies"] = os.path.join(data_dir, "nibabies/derivatives/nibabies") - dsets["fmriprep_without_freesurfer"] = os.path.join( + dsets['ds001419'] = os.path.join(data_dir, 'ds001419', 'ds001419') + dsets['ds001419-aroma'] = os.path.join(data_dir, 'ds001419-aroma', 'ds001419-aroma') + dsets['pnc'] = os.path.join(data_dir, 
'pnc') + dsets['nibabies'] = os.path.join(data_dir, 'nibabies/derivatives/nibabies') + dsets['fmriprep_without_freesurfer'] = os.path.join( data_dir, - "fmriprepwithoutfreesurfer", + 'fmriprepwithoutfreesurfer', ) - dsets["schaefer100"] = os.path.join(data_dir, "schaefer100") + dsets['schaefer100'] = os.path.join(data_dir, 'schaefer100') return dsets -@pytest.fixture(scope="session") +@pytest.fixture(scope='session') def ds001419_data(datasets): """Collect a list of files from ds001419 that will be used by misc. tests.""" - subj_dir = os.path.join(datasets["ds001419"], "sub-01") - func_dir = os.path.join(subj_dir, "func") - anat_dir = os.path.join(subj_dir, "anat") + subj_dir = os.path.join(datasets['ds001419'], 'sub-01') + func_dir = os.path.join(subj_dir, 'func') + anat_dir = os.path.join(subj_dir, 'anat') if not os.path.isdir(subj_dir): - raise Exception(os.listdir(datasets["ds001419"])) + raise Exception(os.listdir(datasets['ds001419'])) files = {} - files["nifti_file"] = os.path.join( + files['nifti_file'] = os.path.join( func_dir, - "sub-01_task-rest_space-MNI152NLin6Asym_res-2_desc-preproc_bold.nii.gz", + 'sub-01_task-rest_space-MNI152NLin6Asym_res-2_desc-preproc_bold.nii.gz', ) - files["cifti_file"] = os.path.join( + files['cifti_file'] = os.path.join( func_dir, - "sub-01_task-rest_space-fsLR_den-91k_bold.dtseries.nii", + 'sub-01_task-rest_space-fsLR_den-91k_bold.dtseries.nii', ) - files["gifti_file"] = os.path.join( + files['gifti_file'] = os.path.join( func_dir, - "sub-01_task-rest_hemi-L_space-fsaverage5_bold.func.gii", + 'sub-01_task-rest_hemi-L_space-fsaverage5_bold.func.gii', ) - files["brain_mask_file"] = os.path.join( + files['brain_mask_file'] = os.path.join( func_dir, - "sub-01_task-rest_space-MNI152NLin6Asym_res-2_desc-brain_mask.nii.gz", + 'sub-01_task-rest_space-MNI152NLin6Asym_res-2_desc-brain_mask.nii.gz', ) - files["confounds_file"] = os.path.join( + files['confounds_file'] = os.path.join( func_dir, - "sub-01_task-rest_desc-confounds_timeseries.tsv", + 'sub-01_task-rest_desc-confounds_timeseries.tsv', ) - files["confounds_json"] = os.path.join( + files['confounds_json'] = os.path.join( func_dir, - "sub-01_task-rest_desc-confounds_timeseries.json", + 'sub-01_task-rest_desc-confounds_timeseries.json', ) - files["anat_to_template_xfm"] = os.path.join( + files['anat_to_template_xfm'] = os.path.join( anat_dir, - "sub-01_from-T1w_to-MNI152NLin6Asym_mode-image_xfm.h5", + 'sub-01_from-T1w_to-MNI152NLin6Asym_mode-image_xfm.h5', ) - files["template_to_anat_xfm"] = os.path.join( + files['template_to_anat_xfm'] = os.path.join( anat_dir, - "sub-01_from-MNI152NLin6Asym_to-T1w_mode-image_xfm.h5", + 'sub-01_from-MNI152NLin6Asym_to-T1w_mode-image_xfm.h5', ) - files["boldref"] = os.path.join( + files['boldref'] = os.path.join( func_dir, - "sub-01_task-rest_space-MNI152NLin6Asym_res-2_boldref.nii.gz", + 'sub-01_task-rest_space-MNI152NLin6Asym_res-2_boldref.nii.gz', ) - files["boldref_t1w"] = os.path.join(func_dir, "sub-01_task-rest_space-T1w_boldref.nii.gz") - files["t1w"] = os.path.join(anat_dir, "sub-01_desc-preproc_T1w.nii.gz") - files["t1w_mni"] = os.path.join( + files['boldref_t1w'] = os.path.join(func_dir, 'sub-01_task-rest_space-T1w_boldref.nii.gz') + files['t1w'] = os.path.join(anat_dir, 'sub-01_desc-preproc_T1w.nii.gz') + files['t1w_mni'] = os.path.join( anat_dir, - "sub-01_space-MNI152NLin6Asym_res-2_desc-preproc_T1w.nii.gz", + 'sub-01_space-MNI152NLin6Asym_res-2_desc-preproc_T1w.nii.gz', ) return files -@pytest.fixture(scope="session") +@pytest.fixture(scope='session') def 
pnc_data(datasets): """Collect a list of files from pnc that will be used by misc. tests.""" - subj_dir = os.path.join(datasets["pnc"], "sub-1648798153", "ses-PNC1") - func_dir = os.path.join(subj_dir, "func") - anat_dir = os.path.join(subj_dir, "anat") + subj_dir = os.path.join(datasets['pnc'], 'sub-1648798153', 'ses-PNC1') + func_dir = os.path.join(subj_dir, 'func') + anat_dir = os.path.join(subj_dir, 'anat') files = {} - files["nifti_file"] = os.path.join( + files['nifti_file'] = os.path.join( func_dir, ( - "sub-1648798153_ses-PNC1_task-rest_acq-singleband_space-MNI152NLin6Asym_res-2_" - "desc-preproc_bold.nii.gz" + 'sub-1648798153_ses-PNC1_task-rest_acq-singleband_space-MNI152NLin6Asym_res-2_' + 'desc-preproc_bold.nii.gz' ), ) - files["cifti_file"] = os.path.join( + files['cifti_file'] = os.path.join( func_dir, - "sub-1648798153_ses-PNC1_task-rest_acq-singleband_space-fsLR_den-91k_bold.dtseries.nii", + 'sub-1648798153_ses-PNC1_task-rest_acq-singleband_space-fsLR_den-91k_bold.dtseries.nii', ) - files["brain_mask_file"] = os.path.join( + files['brain_mask_file'] = os.path.join( func_dir, - "sub-1648798153_ses-PNC1_task-rest_space-MNI152NLin6Asym_res-2_desc-brain_mask.nii.gz", + 'sub-1648798153_ses-PNC1_task-rest_space-MNI152NLin6Asym_res-2_desc-brain_mask.nii.gz', ) - files["confounds_file"] = os.path.join( + files['confounds_file'] = os.path.join( func_dir, - "sub-1648798153_ses-PNC1_task-rest_desc-confounds_timeseries.tsv", + 'sub-1648798153_ses-PNC1_task-rest_desc-confounds_timeseries.tsv', ) - files["confounds_json"] = os.path.join( + files['confounds_json'] = os.path.join( func_dir, - "sub-1648798153_ses-PNC1_task-rest_desc-confounds_timeseries.json", + 'sub-1648798153_ses-PNC1_task-rest_desc-confounds_timeseries.json', ) - files["anat_to_template_xfm"] = os.path.join( + files['anat_to_template_xfm'] = os.path.join( anat_dir, - "sub-1648798153_ses-PNC1_acq-refaced_from-T1w_to-MNI152NLin6Asym_mode-image_xfm.h5", + 'sub-1648798153_ses-PNC1_acq-refaced_from-T1w_to-MNI152NLin6Asym_mode-image_xfm.h5', ) - files["template_to_anat_xfm"] = os.path.join( + files['template_to_anat_xfm'] = os.path.join( anat_dir, - "sub-1648798153_ses-PNC1_acq-refaced_from-MNI152NLin6Asym_to-T1w_mode-image_xfm.h5", + 'sub-1648798153_ses-PNC1_acq-refaced_from-MNI152NLin6Asym_to-T1w_mode-image_xfm.h5', ) - files["boldref"] = os.path.join( + files['boldref'] = os.path.join( func_dir, - "sub-1648798153_ses-PNC1_task-rest_space-MNI152NLin6Asym_res-2_boldref.nii.gz", + 'sub-1648798153_ses-PNC1_task-rest_space-MNI152NLin6Asym_res-2_boldref.nii.gz', ) - files["anat_mask"] = os.path.join( + files['anat_mask'] = os.path.join( anat_dir, - "sub-1648798153_ses-PNC1_acq-refaced_desc-brain_mask.nii.gz", + 'sub-1648798153_ses-PNC1_acq-refaced_desc-brain_mask.nii.gz', ) - files["t1w"] = os.path.join( + files['t1w'] = os.path.join( anat_dir, - "sub-1648798153_ses-PNC1_acq-refaced_desc-preproc_T1w.nii.gz", + 'sub-1648798153_ses-PNC1_acq-refaced_desc-preproc_T1w.nii.gz', ) - files["t1w_mni"] = os.path.join( + files['t1w_mni'] = os.path.join( anat_dir, ( - "sub-1648798153_ses-PNC1_acq-refaced_space-MNI152NLin6Asym_" - "res-2_desc-preproc_T1w.nii.gz" + 'sub-1648798153_ses-PNC1_acq-refaced_space-MNI152NLin6Asym_' + 'res-2_desc-preproc_T1w.nii.gz' ), ) return files -@pytest.fixture(scope="session") +@pytest.fixture(scope='session') def fmriprep_without_freesurfer_data(datasets): """Collect a list of fmriprepwithoutfreesurfer files that will be used by misc. 
tests.""" - subj_dir = os.path.join(datasets["fmriprep_without_freesurfer"], "sub-01") - func_dir = os.path.join(subj_dir, "func") + subj_dir = os.path.join(datasets['fmriprep_without_freesurfer'], 'sub-01') + func_dir = os.path.join(subj_dir, 'func') files = {} - files["nifti_file"] = os.path.join( + files['nifti_file'] = os.path.join( func_dir, - "sub-01_task-mixedgamblestask_run-1_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz", + 'sub-01_task-mixedgamblestask_run-1_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz', ) - files["brain_mask_file"] = os.path.join( + files['brain_mask_file'] = os.path.join( func_dir, - "sub-01_task-mixedgamblestask_run-1_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz", + 'sub-01_task-mixedgamblestask_run-1_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz', ) - files["confounds_file"] = os.path.join( + files['confounds_file'] = os.path.join( func_dir, - "sub-01_task-mixedgamblestask_run-1_desc-confounds_timeseries.tsv", + 'sub-01_task-mixedgamblestask_run-1_desc-confounds_timeseries.tsv', ) - files["boldref"] = os.path.join( + files['boldref'] = os.path.join( func_dir, - "sub-01_task-mixedgamblestask_run-1_space-MNI152NLin2009cAsym_boldref.nii.gz", + 'sub-01_task-mixedgamblestask_run-1_space-MNI152NLin2009cAsym_boldref.nii.gz', ) return files @@ -222,22 +222,23 @@ def fmriprep_without_freesurfer_data(datasets): # FS_LICENSE = os.path.join(working_dir, "license.txt") # os.environ["FS_LICENSE"] = FS_LICENSE # LICENSE_CODE = ( -# "bWF0dGhldy5jaWVzbGFrQHBzeWNoLnVjc2IuZWR1CjIwNzA2CipDZmVWZEg1VVQ4clkKRlNCWVouVWtlVElDdwo=" +# "bWF0dGhldy5jaWVzbGFrQHBzeWNoLnVjc2IuZWR1CjIwNzA2CipDZmVWZEg1VVQ4clkKRlNCWVouVWtlVElDdwo" +# "=" # ) # with open(FS_LICENSE, "w") as f: # f.write(base64.b64decode(LICENSE_CODE).decode()) -@pytest.fixture(scope="session") +@pytest.fixture(scope='session') def base_config(): from xcp_d.tests.tests import mock_config return mock_config -@pytest.fixture(scope="session") +@pytest.fixture(scope='session') def base_parser(): from argparse import ArgumentParser - parser = ArgumentParser(description="Test parser") + parser = ArgumentParser(description='Test parser') return parser diff --git a/xcp_d/tests/run_local_tests.py b/xcp_d/tests/run_local_tests.py old mode 100644 new mode 100755 index eb84bbe96..023986eaa --- a/xcp_d/tests/run_local_tests.py +++ b/xcp_d/tests/run_local_tests.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 """Run tests locally by calling Docker.""" + import argparse import os import subprocess @@ -14,20 +15,20 @@ def _get_parser(): """ parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( - "-k", - dest="test_regex", - metavar="PATTERN", + '-k', + dest='test_regex', + metavar='PATTERN', type=str, - help="Test pattern.", + help='Test pattern.', required=False, default=None, ) parser.add_argument( - "-m", - dest="test_mark", - metavar="LABEL", + '-m', + dest='test_mark', + metavar='LABEL', type=str, - help="Test mark label.", + help='Test mark label.', required=False, default=None, ) @@ -37,52 +38,65 @@ def _get_parser(): def run_command(command, env=None): """Run a given shell command with certain environment variables set. - Keep this out of the real xcp_d code so that devs don't need to install ASLPrep to run tests. + Keep this out of the real XCP-D code so that devs don't need to install XCP-D to run tests. 
""" merged_env = os.environ if env: merged_env.update(env) process = subprocess.Popen( - command, + command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - shell=True, + shell=False, env=merged_env, ) while True: line = process.stdout.readline() - line = str(line, "utf-8")[:-1] + line = str(line, 'utf-8')[:-1] print(line) - if line == "" and process.poll() is not None: + if line == '' and process.poll() is not None: break if process.returncode != 0: raise RuntimeError( - f"Non zero return code: {process.returncode}\n" f"{command}\n\n{process.stdout.read()}" + f'Non zero return code: {process.returncode}\n' f'{command}\n\n{process.stdout.read()}' ) -def run_tests(test_regex, test_mark): +def run_tests(test_regex, test_mark, check_path): """Run the tests.""" local_patch = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - mounted_code = "/usr/local/miniconda/lib/python3.10/site-packages/xcp_d" - run_str = "docker run --rm -ti " - run_str += f"-v {local_patch}:/usr/local/miniconda/lib/python3.10/site-packages/xcp_d " - run_str += "--entrypoint pytest " - run_str += "pennlinc/xcp_d:unstable " - run_str += ( - f"{mounted_code}/xcp_d " - f"--data_dir={mounted_code}/xcp_d/tests/test_data " - f"--output_dir={mounted_code}/xcp_d/tests/pytests/out " - f"--working_dir={mounted_code}/xcp_d/tests/pytests/work " - ) - if test_regex: - run_str += f"-k {test_regex} " - elif test_mark: - run_str += f"-rP -o log_cli=true -m {test_mark} " + mounted_code = '/usr/local/miniconda/lib/python3.10/site-packages/xcp_d' + + if check_path: + run_str = ( + 'docker run --rm -ti ' + '--entrypoint /bin/ls ' + f'pennlinc/xcp_d:unstable {mounted_code}' + ) + try: + run_command(run_str) + print(f'Path found: {mounted_code}.') + except RuntimeError as exc: + raise FileNotFoundError(f'Path not found: {mounted_code}') from exc + else: + run_str = 'docker run --rm -ti ' + run_str += f'-v {local_patch}:/usr/local/miniconda/lib/python3.10/site-packages/xcp_d ' + run_str += '--entrypoint pytest ' + run_str += 'pennlinc/xcp_d:unstable ' + run_str += ( + f'{mounted_code}/xcp_d ' + f'--data_dir={mounted_code}/xcp_d/tests/test_data ' + f'--output_dir={mounted_code}/xcp_d/tests/pytests/out ' + f'--working_dir={mounted_code}/xcp_d/tests/pytests/work ' + ) + if test_regex: + run_str += f'-k {test_regex} ' + elif test_mark: + run_str += f'-rP -o log_cli=true -m {test_mark} ' - run_command(run_str) + run_command(run_str) def _main(argv=None): @@ -92,5 +106,5 @@ def _main(argv=None): run_tests(**kwargs) -if __name__ == "__main__": +if __name__ == '__main__': _main() diff --git a/xcp_d/tests/test_cli.py b/xcp_d/tests/test_cli.py index 939f71590..9f96f7619 100644 --- a/xcp_d/tests/test_cli.py +++ b/xcp_d/tests/test_cli.py @@ -27,54 +27,54 @@ write_derivative_description, ) -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') @pytest.mark.integration @pytest.mark.ds001419_nifti def test_ds001419_nifti(data_dir, output_dir, working_dir): """Run xcp_d on ds001419 fMRIPrep derivatives, with nifti options.""" - test_name = "test_ds001419_nifti" + test_name = 'test_ds001419_nifti' - dataset_dir = download_test_data("ds001419", data_dir) - derivs_dir = download_test_data("ds001419-aroma", data_dir) + dataset_dir = download_test_data('ds001419', data_dir) + derivs_dir = download_test_data('ds001419-aroma', data_dir) out_dir = os.path.join(output_dir, test_name) work_dir = os.path.join(working_dir, test_name) test_data_dir = get_test_data_path() - filter_file = 
os.path.join(test_data_dir, "ds001419_nifti_filter.json") + filter_file = os.path.join(test_data_dir, 'ds001419_nifti_filter.json') parameters = [ dataset_dir, out_dir, - "participant", - "--mode=none", - "--datasets", - f"aroma={derivs_dir}", - f"-w={work_dir}", - f"--bids-filter-file={filter_file}", - "--nuisance-regressors=aroma_gsr", - "--dummy-scans=4", - "--fd-thresh=0.2", - "--head_radius=40", - "--motion-filter-type=lp", - "--band-stop-min=6", - "--skip-parcellation", - "--min-time=100", - "--combine-runs", - "--output-type=censored", - "--combine-runs=y", - "--linc-qc=y", - "--abcc-qc=n", - "--despike=n", - "--file-format=nifti", - "--input-type=fmriprep", - "--warp-surfaces-native2std=n", + 'participant', + '--mode=none', + '--datasets', + f'aroma={derivs_dir}', + f'-w={work_dir}', + f'--bids-filter-file={filter_file}', + '--nuisance-regressors=aroma_gsr', + '--dummy-scans=4', + '--fd-thresh=0.2', + '--head_radius=40', + '--motion-filter-type=lp', + '--band-stop-min=6', + '--skip-parcellation', + '--min-time=100', + '--combine-runs', + '--output-type=censored', + '--combine-runs=y', + '--linc-qc=y', + '--abcc-qc=n', + '--despike=n', + '--file-format=nifti', + '--input-type=fmriprep', + '--warp-surfaces-native2std=n', ] _run_and_generate( test_name=test_name, parameters=parameters, - input_type="nifti", + input_type='nifti', ) @@ -82,48 +82,48 @@ def test_ds001419_nifti(data_dir, output_dir, working_dir): @pytest.mark.ds001419_cifti def test_ds001419_cifti(data_dir, output_dir, working_dir): """Run xcp_d on ds001419 fMRIPrep derivatives, with cifti options.""" - test_name = "test_ds001419_cifti" + test_name = 'test_ds001419_cifti' - dataset_dir = download_test_data("ds001419", data_dir) + dataset_dir = download_test_data('ds001419', data_dir) out_dir = os.path.join(output_dir, test_name) work_dir = os.path.join(working_dir, test_name) test_data_dir = get_test_data_path() - filter_file = os.path.join(test_data_dir, "ds001419_cifti_filter.json") + filter_file = os.path.join(test_data_dir, 'ds001419_cifti_filter.json') parameters = [ dataset_dir, out_dir, - "participant", - "--mode=abcd", - f"-w={work_dir}", - "--task-id=imagery", - f"--bids-filter-file={filter_file}", - "--nuisance-regressors=acompcor_gsr", - "--warp_surfaces_native2std=n", - "--head_radius=40", - "--motion-filter-type=notch", - "--motion-filter-order=4", - "--band-stop-min=12", - "--band-stop-max=18", - "--dummy-scans=auto", - "--upper-bpf=0.0", - "--min-time=100", - "--create-matrices", - "all", - "80", - "200", - "--atlases", - "4S156Parcels", - "4S256Parcels", - "4S356Parcels", - "4S456Parcels", - "--linc-qc", + 'participant', + '--mode=abcd', + f'-w={work_dir}', + '--task-id=imagery', + f'--bids-filter-file={filter_file}', + '--nuisance-regressors=acompcor_gsr', + '--warp_surfaces_native2std=n', + '--head_radius=40', + '--motion-filter-type=notch', + '--motion-filter-order=4', + '--band-stop-min=12', + '--band-stop-max=18', + '--dummy-scans=auto', + '--upper-bpf=0.0', + '--min-time=100', + '--create-matrices', + 'all', + '80', + '200', + '--atlases', + '4S156Parcels', + '4S256Parcels', + '4S356Parcels', + '4S456Parcels', + '--linc-qc', ] _run_and_generate( test_name=test_name, parameters=parameters, - input_type="cifti", + input_type='cifti', ) @@ -131,36 +131,36 @@ def test_ds001419_cifti(data_dir, output_dir, working_dir): @pytest.mark.ukbiobank def test_ukbiobank(data_dir, output_dir, working_dir): """Run xcp_d on UK Biobank derivatives.""" - test_name = "test_ukbiobank" + test_name = 'test_ukbiobank' - 
dataset_dir = download_test_data("ukbiobank", data_dir) + dataset_dir = download_test_data('ukbiobank', data_dir) out_dir = os.path.join(output_dir, test_name) work_dir = os.path.join(working_dir, test_name) parameters = [ dataset_dir, out_dir, - "participant", - "--mode=linc", - "--file-format=nifti", - "--warp-surfaces-native2std=n", - "--combine-runs=n", - f"-w={work_dir}", - "--input-type=ukb", - "--nuisance-regressors=gsr_only", - "--dummy-scans=4", - "--fd-thresh=0.2", - "--head_radius=40", - "--motion-filter-type=lp", - "--band-stop-min=6", - "--min-coverage=0.1", - "--random-seed=8675309", - "--min-time=100", + 'participant', + '--mode=linc', + '--file-format=nifti', + '--warp-surfaces-native2std=n', + '--combine-runs=n', + f'-w={work_dir}', + '--input-type=ukb', + '--nuisance-regressors=gsr_only', + '--dummy-scans=4', + '--fd-thresh=0.2', + '--head_radius=40', + '--motion-filter-type=lp', + '--band-stop-min=6', + '--min-coverage=0.1', + '--random-seed=8675309', + '--min-time=100', ] _run_and_generate( test_name=test_name, parameters=parameters, - input_type="ukb", + input_type='ukb', ) @@ -168,56 +168,56 @@ def test_ukbiobank(data_dir, output_dir, working_dir): @pytest.mark.pnc_cifti def test_pnc_cifti(data_dir, output_dir, working_dir): """Run xcp_d on pnc fMRIPrep derivatives, with cifti options.""" - test_name = "test_pnc_cifti" + test_name = 'test_pnc_cifti' - dataset_dir = download_test_data("pnc", data_dir) + dataset_dir = download_test_data('pnc', data_dir) out_dir = os.path.join(output_dir, test_name) work_dir = os.path.join(working_dir, test_name) test_data_dir = get_test_data_path() - filter_file = os.path.join(test_data_dir, "pnc_cifti_filter.json") + filter_file = os.path.join(test_data_dir, 'pnc_cifti_filter.json') # Make the last few volumes outliers to check https://github.com/PennLINC/xcp_d/issues/949 motion_file = os.path.join( dataset_dir, - "sub-1648798153/ses-PNC1/func/" - "sub-1648798153_ses-PNC1_task-rest_acq-singleband_desc-confounds_timeseries.tsv", + 'sub-1648798153/ses-PNC1/func/' + 'sub-1648798153_ses-PNC1_task-rest_acq-singleband_desc-confounds_timeseries.tsv', ) motion_df = pd.read_table(motion_file) - motion_df.loc[56:, "trans_x"] = np.arange(1, 5) * 20 - motion_df.to_csv(motion_file, sep="\t", index=False) - LOGGER.warning(f"Overwrote confounds file at {motion_file}.") + motion_df.loc[56:, 'trans_x'] = np.arange(1, 5) * 20 + motion_df.to_csv(motion_file, sep='\t', index=False) + LOGGER.warning(f'Overwrote confounds file at {motion_file}.') parameters = [ dataset_dir, out_dir, - "participant", - "--mode=abcd", - f"-w={work_dir}", - f"--bids-filter-file={filter_file}", - "--min-time=60", - "--nuisance-regressors=acompcor_gsr", - "--head-radius=40", - "--motion-filter-type=notch", - "--band-stop-min=12", - "--band-stop-max=18", - "--dummy-scans=auto", - "--upper-bpf=0.0", - "--atlases", - "Tian", - "HCP", - "MyersLabonte", - "--aggregate-session-reports=1", - "--create-matrices", - "300", - "480", - "all", - "--linc-qc=n", + 'participant', + '--mode=abcd', + f'-w={work_dir}', + f'--bids-filter-file={filter_file}', + '--min-time=60', + '--nuisance-regressors=acompcor_gsr', + '--head-radius=40', + '--motion-filter-type=notch', + '--band-stop-min=12', + '--band-stop-max=18', + '--dummy-scans=auto', + '--upper-bpf=0.0', + '--atlases', + 'Tian', + 'HCP', + 'MyersLabonte', + '--aggregate-session-reports=1', + '--create-matrices', + '300', + '480', + 'all', + '--linc-qc=n', ] _run_and_generate( test_name=test_name, parameters=parameters, - input_type="cifti", 
+ input_type='cifti', ) @@ -225,53 +225,53 @@ def test_pnc_cifti(data_dir, output_dir, working_dir): @pytest.mark.pnc_cifti_t2wonly def test_pnc_cifti_t2wonly(data_dir, output_dir, working_dir): """Run xcp_d on pnc fMRIPrep derivatives, with cifti options and a simulated T2w image.""" - test_name = "test_pnc_cifti_t2wonly" + test_name = 'test_pnc_cifti_t2wonly' - dataset_dir = download_test_data("pnc", data_dir) + dataset_dir = download_test_data('pnc', data_dir) out_dir = os.path.join(output_dir, test_name) work_dir = os.path.join(working_dir, test_name) # Rename all T1w-related files in anat folder to T2w. # T1w-related files in func folder should not impact XCP-D. - anat_dir = os.path.join(dataset_dir, "sub-1648798153/ses-PNC1/anat") - files_to_copy = sorted(glob(os.path.join(anat_dir, "*T1w*"))) + anat_dir = os.path.join(dataset_dir, 'sub-1648798153/ses-PNC1/anat') + files_to_copy = sorted(glob(os.path.join(anat_dir, '*T1w*'))) for file_to_copy in files_to_copy: - t2w_file = file_to_copy.replace("T1w", "T2w") + t2w_file = file_to_copy.replace('T1w', 'T2w') if not os.path.isfile(t2w_file): os.rename(os.path.join(anat_dir, file_to_copy), t2w_file) tree = list_files(dataset_dir) - LOGGER.info(f"Tree after adding T2w:\n{tree}") + LOGGER.info(f'Tree after adding T2w:\n{tree}') test_data_dir = get_test_data_path() - filter_file = os.path.join(test_data_dir, "pnc_cifti_t2wonly_filter.json") + filter_file = os.path.join(test_data_dir, 'pnc_cifti_t2wonly_filter.json') parameters = [ dataset_dir, out_dir, - "participant", - "--mode=abcd", - f"-w={work_dir}", - f"--bids-filter-file={filter_file}", - "--nuisance-regressors=none", - "--head_radius=40", - "--motion-filter-type=notch", - "--band-stop-min=12", - "--band-stop-max=18", - "--dummy-scans=auto", - "--atlases", - "4S156Parcels", - "MIDB", - "--min-time=100", - "--despike=n", - "--disable-bandpass-filter", - "--create-matrices=all", - "--linc-qc=n", + 'participant', + '--mode=abcd', + f'-w={work_dir}', + f'--bids-filter-file={filter_file}', + '--nuisance-regressors=none', + '--head_radius=40', + '--motion-filter-type=notch', + '--band-stop-min=12', + '--band-stop-max=18', + '--dummy-scans=auto', + '--atlases', + '4S156Parcels', + 'MIDB', + '--min-time=100', + '--despike=n', + '--disable-bandpass-filter', + '--create-matrices=all', + '--linc-qc=n', ] _run_and_generate( test_name=test_name, parameters=parameters, - input_type="cifti", + input_type='cifti', test_main=False, ) @@ -288,46 +288,46 @@ def test_fmriprep_without_freesurfer(data_dir, output_dir, working_dir): This test uses a bash call to run XCP-D. This won't count toward coverage, but will help test the command-line interface. 
""" - test_name = "test_fmriprep_without_freesurfer" + test_name = 'test_fmriprep_without_freesurfer' - dataset_dir = download_test_data("fmriprepwithoutfreesurfer", data_dir) - atlas_dir = download_test_data("schaefer100", data_dir) + dataset_dir = download_test_data('fmriprepwithoutfreesurfer', data_dir) + atlas_dir = download_test_data('schaefer100', data_dir) tmpdir = os.path.join(output_dir, test_name) - out_dir = os.path.join(tmpdir, "xcp_d") + out_dir = os.path.join(tmpdir, 'xcp_d') work_dir = os.path.join(working_dir, test_name) parameters = [ dataset_dir, out_dir, - "participant", - "--mode=linc", - f"-w={work_dir}", - "--file-format=nifti", - "--datasets", - f"schaefer={atlas_dir}", - "--atlases", - "4S156Parcels", - "Schaefer100", - "--nthreads=2", - "--omp-nthreads=2", - "--head_radius=40", - "-f=100", - "--nuisance-regressors=27P", - "--despike=n", - "--disable-bandpass-filter", - "--min-time=20", - "--dummy-scans=1", - "--abcc-qc", + 'participant', + '--mode=linc', + f'-w={work_dir}', + '--file-format=nifti', + '--datasets', + f'schaefer={atlas_dir}', + '--atlases', + '4S156Parcels', + 'Schaefer100', + '--nthreads=2', + '--omp-nthreads=2', + '--head_radius=40', + '-f=100', + '--nuisance-regressors=27P', + '--despike=n', + '--disable-bandpass-filter', + '--min-time=20', + '--dummy-scans=1', + '--abcc-qc', ] _run_and_generate( test_name=test_name, parameters=parameters, - input_type="nifti", + input_type='nifti', ) # Run combine-qc too - combineqc.main([out_dir, "summary"]) + combineqc.main([out_dir, 'summary']) @pytest.mark.integration @@ -342,100 +342,100 @@ def test_fmriprep_without_freesurfer_with_main(data_dir, output_dir, working_dir This test uses a bash call to run XCP-D. This won't count toward coverage, but will help test the command-line interface. 
""" - test_name = "test_fmriprep_without_freesurfer" + test_name = 'test_fmriprep_without_freesurfer' - dataset_dir = download_test_data("fmriprepwithoutfreesurfer", data_dir) - atlas_dir = download_test_data("schaefer100", data_dir) - tmpdir = os.path.join(output_dir, f"{test_name}_with_main") - out_dir = os.path.join(tmpdir, "xcp_d") - work_dir = os.path.join(working_dir, f"{test_name}_with_main") + dataset_dir = download_test_data('fmriprepwithoutfreesurfer', data_dir) + atlas_dir = download_test_data('schaefer100', data_dir) + tmpdir = os.path.join(output_dir, f'{test_name}_with_main') + out_dir = os.path.join(tmpdir, 'xcp_d') + work_dir = os.path.join(working_dir, f'{test_name}_with_main') parameters = [ dataset_dir, out_dir, - "participant", - "--mode=linc", - f"-w={work_dir}", - "--file-format=nifti", - "--datasets", - f"schaefer={atlas_dir}", - "--atlases", - "4S156Parcels", - "Schaefer100", - "--nthreads=2", - "--omp-nthreads=2", - "--head_radius=40", - "-f=100", - "--nuisance-regressors=27P", - "--despike=n", - "--disable-bandpass-filter", - "--min-time=20", - "--dummy-scans=1", - "--abcc-qc", + 'participant', + '--mode=linc', + f'-w={work_dir}', + '--file-format=nifti', + '--datasets', + f'schaefer={atlas_dir}', + '--atlases', + '4S156Parcels', + 'Schaefer100', + '--nthreads=2', + '--omp-nthreads=2', + '--head_radius=40', + '-f=100', + '--nuisance-regressors=27P', + '--despike=n', + '--disable-bandpass-filter', + '--min-time=20', + '--dummy-scans=1', + '--abcc-qc', ] _run_and_generate( test_name=test_name, parameters=parameters, - input_type="nifti", + input_type='nifti', test_main=True, ) # Run combine-qc too - combineqc.main([out_dir, "summary"]) + combineqc.main([out_dir, 'summary']) @pytest.mark.integration @pytest.mark.nibabies def test_nibabies(data_dir, output_dir, working_dir): """Run xcp_d on Nibabies derivatives, with nifti options.""" - test_name = "test_nibabies" - input_type = "nibabies" + test_name = 'test_nibabies' + input_type = 'nibabies' - dataset_dir = download_test_data("nibabies", data_dir) - dataset_dir = os.path.join(dataset_dir, "derivatives", "nibabies") + dataset_dir = download_test_data('nibabies', data_dir) + dataset_dir = os.path.join(dataset_dir, 'derivatives', 'nibabies') out_dir = os.path.join(output_dir, test_name) work_dir = os.path.join(working_dir, test_name) parameters = [ dataset_dir, out_dir, - "participant", - "--mode=hbcd", - "--file-format=nifti", - "--warp-surfaces-native2std=n", - f"-w={work_dir}", - f"--input-type={input_type}", - "--nuisance-regressors=27P", - "--head_radius=auto", - "--smoothing=0", - "--fd-thresh=0", - "--create-matrices=all", - "--motion-filter-type=none", - "--linc-qc=n", + 'participant', + '--mode=hbcd', + '--file-format=nifti', + '--warp-surfaces-native2std=n', + f'-w={work_dir}', + f'--input-type={input_type}', + '--nuisance-regressors=27P', + '--head_radius=auto', + '--smoothing=0', + '--fd-thresh=0', + '--create-matrices=all', + '--motion-filter-type=none', + '--linc-qc=n', ] _run_and_generate( test_name=test_name, parameters=parameters, - input_type="nibabies", + input_type='nibabies', ) def _run_and_generate(test_name, parameters, input_type, test_main=False): from xcp_d import config - parameters.append("--clean-workdir") - parameters.append("--stop-on-first-crash") - parameters.append("--notrack") - parameters.append("-vv") + parameters.append('--clean-workdir') + parameters.append('--stop-on-first-crash') + parameters.append('--notrack') + parameters.append('-vv') # Add concurrency options if they're 
not already specified parameters = update_resources(parameters) if test_main: # This runs, but for some reason doesn't count toward coverage. - argv = ["xcp_d"] + parameters - with patch.object(sys, "argv", argv): + argv = ['xcp_d'] + parameters + with patch.object(sys, 'argv', argv): with pytest.raises(SystemExit) as e: run.main() @@ -444,12 +444,12 @@ def _run_and_generate(test_name, parameters, input_type, test_main=False): # XXX: I want to drop this option and use the main function, # but the main function doesn't track coverage correctly. parse_args(parameters) - config_file = config.execution.work_dir / f"config-{config.execution.run_uuid}.toml" - config.loggers.cli.warning(f"Saving config file to {config_file}") + config_file = config.execution.work_dir / f'config-{config.execution.run_uuid}.toml' + config.loggers.cli.warning(f'Saving config file to {config_file}') config.to_filename(config_file) retval = build_workflow(config_file, retval={}) - xcpd_wf = retval["workflow"] + xcpd_wf = retval['workflow'] xcpd_wf.run(**config.nipype.get_plugin()) write_derivative_description( config.execution.fmri_dir, @@ -457,11 +457,11 @@ def _run_and_generate(test_name, parameters, input_type, test_main=False): dataset_links=config.execution.dataset_links, ) if config.execution.atlases: - write_atlas_dataset_description(config.execution.output_dir / "atlases") + write_atlas_dataset_description(config.execution.output_dir / 'atlases') build_boilerplate(str(config_file), xcpd_wf) session_list = ( - config.execution.bids_filters.get("bold", {}).get("session") + config.execution.bids_filters.get('bold', {}).get('session') if config.execution.bids_filters else None ) @@ -473,6 +473,6 @@ def _run_and_generate(test_name, parameters, input_type, test_main=False): session_list=session_list, ) - output_list_file = os.path.join(get_test_data_path(), f"{test_name}_outputs.txt") + output_list_file = os.path.join(get_test_data_path(), f'{test_name}_outputs.txt') check_generated_files(config.execution.output_dir, output_list_file) check_affines(config.execution.fmri_dir, config.execution.output_dir, input_type=input_type) diff --git a/xcp_d/tests/test_cli_run.py b/xcp_d/tests/test_cli_run.py index 1ad18a867..9733b256e 100644 --- a/xcp_d/tests/test_cli_run.py +++ b/xcp_d/tests/test_cli_run.py @@ -21,37 +21,37 @@ def __init__(self, **entries): self.__dict__.update(entries) -@pytest.fixture(scope="module") +@pytest.fixture(scope='module') def base_opts(): """Create base options.""" opts_dict = { - "fmri_dir": Path("dset"), - "output_dir": Path("out"), - "work_dir": Path("work"), - "analysis_level": "participant", - "datasets": {}, - "mode": "linc", - "file_format": "auto", - "input_type": "auto", - "confounds_config": "auto", - "high_pass": 0.01, - "low_pass": 0.1, - "bandpass_filter": True, - "fd_thresh": "auto", - "min_time": 240, - "motion_filter_type": None, - "band_stop_min": None, - "band_stop_max": None, - "motion_filter_order": None, - "process_surfaces": "auto", - "atlases": ["Glasser"], - "dcan_correlation_lengths": None, - "despike": "auto", - "abcc_qc": "auto", - "linc_qc": "auto", - "combine_runs": "auto", - "output_type": "auto", - "fs_license_file": None, + 'fmri_dir': Path('dset'), + 'output_dir': Path('out'), + 'work_dir': Path('work'), + 'analysis_level': 'participant', + 'datasets': {}, + 'mode': 'linc', + 'file_format': 'auto', + 'input_type': 'auto', + 'confounds_config': 'auto', + 'high_pass': 0.01, + 'low_pass': 0.1, + 'bandpass_filter': True, + 'fd_thresh': 'auto', + 'min_time': 240, + 
'motion_filter_type': None, + 'band_stop_min': None, + 'band_stop_max': None, + 'motion_filter_order': None, + 'process_surfaces': 'auto', + 'atlases': ['Glasser'], + 'dcan_correlation_lengths': None, + 'despike': 'auto', + 'abcc_qc': 'auto', + 'linc_qc': 'auto', + 'combine_runs': 'auto', + 'output_type': 'auto', + 'fs_license_file': None, } opts = FakeOptions(**opts_dict) return opts @@ -73,7 +73,7 @@ def test_validate_parameters_02(base_opts, base_parser, caplog): opts = parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) assert opts.min_time == 0 - assert "Framewise displacement-based scrubbing is disabled." in caplog.text + assert 'Framewise displacement-based scrubbing is disabled.' in caplog.text def test_validate_parameters_03(base_opts, base_parser): @@ -81,7 +81,7 @@ def test_validate_parameters_03(base_opts, base_parser): opts = deepcopy(base_opts) # Set min > max for notch filter - opts.input_type = "dcan" + opts.input_type = 'dcan' opts.process_surfaces = False opts = parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) @@ -95,12 +95,12 @@ def test_validate_parameters_04(base_opts, base_parser, capsys): # Set min > max for notch filter opts.process_surfaces = True - opts.file_format = "nifti" + opts.file_format = 'nifti' - with pytest.raises(SystemExit, match="2"): + with pytest.raises(SystemExit, match='2'): parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) - assert "you must enable cifti processing" in capsys.readouterr().err + assert 'you must enable cifti processing' in capsys.readouterr().err def test_validate_parameters_05(base_opts, base_parser, caplog): @@ -111,22 +111,22 @@ def test_validate_parameters_05(base_opts, base_parser, caplog): _ = parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) - assert "When no atlases are selected" in caplog.text + assert 'When no atlases are selected' in caplog.text def test_validate_parameters_06(base_opts, base_parser, capsys): """Test parser._validate_parameters nifti + process_surfaces.""" opts = deepcopy(base_opts) - opts.input_type = "ukb" - opts.file_format = "nifti" + opts.input_type = 'ukb' + opts.file_format = 'nifti' opts.process_surfaces = True - with pytest.raises(SystemExit, match="2"): + with pytest.raises(SystemExit, match='2'): parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) stderr = capsys.readouterr().err - assert "--warp-surfaces-native2std is not supported" in stderr - assert "In order to perform surface normalization" in stderr + assert '--warp-surfaces-native2std is not supported' in stderr + assert 'In order to perform surface normalization' in stderr def test_validate_parameters_motion_filtering(base_opts, base_parser, caplog, capsys): @@ -134,55 +134,55 @@ def test_validate_parameters_motion_filtering(base_opts, base_parser, caplog, ca opts = deepcopy(base_opts) # Set notch filter with no min or max - opts.motion_filter_type = "notch" + opts.motion_filter_type = 'notch' opts.band_stop_min = None opts.band_stop_max = None - with pytest.raises(SystemExit, match="2"): + with pytest.raises(SystemExit, match='2'): parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) - assert "Please set both" in capsys.readouterr().err + assert 'Please set both' in capsys.readouterr().err # Set min > max for notch filter - opts.motion_filter_type = "notch" + opts.motion_filter_type = 'notch' opts.band_stop_min = 18 opts.band_stop_max = 12 - with pytest.raises(SystemExit, match="2"): + with 
pytest.raises(SystemExit, match='2'): parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) - assert "must be lower than" in capsys.readouterr().err + assert 'must be lower than' in capsys.readouterr().err # Set min <1 for notch filter - opts.motion_filter_type = "notch" + opts.motion_filter_type = 'notch' opts.band_stop_min = 0.01 opts.band_stop_max = 15 _ = parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) - assert "suspiciously low." in caplog.text + assert 'suspiciously low.' in caplog.text # Set lp without min - opts.motion_filter_type = "lp" + opts.motion_filter_type = 'lp' opts.band_stop_min = None opts.band_stop_max = None - with pytest.raises(SystemExit, match="2"): + with pytest.raises(SystemExit, match='2'): parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) assert "Please set '--band-stop-min'" in capsys.readouterr().err # Set min > max for notch filter - opts.motion_filter_type = "lp" + opts.motion_filter_type = 'lp' opts.band_stop_min = 0.01 opts.band_stop_max = None _ = parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) - assert "suspiciously low." in caplog.text + assert 'suspiciously low.' in caplog.text # Set min > max for notch filter - opts.motion_filter_type = "lp" + opts.motion_filter_type = 'lp' opts.band_stop_min = 12 opts.band_stop_max = 18 @@ -213,17 +213,17 @@ def test_validate_parameters_bandpass_filter(base_opts, base_parser, caplog, cap opts = parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) assert opts.bandpass_filter is False - assert "Bandpass filtering is disabled." in caplog.text + assert 'Bandpass filtering is disabled.' in caplog.text # Set upper BPF below lower one opts.bandpass_filter = True opts.high_pass = 0.01 opts.low_pass = 0.001 - with pytest.raises(SystemExit, match="2"): + with pytest.raises(SystemExit, match='2'): parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) - assert "must be lower than" in capsys.readouterr().err + assert 'must be lower than' in capsys.readouterr().err def _test_validate_parameters_fs_license(base_opts, base_parser, caplog, capsys, tmp_path_factory): @@ -231,55 +231,55 @@ def _test_validate_parameters_fs_license(base_opts, base_parser, caplog, capsys, Not run now that the license isn't required. """ - tmpdir = tmp_path_factory.mktemp("test_validate_parameters_fs_license") + tmpdir = tmp_path_factory.mktemp('test_validate_parameters_fs_license') opts = deepcopy(base_opts) opts.fs_license_file = None # FS_LICENSE exists (set in conftest) parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) - assert "A valid FreeSurfer license file is required." not in caplog.text + assert 'A valid FreeSurfer license file is required.' not in caplog.text # FS_LICENSE doesn't exist - with pytest.raises(SystemExit, match="2"): - with modified_environ(FS_LICENSE="/path/to/missing/file.txt"): + with pytest.raises(SystemExit, match='2'): + with modified_environ(FS_LICENSE='/path/to/missing/file.txt'): parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) - assert "A valid FreeSurfer license file is required." in capsys.readouterr().err + assert 'A valid FreeSurfer license file is required.' 
in capsys.readouterr().err # FS_LICENSE is an existing file - license_file = os.path.join(tmpdir, "license.txt") - with open(license_file, "w") as fo: - fo.write("TEMP") + license_file = os.path.join(tmpdir, 'license.txt') + with open(license_file, 'w') as fo: + fo.write('TEMP') # If file exists, return_code should be 0 opts.fs_license_file = Path(license_file) parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) - assert "Freesurfer license DNE" not in caplog.text + assert 'Freesurfer license DNE' not in caplog.text # If file doesn't exist, return_code should be 1 - with pytest.raises(SystemExit, match="2"): - opts.fs_license_file = Path("/path/to/missing/file.txt") + opts.fs_license_file = Path('/path/to/missing/file.txt') + with pytest.raises(SystemExit, match='2'): parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) - assert "Freesurfer license DNE" in capsys.readouterr().err + assert 'Freesurfer license DNE' in capsys.readouterr().err def test_validate_parameters_linc_mode(base_opts, base_parser, capsys): """Test parser._validate_parameters with linc mode.""" opts = deepcopy(base_opts) - opts.mode = "linc" + opts.mode = 'linc' # linc mode doesn't use abcc_qc but does use linc_qc opts = parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) assert opts.abcc_qc is False assert opts.linc_qc is True - assert opts.file_format == "cifti" + assert opts.file_format == 'cifti' # --create-matrices is not supported opts.dcan_correlation_lengths = [300] - with pytest.raises(SystemExit, match="2"): + with pytest.raises(SystemExit, match='2'): parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) stderr = capsys.readouterr().err @@ -289,8 +289,8 @@ def test_validate_parameters_linc_mode(base_opts, base_parser, capsys): def test_validate_parameters_abcd_mode(base_opts, base_parser, capsys): """Test parser._validate_parameters with abcd mode.""" opts = deepcopy(base_opts) - opts.mode = "abcd" - opts.motion_filter_type = "lp" + opts.mode = 'abcd' + opts.motion_filter_type = 'lp' opts.band_stop_min = 10 # abcd mode does use abcc_qc but doesn't use linc_qc @@ -301,20 +301,20 @@ def test_validate_parameters_abcd_mode(base_opts, base_parser, capsys): assert opts.dcan_correlation_lengths == [] assert opts.despike is True assert opts.fd_thresh == 0.3 - assert opts.file_format == "cifti" - assert opts.input_type == "fmriprep" + assert opts.file_format == 'cifti' + assert opts.input_type == 'fmriprep' assert opts.linc_qc is True assert opts.output_correlations is False assert opts.process_surfaces is True - opts.dcan_correlation_lengths = ["300", "all"] + opts.dcan_correlation_lengths = ['300', 'all'] opts = parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) - assert opts.dcan_correlation_lengths == ["300"] + assert opts.dcan_correlation_lengths == ['300'] assert opts.output_correlations is True # --motion-filter-type is required opts.motion_filter_type = None - with pytest.raises(SystemExit, match="2"): + with pytest.raises(SystemExit, match='2'): parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) stderr = capsys.readouterr().err @@ -324,8 +324,8 @@ def test_validate_parameters_abcd_mode(base_opts, base_parser, capsys): def test_validate_parameters_hbcd_mode(base_opts, base_parser, capsys): """Test parser._validate_parameters with hbcd mode.""" opts = deepcopy(base_opts) - opts.mode = "hbcd" - opts.motion_filter_type = "lp" + opts.mode = 'hbcd' + opts.motion_filter_type = 'lp' 
opts.band_stop_min = 10 # hbcd mode does use abcc_qc but doesn't use linc_qc @@ -336,20 +336,20 @@ def test_validate_parameters_hbcd_mode(base_opts, base_parser, capsys): assert opts.dcan_correlation_lengths == [] assert opts.despike is True assert opts.fd_thresh == 0.3 - assert opts.file_format == "cifti" - assert opts.input_type == "nibabies" + assert opts.file_format == 'cifti' + assert opts.input_type == 'nibabies' assert opts.linc_qc is True assert opts.output_correlations is False assert opts.process_surfaces is True - opts.dcan_correlation_lengths = ["300", "all"] + opts.dcan_correlation_lengths = ['300', 'all'] opts = parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) - assert opts.dcan_correlation_lengths == ["300"] + assert opts.dcan_correlation_lengths == ['300'] assert opts.output_correlations is True # --motion-filter-type is required opts.motion_filter_type = None - with pytest.raises(SystemExit, match="2"): + with pytest.raises(SystemExit, match='2'): parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) stderr = capsys.readouterr().err @@ -359,9 +359,9 @@ def test_validate_parameters_hbcd_mode(base_opts, base_parser, capsys): def test_validate_parameters_none_mode(base_opts, base_parser, capsys): """Test parser._validate_parameters with none mode.""" opts = deepcopy(base_opts) - opts.mode = "none" + opts.mode = 'none' - with pytest.raises(SystemExit, match="2"): + with pytest.raises(SystemExit, match='2'): parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) stderr = capsys.readouterr().err @@ -379,15 +379,15 @@ def test_validate_parameters_none_mode(base_opts, base_parser, capsys): opts.abcc_qc = False opts.combine_runs = False - opts.confounds_config = "none" + opts.confounds_config = 'none' opts.despike = False opts.fd_thresh = 0 - opts.file_format = "nifti" - opts.input_type = "fmriprep" + opts.file_format = 'nifti' + opts.input_type = 'fmriprep' opts.linc_qc = False - opts.motion_filter_type = "none" - opts.output_type = "censored" - opts.params = "36P" + opts.motion_filter_type = 'none' + opts.output_type = 'censored' + opts.params = '36P' opts.process_surfaces = False opts = parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) @@ -395,7 +395,7 @@ def test_validate_parameters_none_mode(base_opts, base_parser, capsys): def test_validate_parameters_other_mode(base_opts, base_parser, capsys): """Test parser._validate_parameters with 'other' mode.""" opts = deepcopy(base_opts) - opts.mode = "other" + opts.mode = 'other' with pytest.raises(AssertionError, match="Unsupported mode 'other'"): parser._validate_parameters(deepcopy(opts), build_log, parser=base_parser) @@ -403,11 +403,11 @@ def test_validate_parameters_other_mode(base_opts, base_parser, capsys): def test_build_parser_01(tmp_path_factory): """Test parser._build_parser with abcd mode.""" - tmpdir = tmp_path_factory.mktemp("test_build_parser_01") - data_dir = os.path.join(tmpdir, "data") + tmpdir = tmp_path_factory.mktemp('test_build_parser_01') + data_dir = os.path.join(tmpdir, 'data') data_path = Path(data_dir) os.makedirs(data_dir, exist_ok=True) - out_dir = os.path.join(tmpdir, "out") + out_dir = os.path.join(tmpdir, 'out') out_path = Path(out_dir) os.makedirs(out_dir, exist_ok=True) @@ -415,36 +415,36 @@ def test_build_parser_01(tmp_path_factory): base_args = [ data_dir, out_dir, - "participant", - "--mode", - "abcd", - "--motion-filter-type", - "lp", - "--band-stop-min", - "10", + 'participant', + '--mode', + 'abcd', + 
'--motion-filter-type', + 'lp', + '--band-stop-min', + '10', ] parser_obj = parser._build_parser() opts = parser_obj.parse_args(args=base_args, namespace=None) assert opts.fmri_dir == data_path assert opts.output_dir == out_path - assert opts.despike == "auto" + assert opts.despike == 'auto' test_args = base_args[:] - test_args.extend(["--create-matrices", "all", "300", "480"]) + test_args.extend(['--create-matrices', 'all', '300', '480']) opts = parser_obj.parse_args(args=test_args, namespace=None) assert opts.fmri_dir == data_path assert opts.output_dir == out_path - assert opts.dcan_correlation_lengths == ["all", 300, 480] + assert opts.dcan_correlation_lengths == ['all', 300, 480] def test_build_parser_02(tmp_path_factory): """Test parser._build_parser with hbcd mode.""" - tmpdir = tmp_path_factory.mktemp("test_build_parser_02") - data_dir = os.path.join(tmpdir, "data") + tmpdir = tmp_path_factory.mktemp('test_build_parser_02') + data_dir = os.path.join(tmpdir, 'data') data_path = Path(data_dir) os.makedirs(data_dir, exist_ok=True) - out_dir = os.path.join(tmpdir, "out") + out_dir = os.path.join(tmpdir, 'out') out_path = Path(out_dir) os.makedirs(out_dir, exist_ok=True) @@ -452,75 +452,75 @@ def test_build_parser_02(tmp_path_factory): base_args = [ data_dir, out_dir, - "participant", - "--mode", - "hbcd", - "--motion-filter-type", - "lp", - "--band-stop-min", - "10", + 'participant', + '--mode', + 'hbcd', + '--motion-filter-type', + 'lp', + '--band-stop-min', + '10', ] parser_obj = parser._build_parser() opts = parser_obj.parse_args(args=base_args, namespace=None) assert opts.fmri_dir == data_path assert opts.output_dir == out_path - assert opts.despike == "auto" + assert opts.despike == 'auto' test_args = base_args[:] - test_args.extend(["--create-matrices", "all", "300", "480"]) + test_args.extend(['--create-matrices', 'all', '300', '480']) opts = parser_obj.parse_args(args=test_args, namespace=None) assert opts.fmri_dir == data_path assert opts.output_dir == out_path - assert opts.dcan_correlation_lengths == ["all", 300, 480] + assert opts.dcan_correlation_lengths == ['all', 300, 480] @pytest.mark.parametrize( - "mode,combine_runs,expectation", + ('mode', 'combine_runs', 'expectation'), [ - ("linc", "auto", False), - ("abcd", "auto", True), - ("hbcd", "auto", True), - ("linc", None, True), - ("abcd", None, True), - ("hbcd", None, True), - ("linc", "y", True), - ("abcd", "y", True), - ("hbcd", "y", True), - ("linc", "n", False), - ("abcd", "n", False), - ("hbcd", "n", False), + ('linc', 'auto', False), + ('abcd', 'auto', True), + ('hbcd', 'auto', True), + ('linc', None, True), + ('abcd', None, True), + ('hbcd', None, True), + ('linc', 'y', True), + ('abcd', 'y', True), + ('hbcd', 'y', True), + ('linc', 'n', False), + ('abcd', 'n', False), + ('hbcd', 'n', False), ], ) def test_build_parser_03(tmp_path_factory, mode, combine_runs, expectation): """Test processing of the "combine_runs" parameter.""" - tmpdir = tmp_path_factory.mktemp("test_build_parser_03") - data_dir = os.path.join(tmpdir, "data") + tmpdir = tmp_path_factory.mktemp('test_build_parser_03') + data_dir = os.path.join(tmpdir, 'data') os.makedirs(data_dir, exist_ok=True) - out_dir = os.path.join(tmpdir, "out") + out_dir = os.path.join(tmpdir, 'out') os.makedirs(out_dir, exist_ok=True) # Parameters for hbcd mode base_args = [ data_dir, out_dir, - "participant", - "--mode", + 'participant', + '--mode', mode, - "--motion-filter-type", - "lp", - "--band-stop-min", - "10", + '--motion-filter-type', + 'lp', + '--band-stop-min', + 
'10', ] - if combine_runs not in ("auto", None): - base_args += ["--combine-runs", combine_runs] + if combine_runs not in ('auto', None): + base_args += ['--combine-runs', combine_runs] elif combine_runs is None: - base_args += ["--combine-runs"] + base_args += ['--combine-runs'] parser_obj = parser._build_parser() opts = parser_obj.parse_args(args=base_args, namespace=None) - if combine_runs == "auto": - assert opts.combine_runs == "auto" + if combine_runs == 'auto': + assert opts.combine_runs == 'auto' opts = parser._validate_parameters(opts=opts, build_log=build_log, parser=parser_obj) @@ -528,51 +528,51 @@ def test_build_parser_03(tmp_path_factory, mode, combine_runs, expectation): @pytest.mark.parametrize( - "mode,despike,expectation", + ('mode', 'despike', 'expectation'), [ - ("linc", "auto", True), - ("abcd", "auto", True), - ("hbcd", "auto", True), - ("linc", None, True), - ("abcd", None, True), - ("hbcd", None, True), - ("linc", "y", True), - ("abcd", "y", True), - ("hbcd", "y", True), - ("linc", "n", False), - ("abcd", "n", False), - ("hbcd", "n", False), + ('linc', 'auto', True), + ('abcd', 'auto', True), + ('hbcd', 'auto', True), + ('linc', None, True), + ('abcd', None, True), + ('hbcd', None, True), + ('linc', 'y', True), + ('abcd', 'y', True), + ('hbcd', 'y', True), + ('linc', 'n', False), + ('abcd', 'n', False), + ('hbcd', 'n', False), ], ) def test_build_parser_04(tmp_path_factory, mode, despike, expectation): """Test processing of the "despike" parameter.""" - tmpdir = tmp_path_factory.mktemp("test_build_parser_04") - data_dir = os.path.join(tmpdir, "data") + tmpdir = tmp_path_factory.mktemp('test_build_parser_04') + data_dir = os.path.join(tmpdir, 'data') os.makedirs(data_dir, exist_ok=True) - out_dir = os.path.join(tmpdir, "out") + out_dir = os.path.join(tmpdir, 'out') os.makedirs(out_dir, exist_ok=True) # Parameters for hbcd mode base_args = [ data_dir, out_dir, - "participant", - "--mode", + 'participant', + '--mode', mode, - "--motion-filter-type", - "lp", - "--band-stop-min", - "10", + '--motion-filter-type', + 'lp', + '--band-stop-min', + '10', ] - if despike not in ("auto", None): - base_args += ["--despike", despike] + if despike not in ('auto', None): + base_args += ['--despike', despike] elif despike is None: - base_args += ["--despike"] + base_args += ['--despike'] parser_obj = parser._build_parser() opts = parser_obj.parse_args(args=base_args, namespace=None) - if despike == "auto": - assert opts.despike == "auto" + if despike == 'auto': + assert opts.despike == 'auto' else: assert opts.despike is expectation @@ -582,53 +582,53 @@ def test_build_parser_04(tmp_path_factory, mode, despike, expectation): @pytest.mark.parametrize( - "mode,process_surfaces,expectation", + ('mode', 'process_surfaces', 'expectation'), [ - ("linc", "auto", False), - ("abcd", "auto", True), - ("hbcd", "auto", True), - ("linc", None, True), - ("abcd", None, True), - ("hbcd", None, True), - ("linc", "y", True), - ("abcd", "y", True), - ("hbcd", "y", True), - ("linc", "n", False), - ("abcd", "n", False), - ("hbcd", "n", False), + ('linc', 'auto', False), + ('abcd', 'auto', True), + ('hbcd', 'auto', True), + ('linc', None, True), + ('abcd', None, True), + ('hbcd', None, True), + ('linc', 'y', True), + ('abcd', 'y', True), + ('hbcd', 'y', True), + ('linc', 'n', False), + ('abcd', 'n', False), + ('hbcd', 'n', False), ], ) def test_build_parser_05(tmp_path_factory, mode, process_surfaces, expectation): """Test processing of the "process_surfaces" parameter.""" - tmpdir = 
tmp_path_factory.mktemp("test_build_parser_05") - data_dir = os.path.join(tmpdir, "data") + tmpdir = tmp_path_factory.mktemp('test_build_parser_05') + data_dir = os.path.join(tmpdir, 'data') os.makedirs(data_dir, exist_ok=True) - out_dir = os.path.join(tmpdir, "out") + out_dir = os.path.join(tmpdir, 'out') os.makedirs(out_dir, exist_ok=True) # Parameters for hbcd mode base_args = [ data_dir, out_dir, - "participant", - "--mode", + 'participant', + '--mode', mode, - "--motion-filter-type", - "lp", - "--band-stop-min", - "10", - "--file-format", - "cifti", + '--motion-filter-type', + 'lp', + '--band-stop-min', + '10', + '--file-format', + 'cifti', ] - if process_surfaces not in ("auto", None): - base_args += ["--warp-surfaces-native2std", process_surfaces] + if process_surfaces not in ('auto', None): + base_args += ['--warp-surfaces-native2std', process_surfaces] elif process_surfaces is None: - base_args += ["--warp-surfaces-native2std"] + base_args += ['--warp-surfaces-native2std'] parser_obj = parser._build_parser() opts = parser_obj.parse_args(args=base_args, namespace=None) - if process_surfaces == "auto": - assert opts.process_surfaces == "auto" + if process_surfaces == 'auto': + assert opts.process_surfaces == 'auto' opts = parser._validate_parameters(opts=opts, build_log=build_log, parser=parser_obj) @@ -636,48 +636,48 @@ def test_build_parser_05(tmp_path_factory, mode, process_surfaces, expectation): @pytest.mark.parametrize( - "mode,file_format,expectation", + ('mode', 'file_format', 'expectation'), [ - ("linc", "auto", "cifti"), - ("abcd", "auto", "cifti"), - ("hbcd", "auto", "cifti"), - ("linc", "nifti", "nifti"), - ("abcd", "nifti", "nifti"), - ("hbcd", "nifti", "nifti"), - ("linc", "cifti", "cifti"), - ("abcd", "cifti", "cifti"), - ("hbcd", "cifti", "cifti"), + ('linc', 'auto', 'cifti'), + ('abcd', 'auto', 'cifti'), + ('hbcd', 'auto', 'cifti'), + ('linc', 'nifti', 'nifti'), + ('abcd', 'nifti', 'nifti'), + ('hbcd', 'nifti', 'nifti'), + ('linc', 'cifti', 'cifti'), + ('abcd', 'cifti', 'cifti'), + ('hbcd', 'cifti', 'cifti'), ], ) def test_build_parser_06(tmp_path_factory, mode, file_format, expectation): """Test processing of the "file_format" parameter.""" - tmpdir = tmp_path_factory.mktemp("test_build_parser_06") - data_dir = os.path.join(tmpdir, "data") + tmpdir = tmp_path_factory.mktemp('test_build_parser_06') + data_dir = os.path.join(tmpdir, 'data') os.makedirs(data_dir, exist_ok=True) - out_dir = os.path.join(tmpdir, "out") + out_dir = os.path.join(tmpdir, 'out') os.makedirs(out_dir, exist_ok=True) # Parameters for hbcd mode base_args = [ data_dir, out_dir, - "participant", - "--mode", + 'participant', + '--mode', mode, - "--motion-filter-type", - "lp", - "--band-stop-min", - "10", - "--warp-surfaces-native2std", - "n", + '--motion-filter-type', + 'lp', + '--band-stop-min', + '10', + '--warp-surfaces-native2std', + 'n', ] - if file_format != "auto": - base_args += ["--file-format", file_format] + if file_format != 'auto': + base_args += ['--file-format', file_format] parser_obj = parser._build_parser() opts = parser_obj.parse_args(args=base_args, namespace=None) - if file_format == "auto": - assert opts.file_format == "auto" + if file_format == 'auto': + assert opts.file_format == 'auto' opts = parser._validate_parameters(opts=opts, build_log=build_log, parser=parser_obj) diff --git a/xcp_d/tests/test_cli_utils.py b/xcp_d/tests/test_cli_utils.py index 9c2494ac4..4ec6259de 100644 --- a/xcp_d/tests/test_cli_utils.py +++ b/xcp_d/tests/test_cli_utils.py @@ -14,15 +14,15 @@ def 
test_int_or_auto(): ArgumentTypeError, match="Argument must be a nonnegative integer or 'auto'.", ): - parser_utils._int_or_auto("hello") + parser_utils._int_or_auto('hello') - with pytest.raises(ArgumentTypeError, match="Int argument must be nonnegative."): + with pytest.raises(ArgumentTypeError, match='Int argument must be nonnegative.'): parser_utils._int_or_auto(-2) - out = parser_utils._int_or_auto("auto") - assert out == "auto" + out = parser_utils._int_or_auto('auto') + assert out == 'auto' - out = parser_utils._int_or_auto("3") + out = parser_utils._int_or_auto('3') assert out == 3 out = parser_utils._int_or_auto(3) @@ -35,15 +35,15 @@ def test_float_or_auto(): ArgumentTypeError, match="Argument must be a nonnegative float or 'auto'.", ): - parser_utils._float_or_auto("hello") + parser_utils._float_or_auto('hello') - with pytest.raises(ArgumentTypeError, match="Float argument must be nonnegative."): + with pytest.raises(ArgumentTypeError, match='Float argument must be nonnegative.'): parser_utils._float_or_auto(-2) - out = parser_utils._float_or_auto("auto") - assert out == "auto" + out = parser_utils._float_or_auto('auto') + assert out == 'auto' - out = parser_utils._float_or_auto("3") + out = parser_utils._float_or_auto('3') assert out == 3.0 out = parser_utils._float_or_auto(3) @@ -52,13 +52,13 @@ def test_float_or_auto(): def test_restricted_float(): """Test parser_utils._restricted_float.""" - with pytest.raises(ArgumentTypeError, match="not a floating-point literal"): - parser_utils._restricted_float("hello") + with pytest.raises(ArgumentTypeError, match='not a floating-point literal'): + parser_utils._restricted_float('hello') - with pytest.raises(ArgumentTypeError, match="not in range"): + with pytest.raises(ArgumentTypeError, match='not in range'): parser_utils._restricted_float(1.5) - out = parser_utils._restricted_float("0.5") + out = parser_utils._restricted_float('0.5') assert out == 0.5 out = parser_utils._restricted_float(0.5) @@ -71,18 +71,18 @@ def test_float_or_auto_or_none(): ArgumentTypeError, match="Argument must be a nonnegative float, 'all', or 'none', not 'hello'.", ): - parser_utils._float_or_auto_or_none("hello") + parser_utils._float_or_auto_or_none('hello') - with pytest.raises(ArgumentTypeError, match="Float argument must be nonnegative."): + with pytest.raises(ArgumentTypeError, match='Float argument must be nonnegative.'): parser_utils._float_or_auto_or_none(-2) - out = parser_utils._float_or_auto_or_none("all") - assert out == "all" + out = parser_utils._float_or_auto_or_none('all') + assert out == 'all' - out = parser_utils._float_or_auto_or_none("none") - assert out == "none" + out = parser_utils._float_or_auto_or_none('none') + assert out == 'none' - out = parser_utils._float_or_auto_or_none("3") + out = parser_utils._float_or_auto_or_none('3') assert out == 3.0 out = parser_utils._float_or_auto_or_none(3) @@ -91,32 +91,32 @@ def test_float_or_auto_or_none(): def test_is_file(tmp_path_factory): """Test parser_utils._is_file.""" - tmpdir = tmp_path_factory.mktemp("test_is_file") + tmpdir = tmp_path_factory.mktemp('test_is_file') # Existing file - with open(tmpdir / "file.txt", "w") as f: - f.write("") + with open(tmpdir / 'file.txt', 'w') as f: + f.write('') parser = ArgumentParser() - result = parser_utils._is_file(str(tmpdir / "file.txt"), parser) + result = parser_utils._is_file(str(tmpdir / 'file.txt'), parser) assert isinstance(result, Path) - assert result == Path(tmpdir / "file.txt").absolute() + assert result == Path(tmpdir / 
'file.txt').absolute() # Nonexistent file parser = ArgumentParser() - path = "/path/to/nonexistent/file.txt" - with pytest.raises(SystemExit, match="2"): + path = '/path/to/nonexistent/file.txt' + with pytest.raises(SystemExit, match='2'): parser_utils._is_file(path, parser) # Path, not a file parser = ArgumentParser() - with pytest.raises(SystemExit, match="2"): + with pytest.raises(SystemExit, match='2'): parser_utils._is_file(str(tmpdir), parser) def test_path_exists(tmp_path_factory): """Test parser_utils._path_exists.""" - tmpdir = tmp_path_factory.mktemp("test_path_exists") + tmpdir = tmp_path_factory.mktemp('test_path_exists') # Existing path parser = ArgumentParser() @@ -126,19 +126,19 @@ def test_path_exists(tmp_path_factory): # Nonexistent path parser = ArgumentParser() - path = "/path/to/nonexistent/file.txt" - with pytest.raises(SystemExit, match="2"): + path = '/path/to/nonexistent/file.txt' + with pytest.raises(SystemExit, match='2'): parser_utils._path_exists(path, parser) def test_bids_filter(tmp_path_factory): """Test parser_utils._bids_filter.""" - tmpdir = tmp_path_factory.mktemp("test_bids_filter_existing_path") + tmpdir = tmp_path_factory.mktemp('test_bids_filter_existing_path') # Existing path with valid JSON - json_file = str(tmpdir / "file.json") - with open(json_file, "w") as f: - f.write("{}") + json_file = str(tmpdir / 'file.json') + with open(json_file, 'w') as f: + f.write('{}') parser = ArgumentParser() result = parser_utils._bids_filter(json_file, parser) @@ -146,18 +146,18 @@ def test_bids_filter(tmp_path_factory): # Nonexistent path parser = ArgumentParser() - value = "/path/to/nonexistent/file.json" - with pytest.raises(SystemExit, match="2"): + value = '/path/to/nonexistent/file.json' + with pytest.raises(SystemExit, match='2'): parser_utils._bids_filter(value, parser) # Invalid JSON - tmpdir = tmp_path_factory.mktemp("test_bids_filter_invalid_json") - json_file = str(tmpdir / "invalid.json") - with open(json_file, "w") as f: - f.write("invalid json") + tmpdir = tmp_path_factory.mktemp('test_bids_filter_invalid_json') + json_file = str(tmpdir / 'invalid.json') + with open(json_file, 'w') as f: + f.write('invalid json') parser = ArgumentParser() - with pytest.raises(SystemExit, match="2"): + with pytest.raises(SystemExit, match='2'): parser_utils._bids_filter(json_file, parser) # No value @@ -169,66 +169,66 @@ def test_bids_filter(tmp_path_factory): def test_yes_no_action(): """Test parser_utils.YesNoAction.""" parser = ArgumentParser() - parser.add_argument("--option", nargs="?", action=parser_utils.YesNoAction) + parser.add_argument('--option', nargs='?', action=parser_utils.YesNoAction) # A value of y should be True - args = parser.parse_args(["--option", "y"]) + args = parser.parse_args(['--option', 'y']) assert args.option is True # A value of n should be False - args = parser.parse_args(["--option", "n"]) + args = parser.parse_args(['--option', 'n']) assert args.option is False # The parameter without a value should default to True - args = parser.parse_args(["--option"]) + args = parser.parse_args(['--option']) assert args.option is True # Auto is an option - args = parser.parse_args(["--option", "auto"]) - assert args.option == "auto" + args = parser.parse_args(['--option', 'auto']) + assert args.option == 'auto' # Invalid value raises an error with pytest.raises(SystemExit): - parser.parse_args(["--option", "invalid"]) + parser.parse_args(['--option', 'invalid']) def test_to_dict(): """Test parser_utils.ToDict.""" parser = ArgumentParser() - 
parser.add_argument("--option", action=parser_utils.ToDict, nargs="+") + parser.add_argument('--option', action=parser_utils.ToDict, nargs='+') # Two key-value pairs - args = parser.parse_args(["--option", "key1=value1", "key2=value2"]) - assert args.option == {"key1": Path("value1"), "key2": Path("value2")} + args = parser.parse_args(['--option', 'key1=value1', 'key2=value2']) + assert args.option == {'key1': Path('value1'), 'key2': Path('value2')} # Providing the same key twice with pytest.raises(SystemExit): - parser.parse_args(["--option", "key1=value1", "key1=value2"]) + parser.parse_args(['--option', 'key1=value1', 'key1=value2']) # Trying to use one of the reserved keys with pytest.raises(SystemExit): - parser.parse_args(["--option", "preprocessed=value1"]) + parser.parse_args(['--option', 'preprocessed=value1']) # Dataset with no name - args = parser.parse_args(["--option", "value1"]) - assert args.option == {"value1": Path("value1")} + args = parser.parse_args(['--option', 'value1']) + assert args.option == {'value1': Path('value1')} def test_confounds_action(tmp_path): """Test parser_utils.ConfoundsAction.""" parser = ArgumentParser() - parser.add_argument("--confounds", action=parser_utils.ConfoundsAction) + parser.add_argument('--confounds', action=parser_utils.ConfoundsAction) # A value of auto should be "auto" - args = parser.parse_args(["--confounds", "auto"]) - assert args.confounds == "auto" + args = parser.parse_args(['--confounds', 'auto']) + assert args.confounds == 'auto' # A valid custom confounds option - valid_path = tmp_path / "valid_confounds.yml" + valid_path = tmp_path / 'valid_confounds.yml' valid_path.touch() # Create the file - args = parser.parse_args(["--confounds", str(valid_path)]) + args = parser.parse_args(['--confounds', str(valid_path)]) assert args.confounds == valid_path # Path to a non-existent file should raise an error with pytest.raises(SystemExit): - parser.parse_args(["--confounds", "/invalid/path/to/confounds.yml"]) + parser.parse_args(['--confounds', '/invalid/path/to/confounds.yml']) diff --git a/xcp_d/tests/test_despike.py b/xcp_d/tests/test_despike.py index 7dfe38e3b..5a8279179 100644 --- a/xcp_d/tests/test_despike.py +++ b/xcp_d/tests/test_despike.py @@ -24,9 +24,9 @@ def test_nifti_despike(fmriprep_without_freesurfer_data, tmp_path_factory): Confirm that the maximum and minimum voxel values decrease after despiking. 
""" # Read in the necessary inputs - tempdir = tmp_path_factory.mktemp("test_despike_nifti") - boldfile = fmriprep_without_freesurfer_data["nifti_file"] - maskfile = fmriprep_without_freesurfer_data["brain_mask_file"] + tempdir = tmp_path_factory.mktemp('test_despike_nifti') + boldfile = fmriprep_without_freesurfer_data['nifti_file'] + maskfile = fmriprep_without_freesurfer_data['brain_mask_file'] # Create some spikes in the second voxel file_data = read_ndata(boldfile, maskfile) @@ -42,7 +42,7 @@ def test_nifti_despike(fmriprep_without_freesurfer_data, tmp_path_factory): # Let's write this temp file out for despiking file_data[2, :] = voxel_data - spikedfile = os.path.join(tempdir, "spikedfile.nii.gz") + spikedfile = os.path.join(tempdir, 'spikedfile.nii.gz') write_ndata( data_matrix=file_data, mask=maskfile, @@ -52,7 +52,7 @@ def test_nifti_despike(fmriprep_without_freesurfer_data, tmp_path_factory): ) # Let's despike the image and write it out to a temp file - despike_nifti = pe.Node(DespikePatch(outputtype="NIFTI_GZ", args="-NEW"), name="Despike") + despike_nifti = pe.Node(DespikePatch(outputtype='NIFTI_GZ', args='-NEW'), name='Despike') despike_nifti.inputs.in_file = spikedfile res = despike_nifti.run() despiked_file = res.outputs.out_file @@ -82,7 +82,7 @@ def test_cifti_despike(ds001419_data, tmp_path_factory): Confirm that the maximum and minimum voxel values decrease after despiking. """ - boldfile = ds001419_data["cifti_file"] + boldfile = ds001419_data['cifti_file'] TR = 0.8 # Let's add some noise @@ -99,24 +99,24 @@ def test_cifti_despike(ds001419_data, tmp_path_factory): # Let's write this out file_data[2, :] = voxel_data - tempdir = tmp_path_factory.mktemp("test_despike_cifti") - filename = os.path.join(tempdir, "test.dtseries.nii") + tempdir = tmp_path_factory.mktemp('test_despike_cifti') + filename = os.path.join(tempdir, 'test.dtseries.nii') write_ndata(data_matrix=file_data, template=boldfile, TR=TR, filename=filename) # Let's despike the data # first, convert the cifti to a nifti - convert_to_nifti = CiftiConvert(target="to") + convert_to_nifti = CiftiConvert(target='to') convert_to_nifti.inputs.in_file = filename convert_to_nifti_results = convert_to_nifti.run(cwd=tempdir) # next, run 3dDespike - despike3d = DespikePatch(outputtype="NIFTI_GZ", args="-nomask -NEW") + despike3d = DespikePatch(outputtype='NIFTI_GZ', args='-nomask -NEW') despike3d.inputs.in_file = convert_to_nifti_results.outputs.out_file despike3d_results = despike3d.run(cwd=tempdir) # finally, convert the despiked nifti back to cifti - convert_to_cifti = CiftiConvert(target="from", TR=TR) + convert_to_cifti = CiftiConvert(target='from', TR=TR) convert_to_cifti.inputs.in_file = despike3d_results.outputs.out_file convert_to_cifti.inputs.cifti_template = filename convert_to_cifti_results = convert_to_cifti.run(cwd=tempdir) diff --git a/xcp_d/tests/test_interfaces_bids.py b/xcp_d/tests/test_interfaces_bids.py index 55d39ea4b..fe6ed7482 100644 --- a/xcp_d/tests/test_interfaces_bids.py +++ b/xcp_d/tests/test_interfaces_bids.py @@ -4,99 +4,99 @@ import pytest -from xcp_d.interfaces import bids from xcp_d.data import load as load_data +from xcp_d.interfaces import bids def test_copy_atlas(tmp_path_factory): """Test xcp_d.interfaces.bids.CopyAtlas.""" - tmpdir = tmp_path_factory.mktemp("test_copy_atlas") - os.makedirs(os.path.join(tmpdir, "xcp_d"), exist_ok=True) + tmpdir = tmp_path_factory.mktemp('test_copy_atlas') + os.makedirs(os.path.join(tmpdir, 'xcp_d'), exist_ok=True) # NIfTI atlas_info = { - "image": 
load_data( - "atlases/atlas-Gordon/atlas-Gordon_space-MNI152NLin6Asym_res-01_dseg.nii.gz" + 'image': load_data( + 'atlases/atlas-Gordon/atlas-Gordon_space-MNI152NLin6Asym_res-01_dseg.nii.gz' ), - "labels": load_data("atlases/atlas-Gordon/atlas-Gordon_dseg.tsv"), - "metadata": {"thing": "stuff"}, - "dataset": "xcpdatlases", + 'labels': load_data('atlases/atlas-Gordon/atlas-Gordon_dseg.tsv'), + 'metadata': {'thing': 'stuff'}, + 'dataset': 'xcpdatlases', } - name_source = "sub-01_task-A_run-01_space-MNI152NLin2009cAsym_res-2_desc-z_bold.nii.gz" + name_source = 'sub-01_task-A_run-01_space-MNI152NLin2009cAsym_res-2_desc-z_bold.nii.gz' copyatlas = bids.CopyAtlas( name_source=name_source, - in_file=atlas_info["image"], + in_file=atlas_info['image'], output_dir=tmpdir, - atlas="Y", - meta_dict=atlas_info["metadata"], + atlas='Y', + meta_dict=atlas_info['metadata'], ) result = copyatlas.run(cwd=tmpdir) assert os.path.isfile(result.outputs.out_file) - assert os.path.isfile(result.outputs.out_file.replace(".nii.gz", ".json")) + assert os.path.isfile(result.outputs.out_file.replace('.nii.gz', '.json')) assert ( os.path.basename(result.outputs.out_file) - == "atlas-Y_space-MNI152NLin2009cAsym_res-2_dseg.nii.gz" + == 'atlas-Y_space-MNI152NLin2009cAsym_res-2_dseg.nii.gz' ) # Check that the NIfTI file raises an error if the resolution varies # Gordon atlas is 1mm, HCP is 2mm atlas_info_diff_affine = { - "image": load_data("atlases/atlas-HCP/atlas-HCP_space-MNI152NLin6Asym_res-02_dseg.nii.gz"), - "labels": load_data("atlases/atlas-HCP/atlas-HCP_dseg.tsv"), - "metadata": {"thing": "stuff"}, - "dataset": "xcpdatlases", + 'image': load_data('atlases/atlas-HCP/atlas-HCP_space-MNI152NLin6Asym_res-02_dseg.nii.gz'), + 'labels': load_data('atlases/atlas-HCP/atlas-HCP_dseg.tsv'), + 'metadata': {'thing': 'stuff'}, + 'dataset': 'xcpdatlases', } - with pytest.raises(ValueError, match="is different from the input file affine"): - copyatlas = bids.CopyAtlas( - name_source=name_source, - in_file=atlas_info_diff_affine["image"], - output_dir=tmpdir, - atlas="Y", - ) + copyatlas = bids.CopyAtlas( + name_source=name_source, + in_file=atlas_info_diff_affine['image'], + output_dir=tmpdir, + atlas='Y', + ) + with pytest.raises(ValueError, match='is different from the input file affine'): copyatlas.run(cwd=tmpdir) # CIFTI atlas_info = { - "image": load_data("atlases/atlas-Gordon/atlas-Gordon_space-fsLR_den-32k_dseg.dlabel.nii"), - "labels": load_data("atlases/atlas-Gordon/atlas-Gordon_dseg.tsv"), - "metadata": {"thing": "stuff"}, - "dataset": "xcpdatlases", + 'image': load_data('atlases/atlas-Gordon/atlas-Gordon_space-fsLR_den-32k_dseg.dlabel.nii'), + 'labels': load_data('atlases/atlas-Gordon/atlas-Gordon_dseg.tsv'), + 'metadata': {'thing': 'stuff'}, + 'dataset': 'xcpdatlases', } - name_source = "sub-01_task-imagery_run-01_space-fsLR_den-91k_desc-denoised_bold.dtseries.nii" + name_source = 'sub-01_task-imagery_run-01_space-fsLR_den-91k_desc-denoised_bold.dtseries.nii' copyatlas = bids.CopyAtlas( name_source=name_source, - in_file=atlas_info["image"], + in_file=atlas_info['image'], output_dir=tmpdir, - atlas="Y", - meta_dict=atlas_info["metadata"], + atlas='Y', + meta_dict=atlas_info['metadata'], ) result = copyatlas.run(cwd=tmpdir) assert os.path.isfile(result.outputs.out_file) - assert os.path.isfile(result.outputs.out_file.replace(".dlabel.nii", ".json")) + assert os.path.isfile(result.outputs.out_file.replace('.dlabel.nii', '.json')) assert ( - os.path.basename(result.outputs.out_file) == 
"atlas-Y_space-fsLR_den-91k_dseg.dlabel.nii" + os.path.basename(result.outputs.out_file) == 'atlas-Y_space-fsLR_den-91k_dseg.dlabel.nii' ) # TSV - name_source = "sub-01_task-imagery_run-01_space-fsLR_den-91k_desc-denoised_bold.dtseries.nii" + name_source = 'sub-01_task-imagery_run-01_space-fsLR_den-91k_desc-denoised_bold.dtseries.nii' copyatlas = bids.CopyAtlas( - name_source=name_source, in_file=atlas_info["labels"], output_dir=tmpdir, atlas="Y" + name_source=name_source, in_file=atlas_info['labels'], output_dir=tmpdir, atlas='Y' ) result = copyatlas.run(cwd=tmpdir) assert os.path.isfile(result.outputs.out_file) - assert os.path.basename(result.outputs.out_file) == "atlas-Y_dseg.tsv" + assert os.path.basename(result.outputs.out_file) == 'atlas-Y_dseg.tsv' # Ensure that out_file isn't overwritten if it already exists - fake_in_file = os.path.join(tmpdir, "fake.tsv") - with open(fake_in_file, "w") as fo: - fo.write("fake") + fake_in_file = os.path.join(tmpdir, 'fake.tsv') + with open(fake_in_file, 'w') as fo: + fo.write('fake') copyatlas = bids.CopyAtlas( - name_source=name_source, in_file=fake_in_file, output_dir=tmpdir, atlas="Y" + name_source=name_source, in_file=fake_in_file, output_dir=tmpdir, atlas='Y' ) result = copyatlas.run(cwd=tmpdir) assert os.path.isfile(result.outputs.out_file) - assert os.path.basename(result.outputs.out_file) == "atlas-Y_dseg.tsv" + assert os.path.basename(result.outputs.out_file) == 'atlas-Y_dseg.tsv' # The file should not be overwritten, so the contents shouldn't be "fake" - with open(result.outputs.out_file, "r") as fo: - assert fo.read() != "fake" + with open(result.outputs.out_file) as fo: + assert fo.read() != 'fake' diff --git a/xcp_d/tests/test_interfaces_censoring.py b/xcp_d/tests/test_interfaces_censoring.py index be103eb84..99b597028 100644 --- a/xcp_d/tests/test_interfaces_censoring.py +++ b/xcp_d/tests/test_interfaces_censoring.py @@ -2,12 +2,12 @@ import json import os -import yaml import nibabel as nb import numpy as np import pandas as pd import pytest +import yaml from xcp_d.data import load as load_data from xcp_d.interfaces import censoring @@ -15,34 +15,34 @@ def test_generate_confounds(ds001419_data, tmp_path_factory): """Check results.""" - tmpdir = tmp_path_factory.mktemp("test_generate_confounds") - in_file = ds001419_data["nifti_file"] - confounds_file = ds001419_data["confounds_file"] - confounds_json = ds001419_data["confounds_json"] + tmpdir = tmp_path_factory.mktemp('test_generate_confounds') + in_file = ds001419_data['nifti_file'] + confounds_file = ds001419_data['confounds_file'] + confounds_json = ds001419_data['confounds_json'] df = pd.read_table(confounds_file) - with open(confounds_json, "r") as fo: + with open(confounds_json) as fo: metadata = json.load(fo) # Replace confounds tsv values with values that should be omitted - df.loc[1:3, "trans_x"] = [6, 8, 9] - df.loc[4:6, "trans_y"] = [7, 8, 9] - df.loc[7:9, "trans_z"] = [12, 8, 9] + df.loc[1:3, 'trans_x'] = [6, 8, 9] + df.loc[4:6, 'trans_y'] = [7, 8, 9] + df.loc[7:9, 'trans_z'] = [12, 8, 9] # Modify JSON file - metadata["trans_x"] = {"test": "hello"} + metadata['trans_x'] = {'test': 'hello'} # Rename with same convention as initial confounds tsv - confounds_tsv = os.path.join(tmpdir, "edited_confounds.tsv") - df.to_csv(confounds_tsv, sep="\t", index=False, header=True) - confounds_tsv2 = os.path.join(tmpdir, "edited_confounds_with_signal.tsv") - df["signal__fingerpress_condition"] = np.random.random(df.shape[0]) - df.to_csv(confounds_tsv2, sep="\t", index=False, 
header=True) + confounds_tsv = os.path.join(tmpdir, 'edited_confounds.tsv') + df.to_csv(confounds_tsv, sep='\t', index=False, header=True) + confounds_tsv2 = os.path.join(tmpdir, 'edited_confounds_with_signal.tsv') + df['signal__fingerpress_condition'] = np.random.random(df.shape[0]) + df.to_csv(confounds_tsv2, sep='\t', index=False, header=True) - confounds_files = {"preproc_confounds": {"file": confounds_tsv, "metadata": metadata}} + confounds_files = {'preproc_confounds': {'file': confounds_tsv, 'metadata': metadata}} # Test with no motion filtering - config = load_data.readable("nuisance/24P.yml") + config = load_data.readable('nuisance/24P.yml') config = yaml.safe_load(config.read_text()) interface = censoring.GenerateConfounds( in_file=in_file, @@ -62,17 +62,17 @@ def test_generate_confounds(ds001419_data, tmp_path_factory): out_confounds_file = results.outputs.confounds_tsv out_df = pd.read_table(out_confounds_file) assert out_df.shape[1] == 24 # 24(P) - assert "trans_x" in out_df.columns + assert 'trans_x' in out_df.columns # Test with notch motion filtering - config = load_data.readable("nuisance/24P.yml") + config = load_data.readable('nuisance/24P.yml') config = yaml.safe_load(config.read_text()) interface = censoring.GenerateConfounds( in_file=in_file, confounds_config=config, TR=0.8, confounds_files=confounds_files, - motion_filter_type="notch", + motion_filter_type='notch', motion_filter_order=4, band_stop_min=12, band_stop_max=20, @@ -85,17 +85,17 @@ def test_generate_confounds(ds001419_data, tmp_path_factory): out_confounds_file = results.outputs.confounds_tsv out_df = pd.read_table(out_confounds_file) assert out_df.shape[1] == 24 # 24(P) - assert "trans_x_filtered" in out_df.columns + assert 'trans_x_filtered' in out_df.columns # Test with low-pass motion filtering - config = load_data.readable("nuisance/24P.yml") + config = load_data.readable('nuisance/24P.yml') config = yaml.safe_load(config.read_text()) interface = censoring.GenerateConfounds( in_file=in_file, confounds_config=config, TR=0.8, confounds_files=confounds_files, - motion_filter_type="lp", + motion_filter_type='lp', motion_filter_order=4, band_stop_min=6, band_stop_max=0, @@ -108,10 +108,10 @@ def test_generate_confounds(ds001419_data, tmp_path_factory): out_confounds_file = results.outputs.confounds_tsv out_df = pd.read_table(out_confounds_file) assert out_df.shape[1] == 24 # 24(P) - assert "trans_x_filtered" in out_df.columns + assert 'trans_x_filtered' in out_df.columns # Test with regular expressions in confounds_config - config = load_data.readable("nuisance/acompcor_gsr.yml") + config = load_data.readable('nuisance/acompcor_gsr.yml') config = yaml.safe_load(config.read_text()) interface = censoring.GenerateConfounds( in_file=in_file, @@ -133,10 +133,10 @@ def test_generate_confounds(ds001419_data, tmp_path_factory): assert out_df.shape[1] == 31 # 31 parameters # Test with signal regressors - config = load_data.readable("nuisance/24P.yml") + config = load_data.readable('nuisance/24P.yml') config = yaml.safe_load(config.read_text()) - config["confounds"]["preproc_confounds"]["columns"].append("signal__fingerpress_condition") - confounds_files = {"preproc_confounds": {"file": confounds_tsv2, "metadata": metadata}} + config['confounds']['preproc_confounds']['columns'].append('signal__fingerpress_condition') + confounds_files = {'preproc_confounds': {'file': confounds_tsv2, 'metadata': metadata}} interface = censoring.GenerateConfounds( in_file=in_file, confounds_config=config, @@ -155,16 +155,16 @@ def 
test_generate_confounds(ds001419_data, tmp_path_factory): out_confounds_file = results.outputs.confounds_tsv out_df = pd.read_table(out_confounds_file) assert out_df.shape[1] == 24 # 24 parameters (doesn't include 25th signal column) - assert "signal__fingerpress_condition" not in out_df.columns - assert all([col.endswith("_orth") for col in out_df.columns]) - assert "signal__fingerpress_condition" in results.outputs.confounds_metadata.keys() + assert 'signal__fingerpress_condition' not in out_df.columns + assert all(col.endswith('_orth') for col in out_df.columns) + assert 'signal__fingerpress_condition' in results.outputs.confounds_metadata.keys() # Test with image-based confounds - config = load_data.readable("nuisance/rapidtide+24P.yml") + config = load_data.readable('nuisance/rapidtide+24P.yml') config = yaml.safe_load(config.read_text()) confounds_files = { - "preproc_confounds": {"file": confounds_tsv, "metadata": metadata}, - "rapidtide_slfo": {"file": in_file, "metadata": {}}, + 'preproc_confounds': {'file': confounds_tsv, 'metadata': metadata}, + 'rapidtide_slfo': {'file': in_file, 'metadata': {}}, } interface = censoring.GenerateConfounds( in_file=in_file, @@ -185,16 +185,16 @@ def test_generate_confounds(ds001419_data, tmp_path_factory): out_df = pd.read_table(out_confounds_file) assert out_df.shape[1] == 25 # 24P + rapidtide (stand-in for the voxel-wise regressor) assert os.path.isfile(results.outputs.confounds_images[0]) - assert out_df["rapidtide_slfo"].isna().all() + assert out_df['rapidtide_slfo'].isna().all() # Test with image-based confounds and a signal column (will fail) - config = load_data.readable("nuisance/rapidtide+24P.yml") + config = load_data.readable('nuisance/rapidtide+24P.yml') config = yaml.safe_load(config.read_text()) - config["confounds"]["preproc_confounds"]["columns"].append("signal__fingerpress_condition") + config['confounds']['preproc_confounds']['columns'].append('signal__fingerpress_condition') confounds_files = { - "preproc_confounds": {"file": confounds_tsv2, "metadata": metadata}, - "rapidtide_slfo": {"file": in_file, "metadata": {}}, + 'preproc_confounds': {'file': confounds_tsv2, 'metadata': metadata}, + 'rapidtide_slfo': {'file': in_file, 'metadata': {}}, } interface = censoring.GenerateConfounds( in_file=in_file, @@ -214,10 +214,10 @@ def test_generate_confounds(ds001419_data, tmp_path_factory): def test_process_motion(ds001419_data, tmp_path_factory): """Test censoring.ProcessMotion.""" - tmpdir = tmp_path_factory.mktemp("test_process_motion") + tmpdir = tmp_path_factory.mktemp('test_process_motion') - motion_file = ds001419_data["confounds_file"] - motion_json = ds001419_data["confounds_json"] + motion_file = ds001419_data['confounds_file'] + motion_json = ds001419_data['confounds_json'] # Basic test without filtering interface = censoring.ProcessMotion( @@ -258,7 +258,7 @@ def test_process_motion(ds001419_data, tmp_path_factory): TR=2.0, fd_thresh=0, head_radius=50, - motion_filter_type="notch", + motion_filter_type='notch', motion_filter_order=4, band_stop_min=12, band_stop_max=20, @@ -274,7 +274,7 @@ def test_process_motion(ds001419_data, tmp_path_factory): TR=2.0, fd_thresh=0.2, head_radius=50, - motion_filter_type="notch", + motion_filter_type='notch', motion_filter_order=4, band_stop_min=12, band_stop_max=20, @@ -287,10 +287,10 @@ def test_process_motion(ds001419_data, tmp_path_factory): def test_removedummyvolumes_nifti(ds001419_data, tmp_path_factory): """Test RemoveDummyVolumes() for NIFTI input data.""" # Define inputs - tmpdir 
= tmp_path_factory.mktemp("test_RemoveDummyVolumes_nifti") + tmpdir = tmp_path_factory.mktemp('test_RemoveDummyVolumes_nifti') - boldfile = ds001419_data["nifti_file"] - confounds_file = ds001419_data["confounds_file"] + boldfile = ds001419_data['nifti_file'] + confounds_file = ds001419_data['confounds_file'] # Find the original number of volumes acc. to nifti & confounds timeseries original_confounds = pd.read_table(confounds_file) @@ -337,24 +337,19 @@ def test_removedummyvolumes_nifti(ds001419_data, tmp_path_factory): # Have the confounds changed correctly? assert dropped_confounds.shape[0] == original_confounds.shape[0] - n # Has the nifti changed correctly? - try: - assert ( - nb.load(results.outputs.bold_file_dropped_TR).get_fdata().shape[3] - == original_nvols_nifti - n - ) - except Exception as exc: - exc = nb.load(results.outputs.bold_file_dropped_TR).get_fdata().shape[3] - print(f"Tests failing at N = {n}.") - raise Exception(f"Number of volumes in dropped nifti is {exc}.") + n_vols = nb.load(results.outputs.bold_file_dropped_TR).get_fdata().shape[3] + if n_vols != (original_nvols_nifti - n): + print(f'Tests failing at N = {n}.') + raise ValueError(f'Number of volumes in dropped nifti is {n_vols}.') def test_removedummyvolumes_cifti(ds001419_data, tmp_path_factory): """Test RemoveDummyVolumes() for CIFTI input data.""" # Define inputs - tmpdir = tmp_path_factory.mktemp("test_RemoveDummyVolumes_cifti") + tmpdir = tmp_path_factory.mktemp('test_RemoveDummyVolumes_cifti') - boldfile = ds001419_data["cifti_file"] - confounds_file = ds001419_data["confounds_file"] + boldfile = ds001419_data['cifti_file'] + confounds_file = ds001419_data['confounds_file'] # Find the original number of volumes acc. to cifti & confounds timeseries original_confounds = pd.read_table(confounds_file) @@ -400,20 +395,15 @@ def test_removedummyvolumes_cifti(ds001419_data, tmp_path_factory): # Have the confounds changed correctly? assert dropped_confounds.shape[0] == original_confounds.shape[0] - n # Has the cifti changed correctly? 
- try: - assert ( - nb.load(results.outputs.bold_file_dropped_TR).get_fdata().shape[0] - == original_nvols_cifti - n - ) - except Exception as exc: - exc = nb.load(results.outputs.bold_file_dropped_TR).get_fdata().shape[0] - print(f"Tests failing at N = {n}.") - raise Exception(f"Number of volumes in dropped cifti is {exc}.") + n_vols = nb.load(results.outputs.bold_file_dropped_TR).get_fdata().shape[0] + if n_vols != (original_nvols_cifti - n): + print(f'Tests failing at N = {n}.') + raise ValueError(f'Number of volumes in dropped cifti is {n_vols}.') def test_random_censor(tmp_path_factory): """Test RandomCensor.""" - tmpdir = tmp_path_factory.mktemp("test_random_censor") + tmpdir = tmp_path_factory.mktemp('test_random_censor') n_volumes, n_outliers = 500, 100 exact_scans = [100, 200, 300, 400] @@ -421,9 +411,9 @@ def test_random_censor(tmp_path_factory): rng = np.random.default_rng(0) outlier_idx = rng.choice(np.arange(n_volumes, dtype=int), size=n_outliers, replace=False) outliers_arr[outlier_idx] = 1 - temporal_mask_df = pd.DataFrame(data=outliers_arr, columns=["framewise_displacement"]) - original_temporal_mask = os.path.join(tmpdir, "orig_tmask.tsv") - temporal_mask_df.to_csv(original_temporal_mask, index=False, sep="\t") + temporal_mask_df = pd.DataFrame(data=outliers_arr, columns=['framewise_displacement']) + original_temporal_mask = os.path.join(tmpdir, 'orig_tmask.tsv') + temporal_mask_df.to_csv(original_temporal_mask, index=False, sep='\t') # Run the RandomCensor interface without any exact_scans. interface = censoring.RandomCensor( @@ -448,10 +438,10 @@ def test_random_censor(tmp_path_factory): assert isinstance(results.outputs.temporal_mask_metadata, dict) new_temporal_mask_df = pd.read_table(results.outputs.temporal_mask) new_temporal_mask_df_no_outliers = new_temporal_mask_df.loc[ - new_temporal_mask_df["framewise_displacement"] == 0 + new_temporal_mask_df['framewise_displacement'] == 0 ] for exact_scan in exact_scans: - exact_scan_col = f"exact_{exact_scan}" + exact_scan_col = f'exact_{exact_scan}' assert exact_scan_col in new_temporal_mask_df_no_outliers.columns # The column's values should sum to the number of volumes minus the number of retained. # Outliers don't show up here. @@ -460,7 +450,7 @@ def test_random_censor(tmp_path_factory): ) # The outlier volumes and exact-scan censored volumes shouldn't overlap. 
assert all( - new_temporal_mask_df_no_outliers[[exact_scan_col, "framewise_displacement"]].sum( + new_temporal_mask_df_no_outliers[[exact_scan_col, 'framewise_displacement']].sum( axis=1 ) <= 1 @@ -469,20 +459,20 @@ def test_random_censor(tmp_path_factory): def test_censor(ds001419_data, tmp_path_factory): """Test Censor interface.""" - tmpdir = tmp_path_factory.mktemp("test_censor") - nifti_file = ds001419_data["nifti_file"] - cifti_file = ds001419_data["cifti_file"] + tmpdir = tmp_path_factory.mktemp('test_censor') + nifti_file = ds001419_data['nifti_file'] + cifti_file = ds001419_data['cifti_file'] in_img = nb.load(nifti_file) n_volumes = in_img.shape[3] - censoring_df = pd.DataFrame(columns=["framewise_displacement"], data=np.zeros(n_volumes)) - temporal_mask = os.path.join(tmpdir, "temporal_mask.tsv") - censoring_df.to_csv(temporal_mask, sep="\t", index=False) + censoring_df = pd.DataFrame(columns=['framewise_displacement'], data=np.zeros(n_volumes)) + temporal_mask = os.path.join(tmpdir, 'temporal_mask.tsv') + censoring_df.to_csv(temporal_mask, sep='\t', index=False) # Test with a NIfTI file, with no censored volumes interface = censoring.Censor( in_file=nifti_file, temporal_mask=temporal_mask, - column="framewise_displacement", + column='framewise_displacement', ) results = interface.run(cwd=tmpdir) out_file = results.outputs.out_file @@ -494,7 +484,7 @@ def test_censor(ds001419_data, tmp_path_factory): interface = censoring.Censor( in_file=cifti_file, temporal_mask=temporal_mask, - column="framewise_displacement", + column='framewise_displacement', ) results = interface.run(cwd=tmpdir) out_file = results.outputs.out_file @@ -505,17 +495,17 @@ def test_censor(ds001419_data, tmp_path_factory): # Create a temporal mask with some censored volumes n_censored_volumes = 10 n_retained_volumes = n_volumes - n_censored_volumes - censoring_df.loc[range(10), "framewise_displacement"] = 1 + censoring_df.loc[range(10), 'framewise_displacement'] = 1 # Add random censor column - censoring_df["random_censor"] = 0 - censoring_df.loc[20:29, "random_censor"] = 1 - censoring_df.to_csv(temporal_mask, sep="\t", index=False) + censoring_df['random_censor'] = 0 + censoring_df.loc[20:29, 'random_censor'] = 1 + censoring_df.to_csv(temporal_mask, sep='\t', index=False) # Test with a NIfTI file, with some censored volumes interface = censoring.Censor( in_file=nifti_file, temporal_mask=temporal_mask, - column="framewise_displacement", + column='framewise_displacement', ) results = interface.run(cwd=tmpdir) out_file = results.outputs.out_file @@ -527,7 +517,7 @@ def test_censor(ds001419_data, tmp_path_factory): interface2 = censoring.Censor( in_file=results.outputs.out_file, temporal_mask=temporal_mask, - column="random_censor", + column='random_censor', ) results2 = interface2.run(cwd=tmpdir) out_file2 = results2.outputs.out_file @@ -539,7 +529,7 @@ def test_censor(ds001419_data, tmp_path_factory): interface = censoring.Censor( in_file=cifti_file, temporal_mask=temporal_mask, - column="framewise_displacement", + column='framewise_displacement', ) results = interface.run(cwd=tmpdir) out_file = results.outputs.out_file @@ -551,7 +541,7 @@ def test_censor(ds001419_data, tmp_path_factory): interface2 = censoring.Censor( in_file=results.outputs.out_file, temporal_mask=temporal_mask, - column="random_censor", + column='random_censor', ) results2 = interface2.run(cwd=tmpdir) out_file2 = results2.outputs.out_file diff --git a/xcp_d/tests/test_interfaces_concatenation.py b/xcp_d/tests/test_interfaces_concatenation.py 
index 6068ae4e0..28438820d 100644 --- a/xcp_d/tests/test_interfaces_concatenation.py +++ b/xcp_d/tests/test_interfaces_concatenation.py @@ -10,10 +10,10 @@ def test_cleannamesource(datasets): """Test xcp_d.interfaces.concatenation.CleanNameSource.""" nifti_file = os.path.join( - datasets["ds001419"], - "sub-01", - "func", - "sub-01_task-imagery_run-02_space-MNI152NLin6Asym_res-2_desc-preproc_bold.nii.gz", + datasets['ds001419'], + 'sub-01', + 'func', + 'sub-01_task-imagery_run-02_space-MNI152NLin6Asym_res-2_desc-preproc_bold.nii.gz', ) interface = concatenation.CleanNameSource( @@ -23,18 +23,18 @@ def test_cleannamesource(datasets): name_source = results.outputs.name_source expected_name_source = os.path.join( - datasets["ds001419"], - "sub-01", - "func", - "sub-01_task-imagery_space-MNI152NLin6Asym_res-2_desc-preproc_bold.nii.gz", + datasets['ds001419'], + 'sub-01', + 'func', + 'sub-01_task-imagery_space-MNI152NLin6Asym_res-2_desc-preproc_bold.nii.gz', ) assert name_source == expected_name_source def test_filteroutfailedruns(ds001419_data): """Test xcp_d.interfaces.concatenation.FilterOutFailedRuns.""" - nifti_file = ds001419_data["nifti_file"] - tsv_file = ds001419_data["confounds_file"] + nifti_file = ds001419_data['nifti_file'] + tsv_file = ds001419_data['confounds_file'] denoised_bold = [Undefined, nifti_file, Undefined, Undefined, nifti_file] n_runs = len(denoised_bold) @@ -84,11 +84,11 @@ def test_filteroutfailedruns(ds001419_data): def test_concatenateinputs(ds001419_data, tmp_path_factory): """Test xcp_d.interfaces.concatenation.ConcatenateInputs.""" - tmpdir = tmp_path_factory.mktemp("test_concatenateinputs") + tmpdir = tmp_path_factory.mktemp('test_concatenateinputs') - nifti_file = ds001419_data["nifti_file"] - cifti_file = ds001419_data["cifti_file"] - tsv_file = ds001419_data["confounds_file"] + nifti_file = ds001419_data['nifti_file'] + cifti_file = ds001419_data['cifti_file'] + tsv_file = ds001419_data['confounds_file'] n_runs = 2 n_atlases = 3 diff --git a/xcp_d/tests/test_interfaces_nilearn.py b/xcp_d/tests/test_interfaces_nilearn.py index 9a4745146..1ee5d1d4c 100644 --- a/xcp_d/tests/test_interfaces_nilearn.py +++ b/xcp_d/tests/test_interfaces_nilearn.py @@ -11,12 +11,12 @@ def test_nilearn_merge(ds001419_data, tmp_path_factory): """Test xcp_d.interfaces.nilearn.Merge.""" - tmpdir = tmp_path_factory.mktemp("test_nilearn_merge") + tmpdir = tmp_path_factory.mktemp('test_nilearn_merge') - in_file = ds001419_data["boldref"] + in_file = ds001419_data['boldref'] interface = nilearn.Merge( in_files=[in_file, in_file], - out_file="merged.nii.gz", + out_file='merged.nii.gz', ) results = interface.run(cwd=tmpdir) assert os.path.isfile(results.outputs.out_file) @@ -27,13 +27,13 @@ def test_nilearn_merge(ds001419_data, tmp_path_factory): def test_nilearn_smooth(ds001419_data, tmp_path_factory): """Test xcp_d.interfaces.nilearn.Smooth.""" - tmpdir = tmp_path_factory.mktemp("test_nilearn_smooth") + tmpdir = tmp_path_factory.mktemp('test_nilearn_smooth') - in_file = ds001419_data["boldref"] + in_file = ds001419_data['boldref'] interface = nilearn.Smooth( in_file=in_file, fwhm=6, - out_file="smoothed_1len.nii.gz", + out_file='smoothed_1len.nii.gz', ) results = interface.run(cwd=tmpdir) assert os.path.isfile(results.outputs.out_file) @@ -43,7 +43,7 @@ def test_nilearn_smooth(ds001419_data, tmp_path_factory): interface = nilearn.Smooth( in_file=in_file, fwhm=[2, 3, 4], - out_file="smoothed_3len.nii.gz", + out_file='smoothed_3len.nii.gz', ) results = interface.run(cwd=tmpdir) assert 
os.path.isfile(results.outputs.out_file) @@ -53,13 +53,13 @@ def test_nilearn_smooth(ds001419_data, tmp_path_factory): def test_nilearn_binarymath(ds001419_data, tmp_path_factory): """Test xcp_d.interfaces.nilearn.BinaryMath.""" - tmpdir = tmp_path_factory.mktemp("test_nilearn_binarymath") + tmpdir = tmp_path_factory.mktemp('test_nilearn_binarymath') - in_file = ds001419_data["brain_mask_file"] + in_file = ds001419_data['brain_mask_file'] interface = nilearn.BinaryMath( in_file=in_file, - expression="img * 5", - out_file="mathed.nii.gz", + expression='img * 5', + out_file='mathed.nii.gz', ) results = interface.run(cwd=tmpdir) assert os.path.isfile(results.outputs.out_file) @@ -71,21 +71,21 @@ def test_nilearn_binarymath(ds001419_data, tmp_path_factory): def test_nilearn_resampletoimage(datasets, tmp_path_factory): """Test xcp_d.interfaces.nilearn.ResampleToImage.""" - tmpdir = tmp_path_factory.mktemp("test_nilearn_resampletoimage") + tmpdir = tmp_path_factory.mktemp('test_nilearn_resampletoimage') source_file = os.path.join( - datasets["nibabies"], - "sub-01", - "ses-1mo", - "func", - "sub-01_ses-1mo_task-rest_acq-PA_run-001_space-MNIInfant_cohort-1_boldref.nii.gz", + datasets['nibabies'], + 'sub-01', + 'ses-1mo', + 'func', + 'sub-01_ses-1mo_task-rest_acq-PA_run-001_space-MNIInfant_cohort-1_boldref.nii.gz', ) target_file = os.path.join( - datasets["nibabies"], - "sub-01", - "ses-1mo", - "anat", - "sub-01_ses-1mo_run-001_space-MNIInfant_cohort-1_desc-preproc_T1w.nii.gz", + datasets['nibabies'], + 'sub-01', + 'ses-1mo', + 'anat', + 'sub-01_ses-1mo_run-001_space-MNIInfant_cohort-1_desc-preproc_T1w.nii.gz', ) target_img = nb.load(target_file) source_img = nb.load(source_file) @@ -93,7 +93,7 @@ def test_nilearn_resampletoimage(datasets, tmp_path_factory): interface = nilearn.ResampleToImage( in_file=source_file, target_file=target_file, - out_file="resampled.nii.gz", + out_file='resampled.nii.gz', ) results = interface.run(cwd=tmpdir) assert os.path.isfile(results.outputs.out_file) @@ -105,24 +105,24 @@ def test_nilearn_resampletoimage(datasets, tmp_path_factory): def test_nilearn_denoisenifti(ds001419_data, tmp_path_factory): """Test xcp_d.interfaces.nilearn.DenoiseNifti.""" - tmpdir = tmp_path_factory.mktemp("test_nilearn_denoisenifti") + tmpdir = tmp_path_factory.mktemp('test_nilearn_denoisenifti') - preprocessed_bold = ds001419_data["nifti_file"] - mask = ds001419_data["brain_mask_file"] - confounds_file = ds001419_data["confounds_file"] + preprocessed_bold = ds001419_data['nifti_file'] + mask = ds001419_data['brain_mask_file'] + confounds_file = ds001419_data['confounds_file'] # Select some confounds to use for denoising confounds_df = pd.read_table(confounds_file) - reduced_confounds_df = confounds_df[["csf", "white_matter"]] - reduced_confounds_file = os.path.join(tmpdir, "confounds.tsv") - reduced_confounds_df.to_csv(reduced_confounds_file, sep="\t", index=False) + reduced_confounds_df = confounds_df[['csf', 'white_matter']] + reduced_confounds_file = os.path.join(tmpdir, 'confounds.tsv') + reduced_confounds_df.to_csv(reduced_confounds_file, sep='\t', index=False) # Create the censoring file - censoring_df = confounds_df[["framewise_displacement"]] - censoring_df["framewise_displacement"] = censoring_df["framewise_displacement"] > 0.3 - assert censoring_df["framewise_displacement"].sum() > 0 - temporal_mask = os.path.join(tmpdir, "censoring.tsv") - censoring_df.to_csv(temporal_mask, sep="\t", index=False) + censoring_df = confounds_df[['framewise_displacement']] + 
censoring_df['framewise_displacement'] = censoring_df['framewise_displacement'] > 0.3 + assert censoring_df['framewise_displacement'].sum() > 0 + temporal_mask = os.path.join(tmpdir, 'censoring.tsv') + censoring_df.to_csv(temporal_mask, sep='\t', index=False) preprocessed_img = nb.load(preprocessed_bold) @@ -144,23 +144,23 @@ def test_nilearn_denoisenifti(ds001419_data, tmp_path_factory): def test_nilearn_denoisecifti(ds001419_data, tmp_path_factory): """Test xcp_d.interfaces.nilearn.DenoiseCifti.""" - tmpdir = tmp_path_factory.mktemp("test_nilearn_denoisecifti") + tmpdir = tmp_path_factory.mktemp('test_nilearn_denoisecifti') - preprocessed_bold = ds001419_data["cifti_file"] - confounds_file = ds001419_data["confounds_file"] + preprocessed_bold = ds001419_data['cifti_file'] + confounds_file = ds001419_data['confounds_file'] # Select some confounds to use for denoising confounds_df = pd.read_table(confounds_file) - reduced_confounds_df = confounds_df[["csf", "white_matter"]] - reduced_confounds_file = os.path.join(tmpdir, "confounds.tsv") - reduced_confounds_df.to_csv(reduced_confounds_file, sep="\t", index=False) + reduced_confounds_df = confounds_df[['csf', 'white_matter']] + reduced_confounds_file = os.path.join(tmpdir, 'confounds.tsv') + reduced_confounds_df.to_csv(reduced_confounds_file, sep='\t', index=False) # Create the censoring file - censoring_df = confounds_df[["framewise_displacement"]] - censoring_df["framewise_displacement"] = censoring_df["framewise_displacement"] > 0.3 - assert censoring_df["framewise_displacement"].sum() > 0 - temporal_mask = os.path.join(tmpdir, "censoring.tsv") - censoring_df.to_csv(temporal_mask, sep="\t", index=False) + censoring_df = confounds_df[['framewise_displacement']] + censoring_df['framewise_displacement'] = censoring_df['framewise_displacement'] > 0.3 + assert censoring_df['framewise_displacement'].sum() > 0 + temporal_mask = os.path.join(tmpdir, 'censoring.tsv') + censoring_df.to_csv(temporal_mask, sep='\t', index=False) preprocessed_img = nb.load(preprocessed_bold) @@ -182,10 +182,10 @@ def test_nilearn_denoisecifti(ds001419_data, tmp_path_factory): def _check_denoising_outputs(preprocessed_img, outputs, cifti): if cifti: ndim = 2 - hdr_attr = "nifti_header" + hdr_attr = 'nifti_header' else: ndim = 4 - hdr_attr = "header" + hdr_attr = 'header' preprocessed_img_header = getattr(preprocessed_img, hdr_attr) diff --git a/xcp_d/tests/test_interfaces_utils.py b/xcp_d/tests/test_interfaces_utils.py index c2dba02bb..7796f3f7d 100644 --- a/xcp_d/tests/test_interfaces_utils.py +++ b/xcp_d/tests/test_interfaces_utils.py @@ -10,13 +10,13 @@ def test_conversion_to_32bit_nifti(ds001419_data, tmp_path_factory): """Convert nifti files to 32-bit.""" - tmpdir = tmp_path_factory.mktemp("test_conversion_to_32bit") + tmpdir = tmp_path_factory.mktemp('test_conversion_to_32bit') - float_file = ds001419_data["nifti_file"] - int_file = ds001419_data["brain_mask_file"] + float_file = ds001419_data['nifti_file'] + int_file = ds001419_data['brain_mask_file'] - float64_file = os.path.join(tmpdir, "float64.nii.gz") - int64_file = os.path.join(tmpdir, "int64.nii.gz") + float64_file = os.path.join(tmpdir, 'float64.nii.gz') + int64_file = os.path.join(tmpdir, 'int64.nii.gz') # Create a float64 image to downcast float_img = nb.load(float_file) @@ -62,12 +62,12 @@ def test_conversion_to_32bit_nifti(ds001419_data, tmp_path_factory): def test_conversion_to_32bit_cifti(ds001419_data, tmp_path_factory): """Convert nifti files to 32-bit.""" - tmpdir = 
tmp_path_factory.mktemp("test_conversion_to_32bit") + tmpdir = tmp_path_factory.mktemp('test_conversion_to_32bit') - float_file = ds001419_data["cifti_file"] + float_file = ds001419_data['cifti_file'] - float64_file = os.path.join(tmpdir, "float64.dtseries.nii") - int64_file = os.path.join(tmpdir, "int64.dtseries.nii") + float64_file = os.path.join(tmpdir, 'float64.dtseries.nii') + int64_file = os.path.join(tmpdir, 'int64.dtseries.nii') # Create a float64 image to downcast float_img = nb.load(float_file) diff --git a/xcp_d/tests/test_smoothing.py b/xcp_d/tests/test_smoothing.py index 7ebfc086d..6cbf3098c 100644 --- a/xcp_d/tests/test_smoothing.py +++ b/xcp_d/tests/test_smoothing.py @@ -16,23 +16,21 @@ def test_smoothing_nifti(fmriprep_without_freesurfer_data): """Test NIFTI smoothing.""" # Specify inputs - in_file = fmriprep_without_freesurfer_data["nifti_file"] - mask = fmriprep_without_freesurfer_data["brain_mask_file"] + in_file = fmriprep_without_freesurfer_data['nifti_file'] + mask = fmriprep_without_freesurfer_data['brain_mask_file'] # Let's get into a temp dir tmpdir = tempfile.mkdtemp() os.chdir(tmpdir) # Run AFNI'S FWHMx via CLI, the nipype interface doesn't have what we need - os.system( - ( - f"3dFWHMx -ShowMeClassicFWHM -acf -detrend -input {in_file} -mask {mask} " - "-detprefix detrend.nii.gz -out test_file.out > test_fwhm.out" - ) - ) + os.system( # noqa: S605 + f'3dFWHMx -ShowMeClassicFWHM -acf -detrend -input {in_file} -mask {mask} ' # noqa: S605 + '-detprefix detrend.nii.gz -out test_file.out > test_fwhm.out' # noqa: S605 + ) # noqa: S605 # Read the FWHM values from the .out file into an array - with open("test_fwhm.out", "r") as file: + with open('test_fwhm.out') as file: first_line = file.readline() first_line = first_line.split() fwhm = [] @@ -42,11 +40,14 @@ def test_smoothing_nifti(fmriprep_without_freesurfer_data): fwhm_unsmoothed = np.array(fwhm) # else this will need to be overwritten later - os.system("rm -rf 3dFWHMx.1D test_fwhm.out test_file.out") + os.remove('3dFWHMx.1D') + os.remove('test_fwhm.out') + os.remove('test_file.out') # Smooth the data smooth_data = pe.Node( - Smooth(fwhm=6), name="nifti_smoothing" # FWHM = kernel size + Smooth(fwhm=6), + name='nifti_smoothing', # FWHM = kernel size ) # Use fslmaths to smooth the image smooth_data.inputs.in_file = in_file results = smooth_data.run() @@ -54,15 +55,13 @@ def test_smoothing_nifti(fmriprep_without_freesurfer_data): # Run AFNI'S FWHMx via CLI, the nipype interface doesn't have what we need # i.e : the "ShowMeClassicFWHM" option - os.system( - ( - f"3dFWHMx -ShowMeClassicFWHM -acf -detrend -input {out_file} -mask {mask} " - "-detprefix detrend.nii.gz -out test_file.out > test_fwhm.out" - ) - ) + os.system( # noqa: S605 + f'3dFWHMx -ShowMeClassicFWHM -acf -detrend -input {out_file} -mask {mask} ' # noqa: S605 + '-detprefix detrend.nii.gz -out test_file.out > test_fwhm.out' # noqa: S605 + ) # noqa: S605 # Read the FWHM values from the .out file into an array - with open("test_fwhm.out", "r") as file: + with open('test_fwhm.out') as file: first_line = file.readline() first_line = first_line.split() fwhm = [] @@ -76,75 +75,77 @@ def test_smoothing_nifti(fmriprep_without_freesurfer_data): return -def test_smoothing_cifti(ds001419_data, tmp_path_factory, sigma_lx=fwhm2sigma(6)): +def test_smoothing_cifti(ds001419_data, tmp_path_factory, sigma_lx=None): """Test CIFTI smoothing.""" - tmpdir = tmp_path_factory.mktemp("test_smoothing_cifti") - in_file = ds001419_data["cifti_file"] + sigma_lx = fwhm2sigma(6) if 
sigma_lx is None else sigma_lx + + tmpdir = tmp_path_factory.mktemp('test_smoothing_cifti') + in_file = ds001419_data['cifti_file'] # pull out atlases for each hemisphere right_surf = str( get_template( - template="fsLR", + template='fsLR', space=None, - hemi="R", - density="32k", + hemi='R', + density='32k', desc=None, - suffix="sphere", + suffix='sphere', ) ) left_surf = str( get_template( - template="fsLR", + template='fsLR', space=None, - hemi="L", - density="32k", + hemi='L', + density='32k', desc=None, - suffix="sphere", + suffix='sphere', ) ) # Estimate the smoothness of the unsmoothed file - in_file_smoothness = os.popen( - f"wb_command -cifti-estimate-fwhm {in_file} " - f"-surface CORTEX_LEFT {left_surf} " - f"-surface CORTEX_RIGHT {right_surf} " - "-whole-file -merged-volume" - ).read() - in_file_smoothness = re.findall(r"\d.+", in_file_smoothness) - in_file_smoothness = [x.split(",") for x in in_file_smoothness] + in_file_smoothness = os.popen( # noqa: S605 + f'wb_command -cifti-estimate-fwhm {in_file} ' # noqa: S605 + f'-surface CORTEX_LEFT {left_surf} ' # noqa: S605 + f'-surface CORTEX_RIGHT {right_surf} ' # noqa: S605 + '-whole-file -merged-volume' # noqa: S605 + ).read() # noqa: S605 + in_file_smoothness = re.findall(r'\d.+', in_file_smoothness) + in_file_smoothness = [x.split(',') for x in in_file_smoothness] in_file_smoothness = [item for sublist in in_file_smoothness for item in sublist] in_file_smoothness = list(map(float, in_file_smoothness)) - in_file_smoothness = np.sum((in_file_smoothness)) + in_file_smoothness = np.sum(in_file_smoothness) # Smooth the file smooth_data = pe.Node( CiftiSmooth( sigma_surf=sigma_lx, # the size of the surface kernel sigma_vol=sigma_lx, # the volume of the surface kernel - direction="COLUMN", # which direction to smooth along@ + direction='COLUMN', # which direction to smooth along@ right_surf=right_surf, left_surf=left_surf, num_threads=1, ), - name="cifti_smoothing", + name='cifti_smoothing', n_procs=1, ) smooth_data.inputs.in_file = in_file smooth_data.base_dir = tmpdir - smooth_data.inputs.out_file = os.path.join(tmpdir, "test.dtseries.nii") + smooth_data.inputs.out_file = os.path.join(tmpdir, 'test.dtseries.nii') results = smooth_data.run() out_file = results.outputs.out_file # Estimate the smoothness of the smoothed file - out_file_smoothness = os.popen( - f"wb_command -cifti-estimate-fwhm {out_file} " - f"-surface CORTEX_LEFT {left_surf} " - f"-surface CORTEX_RIGHT {right_surf} " - "-whole-file -merged-volume" - ).read() - out_file_smoothness = re.findall(r"\d.+", out_file_smoothness) - out_file_smoothness = [x.split(",") for x in out_file_smoothness] + out_file_smoothness = os.popen( # noqa: S605 + f'wb_command -cifti-estimate-fwhm {out_file} ' # noqa: S605 + f'-surface CORTEX_LEFT {left_surf} ' # noqa: S605 + f'-surface CORTEX_RIGHT {right_surf} ' # noqa: S605 + '-whole-file -merged-volume' # noqa: S605 + ).read() # noqa: S605 + out_file_smoothness = re.findall(r'\d.+', out_file_smoothness) + out_file_smoothness = [x.split(',') for x in out_file_smoothness] out_file_smoothness = [item for sublist in out_file_smoothness for item in sublist] out_file_smoothness = list(map(float, out_file_smoothness)) - out_file_smoothness = np.sum((out_file_smoothness)) + out_file_smoothness = np.sum(out_file_smoothness) assert in_file_smoothness < out_file_smoothness diff --git a/xcp_d/tests/test_utils_atlas.py b/xcp_d/tests/test_utils_atlas.py index 9e06adaf4..d40e4dae0 100644 --- a/xcp_d/tests/test_utils_atlas.py +++ 
b/xcp_d/tests/test_utils_atlas.py @@ -10,7 +10,7 @@ def test_get_atlas_names(): """Test xcp_d.utils.atlas.select_atlases.""" - selected_atlases = atlas.select_atlases(atlases=["4S156Parcels", "4S256Parcels"], subset="all") + selected_atlases = atlas.select_atlases(atlases=['4S156Parcels', '4S256Parcels'], subset='all') assert isinstance(selected_atlases, list) assert all(isinstance(name, str) for name in selected_atlases) assert len(selected_atlases) == 2 @@ -18,106 +18,106 @@ def test_get_atlas_names(): def test_collect_atlases(datasets, caplog, tmp_path_factory): """Test xcp_d.utils.atlas.collect_atlases.""" - schaefer_dset = datasets["schaefer100"] + schaefer_dset = datasets['schaefer100'] atlas_datasets = { - "xcpdatlases": str(load_data("atlases")), + 'xcpdatlases': str(load_data('atlases')), } atlas_cache = atlas.collect_atlases( datasets=atlas_datasets, - atlases=["Gordon", "Schaefer100"], - file_format="nifti", + atlases=['Gordon', 'Schaefer100'], + file_format='nifti', bids_filters={}, ) - assert "Gordon" in atlas_cache - assert "Schaefer100" not in atlas_cache - assert "No atlas images found for Schaefer100" in caplog.text + assert 'Gordon' in atlas_cache + assert 'Schaefer100' not in atlas_cache + assert 'No atlas images found for Schaefer100' in caplog.text # Add the schaefer dataset - atlas_datasets["schaefer100"] = schaefer_dset + atlas_datasets['schaefer100'] = schaefer_dset atlas_cache = atlas.collect_atlases( datasets=atlas_datasets, - atlases=["Gordon", "Schaefer100"], - file_format="nifti", + atlases=['Gordon', 'Schaefer100'], + file_format='nifti', bids_filters={}, ) - assert "Gordon" in atlas_cache - assert "Schaefer100" in atlas_cache + assert 'Gordon' in atlas_cache + assert 'Schaefer100' in atlas_cache # Skip over the schaefer dataset - atlas_datasets["schaefer100"] = schaefer_dset + atlas_datasets['schaefer100'] = schaefer_dset atlas_cache = atlas.collect_atlases( datasets=atlas_datasets, - atlases=["Gordon"], - file_format="cifti", + atlases=['Gordon'], + file_format='cifti', bids_filters={}, ) - assert "Gordon" in atlas_cache - assert "Schaefer100" not in atlas_cache + assert 'Gordon' in atlas_cache + assert 'Schaefer100' not in atlas_cache # Add a duplicate atlas - atlas_datasets["duplicate"] = str(load_data("atlases")) + atlas_datasets['duplicate'] = str(load_data('atlases')) with pytest.raises(ValueError, match="Multiple datasets contain the same atlas 'Gordon'"): atlas.collect_atlases( datasets=atlas_datasets, - atlases=["Gordon"], - file_format="nifti", + atlases=['Gordon'], + file_format='nifti', bids_filters={}, ) # Create a dataset that has atlases, but is missing information - tmpdir = tmp_path_factory.mktemp("test_collect_atlases") + tmpdir = tmp_path_factory.mktemp('test_collect_atlases') # Make the dataset_description.json - with open(tmpdir / "dataset_description.json", "w") as fo: - json.dump({"DatasetType": "atlas", "BIDSVersion": "1.9.0", "Name": "Test"}, fo) + with open(tmpdir / 'dataset_description.json', 'w') as fo: + json.dump({'DatasetType': 'atlas', 'BIDSVersion': '1.9.0', 'Name': 'Test'}, fo) # Create fake atlas file - (tmpdir / "atlas-TEST").mkdir() - (tmpdir / "atlas-TEST" / "atlas-TEST_space-MNI152NLin6Asym_res-01_dseg.nii.gz").write_text( - "test" + (tmpdir / 'atlas-TEST').mkdir() + (tmpdir / 'atlas-TEST' / 'atlas-TEST_space-MNI152NLin6Asym_res-01_dseg.nii.gz').write_text( + 'test' ) # First there's an image, but no TSV or metadata - with pytest.raises(FileNotFoundError, match="No TSV file found for"): + with 
pytest.raises(FileNotFoundError, match='No TSV file found for'): atlas.collect_atlases( - datasets={"test": tmpdir}, - atlases=["TEST"], - file_format="nifti", + datasets={'test': tmpdir}, + atlases=['TEST'], + file_format='nifti', bids_filters={}, ) # Now there's an image and a TSV, but the TSV doesn't have a "label" column - with open(tmpdir / "atlas-TEST" / "atlas-TEST_dseg.tsv", "w") as fo: - fo.write("index\n1\n") + with open(tmpdir / 'atlas-TEST' / 'atlas-TEST_dseg.tsv', 'w') as fo: + fo.write('index\n1\n') with pytest.raises(ValueError, match="'label' column not found"): atlas.collect_atlases( - datasets={"test": tmpdir}, - atlases=["TEST"], - file_format="nifti", + datasets={'test': tmpdir}, + atlases=['TEST'], + file_format='nifti', bids_filters={}, ) # Now there's an image and a TSV, but the TSV doesn't have an "index" column - with open(tmpdir / "atlas-TEST" / "atlas-TEST_dseg.tsv", "w") as fo: - fo.write("label\ntest\n") + with open(tmpdir / 'atlas-TEST' / 'atlas-TEST_dseg.tsv', 'w') as fo: + fo.write('label\ntest\n') with pytest.raises(ValueError, match="'index' column not found"): atlas.collect_atlases( - datasets={"test": tmpdir}, - atlases=["TEST"], - file_format="nifti", + datasets={'test': tmpdir}, + atlases=['TEST'], + file_format='nifti', bids_filters={}, ) # Now there's an image, a TSV, and metadata - with open(tmpdir / "atlas-TEST" / "atlas-TEST_dseg.tsv", "w") as fo: - fo.write("index\tlabel\n1\ttest\n") + with open(tmpdir / 'atlas-TEST' / 'atlas-TEST_dseg.tsv', 'w') as fo: + fo.write('index\tlabel\n1\ttest\n') atlas_cache = atlas.collect_atlases( - datasets={"test": tmpdir}, - atlases=["TEST"], - file_format="nifti", + datasets={'test': tmpdir}, + atlases=['TEST'], + file_format='nifti', bids_filters={}, ) - assert "TEST" in atlas_cache + assert 'TEST' in atlas_cache diff --git a/xcp_d/tests/test_utils_bids.py b/xcp_d/tests/test_utils_bids.py index 26dc44f83..7528c4d75 100644 --- a/xcp_d/tests/test_utils_bids.py +++ b/xcp_d/tests/test_utils_bids.py @@ -16,198 +16,198 @@ def test_collect_participants(datasets): This also covers BIDSError and BIDSWarning. """ - bids_dir = datasets["ds001419"] + bids_dir = datasets['ds001419'] bids_layout = BIDSLayout(bids_dir, validate=False) nonbids_layout = BIDSLayout(os.path.dirname(bids_dir), validate=False) # Pass in non-BIDS folder to get BIDSError. - with pytest.raises(xbids.BIDSError, match="Could not find participants"): - xbids.collect_participants(nonbids_layout, participant_label="fail") + with pytest.raises(xbids.BIDSError, match='Could not find participants'): + xbids.collect_participants(nonbids_layout, participant_label='fail') # Pass in BIDS folder with no matching participants to get BIDSWarning. - with pytest.raises(xbids.BIDSError, match="Could not find participants"): - xbids.collect_participants(bids_layout, participant_label="fail") + with pytest.raises(xbids.BIDSError, match='Could not find participants'): + xbids.collect_participants(bids_layout, participant_label='fail') # Pass in BIDS folder with only some participants to get BIDSWarning. - with pytest.warns(xbids.BIDSWarning, match="Some participants were not found"): - xbids.collect_participants(bids_layout, participant_label=["01", "fail"]) + with pytest.warns(xbids.BIDSWarning, match='Some participants were not found'): + xbids.collect_participants(bids_layout, participant_label=['01', 'fail']) # Pass in BIDS folder with only some participants to get BIDSError. 
- with pytest.raises(xbids.BIDSError, match="Some participants were not found"): - xbids.collect_participants(bids_layout, participant_label=["01", "fail"], strict=True) + with pytest.raises(xbids.BIDSError, match='Some participants were not found'): + xbids.collect_participants(bids_layout, participant_label=['01', 'fail'], strict=True) found_labels = xbids.collect_participants(bids_layout, participant_label=None) - assert found_labels == ["01"] + assert found_labels == ['01'] - found_labels = xbids.collect_participants(bids_layout, participant_label="01") - assert found_labels == ["01"] + found_labels = xbids.collect_participants(bids_layout, participant_label='01') + assert found_labels == ['01'] def test_collect_data_ds001419(datasets): """Test the collect_data function.""" - bids_dir = datasets["ds001419"] + bids_dir = datasets['ds001419'] layout = BIDSLayout(bids_dir, validate=False) # NIFTI workflow, but also get a BIDSLayout subj_data = xbids.collect_data( layout=layout, - input_type="fmriprep", - participant_label="01", + input_type='fmriprep', + participant_label='01', bids_filters=None, - file_format="nifti", + file_format='nifti', ) - assert len(subj_data["bold"]) == 4 - assert "space-MNI152NLin6Asym" in subj_data["bold"][0] - assert os.path.basename(subj_data["t1w"]) == "sub-01_desc-preproc_T1w.nii.gz" - assert "space-" not in subj_data["t1w"] - assert "to-MNI152NLin6Asym" in subj_data["anat_to_template_xfm"] - assert "from-MNI152NLin6Asym" in subj_data["template_to_anat_xfm"] + assert len(subj_data['bold']) == 4 + assert 'space-MNI152NLin6Asym' in subj_data['bold'][0] + assert os.path.basename(subj_data['t1w']) == 'sub-01_desc-preproc_T1w.nii.gz' + assert 'space-' not in subj_data['t1w'] + assert 'to-MNI152NLin6Asym' in subj_data['anat_to_template_xfm'] + assert 'from-MNI152NLin6Asym' in subj_data['template_to_anat_xfm'] # CIFTI workflow subj_data = xbids.collect_data( layout=layout, - input_type="fmriprep", - participant_label="01", - bids_filters={"bold": {"task": "rest"}}, - file_format="cifti", + input_type='fmriprep', + participant_label='01', + bids_filters={'bold': {'task': 'rest'}}, + file_format='cifti', ) - assert len(subj_data["bold"]) == 1 - assert "space-fsLR" in subj_data["bold"][0] - assert "space-" not in subj_data["t1w"] - assert os.path.basename(subj_data["t1w"]) == "sub-01_desc-preproc_T1w.nii.gz" - assert "to-MNI152NLin6Asym" in subj_data["anat_to_template_xfm"] - assert "from-MNI152NLin6Asym" in subj_data["template_to_anat_xfm"] + assert len(subj_data['bold']) == 1 + assert 'space-fsLR' in subj_data['bold'][0] + assert 'space-' not in subj_data['t1w'] + assert os.path.basename(subj_data['t1w']) == 'sub-01_desc-preproc_T1w.nii.gz' + assert 'to-MNI152NLin6Asym' in subj_data['anat_to_template_xfm'] + assert 'from-MNI152NLin6Asym' in subj_data['template_to_anat_xfm'] def test_collect_data_nibabies(datasets): """Test the collect_data function.""" - bids_dir = datasets["nibabies"] - xcp_d_config = str(load_data("xcp_d_bids_config2.json")) + bids_dir = datasets['nibabies'] + xcp_d_config = str(load_data('xcp_d_bids_config2.json')) layout = BIDSLayout( bids_dir, validate=False, - config=["bids", "derivatives", xcp_d_config], + config=['bids', 'derivatives', xcp_d_config], ) - cohort_files = layout.get(subject="01", cohort="1", space="MNIInfant", suffix="boldref") + cohort_files = layout.get(subject='01', cohort='1', space='MNIInfant', suffix='boldref') assert len(cohort_files) > 0 # NIFTI workflow subj_data = xbids.collect_data( layout=layout, - 
input_type="fmriprep", - participant_label="01", + input_type='fmriprep', + participant_label='01', bids_filters=None, - file_format="nifti", + file_format='nifti', ) - assert len(subj_data["bold"]) == 1 - assert "space-MNIInfant" in subj_data["bold"][0] - assert "cohort-1" in subj_data["bold"][0] - assert os.path.basename(subj_data["t1w"]) == "sub-01_ses-1mo_run-001_desc-preproc_T1w.nii.gz" - assert "space-" not in subj_data["t1w"] - assert "to-MNIInfant" in subj_data["anat_to_template_xfm"] - assert "from-MNIInfant" in subj_data["template_to_anat_xfm"] + assert len(subj_data['bold']) == 1 + assert 'space-MNIInfant' in subj_data['bold'][0] + assert 'cohort-1' in subj_data['bold'][0] + assert os.path.basename(subj_data['t1w']) == 'sub-01_ses-1mo_run-001_desc-preproc_T1w.nii.gz' + assert 'space-' not in subj_data['t1w'] + assert 'to-MNIInfant' in subj_data['anat_to_template_xfm'] + assert 'from-MNIInfant' in subj_data['template_to_anat_xfm'] # CIFTI workflow with pytest.raises(FileNotFoundError): subj_data = xbids.collect_data( layout=layout, - input_type="fmriprep", - participant_label="01", + input_type='fmriprep', + participant_label='01', bids_filters=None, - file_format="cifti", + file_format='cifti', ) def test_collect_mesh_data(datasets, tmp_path_factory): """Test collect_mesh_data.""" # Dataset without mesh files - layout = BIDSLayout(datasets["fmriprep_without_freesurfer"], validate=False) + layout = BIDSLayout(datasets['fmriprep_without_freesurfer'], validate=False) mesh_available, standard_space_mesh, _, _ = xbids.collect_mesh_data( - layout, "1648798153", bids_filters={} + layout, '1648798153', bids_filters={} ) assert mesh_available is False assert standard_space_mesh is False # Dataset with native-space mesh files (one file matching each query) - layout = BIDSLayout(datasets["pnc"], validate=False) + layout = BIDSLayout(datasets['pnc'], validate=False) mesh_available, standard_space_mesh, _, _ = xbids.collect_mesh_data( - layout, "1648798153", bids_filters={} + layout, '1648798153', bids_filters={} ) assert mesh_available is True assert standard_space_mesh is False # Dataset with standard-space mesh files (one file matching each query) - std_mesh_dir = tmp_path_factory.mktemp("standard_mesh") + std_mesh_dir = tmp_path_factory.mktemp('standard_mesh') shutil.copyfile( - os.path.join(datasets["pnc"], "dataset_description.json"), - std_mesh_dir / "dataset_description.json", + os.path.join(datasets['pnc'], 'dataset_description.json'), + std_mesh_dir / 'dataset_description.json', ) - os.makedirs(std_mesh_dir / "sub-1648798153/ses-PNC1/anat", exist_ok=True) + os.makedirs(std_mesh_dir / 'sub-1648798153/ses-PNC1/anat', exist_ok=True) files = [ - "sub-1648798153_ses-PNC1_hemi-L_space-fsLR_den-32k_pial.surf.gii", - "sub-1648798153_ses-PNC1_hemi-L_space-fsLR_den-32k_white.surf.gii", - "sub-1648798153_ses-PNC1_hemi-R_space-fsLR_den-32k_pial.surf.gii", - "sub-1648798153_ses-PNC1_hemi-R_space-fsLR_den-32k_white.surf.gii", - "sub-1648798153_ses-PNC1_hemi-L_pial.surf.gii", - "sub-1648798153_ses-PNC1_hemi-L_white.surf.gii", - "sub-1648798153_ses-PNC1_hemi-R_pial.surf.gii", - "sub-1648798153_ses-PNC1_hemi-R_white.surf.gii", + 'sub-1648798153_ses-PNC1_hemi-L_space-fsLR_den-32k_pial.surf.gii', + 'sub-1648798153_ses-PNC1_hemi-L_space-fsLR_den-32k_white.surf.gii', + 'sub-1648798153_ses-PNC1_hemi-R_space-fsLR_den-32k_pial.surf.gii', + 'sub-1648798153_ses-PNC1_hemi-R_space-fsLR_den-32k_white.surf.gii', + 'sub-1648798153_ses-PNC1_hemi-L_pial.surf.gii', + 'sub-1648798153_ses-PNC1_hemi-L_white.surf.gii', 
+ 'sub-1648798153_ses-PNC1_hemi-R_pial.surf.gii', + 'sub-1648798153_ses-PNC1_hemi-R_white.surf.gii', ] for f in files: - (std_mesh_dir / "sub-1648798153/ses-PNC1/anat").joinpath(f).touch() + (std_mesh_dir / 'sub-1648798153/ses-PNC1/anat').joinpath(f).touch() layout = BIDSLayout(std_mesh_dir, validate=False) mesh_available, standard_space_mesh, _, mesh_files = xbids.collect_mesh_data( - layout, "1648798153", bids_filters={} + layout, '1648798153', bids_filters={} ) assert mesh_available is True assert standard_space_mesh is True - order = ["lh_pial_surf", "lh_wm_surf", "rh_pial_surf", "rh_wm_surf"] + order = ['lh_pial_surf', 'lh_wm_surf', 'rh_pial_surf', 'rh_wm_surf'] for i, k in enumerate(order): assert mesh_files[k] == str( - (std_mesh_dir / "sub-1648798153/ses-PNC1/anat").joinpath(files[i]) + (std_mesh_dir / 'sub-1648798153/ses-PNC1/anat').joinpath(files[i]) ) # Dataset with multiple files matching each query (raises an error) - bad_mesh_dir = tmp_path_factory.mktemp("standard_mesh") + bad_mesh_dir = tmp_path_factory.mktemp('standard_mesh') shutil.copyfile( - os.path.join(datasets["pnc"], "dataset_description.json"), - bad_mesh_dir / "dataset_description.json", + os.path.join(datasets['pnc'], 'dataset_description.json'), + bad_mesh_dir / 'dataset_description.json', ) - os.makedirs(bad_mesh_dir / "sub-1648798153/ses-PNC1/anat", exist_ok=True) + os.makedirs(bad_mesh_dir / 'sub-1648798153/ses-PNC1/anat', exist_ok=True) files = [ - "sub-1648798153_ses-PNC1_hemi-L_space-fsLR_den-32k_pial.surf.gii", - "sub-1648798153_ses-PNC1_hemi-L_space-fsLR_den-32k_white.surf.gii", - "sub-1648798153_ses-PNC1_hemi-R_space-fsLR_den-32k_pial.surf.gii", - "sub-1648798153_ses-PNC1_hemi-R_space-fsLR_den-32k_white.surf.gii", - "sub-1648798153_ses-PNC1_acq-test_hemi-L_space-fsLR_den-32k_pial.surf.gii", - "sub-1648798153_ses-PNC1_acq-test_hemi-L_space-fsLR_den-32k_white.surf.gii", - "sub-1648798153_ses-PNC1_acq-test_hemi-R_space-fsLR_den-32k_pial.surf.gii", - "sub-1648798153_ses-PNC1_acq-test_hemi-R_space-fsLR_den-32k_white.surf.gii", + 'sub-1648798153_ses-PNC1_hemi-L_space-fsLR_den-32k_pial.surf.gii', + 'sub-1648798153_ses-PNC1_hemi-L_space-fsLR_den-32k_white.surf.gii', + 'sub-1648798153_ses-PNC1_hemi-R_space-fsLR_den-32k_pial.surf.gii', + 'sub-1648798153_ses-PNC1_hemi-R_space-fsLR_den-32k_white.surf.gii', + 'sub-1648798153_ses-PNC1_acq-test_hemi-L_space-fsLR_den-32k_pial.surf.gii', + 'sub-1648798153_ses-PNC1_acq-test_hemi-L_space-fsLR_den-32k_white.surf.gii', + 'sub-1648798153_ses-PNC1_acq-test_hemi-R_space-fsLR_den-32k_pial.surf.gii', + 'sub-1648798153_ses-PNC1_acq-test_hemi-R_space-fsLR_den-32k_white.surf.gii', ] for f in files: - (std_mesh_dir / "sub-1648798153/ses-PNC1/anat").joinpath(f).touch() + (std_mesh_dir / 'sub-1648798153/ses-PNC1/anat').joinpath(f).touch() layout = BIDSLayout(std_mesh_dir, validate=False) - with pytest.raises(ValueError, match="More than one surface found"): - xbids.collect_mesh_data(layout, "1648798153", bids_filters={}) + with pytest.raises(ValueError, match='More than one surface found'): + xbids.collect_mesh_data(layout, '1648798153', bids_filters={}) # If we include BIDS filters, we should be able to ignore the existing files - layout = BIDSLayout(datasets["pnc"], validate=False) + layout = BIDSLayout(datasets['pnc'], validate=False) mesh_available, standard_space_mesh, _, _ = xbids.collect_mesh_data( layout, - "1648798153", + '1648798153', bids_filters={ - "lh_pial_surf": {"acquisition": "test"}, - "rh_pial_surf": {"acquisition": "test"}, - "lh_wm_surf": {"acquisition": "test"}, 
- "rh_wm_surf": {"acquisition": "test"}, - "lh_subject_sphere": {"acquisition": "test"}, - "rh_subject_sphere": {"acquisition": "test"}, + 'lh_pial_surf': {'acquisition': 'test'}, + 'rh_pial_surf': {'acquisition': 'test'}, + 'lh_wm_surf': {'acquisition': 'test'}, + 'rh_wm_surf': {'acquisition': 'test'}, + 'lh_subject_sphere': {'acquisition': 'test'}, + 'rh_subject_sphere': {'acquisition': 'test'}, }, ) assert mesh_available is False @@ -217,46 +217,46 @@ def test_collect_mesh_data(datasets, tmp_path_factory): def test_collect_morphometry_data(datasets, tmp_path_factory): """Test collect_morphometry_data.""" # Dataset without morphometry files - layout = BIDSLayout(datasets["fmriprep_without_freesurfer"], validate=False) - morph_file_types, _ = xbids.collect_morphometry_data(layout, "1648798153", bids_filters={}) + layout = BIDSLayout(datasets['fmriprep_without_freesurfer'], validate=False) + morph_file_types, _ = xbids.collect_morphometry_data(layout, '1648798153', bids_filters={}) assert morph_file_types == [] # Dataset with morphometry files (one file matching each query) - layout = BIDSLayout(datasets["pnc"], validate=False) - morph_file_types, _ = xbids.collect_morphometry_data(layout, "1648798153", bids_filters={}) - assert morph_file_types == ["cortical_thickness", "sulcal_curv", "sulcal_depth"] + layout = BIDSLayout(datasets['pnc'], validate=False) + morph_file_types, _ = xbids.collect_morphometry_data(layout, '1648798153', bids_filters={}) + assert morph_file_types == ['cortical_thickness', 'sulcal_curv', 'sulcal_depth'] # Dataset with multiple files matching each query (raises an error) - bad_morph_dir = tmp_path_factory.mktemp("bad_morph") + bad_morph_dir = tmp_path_factory.mktemp('bad_morph') shutil.copyfile( - os.path.join(datasets["pnc"], "dataset_description.json"), - bad_morph_dir / "dataset_description.json", + os.path.join(datasets['pnc'], 'dataset_description.json'), + bad_morph_dir / 'dataset_description.json', ) - os.makedirs(bad_morph_dir / "sub-1648798153/ses-PNC1/anat", exist_ok=True) + os.makedirs(bad_morph_dir / 'sub-1648798153/ses-PNC1/anat', exist_ok=True) files = [ - "sub-1648798153_ses-PNC1_acq-refaced_space-fsLR_den-91k_thickness.dscalar.nii", - "sub-1648798153_ses-PNC1_acq-refaced2_space-fsLR_den-91k_thickness.dscalar.nii", - "sub-1648798153_ses-PNC1_acq-refaced_space-fsLR_den-91k_sulc.dscalar.nii", - "sub-1648798153_ses-PNC1_acq-refaced2_space-fsLR_den-91k_sulc.dscalar.nii", - "sub-1648798153_ses-PNC1_acq-refaced_space-fsLR_den-91k_curv.dscalar.nii", - "sub-1648798153_ses-PNC1_acq-refaced2_space-fsLR_den-91k_curv.dscalar.nii", + 'sub-1648798153_ses-PNC1_acq-refaced_space-fsLR_den-91k_thickness.dscalar.nii', + 'sub-1648798153_ses-PNC1_acq-refaced2_space-fsLR_den-91k_thickness.dscalar.nii', + 'sub-1648798153_ses-PNC1_acq-refaced_space-fsLR_den-91k_sulc.dscalar.nii', + 'sub-1648798153_ses-PNC1_acq-refaced2_space-fsLR_den-91k_sulc.dscalar.nii', + 'sub-1648798153_ses-PNC1_acq-refaced_space-fsLR_den-91k_curv.dscalar.nii', + 'sub-1648798153_ses-PNC1_acq-refaced2_space-fsLR_den-91k_curv.dscalar.nii', ] for f in files: - (bad_morph_dir / "sub-1648798153/ses-PNC1/anat").joinpath(f).touch() + (bad_morph_dir / 'sub-1648798153/ses-PNC1/anat').joinpath(f).touch() layout = BIDSLayout(bad_morph_dir, validate=False) - with pytest.raises(ValueError, match="More than one .* found"): - xbids.collect_morphometry_data(layout, "1648798153", bids_filters={}) + with pytest.raises(ValueError, match='More than one .* found'): + xbids.collect_morphometry_data(layout, '1648798153', 
bids_filters={}) # If we include BIDS filters, we should be able to ignore the existing files - layout = BIDSLayout(datasets["pnc"], validate=False) + layout = BIDSLayout(datasets['pnc'], validate=False) morph_file_types, _ = xbids.collect_morphometry_data( layout, - "1648798153", + '1648798153', bids_filters={ - "cortical_thickness": {"acquisition": "test"}, - "sulcal_curv": {"acquisition": "test"}, - "sulcal_depth": {"acquisition": "test"}, + 'cortical_thickness': {'acquisition': 'test'}, + 'sulcal_curv': {'acquisition': 'test'}, + 'sulcal_depth': {'acquisition': 'test'}, }, ) assert morph_file_types == [] @@ -264,11 +264,11 @@ def test_collect_morphometry_data(datasets, tmp_path_factory): def test_write_derivative_description(datasets, tmp_path_factory, caplog): """Test write_derivative_description.""" - tmpdir = tmp_path_factory.mktemp("test_write_derivative_description") - dset_description = os.path.join(tmpdir, "dataset_description.json") + tmpdir = tmp_path_factory.mktemp('test_write_derivative_description') + dset_description = os.path.join(tmpdir, 'dataset_description.json') # The function expects a description file in the fmri_dir. - with pytest.raises(FileNotFoundError, match="Dataset description DNE"): + with pytest.raises(FileNotFoundError, match='Dataset description DNE'): xbids.write_derivative_description( tmpdir, tmpdir, @@ -278,17 +278,17 @@ def test_write_derivative_description(datasets, tmp_path_factory, caplog): assert not os.path.isfile(dset_description) # It will work when we give it a real fmri_dir. - fmri_dir = datasets["ds001419"] + fmri_dir = datasets['ds001419'] xbids.write_derivative_description( fmri_dir, tmpdir, - atlases=["Gordon"], - dataset_links={"preprocessed": "/fake/path1", "confounds": "/fake/path4"}, + atlases=['Gordon'], + dataset_links={'preprocessed': '/fake/path1', 'confounds': '/fake/path4'}, ) assert os.path.isfile(dset_description) # Now overwrite the description. - with open(dset_description, "r") as fo: + with open(dset_description) as fo: desc = json.load(fo) assert "DatasetLink 'preprocessed' does not match" not in caplog.text @@ -296,46 +296,46 @@ def test_write_derivative_description(datasets, tmp_path_factory, caplog): xbids.write_derivative_description( tmpdir, tmpdir, - atlases=["Gordon"], - dataset_links={"preprocessed": "/fake/path2", "confounds": "/fake/path5"}, + atlases=['Gordon'], + dataset_links={'preprocessed': '/fake/path2', 'confounds': '/fake/path5'}, ) assert "DatasetLink 'preprocessed' does not match" in caplog.text assert "DatasetLink 'confounds' does not match" in caplog.text # Now change the version and re-run the function. 
- desc["GeneratedBy"][0]["Version"] = "0.0.1" - with open(dset_description, "w") as fo: + desc['GeneratedBy'][0]['Version'] = '0.0.1' + with open(dset_description, 'w') as fo: json.dump(desc, fo, indent=4) - assert "Previous output generated by version" not in caplog.text + assert 'Previous output generated by version' not in caplog.text xbids.write_derivative_description( fmri_dir, tmpdir, atlases=None, dataset_links={}, ) - assert "Previous output generated by version" in caplog.text + assert 'Previous output generated by version' in caplog.text # Should raise a warning if DatasetType is not in the description - desc.pop("DatasetType") - with open(dset_description, "w") as fo: + desc.pop('DatasetType') + with open(dset_description, 'w') as fo: json.dump(desc, fo, indent=4) - assert "DatasetType key not in" not in caplog.text + assert 'DatasetType key not in' not in caplog.text xbids.write_derivative_description( tmpdir, tmpdir, atlases=None, dataset_links={}, ) - assert "DatasetType key not in" in caplog.text + assert 'DatasetType key not in' in caplog.text # Should raise an error if DatasetType is present, but isn't "derivative" - desc["DatasetType"] = "raw" - with open(dset_description, "w") as fo: + desc['DatasetType'] = 'raw' + with open(dset_description, 'w') as fo: json.dump(desc, fo, indent=4) - with pytest.raises(ValueError, match="XCP-D only works on derivative datasets."): + with pytest.raises(ValueError, match='XCP-D only works on derivative datasets.'): xbids.write_derivative_description( tmpdir, tmpdir, @@ -346,134 +346,134 @@ def test_write_derivative_description(datasets, tmp_path_factory, caplog): def test_write_atlas_dataset_description(tmp_path_factory, caplog): """Test write_atlas_dataset_description.""" - tmpdir = tmp_path_factory.mktemp("test_write_atlas_dataset_description") - dset_description = os.path.join(tmpdir, "dataset_description.json") + tmpdir = tmp_path_factory.mktemp('test_write_atlas_dataset_description') + dset_description = os.path.join(tmpdir, 'dataset_description.json') xbids.write_atlas_dataset_description(tmpdir) assert os.path.isfile(dset_description) # Now change the version and re-run the function. 
- with open(dset_description, "r") as fo: + with open(dset_description) as fo: desc = json.load(fo) - desc["GeneratedBy"][0]["Version"] = "0.0.1" - with open(dset_description, "w") as fo: + desc['GeneratedBy'][0]['Version'] = '0.0.1' + with open(dset_description, 'w') as fo: json.dump(desc, fo, indent=4) - assert "Previous output generated by version" not in caplog.text + assert 'Previous output generated by version' not in caplog.text xbids.write_atlas_dataset_description(tmpdir) - assert "Previous output generated by version" in caplog.text + assert 'Previous output generated by version' in caplog.text def test_get_preproc_pipeline_info(datasets, tmp_path_factory, caplog): """Test get_preproc_pipeline_info.""" - input_types = ["fmriprep", "nibabies", "hcp", "dcan", "ukb"] + input_types = ['fmriprep', 'nibabies', 'hcp', 'dcan', 'ukb'] for input_type in input_types: - info_dict = xbids.get_preproc_pipeline_info(input_type, datasets["ds001419"]) - assert "references" in info_dict.keys() + info_dict = xbids.get_preproc_pipeline_info(input_type, datasets['ds001419']) + assert 'references' in info_dict.keys() - with pytest.raises(ValueError, match="Unsupported input_type"): - xbids.get_preproc_pipeline_info("fail", datasets["ds001419"]) + with pytest.raises(ValueError, match='Unsupported input_type'): + xbids.get_preproc_pipeline_info('fail', datasets['ds001419']) - assert "Dataset description DNE" not in caplog.text - info_dict = xbids.get_preproc_pipeline_info("fmriprep", ".") - assert "Dataset description DNE" in caplog.text - assert info_dict["name"] == "fmriprep" - assert info_dict["version"] == "unknown" + assert 'Dataset description DNE' not in caplog.text + info_dict = xbids.get_preproc_pipeline_info('fmriprep', '.') + assert 'Dataset description DNE' in caplog.text + assert info_dict['name'] == 'fmriprep' + assert info_dict['version'] == 'unknown' - tmpdir = tmp_path_factory.mktemp("test_get_preproc_pipeline_info") - dset_description = os.path.join(tmpdir, "dataset_description.json") - with open(dset_description, "w") as fo: + tmpdir = tmp_path_factory.mktemp('test_get_preproc_pipeline_info') + dset_description = os.path.join(tmpdir, 'dataset_description.json') + with open(dset_description, 'w') as fo: json.dump({}, fo) - assert "GeneratedBy key DNE" not in caplog.text - info_dict = xbids.get_preproc_pipeline_info("dcan", tmpdir) - assert "GeneratedBy key DNE" in caplog.text - assert info_dict["name"] == "dcan" - assert info_dict["version"] == "unknown" + assert 'GeneratedBy key DNE' not in caplog.text + info_dict = xbids.get_preproc_pipeline_info('dcan', tmpdir) + assert 'GeneratedBy key DNE' in caplog.text + assert info_dict['name'] == 'dcan' + assert info_dict['version'] == 'unknown' def test_get_tr(ds001419_data): """Test _get_tr.""" - t_r = xbids._get_tr(ds001419_data["nifti_file"]) + t_r = xbids._get_tr(ds001419_data['nifti_file']) assert t_r == 3.0 - t_r = xbids._get_tr(ds001419_data["cifti_file"]) + t_r = xbids._get_tr(ds001419_data['cifti_file']) assert t_r == 3.0 def test_get_entity(datasets): """Test get_entity.""" - fname = os.path.join(datasets["ds001419"], "sub-01", "anat", "sub-01_desc-preproc_T1w.nii.gz") - entity = xbids.get_entity(fname, "space") - assert entity == "T1w" + fname = os.path.join(datasets['ds001419'], 'sub-01', 'anat', 'sub-01_desc-preproc_T1w.nii.gz') + entity = xbids.get_entity(fname, 'space') + assert entity == 'T1w' fname = os.path.join( - datasets["ds001419"], - "sub-01", - "func", - "sub-01_task-rest_desc-preproc_bold.nii.gz", + 
datasets['ds001419'], + 'sub-01', + 'func', + 'sub-01_task-rest_desc-preproc_bold.nii.gz', ) - entity = xbids.get_entity(fname, "space") - assert entity == "native" - entity = xbids.get_entity(fname, "desc") - assert entity == "preproc" - entity = xbids.get_entity(fname, "fail") + entity = xbids.get_entity(fname, 'space') + assert entity == 'native' + entity = xbids.get_entity(fname, 'desc') + assert entity == 'preproc' + entity = xbids.get_entity(fname, 'fail') assert entity is None fname = os.path.join( - datasets["ds001419"], - "sub-01", - "fmap", - "sub-01_fmapid-auto00001_desc-coeff1_fieldmap.nii.gz", + datasets['ds001419'], + 'sub-01', + 'fmap', + 'sub-01_fmapid-auto00001_desc-coeff1_fieldmap.nii.gz', ) - with pytest.raises(ValueError, match="Unknown space"): - xbids.get_entity(fname, "space") + with pytest.raises(ValueError, match='Unknown space'): + xbids.get_entity(fname, 'space') def test_group_across_runs(): """Test group_across_runs.""" in_files = [ - "/path/sub-01_task-axcpt_run-03_bold.nii.gz", - "/path/sub-01_task-rest_run-03_bold.nii.gz", - "/path/sub-01_task-rest_run-01_bold.nii.gz", - "/path/sub-01_task-axcpt_run-02_bold.nii.gz", - "/path/sub-01_task-rest_run-02_bold.nii.gz", - "/path/sub-01_task-axcpt_run-01_bold.nii.gz", + '/path/sub-01_task-axcpt_run-03_bold.nii.gz', + '/path/sub-01_task-rest_run-03_bold.nii.gz', + '/path/sub-01_task-rest_run-01_bold.nii.gz', + '/path/sub-01_task-axcpt_run-02_bold.nii.gz', + '/path/sub-01_task-rest_run-02_bold.nii.gz', + '/path/sub-01_task-axcpt_run-01_bold.nii.gz', ] grouped_files = xbids.group_across_runs(in_files) assert isinstance(grouped_files, list) assert len(grouped_files[0]) == 3 assert grouped_files[0] == [ - "/path/sub-01_task-axcpt_run-01_bold.nii.gz", - "/path/sub-01_task-axcpt_run-02_bold.nii.gz", - "/path/sub-01_task-axcpt_run-03_bold.nii.gz", + '/path/sub-01_task-axcpt_run-01_bold.nii.gz', + '/path/sub-01_task-axcpt_run-02_bold.nii.gz', + '/path/sub-01_task-axcpt_run-03_bold.nii.gz', ] assert len(grouped_files[1]) == 3 assert grouped_files[1] == [ - "/path/sub-01_task-rest_run-01_bold.nii.gz", - "/path/sub-01_task-rest_run-02_bold.nii.gz", - "/path/sub-01_task-rest_run-03_bold.nii.gz", + '/path/sub-01_task-rest_run-01_bold.nii.gz', + '/path/sub-01_task-rest_run-02_bold.nii.gz', + '/path/sub-01_task-rest_run-03_bold.nii.gz', ] in_files = [ - "/path/sub-01_task-rest_dir-LR_run-2_bold.nii.gz", - "/path/sub-01_task-rest_dir-RL_run-1_bold.nii.gz", - "/path/sub-01_task-axcpt_dir-LR_bold.nii.gz", - "/path/sub-01_task-rest_dir-RL_run-2_bold.nii.gz", - "/path/sub-01_task-rest_dir-LR_run-1_bold.nii.gz", - "/path/sub-01_task-axcpt_dir-RL_bold.nii.gz", + '/path/sub-01_task-rest_dir-LR_run-2_bold.nii.gz', + '/path/sub-01_task-rest_dir-RL_run-1_bold.nii.gz', + '/path/sub-01_task-axcpt_dir-LR_bold.nii.gz', + '/path/sub-01_task-rest_dir-RL_run-2_bold.nii.gz', + '/path/sub-01_task-rest_dir-LR_run-1_bold.nii.gz', + '/path/sub-01_task-axcpt_dir-RL_bold.nii.gz', ] grouped_files = xbids.group_across_runs(in_files) assert isinstance(grouped_files, list) assert len(grouped_files[0]) == 2 assert grouped_files[0] == [ - "/path/sub-01_task-axcpt_dir-LR_bold.nii.gz", - "/path/sub-01_task-axcpt_dir-RL_bold.nii.gz", + '/path/sub-01_task-axcpt_dir-LR_bold.nii.gz', + '/path/sub-01_task-axcpt_dir-RL_bold.nii.gz', ] assert len(grouped_files[1]) == 4 assert grouped_files[1] == [ - "/path/sub-01_task-rest_dir-LR_run-1_bold.nii.gz", - "/path/sub-01_task-rest_dir-RL_run-1_bold.nii.gz", - "/path/sub-01_task-rest_dir-LR_run-2_bold.nii.gz", - 
"/path/sub-01_task-rest_dir-RL_run-2_bold.nii.gz", + '/path/sub-01_task-rest_dir-LR_run-1_bold.nii.gz', + '/path/sub-01_task-rest_dir-RL_run-1_bold.nii.gz', + '/path/sub-01_task-rest_dir-LR_run-2_bold.nii.gz', + '/path/sub-01_task-rest_dir-RL_run-2_bold.nii.gz', ] diff --git a/xcp_d/tests/test_utils_boilerplate.py b/xcp_d/tests/test_utils_boilerplate.py index 808e26332..87b3e7138 100644 --- a/xcp_d/tests/test_utils_boilerplate.py +++ b/xcp_d/tests/test_utils_boilerplate.py @@ -16,52 +16,52 @@ def test_describe_motion_parameters(): band_stop_max=None, TR=0.8, ) - assert "filtered to remove signals" not in desc + assert 'filtered to remove signals' not in desc desc = boilerplate.describe_motion_parameters( - motion_filter_type="notch", + motion_filter_type='notch', motion_filter_order=1, band_stop_min=12, band_stop_max=20, TR=0.8, ) - assert "band-stop filtered to remove signals" in desc - assert "automatically modified" not in desc + assert 'band-stop filtered to remove signals' in desc + assert 'automatically modified' not in desc desc = boilerplate.describe_motion_parameters( - motion_filter_type="notch", + motion_filter_type='notch', motion_filter_order=1, band_stop_min=12, band_stop_max=20, TR=3, ) - assert "band-stop filtered to remove signals" in desc - assert "automatically modified" in desc + assert 'band-stop filtered to remove signals' in desc + assert 'automatically modified' in desc desc = boilerplate.describe_motion_parameters( - motion_filter_type="lp", + motion_filter_type='lp', motion_filter_order=1, band_stop_min=12, band_stop_max=20, TR=0.8, ) - assert "low-pass filtered" in desc - assert "automatically modified" not in desc + assert 'low-pass filtered' in desc + assert 'automatically modified' not in desc desc = boilerplate.describe_motion_parameters( - motion_filter_type="lp", + motion_filter_type='lp', motion_filter_order=1, band_stop_min=12, band_stop_max=20, TR=3, ) - assert "low-pass filtered" in desc - assert "automatically modified" in desc + assert 'low-pass filtered' in desc + assert 'automatically modified' in desc def test_describe_censoring(): """Test boilerplate.describe_censoring.""" - motion_filter_type = "notch" + motion_filter_type = 'notch' fd_thresh = 0.2 exact_scans = [] desc = boilerplate.describe_censoring( @@ -70,7 +70,7 @@ def test_describe_censoring(): fd_thresh=fd_thresh, exact_scans=exact_scans, ) - assert "Volumes with filtered framewise displacement" in desc + assert 'Volumes with filtered framewise displacement' in desc motion_filter_type = None fd_thresh = 0.2 @@ -81,7 +81,7 @@ def test_describe_censoring(): fd_thresh=fd_thresh, exact_scans=exact_scans, ) - assert "Volumes with framewise displacement" in desc + assert 'Volumes with framewise displacement' in desc motion_filter_type = None fd_thresh = 0.2 @@ -92,8 +92,8 @@ def test_describe_censoring(): fd_thresh=fd_thresh, exact_scans=exact_scans, ) - assert "Volumes with framewise displacement" in desc - assert "limited to 100, 200, and 300 volumes" in desc + assert 'Volumes with framewise displacement' in desc + assert 'limited to 100, 200, and 300 volumes' in desc motion_filter_type = None fd_thresh = 0 @@ -104,10 +104,10 @@ def test_describe_censoring(): fd_thresh=fd_thresh, exact_scans=exact_scans, ) - assert "Volumes were randomly selected for censoring" in desc - assert "limited to 100, 200, and 300 volumes" in desc + assert 'Volumes were randomly selected for censoring' in desc + assert 'limited to 100, 200, and 300 volumes' in desc - motion_filter_type = "notch" + motion_filter_type 
= 'notch' fd_thresh = 0 exact_scans = [100, 200, 300] desc = boilerplate.describe_censoring( @@ -116,28 +116,28 @@ def test_describe_censoring(): fd_thresh=fd_thresh, exact_scans=exact_scans, ) - assert "Volumes were randomly selected for censoring" in desc - assert "limited to 100, 200, and 300 volumes" in desc + assert 'Volumes were randomly selected for censoring' in desc + assert 'limited to 100, 200, and 300 volumes' in desc def test_describe_regression(tmp_path_factory): """Test boilerplate.describe_regression.""" - _check_describe_regression_result("24P", "24 nuisance regressors were selected") - _check_describe_regression_result("27P", "27 nuisance regressors were selected") - _check_describe_regression_result("36P", "36 nuisance regressors were selected") - _check_describe_regression_result("acompcor", "The top 5 aCompCor principal components") - _check_describe_regression_result("acompcor_gsr", "The top 5 aCompCor principal components") - _check_describe_regression_result("aroma", "AROMA motion-labeled components") - _check_describe_regression_result("aroma_gsr", "AROMA motion-labeled components") - _check_describe_regression_result(None, "No nuisance regression was performed") + _check_describe_regression_result('24P', '24 nuisance regressors were selected') + _check_describe_regression_result('27P', '27 nuisance regressors were selected') + _check_describe_regression_result('36P', '36 nuisance regressors were selected') + _check_describe_regression_result('acompcor', 'The top 5 aCompCor principal components') + _check_describe_regression_result('acompcor_gsr', 'The top 5 aCompCor principal components') + _check_describe_regression_result('aroma', 'AROMA motion-labeled components') + _check_describe_regression_result('aroma_gsr', 'AROMA motion-labeled components') + _check_describe_regression_result(None, 'No nuisance regression was performed') # Try with motion filter - config = load_data.readable("nuisance/24P.yml") + config = load_data.readable('nuisance/24P.yml') config = yaml.safe_load(config.read_text()) result = boilerplate.describe_regression( confounds_config=config, - motion_filter_type="lp", + motion_filter_type='lp', motion_filter_order=4, band_stop_min=6, band_stop_max=0, @@ -147,9 +147,9 @@ def test_describe_regression(tmp_path_factory): assert isinstance(result, str) # Fails. Need to replace with a better test. 
- with pytest.raises(TypeError, match="string indices must be integers"): + with pytest.raises(TypeError, match='string indices must be integers'): boilerplate.describe_regression( - confounds_config="test", + confounds_config='test', motion_filter_type=None, motion_filter_order=0, band_stop_min=0, @@ -161,7 +161,7 @@ def test_describe_regression(tmp_path_factory): def _check_describe_regression_result(config, match): if isinstance(config, str): - config = load_data.readable(f"nuisance/{config}.yml") + config = load_data.readable(f'nuisance/{config}.yml') config = yaml.safe_load(config.read_text()) result = boilerplate.describe_regression( @@ -181,16 +181,16 @@ def _check_describe_regression_result(config, match): def test_describe_atlases(): """Test boilerplate.describe_atlases.""" - atlases = ["4S156Parcels", "4S256Parcels", "Glasser"] + atlases = ['4S156Parcels', '4S256Parcels', 'Glasser'] atlas_desc = boilerplate.describe_atlases(atlases) - assert "156 and 256 parcels" in atlas_desc - assert "Glasser" in atlas_desc + assert '156 and 256 parcels' in atlas_desc + assert 'Glasser' in atlas_desc - atlases = ["Glasser", "Tian"] + atlases = ['Glasser', 'Tian'] atlas_desc = boilerplate.describe_atlases(atlases) - assert "Tian" in atlas_desc - assert "Glasser" in atlas_desc + assert 'Tian' in atlas_desc + assert 'Glasser' in atlas_desc # This no longer fails. It just adds the missing atlas to the description. - atlases = ["4S156Parcels", "4S256Parcels", "Glasser", "fail"] - assert "the fail atlas" in boilerplate.describe_atlases(atlases) + atlases = ['4S156Parcels', '4S256Parcels', 'Glasser', 'fail'] + assert 'the fail atlas' in boilerplate.describe_atlases(atlases) diff --git a/xcp_d/tests/test_utils_concatenation.py b/xcp_d/tests/test_utils_concatenation.py index bbe6baf90..8d771c655 100644 --- a/xcp_d/tests/test_utils_concatenation.py +++ b/xcp_d/tests/test_utils_concatenation.py @@ -11,13 +11,13 @@ def test_concatenate_tsvs(ds001419_data, tmp_path_factory): """Test xcp_d.utils.concatenation.concatenate_tsvs.""" - tmpdir = tmp_path_factory.mktemp("test_concatenate_tsvs") + tmpdir = tmp_path_factory.mktemp('test_concatenate_tsvs') n_repeats = 3 # First, concatenate TSVs with headers - tsv_file_with_header = ds001419_data["confounds_file"] - concat_tsv_file_with_header = os.path.join(tmpdir, "concat_with_header.tsv") + tsv_file_with_header = ds001419_data['confounds_file'] + concat_tsv_file_with_header = os.path.join(tmpdir, 'concat_with_header.tsv') concatenation.concatenate_tsvs( [tsv_file_with_header] * n_repeats, out_file=concat_tsv_file_with_header, @@ -29,11 +29,11 @@ def test_concatenate_tsvs(ds001419_data, tmp_path_factory): assert concat_tsv_df.shape[1] == tsv_df.shape[1] # Now, concatenate TSVs without headers - tsv_file_without_header = os.path.join(tmpdir, "without_header.tsv") + tsv_file_without_header = os.path.join(tmpdir, 'without_header.tsv') data = pd.read_table(tsv_file_with_header).to_numpy() - np.savetxt(tsv_file_without_header, data, fmt="%.5f", delimiter="\t") + np.savetxt(tsv_file_without_header, data, fmt='%.5f', delimiter='\t') - concat_tsv_file_without_header = os.path.join(tmpdir, "concat_without_header.tsv") + concat_tsv_file_without_header = os.path.join(tmpdir, 'concat_without_header.tsv') concatenation.concatenate_tsvs( [tsv_file_without_header] * n_repeats, out_file=concat_tsv_file_without_header, @@ -50,13 +50,13 @@ def test_concatenate_niimgs(ds001419_data, tmp_path_factory): We don't have non-dtseries CIFTIs to test, so this test is a little limited. 
""" - tmpdir = tmp_path_factory.mktemp("test_concatenate_niimgs") + tmpdir = tmp_path_factory.mktemp('test_concatenate_niimgs') n_repeats = 3 # First, concatenate niftis - nifti_file = ds001419_data["nifti_file"] - concat_nifti_file = os.path.join(tmpdir, "concat_nifti.nii.gz") + nifti_file = ds001419_data['nifti_file'] + concat_nifti_file = os.path.join(tmpdir, 'concat_nifti.nii.gz') concatenation.concatenate_niimgs( [nifti_file] * n_repeats, out_file=concat_nifti_file, @@ -68,8 +68,8 @@ def test_concatenate_niimgs(ds001419_data, tmp_path_factory): assert concat_nifti_img.shape[3] == nifti_img.shape[3] * n_repeats # Now, concatenate dtseries ciftis - cifti_file = ds001419_data["cifti_file"] - concat_cifti_file = os.path.join(tmpdir, "concat_cifti.dtseries.nii") + cifti_file = ds001419_data['cifti_file'] + concat_cifti_file = os.path.join(tmpdir, 'concat_cifti.dtseries.nii') concatenation.concatenate_niimgs( [cifti_file] * n_repeats, out_file=concat_cifti_file, diff --git a/xcp_d/tests/test_utils_confounds.py b/xcp_d/tests/test_utils_confounds.py index 9a734147b..f5ed504ad 100644 --- a/xcp_d/tests/test_utils_confounds.py +++ b/xcp_d/tests/test_utils_confounds.py @@ -16,7 +16,7 @@ def test_modify_motion_filter(): with pytest.warns(match="The parameter 'band_stop_max' will be ignored."): band_stop_min2, _, is_modified = confounds._modify_motion_filter( - motion_filter_type="lp", + motion_filter_type='lp', band_stop_min=band_stop_min, band_stop_max=18, TR=TR, @@ -28,13 +28,13 @@ def test_modify_motion_filter(): with pytest.warns( UserWarning, match=re.escape( - "Low-pass filter frequency is above Nyquist frequency (37.5 BPM), " - "so it has been changed (42 --> 33.0 BPM)." + 'Low-pass filter frequency is above Nyquist frequency (37.5 BPM), ' + 'so it has been changed (42 --> 33.0 BPM).' ), ): band_stop_min2, _, is_modified = confounds._modify_motion_filter( TR=TR, # 1.25 Hz - motion_filter_type="lp", + motion_filter_type='lp', band_stop_min=42, # 0.7 Hz > (1.25 / 2) band_stop_max=None, ) @@ -47,13 +47,13 @@ def test_modify_motion_filter(): with pytest.warns( UserWarning, match=re.escape( - "One or both filter frequencies are above Nyquist frequency (37.5 BPM), " - "so they have been changed (42 --> 33.0, 45 --> 30.0 BPM)." + 'One or both filter frequencies are above Nyquist frequency (37.5 BPM), ' + 'so they have been changed (42 --> 33.0, 45 --> 30.0 BPM).' 
), ): band_stop_min2, band_stop_max2, is_modified = confounds._modify_motion_filter( TR=TR, - motion_filter_type="notch", + motion_filter_type='notch', band_stop_min=42, band_stop_max=45, # 0.7 Hz > (1.25 / 2) ) @@ -65,7 +65,7 @@ def test_modify_motion_filter(): # Notch without modification band_stop_min2, band_stop_max2, is_modified = confounds._modify_motion_filter( TR=TR, - motion_filter_type="notch", + motion_filter_type='notch', band_stop_min=30, band_stop_max=33, ) @@ -86,15 +86,15 @@ def test_motion_filtering_lp(): b, a = signal.butter( 1, low_pass, - btype="lowpass", - output="ba", + btype='lowpass', + output='ba', fs=1 / TR, ) lowpass_data_true = signal.filtfilt( b, a, raw_data, - padtype="constant", + padtype='constant', padlen=raw_data.size - 1, ) @@ -103,7 +103,7 @@ def test_motion_filtering_lp(): lowpass_data_test = confounds.filter_motion( raw_data, TR=TR, - motion_filter_type="lp", + motion_filter_type='lp', band_stop_min=band_stop_min, band_stop_max=None, motion_filter_order=2, @@ -117,7 +117,7 @@ def test_motion_filtering_lp(): confounds.filter_motion( raw_data, TR=TR, - motion_filter_type="fail", + motion_filter_type='fail', band_stop_min=band_stop_min, band_stop_max=None, motion_filter_order=2, @@ -142,7 +142,7 @@ def test_motion_filtering_notch(): b, a, raw_data, - padtype="constant", + padtype='constant', padlen=raw_data.size - 1, ) @@ -151,7 +151,7 @@ def test_motion_filtering_notch(): notch_data_test = confounds.filter_motion( raw_data, TR=TR, # 1.25 Hz - motion_filter_type="notch", + motion_filter_type='notch', band_stop_min=band_stop_min, band_stop_max=band_stop_max, motion_filter_order=4, diff --git a/xcp_d/tests/test_utils_doc.py b/xcp_d/tests/test_utils_doc.py index fb75d5a43..580dc6cbe 100644 --- a/xcp_d/tests/test_utils_doc.py +++ b/xcp_d/tests/test_utils_doc.py @@ -7,6 +7,6 @@ def test_download_example_data(tmp_path_factory): """Test download_example_data.""" - tmpdir = tmp_path_factory.mktemp("test_download_example_data") + tmpdir = tmp_path_factory.mktemp('test_download_example_data') example_data_dir = doc.download_example_data(out_dir=tmpdir) assert os.path.isdir(example_data_dir) diff --git a/xcp_d/tests/test_utils_execsummary.py b/xcp_d/tests/test_utils_execsummary.py index 65408e115..98a86dc5c 100644 --- a/xcp_d/tests/test_utils_execsummary.py +++ b/xcp_d/tests/test_utils_execsummary.py @@ -11,12 +11,12 @@ def test_make_mosaic(tmp_path_factory): """Test make_mosaic.""" - tmpdir = tmp_path_factory.mktemp("test_make_mosaic") + tmpdir = tmp_path_factory.mktemp('test_make_mosaic') # Make a simple PNG file - png_file = os.path.join(tmpdir, "temp.png") + png_file = os.path.join(tmpdir, 'temp.png') fig, ax = plt.subplots(figsize=(1, 1)) - ax.set_facecolor("yellow") + ax.set_facecolor('yellow') ax.set_xticks([]) ax.set_yticks([]) fig.savefig(png_file) @@ -31,22 +31,22 @@ def test_make_mosaic(tmp_path_factory): def test_modify_brainsprite_scene_template(tmp_path_factory): """Test modify_brainsprite_scene_template.""" - tmpdir = tmp_path_factory.mktemp("test_modify_brainsprite_scene_template") + tmpdir = tmp_path_factory.mktemp('test_modify_brainsprite_scene_template') brainsprite_scene_template = str( load_data( - "executive_summary_scenes/brainsprite_template.scene.gz", + 'executive_summary_scenes/brainsprite_template.scene.gz', ) ) with chdir(tmpdir): scene_file = execsummary.modify_brainsprite_scene_template( slice_number=10, - anat_file="anat.nii.gz", - rh_pial_surf="rh_pial.surf.gii", - lh_pial_surf="lh_pial.surf.gii", - rh_wm_surf="rh_wm.surf.gii", - 
lh_wm_surf="lh_wm.surf.gii", + anat_file='anat.nii.gz', + rh_pial_surf='rh_pial.surf.gii', + lh_pial_surf='lh_pial.surf.gii', + rh_wm_surf='rh_wm.surf.gii', + lh_wm_surf='lh_wm.surf.gii', scene_template=brainsprite_scene_template, ) @@ -55,17 +55,17 @@ def test_modify_brainsprite_scene_template(tmp_path_factory): def test_modify_pngs_scene_template(tmp_path_factory): """Test modify_pngs_scene_template.""" - tmpdir = tmp_path_factory.mktemp("test_modify_pngs_scene_template") + tmpdir = tmp_path_factory.mktemp('test_modify_pngs_scene_template') - pngs_scene_template = str(load_data("executive_summary_scenes/pngs_template.scene.gz")) + pngs_scene_template = str(load_data('executive_summary_scenes/pngs_template.scene.gz')) with chdir(tmpdir): scene_file = execsummary.modify_pngs_scene_template( - anat_file="anat.nii.gz", - rh_pial_surf="rh_pial.surf.gii", - lh_pial_surf="lh_pial.surf.gii", - rh_wm_surf="rh_wm.surf.gii", - lh_wm_surf="lh_wm.surf.gii", + anat_file='anat.nii.gz', + rh_pial_surf='rh_pial.surf.gii', + lh_pial_surf='lh_pial.surf.gii', + rh_wm_surf='rh_wm.surf.gii', + lh_wm_surf='lh_wm.surf.gii', scene_template=pngs_scene_template, ) @@ -74,7 +74,7 @@ def test_modify_pngs_scene_template(tmp_path_factory): def test_get_n_frames(ds001419_data): """Test get_n_frames.""" - anat_file = ds001419_data["brain_mask_file"] + anat_file = ds001419_data['brain_mask_file'] frame_numbers = execsummary.get_n_frames(anat_file) assert len(frame_numbers) == 183 diff --git a/xcp_d/tests/test_utils_plotting.py b/xcp_d/tests/test_utils_plotting.py index e70a8bbe3..af3b16c51 100644 --- a/xcp_d/tests/test_utils_plotting.py +++ b/xcp_d/tests/test_utils_plotting.py @@ -10,23 +10,23 @@ def test_plot_fmri_es(ds001419_data, tmp_path_factory): """Run smoke test on xcp_d.utils.plotting.plot_fmri_es.""" - tmpdir = tmp_path_factory.mktemp("test_plot_fmri_es") + tmpdir = tmp_path_factory.mktemp('test_plot_fmri_es') - preprocessed_bold = ds001419_data["cifti_file"] - denoised_interpolated_bold = ds001419_data["cifti_file"] + preprocessed_bold = ds001419_data['cifti_file'] + denoised_interpolated_bold = ds001419_data['cifti_file'] # Using unfiltered FD instead of calculating filtered version. 
- motion_file = ds001419_data["confounds_file"] - preprocessed_figure = os.path.join(tmpdir, "unprocessed.svg") - denoised_figure = os.path.join(tmpdir, "processed.svg") + motion_file = ds001419_data['confounds_file'] + preprocessed_figure = os.path.join(tmpdir, 'unprocessed.svg') + denoised_figure = os.path.join(tmpdir, 'processed.svg') t_r = 2 n_volumes = pd.read_table(motion_file).shape[0] tmask_arr = np.zeros(n_volumes, dtype=bool) tmask_arr[:10] = True # flag first 10 volumes as bad tmask_arr = tmask_arr.astype(int) - temporal_mask = os.path.join(tmpdir, "temporal_mask.tsv") - pd.DataFrame(columns=["framewise_displacement"], data=tmask_arr).to_csv( - temporal_mask, sep="\t", index=False + temporal_mask = os.path.join(tmpdir, 'temporal_mask.tsv') + pd.DataFrame(columns=['framewise_displacement'], data=tmask_arr).to_csv( + temporal_mask, sep='\t', index=False ) out_file1, out_file2 = plotting.plot_fmri_es( diff --git a/xcp_d/tests/test_utils_restingstate.py b/xcp_d/tests/test_utils_restingstate.py index 06191b989..11efc2d14 100644 --- a/xcp_d/tests/test_utils_restingstate.py +++ b/xcp_d/tests/test_utils_restingstate.py @@ -15,8 +15,8 @@ def test_compute_alff(ds001419_data): """ # Get the file names - bold_file = ds001419_data["nifti_file"] - bold_mask = ds001419_data["brain_mask_file"] + bold_file = ds001419_data['nifti_file'] + bold_mask = ds001419_data['brain_mask_file'] # Let's initialize the ALFF node TR = 3 diff --git a/xcp_d/tests/test_utils_utils.py b/xcp_d/tests/test_utils_utils.py index 3a165432f..e577d41cb 100644 --- a/xcp_d/tests/test_utils_utils.py +++ b/xcp_d/tests/test_utils_utils.py @@ -10,9 +10,9 @@ def test_estimate_brain_radius(ds001419_data): """Ensure that the brain radius estimation function returns the right value.""" - bold_mask = ds001419_data["brain_mask_file"] + bold_mask = ds001419_data['brain_mask_file'] - radius = utils.estimate_brain_radius(bold_mask, head_radius="auto") + radius = utils.estimate_brain_radius(bold_mask, head_radius='auto') assert radius == 77.39268749395897 radius = utils.estimate_brain_radius(bold_mask, head_radius=50) @@ -67,7 +67,7 @@ def test_denoise_with_nilearn(): data_arr += confound_arr confounds_df = pd.DataFrame( confound_timeseries, - columns=[f"confound_{i}" for i in range(n_confounds)], + columns=[f'confound_{i}' for i in range(n_confounds)], ) # Check that signals are present in the "raw" data at this point @@ -95,13 +95,13 @@ def test_denoise_with_nilearn(): # First, try out filtering without censoring or denoising params = { - "confounds": None, - "voxelwise_confounds": None, - "sample_mask": np.ones(n_volumes, dtype=bool), - "low_pass": low_pass, - "high_pass": high_pass, - "filter_order": filter_order, - "TR": TR, + 'confounds': None, + 'voxelwise_confounds': None, + 'sample_mask': np.ones(n_volumes, dtype=bool), + 'low_pass': low_pass, + 'high_pass': high_pass, + 'filter_order': filter_order, + 'TR': TR, } out_arr = utils.denoise_with_nilearn(preprocessed_bold=data_arr, **params) assert out_arr.shape == (n_volumes, n_voxels) @@ -113,13 +113,13 @@ def test_denoise_with_nilearn(): sample_mask[10:20] = False sample_mask[150:160] = False params = { - "confounds": confounds_df, - "voxelwise_confounds": None, - "sample_mask": sample_mask, - "low_pass": None, - "high_pass": None, - "filter_order": 0, - "TR": TR, + 'confounds': confounds_df, + 'voxelwise_confounds': None, + 'sample_mask': sample_mask, + 'low_pass': None, + 'high_pass': None, + 'filter_order': 0, + 'TR': TR, } out_arr = 
utils.denoise_with_nilearn(preprocessed_bold=data_arr, **params) assert out_arr.shape == (n_volumes, n_voxels) @@ -133,13 +133,13 @@ def test_denoise_with_nilearn(): # Denoise without censoring or filtering params = { - "confounds": confounds_df, - "voxelwise_confounds": None, - "sample_mask": np.ones(n_volumes, dtype=bool), - "low_pass": None, - "high_pass": None, - "filter_order": 0, - "TR": TR, + 'confounds': confounds_df, + 'voxelwise_confounds': None, + 'sample_mask': np.ones(n_volumes, dtype=bool), + 'low_pass': None, + 'high_pass': None, + 'filter_order': 0, + 'TR': TR, } out_arr = utils.denoise_with_nilearn(preprocessed_bold=data_arr, **params) assert out_arr.shape == (n_volumes, n_voxels) @@ -154,13 +154,13 @@ def test_denoise_with_nilearn(): sample_mask[10:20] = False sample_mask[150:160] = False params = { - "confounds": None, - "voxelwise_confounds": None, - "sample_mask": sample_mask, - "low_pass": low_pass, - "high_pass": high_pass, - "filter_order": filter_order, - "TR": TR, + 'confounds': None, + 'voxelwise_confounds': None, + 'sample_mask': sample_mask, + 'low_pass': low_pass, + 'high_pass': high_pass, + 'filter_order': filter_order, + 'TR': TR, } out_arr = utils.denoise_with_nilearn(preprocessed_bold=data_arr, **params) assert out_arr.shape == (n_volumes, n_voxels) @@ -182,13 +182,13 @@ def test_denoise_with_nilearn(): # Run without denoising or filtering (censoring + interpolation only) params = { - "confounds": None, - "voxelwise_confounds": None, - "sample_mask": sample_mask, - "low_pass": None, - "high_pass": None, - "filter_order": 0, - "TR": TR, + 'confounds': None, + 'voxelwise_confounds': None, + 'sample_mask': sample_mask, + 'low_pass': None, + 'high_pass': None, + 'filter_order': 0, + 'TR': TR, } out_arr = utils.denoise_with_nilearn(preprocessed_bold=data_arr, **params) assert out_arr.shape == (n_volumes, n_voxels) @@ -220,7 +220,7 @@ def test_denoise_with_nilearn_voxelwise(): confounds = np.random.random((n_volumes, n_confounds)) confounds_df = pd.DataFrame( confounds, - columns=[f"confound_{i}" for i in range(n_confounds)], + columns=[f'confound_{i}' for i in range(n_confounds)], ) voxelwise_confounds = [ np.random.random((n_volumes, n_voxels)) for _ in range(n_voxelwise_confounds) @@ -230,52 +230,52 @@ def test_denoise_with_nilearn_voxelwise(): # Denoising with bandpass filtering and censoring params = { - "confounds": confounds_df, - "voxelwise_confounds": voxelwise_confounds, - "sample_mask": sample_mask, - "low_pass": low_pass, - "high_pass": high_pass, - "filter_order": filter_order, - "TR": TR, + 'confounds': confounds_df, + 'voxelwise_confounds': voxelwise_confounds, + 'sample_mask': sample_mask, + 'low_pass': low_pass, + 'high_pass': high_pass, + 'filter_order': filter_order, + 'TR': TR, } out_arr = utils.denoise_with_nilearn(preprocessed_bold=data_arr, **params) assert out_arr.shape == (n_volumes, n_voxels) # Denoising without bandpass filtering params = { - "confounds": confounds_df, - "voxelwise_confounds": voxelwise_confounds, - "sample_mask": sample_mask, - "low_pass": None, - "high_pass": None, - "filter_order": None, - "TR": TR, + 'confounds': confounds_df, + 'voxelwise_confounds': voxelwise_confounds, + 'sample_mask': sample_mask, + 'low_pass': None, + 'high_pass': None, + 'filter_order': None, + 'TR': TR, } out_arr = utils.denoise_with_nilearn(preprocessed_bold=data_arr, **params) assert out_arr.shape == (n_volumes, n_voxels) # Denoising with bandpass filtering but no general confounds params = { - "confounds": None, - "voxelwise_confounds": 
voxelwise_confounds, - "sample_mask": sample_mask, - "low_pass": low_pass, - "high_pass": high_pass, - "filter_order": filter_order, - "TR": TR, + 'confounds': None, + 'voxelwise_confounds': voxelwise_confounds, + 'sample_mask': sample_mask, + 'low_pass': low_pass, + 'high_pass': high_pass, + 'filter_order': filter_order, + 'TR': TR, } out_arr = utils.denoise_with_nilearn(preprocessed_bold=data_arr, **params) assert out_arr.shape == (n_volumes, n_voxels) # Denoising without bandpass filtering or general confounds params = { - "confounds": None, - "voxelwise_confounds": voxelwise_confounds, - "sample_mask": sample_mask, - "low_pass": None, - "high_pass": None, - "filter_order": None, - "TR": TR, + 'confounds': None, + 'voxelwise_confounds': voxelwise_confounds, + 'sample_mask': sample_mask, + 'low_pass': None, + 'high_pass': None, + 'filter_order': None, + 'TR': TR, } out_arr = utils.denoise_with_nilearn(preprocessed_bold=data_arr, **params) assert out_arr.shape == (n_volumes, n_voxels) @@ -324,23 +324,23 @@ def _check_signal(data, signals, sample_mask, atol=0.001): def test_list_to_str(): """Test the list_to_str function.""" - string = utils.list_to_str(["a"]) - assert string == "a" + string = utils.list_to_str(['a']) + assert string == 'a' - string = utils.list_to_str(["a", "b"]) - assert string == "a and b" + string = utils.list_to_str(['a', 'b']) + assert string == 'a and b' - string = utils.list_to_str(["a", "b", "c"]) - assert string == "a, b, and c" + string = utils.list_to_str(['a', 'b', 'c']) + assert string == 'a, b, and c' - with pytest.raises(ValueError, match="Zero-length list provided."): + with pytest.raises(ValueError, match='Zero-length list provided.'): utils.list_to_str([]) def test_get_bold2std_and_t1w_xfms(ds001419_data): """Test get_bold2std_and_t1w_xfms.""" - bold_file_nlin6asym = ds001419_data["nifti_file"] - nlin6asym_to_anat_xfm = ds001419_data["template_to_anat_xfm"] + bold_file_nlin6asym = ds001419_data['nifti_file'] + nlin6asym_to_anat_xfm = ds001419_data['template_to_anat_xfm'] # MNI152NLin6Asym --> MNI152NLin2009cAsym/T1w ( @@ -359,12 +359,12 @@ def test_get_bold2std_and_t1w_xfms(ds001419_data): # MNI152NLin2009cAsym --> MNI152NLin2009cAsym/T1w bold_file_nlin2009c = bold_file_nlin6asym.replace( - "space-MNI152NLin6Asym_", - "space-MNI152NLin2009cAsym_", + 'space-MNI152NLin6Asym_', + 'space-MNI152NLin2009cAsym_', ) nlin2009c_to_anat_xfm = nlin6asym_to_anat_xfm.replace( - "from-MNI152NLin6Asym_", - "from-MNI152NLin2009cAsym_", + 'from-MNI152NLin6Asym_', + 'from-MNI152NLin2009cAsym_', ) ( xforms_to_mni, @@ -382,12 +382,12 @@ def test_get_bold2std_and_t1w_xfms(ds001419_data): # MNIInfant --> MNI152NLin2009cAsym/T1w bold_file_infant = bold_file_nlin6asym.replace( - "space-MNI152NLin6Asym_", - "space-MNIInfant_cohort-1_", + 'space-MNI152NLin6Asym_', + 'space-MNIInfant_cohort-1_', ) infant_to_anat_xfm = nlin6asym_to_anat_xfm.replace( - "from-MNI152NLin6Asym_", - "from-MNIInfant+1_", + 'from-MNI152NLin6Asym_', + 'from-MNIInfant+1_', ) ( xforms_to_mni, @@ -404,7 +404,7 @@ def test_get_bold2std_and_t1w_xfms(ds001419_data): assert len(xforms_to_t1w_invert) == 1 # T1w --> MNI152NLin2009cAsym/T1w - bold_file_t1w = bold_file_nlin6asym.replace("space-MNI152NLin6Asym_", "space-T1w_") + bold_file_t1w = bold_file_nlin6asym.replace('space-MNI152NLin6Asym_', 'space-T1w_') with pytest.raises(ValueError, match="BOLD space 'T1w' not supported."): utils.get_bold2std_and_t1w_xfms( bold_file_t1w, @@ -412,7 +412,7 @@ def test_get_bold2std_and_t1w_xfms(ds001419_data): ) # T1w --> 
MNI152NLin6Asym --> MNI152NLin2009cAsym/T1w - bold_file_t1w = bold_file_nlin6asym.replace("space-MNI152NLin6Asym_", "space-T1w_") + bold_file_t1w = bold_file_nlin6asym.replace('space-MNI152NLin6Asym_', 'space-T1w_') with pytest.raises(ValueError, match="BOLD space 'T1w' not supported."): utils.get_bold2std_and_t1w_xfms( bold_file_t1w, @@ -420,7 +420,7 @@ def test_get_bold2std_and_t1w_xfms(ds001419_data): ) # native --> MNI152NLin2009cAsym/T1w - bold_file_native = bold_file_nlin6asym.replace("space-MNI152NLin6Asym_", "") + bold_file_native = bold_file_nlin6asym.replace('space-MNI152NLin6Asym_', '') with pytest.raises(ValueError, match="BOLD space 'native' not supported."): utils.get_bold2std_and_t1w_xfms( bold_file_native, @@ -428,7 +428,7 @@ def test_get_bold2std_and_t1w_xfms(ds001419_data): ) # native --> MNI152NLin6Asym --> MNI152NLin2009cAsym/T1w - bold_file_native = bold_file_nlin6asym.replace("space-MNI152NLin6Asym_", "") + bold_file_native = bold_file_nlin6asym.replace('space-MNI152NLin6Asym_', '') with pytest.raises(ValueError, match="BOLD space 'native' not supported."): utils.get_bold2std_and_t1w_xfms( bold_file_native, @@ -436,14 +436,14 @@ def test_get_bold2std_and_t1w_xfms(ds001419_data): ) # tofail --> MNI152NLin2009cAsym/T1w - bold_file_tofail = bold_file_nlin6asym.replace("space-MNI152NLin6Asym_", "space-tofail_") - with pytest.raises(ValueError, match="Transform does not match BOLD space"): + bold_file_tofail = bold_file_nlin6asym.replace('space-MNI152NLin6Asym_', 'space-tofail_') + with pytest.raises(ValueError, match='Transform does not match BOLD space'): utils.get_bold2std_and_t1w_xfms( bold_file_tofail, nlin6asym_to_anat_xfm, ) - tofail_to_anat_xfm = nlin6asym_to_anat_xfm.replace("from-MNI152NLin6Asym_", "from-tofail_") + tofail_to_anat_xfm = nlin6asym_to_anat_xfm.replace('from-MNI152NLin6Asym_', 'from-tofail_') with pytest.raises(ValueError, match="Space 'tofail'"): utils.get_bold2std_and_t1w_xfms( bold_file_tofail, @@ -456,12 +456,12 @@ def test_get_std2bold_xfms(ds001419_data): get_std2bold_xfms finds transforms to go from a source file's space to the BOLD file's space. 
""" - bold_file_nlin6asym = ds001419_data["nifti_file"] + bold_file_nlin6asym = ds001419_data['nifti_file'] # MNI152NLin6Asym --> MNI152NLin6Asym with source file containing tpl entity xforms_to_mni = utils.get_std2bold_xfms( bold_file_nlin6asym, - source_file="tpl-MNI152NLin6Asym_T1w.nii.gz", + source_file='tpl-MNI152NLin6Asym_T1w.nii.gz', source_space=None, ) assert len(xforms_to_mni) == 1 @@ -469,27 +469,27 @@ def test_get_std2bold_xfms(ds001419_data): # MNI152NLin6Asym --> MNI152NLin6Asym with source file containing space entity xforms_to_mni = utils.get_std2bold_xfms( bold_file_nlin6asym, - source_file="space-MNI152NLin6Asym_T1w.nii.gz", + source_file='space-MNI152NLin6Asym_T1w.nii.gz', source_space=None, ) assert len(xforms_to_mni) == 1 SPACES = [ - ("MNI152NLin6Asym", "MNI152NLin6Asym", 1), - ("MNI152NLin6Asym", "MNI152NLin2009cAsym", 1), - ("MNI152NLin6Asym", "MNIInfant", 2), - ("MNI152NLin2009cAsym", "MNI152NLin2009cAsym", 1), - ("MNI152NLin2009cAsym", "MNI152NLin6Asym", 1), - ("MNI152NLin2009cAsym", "MNIInfant", 1), - ("MNIInfant", "MNIInfant", 1), - ("MNIInfant", "MNI152NLin2009cAsym", 1), - ("MNIInfant", "MNI152NLin6Asym", 2), + ('MNI152NLin6Asym', 'MNI152NLin6Asym', 1), + ('MNI152NLin6Asym', 'MNI152NLin2009cAsym', 1), + ('MNI152NLin6Asym', 'MNIInfant', 2), + ('MNI152NLin2009cAsym', 'MNI152NLin2009cAsym', 1), + ('MNI152NLin2009cAsym', 'MNI152NLin6Asym', 1), + ('MNI152NLin2009cAsym', 'MNIInfant', 1), + ('MNIInfant', 'MNIInfant', 1), + ('MNIInfant', 'MNI152NLin2009cAsym', 1), + ('MNIInfant', 'MNI152NLin6Asym', 2), ] for space_check in SPACES: target_space, source_space, n_xforms = space_check bold_file_target_space = bold_file_nlin6asym.replace( - "space-MNI152NLin6Asym_", - f"space-{target_space}_", + 'space-MNI152NLin6Asym_', + f'space-{target_space}_', ) xforms_to_mni = utils.get_std2bold_xfms( bold_file_target_space, @@ -500,17 +500,17 @@ def test_get_std2bold_xfms(ds001419_data): # Outside of the supported spaces, we expect an error # No space or tpl entity in source file - with pytest.raises(ValueError, match="Unknown space"): - utils.get_std2bold_xfms(bold_file_nlin6asym, source_file="T1w.nii.gz", source_space=None) + with pytest.raises(ValueError, match='Unknown space'): + utils.get_std2bold_xfms(bold_file_nlin6asym, source_file='T1w.nii.gz', source_space=None) # MNI152NLin6Asym --> tofail - bold_file_tofail = bold_file_nlin6asym.replace("space-MNI152NLin6Asym_", "space-tofail_") + bold_file_tofail = bold_file_nlin6asym.replace('space-MNI152NLin6Asym_', 'space-tofail_') with pytest.raises(ValueError, match="BOLD space 'tofail' not supported"): - utils.get_std2bold_xfms(bold_file_tofail, source_file=None, source_space="MNI152NLin6Asym") + utils.get_std2bold_xfms(bold_file_tofail, source_file=None, source_space='MNI152NLin6Asym') # tofail --> MNI152NLin6Asym with pytest.raises(ValueError, match="Source space 'tofail' not supported"): - utils.get_std2bold_xfms(bold_file_nlin6asym, source_file=None, source_space="tofail") + utils.get_std2bold_xfms(bold_file_nlin6asym, source_file=None, source_space='tofail') def test_fwhm2sigma(): @@ -522,35 +522,35 @@ def test_fwhm2sigma(): def test_select_first(): """Test _select_first.""" - lst = ["a", "b", "c"] - assert utils._select_first(lst) == "a" + lst = ['a', 'b', 'c'] + assert utils._select_first(lst) == 'a' - lst = "abc" - assert utils._select_first(lst) == "a" + lst = 'abc' + assert utils._select_first(lst) == 'a' def test_transpose_lol(): """Test _transpose_lol.""" inputs = [ [ - ["a", "b", "c"], + ['a', 'b', 'c'], [1, 2, 3], ], 
[ - ["a", "b", "c", "d"], + ['a', 'b', 'c', 'd'], [1, 2, 3], ], ] outputs = [ [ - ["a", 1], - ["b", 2], - ["c", 3], + ['a', 1], + ['b', 2], + ['c', 3], ], [ - ["a", 1], - ["b", 2], - ["c", 3], + ['a', 1], + ['b', 2], + ['c', 3], ], ] for i, input_ in enumerate(inputs): diff --git a/xcp_d/tests/test_utils_write_save.py b/xcp_d/tests/test_utils_write_save.py index a6f6838ca..1a4da84d3 100644 --- a/xcp_d/tests/test_utils_write_save.py +++ b/xcp_d/tests/test_utils_write_save.py @@ -10,20 +10,20 @@ def test_read_ndata(ds001419_data): """Test write_save.read_ndata.""" # Try to load a gifti - gifti_file = ds001419_data["gifti_file"] - with pytest.raises(ValueError, match="Unknown extension"): + gifti_file = ds001419_data['gifti_file'] + with pytest.raises(ValueError, match='Unknown extension'): write_save.read_ndata(gifti_file) # Load cifti - cifti_file = ds001419_data["cifti_file"] + cifti_file = ds001419_data['cifti_file'] cifti_data = write_save.read_ndata(cifti_file) assert cifti_data.shape == (91282, 60) # Load nifti - nifti_file = ds001419_data["nifti_file"] - mask_file = ds001419_data["brain_mask_file"] + nifti_file = ds001419_data['nifti_file'] + mask_file = ds001419_data['brain_mask_file'] - with pytest.raises(AssertionError, match="must be provided"): + with pytest.raises(AssertionError, match='must be provided'): write_save.read_ndata(nifti_file, maskfile=None) nifti_data = write_save.read_ndata(nifti_file, maskfile=mask_file) @@ -32,14 +32,14 @@ def test_read_ndata(ds001419_data): def test_write_ndata(ds001419_data, tmp_path_factory): """Test write_save.write_ndata.""" - tmpdir = tmp_path_factory.mktemp("test_write_ndata") + tmpdir = tmp_path_factory.mktemp('test_write_ndata') - orig_cifti = ds001419_data["cifti_file"] + orig_cifti = ds001419_data['cifti_file'] cifti_data = write_save.read_ndata(orig_cifti) cifti_data[1000, 50] = 1000 # Write an unmodified CIFTI - temp_cifti = os.path.join(tmpdir, "file.dtseries.nii") + temp_cifti = os.path.join(tmpdir, 'file.dtseries.nii') write_save.write_ndata(cifti_data, template=orig_cifti, filename=temp_cifti) assert os.path.isfile(temp_cifti) cifti_data_loaded = write_save.read_ndata(temp_cifti) @@ -50,7 +50,7 @@ def test_write_ndata(ds001419_data, tmp_path_factory): # Write a shortened CIFTI, so that the time axis will need to be created by write_ndata cifti_data = cifti_data[:, ::2] assert cifti_data.shape == (91282, 30) - temp_cifti = os.path.join(tmpdir, "file.dtseries.nii") + temp_cifti = os.path.join(tmpdir, 'file.dtseries.nii') write_save.write_ndata(cifti_data, template=orig_cifti, filename=temp_cifti) assert os.path.isfile(temp_cifti) cifti_data_loaded = write_save.read_ndata(temp_cifti) @@ -61,7 +61,7 @@ def test_write_ndata(ds001419_data, tmp_path_factory): # Write a dscalar file (no time points) cifti_data = cifti_data[:, 24:25] assert cifti_data.shape == (91282, 1) - temp_cifti = os.path.join(tmpdir, "file.dscalar.nii") + temp_cifti = os.path.join(tmpdir, 'file.dscalar.nii') write_save.write_ndata(cifti_data, template=orig_cifti, filename=temp_cifti) assert os.path.isfile(temp_cifti) cifti_data_loaded = write_save.read_ndata(temp_cifti) @@ -72,7 +72,7 @@ def test_write_ndata(ds001419_data, tmp_path_factory): # Write a 1D dscalar file (no time points) cifti_data = cifti_data[:, 0] assert cifti_data.shape == (91282,) - temp_cifti = os.path.join(tmpdir, "file.dscalar.nii") + temp_cifti = os.path.join(tmpdir, 'file.dscalar.nii') write_save.write_ndata(cifti_data, template=orig_cifti, filename=temp_cifti) assert 
os.path.isfile(temp_cifti) cifti_data_loaded = write_save.read_ndata(temp_cifti) @@ -81,19 +81,19 @@ def test_write_ndata(ds001419_data, tmp_path_factory): assert (cifti_data_loaded[1000, 0] - 1000) < 1 # Try writing out a different CIFTI filetype (should fail) - temp_cifti = os.path.join(tmpdir, "file.dlabel.nii") - with pytest.raises(ValueError, match="Unsupported CIFTI extension"): + temp_cifti = os.path.join(tmpdir, 'file.dlabel.nii') + with pytest.raises(ValueError, match='Unsupported CIFTI extension'): write_save.write_ndata(cifti_data, template=orig_cifti, filename=temp_cifti) # Try writing out a completely different filetype (should fail) - out_file = os.path.join(tmpdir, "file.txt") - with pytest.raises(ValueError, match="Unsupported CIFTI extension"): + out_file = os.path.join(tmpdir, 'file.txt') + with pytest.raises(ValueError, match='Unsupported CIFTI extension'): write_save.write_ndata(cifti_data, template=orig_cifti, filename=out_file) # Try using a txt file as a template - fake_template = os.path.join(tmpdir, "file.txt") - with open(fake_template, "w") as fo: - fo.write("TEST") + fake_template = os.path.join(tmpdir, 'file.txt') + with open(fake_template, 'w') as fo: + fo.write('TEST') - with pytest.raises(ValueError, match="Unknown extension"): + with pytest.raises(ValueError, match='Unknown extension'): write_save.write_ndata(cifti_data, template=fake_template, filename=temp_cifti) diff --git a/xcp_d/tests/test_workflows_anatomical.py b/xcp_d/tests/test_workflows_anatomical.py index b55fea32a..732fb3627 100644 --- a/xcp_d/tests/test_workflows_anatomical.py +++ b/xcp_d/tests/test_workflows_anatomical.py @@ -15,45 +15,45 @@ @pytest.fixture def surface_files(datasets, tmp_path_factory): """Collect real and fake surface files to test the anatomical workflow.""" - tmpdir = tmp_path_factory.mktemp("surface_files") - anat_dir = os.path.join(datasets["pnc"], "sub-1648798153", "ses-PNC1", "anat") + tmpdir = tmp_path_factory.mktemp('surface_files') + anat_dir = os.path.join(datasets['pnc'], 'sub-1648798153', 'ses-PNC1', 'anat') files = { - "native_lh_pial": os.path.join( - anat_dir, "sub-1648798153_ses-PNC1_acq-refaced_hemi-L_pial.surf.gii" + 'native_lh_pial': os.path.join( + anat_dir, 'sub-1648798153_ses-PNC1_acq-refaced_hemi-L_pial.surf.gii' ), - "native_lh_wm": os.path.join( - anat_dir, "sub-1648798153_ses-PNC1_acq-refaced_hemi-L_white.surf.gii" + 'native_lh_wm': os.path.join( + anat_dir, 'sub-1648798153_ses-PNC1_acq-refaced_hemi-L_white.surf.gii' ), - "native_rh_pial": os.path.join( - anat_dir, "sub-1648798153_ses-PNC1_acq-refaced_hemi-R_pial.surf.gii" + 'native_rh_pial': os.path.join( + anat_dir, 'sub-1648798153_ses-PNC1_acq-refaced_hemi-R_pial.surf.gii' ), - "native_rh_wm": os.path.join( - anat_dir, "sub-1648798153_ses-PNC1_acq-refaced_hemi-R_white.surf.gii" + 'native_rh_wm': os.path.join( + anat_dir, 'sub-1648798153_ses-PNC1_acq-refaced_hemi-R_white.surf.gii' ), } final_files = files.copy() for fref, fpath in files.items(): - std_fref = fref.replace("native_", "fsLR_") + std_fref = fref.replace('native_', 'fsLR_') std_fname = os.path.basename(fpath) std_fname = std_fname.replace( - "sub-1648798153_ses-PNC1_acq-refaced_hemi-L_", - "sub-1648798153_ses-PNC1_acq-refaced_hemi-L_space-fsLR_den-32k_", + 'sub-1648798153_ses-PNC1_acq-refaced_hemi-L_', + 'sub-1648798153_ses-PNC1_acq-refaced_hemi-L_space-fsLR_den-32k_', ).replace( - "sub-1648798153_ses-PNC1_acq-refaced_hemi-R_", - "sub-1648798153_ses-PNC1_acq-refaced_hemi-R_space-fsLR_den-32k_", + 
'sub-1648798153_ses-PNC1_acq-refaced_hemi-R_', + 'sub-1648798153_ses-PNC1_acq-refaced_hemi-R_space-fsLR_den-32k_', ) std_fpath = os.path.join(tmpdir, std_fname) shutil.copyfile(fpath, std_fpath) final_files[std_fref] = std_fpath - final_files["lh_subject_sphere"] = os.path.join( + final_files['lh_subject_sphere'] = os.path.join( anat_dir, - "sub-1648798153_ses-PNC1_acq-refaced_hemi-L_desc-reg_sphere.surf.gii", + 'sub-1648798153_ses-PNC1_acq-refaced_hemi-L_desc-reg_sphere.surf.gii', ) - final_files["rh_subject_sphere"] = os.path.join( + final_files['rh_subject_sphere'] = os.path.join( anat_dir, - "sub-1648798153_ses-PNC1_acq-refaced_hemi-R_desc-reg_sphere.surf.gii", + 'sub-1648798153_ses-PNC1_acq-refaced_hemi-R_desc-reg_sphere.surf.gii', ) return final_files @@ -68,60 +68,60 @@ def test_warp_surfaces_to_template_wf( The transforms should be applied and all of the standard-space outputs should be generated. """ - tmpdir = tmp_path_factory.mktemp("test_warp_surfaces_to_template_wf") + tmpdir = tmp_path_factory.mktemp('test_warp_surfaces_to_template_wf') with mock_config(): config.nipype.omp_nthreads = 1 config.execution.output_dir = tmpdir wf = anatomical.surface.init_warp_surfaces_to_template_wf( - software="FreeSurfer", + software='FreeSurfer', omp_nthreads=1, ) - wf.inputs.inputnode.lh_pial_surf = surface_files["native_lh_pial"] - wf.inputs.inputnode.rh_pial_surf = surface_files["native_rh_pial"] - wf.inputs.inputnode.lh_wm_surf = surface_files["native_lh_wm"] - wf.inputs.inputnode.rh_wm_surf = surface_files["native_rh_wm"] - wf.inputs.inputnode.lh_subject_sphere = surface_files["lh_subject_sphere"] - wf.inputs.inputnode.rh_subject_sphere = surface_files["rh_subject_sphere"] + wf.inputs.inputnode.lh_pial_surf = surface_files['native_lh_pial'] + wf.inputs.inputnode.rh_pial_surf = surface_files['native_rh_pial'] + wf.inputs.inputnode.lh_wm_surf = surface_files['native_lh_wm'] + wf.inputs.inputnode.rh_wm_surf = surface_files['native_rh_wm'] + wf.inputs.inputnode.lh_subject_sphere = surface_files['lh_subject_sphere'] + wf.inputs.inputnode.rh_subject_sphere = surface_files['rh_subject_sphere'] # transforms (only used if warp_to_standard is True) - wf.inputs.inputnode.anat_to_template_xfm = pnc_data["anat_to_template_xfm"] - wf.inputs.inputnode.template_to_anat_xfm = pnc_data["template_to_anat_xfm"] + wf.inputs.inputnode.anat_to_template_xfm = pnc_data['anat_to_template_xfm'] + wf.inputs.inputnode.template_to_anat_xfm = pnc_data['template_to_anat_xfm'] wf.base_dir = tmpdir wf = clean_datasinks(wf) wf.run() # All of the possible fsLR surfaces should be available. 
-    out_anat_dir = os.path.join(tmpdir, "sub-1648798153", "ses-PNC1", "anat")
+    out_anat_dir = os.path.join(tmpdir, 'sub-1648798153', 'ses-PNC1', 'anat')
     for key, filename in surface_files.items():
-        if "fsLR" in key:
+        if 'fsLR' in key:
             out_fname = os.path.basename(filename)
             out_file = os.path.join(out_anat_dir, out_fname)
-            assert os.path.isfile(out_file), "\n".join(sorted(os.listdir(tmpdir)))
+            assert os.path.isfile(out_file), '\n'.join(sorted(os.listdir(tmpdir)))


 def test_postprocess_anat_wf(ds001419_data, tmp_path_factory):
     """Test xcp_d.workflows.anatomical.volume.init_postprocess_anat_wf."""
-    tmpdir = tmp_path_factory.mktemp("test_postprocess_anat_wf")
+    tmpdir = tmp_path_factory.mktemp('test_postprocess_anat_wf')

-    anat_to_template_xfm = ds001419_data["anat_to_template_xfm"]
-    t1w = ds001419_data["t1w"]
-    t2w = os.path.join(tmpdir, "sub-01_desc-preproc_T2w.nii.gz")  # pretend t1w is t2w
+    anat_to_template_xfm = ds001419_data['anat_to_template_xfm']
+    t1w = ds001419_data['t1w']
+    t2w = os.path.join(tmpdir, 'sub-01_desc-preproc_T2w.nii.gz')  # pretend t1w is t2w
     shutil.copyfile(t1w, t2w)

     with mock_config():
         config.execution.output_dir = tmpdir
-        config.workflow.input_type = "fmriprep"
+        config.workflow.input_type = 'fmriprep'
         config.nipype.omp_nthreads = 1
         config.nipype.mem_gb = 0.1

         wf = anatomical.volume.init_postprocess_anat_wf(
             t1w_available=True,
             t2w_available=True,
-            target_space="MNI152NLin2009cAsym",
-            name="postprocess_anat_wf",
+            target_space='MNI152NLin2009cAsym',
+            name='postprocess_anat_wf',
         )

         wf.inputs.inputnode.anat_to_template_xfm = anat_to_template_xfm
@@ -133,9 +133,9 @@ def test_postprocess_anat_wf(ds001419_data, tmp_path_factory):

     wf_nodes = get_nodes(wf_res)

-    out_anat_dir = os.path.join(tmpdir, "xcp_d", "sub-01", "anat")
-    out_t1w = wf_nodes["postprocess_anat_wf.ds_t1w_std"].get_output("out_file")
+    out_anat_dir = os.path.join(tmpdir, 'xcp_d', 'sub-01', 'anat')
+    out_t1w = wf_nodes['postprocess_anat_wf.ds_t1w_std'].get_output('out_file')
     assert os.path.isfile(out_t1w), os.listdir(out_anat_dir)

-    out_t2w = wf_nodes["postprocess_anat_wf.ds_t2w_std"].get_output("out_file")
+    out_t2w = wf_nodes['postprocess_anat_wf.ds_t2w_std'].get_output('out_file')
     assert os.path.isfile(out_t2w), os.listdir(out_anat_dir)
diff --git a/xcp_d/tests/test_workflows_connectivity.py b/xcp_d/tests/test_workflows_connectivity.py
index 276ed6a25..bd20d55e6 100644
--- a/xcp_d/tests/test_workflows_connectivity.py
+++ b/xcp_d/tests/test_workflows_connectivity.py
@@ -29,72 +29,72 @@
 def test_init_load_atlases_wf_nifti(ds001419_data, tmp_path_factory):
     """Test init_load_atlases_wf with a nifti input."""
-    tmpdir = tmp_path_factory.mktemp("test_init_load_atlases_wf_nifti")
+    tmpdir = tmp_path_factory.mktemp('test_init_load_atlases_wf_nifti')

-    bold_file = ds001419_data["nifti_file"]
+    bold_file = ds001419_data['nifti_file']

     with mock_config():
         config.execution.output_dir = tmpdir
-        config.workflow.file_format = "nifti"
-        config.execution.atlases = ["4S156Parcels", "Glasser"]
+        config.workflow.file_format = 'nifti'
+        config.execution.atlases = ['4S156Parcels', 'Glasser']
         config.execution.datasets = {
-            "xcpdatlases": str(load_data("atlases")),
-            "xcpd4s": "/AtlasPack",
+            'xcpdatlases': str(load_data('atlases')),
+            'xcpd4s': '/AtlasPack',
         }
         config.nipype.omp_nthreads = 1

-        load_atlases_wf = init_load_atlases_wf(name="load_atlases_wf")
+        load_atlases_wf = init_load_atlases_wf(name='load_atlases_wf')
         load_atlases_wf.inputs.inputnode.name_source = bold_file
         load_atlases_wf.inputs.inputnode.bold_file = bold_file
         load_atlases_wf.base_dir = tmpdir
         load_atlases_wf_res = load_atlases_wf.run()
         nodes = get_nodes(load_atlases_wf_res)

-        atlas_names = nodes["load_atlases_wf.warp_atlases_to_bold_space"].get_output(
-            "output_image"
+        atlas_names = nodes['load_atlases_wf.warp_atlases_to_bold_space'].get_output(
+            'output_image'
         )
         assert len(atlas_names) == 2


 def test_init_load_atlases_wf_cifti(ds001419_data, tmp_path_factory):
     """Test init_load_atlases_wf with a cifti input."""
-    tmpdir = tmp_path_factory.mktemp("test_init_load_atlases_wf_cifti")
+    tmpdir = tmp_path_factory.mktemp('test_init_load_atlases_wf_cifti')

-    bold_file = ds001419_data["cifti_file"]
+    bold_file = ds001419_data['cifti_file']

     with mock_config():
         config.execution.output_dir = tmpdir
-        config.workflow.file_format = "cifti"
-        config.execution.atlases = ["4S156Parcels", "Glasser"]
+        config.workflow.file_format = 'cifti'
+        config.execution.atlases = ['4S156Parcels', 'Glasser']
         config.execution.datasets = {
-            "xcpdatlases": str(load_data("atlases")),
-            "xcpd4s": "/AtlasPack",
+            'xcpdatlases': str(load_data('atlases')),
+            'xcpd4s': '/AtlasPack',
         }
         config.nipype.omp_nthreads = 1

-        load_atlases_wf = init_load_atlases_wf(name="load_atlases_wf")
+        load_atlases_wf = init_load_atlases_wf(name='load_atlases_wf')
         load_atlases_wf.inputs.inputnode.name_source = bold_file
         load_atlases_wf.inputs.inputnode.bold_file = bold_file
         load_atlases_wf.base_dir = tmpdir
         load_atlases_wf_res = load_atlases_wf.run()
         nodes = get_nodes(load_atlases_wf_res)

-        atlas_names = nodes["load_atlases_wf.copy_atlas"].get_output("out_file")
+        atlas_names = nodes['load_atlases_wf.copy_atlas'].get_output('out_file')
         assert len(atlas_names) == 2


 def test_init_functional_connectivity_nifti_wf(ds001419_data, tmp_path_factory):
     """Test the nifti workflow."""
-    tmpdir = tmp_path_factory.mktemp("test_init_functional_connectivity_nifti_wf")
+    tmpdir = tmp_path_factory.mktemp('test_init_functional_connectivity_nifti_wf')

-    bold_file = ds001419_data["nifti_file"]
-    boldref = ds001419_data["boldref"]
-    bold_mask = ds001419_data["brain_mask_file"]
+    bold_file = ds001419_data['nifti_file']
+    boldref = ds001419_data['boldref']
+    bold_mask = ds001419_data['brain_mask_file']

     # Generate fake signal
     bold_data = read_ndata(bold_file, bold_mask)
     fake_signal = np.random.randint(1, 500, size=bold_data.shape)
-    fake_bold_file = os.path.join(tmpdir, "fake_signal_file.nii.gz")
+    fake_bold_file = os.path.join(tmpdir, 'fake_signal_file.nii.gz')
     write_ndata(
         fake_signal,
         template=bold_file,
@@ -108,30 +108,30 @@ def test_init_functional_connectivity_nifti_wf(ds001419_data, tmp_path_factory):
     # Create a fake temporal mask to satisfy the workflow
     n_volumes = bold_data.shape[1]
     censoring_df = pd.DataFrame(
-        columns=["framewise_displacement", "exact_10"],
+        columns=['framewise_displacement', 'exact_10'],
         data=np.stack(
             (np.zeros(n_volumes), np.concatenate((np.ones(10), np.zeros(n_volumes - 10)))),
             axis=1,
         ),
     )
-    temporal_mask = os.path.join(tmpdir, "temporal_mask.tsv")
-    censoring_df.to_csv(temporal_mask, sep="\t", index=False)
+    temporal_mask = os.path.join(tmpdir, 'temporal_mask.tsv')
+    censoring_df.to_csv(temporal_mask, sep='\t', index=False)

     # Load atlases
-    atlas_names = ["Gordon", "Glasser"]
+    atlas_names = ['Gordon', 'Glasser']
     atlas_files = [
         str(
-            load_data("atlases/atlas-Gordon/atlas-Gordon_space-MNI152NLin6Asym_res-01_dseg.nii.gz")
+            load_data('atlases/atlas-Gordon/atlas-Gordon_space-MNI152NLin6Asym_res-01_dseg.nii.gz')
         ),
         str(
             load_data(
-                "atlases/atlas-Glasser/atlas-Glasser_space-MNI152NLin6Asym_res-01_dseg.nii.gz"
+                'atlases/atlas-Glasser/atlas-Glasser_space-MNI152NLin6Asym_res-01_dseg.nii.gz'
             )
         ),
     ]
     atlas_labels_files = [
-        str(load_data("atlases/atlas-Gordon/atlas-Gordon_dseg.tsv")),
-        str(load_data("atlases/atlas-Glasser/atlas-Glasser_dseg.tsv")),
+        str(load_data('atlases/atlas-Gordon/atlas-Gordon_dseg.tsv')),
+        str(load_data('atlases/atlas-Glasser/atlas-Glasser_dseg.tsv')),
     ]

     # Perform the resampling and parcellation done by init_load_atlases_wf
@@ -140,7 +140,7 @@ def test_init_functional_connectivity_nifti_wf(ds001419_data, tmp_path_factory):
     transforms_from_MNI152NLin6Asym = get_std2bold_xfms(
         bold_file,
         source_file=None,
-        source_space="MNI152NLin6Asym",
+        source_space='MNI152NLin6Asym',
     )
     for atlas_file in atlas_files:
         # Using the generated transforms, apply them to get everything in the correct MNI form
@@ -148,7 +148,7 @@ def test_init_functional_connectivity_nifti_wf(ds001419_data, tmp_path_factory):
             reference_image=boldref,
             transforms=transforms_from_MNI152NLin6Asym,
             input_image=atlas_file,
-            interpolation="GenericLabel",
+            interpolation='GenericLabel',
             input_image_type=3,
             dimension=3,
         )
@@ -171,7 +171,7 @@ def test_init_functional_connectivity_nifti_wf(ds001419_data, tmp_path_factory):

     connectivity_wf = init_functional_connectivity_nifti_wf(
         mem_gb=mem_gbx,
-        name="connectivity_wf",
+        name='connectivity_wf',
     )
     connectivity_wf.inputs.inputnode.denoised_bold = fake_bold_file
     connectivity_wf.inputs.inputnode.temporal_mask = temporal_mask
@@ -189,28 +189,28 @@ def test_init_functional_connectivity_nifti_wf(ds001419_data, tmp_path_factory):

     # Let's find the correct workflow outputs
     assert os.path.isfile(atlas_file)
-    coverage = nodes["connectivity_wf.parcellate_data"].get_output("coverage")[0]
+    coverage = nodes['connectivity_wf.parcellate_data'].get_output('coverage')[0]
     assert os.path.isfile(coverage)
-    timeseries = nodes["connectivity_wf.parcellate_data"].get_output("timeseries")[0]
+    timeseries = nodes['connectivity_wf.parcellate_data'].get_output('timeseries')[0]
     assert os.path.isfile(timeseries)
-    correlations = nodes["connectivity_wf.functional_connectivity"].get_output("correlations")[
+    correlations = nodes['connectivity_wf.functional_connectivity'].get_output('correlations')[
         0
     ]
     assert os.path.isfile(correlations)

     # Read that into a df
-    coverage_df = pd.read_table(coverage, index_col="Node")
+    coverage_df = pd.read_table(coverage, index_col='Node')
     coverage_arr = coverage_df.to_numpy()
     assert coverage_arr.shape[0] == n_parcels
-    correlations_arr = pd.read_table(correlations, index_col="Node").to_numpy()
+    correlations_arr = pd.read_table(correlations, index_col='Node').to_numpy()
     assert correlations_arr.shape == (n_parcels, n_parcels)

     # Now to get ground truth correlations
-    labels_df = pd.read_table(atlas_labels_file, index_col="index")
+    labels_df = pd.read_table(atlas_labels_file, index_col='index')
     atlas_img, _ = _sanitize_nifti_atlas(atlas_file, labels_df)
     masker = NiftiLabelsMasker(
         labels_img=atlas_img,
-        labels=["background"] + coverage_df.index.tolist(),
+        labels=['background'] + coverage_df.index.tolist(),
         smoothing_fwhm=None,
         standardize=False,
     )
@@ -223,7 +223,7 @@ def test_init_functional_connectivity_nifti_wf(ds001419_data, tmp_path_factory):
     atlas_idx = np.arange(len(coverage_df.index.tolist()), dtype=int)
     idx_not_in_atlas = np.setdiff1d(atlas_idx + 1, masker.labels_)
     idx_in_atlas = np.array(masker.labels_, dtype=int) - 1
-    n_partial_parcels = np.where(coverage_df["coverage"] >= 0.5)[0].size
+    n_partial_parcels = np.where(coverage_df['coverage'] >= 0.5)[0].size

     # Drop missing parcels
     correlations_arr = correlations_arr[idx_in_atlas, :]
@@ -251,9 +251,9 @@ def test_init_functional_connectivity_nifti_wf(ds001419_data, tmp_path_factory):

 def test_init_functional_connectivity_cifti_wf(ds001419_data, tmp_path_factory):
     """Test the cifti workflow - only correlation, not parcellation."""
-    tmpdir = tmp_path_factory.mktemp("test_init_functional_connectivity_cifti_wf")
+    tmpdir = tmp_path_factory.mktemp('test_init_functional_connectivity_cifti_wf')

-    bold_file = ds001419_data["cifti_file"]
+    bold_file = ds001419_data['cifti_file']
     TR = _get_tr(nb.load(bold_file))

     # Generate fake signal
@@ -261,7 +261,7 @@ def test_init_functional_connectivity_cifti_wf(ds001419_data, tmp_path_factory):
     fake_signal = np.random.randint(1, 500, size=bold_data.shape).astype(np.float32)
     # Make half the vertices all zeros
     fake_signal[:5000, :] = 0
-    fake_bold_file = os.path.join(tmpdir, "fake_signal_file.dtseries.nii")
+    fake_bold_file = os.path.join(tmpdir, 'fake_signal_file.dtseries.nii')
     write_ndata(
         fake_signal,
         template=bold_file,
@@ -275,30 +275,30 @@ def test_init_functional_connectivity_cifti_wf(ds001419_data, tmp_path_factory):
     # Create a fake temporal mask to satisfy the workflow
     n_volumes = bold_data.shape[1]
     censoring_df = pd.DataFrame(
-        columns=["framewise_displacement", "exact_10"],
+        columns=['framewise_displacement', 'exact_10'],
         data=np.stack(
             (np.zeros(n_volumes), np.concatenate((np.ones(10), np.zeros(n_volumes - 10)))),
             axis=1,
         ),
     )
-    temporal_mask = os.path.join(tmpdir, "temporal_mask.tsv")
-    censoring_df.to_csv(temporal_mask, sep="\t", index=False)
+    temporal_mask = os.path.join(tmpdir, 'temporal_mask.tsv')
+    censoring_df.to_csv(temporal_mask, sep='\t', index=False)

     # Load atlases
-    atlas_names = ["4S1056Parcels", "4S156Parcels", "4S456Parcels", "Gordon", "Glasser"]
+    atlas_names = ['4S1056Parcels', '4S156Parcels', '4S456Parcels', 'Gordon', 'Glasser']
     atlas_files = [
-        "/AtlasPack/atlas-4S1056Parcels/atlas-4S1056Parcels_space-fsLR_den-91k_dseg.dlabel.nii",
-        "/AtlasPack/atlas-4S156Parcels/atlas-4S156Parcels_space-fsLR_den-91k_dseg.dlabel.nii",
-        "/AtlasPack/atlas-4S456Parcels/atlas-4S456Parcels_space-fsLR_den-91k_dseg.dlabel.nii",
-        str(load_data("atlases/atlas-Gordon/atlas-Gordon_space-fsLR_den-32k_dseg.dlabel.nii")),
-        str(load_data("atlases/atlas-Glasser/atlas-Glasser_space-fsLR_den-32k_dseg.dlabel.nii")),
+        '/AtlasPack/atlas-4S1056Parcels/atlas-4S1056Parcels_space-fsLR_den-91k_dseg.dlabel.nii',
+        '/AtlasPack/atlas-4S156Parcels/atlas-4S156Parcels_space-fsLR_den-91k_dseg.dlabel.nii',
+        '/AtlasPack/atlas-4S456Parcels/atlas-4S456Parcels_space-fsLR_den-91k_dseg.dlabel.nii',
+        str(load_data('atlases/atlas-Gordon/atlas-Gordon_space-fsLR_den-32k_dseg.dlabel.nii')),
+        str(load_data('atlases/atlas-Glasser/atlas-Glasser_space-fsLR_den-32k_dseg.dlabel.nii')),
     ]
     atlas_labels_files = [
-        "/AtlasPack/atlas-4S1056Parcels/atlas-4S1056Parcels_dseg.tsv",
-        "/AtlasPack/atlas-4S156Parcels/atlas-4S156Parcels_dseg.tsv",
-        "/AtlasPack/atlas-4S456Parcels/atlas-4S456Parcels_dseg.tsv",
-        str(load_data("atlases/atlas-Gordon/atlas-Gordon_dseg.tsv")),
-        str(load_data("atlases/atlas-Glasser/atlas-Glasser_dseg.tsv")),
+        '/AtlasPack/atlas-4S1056Parcels/atlas-4S1056Parcels_dseg.tsv',
+        '/AtlasPack/atlas-4S156Parcels/atlas-4S156Parcels_dseg.tsv',
+        '/AtlasPack/atlas-4S456Parcels/atlas-4S456Parcels_dseg.tsv',
+        str(load_data('atlases/atlas-Gordon/atlas-Gordon_dseg.tsv')),
+        str(load_data('atlases/atlas-Glasser/atlas-Glasser_dseg.tsv')),
     ]

     # Create the node and a tmpdir to write its results out to
@@ -313,7 +313,7 @@ def test_init_functional_connectivity_cifti_wf(ds001419_data, tmp_path_factory):
     connectivity_wf = init_functional_connectivity_cifti_wf(
         mem_gb=mem_gbx,
         exact_scans=[],
-        name="connectivity_wf",
+        name='connectivity_wf',
     )
     connectivity_wf.inputs.inputnode.denoised_bold = fake_bold_file
     connectivity_wf.inputs.inputnode.temporal_mask = temporal_mask
@@ -329,27 +329,27 @@ def test_init_functional_connectivity_cifti_wf(ds001419_data, tmp_path_factory):
     nodes = get_nodes(connectivity_wf_res)

     # Let's find the cifti files
-    pscalar = nodes["connectivity_wf.parcellate_bold_wf.parcellate_coverage"].get_output(
-        "out_file"
+    pscalar = nodes['connectivity_wf.parcellate_bold_wf.parcellate_coverage'].get_output(
+        'out_file'
     )[0]
     assert os.path.isfile(pscalar)
     timeseries_ciftis = nodes[
-        "connectivity_wf.parcellate_bold_wf.mask_parcellated_data"
-    ].get_output("out_file")[0]
+        'connectivity_wf.parcellate_bold_wf.mask_parcellated_data'
+    ].get_output('out_file')[0]
     assert os.path.isfile(timeseries_ciftis)
-    correlation_ciftis = nodes["connectivity_wf.correlate_bold"].get_output("out_file")[0]
+    correlation_ciftis = nodes['connectivity_wf.correlate_bold'].get_output('out_file')[0]
     assert os.path.isfile(correlation_ciftis)

     # Let's find the tsv files
-    coverage = nodes["connectivity_wf.parcellate_bold_wf.coverage_to_tsv"].get_output(
-        "out_file"
+    coverage = nodes['connectivity_wf.parcellate_bold_wf.coverage_to_tsv'].get_output(
+        'out_file'
     )[0]
     assert os.path.isfile(coverage)
-    timeseries = nodes["connectivity_wf.parcellate_bold_wf.cifti_to_tsv"].get_output(
-        "out_file"
+    timeseries = nodes['connectivity_wf.parcellate_bold_wf.cifti_to_tsv'].get_output(
+        'out_file'
     )[0]
     assert os.path.isfile(timeseries)
-    correlations = nodes["connectivity_wf.dconn_to_tsv"].get_output("out_file")[0]
+    correlations = nodes['connectivity_wf.dconn_to_tsv'].get_output('out_file')[0]
     assert os.path.isfile(correlations)

     # Let's read in the ciftis' data
@@ -363,7 +363,7 @@ def test_init_functional_connectivity_cifti_wf(ds001419_data, tmp_path_factory):
     # Read in the tsvs' data
     coverage_arr = pd.read_table(coverage).to_numpy().T
     timeseries_arr = pd.read_table(timeseries).to_numpy()
-    correlations_arr = pd.read_table(correlations, index_col="Node").to_numpy()
+    correlations_arr = pd.read_table(correlations, index_col='Node').to_numpy()

     assert coverage_arr.shape == pscalar_arr.shape
     assert timeseries_arr.shape == ptseries_arr.shape
@@ -395,10 +395,10 @@ def test_init_functional_connectivity_cifti_wf(ds001419_data, tmp_path_factory):
             np.where(np.isnan(pconn_arr) != np.isnan(calculated_correlations))
         ).T
         raise ValueError(
-            f"{mismatch_idx.shape} mismatches\n\n"
-            f"{mismatch_idx}\n\n"
-            f"{pconn_arr[mismatch_idx[:, 0], mismatch_idx[:, 1]]}\n\n"
-            f"{calculated_correlations[mismatch_idx[:, 0], mismatch_idx[:, 1]]}"
+            f'{mismatch_idx.shape} mismatches\n\n'
+            f'{mismatch_idx}\n\n'
+            f'{pconn_arr[mismatch_idx[:, 0], mismatch_idx[:, 1]]}\n\n'
+            f'{calculated_correlations[mismatch_idx[:, 0], mismatch_idx[:, 1]]}'
         )

     if not np.allclose(pconn_arr, calculated_correlations, atol=0.01, equal_nan=True):
diff --git a/xcp_d/tests/test_workflows_metrics.py b/xcp_d/tests/test_workflows_metrics.py
index d0b8ac494..1783ad8d5 100644
--- a/xcp_d/tests/test_workflows_metrics.py
+++ b/xcp_d/tests/test_workflows_metrics.py
@@ -23,11 +23,11 @@ def test_nifti_alff(ds001419_data, tmp_path_factory):
     and confirm the mean ALFF after addition to lower frequencies has increased.
     """
-    tempdir = tmp_path_factory.mktemp("test_nifti_alff_01")
+    tempdir = tmp_path_factory.mktemp('test_nifti_alff_01')

     # Get the file names
-    bold_file = ds001419_data["nifti_file"]
-    bold_mask = ds001419_data["brain_mask_file"]
+    bold_file = ds001419_data['nifti_file']
+    bold_mask = ds001419_data['brain_mask_file']

     # Let's initialize the ALFF node
     TR = _get_tr(nb.load(bold_file))
@@ -35,7 +35,7 @@ def test_nifti_alff(ds001419_data, tmp_path_factory):

     with mock_config():
         config.execution.output_dir = tempdir
-        config.workflow.file_format = "nifti"
+        config.workflow.file_format = 'nifti'
         config.workflow.low_pass = 0.08
         config.workflow.high_pass = 0.01
         config.workflow.fd_thresh = 0
@@ -58,7 +58,7 @@ def test_nifti_alff(ds001419_data, tmp_path_factory):
     nodes = get_nodes(compute_alff_res)

     # Let's get the mean of the ALFF for later comparison
-    original_alff = nodes["alff_wf.alff_compt"].get_output("alff")
+    original_alff = nodes['alff_wf.alff_compt'].get_output('alff')
     original_alff_data_mean = nb.load(original_alff).get_fdata().mean()

     # Now let's do an FFT
@@ -78,13 +78,13 @@ def test_nifti_alff(ds001419_data, tmp_path_factory):
     # Let's replace the original value with the fake data
     original_bold_data[2, :] = changed_voxel_data

     # Let's write this out
-    filename = os.path.join(tempdir, "editedfile.nii.gz")
+    filename = os.path.join(tempdir, 'editedfile.nii.gz')
     write_ndata(original_bold_data, template=bold_file, mask=bold_mask, filename=filename)

     # Now let's compute ALFF for the new file and see how it compares
     # to the original ALFF - it should increase since we increased
     # the amplitude in low frequencies for a voxel
-    tempdir = tmp_path_factory.mktemp("test_nifti_alff_02")
+    tempdir = tmp_path_factory.mktemp('test_nifti_alff_02')

     alff_wf = metrics.init_alff_wf(
         name_source=bold_file,
@@ -99,7 +99,7 @@ def test_nifti_alff(ds001419_data, tmp_path_factory):
     nodes = get_nodes(compute_alff_res)

     # Let's get the new ALFF mean
-    new_alff = nodes["alff_wf.alff_compt"].get_output("alff")
+    new_alff = nodes['alff_wf.alff_compt'].get_output('alff')
     assert os.path.isfile(new_alff)
     new_alff_data_mean = nb.load(new_alff).get_fdata().mean()

@@ -114,17 +114,17 @@ def test_cifti_alff(ds001419_data, tmp_path_factory):
     and confirm the ALFF after addition to lower frequencies has changed in the expected direction.
""" - bold_file = ds001419_data["cifti_file"] - bold_mask = ds001419_data["brain_mask_file"] + bold_file = ds001419_data['cifti_file'] + bold_mask = ds001419_data['brain_mask_file'] # Let's initialize the ALFF node TR = _get_tr(nb.load(bold_file)) mem_gbx = _create_mem_gb(bold_file) - tempdir = tmp_path_factory.mktemp("test_cifti_alff_01") + tempdir = tmp_path_factory.mktemp('test_cifti_alff_01') with mock_config(): config.execution.output_dir = tempdir - config.workflow.file_format = "cifti" + config.workflow.file_format = 'cifti' config.workflow.low_pass = 0.08 config.workflow.high_pass = 0.01 config.workflow.fd_thresh = 0.1 @@ -146,7 +146,7 @@ def test_cifti_alff(ds001419_data, tmp_path_factory): nodes = get_nodes(compute_alff_res) # Let's get the mean of the data for later comparison - original_alff = nodes["alff_wf.alff_compt"].get_output("alff") + original_alff = nodes['alff_wf.alff_compt'].get_output('alff') original_alff_data_mean = nb.load(original_alff).get_fdata().mean() # Now let's do an FFT @@ -164,11 +164,11 @@ def test_cifti_alff(ds001419_data, tmp_path_factory): original_bold_data[2, :] = changed_voxel_data # Let's write this out - filename = os.path.join(tempdir, "editedfile.dtseries.nii") + filename = os.path.join(tempdir, 'editedfile.dtseries.nii') write_ndata(original_bold_data, template=bold_file, mask=bold_mask, filename=filename) # Now let's compute ALFF for the new file and see how it compares - tempdir = tmp_path_factory.mktemp("test_cifti_alff_02") + tempdir = tmp_path_factory.mktemp('test_cifti_alff_02') alff_wf.base_dir = tempdir alff_wf.inputs.inputnode.bold_mask = bold_mask alff_wf.inputs.inputnode.denoised_bold = filename @@ -176,7 +176,7 @@ def test_cifti_alff(ds001419_data, tmp_path_factory): nodes = get_nodes(compute_alff_res) # Let's get the new ALFF mean - new_alff = nodes["alff_wf.alff_compt"].get_output("alff") + new_alff = nodes['alff_wf.alff_compt'].get_output('alff') assert os.path.isfile(new_alff) new_alff_data_mean = nb.load(new_alff).get_fdata().mean() @@ -208,11 +208,11 @@ def test_nifti_reho(ds001419_data, tmp_path_factory): Confirm that ReHo decreases after adding noise to a Nifti image. """ - tempdir = tmp_path_factory.mktemp("test_nifti_reho") + tempdir = tmp_path_factory.mktemp('test_nifti_reho') # Get the names of the files - bold_file = ds001419_data["nifti_file"] - bold_mask = ds001419_data["brain_mask_file"] + bold_file = ds001419_data['nifti_file'] + bold_mask = ds001419_data['brain_mask_file'] mem_gbx = _create_mem_gb(bold_file) # Set up and run the ReHo wf in a tempdir @@ -230,13 +230,13 @@ def test_nifti_reho(ds001419_data, tmp_path_factory): nodes = get_nodes(reho_res) # Get the original mean of the ReHo for later comparison - original_reho = nodes["reho_nifti_wf.reho_3d"].get_output("out_file") + original_reho = nodes['reho_nifti_wf.reho_3d'].get_output('out_file') original_reho_mean = nb.load(original_reho).get_fdata().mean() original_bold_data = read_ndata(bold_file, bold_mask) # Add some noise to the original data and write it out noisy_bold_data = _add_noise(original_bold_data) - noisy_bold_file = os.path.join(tempdir, "test.nii.gz") + noisy_bold_file = os.path.join(tempdir, 'test.nii.gz') write_ndata( noisy_bold_data, template=bold_file, @@ -251,7 +251,7 @@ def test_nifti_reho(ds001419_data, tmp_path_factory): nodes = get_nodes(reho_res) # Has the new ReHo's mean decreased? 
-    new_reho = nodes["reho_nifti_wf.reho_3d"].get_output("out_file")
+    new_reho = nodes['reho_nifti_wf.reho_3d'].get_output('out_file')
     new_reho_mean = nb.load(new_reho).get_fdata().mean()
     assert new_reho_mean < original_reho_mean

@@ -263,11 +263,11 @@ def test_cifti_reho(ds001419_data, tmp_path_factory):
     Cifti image.
     """
     # Get the names of the files
-    tempdir = tmp_path_factory.mktemp("test_cifti_reho")
-    source_file = ds001419_data["cifti_file"]
+    tempdir = tmp_path_factory.mktemp('test_cifti_reho')
+    source_file = ds001419_data['cifti_file']

     # Create a copy of the BOLD file to control the filename
-    orig_bold_file = os.path.join(tempdir, "original.dtseries.nii")
+    orig_bold_file = os.path.join(tempdir, 'original.dtseries.nii')
     shutil.copyfile(source_file, orig_bold_file)

     mem_gbx = _create_mem_gb(orig_bold_file)
@@ -280,7 +280,7 @@ def test_cifti_reho(ds001419_data, tmp_path_factory):
     reho_wf = metrics.init_reho_cifti_wf(
         name_source=source_file,
         mem_gb=mem_gbx,
-        name="orig_reho_wf",
+        name='orig_reho_wf',
     )
     reho_wf.base_dir = tempdir
     reho_wf.inputs.inputnode.denoised_bold = orig_bold_file
@@ -290,13 +290,13 @@ def test_cifti_reho(ds001419_data, tmp_path_factory):
     nodes = get_nodes(reho_res)

     # Get the original mean of the ReHo for later comparison
-    original_reho = nodes["orig_reho_wf.merge_cifti"].get_output("out_file")
+    original_reho = nodes['orig_reho_wf.merge_cifti'].get_output('out_file')
     original_reho_mean = nb.load(original_reho).get_fdata().mean()

     # Add some noise to the original data and write it out
     original_bold_data = read_ndata(orig_bold_file)
     noisy_bold_data = _add_noise(original_bold_data)
-    noisy_bold_file = os.path.join(tempdir, "noisy.dtseries.nii")
+    noisy_bold_file = os.path.join(tempdir, 'noisy.dtseries.nii')
     write_ndata(noisy_bold_data, template=orig_bold_file, filename=noisy_bold_file)

     # Run ReHo again
@@ -305,7 +305,7 @@ def test_cifti_reho(ds001419_data, tmp_path_factory):
     reho_wf = metrics.init_reho_cifti_wf(
         name_source=source_file,
         mem_gb=mem_gbx,
-        name="noisy_reho_wf",
+        name='noisy_reho_wf',
     )
     reho_wf.base_dir = tempdir
     reho_wf.inputs.inputnode.denoised_bold = noisy_bold_file
@@ -315,6 +315,6 @@ def test_cifti_reho(ds001419_data, tmp_path_factory):
     nodes = get_nodes(reho_res)

     # Has the new ReHo's mean decreased?
-    noisy_reho = nodes["noisy_reho_wf.merge_cifti"].get_output("out_file")
+    noisy_reho = nodes['noisy_reho_wf.merge_cifti'].get_output('out_file')
     noisy_reho_mean = nb.load(noisy_reho).get_fdata().mean()
     assert noisy_reho_mean < original_reho_mean
diff --git a/xcp_d/tests/test_workflows_plotting.py b/xcp_d/tests/test_workflows_plotting.py
index 8a371c0f3..b6b44606e 100644
--- a/xcp_d/tests/test_workflows_plotting.py
+++ b/xcp_d/tests/test_workflows_plotting.py
@@ -13,10 +13,10 @@
 def test_init_plot_custom_slices_wf(ds001419_data, tmp_path_factory):
     """Test init_plot_custom_slices_wf."""
-    tmpdir = tmp_path_factory.mktemp("test_init_plot_custom_slices_wf")
+    tmpdir = tmp_path_factory.mktemp('test_init_plot_custom_slices_wf')

-    nifti_file = ds001419_data["nifti_file"]
-    nifti_3d = os.path.join(tmpdir, "img3d.nii.gz")
+    nifti_file = ds001419_data['nifti_file']
+    nifti_3d = os.path.join(tmpdir, 'img3d.nii.gz')
     img_3d = image.index_img(nifti_file, 5)
     img_3d.to_filename(nifti_3d)

@@ -24,16 +24,16 @@ def test_init_plot_custom_slices_wf(ds001419_data, tmp_path_factory):
         config.execution.output_dir = tmpdir

         wf = plotting.init_plot_custom_slices_wf(
-            desc="SubcorticalOnAtlas",
-            name="plot_custom_slices_wf",
+            desc='SubcorticalOnAtlas',
+            name='plot_custom_slices_wf',
         )
         wf.inputs.inputnode.name_source = nifti_file
         wf.inputs.inputnode.overlay_file = nifti_3d
-        wf.inputs.inputnode.underlay_file = ds001419_data["t1w_mni"]
+        wf.inputs.inputnode.underlay_file = ds001419_data['t1w_mni']
         wf.base_dir = tmpdir
         wf = clean_datasinks(wf)
         wf_res = wf.run()

     nodes = get_nodes(wf_res)

-    overlay_figure = nodes["plot_custom_slices_wf.ds_report_overlay"].get_output("out_file")
+    overlay_figure = nodes['plot_custom_slices_wf.ds_report_overlay'].get_output('out_file')
     assert os.path.isfile(overlay_figure)
diff --git a/xcp_d/tests/tests.py b/xcp_d/tests/tests.py
index d48af981a..2d3303b3f 100644
--- a/xcp_d/tests/tests.py
+++ b/xcp_d/tests/tests.py
@@ -21,6 +21,7 @@
 # https://www.nipreps.org/community/licensing/
 #
 """Utilities and mocks for testing and documentation building."""
+
 import os
 import shutil
 from contextlib import contextmanager
@@ -38,20 +39,20 @@ def mock_config():
     """Create a mock config for documentation and testing purposes."""
     from xcp_d import config

-    _old_fs = os.getenv("FREESURFER_HOME")
+    _old_fs = os.getenv('FREESURFER_HOME')
     if not _old_fs:
-        os.environ["FREESURFER_HOME"] = mkdtemp()
+        os.environ['FREESURFER_HOME'] = mkdtemp()

-    filename = load_data("tests/config.toml").resolve()
+    filename = load_data('tests/config.toml').resolve()
     if not filename.exists():
         base_path = os.path.dirname(filename)
         raise FileNotFoundError(
-            f"File not found: {filename}\nFiles in {base_path}:\n{os.listdir(base_path)}"
+            f'File not found: {filename}\nFiles in {base_path}:\n{os.listdir(base_path)}'
         )

     settings = loads(filename.read_text())
     for sectionname, configs in settings.items():
-        if sectionname != "environment":
+        if sectionname != 'environment':
             section = getattr(config, sectionname)
             section.load(configs, init=False)

@@ -72,4 +73,4 @@ def mock_config():
         shutil.rmtree(config.execution.output_dir)

     if not _old_fs:
-        del os.environ["FREESURFER_HOME"]
+        del os.environ['FREESURFER_HOME']
diff --git a/xcp_d/tests/utils.py b/xcp_d/tests/utils.py
index d1851e390..34c33bbbf 100644
--- a/xcp_d/tests/utils.py
+++ b/xcp_d/tests/utils.py
@@ -14,7 +14,7 @@
 from bids.layout import BIDSLayout
 from nipype import logging

-LOGGER = logging.getLogger("nipype.utils")
+LOGGER = logging.getLogger('nipype.utils')


 def _check_arg_specified(argname, arglist):
@@ -26,7 +26,7 @@ def _check_arg_specified(argname, arglist):

 def get_cpu_count(max_cpus=4):
     """Figure out how many cpus are available in the test environment."""
-    env_cpus = os.getenv("CIRCLE_CPUS")
+    env_cpus = os.getenv('CIRCLE_CPUS')
     if env_cpus:
         return int(env_cpus)
     return max_cpus
@@ -41,10 +41,10 @@ def update_resources(parameters):
     this variable doesn't work, just set it to 4.
     """
     nthreads = get_cpu_count()
-    if not _check_arg_specified("--nthreads", parameters):
-        parameters.append(f"--nthreads={nthreads}")
-    if not _check_arg_specified("--omp-nthreads", parameters):
-        parameters.append(f"--omp-nthreads={nthreads}")
+    if not _check_arg_specified('--nthreads', parameters):
+        parameters.append(f'--nthreads={nthreads}')
+    if not _check_arg_specified('--omp-nthreads', parameters):
+        parameters.append(f'--omp-nthreads={nthreads}')
     return parameters

@@ -56,21 +56,21 @@ def get_nodes(wf_results):

 def download_test_data(dset, data_dir=None):
     """Download test data."""
     URLS = {
-        "fmriprepwithoutfreesurfer": (
-            "https://upenn.box.com/shared/static/seyp1cu9w5v3ds6iink37hlsa217yge1.tar.gz"
+        'fmriprepwithoutfreesurfer': (
+            'https://upenn.box.com/shared/static/seyp1cu9w5v3ds6iink37hlsa217yge1.tar.gz'
         ),
-        "nibabies": "https://upenn.box.com/shared/static/rsd7vpny5imv3qkd7kpuvdy9scpnfpe2.tar.gz",
-        "ds001419": "https://upenn.box.com/shared/static/yye7ljcdodj9gd6hm2r6yzach1o6xq1d.tar.gz",
-        "ds001419-aroma": (
-            "https://upenn.box.com/shared/static/dexcmnlj7yujudr3muu05kch66sko4mt.tar.gz"
+        'nibabies': 'https://upenn.box.com/shared/static/rsd7vpny5imv3qkd7kpuvdy9scpnfpe2.tar.gz',
+        'ds001419': 'https://upenn.box.com/shared/static/yye7ljcdodj9gd6hm2r6yzach1o6xq1d.tar.gz',
+        'ds001419-aroma': (
+            'https://upenn.box.com/shared/static/dexcmnlj7yujudr3muu05kch66sko4mt.tar.gz'
         ),
-        "pnc": "https://upenn.box.com/shared/static/ui2847ys49d82pgn5ewai1mowcmsv2br.tar.gz",
-        "ukbiobank": "https://upenn.box.com/shared/static/p5h1eg4p5cd2ef9ehhljlyh1uku0xe97.tar.gz",
-        "schaefer100": (
-            "https://upenn.box.com/shared/static/b9pn9qebr41kteant4ym2q5u4kcbgiy6.tar.gz"
+        'pnc': 'https://upenn.box.com/shared/static/ui2847ys49d82pgn5ewai1mowcmsv2br.tar.gz',
+        'ukbiobank': 'https://upenn.box.com/shared/static/p5h1eg4p5cd2ef9ehhljlyh1uku0xe97.tar.gz',
+        'schaefer100': (
+            'https://upenn.box.com/shared/static/b9pn9qebr41kteant4ym2q5u4kcbgiy6.tar.gz'
         ),
     }
-    if dset == "*":
+    if dset == '*':
         for k in URLS:
             download_test_data(k, data_dir=data_dir)

@@ -80,29 +80,29 @@ def download_test_data(dset, data_dir=None):
         raise ValueError(f"dset ({dset}) must be one of: {', '.join(URLS.keys())}")

     if not data_dir:
-        data_dir = os.path.join(os.path.dirname(get_test_data_path()), "test_data")
+        data_dir = os.path.join(os.path.dirname(get_test_data_path()), 'test_data')

     out_dir = os.path.join(data_dir, dset)

     if os.path.isdir(out_dir):
         LOGGER.info(
-            f"Dataset {dset} already exists. "
-            "If you need to re-download the data, please delete the folder."
+            f'Dataset {dset} already exists. '
+            'If you need to re-download the data, please delete the folder.'
         )

-        if dset.startswith("ds001419"):
+        if dset.startswith('ds001419'):
             # These test datasets have an extra folder level
             out_dir = os.path.join(out_dir, dset)

         return out_dir
     else:
-        LOGGER.info(f"Downloading {dset} to {out_dir}")
+        LOGGER.info(f'Downloading {dset} to {out_dir}')

     os.makedirs(out_dir, exist_ok=True)
-    with requests.get(URLS[dset], stream=True) as req:
+    with requests.get(URLS[dset], stream=True, timeout=10) as req:
         with tarfile.open(fileobj=GzipFile(fileobj=BytesIO(req.content))) as t:
-            t.extractall(out_dir)
+            t.extractall(out_dir)  # noqa: S202

-    if dset.startswith("ds001419"):
+    if dset.startswith('ds001419'):
         # These test datasets have an extra folder level
         out_dir = os.path.join(out_dir, dset)

@@ -115,36 +115,36 @@ def get_test_data_path():
     Test-related data are kept in tests folder in "data".
     Based on function by Yaroslav Halchenko used in Neurosynth Python package.
     """
-    return os.path.abspath(os.path.join(os.path.dirname(__file__), "data") + os.path.sep)
+    return os.path.abspath(os.path.join(os.path.dirname(__file__), 'data') + os.path.sep)


 def check_generated_files(output_dir, output_list_file):
     """Compare files generated by xcp_d with a list of expected files."""
-    found_files = sorted(glob(os.path.join(output_dir, "**/*"), recursive=True))
+    found_files = sorted(glob(os.path.join(output_dir, '**/*'), recursive=True))
     found_files = [os.path.relpath(f, output_dir) for f in found_files]

     # Ignore figures
-    found_files = [f for f in found_files if "figures" not in f]
+    found_files = [f for f in found_files if 'figures' not in f]

     # Ignore logs
-    found_files = [f for f in found_files if "log" not in f.split(os.path.sep)]
+    found_files = [f for f in found_files if 'log' not in f.split(os.path.sep)]

-    with open(output_list_file, "r") as fo:
+    with open(output_list_file) as fo:
         expected_files = fo.readlines()
         expected_files = [f.rstrip() for f in expected_files]

     if sorted(found_files) != sorted(expected_files):
-        expected_not_found = sorted(list(set(expected_files) - set(found_files)))
-        found_not_expected = sorted(list(set(found_files) - set(expected_files)))
+        expected_not_found = sorted(set(expected_files) - set(found_files))
+        found_not_expected = sorted(set(found_files) - set(expected_files))

-        msg = ""
+        msg = ''
         if expected_not_found:
-            msg += "\nExpected but not found:\n\t"
-            msg += "\n\t".join(expected_not_found)
+            msg += '\nExpected but not found:\n\t'
+            msg += '\n\t'.join(expected_not_found)

         if found_not_expected:
-            msg += "\nFound but not expected:\n\t"
-            msg += "\n\t".join(found_not_expected)
+            msg += '\nFound but not expected:\n\t'
+            msg += '\n\t'.join(found_not_expected)
         raise ValueError(msg)

@@ -152,49 +152,49 @@ def check_affines(data_dir, out_dir, input_type):
     """Confirm affines don't change across XCP-D runs."""
     preproc_layout = BIDSLayout(str(data_dir), validate=False)
     xcp_layout = BIDSLayout(str(out_dir), validate=False)
-    if input_type == "cifti":  # Get the .dtseries.nii
+    if input_type == 'cifti':  # Get the .dtseries.nii
         denoised_files = xcp_layout.get(
-            invalid_filters="allow",
-            datatype="func",
-            extension=".dtseries.nii",
+            invalid_filters='allow',
+            datatype='func',
+            extension='.dtseries.nii',
         )
-        space = denoised_files[0].get_entities()["space"]
+        space = denoised_files[0].get_entities()['space']
         preproc_files = preproc_layout.get(
-            invalid_filters="allow",
-            datatype="func",
+            invalid_filters='allow',
+            datatype='func',
             space=space,
-            extension=".dtseries.nii",
+            extension='.dtseries.nii',
        )
-    elif input_type in ("nifti", "ukb"):  # Get the .nii.gz
+    elif input_type in ('nifti', 'ukb'):  # Get the .nii.gz
         # Problem: it's collecting native-space data
         denoised_files = xcp_layout.get(
-            datatype="func",
-            suffix="bold",
-            extension=".nii.gz",
+            datatype='func',
+            suffix='bold',
+            extension='.nii.gz',
         )
-        space = denoised_files[0].get_entities()["space"]
+        space = denoised_files[0].get_entities()['space']
         preproc_files = preproc_layout.get(
-            invalid_filters="allow",
-            datatype="func",
+            invalid_filters='allow',
+            datatype='func',
             space=space,
-            suffix="bold",
-            extension=".nii.gz",
+            suffix='bold',
+            extension='.nii.gz',
         )
     else:  # Nibabies
         denoised_files = xcp_layout.get(
-            datatype="func",
-            space="MNIInfant",
-            suffix="bold",
-            extension=".nii.gz",
+            datatype='func',
+            space='MNIInfant',
+            suffix='bold',
+            extension='.nii.gz',
         )
         preproc_files = preproc_layout.get(
-            invalid_filters="allow",
-            datatype="func",
-            space="MNIInfant",
-            suffix="bold",
-            extension=".nii.gz",
+            invalid_filters='allow',
+            datatype='func',
+            space='MNIInfant',
+            suffix='bold',
+            extension='.nii.gz',
         )

     preproc_file = preproc_files[0].path
@@ -202,39 +202,43 @@ def check_affines(data_dir, out_dir, input_type):
     img1 = nb.load(preproc_file)
     img2 = nb.load(denoised_file)

-    if input_type == "cifti":
+    if input_type == 'cifti':
         assert img1._nifti_header.get_intent() == img2._nifti_header.get_intent()
         np.testing.assert_array_equal(img1.nifti_header.get_zooms(), img2.nifti_header.get_zooms())
     else:
         np.testing.assert_array_equal(img1.affine, img2.affine)

-        if input_type != "ukb":
+        if input_type != 'ukb':
             # The UK Biobank test dataset has the wrong TR in the header.
             # I'll fix it at some point, but it's not the software's fault.
             np.testing.assert_array_equal(img1.header.get_zooms(), img2.header.get_zooms())


 def run_command(command, env=None):
-    """Run a given shell command with certain environment variables set."""
+    """Run a given shell command with certain environment variables set.
+
+    Keep this out of the real XCP-D code so that devs don't need to install XCP-D to run tests.
+    """
     merged_env = os.environ
     if env:
         merged_env.update(env)
+
     process = subprocess.Popen(
-        command,
+        command.split(),
         stdout=subprocess.PIPE,
         stderr=subprocess.STDOUT,
-        shell=True,
+        shell=False,
         env=merged_env,
     )
     while True:
         line = process.stdout.readline()
-        line = str(line, "utf-8")[:-1]
+        line = str(line, 'utf-8')[:-1]
         print(line)
-        if line == "" and process.poll() is not None:
+        if line == '' and process.poll() is not None:
             break

     if process.returncode != 0:
-        raise Exception(
-            f"Non zero return code: {process.returncode}\n" f"{command}\n\n{process.stdout.read()}"
+        raise RuntimeError(
+            f'Non zero return code: {process.returncode}\n' f'{command}\n\n{process.stdout.read()}'
         )

@@ -258,29 +262,29 @@ def reorder_expected_outputs():

     This function is called manually by devs when they modify the test outputs.
""" test_data_path = get_test_data_path() - expected_output_files = sorted(glob(os.path.join(test_data_path, "test_*_outputs.txt"))) + expected_output_files = sorted(glob(os.path.join(test_data_path, 'test_*_outputs.txt'))) for expected_output_file in expected_output_files: - LOGGER.info(f"Sorting {expected_output_file}") + LOGGER.info(f'Sorting {expected_output_file}') - with open(expected_output_file, "r") as fo: + with open(expected_output_file) as fo: file_contents = fo.readlines() - file_contents = sorted(list(set(file_contents))) + file_contents = sorted(set(file_contents)) - with open(expected_output_file, "w") as fo: + with open(expected_output_file, 'w') as fo: fo.writelines(file_contents) def list_files(startpath): """List files in a directory.""" - tree = "" + tree = '' for root, _, files in os.walk(startpath): - level = root.replace(startpath, "").count(os.sep) - indent = " " * 4 * (level) - tree += f"{indent}{os.path.basename(root)}/\n" - subindent = " " * 4 * (level + 1) + level = root.replace(startpath, '').count(os.sep) + indent = ' ' * 4 * (level) + tree += f'{indent}{os.path.basename(root)}/\n' + subindent = ' ' * 4 * (level + 1) for f in files: - tree += f"{subindent}{f}\n" + tree += f'{subindent}{f}\n' return tree diff --git a/xcp_d/utils/__init__.py b/xcp_d/utils/__init__.py index 9fbae2047..0450982b8 100644 --- a/xcp_d/utils/__init__.py +++ b/xcp_d/utils/__init__.py @@ -21,19 +21,19 @@ ) __all__ = [ - "atlas", - "bids", - "boilerplate", - "concatenation", - "confounds", - "doc", - "execsummary", - "filemanip", - "modified_data", - "plotting", - "qcmetrics", - "restingstate", - "sentry", - "utils", - "write_save", + 'atlas', + 'bids', + 'boilerplate', + 'concatenation', + 'confounds', + 'doc', + 'execsummary', + 'filemanip', + 'modified_data', + 'plotting', + 'qcmetrics', + 'restingstate', + 'sentry', + 'utils', + 'write_save', ] diff --git a/xcp_d/utils/atlas.py b/xcp_d/utils/atlas.py index 6eca875dd..2c003681f 100644 --- a/xcp_d/utils/atlas.py +++ b/xcp_d/utils/atlas.py @@ -2,7 +2,7 @@ from nipype import logging -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') def select_atlases(atlases, subset): @@ -24,33 +24,33 @@ def select_atlases(atlases, subset): List of atlases. 
""" BUILTIN_ATLASES = { - "cortical": [ - "4S156Parcels", - "4S256Parcels", - "4S356Parcels", - "4S456Parcels", - "4S556Parcels", - "4S656Parcels", - "4S756Parcels", - "4S856Parcels", - "4S956Parcels", - "4S1056Parcels", - "Glasser", - "Gordon", - "MIDB", - "MyersLabonte", + 'cortical': [ + '4S156Parcels', + '4S256Parcels', + '4S356Parcels', + '4S456Parcels', + '4S556Parcels', + '4S656Parcels', + '4S756Parcels', + '4S856Parcels', + '4S956Parcels', + '4S1056Parcels', + 'Glasser', + 'Gordon', + 'MIDB', + 'MyersLabonte', ], - "subcortical": [ - "Tian", - "HCP", + 'subcortical': [ + 'Tian', + 'HCP', ], } - BUILTIN_ATLASES["all"] = sorted( - list(set(BUILTIN_ATLASES["cortical"] + BUILTIN_ATLASES["subcortical"])) + BUILTIN_ATLASES['all'] = sorted( + set(BUILTIN_ATLASES['cortical'] + BUILTIN_ATLASES['subcortical']) ) subset_atlases = BUILTIN_ATLASES[subset] if atlases: - external_atlases = [atlas for atlas in atlases if atlas not in BUILTIN_ATLASES["all"]] + external_atlases = [atlas for atlas in atlases if atlas not in BUILTIN_ATLASES['all']] selected_atlases = [atlas for atlas in atlases if atlas in subset_atlases] selected_atlases += external_atlases else: @@ -59,7 +59,7 @@ def select_atlases(atlases, subset): return selected_atlases -def collect_atlases(datasets, atlases, file_format, bids_filters={}): +def collect_atlases(datasets, atlases, file_format, bids_filters=None): """Collect atlases from a list of BIDS-Atlas datasets. Selection of labels files and metadata does not leverage the inheritance principle. @@ -99,21 +99,21 @@ def collect_atlases(datasets, atlases, file_format, bids_filters={}): from xcp_d.data import load as load_data - atlas_cfg = load_data("atlas_bids_config.json") + atlas_cfg = load_data('atlas_bids_config.json') bids_filters = bids_filters or {} - atlas_filter = bids_filters.get("atlas", {}) - atlas_filter["suffix"] = atlas_filter.get("suffix") or "dseg" # XCP-D only supports dsegs - atlas_filter["extension"] = [".nii.gz", ".nii"] if file_format == "nifti" else ".dlabel.nii" + atlas_filter = bids_filters.get('atlas', {}) + atlas_filter['suffix'] = atlas_filter.get('suffix') or 'dseg' # XCP-D only supports dsegs + atlas_filter['extension'] = ['.nii.gz', '.nii'] if file_format == 'nifti' else '.dlabel.nii' # Hardcoded spaces for now - if file_format == "cifti": - atlas_filter["space"] = atlas_filter.get("space") or "fsLR" - atlas_filter["den"] = atlas_filter.get("den") or ["32k", "91k"] + if file_format == 'cifti': + atlas_filter['space'] = atlas_filter.get('space') or 'fsLR' + atlas_filter['den'] = atlas_filter.get('den') or ['32k', '91k'] else: - atlas_filter["space"] = atlas_filter.get("space") or [ - "MNI152NLin6Asym", - "MNI152NLin2009cAsym", - "MNIInfant", + atlas_filter['space'] = atlas_filter.get('space') or [ + 'MNI152NLin6Asym', + 'MNI152NLin2009cAsym', + 'MNIInfant', ] atlas_cache = {} @@ -123,60 +123,60 @@ def collect_atlases(datasets, atlases, file_format, bids_filters={}): else: layout = dataset_path - if layout.get_dataset_description().get("DatasetType") != "atlas": + if layout.get_dataset_description().get('DatasetType') != 'atlas': continue for atlas in atlases: atlas_images = layout.get( atlas=atlas, **atlas_filter, - return_type="file", + return_type='file', ) if not atlas_images: continue elif len(atlas_images) > 1: - bulleted_list = "\n".join([f" - {img}" for img in atlas_images]) + bulleted_list = '\n'.join([f' - {img}' for img in atlas_images]) LOGGER.warning( - f"Multiple atlas images found for {atlas} with query {atlas_filter}:\n" - 
f"{bulleted_list}\nUsing {atlas_images[0]}." + f'Multiple atlas images found for {atlas} with query {atlas_filter}:\n' + f'{bulleted_list}\nUsing {atlas_images[0]}.' ) if atlas in atlas_cache: raise ValueError(f"Multiple datasets contain the same atlas '{atlas}'") atlas_image = atlas_images[0] - atlas_labels = layout.get_nearest(atlas_image, extension=".tsv", strict=False) - atlas_metadata_file = layout.get_nearest(atlas_image, extension=".json", strict=True) + atlas_labels = layout.get_nearest(atlas_image, extension='.tsv', strict=False) + atlas_metadata_file = layout.get_nearest(atlas_image, extension='.json', strict=True) if not atlas_labels: - raise FileNotFoundError(f"No TSV file found for {atlas_image}") + raise FileNotFoundError(f'No TSV file found for {atlas_image}') atlas_metadata = None if atlas_metadata_file: - with open(atlas_metadata_file, "r") as fo: + with open(atlas_metadata_file) as fo: atlas_metadata = json.load(fo) atlas_cache[atlas] = { - "dataset": dataset_name, - "image": atlas_image, - "labels": atlas_labels, - "metadata": atlas_metadata, + 'dataset': dataset_name, + 'image': atlas_image, + 'labels': atlas_labels, + 'metadata': atlas_metadata, } for atlas in atlases: if atlas not in atlas_cache: - LOGGER.warning(f"No atlas images found for {atlas} with query {atlas_filter}") + LOGGER.warning(f'No atlas images found for {atlas} with query {atlas_filter}') - for atlas, atlas_info in atlas_cache.items(): - if not atlas_info["labels"]: + for _atlas, atlas_info in atlas_cache.items(): + if not atlas_info['labels']: raise FileNotFoundError(f"No TSV file found for {atlas_info['image']}") # Check the contents of the labels file - df = pd.read_table(atlas_info["labels"]) - if "label" not in df.columns: + df = pd.read_table(atlas_info['labels']) + if 'label' not in df.columns: raise ValueError(f"'label' column not found in {atlas_info['labels']}") - if "index" not in df.columns: + if 'index' not in df.columns: raise ValueError(f"'index' column not found in {atlas_info['labels']}") return atlas_cache diff --git a/xcp_d/utils/bids.py b/xcp_d/utils/bids.py index 3be537abb..258cea1cf 100644 --- a/xcp_d/utils/bids.py +++ b/xcp_d/utils/bids.py @@ -23,30 +23,30 @@ from xcp_d.utils.doc import fill_doc from xcp_d.utils.filemanip import ensure_list -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') # TODO: Add and test fsaverage. DEFAULT_ALLOWED_SPACES = { - "cifti": ["fsLR"], - "nifti": [ - "MNI152NLin6Asym", - "MNI152NLin2009cAsym", - "MNIInfant", + 'cifti': ['fsLR'], + 'nifti': [ + 'MNI152NLin6Asym', + 'MNI152NLin2009cAsym', + 'MNIInfant', ], } INPUT_TYPE_ALLOWED_SPACES = { - "nibabies": { - "cifti": ["fsLR"], - "nifti": [ - "MNI152NLin6Asym", - "MNIInfant", - "MNI152NLin2009cAsym", + 'nibabies': { + 'cifti': ['fsLR'], + 'nifti': [ + 'MNI152NLin6Asym', + 'MNIInfant', + 'MNI152NLin2009cAsym', ], }, } # The volumetric NIFTI template associated with each supported CIFTI template. ASSOCIATED_TEMPLATES = { - "fsLR": "MNI152NLin6Asym", + 'fsLR': 'MNI152NLin6Asym', } @@ -71,7 +71,7 @@ def __init__(self, message, bids_root): f"\n{header}\n{''.join([' '] * (indent + 1))}{message}\n" f"{''.join(['-'] * len(header))}" ) - super(BIDSError, self).__init__(self.msg) + super().__init__(self.msg) self.bids_root = bids_root @@ -112,8 +112,8 @@ def collect_participants(layout, participant_label=None, strict=False): # Error: bids_dir does not contain subjects if not all_participants: raise BIDSError( - "Could not find participants. 
Please make sure the BIDS derivatives " - "are accessible to Docker/ are in BIDS directory structure.", + 'Could not find participants. Please make sure the BIDS derivatives ' + 'are accessible to Docker/ are in BIDS directory structure.', layout, ) @@ -125,7 +125,7 @@ def collect_participants(layout, participant_label=None, strict=False): participant_label = [participant_label] # Drop sub- prefixes - participant_label = [sub[4:] if sub.startswith("sub-") else sub for sub in participant_label] + participant_label = [sub[4:] if sub.startswith('sub-') else sub for sub in participant_label] # Remove duplicates participant_label = sorted(set(participant_label)) # Remove labels not found @@ -143,7 +143,7 @@ def collect_participants(layout, participant_label=None, strict=False): ) if strict: raise exc - warnings.warn(exc.msg, BIDSWarning) + warnings.warn(exc.msg, BIDSWarning, stacklevel=2) return found_label @@ -171,15 +171,15 @@ def collect_data( %(layout)s subj_data : dict """ - _spec = yaml.safe_load(load_data.readable("io_spec.yaml").read_text()) - queries = _spec["queries"]["base"] - if input_type in ("hcp", "dcan", "ukb"): + _spec = yaml.safe_load(load_data.readable('io_spec.yaml').read_text()) + queries = _spec['queries']['base'] + if input_type in ('hcp', 'dcan', 'ukb'): # HCP/DCAN data have anats only in standard space - queries["t1w"]["space"] = "MNI152NLin6Asym" - queries["t2w"]["space"] = "MNI152NLin6Asym" - queries["anat_brainmask"]["space"] = "MNI152NLin6Asym" + queries['t1w']['space'] = 'MNI152NLin6Asym' + queries['t2w']['space'] = 'MNI152NLin6Asym' + queries['anat_brainmask']['space'] = 'MNI152NLin6Asym' - queries["bold"]["extension"] = ".dtseries.nii" if (file_format == "cifti") else ".nii.gz" + queries['bold']['extension'] = '.dtseries.nii' if (file_format == 'cifti') else '.nii.gz' # Apply filters. These may override anything. bids_filters = bids_filters or {} @@ -188,10 +188,10 @@ def collect_data( queries[acq].update(bids_filters[acq]) # Select the best available space. - if "space" in queries["bold"]: + if 'space' in queries['bold']: # Hopefully no one puts in multiple spaces here, # but we'll grab the first one with available data if they did. - allowed_spaces = ensure_list(queries["bold"]["space"]) + allowed_spaces = ensure_list(queries['bold']['space']) else: allowed_spaces = INPUT_TYPE_ALLOWED_SPACES.get( input_type, @@ -199,15 +199,15 @@ def collect_data( )[file_format] for space in allowed_spaces: - queries["bold"]["space"] = space - bold_data = layout.get(**queries["bold"]) + queries['bold']['space'] = space + bold_data = layout.get(**queries['bold']) if bold_data: # will leave the best available space in the query break if not bold_data: - filenames = "\n\t".join( - [f.path for f in layout.get(extension=[".nii.gz", ".dtseries.nii"])] + filenames = '\n\t'.join( + [f.path for f in layout.get(extension=['.nii.gz', '.dtseries.nii'])] ) raise FileNotFoundError( f"No BOLD data found in allowed spaces ({', '.join(allowed_spaces)}).\n\n" @@ -215,24 +215,24 @@ def collect_data( f"Found files:\n\n{filenames}" ) - if file_format == "cifti": + if file_format == 'cifti': # Select the appropriate volumetric space for the CIFTI template. # This space will be used in the executive summary and T1w/T2w workflows. 
         allowed_spaces = INPUT_TYPE_ALLOWED_SPACES.get(
             input_type,
             DEFAULT_ALLOWED_SPACES,
-        )["nifti"]
+        )['nifti']

-        temp_bold_query = queries["bold"].copy()
-        temp_bold_query.pop("den", None)
-        temp_bold_query["extension"] = ".nii.gz"
+        temp_bold_query = queries['bold'].copy()
+        temp_bold_query.pop('den', None)
+        temp_bold_query['extension'] = '.nii.gz'

-        temp_xfm_query = queries["anat_to_template_xfm"].copy()
+        temp_xfm_query = queries['anat_to_template_xfm'].copy()

         for volspace in allowed_spaces:
-            temp_bold_query["space"] = volspace
+            temp_bold_query['space'] = volspace
             bold_data = layout.get(**temp_bold_query)

-            temp_xfm_query["to"] = volspace
+            temp_xfm_query['to'] = volspace
             transform_files = layout.get(**temp_xfm_query)

             if bold_data and transform_files:
@@ -241,86 +241,86 @@ def collect_data(

         if not bold_data or not transform_files:
             raise FileNotFoundError(
-                f"No BOLD NIfTI or transforms found to allowed space ({volspace})"
+                f'No BOLD NIfTI or transforms found to allowed space ({volspace})'
             )

-        queries["anat_to_template_xfm"]["to"] = volspace
-        queries["template_to_anat_xfm"]["from"] = volspace
-        queries["anat_brainmask"]["space"] = volspace
+        queries['anat_to_template_xfm']['to'] = volspace
+        queries['template_to_anat_xfm']['from'] = volspace
+        queries['anat_brainmask']['space'] = volspace
     else:
         # use the BOLD file's space if the BOLD file is a nifti.
-        queries["anat_to_template_xfm"]["to"] = queries["bold"]["space"]
-        queries["template_to_anat_xfm"]["from"] = queries["bold"]["space"]
-        queries["anat_brainmask"]["space"] = queries["bold"]["space"]
+        queries['anat_to_template_xfm']['to'] = queries['bold']['space']
+        queries['template_to_anat_xfm']['from'] = queries['bold']['space']
+        queries['anat_brainmask']['space'] = queries['bold']['space']

     # Grab the first (and presumably best) density and resolution if there are multiple.
     # This probably works well for resolution (1 typically means 1x1x1,
     # 2 typically means 2x2x2, etc.), but probably doesn't work well for density.
-    resolutions = layout.get_res(**queries["bold"])
+    resolutions = layout.get_res(**queries['bold'])
     if len(resolutions) >= 1:
         # This will also select res-* when there are both res-* and native-resolution files.
-        queries["bold"]["res"] = resolutions[0]
+        queries['bold']['res'] = resolutions[0]

-    densities = layout.get_den(**queries["bold"])
+    densities = layout.get_den(**queries['bold'])
     if len(densities) >= 1:
-        queries["bold"]["den"] = densities[0]
+        queries['bold']['den'] = densities[0]

     # Check for anatomical images, and determine if T2w xfms must be used.
-    t1w_files = layout.get(return_type="file", subject=participant_label, **queries["t1w"])
-    t2w_files = layout.get(return_type="file", subject=participant_label, **queries["t2w"])
+    t1w_files = layout.get(return_type='file', subject=participant_label, **queries['t1w'])
+    t2w_files = layout.get(return_type='file', subject=participant_label, **queries['t2w'])
     if not t1w_files and not t2w_files:
-        raise FileNotFoundError("No T1w or T2w files found.")
+        raise FileNotFoundError('No T1w or T2w files found.')
     elif t1w_files and t2w_files:
-        LOGGER.warning("Both T1w and T2w found. Checking for T1w-space T2w.")
-        temp_query = queries["t2w"].copy()
-        temp_query["space"] = "T1w"
-        temp_t2w_files = layout.get(return_type="file", subject=participant_label, **temp_query)
+        LOGGER.warning('Both T1w and T2w found. Checking for T1w-space T2w.')
+        temp_query = queries['t2w'].copy()
+        temp_query['space'] = 'T1w'
+        temp_t2w_files = layout.get(return_type='file', subject=participant_label, **temp_query)
         if not temp_t2w_files:
-            LOGGER.warning("No T1w-space T2w found. Checking for T2w-space T1w.")
-            temp_query = queries["t1w"].copy()
-            temp_query["space"] = "T2w"
+            LOGGER.warning('No T1w-space T2w found. Checking for T2w-space T1w.')
+            temp_query = queries['t1w'].copy()
+            temp_query['space'] = 'T2w'
             temp_t1w_files = layout.get(
-                return_type="file",
+                return_type='file',
                 subject=participant_label,
                 **temp_query,
             )
-            queries["t1w"]["space"] = "T2w"
+            queries['t1w']['space'] = 'T2w'
             if not temp_t1w_files:
-                LOGGER.warning("No T2w-space T1w found. Attempting T2w-only processing.")
-                temp_query = queries["anat_to_template_xfm"].copy()
-                temp_query["from"] = "T2w"
+                LOGGER.warning('No T2w-space T1w found. Attempting T2w-only processing.')
+                temp_query = queries['anat_to_template_xfm'].copy()
+                temp_query['from'] = 'T2w'
                 temp_xfm_files = layout.get(
-                    return_type="file",
+                    return_type='file',
                     subject=participant_label,
                     **temp_query,
                 )
                 if not temp_xfm_files:
                     LOGGER.warning(
-                        "T2w-to-template transform not found. Attempting T1w-only processing."
+                        'T2w-to-template transform not found. Attempting T1w-only processing.'
                     )
-                    queries["t1w"]["space"] = ["T1w", None]
-                    queries["template_to_anat_xfm"]["to"] = "T1w"
-                    queries["anat_to_template_xfm"]["from"] = "T1w"
+                    queries['t1w']['space'] = ['T1w', None]
+                    queries['template_to_anat_xfm']['to'] = 'T1w'
+                    queries['anat_to_template_xfm']['from'] = 'T1w'
                 else:
-                    LOGGER.info("Performing T2w-only processing.")
-                    queries["template_to_anat_xfm"]["to"] = "T2w"
-                    queries["anat_to_template_xfm"]["from"] = "T2w"
+                    LOGGER.info('Performing T2w-only processing.')
+                    queries['template_to_anat_xfm']['to'] = 'T2w'
+                    queries['anat_to_template_xfm']['from'] = 'T2w'
            else:
-                LOGGER.warning("T2w-space T1w found. Processing anatomical images in T2w space.")
+                LOGGER.warning('T2w-space T1w found. Processing anatomical images in T2w space.')
         else:
-            LOGGER.warning("T1w-space T2w found. Processing anatomical images in T1w space.")
-            queries["t2w"]["space"] = "T1w"
-            queries["t1w"]["space"] = ["T1w", None]
+            LOGGER.warning('T1w-space T2w found. Processing anatomical images in T1w space.')
+            queries['t2w']['space'] = 'T1w'
+            queries['t1w']['space'] = ['T1w', None]
     elif t2w_files and not t1w_files:
-        LOGGER.warning("T2w found, but no T1w. Enabling T2w-only processing.")
-        queries["template_to_anat_xfm"]["to"] = "T2w"
-        queries["anat_to_template_xfm"]["from"] = "T2w"
+        LOGGER.warning('T2w found, but no T1w. Enabling T2w-only processing.')
+        queries['template_to_anat_xfm']['to'] = 'T2w'
+        queries['anat_to_template_xfm']['from'] = 'T2w'

     # Search for the files.
     subj_data = {
         dtype: sorted(
             layout.get(
-                return_type="file",
+                return_type='file',
                 subject=participant_label,
                 **query,
             )
@@ -331,14 +331,14 @@ def collect_data(

     # Check the query results.
for field, filenames in subj_data.items(): # All fields except the BOLD data should have a single file - if field != "bold" and isinstance(filenames, list): - if field not in ("t1w", "t2w") and not filenames: - raise FileNotFoundError(f"No {field} found with query: {queries[field]}") + if field != 'bold' and isinstance(filenames, list): + if field not in ('t1w', 't2w') and not filenames: + raise FileNotFoundError(f'No {field} found with query: {queries[field]}') if len(filenames) == 1: subj_data[field] = filenames[0] elif len(filenames) > 1: - filenames_str = "\n\t".join(filenames) + filenames_str = '\n\t'.join(filenames) LOGGER.warning(f"Multiple files found for query '{field}':\n\t{filenames_str}") subj_data[field] = filenames[0] else: @@ -346,7 +346,7 @@ def collect_data( LOGGER.log( 25, - f"Collected data:\n{yaml.dump(subj_data, default_flow_style=False, indent=4)}", + f'Collected data:\n{yaml.dump(subj_data, default_flow_style=False, indent=4)}', ) return subj_data @@ -381,8 +381,8 @@ def collect_mesh_data(layout, participant_label, bids_filters): # Surfaces to use for brainsprite and anatomical workflow # The base surfaces can be used to generate the derived surfaces. # The base surfaces may be in native or standard space. - _spec = yaml.safe_load(load_data.readable("io_spec.yaml").read_text()) - queries = _spec["queries"]["mesh"] + _spec = yaml.safe_load(load_data.readable('io_spec.yaml').read_text()) + queries = _spec['queries']['mesh'] # Apply filters. These may override anything. bids_filters = bids_filters or {} @@ -395,43 +395,43 @@ def collect_mesh_data(layout, participant_label, bids_filters): standard_space_mesh = True for name, query in queries.items(): # Don't look for fsLR-space versions of the subject spheres. - if "subject_sphere" in name: + if 'subject_sphere' in name: continue temp_files = layout.get( - return_type="file", + return_type='file', subject=participant_label, - space="fsLR", - den="32k", + space='fsLR', + den='32k', **query, ) if len(temp_files) == 0: standard_space_mesh = False elif len(temp_files) > 1: - LOGGER.warning(f"{name}: More than one standard-space surface found.") + LOGGER.warning(f'{name}: More than one standard-space surface found.') if not standard_space_mesh: - LOGGER.info("No standard-space surfaces found.") + LOGGER.info('No standard-space surfaces found.') # Now that we know if there are standard-space surfaces available, we can grab the files. 
- query_extras = {"space": None} + query_extras = {'space': None} if standard_space_mesh: query_extras = { - "space": "fsLR", - "den": "32k", + 'space': 'fsLR', + 'den': '32k', } initial_mesh_files = {} for name, query in queries.items(): queries[name] = { - "subject": participant_label, + 'subject': participant_label, **query, } - if "subject_sphere" not in name: + if 'subject_sphere' not in name: queries[name].update(query_extras) - initial_mesh_files[name] = layout.get(return_type="file", **queries[name]) + initial_mesh_files[name] = layout.get(return_type='file', **queries[name]) mesh_files = {} mesh_available = True @@ -442,38 +442,38 @@ def collect_mesh_data(layout, participant_label, bids_filters): elif len(surface_files_) == 0: mesh_files[dtype] = None # We don't need subject spheres if we have standard-space meshes already - if not ("subject_sphere" in dtype and standard_space_mesh): + if not ('subject_sphere' in dtype and standard_space_mesh): mesh_available = False else: mesh_available = False - surface_str = "\n\t".join(surface_files_) + surface_str = '\n\t'.join(surface_files_) raise ValueError( - "More than one surface found.\n" - f"Surfaces found:\n\t{surface_str}\n" - f"Query: {queries[dtype]}" + 'More than one surface found.\n' + f'Surfaces found:\n\t{surface_str}\n' + f'Query: {queries[dtype]}' ) # Check for *_space-dhcpAsym_desc-reg_sphere.surf.gii # If we find it, we assume segmentation was done with MCRIBS. Otherwise, assume FreeSurfer. dhcp_file = layout.get( - return_type="file", - datatype="anat", + return_type='file', + datatype='anat', subject=participant_label, - hemi="L", - space="dhcpAsym", - desc="reg", - suffix="sphere", - extension=".surf.gii", + hemi='L', + space='dhcpAsym', + desc='reg', + suffix='sphere', + extension='.surf.gii', ) - software = "MCRIBS" if bool(len(dhcp_file)) else "FreeSurfer" + software = 'MCRIBS' if bool(len(dhcp_file)) else 'FreeSurfer' LOGGER.log( 25, - f"Collected mesh files:\n{yaml.dump(mesh_files, default_flow_style=False, indent=4)}", + f'Collected mesh files:\n{yaml.dump(mesh_files, default_flow_style=False, indent=4)}', ) if mesh_available: - LOGGER.log(25, f"Assuming segmentation was performed with {software}.") + LOGGER.log(25, f'Assuming segmentation was performed with {software}.') return mesh_available, standard_space_mesh, software, mesh_files @@ -500,8 +500,8 @@ def collect_morphometry_data(layout, participant_label, bids_filters): Dictionary of surface file identifiers and their paths. If the surface files weren't found, then the paths will be Nones. """ - _spec = yaml.safe_load(load_data.readable("io_spec.yaml").read_text()) - queries = _spec["queries"]["morphometry"] + _spec = yaml.safe_load(load_data.readable('io_spec.yaml').read_text()) + queries = _spec['queries']['morphometry'] # Apply filters. These may override anything. 
bids_filters = bids_filters or {} @@ -512,18 +512,18 @@ def collect_morphometry_data(layout, participant_label, bids_filters): morphometry_files = {} for name, query in queries.items(): files = layout.get( - return_type="file", + return_type='file', subject=participant_label, **query, ) if len(files) == 1: morphometry_files[name] = files[0] elif len(files) > 1: - surface_str = "\n\t".join(files) + surface_str = '\n\t'.join(files) raise ValueError( - f"More than one {name} found.\n" - f"Surfaces found:\n\t{surface_str}\n" - f"Query: {query}" + f'More than one {name} found.\n' + f'Surfaces found:\n\t{surface_str}\n' + f'Query: {query}' ) else: morphometry_files[name] = None @@ -534,8 +534,8 @@ def collect_morphometry_data(layout, participant_label, bids_filters): LOGGER.log( 25, ( - f"Collected morphometry files:\n" - f"{yaml.dump(morphometry_files, default_flow_style=False, indent=4)}" + f'Collected morphometry files:\n' + f'{yaml.dump(morphometry_files, default_flow_style=False, indent=4)}' ), ) @@ -564,97 +564,97 @@ def collect_run_data(layout, bold_file, file_format, target_space): bids_file = layout.get_file(bold_file) run_data, metadata = {}, {} - run_data["motion_file"] = layout.get_nearest( + run_data['motion_file'] = layout.get_nearest( bids_file.path, strict=True, - ignore_strict_entities=["space", "res", "den", "desc", "suffix", "extension"], - desc="confounds", - suffix="timeseries", - extension=".tsv", + ignore_strict_entities=['space', 'res', 'den', 'desc', 'suffix', 'extension'], + desc='confounds', + suffix='timeseries', + extension='.tsv', ) - if not run_data["motion_file"]: - raise FileNotFoundError(f"No confounds file detected for {bids_file.path}") + if not run_data['motion_file']: + raise FileNotFoundError(f'No confounds file detected for {bids_file.path}') - run_data["motion_json"] = layout.get_nearest(run_data["motion_file"], extension=".json") + run_data['motion_json'] = layout.get_nearest(run_data['motion_file'], extension='.json') - metadata["bold_metadata"] = layout.get_metadata(bold_file) + metadata['bold_metadata'] = layout.get_metadata(bold_file) # Ensure that we know the TR - if "RepetitionTime" not in metadata["bold_metadata"].keys(): - metadata["bold_metadata"]["RepetitionTime"] = _get_tr(bold_file) + if 'RepetitionTime' not in metadata['bold_metadata'].keys(): + metadata['bold_metadata']['RepetitionTime'] = _get_tr(bold_file) - if file_format == "nifti": - run_data["boldref"] = layout.get_nearest( + if file_format == 'nifti': + run_data['boldref'] = layout.get_nearest( bids_file.path, strict=True, - ignore_strict_entities=["desc", "suffix"], - suffix="boldref", - extension=[".nii", ".nii.gz"], + ignore_strict_entities=['desc', 'suffix'], + suffix='boldref', + extension=['.nii', '.nii.gz'], ) - run_data["boldmask"] = layout.get_nearest( + run_data['boldmask'] = layout.get_nearest( bids_file.path, strict=True, - ignore_strict_entities=["desc", "suffix"], - desc="brain", - suffix="mask", - extension=[".nii", ".nii.gz"], + ignore_strict_entities=['desc', 'suffix'], + desc='brain', + suffix='mask', + extension=['.nii', '.nii.gz'], ) else: # Split cohort out of the space for MNIInfant templates. 
cohort = None - if "+" in target_space: - target_space, cohort = target_space.split("+") + if '+' in target_space: + target_space, cohort = target_space.split('+') - run_data["boldref"] = layout.get_nearest( + run_data['boldref'] = layout.get_nearest( bids_file.path, strict=True, ignore_strict_entities=[ - "cohort", - "space", - "res", - "den", - "desc", - "suffix", - "extension", + 'cohort', + 'space', + 'res', + 'den', + 'desc', + 'suffix', + 'extension', ], space=target_space, cohort=cohort, - suffix="boldref", - extension=[".nii", ".nii.gz"], - invalid_filters="allow", + suffix='boldref', + extension=['.nii', '.nii.gz'], + invalid_filters='allow', ) - run_data["nifti_file"] = layout.get_nearest( + run_data['nifti_file'] = layout.get_nearest( bids_file.path, strict=True, ignore_strict_entities=[ - "cohort", - "space", - "res", - "den", - "desc", - "suffix", - "extension", + 'cohort', + 'space', + 'res', + 'den', + 'desc', + 'suffix', + 'extension', ], space=target_space, cohort=cohort, - desc="preproc", - suffix="bold", - extension=[".nii", ".nii.gz"], - invalid_filters="allow", + desc='preproc', + suffix='bold', + extension=['.nii', '.nii.gz'], + invalid_filters='allow', ) LOGGER.log( 25, ( - f"Collected run data for {os.path.basename(bold_file)}:\n" - f"{yaml.dump(run_data, default_flow_style=False, indent=4)}" + f'Collected run data for {os.path.basename(bold_file)}:\n' + f'{yaml.dump(run_data, default_flow_style=False, indent=4)}' ), ) for k, v in run_data.items(): if v is None: - raise FileNotFoundError(f"No {k} file found for {bids_file.path}") + raise FileNotFoundError(f'No {k} file found for {bids_file.path}') - metadata[f"{k}_metadata"] = layout.get_metadata(v) + metadata[f'{k}_metadata'] = layout.get_metadata(v) run_data.update(metadata) @@ -676,12 +676,12 @@ def collect_confounds( # Recommended after PyBIDS 12.1 ignore_patterns = [ - "code", - "stimuli", - "models", - re.compile(r"\/\.\w+|^\.\w+"), # hidden files + 'code', + 'stimuli', + 'models', + re.compile(r'\/\.\w+|^\.\w+'), # hidden files re.compile( - r"sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/(anat|beh|dwi|eeg|ieeg|meg|perf|pet|physio)" + r'sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/(anat|beh|dwi|eeg|ieeg|meg|perf|pet|physio)' ), ] _indexer = BIDSLayoutIndexer( @@ -689,61 +689,61 @@ def collect_confounds( ignore=ignore_patterns, index_metadata=False, # we don't need metadata to find confound files ) - xcp_d_config = str(load_data("xcp_d_bids_config2.json")) + xcp_d_config = str(load_data('xcp_d_bids_config2.json')) # Step 0: Determine derivatives we care about for confounds. req_datasets = [] - for confound_def in confound_spec["confounds"].values(): - req_datasets.append(confound_def["dataset"]) + for confound_def in confound_spec['confounds'].values(): + req_datasets.append(confound_def['dataset']) - req_datasets = sorted(list(set(req_datasets))) + req_datasets = sorted(set(req_datasets)) # Step 1: Build a dictionary of dataset: layout pairs. layout_dict = {} - layout_dict["preprocessed"] = preproc_dataset + layout_dict['preprocessed'] = preproc_dataset if derivatives_datasets is not None: for k, v in derivatives_datasets.items(): # Don't index datasets we don't need for confounds. 
if k not in req_datasets: - print(f"Not required: {k}") + print(f'Not required: {k}') continue - if isinstance(v, (Path, str)): + if isinstance(v, Path | str): layout = BIDSLayout( v, - config=["bids", "derivatives", xcp_d_config], + config=['bids', 'derivatives', xcp_d_config], indexer=_indexer, ) - if layout.get_dataset_description().get("DatasetType") != "derivatives": - print(f"Dataset {k} is not a derivatives dataset. Skipping.") + if layout.get_dataset_description().get('DatasetType') != 'derivatives': + print(f'Dataset {k} is not a derivatives dataset. Skipping.') layout_dict[k] = layout else: layout_dict[k] = v # Step 2: Loop over the confounds spec and search for each file in the corresponding dataset. - confounds = dict() - for confound_name, confound_def in confound_spec["confounds"].items(): - if confound_def["dataset"] not in layout_dict.keys(): + confounds = {} + for confound_name, confound_def in confound_spec['confounds'].items(): + if confound_def['dataset'] not in layout_dict.keys(): raise ValueError( f"Missing dataset required by confound spec: *{confound_def['dataset']}*. " "Did you provide it with the `--datasets` flag?" ) - layout = layout_dict[confound_def["dataset"]] + layout = layout_dict[confound_def['dataset']] bold_file_entities = bold_file.get_entities() - query = {**bold_file_entities, **confound_def["query"]} + query = {**bold_file_entities, **confound_def['query']} confound_file = layout.get(**query) if not confound_file: raise FileNotFoundError( - f"Could not find confound file for {confound_name} with query {query}" + f'Could not find confound file for {confound_name} with query {query}' ) confound_file = confound_file[0] confound_metadata = confound_file.get_metadata() confounds[confound_name] = {} - confounds[confound_name]["file"] = confound_file.path - confounds[confound_name]["metadata"] = confound_metadata + confounds[confound_name]['file'] = confound_file.path + confounds[confound_name]['metadata'] = confound_metadata return confounds @@ -752,7 +752,7 @@ def write_derivative_description( fmri_dir, output_dir, atlases=None, - dataset_links={}, + dataset_links=None, ): """Write dataset_description.json file for derivatives. @@ -772,60 +772,62 @@ def write_derivative_description( from xcp_d.__about__ import DOWNLOAD_URL, __version__ - orig_dset_description = os.path.join(fmri_dir, "dataset_description.json") + dataset_links = dataset_links or {} + + orig_dset_description = os.path.join(fmri_dir, 'dataset_description.json') if not os.path.isfile(orig_dset_description): - raise FileNotFoundError(f"Dataset description DNE: {orig_dset_description}") + raise FileNotFoundError(f'Dataset description DNE: {orig_dset_description}') # Base the new dataset description on the preprocessing pipeline's dataset description - with open(orig_dset_description, "r") as fo: + with open(orig_dset_description) as fo: desc = json.load(fo) # Check if the dataset type is derivative - if "DatasetType" not in desc.keys(): + if 'DatasetType' not in desc.keys(): LOGGER.warning(f"DatasetType key not in {orig_dset_description}. Assuming 'derivative'.") - desc["DatasetType"] = "derivative" + desc['DatasetType'] = 'derivative' - if desc.get("DatasetType", "derivative") != "derivative": + if desc.get('DatasetType', 'derivative') != 'derivative': raise ValueError( f"DatasetType key in {orig_dset_description} is not 'derivative'. " - "XCP-D only works on derivative datasets." + 'XCP-D only works on derivative datasets.' 
) # Update dataset description # TODO: Add derivatives' dataset_description.json info as well. Especially the GeneratedBy. - desc["Name"] = "XCP-D: A Robust Postprocessing Pipeline of fMRI data" - generated_by = desc.get("GeneratedBy", []) + desc['Name'] = 'XCP-D: A Robust Postprocessing Pipeline of fMRI data' + generated_by = desc.get('GeneratedBy', []) generated_by.insert( 0, { - "Name": "xcp_d", - "Version": __version__, - "CodeURL": DOWNLOAD_URL, + 'Name': 'xcp_d', + 'Version': __version__, + 'CodeURL': DOWNLOAD_URL, }, ) - desc["GeneratedBy"] = generated_by - desc["HowToAcknowledge"] = "Include the generated boilerplate in the methods section." + desc['GeneratedBy'] = generated_by + desc['HowToAcknowledge'] = 'Include the generated boilerplate in the methods section.' dataset_links = dataset_links.copy() # Replace local templateflow path with URL - dataset_links["templateflow"] = "https://github.com/templateflow/templateflow" + dataset_links['templateflow'] = 'https://github.com/templateflow/templateflow' if atlases: - dataset_links["atlases"] = os.path.join(output_dir, "atlases") + dataset_links['atlases'] = os.path.join(output_dir, 'atlases') # Don't inherit DatasetLinks from preprocessing derivatives - desc["DatasetLinks"] = {k: str(v) for k, v in dataset_links.items()} + desc['DatasetLinks'] = {k: str(v) for k, v in dataset_links.items()} - xcpd_dset_description = Path(output_dir / "dataset_description.json") + xcpd_dset_description = Path(output_dir / 'dataset_description.json') if xcpd_dset_description.is_file(): old_desc = json.loads(xcpd_dset_description.read_text()) - old_version = old_desc["GeneratedBy"][0]["Version"] + old_version = old_desc['GeneratedBy'][0]['Version'] if Version(__version__).public != Version(old_version).public: - LOGGER.warning(f"Previous output generated by version {old_version} found.") + LOGGER.warning(f'Previous output generated by version {old_version} found.') - for k, v in desc["DatasetLinks"].items(): - if k in old_desc["DatasetLinks"].keys() and old_desc["DatasetLinks"][k] != str(v): + for k, v in desc['DatasetLinks'].items(): + if k in old_desc['DatasetLinks'].keys() and old_desc['DatasetLinks'][k] != str(v): LOGGER.warning( f"DatasetLink '{k}' does not match ({v} != {old_desc['DatasetLinks'][k]})." 
) @@ -847,30 +849,30 @@ def write_atlas_dataset_description(atlas_dir): from xcp_d.__about__ import DOWNLOAD_URL, __version__ desc = { - "Name": "XCP-D Atlases", - "DatasetType": "atlas", - "GeneratedBy": [ + 'Name': 'XCP-D Atlases', + 'DatasetType': 'atlas', + 'GeneratedBy': [ { - "Name": "xcp_d", - "Version": __version__, - "CodeURL": DOWNLOAD_URL, + 'Name': 'xcp_d', + 'Version': __version__, + 'CodeURL': DOWNLOAD_URL, }, ], - "HowToAcknowledge": "Include the generated boilerplate in the methods section.", + 'HowToAcknowledge': 'Include the generated boilerplate in the methods section.', } os.makedirs(atlas_dir, exist_ok=True) - atlas_dset_description = os.path.join(atlas_dir, "dataset_description.json") + atlas_dset_description = os.path.join(atlas_dir, 'dataset_description.json') if os.path.isfile(atlas_dset_description): - with open(atlas_dset_description, "r") as fo: + with open(atlas_dset_description) as fo: old_desc = json.load(fo) - old_version = old_desc["GeneratedBy"][0]["Version"] + old_version = old_desc['GeneratedBy'][0]['Version'] if Version(__version__).public != Version(old_version).public: - LOGGER.warning(f"Previous output generated by version {old_version} found.") + LOGGER.warning(f'Previous output generated by version {old_version} found.') else: - with open(atlas_dset_description, "w") as fo: + with open(atlas_dset_description, 'w') as fo: json.dump(desc, fo, indent=4, sort_keys=True) @@ -893,38 +895,38 @@ def get_preproc_pipeline_info(input_type, fmri_dir): import os references = { - "fmriprep": "[@esteban2019fmriprep;@esteban2020analysis, RRID:SCR_016216]", - "dcan": "[@Feczko_Earl_perrone_Fair_2021;@feczko2021adolescent]", - "hcp": "[@glasser2013minimal]", - "nibabies": "[@goncalves_mathias_2022_7072346]", - "ukb": "[@miller2016multimodal]", + 'fmriprep': '[@esteban2019fmriprep;@esteban2020analysis, RRID:SCR_016216]', + 'dcan': '[@Feczko_Earl_perrone_Fair_2021;@feczko2021adolescent]', + 'hcp': '[@glasser2013minimal]', + 'nibabies': '[@goncalves_mathias_2022_7072346]', + 'ukb': '[@miller2016multimodal]', } if input_type not in references.keys(): raise ValueError(f"Unsupported input_type '{input_type}'") info_dict = { - "name": input_type, - "version": "unknown", - "references": references[input_type], + 'name': input_type, + 'version': 'unknown', + 'references': references[input_type], } # Now try to modify the dictionary based on the dataset description - dataset_description = os.path.join(fmri_dir, "dataset_description.json") + dataset_description = os.path.join(fmri_dir, 'dataset_description.json') if os.path.isfile(dataset_description): with open(dataset_description) as f: dataset_dict = json.load(f) - if "GeneratedBy" in dataset_dict.keys(): - info_dict["name"] = dataset_dict["GeneratedBy"][0]["Name"] - info_dict["version"] = ( - dataset_dict["GeneratedBy"][0]["Version"] - if "Version" in dataset_dict["GeneratedBy"][0].keys() - else "unknown" + if 'GeneratedBy' in dataset_dict.keys(): + info_dict['name'] = dataset_dict['GeneratedBy'][0]['Name'] + info_dict['version'] = ( + dataset_dict['GeneratedBy'][0]['Version'] + if 'Version' in dataset_dict['GeneratedBy'][0].keys() + else 'unknown' ) else: - LOGGER.warning(f"GeneratedBy key DNE: {dataset_description}. Using partial info.") + LOGGER.warning(f'GeneratedBy key DNE: {dataset_description}. Using partial info.') else: - LOGGER.warning(f"Dataset description DNE: {dataset_description}. Using partial info.") + LOGGER.warning(f'Dataset description DNE: {dataset_description}. 
Using partial info.') return info_dict @@ -972,16 +974,16 @@ def get_entity(filename, entity): # Allow + sign, which is not allowed in BIDS, # but is used by templateflow for the MNIInfant template. - entity_values = re.findall(f"{entity}-([a-zA-Z0-9+]+)", file_base) + entity_values = re.findall(f'{entity}-([a-zA-Z0-9+]+)', file_base) entity_value = None if len(entity_values) < 1 else entity_values[0] - if entity == "space" and entity_value is None: + if entity == 'space' and entity_value is None: foldername = os.path.basename(folder) - if foldername == "anat": - entity_value = "T1w" - elif foldername == "func": - entity_value = "native" + if foldername == 'anat': + entity_value = 'T1w' + elif foldername == 'func': + entity_value = 'native' else: - raise ValueError(f"Unknown space for {filename}") + raise ValueError(f'Unknown space for {filename}') return entity_value @@ -1010,19 +1012,19 @@ def group_across_runs(in_files): # so that any cases where files are not already in ascending run order get fixed. run_numbers, directions = [], [] for in_file in in_files: - run = get_entity(in_file, "run") + run = get_entity(in_file, 'run') if run is None: run = 0 - direction = get_entity(in_file, "dir") + direction = get_entity(in_file, 'dir') if direction is None: - direction = "none" + direction = 'none' run_numbers.append(int(run)) directions.append(direction) # Combine the three lists into a list of tuples - combined_data = list(zip(run_numbers, directions, in_files)) + combined_data = list(zip(run_numbers, directions, in_files, strict=False)) # Sort the list of tuples first by run and then by direction sorted_data = sorted(combined_data, key=lambda x: (x[0], x[1], x[2])) @@ -1031,8 +1033,8 @@ def group_across_runs(in_files): sorted_in_files = [item[2] for item in sorted_data] # Extract the unique sets of entities (i.e., the filename, minus the run and dir entities). - unique_filenames = [re.sub("_run-[0-9]+_", "_", os.path.basename(f)) for f in sorted_in_files] - unique_filenames = [re.sub("_dir-[0-9a-zA-Z]+_", "_", f) for f in unique_filenames] + unique_filenames = [re.sub('_run-[0-9]+_', '_', os.path.basename(f)) for f in sorted_in_files] + unique_filenames = [re.sub('_dir-[0-9a-zA-Z]+_', '_', f) for f in unique_filenames] # Assign each in_file to a group of files with the same entities, except run. out_files, grouped_unique_filenames = [], [] @@ -1072,16 +1074,16 @@ def check_pipeline_version(pipeline_name, cvers, data_desc): desc = json.loads(data_desc.read_text()) generators = { - generator["Name"]: generator.get("Version", "0+unknown") - for generator in desc.get("GeneratedBy", []) + generator['Name']: generator.get('Version', '0+unknown') + for generator in desc.get('GeneratedBy', []) } dvers = generators.get(pipeline_name) if dvers is None: # Very old style - dvers = desc.get("PipelineDescription", {}).get("Version", "0+unknown") + dvers = desc.get('PipelineDescription', {}).get('Version', '0+unknown') if Version(cvers).public != Version(dvers).public: - return f"Previous output generated by version {dvers} found." + return f'Previous output generated by version {dvers} found.' 
def _find_nearest_path(path_dict, input_path): @@ -1128,7 +1130,7 @@ def _find_nearest_path(path_dict, input_path): 'bids::sub-01/func/sub-01_task-rest_bold.nii.gz' """ # Don't modify BIDS-URIs - if isinstance(input_path, str) and input_path.startswith("bids:"): + if isinstance(input_path, str) and input_path.startswith('bids:'): return input_path input_path = Path(input_path) @@ -1143,7 +1145,7 @@ def _find_nearest_path(path_dict, input_path): if matching_path is None: matching_path = str(input_path.absolute()) else: - matching_path = f"{matching_key}{matching_path}" + matching_path = f'{matching_key}{matching_path}' return matching_path @@ -1155,8 +1157,8 @@ def _get_bidsuris(in_files, dataset_links, out_dir): # Remove undefined inputs in_files = [f for f in in_files if isdefined(f)] # Convert the dataset links to BIDS URI prefixes - updated_keys = {f"bids:{k}:": Path(v) for k, v in dataset_links.items()} - updated_keys["bids::"] = Path(out_dir) + updated_keys = {f'bids:{k}:': Path(v) for k, v in dataset_links.items()} + updated_keys['bids::'] = Path(out_dir) # Convert the paths to BIDS URIs out = [_find_nearest_path(updated_keys, f) for f in in_files] return out diff --git a/xcp_d/utils/boilerplate.py b/xcp_d/utils/boilerplate.py index b25122dad..3ebdc8945 100644 --- a/xcp_d/utils/boilerplate.py +++ b/xcp_d/utils/boilerplate.py @@ -32,7 +32,7 @@ def describe_motion_parameters( import numpy as np from num2words import num2words - desc = "" + desc = '' if motion_filter_type: band_stop_min_adjusted, band_stop_max_adjusted, is_modified = _modify_motion_filter( motion_filter_type=motion_filter_type, @@ -40,47 +40,47 @@ def describe_motion_parameters( band_stop_max=band_stop_max, TR=TR, ) - if motion_filter_type == "notch": + if motion_filter_type == 'notch': n_filter_applications = int(np.floor(motion_filter_order / 4)) if is_modified: desc = ( - "The six translation and rotation head motion traces were " - f"band-stop filtered to remove signals between {band_stop_min_adjusted} and " - f"{band_stop_max_adjusted} breaths-per-minute " - f"(automatically modified from {band_stop_min} and {band_stop_max} BPM due " - "to Nyquist frequency constraints) using a(n) " - f"{num2words(n_filter_applications, ordinal=True)}-order notch filter, " - "based on @fair2020correction. " + 'The six translation and rotation head motion traces were ' + f'band-stop filtered to remove signals between {band_stop_min_adjusted} and ' + f'{band_stop_max_adjusted} breaths-per-minute ' + f'(automatically modified from {band_stop_min} and {band_stop_max} BPM due ' + 'to Nyquist frequency constraints) using a(n) ' + f'{num2words(n_filter_applications, ordinal=True)}-order notch filter, ' + 'based on @fair2020correction. ' ) else: desc = ( - "The six translation and rotation head motion traces were " - f"band-stop filtered to remove signals between {band_stop_min} and " - f"{band_stop_max} breaths-per-minute using a(n) " - f"{num2words(n_filter_applications, ordinal=True)}-order notch filter, " - "based on @fair2020correction. " + 'The six translation and rotation head motion traces were ' + f'band-stop filtered to remove signals between {band_stop_min} and ' + f'{band_stop_max} breaths-per-minute using a(n) ' + f'{num2words(n_filter_applications, ordinal=True)}-order notch filter, ' + 'based on @fair2020correction. 
' ) else: # lp n_filter_applications = int(np.floor(motion_filter_order / 2)) if is_modified: desc = ( - "The six translation and rotation head motion traces were " - f"low-pass filtered below {band_stop_min_adjusted} breaths-per-minute " - f"(automatically modified from {band_stop_min} BPM due to Nyquist frequency " - "constraints) using a(n) " - f"{num2words(n_filter_applications, ordinal=True)}-order Butterworth filter, " - "based on @gratton2020removal. " + 'The six translation and rotation head motion traces were ' + f'low-pass filtered below {band_stop_min_adjusted} breaths-per-minute ' + f'(automatically modified from {band_stop_min} BPM due to Nyquist frequency ' + 'constraints) using a(n) ' + f'{num2words(n_filter_applications, ordinal=True)}-order Butterworth filter, ' + 'based on @gratton2020removal. ' ) else: desc = ( - "The six translation and rotation head motion traces were " - f"low-pass filtered below {band_stop_min} breaths-per-minute " - "using a(n) " - f"{num2words(n_filter_applications, ordinal=True)}-order Butterworth filter, " - "based on @gratton2020removal. " + 'The six translation and rotation head motion traces were ' + f'low-pass filtered below {band_stop_min} breaths-per-minute ' + 'using a(n) ' + f'{num2words(n_filter_applications, ordinal=True)}-order Butterworth filter, ' + 'based on @gratton2020removal. ' ) - desc += "The Volterra expansion of these filtered motion parameters was then calculated. " + desc += 'The Volterra expansion of these filtered motion parameters was then calculated. ' return desc @@ -101,7 +101,7 @@ def describe_censoring(*, motion_filter_type, head_radius, fd_thresh, exact_scan desc : :obj:`str` A text description of the censoring procedure. """ - desc = "" + desc = '' if fd_thresh > 0: desc += ( "Framewise displacement was calculated from the " @@ -114,13 +114,13 @@ def describe_censoring(*, motion_filter_type, head_radius, fd_thresh, exact_scan if exact_scans and (fd_thresh > 0): desc += ( - " Additional sets of censoring volumes were randomly selected to produce additional " - f"correlation matrices limited to {list_to_str(exact_scans)} volumes." + ' Additional sets of censoring volumes were randomly selected to produce additional ' + f'correlation matrices limited to {list_to_str(exact_scans)} volumes.' ) elif exact_scans: desc += ( - "Volumes were randomly selected for censoring, to produce additional correlation " - f"matrices limited to {list_to_str(exact_scans)} volumes." + 'Volumes were randomly selected for censoring, to produce additional correlation ' + f'matrices limited to {list_to_str(exact_scans)} volumes.' ) return desc @@ -150,19 +150,19 @@ def describe_regression( A text description of the regression. """ if confounds_config is None: - return "No nuisance regression was performed." + return 'No nuisance regression was performed.' - desc = confounds_config["description"] + desc = confounds_config['description'] if (fd_thresh > 0) and motion_filter_type: # Censoring was done, so just refer back to the earlier description of the filter desc += ( - " Any motion parameters in the confounds file were filtered using the same " - "parameters as described above and the Volterra expansion was calculated." + ' Any motion parameters in the confounds file were filtered using the same ' + 'parameters as described above and the Volterra expansion was calculated.' 
) elif motion_filter_type: # Censoring was not done, so describe the filter here - desc += " " + describe_motion_parameters( + desc += ' ' + describe_motion_parameters( motion_filter_type=motion_filter_type, motion_filter_order=motion_filter_order, band_stop_min=band_stop_min, @@ -176,30 +176,30 @@ def describe_regression( def describe_atlases(atlases): """Build a text description of the atlases that will be used.""" atlas_descriptions = { - "Glasser": "the Glasser atlas [@Glasser_2016]", - "Gordon": "the Gordon atlas [@Gordon_2014]", - "Tian": "the Tian subcortical atlas [@tian2020topographic]", - "HCP": "the HCP CIFTI subcortical atlas [@glasser2013minimal]", - "MIDB": ( - "the MIDB precision brain atlas derived from ABCD data and thresholded at 75% " - "probability [@hermosillo2022precision]" + 'Glasser': 'the Glasser atlas [@Glasser_2016]', + 'Gordon': 'the Gordon atlas [@Gordon_2014]', + 'Tian': 'the Tian subcortical atlas [@tian2020topographic]', + 'HCP': 'the HCP CIFTI subcortical atlas [@glasser2013minimal]', + 'MIDB': ( + 'the MIDB precision brain atlas derived from ABCD data and thresholded at 75% ' + 'probability [@hermosillo2022precision]' ), - "MyersLabonte": ( - "the Myers-Labonte infant atlas thresholded at 50% probability [@myers2023functional]" + 'MyersLabonte': ( + 'the Myers-Labonte infant atlas thresholded at 50% probability [@myers2023functional]' ), } atlas_strings = [] described_atlases = [] - atlases_4s = [atlas for atlas in atlases if str(atlas).startswith("4S")] + atlases_4s = [atlas for atlas in atlases if str(atlas).startswith('4S')] described_atlases += atlases_4s if atlases_4s: parcels = [int(str(atlas[2:-7])) for atlas in atlases_4s] s = ( - "the Schaefer Supplemented with Subcortical Structures (4S) atlas " - "[@Schaefer_2017;@pauli2018high;@king2019functional;@najdenovska2018vivo;" - "@glasser2013minimal] " - f"at {len(atlases_4s)} different resolutions ({list_to_str(parcels)} parcels)" + 'the Schaefer Supplemented with Subcortical Structures (4S) atlas ' + '[@Schaefer_2017;@pauli2018high;@king2019functional;@najdenovska2018vivo;' + '@glasser2013minimal] ' + f'at {len(atlases_4s)} different resolutions ({list_to_str(parcels)} parcels)' ) atlas_strings.append(s) @@ -210,6 +210,6 @@ def describe_atlases(atlases): undescribed_atlases = [atlas for atlas in atlases if atlas not in described_atlases] for atlas in undescribed_atlases: - atlas_strings.append(f"the {atlas} atlas") + atlas_strings.append(f'the {atlas} atlas') return list_to_str(atlas_strings) diff --git a/xcp_d/utils/concatenation.py b/xcp_d/utils/concatenation.py index 001c210d0..aeae2ad3c 100644 --- a/xcp_d/utils/concatenation.py +++ b/xcp_d/utils/concatenation.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Functions for concatenating scans across runs.""" + import os from contextlib import suppress @@ -10,7 +11,7 @@ from nilearn.image import concat_imgs from nipype import logging -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') def concatenate_tsvs(tsv_files, out_file): @@ -28,14 +29,14 @@ def concatenate_tsvs(tsv_files, out_file): try: # Assume file has no header first. # If it has a header with string column names, this will go the except clause. 
- data = [np.loadtxt(tsv_file, delimiter="\t") for tsv_file in tsv_files] + data = [np.loadtxt(tsv_file, delimiter='\t') for tsv_file in tsv_files] data = np.vstack(data) - np.savetxt(out_file, data, fmt="%.5f", delimiter="\t") + np.savetxt(out_file, data, fmt='%.5f', delimiter='\t') except ValueError: # Load file with header. data = [pd.read_table(tsv_file) for tsv_file in tsv_files] data = pd.concat(data, axis=0) - data.to_csv(out_file, sep="\t", index=False) + data.to_csv(out_file, sep='\t', index=False) return out_file @@ -58,4 +59,4 @@ def concatenate_niimgs(files, out_file): concat_preproc_img = concat_imgs(files) concat_preproc_img.to_filename(out_file) else: - os.system(f"wb_command -cifti-merge {out_file} -cifti {' -cifti '.join(files)}") + os.system(f'wb_command -cifti-merge {out_file} -cifti {" -cifti ".join(files)}') # noqa: S605 diff --git a/xcp_d/utils/confounds.py b/xcp_d/utils/confounds.py index f02e5e579..dda6eae1a 100644 --- a/xcp_d/utils/confounds.py +++ b/xcp_d/utils/confounds.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Confound matrix selection based on Ciric et al. 2007.""" + import os import warnings from pathlib import Path @@ -12,21 +13,21 @@ from xcp_d.utils.doc import fill_doc -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') def volterra(df): """Perform Volterra expansion.""" # Ignore pandas SettingWithCopyWarning - with pd.option_context("mode.chained_assignment", None): + with pd.option_context('mode.chained_assignment', None): columns = df.columns.tolist() for col in columns: - new_col = f"{col}_derivative1" + new_col = f'{col}_derivative1' df[new_col] = df[col].diff() columns = df.columns.tolist() for col in columns: - new_col = f"{col}_power2" + new_col = f'{col}_power2' df[new_col] = df[col] ** 2 return df @@ -66,19 +67,19 @@ def load_motion( ---------- .. footbibliography:: """ - if motion_filter_type not in ("lp", "notch", None): + if motion_filter_type not in ('lp', 'notch', None): raise ValueError(f"Motion filter type '{motion_filter_type}' not supported.") # Select the motion columns from the overall confounds DataFrame - if isinstance(confounds_df, (str, Path)): + if isinstance(confounds_df, str | Path): confounds_df = pd.read_table(confounds_df) motion_confounds_df = confounds_df[ - ["rot_x", "rot_y", "rot_z", "trans_x", "trans_y", "trans_z"] + ['rot_x', 'rot_y', 'rot_z', 'trans_x', 'trans_y', 'trans_z'] ] # Apply LP or notch filter - if motion_filter_type in ("lp", "notch"): + if motion_filter_type in ('lp', 'notch'): motion_confounds = filter_motion( data=motion_confounds_df.to_numpy(), TR=TR, @@ -89,7 +90,7 @@ def load_motion( ) filtered_motion_confounds_df = pd.DataFrame( data=motion_confounds, - columns=[f"{c}_filtered" for c in motion_confounds_df.columns], + columns=[f'{c}_filtered' for c in motion_confounds_df.columns], ) motion_confounds_df = pd.concat( [motion_confounds_df, filtered_motion_confounds_df], @@ -98,7 +99,7 @@ def load_motion( # Add RMSD column (used for QC measures later on) motion_confounds_df = pd.concat( - [motion_confounds_df, confounds_df[["rmsd"]]], + [motion_confounds_df, confounds_df[['rmsd']]], axis=1, ) @@ -158,23 +159,23 @@ def filter_motion( ---------- .. 
footbibliography:: """ - if motion_filter_type not in ("lp", "notch"): + if motion_filter_type not in ('lp', 'notch'): raise ValueError(f"Motion filter type '{motion_filter_type}' not supported.") lowpass_hz = band_stop_min / 60 sampling_frequency = 1 / TR - if motion_filter_type == "lp": # low-pass filter + if motion_filter_type == 'lp': # low-pass filter n_filter_applications = int(np.floor(motion_filter_order / 2)) b, a = butter( n_filter_applications, lowpass_hz, - btype="lowpass", - output="ba", + btype='lowpass', + output='ba', fs=sampling_frequency, ) - filtered_data = filtfilt(b, a, data, axis=0, padtype="constant", padlen=data.shape[0] - 1) + filtered_data = filtfilt(b, a, data, axis=0, padtype='constant', padlen=data.shape[0] - 1) else: # notch filter highpass_hz = band_stop_max / 60 @@ -196,7 +197,7 @@ def filter_motion( a, filtered_data, axis=0, - padtype="constant", + padtype='constant', padlen=data.shape[0] - 1, ) @@ -231,12 +232,12 @@ def _modify_motion_filter(motion_filter_type, band_stop_min, band_stop_max, TR): nyquist_bpm = nyquist_frequency * 60 is_modified = False - if motion_filter_type == "lp": # low-pass filter + if motion_filter_type == 'lp': # low-pass filter # Remove any frequencies above band_stop_min. assert band_stop_min is not None assert band_stop_min > 0 if band_stop_max: - warnings.warn("The parameter 'band_stop_max' will be ignored.") + warnings.warn("The parameter 'band_stop_max' will be ignored.", stacklevel=2) lowpass_hz = band_stop_min / 60 # change BPM to right time unit @@ -250,20 +251,21 @@ def _modify_motion_filter(motion_filter_type, band_stop_min, band_stop_max, TR): band_stop_min_adjusted = lowpass_hz_adjusted * 60 # change Hertz back to BPM if band_stop_min_adjusted != band_stop_min: warnings.warn( - f"Low-pass filter frequency is above Nyquist frequency ({nyquist_bpm} BPM), " - f"so it has been changed ({band_stop_min} --> {band_stop_min_adjusted} BPM)." + f'Low-pass filter frequency is above Nyquist frequency ({nyquist_bpm} BPM), ' + f'so it has been changed ({band_stop_min} --> {band_stop_min_adjusted} BPM).', + stacklevel=2, ) is_modified = True band_stop_max_adjusted = None - elif motion_filter_type == "notch": # notch filter + elif motion_filter_type == 'notch': # notch filter # Retain any frequencies *outside* the band_stop_min-band_stop_max range. assert band_stop_max is not None assert band_stop_min is not None assert band_stop_max > 0 assert band_stop_min > 0 - assert band_stop_min < band_stop_max, f"{band_stop_min} >= {band_stop_max}" + assert band_stop_min < band_stop_max, f'{band_stop_min} >= {band_stop_max}' stopband = np.array([band_stop_min, band_stop_max]) stopband_hz = stopband / 60 # change BPM to Hertz @@ -278,10 +280,11 @@ def _modify_motion_filter(motion_filter_type, band_stop_min, band_stop_max, TR): stopband_adjusted = stopband_hz_adjusted * 60 # change Hertz back to BPM if not np.array_equal(stopband_adjusted, stopband): warnings.warn( - f"One or both filter frequencies are above Nyquist frequency ({nyquist_bpm} BPM), " - "so they have been changed " - f"({stopband[0]} --> {stopband_adjusted[0]}, " - f"{stopband[1]} --> {stopband_adjusted[1]} BPM)." 
+ f'One or both filter frequencies are above Nyquist frequency ({nyquist_bpm} BPM), ' + 'so they have been changed ' + f'({stopband[0]} --> {stopband_adjusted[0]}, ' + f'{stopband[1]} --> {stopband_adjusted[1]} BPM).', + stacklevel=2, ) is_modified = True @@ -315,10 +318,10 @@ def _infer_dummy_scans(dummy_scans, confounds_file=None): dummy_scans : int Estimated number of dummy scans. """ - if dummy_scans == "auto": + if dummy_scans == 'auto': confounds_df = pd.read_table(confounds_file) - nss_cols = [c for c in confounds_df.columns if c.startswith("non_steady_state_outlier")] + nss_cols = [c for c in confounds_df.columns if c.startswith('non_steady_state_outlier')] if nss_cols: initial_volumes_df = confounds_df[nss_cols] @@ -327,10 +330,10 @@ def _infer_dummy_scans(dummy_scans, confounds_file=None): # reasonably assumes all NSS volumes are contiguous dummy_scans = int(dummy_scans[-1] + 1) - LOGGER.info(f"Found {dummy_scans} dummy scans in {os.path.basename(confounds_file)}") + LOGGER.info(f'Found {dummy_scans} dummy scans in {os.path.basename(confounds_file)}') else: - LOGGER.warning(f"No non-steady-state outliers found in {confounds_file}") + LOGGER.warning(f'No non-steady-state outliers found in {confounds_file}') dummy_scans = 0 return dummy_scans diff --git a/xcp_d/utils/debug.py b/xcp_d/utils/debug.py index 6a5724885..d78aa288e 100644 --- a/xcp_d/utils/debug.py +++ b/xcp_d/utils/debug.py @@ -43,6 +43,7 @@ # limitations under the License. # """Tools for debugging, from fMRIPrep.""" + import sys @@ -74,7 +75,7 @@ def _pdb_excepthook(type, value, tb): from IPython.core import ultratb sys.excepthook = ultratb.FormattedTB( - mode="Verbose", + mode='Verbose', # color_scheme='Linux', call_pdb=is_interactive(), ) diff --git a/xcp_d/utils/doc.py b/xcp_d/utils/doc.py index b3a3f0fcf..c7aca4554 100644 --- a/xcp_d/utils/doc.py +++ b/xcp_d/utils/doc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """Functions related to the documentation. docdict contains the standard documentation entries used across xcp_d. @@ -6,38 +5,31 @@ source: Eric Larson and MNE-python team. https://github.com/mne-tools/mne-python/blob/main/mne/utils/docs.py """ + import sys ################################### # Standard documentation entries # -docdict = dict() +docdict = {} -docdict[ - "omp_nthreads" -] = """ +docdict['omp_nthreads'] = """ omp_nthreads : :obj:`int` Maximum number of threads an individual process may use. """ -docdict[ - "mem_gb" -] = """ +docdict['mem_gb'] = """ mem_gb : :obj:`float` Memory limit, in gigabytes. """ -docdict[ - "fmri_dir" -] = """ +docdict['fmri_dir'] = """ fmri_dir : :obj:`str` Path to the preprocessed derivatives dataset. For example, "/path/to/dset/derivatives/fmriprep/". """ -docdict[ - "output_dir" -] = """ +docdict['output_dir'] = """ output_dir : :obj:`str` Path to the output directory for ``xcp_d`` derivatives. For example, "/path/to/dset/derivatives/xcp_d". @@ -45,80 +37,60 @@ As of version 0.7.0, 'xcp_d' will not be appended to the output directory. """ -docdict[ - "work_dir" -] = """ +docdict['work_dir'] = """ work_dir : :obj:`str` Directory in which to store workflow execution state and temporary files. """ -docdict[ - "analysis_level" -] = """ +docdict['analysis_level'] = """ analysis_level : {"participant"} The analysis level for ``xcp_d``. Must be specified as "participant" since xcp_d performs analyses at the participant level. 
""" -docdict[ - "anat_to_template_xfm" -] = """ +docdict['anat_to_template_xfm'] = """ anat_to_template_xfm : :obj:`str` Path to the T1w-to-MNI transform file. May be "identity", for testing purposes. """ -docdict[ - "template_to_anat_xfm" -] = """ +docdict['template_to_anat_xfm'] = """ template_to_anat_xfm : :obj:`str` Path to the MNI-to-T1w transform file. May be "identity", for testing purposes. """ -docdict[ - "name_source" -] = """ +docdict['name_source'] = """ name_source : :obj:`str` Path to the file that will be used as the ``source_file`` for derivatives. This is generally the preprocessed BOLD file. This file does not need to exist (e.g., in the case of a concatenated version of the filename). """ -docdict[ - "boldref" -] = """ +docdict['boldref'] = """ boldref : :obj:`str` Path to the BOLD reference file associated with the target BOLD run. This comes from the preprocessing derivatives. """ -docdict[ - "TR" -] = """ +docdict['TR'] = """ TR : :obj:`float` Repetition time of the BOLD run, in seconds. """ -docdict[ - "preproc_confounds_file" -] = """ +docdict['preproc_confounds_file'] = """ preproc_confounds_file : :obj:`str` Confounds TSV file from preprocessing derivatives. """ -docdict[ - "params" -] = """ +docdict['params'] = """ params : {"36P", "24P", "27P", "acompcor", "acompcor_gsr", \ "aroma", "aroma_gsr", "custom"}, optional Shorthand for the parameter set to extract from the confounds TSV. Default is "36P", most expansive option. """ -docdict[ - "input_type" -] = """ +docdict['input_type'] = """ input_type : {"fmriprep", "dcan", "hcp", "nibabies", "ukb"} The format of the incoming preprocessed BIDS derivatives. DCAN- and HCP-format derivatives will automatically be converted to a more @@ -127,9 +99,7 @@ file organization and naming. """ -docdict[ - "abcc_qc" -] = """ +docdict['abcc_qc'] = """ abcc_qc : :obj:`bool` This flag determines if DCAN-related QC steps will be taken. Enabling this flag will trigger the following steps: @@ -139,18 +109,14 @@ 3. DCAN QC files will be generated. """ -docdict[ - "smoothing" -] = """ +docdict['smoothing'] = """ smoothing : :obj:`float` The full width at half maximum (FWHM), in millimeters, of the Gaussian smoothing kernel that will be applied to the post-processed and denoised data. ALFF and ReHo outputs will also be smoothing with this kernel. """ -docdict[ - "custom_confounds_folder" -] = """ +docdict['custom_confounds_folder'] = """ custom_confounds_folder : :obj:`str` or None Path to folder containing custom nuisance regressors. Must be a folder containing confounds files, @@ -158,9 +124,7 @@ selected. """ -docdict[ - "head_radius" -] = """ +docdict['head_radius'] = """ head_radius : :obj:`float` or "auto" Radius of the head, in millimeters, for framewise displacement calculation. @@ -169,9 +133,7 @@ estimated from the preprocessed brain mask. """ -docdict[ - "fd_thresh" -] = """ +docdict['fd_thresh'] = """ fd_thresh : :obj:`float` Framewise displacement threshold for censoring, in millimeters. Any framewise displacement values higher than the threshold are flagged as "high motion". @@ -179,18 +141,14 @@ Default is 0.2 mm. """ -docdict[ - "bandpass_filter" -] = """ +docdict['bandpass_filter'] = """ bandpass_filter : :obj:`bool` If True, a Butterworth bandpass filter will be applied to the fMRI data after interpolation, but before regression. If False, bandpass filtering will not be performed. 
""" -docdict[ - "high_pass" -] = """ +docdict['high_pass'] = """ high_pass : :obj:`float` Lower cut-off frequency for the Butterworth bandpass filter, in Hertz. The bandpass filter is applied to the fMRI data after post-processing and denoising. @@ -201,9 +159,7 @@ Default value is 0.01. """ -docdict[ - "low_pass" -] = """ +docdict['low_pass'] = """ low_pass : :obj:`float` Upper cut-off frequency for the Butterworth bandpass filter, in Hertz. The bandpass filter is applied to the fMRI data after post-processing and denoising. @@ -214,9 +170,7 @@ Default value is 0.08. """ -docdict[ - "bpf_order" -] = """ +docdict['bpf_order'] = """ bpf_order : :obj:`int` Number of filter coefficients for Butterworth bandpass filter. Bandpass filtering will only be performed if ``bandpass_filter`` is True. @@ -224,9 +178,7 @@ ``upper_bpf``/``low_pass``. """ -docdict[ - "motion_filter_type" -] = """ +docdict['motion_filter_type'] = """ motion_filter_type : {None, "lp", "notch"} Type of filter to use for removing respiratory artifact from motion regressors. @@ -240,9 +192,7 @@ In this case, only ``band_stop_min`` must be defined. """ -docdict[ - "motion_filter_order" -] = """ +docdict['motion_filter_order'] = """ motion_filter_order : :obj:`int` Number of filter coefficients for the motion parameter filter. Motion filtering is only performed if ``motion_filter_type`` is not None. @@ -252,9 +202,7 @@ filtfilt applies the filter twice. """ -docdict[ - "band_stop_min" -] = """ +docdict['band_stop_min'] = """ band_stop_min : :obj:`float` or None Lower frequency for the motion parameter filter, in breaths-per-minute (bpm). Motion filtering is only performed if ``motion_filter_type`` is not None. @@ -281,9 +229,7 @@ this parameter is 6 BPM (equivalent to 0.1 Hertz), based on :footcite:t:`gratton2020removal`. """ -docdict[ - "band_stop_max" -] = """ +docdict['band_stop_max'] = """ band_stop_max : :obj:`float` or None Upper frequency for the motion parameter filter, in breaths-per-minute (bpm). Motion filtering is only performed if ``motion_filter_type`` is not None. @@ -306,9 +252,7 @@ ================= ================= """ -docdict[ - "dcan_correlation_lengths" -] = """ +docdict['dcan_correlation_lengths'] = """ dcan_correlation_lengths : :obj:`list` of :obj:`float`, optional If used, this parameter will produce correlation matrices limited to each requested amount of time. @@ -319,9 +263,7 @@ then the run will not be post-processed. """ -docdict[ - "exact_scans" -] = """ +docdict['exact_scans'] = """ exact_scans : :obj:`list` of :obj:`int`, optional If used, this parameter will produce correlation matrices limited to each requested amount of time. @@ -332,25 +274,19 @@ then the run will not be post-processed. """ -docdict[ - "name" -] = """ +docdict['name'] = """ name : :obj:`str`, optional Name of the workflow. This is used for working directories and workflow graphs. """ -docdict[ - "cifti" -] = """ +docdict['cifti'] = """ cifti : :obj:`bool` Post-process surface data (CIFTIs) instead of volumetric data (NIFTIs). This parameter is overridden when DCAN- or HCP-format data are provided. Default is False. """ -docdict[ - "process_surfaces" -] = """ +docdict['process_surfaces'] = """ process_surfaces : :obj:`bool`, optional If True, a workflow will be run to warp native-space (fsnative) reconstructed cortical surfaces (surf.gii files) produced by Freesurfer into standard (fsLR) space. @@ -358,41 +294,31 @@ By default, this workflow is disabled. 
""" -docdict[ - "subject_id" -] = """ +docdict['subject_id'] = """ subject_id : :obj:`str` The participant ID. This SHOULD NOT include the ``sub-`` prefix. """ -docdict[ - "layout" -] = """ +docdict['layout'] = """ layout : :obj:`bids.layout.BIDSLayout` BIDSLayout indexing the ingested (e.g., fMRIPrep-format) derivatives. """ -docdict[ - "dummy_scans" -] = """ +docdict['dummy_scans'] = """ dummy_scans : :obj:`int` or "auto" Number of volumes to remove from the beginning of each run. If set to 'auto', xcp_d will extract non-steady-state volume indices from the preprocessing derivatives' confounds file. """ -docdict[ - "random_seed" -] = """ +docdict['random_seed'] = """ random_seed : :obj:`int` or None Random seed for the workflow. This is currently only used with the ``--create-matrices`` parameter, when randomly selecting volumes to censor for correlation matrices. """ -docdict[ - "min_coverage" -] = """ +docdict['min_coverage'] = """ min_coverage : :obj:`float` Coverage threshold to apply to parcels in each atlas. Any parcels with lower coverage than the threshold will be replaced with NaNs. @@ -400,9 +326,7 @@ Default is 0.5. """ -docdict[ - "min_time" -] = """ +docdict['min_time'] = """ min_time : :obj:`float` Post-scrubbing threshold to apply to individual runs in the dataset. This threshold determines the minimum amount of time, in seconds, @@ -413,9 +337,7 @@ Default is 100. """ -docdict[ - "despike" -] = """ +docdict['despike'] = """ despike : :obj:`bool` If True, the BOLD data will be despiked before censoring/denoising/filtering/interpolation. If False, no despiking will be performed. @@ -425,25 +347,19 @@ the despiked data will be converted back to CIFTI format. """ -docdict[ - "motion_file" -] = """ +docdict['motion_file'] = """ motion_file : :obj:`str` Framewise displacement timeseries, potentially after bandstop or low-pass filtering. This is a TSV file with one column: 'framewise_displacement'. """ -docdict[ - "temporal_mask" -] = """ +docdict['temporal_mask'] = """ temporal_mask : :obj:`str` Temporal mask; all values above ``fd_thresh`` set to 1. This is a TSV file with one column: 'framewise_displacement'. """ -docdict[ - "denoised_interpolated_bold" -] = """ +docdict['denoised_interpolated_bold'] = """ denoised_interpolated_bold : :obj:`str` Path to the censored, denoised, interpolated, and filtered BOLD file. This file is the result of denoising the censored preprocessed BOLD data, @@ -452,9 +368,7 @@ This output should not be used for analysis. It is primarily for DCAN QC plots. """ -docdict[ - "censored_denoised_bold" -] = """ +docdict['censored_denoised_bold'] = """ censored_denoised_bold : :obj:`str` Path to the censored, denoised, interpolated, filtered, and re-censored BOLD file. This file is the result of denoising the censored preprocessed BOLD data, @@ -463,9 +377,7 @@ This output is the primary derivative for analysis. """ -docdict[ - "smoothed_denoised_bold" -] = """ +docdict['smoothed_denoised_bold'] = """ smoothed_denoised_bold : :obj:`str` Path to the censored, denoised, interpolated, filtered, re-censored, and smoothed BOLD file. This file is the result of denoising the censored preprocessed BOLD data, @@ -473,72 +385,54 @@ smoothing. """ -docdict[ - "atlases" -] = """ +docdict['atlases'] = """ atlases : :obj:`list` of :obj:`str` A list of atlases used for parcellating the BOLD data. The set of atlases to use is defined by the user. 
""" -docdict[ - "coverage" -] = """ +docdict['coverage'] = """ coverage : :obj:`list` of :obj:`str` List of paths to atlas-specific coverage TSV files. """ -docdict[ - "coverage_ciftis" -] = """ +docdict['coverage_ciftis'] = """ coverage_ciftis : :obj:`list` of :obj:`str` List of paths to atlas-specific coverage CIFTI (pscalar) files. """ -docdict[ - "timeseries" -] = """ +docdict['timeseries'] = """ timeseries : :obj:`list` of :obj:`str` List of paths to atlas-specific time series TSV files. These time series are produced from the ``censored_denoised_bold`` outputs. """ -docdict[ - "timeseries_ciftis" -] = """ +docdict['timeseries_ciftis'] = """ timeseries_ciftis : :obj:`list` of :obj:`str` List of paths to atlas-specific time series CIFTI (ptseries) files. These time series are produced from the ``censored_denoised_bold`` outputs. """ -docdict[ - "correlations" -] = """ +docdict['correlations'] = """ correlations : :obj:`list` of :obj:`str` List of paths to atlas-specific ROI-to-ROI correlation TSV files. These correlations are produced from the ``timeseries`` outputs. """ -docdict[ - "correlation_ciftis" -] = """ +docdict['correlation_ciftis'] = """ correlation_ciftis : :obj:`list` of :obj:`str` List of paths to atlas-specific ROI-to-ROI correlation CIFTI (pconn) files. These correlations are produced from the ``timeseries_cifti`` outputs. """ -docdict[ - "correlations_exact" -] = """ +docdict['correlations_exact'] = """ correlations_exact : :obj:`list` of :obj:`list` of :obj:`str` Exact-scan-wise list of lists of paths to atlas-specific ROI-to-ROI correlation TSV files. These correlations are produced from the ``timeseries`` outputs and the ``temporal_mask`` input. """ -docdict[ - "correlation_ciftis_exact" -] = """ +docdict['correlation_ciftis_exact'] = """ correlation_ciftis : :obj:`list` of :obj:`list` of :obj:`str` Exact-scan-wise list of lists of paths to atlas-specific ROI-to-ROI correlation CIFTI (pconn) files. 
@@ -602,7 +496,7 @@ def fill_doc(f): try: indented = docdict_indented[icount] except KeyError: - indent = " " * icount + indent = ' ' * icount docdict_indented[icount] = indented = {} for name, dstr in docdict.items(): lines = dstr.splitlines() @@ -610,15 +504,15 @@ def fill_doc(f): newlines = [lines[0]] for line in lines[1:]: newlines.append(indent + line) - indented[name] = "\n".join(newlines) + indented[name] = '\n'.join(newlines) except IndexError: indented[name] = dstr try: f.__doc__ = docstring % indented except (TypeError, ValueError, KeyError) as exp: funcname = f.__name__ - funcname = docstring.split("\n")[0] if funcname is None else funcname - raise RuntimeError(f"Error documenting {funcname}:\n{str(exp)}") + funcname = docstring.split('\n')[0] if funcname is None else funcname + raise RuntimeError(f'Error documenting {funcname}:\n{str(exp)}') from exp return f @@ -632,27 +526,27 @@ def download_example_data(out_dir=None, overwrite=False): from xcp_d.data import load as load_data if not out_dir: - out_dir = str(load_data(".")) + out_dir = str(load_data('.')) out_dir = os.path.abspath(out_dir) - url = "https://upenn.box.com/shared/static/1dd4u115invn60cr3qm8xl8p5axho5dp" - target_path = os.path.join(out_dir, "ds001419-example") + url = 'https://upenn.box.com/shared/static/1dd4u115invn60cr3qm8xl8p5axho5dp' + target_path = os.path.join(out_dir, 'ds001419-example') if overwrite or not os.path.isdir(target_path): - target_file = os.path.join(out_dir, "ds001419-example.tar.gz") + target_file = os.path.join(out_dir, 'ds001419-example.tar.gz') if overwrite or not os.path.isfile(target_file): - response = requests.get(url, stream=True) + response = requests.get(url, stream=True, timeout=10) if response.status_code == 200: - with open(target_file, "wb") as fo: + with open(target_file, 'wb') as fo: fo.write(response.raw.read()) if not os.path.isfile(target_file): - raise FileNotFoundError(f"File DNE: {target_file}") + raise FileNotFoundError(f'File DNE: {target_file}') # Expand the file - with tarfile.open(target_file, "r:gz") as fo: - fo.extractall(out_dir) + with tarfile.open(target_file, 'r:gz') as fo: + fo.extractall(out_dir) # noqa: S202 return target_path diff --git a/xcp_d/utils/execsummary.py b/xcp_d/utils/execsummary.py index cf8e1aa76..5d245c9b8 100644 --- a/xcp_d/utils/execsummary.py +++ b/xcp_d/utils/execsummary.py @@ -1,9 +1,10 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Functions for generating the executive summary.""" + from nipype import logging -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') def make_mosaic(png_files): @@ -18,13 +19,13 @@ def make_mosaic(png_files): import numpy as np from PIL import Image # for BrainSprite - mosaic_file = os.path.abspath("mosaic.png") + mosaic_file = os.path.abspath('mosaic.png') files = png_files[::-1] # we want last first, I guess? 
IMAGE_DIM = 218 images_per_side = int(np.ceil(np.sqrt(len(files)))) square_dim = IMAGE_DIM * images_per_side - result = Image.new("RGB", (square_dim, square_dim), color=1) + result = Image.new('RGB', (square_dim, square_dim), color=1) for index, file_ in enumerate(files): # Get relative path to file, from user's home folder @@ -38,7 +39,7 @@ def make_mosaic(png_files): w, h = img.size result.paste(img, (x, y, x + w, y + h)) - result.save(mosaic_file, "PNG", quality=95) + result.save(mosaic_file, 'PNG', quality=95) return mosaic_file @@ -59,32 +60,32 @@ def modify_brainsprite_scene_template( import os paths = { - "TX_IMG": anat_file, - "RPIAL": rh_pial_surf, - "LPIAL": lh_pial_surf, - "RWHITE": rh_wm_surf, - "LWHITE": lh_wm_surf, + 'TX_IMG': anat_file, + 'RPIAL': rh_pial_surf, + 'LPIAL': lh_pial_surf, + 'RWHITE': rh_wm_surf, + 'LWHITE': lh_wm_surf, } - out_file = os.path.abspath("modified_scene.scene") + out_file = os.path.abspath('modified_scene.scene') - if scene_template.endswith(".gz"): - with gzip.open(scene_template, mode="rt") as fo: + if scene_template.endswith('.gz'): + with gzip.open(scene_template, mode='rt') as fo: data = fo.read() else: - with open(scene_template, "r") as fo: + with open(scene_template) as fo: data = fo.read() - data = data.replace("XAXIS_COORDINATE", str(slice_number)) + data = data.replace('XAXIS_COORDINATE', str(slice_number)) for template, path in paths.items(): filename = os.path.basename(path) # Replace templated pathnames and filenames in local copy. - data = data.replace(f"{template}_PATH", path) - data = data.replace(f"{template}_NAME", filename) + data = data.replace(f'{template}_PATH', path) + data = data.replace(f'{template}_NAME', filename) - with open(out_file, "w") as fo: + with open(out_file, 'w') as fo: fo.write(data) return out_file @@ -106,30 +107,30 @@ def modify_pngs_scene_template( import os paths = { - "TX_IMG": anat_file, - "RPIAL": rh_pial_surf, - "LPIAL": lh_pial_surf, - "RWHITE": rh_wm_surf, - "LWHITE": lh_wm_surf, + 'TX_IMG': anat_file, + 'RPIAL': rh_pial_surf, + 'LPIAL': lh_pial_surf, + 'RWHITE': rh_wm_surf, + 'LWHITE': lh_wm_surf, } - out_file = os.path.abspath("modified_scene.scene") + out_file = os.path.abspath('modified_scene.scene') - if scene_template.endswith(".gz"): - with gzip.open(scene_template, mode="rt") as fo: + if scene_template.endswith('.gz'): + with gzip.open(scene_template, mode='rt') as fo: data = fo.read() else: - with open(scene_template, "r") as fo: + with open(scene_template) as fo: data = fo.read() for template, path in paths.items(): filename = os.path.basename(path) # Replace templated pathnames and filenames in local copy. - data = data.replace(f"{template}_PATH", path) - data = data.replace(f"{template}_NAME", filename) + data = data.replace(f'{template}_PATH', path) + data = data.replace(f'{template}_NAME', filename) - with open(out_file, "w") as fo: + with open(out_file, 'w') as fo: fo.write(data) return out_file @@ -172,15 +173,15 @@ def get_png_image_names(): NOTE: This is a Node function. 
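# Hedged sketch of the square mosaic layout used by make_mosaic above: with
# images_per_side = ceil(sqrt(n_images)), a natural placement is row = index // side
# and column = index % side. The exact row/column mapping is not visible in this hunk,
# so treat the arithmetic below as an assumption for illustration only.
import numpy as np

IMAGE_DIM = 218
n_images = 7
images_per_side = int(np.ceil(np.sqrt(n_images)))  # 3-by-3 grid for 7 images
for index in range(n_images):
    x = (index % images_per_side) * IMAGE_DIM
    y = (index // images_per_side) * IMAGE_DIM
    # the real function pastes each PNG at (x, y, x + w, y + h) on the RGB canvas
    print(index, (x, y))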
""" image_descriptions = [ - "AxialInferiorTemporalCerebellum", - "AxialBasalGangliaPutamen", - "AxialSuperiorFrontal", - "CoronalPosteriorParietalLingual", - "CoronalCaudateAmygdala", - "CoronalOrbitoFrontal", - "SagittalInsulaFrontoTemporal", - "SagittalCorpusCallosum", - "SagittalInsulaTemporalHippocampalSulcus", + 'AxialInferiorTemporalCerebellum', + 'AxialBasalGangliaPutamen', + 'AxialSuperiorFrontal', + 'CoronalPosteriorParietalLingual', + 'CoronalCaudateAmygdala', + 'CoronalOrbitoFrontal', + 'SagittalInsulaFrontoTemporal', + 'SagittalCorpusCallosum', + 'SagittalInsulaTemporalHippocampalSulcus', ] scene_index = list(range(1, len(image_descriptions) + 1)) diff --git a/xcp_d/utils/filemanip.py b/xcp_d/utils/filemanip.py index 881b835f7..91244d87d 100644 --- a/xcp_d/utils/filemanip.py +++ b/xcp_d/utils/filemanip.py @@ -1,16 +1,16 @@ -# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Miscellaneous file manipulation functions.""" + import os.path as op import numpy as np from nipype import logging from nipype.utils.misc import is_container -fmlogger = logging.getLogger("nipype.utils") +fmlogger = logging.getLogger('nipype.utils') -related_filetype_sets = [(".hdr", ".img", ".mat"), (".nii", ".mat"), (".BRIK", ".HEAD")] +related_filetype_sets = [('.hdr', '.img', '.mat'), ('.nii', '.mat'), ('.BRIK', '.HEAD')] def split_filename(fname): @@ -45,35 +45,35 @@ def split_filename(fname): """ # TM 07152022 - edited to add cifti and workbench extensions special_extensions = [ - ".nii.gz", - ".tar.gz", - ".niml.dset", - ".dconn.nii", - ".dlabel.nii", - ".dpconn.nii", - ".dscalar.nii", - ".dtseries.nii", - ".fiberTEMP.nii", - ".trajTEMP.wbsparse", - ".pconn.nii", - ".pdconn.nii", - ".plabel.nii", - ".pscalar.nii", - ".ptseries.nii", - ".sdseries.nii", - ".label.gii", - ".label.gii", - ".func.gii", - ".shape.gii", - ".rgba.gii", - ".surf.gii", - ".dpconn.nii", - ".dtraj.nii", - ".pconnseries.nii", - ".pconnscalar.nii", - ".dfan.nii", - ".dfibersamp.nii", - ".dfansamp.nii", + '.nii.gz', + '.tar.gz', + '.niml.dset', + '.dconn.nii', + '.dlabel.nii', + '.dpconn.nii', + '.dscalar.nii', + '.dtseries.nii', + '.fiberTEMP.nii', + '.trajTEMP.wbsparse', + '.pconn.nii', + '.pdconn.nii', + '.plabel.nii', + '.pscalar.nii', + '.ptseries.nii', + '.sdseries.nii', + '.label.gii', + '.label.gii', + '.func.gii', + '.shape.gii', + '.rgba.gii', + '.surf.gii', + '.dpconn.nii', + '.dtraj.nii', + '.pconnseries.nii', + '.pconnscalar.nii', + '.dfan.nii', + '.dfibersamp.nii', + '.dfansamp.nii', ] pth = op.dirname(fname) @@ -92,7 +92,7 @@ def split_filename(fname): return pth, fname, ext -def fname_presuffix(fname, prefix="", suffix="", newpath=None, use_ext=True): +def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True): """Manipulate path and name of input filename. 
Parameters @@ -128,7 +128,7 @@ def fname_presuffix(fname, prefix="", suffix="", newpath=None, use_ext=True): """ pth, fname, ext = split_filename(fname) if not use_ext: - ext = "" + ext = '' # No need for isdefined: bool(Undefined) evaluates to False if newpath: @@ -138,11 +138,11 @@ def fname_presuffix(fname, prefix="", suffix="", newpath=None, use_ext=True): def ensure_list(filename): """Return a list given either a string or a list.""" - if isinstance(filename, (str, bytes)): + if isinstance(filename, str | bytes): return [filename] - elif isinstance(filename, (list, tuple, type(None), np.ndarray)): + elif isinstance(filename, list | tuple | type(None) | np.ndarray): return filename elif is_container(filename): - return [x for x in filename] + return list(filename) else: return None diff --git a/xcp_d/utils/modified_data.py b/xcp_d/utils/modified_data.py index e2c0391e3..91d08e490 100644 --- a/xcp_d/utils/modified_data.py +++ b/xcp_d/utils/modified_data.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Functions for interpolating over high-motion volumes.""" + import os import nibabel as nb @@ -12,7 +13,7 @@ from xcp_d.utils.doc import fill_doc from xcp_d.utils.filemanip import fname_presuffix -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') @fill_doc @@ -34,9 +35,9 @@ def compute_fd(confound, head_radius=50, filtered=False): The framewise displacement time series. """ confound = confound.replace(np.nan, 0) - motion_columns = ["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"] + motion_columns = ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z'] if filtered: - motion_columns = [f"{col}_filtered" for col in motion_columns] + motion_columns = [f'{col}_filtered' for col in motion_columns] mpars = confound[motion_columns].to_numpy() diff = mpars[:-1, :6] - mpars[1:, :6] @@ -69,9 +70,9 @@ def _drop_dummy_scans(bold_file, dummy_scans): if bold_image.ndim == 2: # cifti dropped_data = data[dummy_scans:, ...] 
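# Hedged sketch of the framewise displacement computed by compute_fd above, following
# the Power et al. (2012) definition: absolute frame-to-frame differences of the six
# motion parameters, with rotations (in radians) converted to arc length on a sphere of
# radius head_radius before summing. Toy data; not the xcp_d implementation itself.
import numpy as np

def framewise_displacement(motion_params, head_radius=50.0):
    """motion_params: (n_volumes, 6) array of trans x/y/z in mm and rot x/y/z in radians."""
    diffs = np.abs(np.diff(motion_params, axis=0))
    diffs[:, 3:] *= head_radius                       # rotational displacement as arc length, in mm
    return np.concatenate([[0.0], diffs.sum(axis=1)])  # first volume has no predecessor

motion = np.zeros((5, 6))
motion[2, 0] = 0.2    # 0.2 mm translation in x at volume 2
motion[3, 3] = 0.004  # 0.004 rad rotation at volume 3
print(framewise_displacement(motion))  # approximately [0., 0., 0.2, 0.4, 0.2]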
# time series is the first element - time_axis, brain_model_axis = [ + time_axis, brain_model_axis = ( bold_image.header.get_axis(i) for i in range(bold_image.ndim) - ] + ) new_total_volumes = dropped_data.shape[0] dropped_time_axis = time_axis[:new_total_volumes] dropped_header = nb.cifti2.Cifti2Header.from_axes((dropped_time_axis, brain_model_axis)) @@ -112,10 +113,10 @@ def downcast_to_32(in_file): return in_file elif not os.path.isfile(in_file): - raise FileNotFoundError(f"File not found: {in_file}") + raise FileNotFoundError(f'File not found: {in_file}') img = nb.load(in_file) - if hasattr(img, "nifti_header"): + if hasattr(img, 'nifti_header'): header = img.nifti_header else: header = img.header @@ -123,7 +124,7 @@ def downcast_to_32(in_file): SIZE32 = 4 # number of bytes in float32/int32 dtype = header.get_data_dtype() if dtype.itemsize > SIZE32: - LOGGER.warning(f"Downcasting {in_file} to 32-bit.") + LOGGER.warning(f'Downcasting {in_file} to 32-bit.') if np.issubdtype(dtype, np.integer): header.set_data_dtype(np.int32) elif np.issubdtype(dtype, np.floating): @@ -131,7 +132,7 @@ def downcast_to_32(in_file): else: raise TypeError(f"Unknown datatype '{dtype}'.") - out_file = fname_presuffix(in_file, newpath=os.getcwd(), suffix="_downcast", use_ext=True) + out_file = fname_presuffix(in_file, newpath=os.getcwd(), suffix='_downcast', use_ext=True) img.to_filename(out_file) else: out_file = in_file @@ -242,16 +243,16 @@ def calculate_exact_scans(exact_times, scan_length, t_r, bold_file): dropped_exact_times = [t for t in float_times if t > scan_length] if dropped_exact_times: LOGGER.warning( - f"{scan_length} seconds in {os.path.basename(bold_file)} " - "survive high-motion outlier scrubbing. " - "Only retaining exact-time values greater than this " - f"({retained_exact_times})." + f'{scan_length} seconds in {os.path.basename(bold_file)} ' + 'survive high-motion outlier scrubbing. ' + 'Only retaining exact-time values greater than this ' + f'({retained_exact_times}).' ) if non_float_times: LOGGER.warning( - f"Non-float values {non_float_times} in {os.path.basename(bold_file)} " - "will be ignored." + f'Non-float values {non_float_times} in {os.path.basename(bold_file)} ' + 'will be ignored.' 
) exact_scans = [int(t // t_r) for t in retained_exact_times] diff --git a/xcp_d/utils/plotting.py b/xcp_d/utils/plotting.py index b37f0b50e..d7cc0f74d 100644 --- a/xcp_d/utils/plotting.py +++ b/xcp_d/utils/plotting.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Plotting tools.""" + import os import matplotlib.pyplot as plt @@ -58,7 +59,7 @@ def plot_confounds( units=None, TR=None, hide_x=True, - color="b", + color='b', cutoff=None, ylims=None, ): @@ -108,50 +109,50 @@ def plot_confounds( # Set x_axis if not hide_x: if no_repetition_time: - time_series_axis.set_xlabel("time (frame #)") + time_series_axis.set_xlabel('time (frame #)') else: - time_series_axis.set_xlabel("time (s)") + time_series_axis.set_xlabel('time (s)') labels = TR * np.array(xticks) - time_series_axis.set_xticklabels([f"{t:.02f}" for t in labels.tolist()]) + time_series_axis.set_xticklabels([f'{t:.02f}' for t in labels.tolist()]) else: time_series_axis.set_xticklabels([]) if name is not None: if units is not None: - name += f" [{units}]" + name += f' [{units}]' # Formatting time_series_axis.annotate( name, xy=(0.0, 0.7), xytext=(0, 0), - xycoords="axes fraction", - textcoords="offset points", - va="center", - ha="left", + xycoords='axes fraction', + textcoords='offset points', + va='center', + ha='left', color=color, size=16, bbox={ - "boxstyle": "round", - "fc": "w", - "ec": "none", - "color": "none", - "lw": 0, - "alpha": 0.8, + 'boxstyle': 'round', + 'fc': 'w', + 'ec': 'none', + 'color': 'none', + 'lw': 0, + 'alpha': 0.8, }, ) - for side in ["top", "right"]: - time_series_axis.spines[side].set_color("none") + for side in ['top', 'right']: + time_series_axis.spines[side].set_color('none') time_series_axis.spines[side].set_visible(False) if not hide_x: - time_series_axis.spines["bottom"].set_position(("outward", 20)) - time_series_axis.xaxis.set_ticks_position("bottom") + time_series_axis.spines['bottom'].set_position(('outward', 20)) + time_series_axis.xaxis.set_ticks_position('bottom') else: - time_series_axis.spines["bottom"].set_color("none") - time_series_axis.spines["bottom"].set_visible(False) + time_series_axis.spines['bottom'].set_color('none') + time_series_axis.spines['bottom'].set_visible(False) - time_series_axis.spines["left"].set_color("none") - time_series_axis.spines["left"].set_visible(False) + time_series_axis.spines['left'].set_color('none') + time_series_axis.spines['left'].set_visible(False) time_series_axis.set_yticks([]) time_series_axis.set_yticklabels([]) @@ -183,39 +184,39 @@ def plot_confounds( p95 = 0 stats_label = ( - r"max: {max:.3f}{units} $\bullet$ mean: {mean:.3f}{units} " - r"$\bullet$ $\sigma$: {sigma:.3f}" - ).format(max=maxv, mean=mean, units=units or "", sigma=stdv) + r'max: {max:.3f}{units} $\bullet$ mean: {mean:.3f}{units} ' + r'$\bullet$ $\sigma$: {sigma:.3f}' + ).format(max=maxv, mean=mean, units=units or '', sigma=stdv) time_series_axis.annotate( stats_label, xy=(0.98, 0.7), - xycoords="axes fraction", + xycoords='axes fraction', xytext=(0, 0), - textcoords="offset points", - va="center", - ha="right", + textcoords='offset points', + va='center', + ha='right', color=color, size=14, bbox={ - "boxstyle": "round", - "fc": "w", - "ec": "none", - "color": "none", - "lw": 0, - "alpha": 0.8, + 'boxstyle': 'round', + 'fc': 'w', + 'ec': 'none', + 'color': 'none', + 'lw': 0, + 'alpha': 0.8, }, ) # Annotate percentile 95 - time_series_axis.plot((0, ntsteps - 1), [p95] * 2, linewidth=0.1, 
color="lightgray") + time_series_axis.plot((0, ntsteps - 1), [p95] * 2, linewidth=0.1, color='lightgray') time_series_axis.annotate( - f"{p95:.2f}", + f'{p95:.2f}', xy=(0, p95), xytext=(-1, 0), - textcoords="offset points", - va="center", - ha="right", - color="lightgray", + textcoords='offset points', + va='center', + ha='right', + color='lightgray', size=3, ) @@ -223,16 +224,16 @@ def plot_confounds( cutoff = [] for threshold in enumerate(cutoff): - time_series_axis.plot((0, ntsteps - 1), [threshold] * 2, linewidth=0.2, color="dimgray") + time_series_axis.plot((0, ntsteps - 1), [threshold] * 2, linewidth=0.2, color='dimgray') time_series_axis.annotate( - f"{threshold:.2f}", + f'{threshold:.2f}', xy=(0, threshold), xytext=(-1, 0), - textcoords="offset points", - va="center", - ha="right", - color="dimgray", + textcoords='offset points', + va='center', + ha='right', + color='dimgray', size=3, ) @@ -243,7 +244,7 @@ def plot_confounds( if gs_dist is not None: ax_dist = plt.subplot(gs_dist) sns.distplot(time_series, vertical=True, ax=ax_dist) - ax_dist.set_xlabel("Timesteps") + ax_dist.set_xlabel('Timesteps') ax_dist.set_ylim(time_series_axis.get_ylim()) ax_dist.set_yticklabels([]) @@ -265,15 +266,15 @@ def plot_dvars_es(time_series, ax, run_index=None): ax.set_xticklabels([]) # Set y-axis labels - ax.set_ylabel("DVARS") + ax.set_ylabel('DVARS') columns = time_series.columns maximum_values = [] minimum_values = [] colors = { - "Pre regression": "#68AC57", - "Post all": "#EF8532", + 'Pre regression': '#68AC57', + 'Post all': '#EF8532', } for c in columns: color = colors[c] @@ -283,7 +284,7 @@ def plot_dvars_es(time_series, ax, run_index=None): if run_index is not None: for run_location in run_index: - ax.axvline(run_location, color="black", linestyle="--") + ax.axvline(run_location, color='black', linestyle='--') # Set limits and format minimum_x_value = [abs(x) for x in minimum_values] @@ -298,7 +299,7 @@ def plot_dvars_es(time_series, ax, run_index=None): ): item.set_fontsize(30) - for axis in ["top", "bottom", "left", "right"]: + for axis in ['top', 'bottom', 'left', 'right']: ax.spines[axis].set_linewidth(4) sns.despine() @@ -318,31 +319,31 @@ def plot_global_signal_es(time_series, ax, run_index=None): ax.set_xticklabels([]) # Set y-axis labels - ax.set_ylabel("WB") + ax.set_ylabel('WB') # Plot the whole brain mean and std. # Mean scale on the left, std scale on the right. 
mean_line = ax.plot( - time_series["Mean"], - label="Mean", + time_series['Mean'], + label='Mean', linewidth=2, alpha=1, - color="#D1352B", + color='#D1352B', ) ax_right = ax.twinx() - ax_right.set_ylabel("Standard Deviation") + ax_right.set_ylabel('Standard Deviation') std_line = ax_right.plot( - time_series["Std"], - label="Std", + time_series['Std'], + label='Std', linewidth=2, alpha=1, - color="#497DB3", + color='#497DB3', ) - std_mean = np.mean(time_series["Std"]) + std_mean = np.mean(time_series['Std']) ax_right.set_ylim( - (1.5 * np.min(time_series["Std"] - std_mean)) + std_mean, - (1.5 * np.max(time_series["Std"] - std_mean)) + std_mean, + (1.5 * np.min(time_series['Std'] - std_mean)) + std_mean, + (1.5 * np.max(time_series['Std'] - std_mean)) + std_mean, ) ax_right.yaxis.label.set_fontsize(30) for item in ax_right.get_yticklabels(): @@ -354,14 +355,14 @@ def plot_global_signal_es(time_series, ax, run_index=None): if run_index is not None: for run_location in run_index: - ax.axvline(run_location, color="black", linestyle="--") + ax.axvline(run_location, color='black', linestyle='--') ax.set_xlim((0, ntsteps - 1)) - mean_mean = np.mean(time_series["Mean"]) + mean_mean = np.mean(time_series['Mean']) ax.set_ylim( - (1.5 * np.min(time_series["Mean"] - mean_mean)) + mean_mean, - (1.5 * np.max(time_series["Mean"] - mean_mean)) + mean_mean, + (1.5 * np.min(time_series['Mean'] - mean_mean)) + mean_mean, + (1.5 * np.max(time_series['Mean'] - mean_mean)) + mean_mean, ) for item in ( @@ -369,7 +370,7 @@ def plot_global_signal_es(time_series, ax, run_index=None): ): item.set_fontsize(30) - for axis in ["top", "bottom", "left", "right"]: + for axis in ['top', 'bottom', 'left', 'right']: ax.spines[axis].set_linewidth(4) sns.despine() @@ -384,7 +385,7 @@ def plot_framewise_displacement_es( ): """Create framewise displacement plot for the executive summary.""" ntsteps = time_series.shape[0] - ax.grid(axis="y") + ax.grid(axis='y') # Set 10 frame markers in X axis interval = max((ntsteps // 10, ntsteps // 5, 1)) @@ -392,14 +393,14 @@ def plot_framewise_displacement_es( ax.set_xticks(xticks) # Set the x-axis labels based on time, not index - ax.set_xlabel("Time (s)") + ax.set_xlabel('Time (s)') labels = TR * np.array(xticks) labels = labels.astype(int) ax.set_xticklabels(labels) # Set y-axis labels - ax.set_ylabel("FD (mm)") - ax.plot(time_series, label="FD", linewidth=3, color="black") + ax.set_ylabel('FD (mm)') + ax.plot(time_series, label='FD', linewidth=3, color='black') # Threshold fd at 0.1, 0.2 and 0.5 and plot # Plot zero line @@ -411,25 +412,25 @@ def plot_framewise_displacement_es( fd_dots[fd_dots < 0] = np.nan THRESHOLDS = [0.05, 0.1, 0.2, 0.5] - COLORS = ["#969696", "#377C21", "#EF8532", "#EB392A"] + COLORS = ['#969696', '#377C21', '#EF8532', '#EB392A'] for i_thresh, threshold in enumerate(THRESHOLDS): color = COLORS[i_thresh] ax.axhline( y=threshold, color=color, - linestyle="-", + linestyle='-', linewidth=3, alpha=1, ) fd_dots[fd_dots < threshold] = np.nan - ax.plot(fd_dots, ".", color=color, markersize=10) + ax.plot(fd_dots, '.', color=color, markersize=10) fd_line = time_series.copy() fd_line[fd_line >= threshold] = top_line fd_line[fd_line < threshold] = np.nan - ax.plot(fd_line, ".", color=color, markersize=10) + ax.plot(fd_line, '.', color=color, markersize=10) # Plot the good volumes, i.e: thresholded at 0.1, 0.2, 0.5 good_vols = len(time_series[time_series < threshold]) @@ -438,8 +439,8 @@ def plot_framewise_displacement_es( threshold / ymax, good_vols, c=color, - 
verticalalignment="center", - horizontalalignment="left", + verticalalignment='center', + horizontalalignment='left', transform=ax.transAxes, fontsize=20, ) @@ -449,9 +450,9 @@ def plot_framewise_displacement_es( 1.01, top_line / ymax, time_series.size, - c="black", - verticalalignment="center", - horizontalalignment="left", + c='black', + verticalalignment='center', + horizontalalignment='left', transform=ax.transAxes, fontsize=20, ) @@ -459,7 +460,7 @@ def plot_framewise_displacement_es( if run_index is not None: # FD plots use time series index, not time, as x-axis for run_location in run_index: - ax.axvline(run_location, color="black", linestyle="--") + ax.axvline(run_location, color='black', linestyle='--') ax.set_xlim((0, ntsteps - 1)) ax.set_ylim(0, ymax) @@ -470,7 +471,7 @@ def plot_framewise_displacement_es( ): item.set_fontsize(30) - for axis in ["top", "bottom", "left", "right"]: + for axis in ['top', 'bottom', 'left', 'right']: ax.spines[axis].set_linewidth(4) sns.despine() @@ -535,27 +536,27 @@ def plot_fmri_es( if preprocessed_arr.shape != denoised_interpolated_arr.shape: raise ValueError( - "Shapes do not match:\n" - f"\t{preprocessed_bold}: {preprocessed_arr.shape}\n" - f"\t{denoised_interpolated_bold}: {denoised_interpolated_arr.shape}\n\n" + 'Shapes do not match:\n' + f'\t{preprocessed_bold}: {preprocessed_arr.shape}\n' + f'\t{denoised_interpolated_bold}: {denoised_interpolated_arr.shape}\n\n' ) # Create dataframes for the bold_data DVARS, FD dvars_regressors = pd.DataFrame( { - "Pre regression": preprocessed_dvars, - "Post all": denoised_interpolated_dvars, + 'Pre regression': preprocessed_dvars, + 'Post all': denoised_interpolated_dvars, } ) motion_df = pd.read_table(motion_file) - if "framewise_displacement_filtered" in motion_df.columns: - fd_regressor = motion_df["framewise_displacement_filtered"].values + if 'framewise_displacement_filtered' in motion_df.columns: + fd_regressor = motion_df['framewise_displacement_filtered'].values else: - fd_regressor = motion_df["framewise_displacement"].values + fd_regressor = motion_df['framewise_displacement'].values if temporal_mask: - tmask_arr = pd.read_table(temporal_mask)["framewise_displacement"].values.astype(bool) + tmask_arr = pd.read_table(temporal_mask)['framewise_displacement'].values.astype(bool) else: tmask_arr = np.zeros(fd_regressor.shape, dtype=bool) @@ -563,16 +564,16 @@ def plot_fmri_es( # after mean-centering and detrending. preprocessed_timeseries = pd.DataFrame( { - "Mean": np.nanmean(preprocessed_arr, axis=0), - "Std": np.nanstd(preprocessed_arr, axis=0), + 'Mean': np.nanmean(preprocessed_arr, axis=0), + 'Std': np.nanstd(preprocessed_arr, axis=0), } ) # The mean and standard deviation of the denoised data, with bad volumes included. 
denoised_interpolated_timeseries = pd.DataFrame( { - "Mean": np.nanmean(denoised_interpolated_arr, axis=0), - "Std": np.nanstd(denoised_interpolated_arr, axis=0), + 'Mean': np.nanmean(denoised_interpolated_arr, axis=0), + 'Std': np.nanstd(denoised_interpolated_arr, axis=0), } ) @@ -595,10 +596,10 @@ def plot_fmri_es( # Make a temporary file for niftis and ciftis rm_temp_file = True - if preprocessed_bold.endswith(".nii.gz"): - temp_preprocessed_file = os.path.join(temporary_file_dir, "filex_raw.nii.gz") + if preprocessed_bold.endswith('.nii.gz'): + temp_preprocessed_file = os.path.join(temporary_file_dir, 'filex_raw.nii.gz') else: - temp_preprocessed_file = os.path.join(temporary_file_dir, "filex_raw.dtseries.nii") + temp_preprocessed_file = os.path.join(temporary_file_dir, 'filex_raw.dtseries.nii') # Write out the scaled data temp_preprocessed_file = write_ndata( @@ -678,7 +679,7 @@ def plot_fmri_es( plot_framewise_displacement_es(fd_regressor, ax3, TR=TR, run_index=run_index) # Save out the before processing file - fig.savefig(figure_name, bbox_inches="tight", pad_inches=None, dpi=300) + fig.savefig(figure_name, bbox_inches='tight', pad_inches=None, dpi=300) plt.close(fig) # Remove temporary files @@ -707,7 +708,7 @@ class FMRIPlot: spikes_files """ - __slots__ = ("func_file", "mask_data", "TR", "seg_data", "confounds", "spikes") + __slots__ = ('func_file', 'mask_data', 'TR', 'seg_data', 'confounds', 'spikes') def __init__( self, @@ -732,7 +733,7 @@ def __init__( if not isinstance(func_img, nb.Cifti2Image): # If Nifti self.mask_data = nb.fileslice.strided_scalar(func_img.shape[:3], np.uint8(1)) if mask_file: - self.mask_data = np.asanyarray(nb.load(mask_file).dataobj).astype("uint8") + self.mask_data = np.asanyarray(nb.load(mask_file).dataobj).astype('uint8') if seg_file: self.seg_data = np.asanyarray(nb.load(seg_file).dataobj) @@ -742,14 +743,14 @@ def __init__( vlines = {} self.confounds = {} if data is None and confound_file: - data = pd.read_csv(confound_file, sep=r"[\t\s]+", usecols=usecols, index_col=False) + data = pd.read_csv(confound_file, sep=r'[\t\s]+', usecols=usecols, index_col=False) # Confounds information if data is not None: for name in data.columns.ravel(): self.confounds[name] = { - "values": data[[name]].values.ravel().tolist(), - "units": units.get(name), - "cutoff": vlines.get(name), + 'values': data[[name]].values.ravel().tolist(), + 'units': units.get(name), + 'cutoff': vlines.get(name), } # Spike information self.spikes = [] @@ -760,7 +761,7 @@ def __init__( def plot(self, labelsize, figure=None): """Perform main plotting step.""" # Layout settings - sns.set_context("paper", font_scale=1) + sns.set_context('paper', font_scale=1) if figure is None: figure = plt.gcf() @@ -775,7 +776,7 @@ def plot(self, labelsize, figure=None): ) grid_id = 0 - for _, name, _ in self.spikes: + for _, _, _ in self.spikes: # RF: What is this? 
# spikesplot(tsz, # title=name, @@ -788,10 +789,10 @@ def plot(self, labelsize, figure=None): if self.confounds: from seaborn import color_palette - palette = color_palette("husl", n_confounds) + palette = color_palette('husl', n_confounds) for i, (name, kwargs) in enumerate(self.confounds.items()): - time_series = kwargs.pop("values") + time_series = kwargs.pop('values') plot_confounds( time_series, grid[grid_id], TR=self.TR, color=palette[i], name=name, **kwargs ) @@ -866,33 +867,33 @@ def plot_carpet( if isinstance(img, nb.Cifti2Image): # CIFTI assert ( - img.nifti_header.get_intent()[0] == "ConnDenseSeries" - ), f"Not a dense timeseries: {img.nifti_header.get_intent()[0]}, {func}" + img.nifti_header.get_intent()[0] == 'ConnDenseSeries' + ), f'Not a dense timeseries: {img.nifti_header.get_intent()[0]}, {func}' # Get required information data = img.get_fdata().T matrix = img.header.matrix struct_map = { - "LEFT_CORTEX": 1, - "RIGHT_CORTEX": 2, - "SUBCORTICAL": 3, - "CEREBELLUM": 4, + 'LEFT_CORTEX': 1, + 'RIGHT_CORTEX': 2, + 'SUBCORTICAL': 3, + 'CEREBELLUM': 4, } - seg_data = np.zeros((data.shape[0],), dtype="uint32") + seg_data = np.zeros((data.shape[0],), dtype='uint32') # Get brain model information for brain_model in matrix.get_index_map(1).brain_models: - if "CORTEX" in brain_model.brain_structure: - lidx = (1, 2)["RIGHT" in brain_model.brain_structure] - elif "CEREBELLUM" in brain_model.brain_structure: + if 'CORTEX' in brain_model.brain_structure: + lidx = (1, 2)['RIGHT' in brain_model.brain_structure] + elif 'CEREBELLUM' in brain_model.brain_structure: lidx = 4 else: lidx = 3 index_final = brain_model.index_offset + brain_model.index_count seg_data[brain_model.index_offset : index_final] = lidx - assert len(seg_data[seg_data < 1]) == 0, "Unassigned labels" + assert len(seg_data[seg_data < 1]) == 0, 'Unassigned labels' else: # Volumetric NIfTI - img_nii = check_niimg_4d(img, dtype="auto") # Check the image is in nifti format + img_nii = check_niimg_4d(img, dtype='auto') # Check the image is in nifti format func_data = safe_get_data(img_nii, ensure_finite=True) ntsteps = func_data.shape[-1] data = func_data[atlaslabels > 0].reshape(-1, ntsteps) @@ -900,7 +901,7 @@ def plot_carpet( # Map segmentation if lut is None: - lut = np.zeros((256,), dtype="int") + lut = np.zeros((256,), dtype='int') lut[1:11] = 1 lut[255] = 2 lut[30:99] = 3 @@ -913,22 +914,22 @@ def plot_carpet( if isinstance(img, nb.Cifti2Image): # Preserve continuity - order = seg_data.argsort(kind="stable") + order = seg_data.argsort(kind='stable') # Get color maps - cmap = ListedColormap([plt.get_cmap("Paired").colors[i] for i in (1, 0, 7, 3)]) + cmap = ListedColormap([plt.get_cmap('Paired').colors[i] for i in (1, 0, 7, 3)]) assert len(cmap.colors) == len( struct_map - ), "Mismatch between expected # of structures and colors" + ), 'Mismatch between expected # of structures and colors' else: # Order following segmentation labels order = np.argsort(seg_data)[::-1] # Set colormap - cmap = ListedColormap(plt.get_cmap("tab10").colors[:4][::-1]) + cmap = ListedColormap(plt.get_cmap('tab10').colors[:4][::-1]) # Detrend and z-score data if standardize: # This does not account for the temporal mask. 
- data = clean(data.T, t_r=TR, detrend=True, filter=False, standardize="zscore_sample").T + data = clean(data.T, t_r=TR, detrend=True, filter=False, standardize='zscore_sample').T vlimits = (-2, 2) elif temporal_mask is not None: # If standardize is False and a temporal mask is provided, @@ -972,12 +973,12 @@ def plot_carpet( # Segmentation colorbar ax0.set_xticks([]) - ax0.imshow(seg_data[order, np.newaxis], interpolation="none", aspect="auto", cmap=cmap) + ax0.imshow(seg_data[order, np.newaxis], interpolation='none', aspect='auto', cmap=cmap) - if func.endswith("nii.gz"): # Nifti - labels = ["Cortical GM", "Subcortical GM", "Cerebellum", "CSF and WM"] + if func.endswith('nii.gz'): # Nifti + labels = ['Cortical GM', 'Subcortical GM', 'Cerebellum', 'CSF and WM'] else: # Cifti - labels = ["Left Cortex", "Right Cortex", "Subcortical", "Cerebellum"] + labels = ['Left Cortex', 'Right Cortex', 'Subcortical', 'Cerebellum'] # Formatting the plot tick_locs = [] @@ -985,20 +986,20 @@ def plot_carpet( tick_locs.append(np.argwhere(seg_data[order] == y).mean()) ax0.set_yticks(tick_locs) - ax0.set_yticklabels(labels, fontdict={"fontsize": labelsize}, rotation=0, va="center") + ax0.set_yticklabels(labels, fontdict={'fontsize': labelsize}, rotation=0, va='center') ax0.grid(False) - ax0.spines["left"].set_visible(False) - ax0.spines["bottom"].set_color("none") - ax0.spines["bottom"].set_visible(False) + ax0.spines['left'].set_visible(False) + ax0.spines['bottom'].set_color('none') + ax0.spines['bottom'].set_visible(False) ax0.set_xticks([]) ax0.set_xticklabels([]) # Carpet plot pos = ax1.imshow( data[order], - interpolation="nearest", - aspect="auto", - cmap="gray", + interpolation='nearest', + aspect='auto', + cmap='gray', vmin=vlimits[0], vmax=vlimits[1], ) @@ -1012,28 +1013,30 @@ def plot_carpet( # Add color bands to the carpet plot corresponding to censored volumes outlier_idx = list(np.where(temporal_mask)[0]) gaps = [ - [start, end] for start, end in zip(outlier_idx, outlier_idx[1:]) if start + 1 < end + [start, end] + for start, end in zip(outlier_idx, outlier_idx[1:], strict=False) + if start + 1 < end ] edges = iter(outlier_idx[:1] + sum(gaps, []) + outlier_idx[-1:]) - consecutive_outliers_idx = list(zip(edges, edges)) + consecutive_outliers_idx = list(zip(edges, edges, strict=False)) for band in consecutive_outliers_idx: start = band[0] - 0.5 end = band[1] + 0.5 - ax1.axvspan(start, end, color="red", alpha=0.5) + ax1.axvspan(start, end, color='red', alpha=0.5) # Remove and redefine spines - for side in ["top", "right"]: + for side in ['top', 'right']: # Toggle the spine objects - ax0.spines[side].set_color("none") + ax0.spines[side].set_color('none') ax0.spines[side].set_visible(False) - ax1.spines[side].set_color("none") + ax1.spines[side].set_color('none') ax1.spines[side].set_visible(False) - ax1.yaxis.set_ticks_position("left") - ax1.xaxis.set_ticks_position("bottom") - ax1.spines["bottom"].set_visible(False) - ax1.spines["left"].set_color("none") - ax1.spines["left"].set_visible(False) + ax1.yaxis.set_ticks_position('left') + ax1.xaxis.set_ticks_position('bottom') + ax1.spines['bottom'].set_visible(False) + ax1.spines['left'].set_color('none') + ax1.spines['left'].set_visible(False) # Use the last axis for a colorbar if colorbar: @@ -1046,7 +1049,7 @@ def plot_carpet( # Write out file if output_file is not None: figure = plt.gcf() - figure.savefig(output_file, bbox_inches="tight") + figure.savefig(output_file, bbox_inches='tight') plt.close(figure) figure = None return output_file @@ 
-1060,7 +1063,7 @@ def surf_data_from_cifti(data, axis, surf_name): https://nbviewer.org/github/neurohackademy/nh2020-curriculum/blob/master/\ we-nibabel-markiewicz/NiBabel.ipynb """ - assert isinstance(axis, (nb.cifti2.BrainModelAxis, nb.cifti2.ParcelsAxis)) + assert isinstance(axis, nb.cifti2.BrainModelAxis | nb.cifti2.ParcelsAxis) if isinstance(axis, nb.cifti2.BrainModelAxis): for name, data_indices, model in axis.iter_structures(): # Iterates over volumetric and surface structures @@ -1075,8 +1078,8 @@ def surf_data_from_cifti(data, axis, surf_name): else: if surf_name not in axis.nvertices: raise ValueError( - f"No structure named {surf_name}.\n\n" - f"Available structures are {list(axis.name.keys())}" + f'No structure named {surf_name}.\n\n' + f'Available structures are {list(axis.name.keys())}' ) nvertices = axis.nvertices[surf_name] surf_data = np.zeros(nvertices) @@ -1088,7 +1091,7 @@ def surf_data_from_cifti(data, axis, surf_name): return surf_data - raise ValueError(f"No structure named {surf_name}") + raise ValueError(f'No structure named {surf_name}') def plot_design_matrix(design_matrix, temporal_mask=None): @@ -1117,12 +1120,12 @@ def plot_design_matrix(design_matrix, temporal_mask=None): design_matrix_df = pd.read_table(design_matrix) if temporal_mask: censoring_df = pd.read_table(temporal_mask) - n_motion_outliers = censoring_df["framewise_displacement"].sum() + n_motion_outliers = censoring_df['framewise_displacement'].sum() motion_outliers_df = pd.DataFrame( data=np.zeros((censoring_df.shape[0], n_motion_outliers), dtype=np.int16), - columns=[f"outlier{i}" for i in range(1, n_motion_outliers + 1)], + columns=[f'outlier{i}' for i in range(1, n_motion_outliers + 1)], ) - motion_outlier_idx = np.where(censoring_df["framewise_displacement"])[0] + motion_outlier_idx = np.where(censoring_df['framewise_displacement'])[0] for i_outlier, outlier_col in enumerate(motion_outliers_df.columns): outlier_row = motion_outlier_idx[i_outlier] motion_outliers_df.loc[outlier_row, outlier_col] = 1 @@ -1132,7 +1135,7 @@ def plot_design_matrix(design_matrix, temporal_mask=None): axis=1, ) - design_matrix_figure = os.path.abspath("design_matrix.svg") + design_matrix_figure = os.path.abspath('design_matrix.svg') plotting.plot_design_matrix(design_matrix_df, output_file=design_matrix_figure) return design_matrix_figure diff --git a/xcp_d/utils/qcmetrics.py b/xcp_d/utils/qcmetrics.py index 32efe925a..b3e90089e 100644 --- a/xcp_d/utils/qcmetrics.py +++ b/xcp_d/utils/qcmetrics.py @@ -4,7 +4,7 @@ import numpy as np from nipype import logging -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') def compute_registration_qc( @@ -48,71 +48,71 @@ def compute_registration_qc( template_mask_arr = nb.load(template_mask).get_fdata() reg_qc = { - "coreg_dice": [dice(bold_mask_anatspace_arr, anat_mask_anatspace_arr)], - "coreg_correlation": [pearson(bold_mask_anatspace_arr, anat_mask_anatspace_arr)], - "coreg_overlap": [overlap(bold_mask_anatspace_arr, anat_mask_anatspace_arr)], - "norm_dice": [dice(bold_mask_stdspace_arr, template_mask_arr)], - "norm_correlation": [pearson(bold_mask_stdspace_arr, template_mask_arr)], - "norm_overlap": [overlap(bold_mask_stdspace_arr, template_mask_arr)], + 'coreg_dice': [dice(bold_mask_anatspace_arr, anat_mask_anatspace_arr)], + 'coreg_correlation': [pearson(bold_mask_anatspace_arr, anat_mask_anatspace_arr)], + 'coreg_overlap': [overlap(bold_mask_anatspace_arr, anat_mask_anatspace_arr)], + 'norm_dice': [dice(bold_mask_stdspace_arr, 
template_mask_arr)], + 'norm_correlation': [pearson(bold_mask_stdspace_arr, template_mask_arr)], + 'norm_overlap': [overlap(bold_mask_stdspace_arr, template_mask_arr)], } qc_metadata = { - "coreg_dice": { - "LongName": "Coregistration Sørensen-Dice Coefficient", - "Description": ( - "The Sørensen-Dice coefficient calculated between the binary brain masks from the " - "coregistered anatomical and functional images. " - "Values are bounded between 0 and 1, " - "with higher values indicating better coregistration." + 'coreg_dice': { + 'LongName': 'Coregistration Sørensen-Dice Coefficient', + 'Description': ( + 'The Sørensen-Dice coefficient calculated between the binary brain masks from the ' + 'coregistered anatomical and functional images. ' + 'Values are bounded between 0 and 1, ' + 'with higher values indicating better coregistration.' ), - "Term URL": "https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient", + 'Term URL': 'https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient', }, - "coreg_correlation": { - "LongName": "Coregistration Pearson Correlation", - "Description": ( - "The Pearson correlation coefficient calculated between the binary brain masks " - "from the coregistered anatomical and functional images. " - "Values are bounded between 0 and 1, " - "with higher values indicating better coregistration." + 'coreg_correlation': { + 'LongName': 'Coregistration Pearson Correlation', + 'Description': ( + 'The Pearson correlation coefficient calculated between the binary brain masks ' + 'from the coregistered anatomical and functional images. ' + 'Values are bounded between 0 and 1, ' + 'with higher values indicating better coregistration.' ), - "Term URL": "https://en.wikipedia.org/wiki/Pearson_correlation_coefficient", + 'Term URL': 'https://en.wikipedia.org/wiki/Pearson_correlation_coefficient', }, - "coreg_overlap": { - "LongName": "Coregistration Coverage Metric", - "Description": ( - "The Szymkiewicz-Simpson overlap coefficient calculated between the binary brain " - "masks from the normalized functional image and the associated template. " - "Higher values indicate better normalization." + 'coreg_overlap': { + 'LongName': 'Coregistration Coverage Metric', + 'Description': ( + 'The Szymkiewicz-Simpson overlap coefficient calculated between the binary brain ' + 'masks from the normalized functional image and the associated template. ' + 'Higher values indicate better normalization.' ), - "Term URL": "https://en.wikipedia.org/wiki/Overlap_coefficient", + 'Term URL': 'https://en.wikipedia.org/wiki/Overlap_coefficient', }, - "norm_dice": { - "LongName": "Normalization Sørensen-Dice Coefficient", - "Description": ( - "The Sørensen-Dice coefficient calculated between the binary brain masks from the " - "normalized functional image and the associated template. " - "Values are bounded between 0 and 1, " - "with higher values indicating better normalization." + 'norm_dice': { + 'LongName': 'Normalization Sørensen-Dice Coefficient', + 'Description': ( + 'The Sørensen-Dice coefficient calculated between the binary brain masks from the ' + 'normalized functional image and the associated template. ' + 'Values are bounded between 0 and 1, ' + 'with higher values indicating better normalization.' 
), - "Term URL": "https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient", + 'Term URL': 'https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient', }, - "norm_correlation": { - "LongName": "Normalization Pearson Correlation", - "Description": ( - "The Pearson correlation coefficient calculated between the binary brain masks " - "from the normalized functional image and the associated template. " - "Values are bounded between 0 and 1, " - "with higher values indicating better normalization." + 'norm_correlation': { + 'LongName': 'Normalization Pearson Correlation', + 'Description': ( + 'The Pearson correlation coefficient calculated between the binary brain masks ' + 'from the normalized functional image and the associated template. ' + 'Values are bounded between 0 and 1, ' + 'with higher values indicating better normalization.' ), - "Term URL": "https://en.wikipedia.org/wiki/Pearson_correlation_coefficient", + 'Term URL': 'https://en.wikipedia.org/wiki/Pearson_correlation_coefficient', }, - "norm_overlap": { - "LongName": "Normalization Overlap Coefficient", - "Description": ( - "The Szymkiewicz-Simpson overlap coefficient calculated between the binary brain " - "masks from the normalized functional image and the associated template. " - "Higher values indicate better normalization." + 'norm_overlap': { + 'LongName': 'Normalization Overlap Coefficient', + 'Description': ( + 'The Szymkiewicz-Simpson overlap coefficient calculated between the binary brain ' + 'masks from the normalized functional image and the associated template. ' + 'Higher values indicate better normalization.' ), - "Term URL": "https://en.wikipedia.org/wiki/Overlap_coefficient", + 'Term URL': 'https://en.wikipedia.org/wiki/Overlap_coefficient', }, } return reg_qc, qc_metadata @@ -254,13 +254,13 @@ def compute_dvars( # Robust standard deviation (we are using "lower" interpolation because this is what FSL does try: func_sd = ( - np.percentile(datat, 75, axis=1, method="lower") - - np.percentile(datat, 25, axis=1, method="lower") + np.percentile(datat, 75, axis=1, method='lower') + - np.percentile(datat, 25, axis=1, method='lower') ) / 1.349 except TypeError: # NP < 1.22 func_sd = ( - np.percentile(datat, 75, axis=1, interpolation="lower") - - np.percentile(datat, 25, axis=1, interpolation="lower") + np.percentile(datat, 75, axis=1, interpolation='lower') + - np.percentile(datat, 25, axis=1, interpolation='lower') ) / 1.349 if remove_zerovariance: diff --git a/xcp_d/utils/restingstate.py b/xcp_d/utils/restingstate.py index da58353f9..c0033e369 100644 --- a/xcp_d/utils/restingstate.py +++ b/xcp_d/utils/restingstate.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Functions for calculating resting-state derivatives (ReHo and ALFF).""" + import nibabel as nb import numpy as np from nipype import logging @@ -8,7 +9,7 @@ from scipy.stats import rankdata from templateflow.api import get as get_template -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') def compute_2d_reho(datat, adjacency_matrix): @@ -79,12 +80,12 @@ def mesh_adjacency(hemi): ----- Modified by Taylor Salo to loop over all vertices in faces. 
""" - surf = str(get_template("fsLR", space=None, hemi=hemi, suffix="sphere", density="32k")) + surf = str(get_template('fsLR', space=None, hemi=hemi, suffix='sphere', density='32k')) surf = nb.load(surf) # load via nibabel # Aggregate GIFTI data arrays into an ndarray or tuple of ndarray select the arrays in a # specific order - vertices_faces = surf.agg_data(("pointset", "triangle")) + vertices_faces = surf.agg_data(('pointset', 'triangle')) vertices = vertices_faces[0] faces = vertices_faces[1] n_vertices = vertices.shape[0] @@ -171,7 +172,7 @@ def compute_alff(*, data_matrix, low_pass, high_pass, TR, sample_mask): voxel_data_censored /= np.std(voxel_data_censored) time_arr = np.arange(n_volumes) * TR - assert sample_mask.size == time_arr.size, f"{sample_mask.size} != {time_arr.size}" + assert sample_mask.size == time_arr.size, f'{sample_mask.size} != {time_arr.size}' time_arr = time_arr[sample_mask] frequencies_hz = np.linspace(0, 0.5 * fs, (n_volumes // 2) + 1)[1:] angular_frequencies = 2 * np.pi * frequencies_hz @@ -188,7 +189,7 @@ def compute_alff(*, data_matrix, low_pass, high_pass, TR, sample_mask): frequencies_hz, power_spectrum = signal.periodogram( voxel_data, fs, - scaling="spectrum", + scaling='spectrum', ) # square root of power spectrum @@ -213,5 +214,5 @@ def compute_alff(*, data_matrix, low_pass, high_pass, TR, sample_mask): # Rescale ALFF based on original BOLD scale alff[i_voxel] *= sd_scale - assert alff.size == n_voxels, f"{alff.shape} != {n_voxels}" + assert alff.size == n_voxels, f'{alff.shape} != {n_voxels}' return alff diff --git a/xcp_d/utils/sentry.py b/xcp_d/utils/sentry.py index 9381c892e..94fce1794 100644 --- a/xcp_d/utils/sentry.py +++ b/xcp_d/utils/sentry.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Stripped out routines for Sentry.""" + import os import re @@ -12,42 +13,42 @@ CHUNK_SIZE = 16384 # Group common events with pre specified fingerprints KNOWN_ERRORS = { - "permission-denied": ["PermissionError: [Errno 13] Permission denied"], - "memory-error": [ - "MemoryError", - "Cannot allocate memory", - "Return code: 134", + 'permission-denied': ['PermissionError: [Errno 13] Permission denied'], + 'memory-error': [ + 'MemoryError', + 'Cannot allocate memory', + 'Return code: 134', ], - "reconall-already-running": ["ERROR: it appears that recon-all is already running"], - "no-disk-space": ["[Errno 28] No space left on device", "[Errno 122] Disk quota exceeded"], - "segfault": [ - "Segmentation Fault", - "Segfault", - "Return code: 139", + 'reconall-already-running': ['ERROR: it appears that recon-all is already running'], + 'no-disk-space': ['[Errno 28] No space left on device', '[Errno 122] Disk quota exceeded'], + 'segfault': [ + 'Segmentation Fault', + 'Segfault', + 'Return code: 139', ], - "potential-race-condition": [ - "[Errno 39] Directory not empty", - "_unfinished.json", + 'potential-race-condition': [ + '[Errno 39] Directory not empty', + '_unfinished.json', ], - "keyboard-interrupt": [ - "KeyboardInterrupt", + 'keyboard-interrupt': [ + 'KeyboardInterrupt', ], } def sentry_setup(): """Set up sentry.""" - release = config.environment.version or "dev" + release = config.environment.version or 'dev' environment = ( - "dev" + 'dev' if ( - os.getenv("XCP-D_DEV", "").lower in ("1", "on", "yes", "y", "true") or ("+" in release) + os.getenv('XCP-D_DEV', '').lower in ('1', 'on', 'yes', 'y', 'true') or ('+' in release) ) - else "prod" + else 'prod' ) sentry_sdk.init( - 
"https://729b52a70da149da97c69af55eebc4eb@o317280.ingest.sentry.io/5645951", + 'https://729b52a70da149da97c69af55eebc4eb@o317280.ingest.sentry.io/5645951', release=release, environment=environment, before_send=before_send, @@ -61,14 +62,14 @@ def process_crashfile(crashfile): """Parse the contents of a crashfile and submit sentry messages.""" crash_info = read_crashfile(str(crashfile)) with sentry_sdk.push_scope() as scope: - scope.level = "fatal" + scope.level = 'fatal' # Extract node name - node_name = crash_info.pop("node").split(".")[-1] - scope.set_tag("node_name", node_name) + node_name = crash_info.pop('node').split('.')[-1] + scope.set_tag('node_name', node_name) # Massage the traceback, extract the gist - traceback = crash_info.pop("traceback") + traceback = crash_info.pop('traceback') # last line is probably most informative summary gist = traceback.splitlines()[-1] exception_text_start = 1 @@ -77,12 +78,12 @@ def process_crashfile(crashfile): break exception_text_start += 1 - exception_text = "\n".join(traceback.splitlines()[exception_text_start:]) + exception_text = '\n'.join(traceback.splitlines()[exception_text_start:]) # Extract inputs, if present - inputs = crash_info.pop("inputs", None) + inputs = crash_info.pop('inputs', None) if inputs: - scope.set_extra("inputs", dict(inputs)) + scope.set_extra('inputs', dict(inputs)) # Extract any other possible metadata in the crash file for k, v in crash_info.items(): @@ -91,10 +92,10 @@ def process_crashfile(crashfile): scope.set_extra(k, strv[0]) else: for i, chunk in enumerate(strv): - scope.set_extra("%s_%02d" % (k, i), chunk) # noqa:FS001 + scope.set_extra('%s_%02d' % (k, i), chunk) # noqa:FS001 - fingerprint = "" - issue_title = f"{node_name}: {gist}" + fingerprint = '' + issue_title = f'{node_name}: {gist}' for new_fingerprint, error_snippets in KNOWN_ERRORS.items(): for error_snippet in error_snippets: if error_snippet in traceback: @@ -104,47 +105,47 @@ def process_crashfile(crashfile): if fingerprint: break - message = issue_title + "\n\n" + message = issue_title + '\n\n' message += exception_text[-(8192 - len(message)) :] if fingerprint: - sentry_sdk.add_breadcrumb(message=fingerprint, level="fatal") + sentry_sdk.add_breadcrumb(message=fingerprint, level='fatal') else: # remove file paths - fingerprint = re.sub(r"(/[^/ ]*)+/?", "", message) + fingerprint = re.sub(r'(/[^/ ]*)+/?', '', message) # remove words containing numbers - fingerprint = re.sub(r"([a-zA-Z]*[0-9]+[a-zA-Z]*)+", "", fingerprint) + fingerprint = re.sub(r'([a-zA-Z]*[0-9]+[a-zA-Z]*)+', '', fingerprint) # adding the return code if it exists for line in message.splitlines(): - if line.startswith("Return code"): + if line.startswith('Return code'): fingerprint += line break scope.fingerprint = [fingerprint] - sentry_sdk.capture_message(message, "fatal") + sentry_sdk.capture_message(message, 'fatal') def before_send(event, hints): # noqa:U100 """Filter log messages about crashed nodes.""" - if "logentry" in event and "message" in event["logentry"]: - msg = event["logentry"]["message"] - if msg.startswith("could not run node:"): + if 'logentry' in event and 'message' in event['logentry']: + msg = event['logentry']['message'] + if msg.startswith('could not run node:'): return None - if msg.startswith("Saving crash info to "): + if msg.startswith('Saving crash info to '): return None - if re.match("Node .+ failed to run on host .+", msg): + if re.match('Node .+ failed to run on host .+', msg): return None - if "breadcrumbs" in event and 
isinstance(event["breadcrumbs"], list): + if 'breadcrumbs' in event and isinstance(event['breadcrumbs'], list): fingerprints_to_propagate = [ - "no-disk-space", - "memory-error", - "permission-denied", - "keyboard-interrupt", + 'no-disk-space', + 'memory-error', + 'permission-denied', + 'keyboard-interrupt', ] - for bc in event["breadcrumbs"]: - msg = bc.get("message", "empty-msg") + for bc in event['breadcrumbs']: + msg = bc.get('message', 'empty-msg') if msg in fingerprints_to_propagate: - event["fingerprint"] = [msg] + event['fingerprint'] = [msg] break return event diff --git a/xcp_d/utils/utils.py b/xcp_d/utils/utils.py index 931368612..e82744141 100644 --- a/xcp_d/utils/utils.py +++ b/xcp_d/utils/utils.py @@ -6,7 +6,7 @@ from xcp_d.utils.doc import fill_doc -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') def check_deps(workflow): @@ -16,7 +16,7 @@ def check_deps(workflow): return sorted( (node.interface.__class__.__name__, node.interface._cmd) for node in workflow._get_all_nodes() - if (hasattr(node.interface, "_cmd") and which(node.interface._cmd.split()[0]) is None) + if (hasattr(node.interface, '_cmd') and which(node.interface._cmd.split()[0]) is None) ) @@ -60,59 +60,59 @@ def get_bold2std_and_t1w_xfms(bold_file, template_to_anat_xfm): from xcp_d.utils.bids import get_entity # Extract the space of the BOLD file - bold_space = get_entity(bold_file, "space") + bold_space = get_entity(bold_file, 'space') - if bold_space in ("native", "T1w"): - base_std_space = get_entity(template_to_anat_xfm, "from") + if bold_space in ('native', 'T1w'): + base_std_space = get_entity(template_to_anat_xfm, 'from') raise ValueError(f"BOLD space '{bold_space}' not supported.") - elif f"from-{bold_space}" not in template_to_anat_xfm: + elif f'from-{bold_space}' not in template_to_anat_xfm: raise ValueError( - f"Transform does not match BOLD space: {bold_space} != {template_to_anat_xfm}" + f'Transform does not match BOLD space: {bold_space} != {template_to_anat_xfm}' ) # Pull out the correct transforms based on bold_file name and string them together. xforms_to_T1w = [template_to_anat_xfm] # used for all spaces except T1w and native xforms_to_T1w_invert = [False] - if bold_space == "MNI152NLin2009cAsym": + if bold_space == 'MNI152NLin2009cAsym': # Data already in MNI152NLin2009cAsym space. - xforms_to_MNI = ["identity"] + xforms_to_MNI = ['identity'] xforms_to_MNI_invert = [False] - elif bold_space == "MNI152NLin6Asym": + elif bold_space == 'MNI152NLin6Asym': # MNI152NLin6Asym --> MNI152NLin2009cAsym MNI152NLin6Asym_to_MNI152NLin2009cAsym = str( get_template( - template="MNI152NLin2009cAsym", - mode="image", - suffix="xfm", - extension=".h5", - **{"from": "MNI152NLin6Asym"}, + template='MNI152NLin2009cAsym', + mode='image', + suffix='xfm', + extension='.h5', + **{'from': 'MNI152NLin6Asym'}, ), ) xforms_to_MNI = [MNI152NLin6Asym_to_MNI152NLin2009cAsym] xforms_to_MNI_invert = [False] - elif bold_space == "MNIInfant": + elif bold_space == 'MNIInfant': # MNIInfant --> MNI152NLin2009cAsym MNIInfant_to_MNI152NLin2009cAsym = str( load_data( - "transform/tpl-MNI152NLin2009cAsym_from-MNIInfant_mode-image_xfm.h5", + 'transform/tpl-MNI152NLin2009cAsym_from-MNIInfant_mode-image_xfm.h5', ) ) xforms_to_MNI = [MNIInfant_to_MNI152NLin2009cAsym] xforms_to_MNI_invert = [False] - elif bold_space == "T1w": + elif bold_space == 'T1w': # T1w --> ?? 
(extract from template_to_anat_xfm) --> MNI152NLin2009cAsym # Should not be reachable, since xcpd doesn't support T1w-space BOLD inputs - if base_std_space != "MNI152NLin2009cAsym": + if base_std_space != 'MNI152NLin2009cAsym': std_to_mni_xfm = str( get_template( - template="MNI152NLin2009cAsym", - mode="image", - suffix="xfm", - extension=".h5", - **{"from": base_std_space}, + template='MNI152NLin2009cAsym', + mode='image', + suffix='xfm', + extension='.h5', + **{'from': base_std_space}, ), ) xforms_to_MNI = [std_to_mni_xfm, template_to_anat_xfm] @@ -121,7 +121,7 @@ def get_bold2std_and_t1w_xfms(bold_file, template_to_anat_xfm): xforms_to_MNI = [template_to_anat_xfm] xforms_to_MNI_invert = [True] - xforms_to_T1w = ["identity"] + xforms_to_T1w = ['identity'] xforms_to_T1w_invert = [False] else: @@ -169,78 +169,78 @@ def get_std2bold_xfms(bold_file, source_file, source_space=None): from xcp_d.utils.bids import get_entity # Extract the space of the BOLD file - bold_space = get_entity(bold_file, "space") + bold_space = get_entity(bold_file, 'space') if source_space is None: # If a source space is not provided, extract the space of the source file # First try tpl because that won't raise an error - source_space = get_entity(source_file, "tpl") + source_space = get_entity(source_file, 'tpl') if source_space is None: # If tpl isn't available, try space. # get_entity will raise an error if space isn't there. - source_space = get_entity(source_file, "space") + source_space = get_entity(source_file, 'space') - if source_space not in ("MNI152NLin6Asym", "MNI152NLin2009cAsym", "MNIInfant"): + if source_space not in ('MNI152NLin6Asym', 'MNI152NLin2009cAsym', 'MNIInfant'): raise ValueError(f"Source space '{source_space}' not supported.") - if bold_space not in ("MNI152NLin6Asym", "MNI152NLin2009cAsym", "MNIInfant"): + if bold_space not in ('MNI152NLin6Asym', 'MNI152NLin2009cAsym', 'MNIInfant'): raise ValueError(f"BOLD space '{bold_space}' not supported.") # Load useful inter-template transforms from templateflow and package data MNI152NLin6Asym_to_MNI152NLin2009cAsym = str( get_template( - template="MNI152NLin2009cAsym", - mode="image", - suffix="xfm", - extension=".h5", - **{"from": "MNI152NLin6Asym"}, + template='MNI152NLin2009cAsym', + mode='image', + suffix='xfm', + extension='.h5', + **{'from': 'MNI152NLin6Asym'}, ), ) MNI152NLin2009cAsym_to_MNI152NLin6Asym = str( get_template( - template="MNI152NLin6Asym", - mode="image", - suffix="xfm", - extension=".h5", - **{"from": "MNI152NLin2009cAsym"}, + template='MNI152NLin6Asym', + mode='image', + suffix='xfm', + extension='.h5', + **{'from': 'MNI152NLin2009cAsym'}, ), ) MNIInfant_to_MNI152NLin2009cAsym = str( load_data( - "transform/tpl-MNIInfant_from-MNI152NLin2009cAsym_mode-image_xfm.h5", + 'transform/tpl-MNIInfant_from-MNI152NLin2009cAsym_mode-image_xfm.h5', ) ) MNI152NLin2009cAsym_to_MNIInfant = str( load_data( - "transform/tpl-MNI152NLin2009cAsym_from-MNIInfant_mode-image_xfm.h5", + 'transform/tpl-MNI152NLin2009cAsym_from-MNIInfant_mode-image_xfm.h5', ) ) if bold_space == source_space: - transforms = ["identity"] + transforms = ['identity'] - elif bold_space == "MNI152NLin6Asym": - if source_space == "MNI152NLin2009cAsym": + elif bold_space == 'MNI152NLin6Asym': + if source_space == 'MNI152NLin2009cAsym': transforms = [MNI152NLin2009cAsym_to_MNI152NLin6Asym] - elif source_space == "MNIInfant": + elif source_space == 'MNIInfant': transforms = [ MNI152NLin2009cAsym_to_MNI152NLin6Asym, MNIInfant_to_MNI152NLin2009cAsym, ] - elif bold_space == 
"MNI152NLin2009cAsym": - if source_space == "MNI152NLin6Asym": + elif bold_space == 'MNI152NLin2009cAsym': + if source_space == 'MNI152NLin6Asym': transforms = [MNI152NLin6Asym_to_MNI152NLin2009cAsym] - elif source_space == "MNIInfant": + elif source_space == 'MNIInfant': transforms = [MNIInfant_to_MNI152NLin2009cAsym] - elif bold_space == "MNIInfant": - if source_space == "MNI152NLin6Asym": + elif bold_space == 'MNIInfant': + if source_space == 'MNI152NLin6Asym': transforms = [ MNI152NLin2009cAsym_to_MNIInfant, MNI152NLin6Asym_to_MNI152NLin2009cAsym, ] - elif source_space == "MNI152NLin2009cAsym": + elif source_space == 'MNI152NLin2009cAsym': transforms = [MNI152NLin2009cAsym_to_MNIInfant] return transforms @@ -263,7 +263,7 @@ def fwhm2sigma(fwhm): @fill_doc -def estimate_brain_radius(mask_file, head_radius="auto"): +def estimate_brain_radius(mask_file, head_radius='auto'): """Estimate brain radius from binary brain mask file. Parameters @@ -284,7 +284,7 @@ def estimate_brain_radius(mask_file, head_radius="auto"): This was Paul Taylor's idea, shared in this NeuroStars post: https://neurostars.org/t/estimating-head-brain-radius-automatically/24290/2. """ - if head_radius == "auto": + if head_radius == 'auto': mask_img = nb.load(mask_file) mask_data = mask_img.get_fdata() n_voxels = np.sum(mask_data) @@ -293,7 +293,7 @@ def estimate_brain_radius(mask_file, head_radius="auto"): brain_radius = ((3 * volume) / (4 * np.pi)) ** (1 / 3) - LOGGER.info(f"Brain radius estimated at {brain_radius} mm.") + LOGGER.info(f'Brain radius estimated at {brain_radius} mm.') else: brain_radius = head_radius @@ -431,12 +431,12 @@ def denoise_with_nilearn( if low_pass or high_pass: # Now apply the bandpass filter to the interpolated data and confounds butterworth_kwargs = { - "sampling_rate": 1.0 / TR, - "low_pass": low_pass, - "high_pass": high_pass, - "order": filter_order, - "padtype": "constant", - "padlen": n_volumes - 1, # maximum possible padding + 'sampling_rate': 1.0 / TR, + 'low_pass': low_pass, + 'high_pass': high_pass, + 'order': filter_order, + 'padtype': 'constant', + 'padlen': n_volumes - 1, # maximum possible padding } preprocessed_bold = butterworth(signals=preprocessed_bold, **butterworth_kwargs) if detrend_and_denoise: @@ -530,16 +530,20 @@ def _interpolate(*, arr, sample_mask, TR): # Replace any high-motion volumes at the beginning or end of the run with the closest # low-motion volume's data. # Use https://stackoverflow.com/a/48106843/2589328 to group consecutive blocks of outliers. - gaps = [[start, end] for start, end in zip(outlier_idx, outlier_idx[1:]) if start + 1 < end] + gaps = [ + [start, end] + for start, end in zip(outlier_idx, outlier_idx[1:], strict=False) + if start + 1 < end + ] edges = iter(outlier_idx[:1] + sum(gaps, []) + outlier_idx[-1:]) - consecutive_outliers_idx = list(zip(edges, edges)) + consecutive_outliers_idx = list(zip(edges, edges, strict=False)) first_outliers = consecutive_outliers_idx[0] last_outliers = consecutive_outliers_idx[-1] # Replace outliers at beginning of run if first_outliers[0] == 0: LOGGER.warning( - f"Outlier volumes at beginning of run ({first_outliers[0]}-{first_outliers[1]}) " + f'Outlier volumes at beginning of run ({first_outliers[0]}-{first_outliers[1]}) ' "will be replaced with first non-outlier volume's values." 
) interpolated_arr[: first_outliers[1] + 1, :] = interpolated_arr[first_outliers[1] + 1, :] @@ -547,7 +551,7 @@ def _interpolate(*, arr, sample_mask, TR): # Replace outliers at end of run if last_outliers[1] == n_volumes - 1: LOGGER.warning( - f"Outlier volumes at end of run ({last_outliers[0]}-{last_outliers[1]}) " + f'Outlier volumes at end of run ({last_outliers[0]}-{last_outliers[1]}) ' "will be replaced with last non-outlier volume's values." ) interpolated_arr[last_outliers[0] :, :] = interpolated_arr[last_outliers[0] - 1, :] @@ -563,20 +567,20 @@ def _select_first(lst): def list_to_str(lst): """Convert a list to a pretty string.""" if not lst: - raise ValueError("Zero-length list provided.") + raise ValueError('Zero-length list provided.') lst_str = [str(item) for item in lst] if len(lst_str) == 1: return lst_str[0] elif len(lst_str) == 2: - return " and ".join(lst_str) + return ' and '.join(lst_str) else: return f"{', '.join(lst_str[:-1])}, and {lst_str[-1]}" def _transpose_lol(lol): """Transpose list of lists.""" - return list(map(list, zip(*lol))) + return list(map(list, zip(*lol, strict=False))) def _create_mem_gb(bold_fname): @@ -585,16 +589,16 @@ def _create_mem_gb(bold_fname): bold_size_gb = os.path.getsize(bold_fname) / (1024**3) bold_tlen = nb.load(bold_fname).shape[-1] mem_gbz = { - "derivative": bold_size_gb, - "resampled": bold_size_gb * 4, - "timeseries": bold_size_gb * (max(bold_tlen / 100, 1.0) + 4), + 'derivative': bold_size_gb, + 'resampled': bold_size_gb * 4, + 'timeseries': bold_size_gb * (max(bold_tlen / 100, 1.0) + 4), } - if mem_gbz["timeseries"] < 4.0: - mem_gbz["timeseries"] = 6.0 - mem_gbz["resampled"] = 2 - elif mem_gbz["timeseries"] > 8.0: - mem_gbz["timeseries"] = 8.0 - mem_gbz["resampled"] = 3 + if mem_gbz['timeseries'] < 4.0: + mem_gbz['timeseries'] = 6.0 + mem_gbz['resampled'] = 2 + elif mem_gbz['timeseries'] > 8.0: + mem_gbz['timeseries'] = 8.0 + mem_gbz['resampled'] = 3 return mem_gbz diff --git a/xcp_d/utils/write_save.py b/xcp_d/utils/write_save.py index 482fa6f67..3e2762492 100644 --- a/xcp_d/utils/write_save.py +++ b/xcp_d/utils/write_save.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Utilities to read and write nifiti and cifti data.""" + import os import nibabel as nb @@ -12,7 +13,7 @@ from xcp_d.utils.doc import fill_doc from xcp_d.utils.filemanip import split_filename -LOGGER = logging.getLogger("nipype.utils") +LOGGER = logging.getLogger('nipype.utils') def read_ndata(datafile, maskfile=None): @@ -32,17 +33,17 @@ def read_ndata(datafile, maskfile=None): Vertices or voxels by timepoints. """ # read cifti series - cifti_extensions = [".dtseries.nii", ".dlabel.nii", ".ptseries.nii", ".dscalar.nii"] - if any([datafile.endswith(ext) for ext in cifti_extensions]): + cifti_extensions = ['.dtseries.nii', '.dlabel.nii', '.ptseries.nii', '.dscalar.nii'] + if any(datafile.endswith(ext) for ext in cifti_extensions): data = nb.load(datafile).get_fdata() # or nifti data, mask is required - elif datafile.endswith(".nii.gz"): - assert maskfile is not None, "Input `maskfile` must be provided if `datafile` is a nifti." + elif datafile.endswith('.nii.gz'): + assert maskfile is not None, 'Input `maskfile` must be provided if `datafile` is a nifti.' 
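
Aside for reviewers (not part of the patch): the `_interpolate` hunk above only restyles the quotes and adds `strict=False` around the StackOverflow idiom that collapses consecutive censored-volume indices into runs. Because that idiom is hard to read in diff form, here is a minimal, self-contained Python sketch of it using made-up indices; nothing below comes from xcp_d itself.

outlier_idx = [0, 1, 2, 7, 8, 15]  # hypothetical censored volume indices

# A "gap" is any pair of successive indices that are not adjacent.
gaps = [[start, end] for start, end in zip(outlier_idx, outlier_idx[1:]) if start + 1 < end]

# Interleave the first index, the gap edges, and the last index, then pair the
# iterator with itself to get the [first, last] bounds of each outlier run.
edges = iter(outlier_idx[:1] + sum(gaps, []) + outlier_idx[-1:])
consecutive_outliers_idx = list(zip(edges, edges))
print(consecutive_outliers_idx)  # [(0, 2), (7, 8), (15, 15)]

Runs touching index 0 or the final volume are the ones `_interpolate` then back- or forward-fills from the nearest retained volume.
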
data = masking.apply_mask(datafile, maskfile) else: - raise ValueError(f"Unknown extension for {datafile}") + raise ValueError(f'Unknown extension for {datafile}') # transpose from TxS to SxT data = data.T @@ -56,20 +57,20 @@ def get_cifti_intents(): Copied from https://www.nitrc.org/projects/cifti/ PDF. """ CIFTI_INTENTS = { - ".dtseries.nii": "ConnDenseSeries", - ".dconn.nii": "ConnDense", - ".pconn.nii": "ConnParcels", - ".ptseries.nii": "ConnParcelSries", - ".dscalar.nii": "ConnDenseScalar", - ".dlabel.nii": "ConnDenseLabel", - ".pscalar.nii": "ConnParcelScalr", - ".pdconn.nii": "ConnParcelDense", - ".dpconn.nii": "ConnDenseParcel", - ".pconnseries.nii": "ConnPPSr", - ".pconnscalar.nii": "ConnPPSc", - ".dfan.nii": "ConnDenseSeries", - ".dfibersamp.nii": "ConnUnknown", - ".dfansamp.nii": "ConnUnknown", + '.dtseries.nii': 'ConnDenseSeries', + '.dconn.nii': 'ConnDense', + '.pconn.nii': 'ConnParcels', + '.ptseries.nii': 'ConnParcelSries', + '.dscalar.nii': 'ConnDenseScalar', + '.dlabel.nii': 'ConnDenseLabel', + '.pscalar.nii': 'ConnParcelScalr', + '.pdconn.nii': 'ConnParcelDense', + '.dpconn.nii': 'ConnDenseParcel', + '.pconnseries.nii': 'ConnPPSr', + '.pconnscalar.nii': 'ConnPPSc', + '.dfan.nii': 'ConnDenseSeries', + '.dfibersamp.nii': 'ConnUnknown', + '.dfansamp.nii': 'ConnUnknown', } return CIFTI_INTENTS @@ -102,45 +103,45 @@ def write_ndata(data_matrix, template, filename, mask=None, TR=1): ----- This function currently only works for NIfTIs and .dtseries.nii and .dscalar.nii CIFTIs. """ - assert data_matrix.ndim in (1, 2), f"Input data must be a 1-2D array, not {data_matrix.ndim}." + assert data_matrix.ndim in (1, 2), f'Input data must be a 1-2D array, not {data_matrix.ndim}.' assert os.path.isfile(template) cifti_intents = get_cifti_intents() _, _, template_extension = split_filename(template) if template_extension in cifti_intents.keys(): - file_format = "cifti" - elif template.endswith(".nii.gz"): - file_format = "nifti" - assert mask is not None, "A binary mask must be provided for nifti inputs." - assert os.path.isfile(mask), f"The mask file does not exist: {mask}" + file_format = 'cifti' + elif template.endswith('.nii.gz'): + file_format = 'nifti' + assert mask is not None, 'A binary mask must be provided for nifti inputs.' + assert os.path.isfile(mask), f'The mask file does not exist: {mask}' else: - raise ValueError(f"Unknown extension for {template}") + raise ValueError(f'Unknown extension for {template}') # transpose from SxT to TxS data_matrix = data_matrix.T - if file_format == "cifti": + if file_format == 'cifti': # write cifti series template_img = nb.load(template) if data_matrix.ndim == 1: - LOGGER.warning("1D data matrix provided. Adding singleton dimension.") + LOGGER.warning('1D data matrix provided. 
Adding singleton dimension.') data_matrix = data_matrix[None, :] n_volumes = data_matrix.shape[0] _, _, out_extension = split_filename(filename) - if filename.endswith((".dscalar.nii", ".pscalar.nii")): + if filename.endswith(('.dscalar.nii', '.pscalar.nii')): # Dense scalar files have (ScalarAxis, BrainModelAxis) # Parcellated scalar files have (ScalarAxis, ParcelsAxis) - scalar_names = [f"#{i + 1}" for i in range(n_volumes)] + scalar_names = [f'#{i + 1}' for i in range(n_volumes)] ax_0 = nb.cifti2.cifti2_axes.ScalarAxis(name=scalar_names) ax_1 = template_img.header.get_axis(1) new_header = nb.Cifti2Header.from_axes((ax_0, ax_1)) img = nb.Cifti2Image(data_matrix, new_header) - elif filename.endswith((".dtseries.nii", ".ptseries.nii")): + elif filename.endswith(('.dtseries.nii', '.ptseries.nii')): # Dense series files have (SeriesAxis, BrainModelAxis) # Parcellated series files have (SeriesAxis, ParcelsAxis) if n_volumes == template_img.shape[0]: @@ -199,9 +200,9 @@ def write_gii(datat, template, filename, hemi): ------- filename """ - datax = np.array(datat, dtype="float32") + datax = np.array(datat, dtype='float32') template = str( - get_template("fsLR", hemi=hemi, suffix="midthickness", density="32k", desc="vaavg") + get_template('fsLR', hemi=hemi, suffix='midthickness', density='32k', desc='vaavg') ) template = nb.load(template) dataimg = nb.gifti.GiftiImage( @@ -213,7 +214,7 @@ def write_gii(datat, template, filename, hemi): extra=template.extra, meta=template.meta, ) - d_timepoint = nb.gifti.GiftiDataArray(data=datax, intent="NIFTI_INTENT_NORMAL") + d_timepoint = nb.gifti.GiftiDataArray(data=datax, intent='NIFTI_INTENT_NORMAL') dataimg.add_gifti_data_array(d_timepoint) dataimg.to_filename(filename) return filename @@ -223,7 +224,7 @@ def read_gii(surf_gii): """Use nibabel to read surface file.""" bold_data = nb.load(surf_gii) # load the gifti gifti_data = bold_data.agg_data() # aggregate the data - if not hasattr(gifti_data, "__shape__"): # if it doesn't have 'shape', reshape + if not hasattr(gifti_data, '__shape__'): # if it doesn't have 'shape', reshape gifti_data = np.zeros((len(bold_data.darrays[0].data), len(bold_data.darrays))) for arr in range(len(bold_data.darrays)): gifti_data[:, arr] = bold_data.darrays[arr].data diff --git a/xcp_d/workflows/__init__.py b/xcp_d/workflows/__init__.py index 087c5c629..170bdedea 100644 --- a/xcp_d/workflows/__init__.py +++ b/xcp_d/workflows/__init__.py @@ -5,9 +5,9 @@ from xcp_d.workflows import anatomical, base, bold, parcellation, plotting __all__ = [ - "anatomical", - "base", - "bold", - "parcellation", - "plotting", + 'anatomical', + 'base', + 'bold', + 'parcellation', + 'plotting', ] diff --git a/xcp_d/workflows/anatomical/__init__.py b/xcp_d/workflows/anatomical/__init__.py index 9732db3fc..14378ea81 100644 --- a/xcp_d/workflows/anatomical/__init__.py +++ b/xcp_d/workflows/anatomical/__init__.py @@ -3,9 +3,9 @@ from xcp_d.workflows.anatomical import outputs, parcellation, plotting, surface, volume __all__ = [ - "outputs", - "parcellation", - "plotting", - "surface", - "volume", + 'outputs', + 'parcellation', + 'plotting', + 'surface', + 'volume', ] diff --git a/xcp_d/workflows/anatomical/outputs.py b/xcp_d/workflows/anatomical/outputs.py index aa2751388..ebf40069c 100644 --- a/xcp_d/workflows/anatomical/outputs.py +++ b/xcp_d/workflows/anatomical/outputs.py @@ -12,7 +12,7 @@ @fill_doc -def init_copy_inputs_to_outputs_wf(name="copy_inputs_to_outputs_wf"): +def init_copy_inputs_to_outputs_wf(name='copy_inputs_to_outputs_wf'): """Copy files 
from the preprocessing derivatives to the output folder, with no modifications. Workflow Graph @@ -50,60 +50,60 @@ def init_copy_inputs_to_outputs_wf(name="copy_inputs_to_outputs_wf"): inputnode = pe.Node( niu.IdentityInterface( fields=[ - "lh_pial_surf", - "rh_pial_surf", - "lh_wm_surf", - "rh_wm_surf", - "sulcal_depth", - "sulcal_curv", - "cortical_thickness", - "cortical_thickness_corr", - "myelin", - "myelin_smoothed", + 'lh_pial_surf', + 'rh_pial_surf', + 'lh_wm_surf', + 'rh_wm_surf', + 'sulcal_depth', + 'sulcal_curv', + 'cortical_thickness', + 'cortical_thickness_corr', + 'myelin', + 'myelin_smoothed', ], ), - name="inputnode", + name='inputnode', ) # Place the surfaces in a single node. collect_files = pe.Node( niu.Merge(10), - name="collect_files", + name='collect_files', ) workflow.connect([ (inputnode, collect_files, [ # fsLR-space surface mesh files - ("lh_pial_surf", "in1"), - ("rh_pial_surf", "in2"), - ("lh_wm_surf", "in3"), - ("rh_wm_surf", "in4"), + ('lh_pial_surf', 'in1'), + ('rh_pial_surf', 'in2'), + ('lh_wm_surf', 'in3'), + ('rh_wm_surf', 'in4'), # fsLR-space surface shape files - ("sulcal_depth", "in5"), - ("sulcal_curv", "in6"), - ("cortical_thickness", "in7"), - ("cortical_thickness_corr", "in8"), - ("myelin", "in9"), - ("myelin_smoothed", "in10"), + ('sulcal_depth', 'in5'), + ('sulcal_curv', 'in6'), + ('cortical_thickness', 'in7'), + ('cortical_thickness_corr', 'in8'), + ('myelin', 'in9'), + ('myelin_smoothed', 'in10'), ]), ]) # fmt:skip filter_out_undefined = pe.Node( FilterUndefined(), - name="filter_out_undefined", + name='filter_out_undefined', ) - workflow.connect([(collect_files, filter_out_undefined, [("out", "inlist")])]) + workflow.connect([(collect_files, filter_out_undefined, [('out', 'inlist')])]) ds_copied_outputs = pe.MapNode( DerivativesDataSink(check_hdr=False), - name="ds_copied_outputs", + name='ds_copied_outputs', run_without_submitting=True, mem_gb=1, - iterfield=["in_file", "source_file"], + iterfield=['in_file', 'source_file'], ) workflow.connect([ (filter_out_undefined, ds_copied_outputs, [ - ("outlist", "in_file"), - ("outlist", "source_file"), + ('outlist', 'in_file'), + ('outlist', 'source_file'), ]), ]) # fmt:skip diff --git a/xcp_d/workflows/anatomical/parcellation.py b/xcp_d/workflows/anatomical/parcellation.py index 5e4537ae5..1f2d2b774 100644 --- a/xcp_d/workflows/anatomical/parcellation.py +++ b/xcp_d/workflows/anatomical/parcellation.py @@ -11,11 +11,11 @@ from xcp_d.utils.doc import fill_doc from xcp_d.workflows.parcellation import init_parcellate_cifti_wf -LOGGER = logging.getLogger("nipype.workflow") +LOGGER = logging.getLogger('nipype.workflow') @fill_doc -def init_parcellate_surfaces_wf(files_to_parcellate, name="parcellate_surfaces_wf"): +def init_parcellate_surfaces_wf(files_to_parcellate, name='parcellate_surfaces_wf'): """Parcellate surface files and write them out to the output directory. 
Workflow Graph @@ -55,37 +55,37 @@ def init_parcellate_surfaces_wf(files_to_parcellate, name="parcellate_surfaces_w workflow = Workflow(name=name) SURF_DESCS = { - "sulcal_depth": "sulc", - "sulcal_curv": "curv", - "cortical_thickness": "thickness", - "cortical_thickness_corr": "thicknessCorrected", - "myelin": "myelin", - "myelin_smoothed": "myelinSmoothed", + 'sulcal_depth': 'sulc', + 'sulcal_curv': 'curv', + 'cortical_thickness': 'thickness', + 'cortical_thickness_corr': 'thicknessCorrected', + 'myelin': 'myelin', + 'myelin_smoothed': 'myelinSmoothed', } inputnode = pe.Node( niu.IdentityInterface( fields=[ - "sulcal_depth", - "sulcal_curv", - "cortical_thickness", - "cortical_thickness_corr", - "myelin", - "myelin_smoothed", + 'sulcal_depth', + 'sulcal_curv', + 'cortical_thickness', + 'cortical_thickness_corr', + 'myelin', + 'myelin_smoothed', # atlases - "atlas_names", - "atlas_datasets", - "atlas_files", - "atlas_labels_files", - "atlas_metadata_files", + 'atlas_names', + 'atlas_datasets', + 'atlas_files', + 'atlas_labels_files', + 'atlas_metadata_files', ], ), - name="inputnode", + name='inputnode', ) - builtin_atlases = select_atlases(atlases=config.execution.atlases, subset="all") - external_atlases = sorted(list(set(config.execution.atlases) - set(builtin_atlases))) - builtin_cortical_atlases = select_atlases(atlases=builtin_atlases, subset="cortical") + builtin_atlases = select_atlases(atlases=config.execution.atlases, subset='all') + external_atlases = sorted(set(config.execution.atlases) - set(builtin_atlases)) + builtin_cortical_atlases = select_atlases(atlases=builtin_atlases, subset='cortical') selected_atlases = builtin_cortical_atlases + external_atlases atlases = collect_atlases( datasets=config.execution.datasets, @@ -98,12 +98,12 @@ def init_parcellate_surfaces_wf(files_to_parcellate, name="parcellate_surfaces_w atlas_names, atlas_files, atlas_labels_files, atlas_metadata_files = [], [], [], [] atlas_datasets = [] for atlas, atlas_dict in atlases.items(): - config.loggers.workflow.info(f"Loading atlas: {atlas}") + config.loggers.workflow.info(f'Loading atlas: {atlas}') atlas_names.append(atlas) - atlas_datasets.append(atlas_dict["dataset"]) - atlas_files.append(atlas_dict["image"]) - atlas_labels_files.append(atlas_dict["labels"]) - atlas_metadata_files.append(atlas_dict["metadata"]) + atlas_datasets.append(atlas_dict['dataset']) + atlas_files.append(atlas_dict['image']) + atlas_labels_files.append(atlas_dict['labels']) + atlas_metadata_files.append(atlas_dict['metadata']) inputnode.inputs.atlas_names = atlas_names inputnode.inputs.atlas_datasets = atlas_datasets @@ -113,7 +113,7 @@ def init_parcellate_surfaces_wf(files_to_parcellate, name="parcellate_surfaces_w if not atlases: LOGGER.warning( - "No cortical atlases have been selected, so surface metrics will not be parcellated." + 'No cortical atlases have been selected, so surface metrics will not be parcellated.' ) # If no cortical atlases are selected, inputnode could go unconnected, so add explicitly. 
workflow.add_nodes([inputnode]) @@ -123,56 +123,56 @@ def init_parcellate_surfaces_wf(files_to_parcellate, name="parcellate_surfaces_w for file_to_parcellate in files_to_parcellate: resample_atlas_to_surface = pe.MapNode( CiftiCreateDenseFromTemplate( - out_file="resampled_atlas.dlabel.nii", + out_file='resampled_atlas.dlabel.nii', num_threads=config.nipype.omp_nthreads, ), - name=f"resample_atlas_to_{file_to_parcellate}", - iterfield=["label"], + name=f'resample_atlas_to_{file_to_parcellate}', + iterfield=['label'], n_procs=config.nipype.omp_nthreads, ) workflow.connect([ (inputnode, resample_atlas_to_surface, [ - ("atlas_files", "label"), - (file_to_parcellate, "template_cifti"), + ('atlas_files', 'label'), + (file_to_parcellate, 'template_cifti'), ]), ]) # fmt:skip parcellate_surface_wf = init_parcellate_cifti_wf( - mem_gb={"resampled": 2}, + mem_gb={'resampled': 2}, compute_mask=True, - name=f"parcellate_{file_to_parcellate}_wf", + name=f'parcellate_{file_to_parcellate}_wf', ) workflow.connect([ (inputnode, parcellate_surface_wf, [ - (file_to_parcellate, "inputnode.in_file"), - ("atlas_labels_files", "inputnode.atlas_labels_files"), + (file_to_parcellate, 'inputnode.in_file'), + ('atlas_labels_files', 'inputnode.atlas_labels_files'), ]), (resample_atlas_to_surface, parcellate_surface_wf, [ - ("out_file", "inputnode.atlas_files"), + ('out_file', 'inputnode.atlas_files'), ]), ]) # fmt:skip # Write out the parcellated files ds_parcellated_surface = pe.MapNode( DerivativesDataSink( - dismiss_entities=["hemi", "desc", "den", "res"], + dismiss_entities=['hemi', 'desc', 'den', 'res'], desc=SURF_DESCS[file_to_parcellate], - statistic="mean", - suffix="morph", - extension=".tsv", + statistic='mean', + suffix='morph', + extension='.tsv', ), - name=f"ds_parcellated_{file_to_parcellate}", + name=f'ds_parcellated_{file_to_parcellate}', run_without_submitting=True, mem_gb=1, - iterfield=["segmentation", "in_file"], + iterfield=['segmentation', 'in_file'], ) workflow.connect([ (inputnode, ds_parcellated_surface, [ - (file_to_parcellate, "source_file"), - ("atlas_names", "segmentation"), + (file_to_parcellate, 'source_file'), + ('atlas_names', 'segmentation'), ]), (parcellate_surface_wf, ds_parcellated_surface, [ - ("outputnode.parcellated_tsv", "in_file"), + ('outputnode.parcellated_tsv', 'in_file'), ]), ]) # fmt:skip diff --git a/xcp_d/workflows/anatomical/plotting.py b/xcp_d/workflows/anatomical/plotting.py index e88158a07..a931afec1 100644 --- a/xcp_d/workflows/anatomical/plotting.py +++ b/xcp_d/workflows/anatomical/plotting.py @@ -22,11 +22,11 @@ ) from xcp_d.workflows.plotting import init_plot_overlay_wf -LOGGER = logging.getLogger("nipype.workflow") +LOGGER = logging.getLogger('nipype.workflow') @fill_doc -def init_brainsprite_figures_wf(t1w_available, t2w_available, name="brainsprite_figures_wf"): +def init_brainsprite_figures_wf(t1w_available, t2w_available, name='brainsprite_figures_wf'): """Create mosaic and PNG files for executive summary brainsprite. 
Workflow Graph @@ -70,44 +70,44 @@ def init_brainsprite_figures_wf(t1w_available, t2w_available, name="brainsprite_ inputnode = pe.Node( niu.IdentityInterface( fields=[ - "t1w", - "t2w", - "lh_wm_surf", - "rh_wm_surf", - "lh_pial_surf", - "rh_pial_surf", + 't1w', + 't2w', + 'lh_wm_surf', + 'rh_wm_surf', + 'lh_pial_surf', + 'rh_pial_surf', ], ), - name="inputnode", + name='inputnode', ) # Load template scene file brainsprite_scene_template = str( - load_data("executive_summary_scenes/brainsprite_template.scene.gz") + load_data('executive_summary_scenes/brainsprite_template.scene.gz') ) - pngs_scene_template = str(load_data("executive_summary_scenes/pngs_template.scene.gz")) + pngs_scene_template = str(load_data('executive_summary_scenes/pngs_template.scene.gz')) if t1w_available and t2w_available: - image_types = ["T1", "T2"] + image_types = ['T1', 'T2'] elif t2w_available: - image_types = ["T2"] + image_types = ['T2'] else: - image_types = ["T1"] + image_types = ['T1'] for image_type in image_types: - inputnode_anat_name = f"{image_type.lower()}w" + inputnode_anat_name = f'{image_type.lower()}w' # Create frame-wise PNGs get_number_of_frames = pe.Node( Function( function=get_n_frames, - input_names=["anat_file"], - output_names=["frame_numbers"], + input_names=['anat_file'], + output_names=['frame_numbers'], ), - name=f"get_number_of_frames_{image_type}", + name=f'get_number_of_frames_{image_type}', mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ - (inputnode, get_number_of_frames, [(inputnode_anat_name, "anat_file")]), + (inputnode, get_number_of_frames, [(inputnode_anat_name, 'anat_file')]), ]) # fmt:skip # Modify template scene file with file paths @@ -115,31 +115,31 @@ def init_brainsprite_figures_wf(t1w_available, t2w_available, name="brainsprite_ Function( function=modify_brainsprite_scene_template, input_names=[ - "slice_number", - "anat_file", - "rh_pial_surf", - "lh_pial_surf", - "rh_wm_surf", - "lh_wm_surf", - "scene_template", + 'slice_number', + 'anat_file', + 'rh_pial_surf', + 'lh_pial_surf', + 'rh_wm_surf', + 'lh_wm_surf', + 'scene_template', ], - output_names=["out_file"], + output_names=['out_file'], ), - name=f"modify_brainsprite_template_scene_{image_type}", - iterfield=["slice_number"], + name=f'modify_brainsprite_template_scene_{image_type}', + iterfield=['slice_number'], mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) modify_brainsprite_template_scene.inputs.scene_template = brainsprite_scene_template workflow.connect([ (inputnode, modify_brainsprite_template_scene, [ - (inputnode_anat_name, "anat_file"), - ("lh_wm_surf", "lh_wm_surf"), - ("rh_wm_surf", "rh_wm_surf"), - ("lh_pial_surf", "lh_pial_surf"), - ("rh_pial_surf", "rh_pial_surf"), + (inputnode_anat_name, 'anat_file'), + ('lh_wm_surf', 'lh_wm_surf'), + ('rh_wm_surf', 'rh_wm_surf'), + ('lh_pial_surf', 'lh_pial_surf'), + ('rh_pial_surf', 'rh_pial_surf'), ]), (get_number_of_frames, modify_brainsprite_template_scene, [ - ("frame_numbers", "slice_number"), + ('frame_numbers', 'slice_number'), ]), ]) # fmt:skip @@ -150,14 +150,14 @@ def init_brainsprite_figures_wf(t1w_available, t2w_available, name="brainsprite_ image_height=800, num_threads=config.nipype.omp_nthreads, ), - name=f"create_framewise_pngs_{image_type}", - iterfield=["scene_file"], + name=f'create_framewise_pngs_{image_type}', + iterfield=['scene_file'], mem_gb=1, n_procs=config.nipype.omp_nthreads, ) workflow.connect([ (modify_brainsprite_template_scene, create_framewise_pngs, [ - ("out_file", "scene_file"), + ('out_file', 'scene_file'), ]), ]) # 
fmt:skip @@ -165,27 +165,27 @@ def init_brainsprite_figures_wf(t1w_available, t2w_available, name="brainsprite_ make_mosaic_node = pe.Node( Function( function=make_mosaic, - input_names=["png_files"], - output_names=["mosaic_file"], + input_names=['png_files'], + output_names=['mosaic_file'], ), - name=f"make_mosaic_{image_type}", + name=f'make_mosaic_{image_type}', mem_gb=1, ) - workflow.connect([(create_framewise_pngs, make_mosaic_node, [("out_file", "png_files")])]) + workflow.connect([(create_framewise_pngs, make_mosaic_node, [('out_file', 'png_files')])]) ds_report_mosaic_file = pe.Node( DerivativesDataSink( - dismiss_entities=["desc"], - desc="mosaic", - suffix=f"{image_type}w", + dismiss_entities=['desc'], + desc='mosaic', + suffix=f'{image_type}w', ), - name=f"ds_report_mosaic_file_{image_type}", + name=f'ds_report_mosaic_file_{image_type}', run_without_submitting=False, ) workflow.connect([ - (inputnode, ds_report_mosaic_file, [(inputnode_anat_name, "source_file")]), - (make_mosaic_node, ds_report_mosaic_file, [("mosaic_file", "in_file")]), + (inputnode, ds_report_mosaic_file, [(inputnode_anat_name, 'source_file')]), + (make_mosaic_node, ds_report_mosaic_file, [('mosaic_file', 'in_file')]), ]) # fmt:skip # Start working on the selected PNG images for the button @@ -193,26 +193,26 @@ def init_brainsprite_figures_wf(t1w_available, t2w_available, name="brainsprite_ Function( function=modify_pngs_scene_template, input_names=[ - "anat_file", - "rh_pial_surf", - "lh_pial_surf", - "rh_wm_surf", - "lh_wm_surf", - "scene_template", + 'anat_file', + 'rh_pial_surf', + 'lh_pial_surf', + 'rh_wm_surf', + 'lh_wm_surf', + 'scene_template', ], - output_names=["out_file"], + output_names=['out_file'], ), - name=f"modify_pngs_template_scene_{image_type}", + name=f'modify_pngs_template_scene_{image_type}', mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) modify_pngs_template_scene.inputs.scene_template = pngs_scene_template workflow.connect([ (inputnode, modify_pngs_template_scene, [ - (inputnode_anat_name, "anat_file"), - ("lh_wm_surf", "lh_wm_surf"), - ("rh_wm_surf", "rh_wm_surf"), - ("lh_pial_surf", "lh_pial_surf"), - ("rh_pial_surf", "rh_pial_surf"), + (inputnode_anat_name, 'anat_file'), + ('lh_wm_surf', 'lh_wm_surf'), + ('rh_wm_surf', 'rh_wm_surf'), + ('lh_pial_surf', 'lh_pial_surf'), + ('rh_pial_surf', 'rh_pial_surf'), ]) ]) # fmt:skip @@ -220,38 +220,38 @@ def init_brainsprite_figures_wf(t1w_available, t2w_available, name="brainsprite_ get_png_scene_names = pe.Node( Function( function=get_png_image_names, - output_names=["scene_index", "scene_descriptions"], + output_names=['scene_index', 'scene_descriptions'], ), - name=f"get_png_scene_names_{image_type}", + name=f'get_png_scene_names_{image_type}', ) create_scenewise_pngs = pe.MapNode( ShowScene(image_width=900, image_height=800), - name=f"create_scenewise_pngs_{image_type}", - iterfield=["scene_name_or_number"], + name=f'create_scenewise_pngs_{image_type}', + iterfield=['scene_name_or_number'], mem_gb=1, ) workflow.connect([ - (modify_pngs_template_scene, create_scenewise_pngs, [("out_file", "scene_file")]), + (modify_pngs_template_scene, create_scenewise_pngs, [('out_file', 'scene_file')]), (get_png_scene_names, create_scenewise_pngs, [ - ("scene_index", "scene_name_or_number"), + ('scene_index', 'scene_name_or_number'), ]), ]) # fmt:skip ds_report_scenewise_pngs = pe.MapNode( DerivativesDataSink( - dismiss_entities=["desc"], - suffix=f"{image_type}w", + dismiss_entities=['desc'], + suffix=f'{image_type}w', ), - 
name=f"ds_report_scenewise_pngs_{image_type}", + name=f'ds_report_scenewise_pngs_{image_type}', run_without_submitting=False, - iterfield=["desc", "in_file"], + iterfield=['desc', 'in_file'], mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ - (inputnode, ds_report_scenewise_pngs, [(inputnode_anat_name, "source_file")]), - (get_png_scene_names, ds_report_scenewise_pngs, [("scene_descriptions", "desc")]), - (create_scenewise_pngs, ds_report_scenewise_pngs, [("out_file", "in_file")]), + (inputnode, ds_report_scenewise_pngs, [(inputnode_anat_name, 'source_file')]), + (get_png_scene_names, ds_report_scenewise_pngs, [('scene_descriptions', 'desc')]), + (create_scenewise_pngs, ds_report_scenewise_pngs, [('out_file', 'in_file')]), ]) # fmt:skip return workflow @@ -261,7 +261,7 @@ def init_brainsprite_figures_wf(t1w_available, t2w_available, name="brainsprite_ def init_execsummary_anatomical_plots_wf( t1w_available, t2w_available, - name="execsummary_anatomical_plots_wf", + name='execsummary_anatomical_plots_wf', ): """Generate the anatomical figures for an executive summary. @@ -301,53 +301,53 @@ def init_execsummary_anatomical_plots_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "t1w", - "t2w", - "template", + 't1w', + 't2w', + 'template', ], ), - name="inputnode", + name='inputnode', ) # Start plotting the overlay figures # Atlas in T1w/T2w, T1w/T2w in Atlas - anatomicals = (["t1w"] if t1w_available else []) + (["t2w"] if t2w_available else []) + anatomicals = (['t1w'] if t1w_available else []) + (['t2w'] if t2w_available else []) for anat in anatomicals: # Resample anatomical to match resolution of template data resample_anat = pe.Node( ResampleToImage(), - name=f"resample_{anat}", + name=f'resample_{anat}', mem_gb=1, ) workflow.connect([ (inputnode, resample_anat, [ - (anat, "in_file"), - ("template", "target_file"), + (anat, 'in_file'), + ('template', 'target_file'), ]), ]) # fmt:skip plot_anat_on_atlas_wf = init_plot_overlay_wf( - desc="AnatOnAtlas", - name=f"plot_{anat}_on_atlas_wf", + desc='AnatOnAtlas', + name=f'plot_{anat}_on_atlas_wf', ) workflow.connect([ (inputnode, plot_anat_on_atlas_wf, [ - ("template", "inputnode.underlay_file"), - (anat, "inputnode.name_source"), + ('template', 'inputnode.underlay_file'), + (anat, 'inputnode.name_source'), ]), - (resample_anat, plot_anat_on_atlas_wf, [("out_file", "inputnode.overlay_file")]), + (resample_anat, plot_anat_on_atlas_wf, [('out_file', 'inputnode.overlay_file')]), ]) # fmt:skip plot_atlas_on_anat_wf = init_plot_overlay_wf( - desc="AtlasOnAnat", - name=f"plot_atlas_on_{anat}_wf", + desc='AtlasOnAnat', + name=f'plot_atlas_on_{anat}_wf', ) workflow.connect([ (inputnode, plot_atlas_on_anat_wf, [ - ("template", "inputnode.overlay_file"), - (anat, "inputnode.name_source"), + ('template', 'inputnode.overlay_file'), + (anat, 'inputnode.name_source'), ]), - (resample_anat, plot_atlas_on_anat_wf, [("out_file", "inputnode.underlay_file")]), + (resample_anat, plot_atlas_on_anat_wf, [('out_file', 'inputnode.underlay_file')]), ]) # fmt:skip # TODO: Add subcortical overlay images as well. 
diff --git a/xcp_d/workflows/anatomical/surface.py b/xcp_d/workflows/anatomical/surface.py index 36c0f56c9..02d05f095 100644 --- a/xcp_d/workflows/anatomical/surface.py +++ b/xcp_d/workflows/anatomical/surface.py @@ -26,7 +26,7 @@ from xcp_d.workflows.anatomical.outputs import init_copy_inputs_to_outputs_wf from xcp_d.workflows.anatomical.plotting import init_brainsprite_figures_wf -LOGGER = logging.getLogger("nipype.workflow") +LOGGER = logging.getLogger('nipype.workflow') @fill_doc @@ -37,7 +37,7 @@ def init_postprocess_surfaces_wf( t1w_available, t2w_available, software, - name="postprocess_surfaces_wf", + name='postprocess_surfaces_wf', ): """Postprocess surfaces. @@ -120,38 +120,38 @@ def init_postprocess_surfaces_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "t1w", - "t2w", - "anat_to_template_xfm", - "template_to_anat_xfm", - "lh_subject_sphere", - "rh_subject_sphere", - "lh_pial_surf", - "rh_pial_surf", - "lh_wm_surf", - "rh_wm_surf", - "sulcal_depth", - "sulcal_curv", - "cortical_thickness", - "cortical_thickness_corr", - "myelin", - "myelin_smoothed", + 't1w', + 't2w', + 'anat_to_template_xfm', + 'template_to_anat_xfm', + 'lh_subject_sphere', + 'rh_subject_sphere', + 'lh_pial_surf', + 'rh_pial_surf', + 'lh_wm_surf', + 'rh_wm_surf', + 'sulcal_depth', + 'sulcal_curv', + 'cortical_thickness', + 'cortical_thickness_corr', + 'myelin', + 'myelin_smoothed', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( niu.IdentityInterface( fields=[ - "lh_midthickness", - "rh_midthickness", + 'lh_midthickness', + 'rh_midthickness', ], ), - name="outputnode", + name='outputnode', ) workflow.add_nodes([outputnode]) # outputnode may not be used - workflow.__desc__ = "" + workflow.__desc__ = '' if abcc_qc and mesh_available: # Plot the white and pial surfaces on the brain in a brainsprite figure. @@ -161,8 +161,8 @@ def init_postprocess_surfaces_wf( ) workflow.connect([ (inputnode, brainsprite_wf, [ - ("t1w", "inputnode.t1w"), - ("t2w", "inputnode.t2w"), + ('t1w', 'inputnode.t1w'), + ('t2w', 'inputnode.t2w'), ]), ]) # fmt:skip @@ -172,10 +172,10 @@ def init_postprocess_surfaces_wf( # For DCAN/HCP derivatives, it will be standard-space surfaces. workflow.connect([ (inputnode, brainsprite_wf, [ - ("lh_pial_surf", "inputnode.lh_pial_surf"), - ("rh_pial_surf", "inputnode.rh_pial_surf"), - ("lh_wm_surf", "inputnode.lh_wm_surf"), - ("rh_wm_surf", "inputnode.rh_wm_surf"), + ('lh_pial_surf', 'inputnode.lh_pial_surf'), + ('rh_pial_surf', 'inputnode.rh_pial_surf'), + ('lh_wm_surf', 'inputnode.lh_wm_surf'), + ('rh_wm_surf', 'inputnode.rh_wm_surf'), ]), ]) # fmt:skip @@ -187,87 +187,87 @@ def init_postprocess_surfaces_wf( # At least some surfaces are already in fsLR space and must be copied, # without modification, to the output directory. copy_std_surfaces_to_datasink = init_copy_inputs_to_outputs_wf( - name="copy_std_surfaces_to_datasink", + name='copy_std_surfaces_to_datasink', ) if morphometry_files: workflow.__desc__ += ( - " fsLR-space morphometry surfaces were copied from the preprocessing derivatives to " - "the XCP-D derivatives." + ' fsLR-space morphometry surfaces were copied from the preprocessing derivatives to ' + 'the XCP-D derivatives.' 
) for morphometry_file in morphometry_files: workflow.connect([ (inputnode, copy_std_surfaces_to_datasink, [ - (morphometry_file, f"inputnode.{morphometry_file}"), + (morphometry_file, f'inputnode.{morphometry_file}'), ]), ]) # fmt:skip if mesh_available: workflow.__desc__ += ( - " HCP-style midthickness, inflated, and very-inflated surfaces were generated from " - "the white-matter and pial surface meshes." + ' HCP-style midthickness, inflated, and very-inflated surfaces were generated from ' + 'the white-matter and pial surface meshes.' ) # Generate and output HCP-style surface files. hcp_surface_wfs = { - hemi: init_generate_hcp_surfaces_wf(name=f"{hemi}_generate_hcp_surfaces_wf") - for hemi in ["lh", "rh"] + hemi: init_generate_hcp_surfaces_wf(name=f'{hemi}_generate_hcp_surfaces_wf') + for hemi in ['lh', 'rh'] } workflow.connect([ - (inputnode, hcp_surface_wfs["lh"], [("lh_pial_surf", "inputnode.name_source")]), - (inputnode, hcp_surface_wfs["rh"], [("rh_pial_surf", "inputnode.name_source")]), - (hcp_surface_wfs["lh"], outputnode, [("outputnode.midthickness", "lh_midthickness")]), - (hcp_surface_wfs["rh"], outputnode, [("outputnode.midthickness", "rh_midthickness")]), + (inputnode, hcp_surface_wfs['lh'], [('lh_pial_surf', 'inputnode.name_source')]), + (inputnode, hcp_surface_wfs['rh'], [('rh_pial_surf', 'inputnode.name_source')]), + (hcp_surface_wfs['lh'], outputnode, [('outputnode.midthickness', 'lh_midthickness')]), + (hcp_surface_wfs['rh'], outputnode, [('outputnode.midthickness', 'rh_midthickness')]), ]) # fmt:skip if mesh_available and standard_space_mesh: workflow.__desc__ += ( - " All surface files were already in fsLR space, and were copied to the output " - "directory." + ' All surface files were already in fsLR space, and were copied to the output ' + 'directory.' ) # Mesh files are already in fsLR. workflow.connect([ (inputnode, copy_std_surfaces_to_datasink, [ - ("lh_pial_surf", "inputnode.lh_pial_surf"), - ("rh_pial_surf", "inputnode.rh_pial_surf"), - ("lh_wm_surf", "inputnode.lh_wm_surf"), - ("rh_wm_surf", "inputnode.rh_wm_surf"), + ('lh_pial_surf', 'inputnode.lh_pial_surf'), + ('rh_pial_surf', 'inputnode.rh_pial_surf'), + ('lh_wm_surf', 'inputnode.lh_wm_surf'), + ('rh_wm_surf', 'inputnode.rh_wm_surf'), ]), - (inputnode, hcp_surface_wfs["lh"], [ - ("lh_pial_surf", "inputnode.pial_surf"), - ("lh_wm_surf", "inputnode.wm_surf"), + (inputnode, hcp_surface_wfs['lh'], [ + ('lh_pial_surf', 'inputnode.pial_surf'), + ('lh_wm_surf', 'inputnode.wm_surf'), ]), - (inputnode, hcp_surface_wfs["rh"], [ - ("rh_pial_surf", "inputnode.pial_surf"), - ("rh_wm_surf", "inputnode.wm_surf"), + (inputnode, hcp_surface_wfs['rh'], [ + ('rh_pial_surf', 'inputnode.pial_surf'), + ('rh_wm_surf', 'inputnode.wm_surf'), ]), ]) # fmt:skip elif mesh_available: - workflow.__desc__ += " fsnative-space surfaces were then warped to fsLR space." + workflow.__desc__ += ' fsnative-space surfaces were then warped to fsLR space.' # Mesh files are in fsnative and must be warped to fsLR. 
warp_surfaces_to_template_wf = init_warp_surfaces_to_template_wf( software=software, omp_nthreads=omp_nthreads, - name="warp_surfaces_to_template_wf", + name='warp_surfaces_to_template_wf', ) workflow.connect([ (inputnode, warp_surfaces_to_template_wf, [ - ("lh_subject_sphere", "inputnode.lh_subject_sphere"), - ("rh_subject_sphere", "inputnode.rh_subject_sphere"), - ("lh_pial_surf", "inputnode.lh_pial_surf"), - ("rh_pial_surf", "inputnode.rh_pial_surf"), - ("lh_wm_surf", "inputnode.lh_wm_surf"), - ("rh_wm_surf", "inputnode.rh_wm_surf"), - ("anat_to_template_xfm", "inputnode.anat_to_template_xfm"), - ("template_to_anat_xfm", "inputnode.template_to_anat_xfm"), + ('lh_subject_sphere', 'inputnode.lh_subject_sphere'), + ('rh_subject_sphere', 'inputnode.rh_subject_sphere'), + ('lh_pial_surf', 'inputnode.lh_pial_surf'), + ('rh_pial_surf', 'inputnode.rh_pial_surf'), + ('lh_wm_surf', 'inputnode.lh_wm_surf'), + ('rh_wm_surf', 'inputnode.rh_wm_surf'), + ('anat_to_template_xfm', 'inputnode.anat_to_template_xfm'), + ('template_to_anat_xfm', 'inputnode.template_to_anat_xfm'), ]), - (warp_surfaces_to_template_wf, hcp_surface_wfs["lh"], [ - ("outputnode.lh_pial_surf", "inputnode.pial_surf"), - ("outputnode.lh_wm_surf", "inputnode.wm_surf"), + (warp_surfaces_to_template_wf, hcp_surface_wfs['lh'], [ + ('outputnode.lh_pial_surf', 'inputnode.pial_surf'), + ('outputnode.lh_wm_surf', 'inputnode.wm_surf'), ]), - (warp_surfaces_to_template_wf, hcp_surface_wfs["rh"], [ - ("outputnode.rh_pial_surf", "inputnode.pial_surf"), - ("outputnode.rh_wm_surf", "inputnode.wm_surf"), + (warp_surfaces_to_template_wf, hcp_surface_wfs['rh'], [ + ('outputnode.rh_pial_surf', 'inputnode.pial_surf'), + ('outputnode.rh_wm_surf', 'inputnode.wm_surf'), ]), ]) # fmt:skip @@ -275,16 +275,16 @@ def init_postprocess_surfaces_wf( # Use standard-space T1w and surfaces for brainsprite. workflow.connect([ (warp_surfaces_to_template_wf, brainsprite_wf, [ - ("outputnode.lh_pial_surf", "inputnode.lh_pial_surf"), - ("outputnode.rh_pial_surf", "inputnode.rh_pial_surf"), - ("outputnode.lh_wm_surf", "inputnode.lh_wm_surf"), - ("outputnode.rh_wm_surf", "inputnode.rh_wm_surf"), + ('outputnode.lh_pial_surf', 'inputnode.lh_pial_surf'), + ('outputnode.rh_pial_surf', 'inputnode.rh_pial_surf'), + ('outputnode.lh_wm_surf', 'inputnode.lh_wm_surf'), + ('outputnode.rh_wm_surf', 'inputnode.rh_wm_surf'), ]), ]) # fmt:skip elif not morphometry_files: raise ValueError( - "No surfaces found. Surfaces are required if `--warp-surfaces-native2std` is enabled." + 'No surfaces found. Surfaces are required if `--warp-surfaces-native2std` is enabled.' ) return workflow @@ -294,7 +294,7 @@ def init_postprocess_surfaces_wf( def init_warp_surfaces_to_template_wf( software, omp_nthreads, - name="warp_surfaces_to_template_wf", + name='warp_surfaces_to_template_wf', ): """Transform surfaces from native to standard fsLR-32k space. @@ -349,60 +349,60 @@ def init_warp_surfaces_to_template_wf( niu.IdentityInterface( fields=[ # transforms - "anat_to_template_xfm", - "template_to_anat_xfm", + 'anat_to_template_xfm', + 'template_to_anat_xfm', # surfaces - "lh_subject_sphere", - "rh_subject_sphere", - "lh_pial_surf", - "rh_pial_surf", - "lh_wm_surf", - "rh_wm_surf", + 'lh_subject_sphere', + 'rh_subject_sphere', + 'lh_pial_surf', + 'rh_pial_surf', + 'lh_wm_surf', + 'rh_wm_surf', ], ), - name="inputnode", + name='inputnode', ) # Feed the standard-space pial and white matter surfaces to the outputnode for the brainsprite # and the HCP-surface generation workflow. 
outputnode = pe.Node( niu.IdentityInterface( fields=[ - "lh_pial_surf", - "rh_pial_surf", - "lh_wm_surf", - "rh_wm_surf", + 'lh_pial_surf', + 'rh_pial_surf', + 'lh_wm_surf', + 'rh_wm_surf', ], ), - name="outputnode", + name='outputnode', ) # Warp the surfaces to space-fsLR, den-32k. # First, we create the Connectome WorkBench-compatible transform files. update_xfm_wf = init_ants_xfm_to_fsl_wf( mem_gb=1, - name="update_xfm_wf", + name='update_xfm_wf', ) workflow.connect([ (inputnode, update_xfm_wf, [ - ("anat_to_template_xfm", "inputnode.anat_to_template_xfm"), - ("template_to_anat_xfm", "inputnode.template_to_anat_xfm"), + ('anat_to_template_xfm', 'inputnode.anat_to_template_xfm'), + ('template_to_anat_xfm', 'inputnode.template_to_anat_xfm'), ]), ]) # fmt:skip # TODO: It would be nice to replace this for loop with MapNodes or iterables some day. - for hemi in ["L", "R"]: - hemi_label = f"{hemi.lower()}h" + for hemi in ['L', 'R']: + hemi_label = f'{hemi.lower()}h' # Place the surfaces in a single node. collect_surfaces = pe.Node( niu.Merge(2), - name=f"collect_surfaces_{hemi_label}", + name=f'collect_surfaces_{hemi_label}', ) # NOTE: Must match order of split_up_surfaces_fsLR_32k. workflow.connect([ (inputnode, collect_surfaces, [ - (f"{hemi_label}_pial_surf", "in1"), - (f"{hemi_label}_wm_surf", "in2"), + (f'{hemi_label}_pial_surf', 'in1'), + (f'{hemi_label}_wm_surf', 'in2'), ]), ]) # fmt:skip @@ -411,18 +411,18 @@ def init_warp_surfaces_to_template_wf( software=software, mem_gb=2, omp_nthreads=omp_nthreads, - name=f"{hemi_label}_apply_transforms_wf", + name=f'{hemi_label}_apply_transforms_wf', ) workflow.connect([ (inputnode, apply_transforms_wf, [ - (f"{hemi_label}_subject_sphere", "inputnode.subject_sphere"), + (f'{hemi_label}_subject_sphere', 'inputnode.subject_sphere'), ]), (update_xfm_wf, apply_transforms_wf, [ - ("outputnode.merged_warpfield", "inputnode.merged_warpfield"), - ("outputnode.merged_inv_warpfield", "inputnode.merged_inv_warpfield"), - ("outputnode.world_xfm", "inputnode.world_xfm"), + ('outputnode.merged_warpfield', 'inputnode.merged_warpfield'), + ('outputnode.merged_inv_warpfield', 'inputnode.merged_inv_warpfield'), + ('outputnode.world_xfm', 'inputnode.world_xfm'), ]), - (collect_surfaces, apply_transforms_wf, [("out", "inputnode.hemi_files")]), + (collect_surfaces, apply_transforms_wf, [('out', 'inputnode.hemi_files')]), ]) # fmt:skip # Split up the surfaces @@ -435,33 +435,33 @@ def init_warp_surfaces_to_template_wf( ], squeeze=True, ), - name=f"split_up_surfaces_fsLR_32k_{hemi_label}", + name=f'split_up_surfaces_fsLR_32k_{hemi_label}', ) workflow.connect([ (apply_transforms_wf, split_up_surfaces_fsLR_32k, [ - ("outputnode.warped_hemi_files", "inlist"), + ('outputnode.warped_hemi_files', 'inlist'), ]), (split_up_surfaces_fsLR_32k, outputnode, [ - ("out1", f"{hemi_label}_pial_surf"), - ("out2", f"{hemi_label}_wm_surf"), + ('out1', f'{hemi_label}_pial_surf'), + ('out2', f'{hemi_label}_wm_surf'), ]), ]) # fmt:skip ds_standard_space_surfaces = pe.MapNode( DerivativesDataSink( - space="fsLR", - den="32k", - extension=".surf.gii", # the extension is taken from the in_file by default + space='fsLR', + den='32k', + extension='.surf.gii', # the extension is taken from the in_file by default ), - name=f"ds_standard_space_surfaces_{hemi_label}", + name=f'ds_standard_space_surfaces_{hemi_label}', run_without_submitting=True, mem_gb=1, - iterfield=["in_file", "source_file"], + iterfield=['in_file', 'source_file'], ) workflow.connect([ - (collect_surfaces, 
ds_standard_space_surfaces, [("out", "source_file")]), + (collect_surfaces, ds_standard_space_surfaces, [('out', 'source_file')]), (apply_transforms_wf, ds_standard_space_surfaces, [ - ("outputnode.warped_hemi_files", "in_file"), + ('outputnode.warped_hemi_files', 'in_file'), ]), ]) # fmt:skip @@ -469,7 +469,7 @@ def init_warp_surfaces_to_template_wf( @fill_doc -def init_generate_hcp_surfaces_wf(name="generate_hcp_surfaces_wf"): +def init_generate_hcp_surfaces_wf(name='generate_hcp_surfaces_wf'): """Generate midthickness, inflated, and very-inflated HCP-style surfaces. Workflow Graph @@ -503,49 +503,49 @@ def init_generate_hcp_surfaces_wf(name="generate_hcp_surfaces_wf"): inputnode = pe.Node( niu.IdentityInterface( fields=[ - "name_source", - "pial_surf", - "wm_surf", + 'name_source', + 'pial_surf', + 'wm_surf', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( - niu.IdentityInterface(fields=["midthickness"]), - name="outputnode", + niu.IdentityInterface(fields=['midthickness']), + name='outputnode', ) generate_midthickness = pe.Node( SurfaceAverage(num_threads=config.nipype.omp_nthreads), - name="generate_midthickness", + name='generate_midthickness', mem_gb=2, n_procs=config.nipype.omp_nthreads, ) workflow.connect([ (inputnode, generate_midthickness, [ - ("pial_surf", "surface_in1"), - ("wm_surf", "surface_in2"), + ('pial_surf', 'surface_in1'), + ('wm_surf', 'surface_in2'), ]), - (generate_midthickness, outputnode, [("out_file", "midthickness")]), + (generate_midthickness, outputnode, [('out_file', 'midthickness')]), ]) # fmt:skip ds_midthickness = pe.Node( DerivativesDataSink( check_hdr=False, - space="fsLR", - den="32k", - desc="hcp", - suffix="midthickness", - extension=".surf.gii", + space='fsLR', + den='32k', + desc='hcp', + suffix='midthickness', + extension='.surf.gii', ), - name="ds_midthickness", + name='ds_midthickness', run_without_submitting=False, mem_gb=2, ) workflow.connect([ - (inputnode, ds_midthickness, [("name_source", "source_file")]), - (generate_midthickness, ds_midthickness, [("out_file", "in_file")]), + (inputnode, ds_midthickness, [('name_source', 'source_file')]), + (generate_midthickness, ds_midthickness, [('out_file', 'in_file')]), ]) # fmt:skip # Generate (very-)inflated surface from standard-space midthickness surface. 
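
Aside (illustration only): generate_midthickness above passes the pial and white surfaces to SurfaceAverage, which presumably wraps wb_command -surface-average. Conceptually, each HCP-style midthickness vertex sits halfway between the corresponding white and pial vertices. A rough nibabel sketch of that arithmetic, with placeholder file names and assuming both GIFTIs share the same vertex ordering and triangulation:

import nibabel as nb

pial = nb.load('lh.pial.surf.gii')    # placeholder inputs, not xcp_d filenames
white = nb.load('lh.white.surf.gii')

pial_coords, faces = pial.agg_data(('pointset', 'triangle'))
white_coords = white.agg_data('pointset')

# Vertex-wise average of the two coordinate arrays gives the midthickness mesh.
mid_coords = (pial_coords + white_coords) / 2.0

In the workflow itself the averaging is performed by Connectome Workbench rather than in Python; the sketch only spells out what "midthickness" means here.
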
@@ -556,53 +556,53 @@ def init_generate_hcp_surfaces_wf(name="generate_hcp_surfaces_wf"): ), mem_gb=2, n_procs=config.nipype.omp_nthreads, - name="inflate_surface", + name='inflate_surface', ) workflow.connect([ - (generate_midthickness, inflate_surface, [("out_file", "anatomical_surface_in")]), + (generate_midthickness, inflate_surface, [('out_file', 'anatomical_surface_in')]), ]) # fmt:skip ds_inflated = pe.Node( DerivativesDataSink( check_hdr=False, - space="fsLR", - den="32k", - desc="hcp", - suffix="inflated", - extension=".surf.gii", + space='fsLR', + den='32k', + desc='hcp', + suffix='inflated', + extension='.surf.gii', ), - name="ds_inflated", + name='ds_inflated', run_without_submitting=False, mem_gb=2, ) workflow.connect([ - (inputnode, ds_inflated, [("name_source", "source_file")]), - (inflate_surface, ds_inflated, [("inflated_out_file", "in_file")]), + (inputnode, ds_inflated, [('name_source', 'source_file')]), + (inflate_surface, ds_inflated, [('inflated_out_file', 'in_file')]), ]) # fmt:skip ds_vinflated = pe.Node( DerivativesDataSink( check_hdr=False, - space="fsLR", - den="32k", - desc="hcp", - suffix="vinflated", - extension=".surf.gii", + space='fsLR', + den='32k', + desc='hcp', + suffix='vinflated', + extension='.surf.gii', ), - name="ds_vinflated", + name='ds_vinflated', run_without_submitting=False, mem_gb=2, ) workflow.connect([ - (inputnode, ds_vinflated, [("name_source", "source_file")]), - (inflate_surface, ds_vinflated, [("very_inflated_out_file", "in_file")]), + (inputnode, ds_vinflated, [('name_source', 'source_file')]), + (inflate_surface, ds_vinflated, [('very_inflated_out_file', 'in_file')]), ]) # fmt:skip return workflow @fill_doc -def init_ants_xfm_to_fsl_wf(mem_gb, name="ants_xfm_to_fsl_wf"): +def init_ants_xfm_to_fsl_wf(mem_gb, name='ants_xfm_to_fsl_wf'): """Modify ANTS-style fMRIPrep transforms to work with Connectome Workbench/FSL FNIRT. XXX: Does this only work if the template is MNI152NLin6Asym? @@ -647,13 +647,13 @@ def init_ants_xfm_to_fsl_wf(mem_gb, name="ants_xfm_to_fsl_wf"): workflow = Workflow(name=name) inputnode = pe.Node( - niu.IdentityInterface(fields=["anat_to_template_xfm", "template_to_anat_xfm"]), - name="inputnode", + niu.IdentityInterface(fields=['anat_to_template_xfm', 'template_to_anat_xfm']), + name='inputnode', ) outputnode = pe.Node( - niu.IdentityInterface(fields=["world_xfm", "merged_warpfield", "merged_inv_warpfield"]), - name="outputnode", + niu.IdentityInterface(fields=['world_xfm', 'merged_warpfield', 'merged_inv_warpfield']), + name='outputnode', ) # Now we can start the actual workflow. @@ -661,176 +661,176 @@ def init_ants_xfm_to_fsl_wf(mem_gb, name="ants_xfm_to_fsl_wf"): disassemble_h5 = pe.Node( CompositeTransformUtil( inverse=False, - process="disassemble", - output_prefix="T1w_to_MNI152NLin6Asym", + process='disassemble', + output_prefix='T1w_to_MNI152NLin6Asym', ), - name="disassemble_h5", + name='disassemble_h5', mem_gb=mem_gb, ) - workflow.connect([(inputnode, disassemble_h5, [("anat_to_template_xfm", "in_file")])]) + workflow.connect([(inputnode, disassemble_h5, [('anat_to_template_xfm', 'in_file')])]) # Nipype's CompositeTransformUtil assumes a certain file naming and # concatenation order of xfms which does not work for the inverse .h5, # so we use our modified class with an additional inverse flag. 
disassemble_h5_inv = pe.Node( CompositeTransformUtil( - process="disassemble", + process='disassemble', inverse=True, - output_prefix="MNI152NLin6Asym_to_T1w", + output_prefix='MNI152NLin6Asym_to_T1w', ), - name="disassemble_h5_inv", + name='disassemble_h5_inv', mem_gb=mem_gb, ) - workflow.connect([(inputnode, disassemble_h5_inv, [("template_to_anat_xfm", "in_file")])]) + workflow.connect([(inputnode, disassemble_h5_inv, [('template_to_anat_xfm', 'in_file')])]) # Convert anat-to-template affine from ITK binary to txt convert_ants_xfm = pe.Node( ConvertTransformFile(dimension=3), - name="convert_ants_xfm", + name='convert_ants_xfm', ) - workflow.connect([(disassemble_h5, convert_ants_xfm, [("affine_transform", "in_transform")])]) + workflow.connect([(disassemble_h5, convert_ants_xfm, [('affine_transform', 'in_transform')])]) # Change xfm type from "AffineTransform" to "MatrixOffsetTransformBase" # since wb_command doesn't recognize "AffineTransform" # (AffineTransform is a subclass of MatrixOffsetTransformBase which prob makes this okay to do) change_xfm_type = pe.Node( ChangeXfmType(num_threads=config.nipype.omp_nthreads), - name="change_xfm_type", + name='change_xfm_type', n_procs=config.nipype.omp_nthreads, ) - workflow.connect([(convert_ants_xfm, change_xfm_type, [("out_transform", "in_transform")])]) + workflow.connect([(convert_ants_xfm, change_xfm_type, [('out_transform', 'in_transform')])]) # Convert affine xfm to "world" so it works with -surface-apply-affine convert_xfm2world = pe.Node( - ConvertAffine(fromwhat="itk", towhat="world", num_threads=config.nipype.omp_nthreads), - name="convert_xfm2world", + ConvertAffine(fromwhat='itk', towhat='world', num_threads=config.nipype.omp_nthreads), + name='convert_xfm2world', n_procs=config.nipype.omp_nthreads, ) - workflow.connect([(change_xfm_type, convert_xfm2world, [("out_transform", "in_file")])]) + workflow.connect([(change_xfm_type, convert_xfm2world, [('out_transform', 'in_file')])]) # Use C3d to separate the combined warpfield xfm into x, y, and z components get_xyz_components = pe.Node( C3d( is_4d=True, multicomp_split=True, - out_files=["e1.nii.gz", "e2.nii.gz", "e3.nii.gz"], + out_files=['e1.nii.gz', 'e2.nii.gz', 'e3.nii.gz'], ), - name="get_xyz_components", + name='get_xyz_components', mem_gb=mem_gb, ) get_inv_xyz_components = pe.Node( C3d( is_4d=True, multicomp_split=True, - out_files=["e1inv.nii.gz", "e2inv.nii.gz", "e3inv.nii.gz"], + out_files=['e1inv.nii.gz', 'e2inv.nii.gz', 'e3inv.nii.gz'], ), - name="get_inv_xyz_components", + name='get_inv_xyz_components', mem_gb=mem_gb, ) workflow.connect([ - (disassemble_h5, get_xyz_components, [("displacement_field", "in_file")]), - (disassemble_h5_inv, get_inv_xyz_components, [("displacement_field", "in_file")]), + (disassemble_h5, get_xyz_components, [('displacement_field', 'in_file')]), + (disassemble_h5_inv, get_inv_xyz_components, [('displacement_field', 'in_file')]), ]) # fmt:skip # Select x-component after separating warpfield above select_x_component = pe.Node( niu.Select(index=[0]), - name="select_x_component", + name='select_x_component', mem_gb=mem_gb, ) select_inv_x_component = pe.Node( niu.Select(index=[0]), - name="select_inv_x_component", + name='select_inv_x_component', mem_gb=mem_gb, ) # Select y-component select_y_component = pe.Node( niu.Select(index=[1]), - name="select_y_component", + name='select_y_component', mem_gb=mem_gb, ) select_inv_y_component = pe.Node( niu.Select(index=[1]), - name="select_inv_y_component", + name='select_inv_y_component', mem_gb=mem_gb, 
) # Select z-component select_z_component = pe.Node( niu.Select(index=[2]), - name="select_z_component", + name='select_z_component', mem_gb=mem_gb, ) select_inv_z_component = pe.Node( niu.Select(index=[2]), - name="select_inv_z_component", + name='select_inv_z_component', mem_gb=mem_gb, ) workflow.connect([ - (get_xyz_components, select_x_component, [("out_files", "inlist")]), - (get_xyz_components, select_y_component, [("out_files", "inlist")]), - (get_xyz_components, select_z_component, [("out_files", "inlist")]), - (get_inv_xyz_components, select_inv_x_component, [("out_files", "inlist")]), - (get_inv_xyz_components, select_inv_y_component, [("out_files", "inlist")]), - (get_inv_xyz_components, select_inv_z_component, [("out_files", "inlist")]), + (get_xyz_components, select_x_component, [('out_files', 'inlist')]), + (get_xyz_components, select_y_component, [('out_files', 'inlist')]), + (get_xyz_components, select_z_component, [('out_files', 'inlist')]), + (get_inv_xyz_components, select_inv_x_component, [('out_files', 'inlist')]), + (get_inv_xyz_components, select_inv_y_component, [('out_files', 'inlist')]), + (get_inv_xyz_components, select_inv_z_component, [('out_files', 'inlist')]), ]) # fmt:skip # Reverse y-component of the warpfield # (need to do this when converting a warpfield from ANTs to FNIRT format # for use with wb_command -surface-apply-warpfield) reverse_y_component = pe.Node( - BinaryMath(expression="img * -1"), - name="reverse_y_component", + BinaryMath(expression='img * -1'), + name='reverse_y_component', mem_gb=mem_gb, ) reverse_inv_y_component = pe.Node( - BinaryMath(expression="img * -1"), - name="reverse_inv_y_component", + BinaryMath(expression='img * -1'), + name='reverse_inv_y_component', mem_gb=mem_gb, ) workflow.connect([ - (select_y_component, reverse_y_component, [("out", "in_file")]), - (select_inv_y_component, reverse_inv_y_component, [("out", "in_file")]), + (select_y_component, reverse_y_component, [('out', 'in_file')]), + (select_inv_y_component, reverse_inv_y_component, [('out', 'in_file')]), ]) # fmt:skip # Collect new warpfield components in individual nodes collect_new_components = pe.Node( niu.Merge(3), - name="collect_new_components", + name='collect_new_components', mem_gb=mem_gb, ) collect_new_inv_components = pe.Node( niu.Merge(3), - name="collect_new_inv_components", + name='collect_new_inv_components', mem_gb=mem_gb, ) workflow.connect([ - (select_x_component, collect_new_components, [("out", "in1")]), - (reverse_y_component, collect_new_components, [("out_file", "in2")]), - (select_z_component, collect_new_components, [("out", "in3")]), - (select_inv_x_component, collect_new_inv_components, [("out", "in1")]), - (reverse_inv_y_component, collect_new_inv_components, [("out_file", "in2")]), - (select_inv_z_component, collect_new_inv_components, [("out", "in3")]), + (select_x_component, collect_new_components, [('out', 'in1')]), + (reverse_y_component, collect_new_components, [('out_file', 'in2')]), + (select_z_component, collect_new_components, [('out', 'in3')]), + (select_inv_x_component, collect_new_inv_components, [('out', 'in1')]), + (reverse_inv_y_component, collect_new_inv_components, [('out_file', 'in2')]), + (select_inv_z_component, collect_new_inv_components, [('out', 'in3')]), ]) # fmt:skip # Merge warpfield components in FSL FNIRT format, with the reversed y-component from above remerge_warpfield = pe.Node( Merge(), - name="remerge_warpfield", + name='remerge_warpfield', mem_gb=mem_gb, ) remerge_inv_warpfield = pe.Node( Merge(), - 
name="remerge_inv_warpfield", + name='remerge_inv_warpfield', mem_gb=mem_gb, ) workflow.connect([ - (collect_new_components, remerge_warpfield, [("out", "in_files")]), - (collect_new_inv_components, remerge_inv_warpfield, [("out", "in_files")]), - (convert_xfm2world, outputnode, [("out_file", "world_xfm")]), - (remerge_warpfield, outputnode, [("out_file", "merged_warpfield")]), - (remerge_inv_warpfield, outputnode, [("out_file", "merged_inv_warpfield")]), + (collect_new_components, remerge_warpfield, [('out', 'in_files')]), + (collect_new_inv_components, remerge_inv_warpfield, [('out', 'in_files')]), + (convert_xfm2world, outputnode, [('out_file', 'world_xfm')]), + (remerge_warpfield, outputnode, [('out_file', 'merged_warpfield')]), + (remerge_inv_warpfield, outputnode, [('out_file', 'merged_inv_warpfield')]), ]) # fmt:skip return workflow @@ -842,7 +842,7 @@ def init_warp_one_hemisphere_wf( software, mem_gb, omp_nthreads, - name="warp_one_hemisphere_wf", + name='warp_one_hemisphere_wf', ): """Apply transforms to warp one hemisphere's surface files into standard space. @@ -934,23 +934,23 @@ def init_warp_one_hemisphere_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "hemi_files", - "world_xfm", - "merged_warpfield", - "merged_inv_warpfield", - "subject_sphere", + 'hemi_files', + 'world_xfm', + 'merged_warpfield', + 'merged_inv_warpfield', + 'subject_sphere', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( - niu.IdentityInterface(fields=["warped_hemi_files"]), - name="outputnode", + niu.IdentityInterface(fields=['warped_hemi_files']), + name='outputnode', ) collect_registration_files = pe.Node( CollectRegistrationFiles(hemisphere=hemisphere, software=software), - name="collect_registration_files", + name='collect_registration_files', mem_gb=0.1, n_procs=1, ) @@ -962,61 +962,61 @@ def init_warp_one_hemisphere_wf( # So what's the result? The fsLR or dhcpAsym vertices with coordinates on the fsnative sphere? 
surface_sphere_project_unproject = pe.Node( SurfaceSphereProjectUnproject(num_threads=omp_nthreads), - name="surface_sphere_project_unproject", + name='surface_sphere_project_unproject', n_procs=omp_nthreads, ) workflow.connect([ - (inputnode, surface_sphere_project_unproject, [("subject_sphere", "in_file")]), + (inputnode, surface_sphere_project_unproject, [('subject_sphere', 'in_file')]), (collect_registration_files, surface_sphere_project_unproject, [ - ("source_sphere", "sphere_project_to"), - ("sphere_to_sphere", "sphere_unproject_from"), + ('source_sphere', 'sphere_project_to'), + ('sphere_to_sphere', 'sphere_unproject_from'), ]), ]) # fmt:skip # Resample the pial and white matter surfaces from fsnative to fsLR-32k or dhcpAsym-32k resample_to_fsLR32k = pe.MapNode( - CiftiSurfaceResample(method="BARYCENTRIC", num_threads=omp_nthreads), - name="resample_to_fsLR32k", + CiftiSurfaceResample(method='BARYCENTRIC', num_threads=omp_nthreads), + name='resample_to_fsLR32k', mem_gb=mem_gb, n_procs=omp_nthreads, - iterfield=["in_file"], + iterfield=['in_file'], ) workflow.connect([ - (inputnode, resample_to_fsLR32k, [("hemi_files", "in_file")]), - (collect_registration_files, resample_to_fsLR32k, [("target_sphere", "new_sphere")]), - (surface_sphere_project_unproject, resample_to_fsLR32k, [("out_file", "current_sphere")]), + (inputnode, resample_to_fsLR32k, [('hemi_files', 'in_file')]), + (collect_registration_files, resample_to_fsLR32k, [('target_sphere', 'new_sphere')]), + (surface_sphere_project_unproject, resample_to_fsLR32k, [('out_file', 'current_sphere')]), ]) # fmt:skip # Apply FLIRT-format anatomical-to-template affine transform to 32k surfs # NOTE: What does this step do? Aren't the data in fsLR/dhcpAsym-32k from resample_to_fsLR32k? apply_affine_to_fsLR32k = pe.MapNode( ApplyAffine(num_threads=omp_nthreads), - name="apply_affine_to_fsLR32k", + name='apply_affine_to_fsLR32k', mem_gb=mem_gb, n_procs=omp_nthreads, - iterfield=["in_file"], + iterfield=['in_file'], ) workflow.connect([ - (inputnode, apply_affine_to_fsLR32k, [("world_xfm", "affine")]), - (resample_to_fsLR32k, apply_affine_to_fsLR32k, [("out_file", "in_file")]), + (inputnode, apply_affine_to_fsLR32k, [('world_xfm', 'affine')]), + (resample_to_fsLR32k, apply_affine_to_fsLR32k, [('out_file', 'in_file')]), ]) # fmt:skip # Apply FNIRT-format (forward) anatomical-to-template warpfield # NOTE: What does this step do? 
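Conceptually, the two steps below move the resampled surfaces from native anatomical space into the volumetric template space: the affine shifts every vertex coordinate, and the warpfield then adds a nonlinear displacement sampled at each vertex (per the Workbench documentation, warping a surface uses the inverse of the field used to warp volumes, which is why merged_inv_warpfield feeds the warpfield input). A rough numpy sketch of the affine part, with hypothetical file names:

    import nibabel as nb
    import numpy as np

    surf = nb.load('sub-01_hemi-L_space-fsLR_den-32k_pial.surf.gii')
    coords = surf.agg_data('NIFTI_INTENT_POINTSET')    # (n_vertices, 3) coordinates in mm
    affine = np.loadtxt('anat_to_template_world.mat')  # 4x4 "world" affine, as produced by convert_xfm2world

    # wb_command -surface-apply-affine applies the same mapping to every vertex:
    warped_coords = coords @ affine[:3, :3].T + affine[:3, 3]
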
apply_warpfield_to_fsLR32k = pe.MapNode( ApplyWarpfield(num_threads=omp_nthreads), - name="apply_warpfield_to_fsLR32k", + name='apply_warpfield_to_fsLR32k', mem_gb=mem_gb, n_procs=omp_nthreads, - iterfield=["in_file"], + iterfield=['in_file'], ) workflow.connect([ (inputnode, apply_warpfield_to_fsLR32k, [ - ("merged_warpfield", "forward_warp"), - ("merged_inv_warpfield", "warpfield"), + ('merged_warpfield', 'forward_warp'), + ('merged_inv_warpfield', 'warpfield'), ]), - (apply_affine_to_fsLR32k, apply_warpfield_to_fsLR32k, [("out_file", "in_file")]), - (apply_warpfield_to_fsLR32k, outputnode, [("out_file", "warped_hemi_files")]), + (apply_affine_to_fsLR32k, apply_warpfield_to_fsLR32k, [('out_file', 'in_file')]), + (apply_warpfield_to_fsLR32k, outputnode, [('out_file', 'warped_hemi_files')]), ]) # fmt:skip return workflow diff --git a/xcp_d/workflows/anatomical/volume.py b/xcp_d/workflows/anatomical/volume.py index e9137ec2e..2bff1b6e8 100644 --- a/xcp_d/workflows/anatomical/volume.py +++ b/xcp_d/workflows/anatomical/volume.py @@ -13,7 +13,7 @@ from xcp_d.utils.utils import list_to_str from xcp_d.workflows.anatomical.plotting import init_execsummary_anatomical_plots_wf -LOGGER = logging.getLogger("nipype.workflow") +LOGGER = logging.getLogger('nipype.workflow') @fill_doc @@ -21,7 +21,7 @@ def init_postprocess_anat_wf( t1w_available, t2w_available, target_space, - name="postprocess_anat_wf", + name='postprocess_anat_wf', ): """Copy T1w, segmentation, and, optionally, T2w to the derivative directory. @@ -81,27 +81,27 @@ def init_postprocess_anat_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "t1w", - "t2w", - "anat_to_template_xfm", - "template", + 't1w', + 't2w', + 'anat_to_template_xfm', + 'template', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( - niu.IdentityInterface(fields=["t1w", "t2w"]), - name="outputnode", + niu.IdentityInterface(fields=['t1w', 't2w']), + name='outputnode', ) # Split cohort out of the space for MNIInfant templates. 
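As an illustration of the cohort handling below: a space such as 'MNIInfant+1' is split into a template name and a cohort, which are then passed to TemplateFlow. A small sketch (the example space string is illustrative, and calling TemplateFlow directly will download the template on first use):

    from templateflow import api as tf

    space = 'MNIInfant+1'  # hypothetical example value
    cohort = None
    if '+' in space:
        space, cohort = space.split('+')  # -> ('MNIInfant', '1')

    template_file = tf.get(template=space, cohort=cohort, resolution=1, desc=None, suffix='T1w')
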
cohort = None - if "+" in target_space: - target_space, cohort = target_space.split("+") + if '+' in target_space: + target_space, cohort = target_space.split('+') template_file = str( - get_template(template=target_space, cohort=cohort, resolution=1, desc=None, suffix="T1w") + get_template(template=target_space, cohort=cohort, resolution=1, desc=None, suffix='T1w') ) inputnode.inputs.template = template_file @@ -110,14 +110,14 @@ def init_postprocess_anat_wf( DerivativesDataSink( space=target_space, cohort=cohort, - extension=".nii.gz", + extension='.nii.gz', ), - name="ds_t1w_std", + name='ds_t1w_std', run_without_submitting=False, ) workflow.connect([ - (inputnode, ds_t1w_std, [("t1w", "source_file")]), - (ds_t1w_std, outputnode, [("out_file", "t1w")]), + (inputnode, ds_t1w_std, [('t1w', 'source_file')]), + (ds_t1w_std, outputnode, [('out_file', 't1w')]), ]) # fmt:skip if t2w_available: @@ -125,29 +125,29 @@ def init_postprocess_anat_wf( DerivativesDataSink( space=target_space, cohort=cohort, - extension=".nii.gz", + extension='.nii.gz', ), - name="ds_t2w_std", + name='ds_t2w_std', run_without_submitting=False, ) workflow.connect([ - (inputnode, ds_t2w_std, [("t2w", "source_file")]), - (ds_t2w_std, outputnode, [("out_file", "t2w")]), + (inputnode, ds_t2w_std, [('t2w', 'source_file')]), + (ds_t2w_std, outputnode, [('out_file', 't2w')]), ]) # fmt:skip - if input_type in ("dcan", "hcp", "ukb"): + if input_type in ('dcan', 'hcp', 'ukb'): # Assume that the T1w and T2w files are in standard space, # but don't have the "space" entity, for the "dcan" and "hcp" derivatives. # This is a bug, and the converted filenames are inaccurate, so we have this # workaround in place. if t1w_available: - workflow.connect([(inputnode, ds_t1w_std, [("t1w", "in_file")])]) + workflow.connect([(inputnode, ds_t1w_std, [('t1w', 'in_file')])]) if t2w_available: - workflow.connect([(inputnode, ds_t2w_std, [("t2w", "in_file")])]) + workflow.connect([(inputnode, ds_t2w_std, [('t2w', 'in_file')])]) else: - out = ["T1w"] if t1w_available else [] + ["T2w"] if t2w_available else [] + out = ['T1w'] if t1w_available else [] + ['T2w'] if t2w_available else [] workflow.__desc__ = f""" #### Anatomical data @@ -160,43 +160,43 @@ def init_postprocess_anat_wf( # Warp the native T1w-space T1w, T1w segmentation, and T2w files to standard space. 
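The node below applies the anatomical-to-template transform with ANTs. The same operation can be sketched outside the workflow with nipype's stock ANTs interface (hypothetical file names; requires ANTs to be installed):

    from nipype.interfaces.ants import ApplyTransforms as AntsApplyTransforms

    warp_t1w = AntsApplyTransforms(
        input_image='sub-01_desc-preproc_T1w.nii.gz',
        reference_image='tpl-MNI152NLin6Asym_res-01_T1w.nii.gz',
        transforms=['sub-01_from-T1w_to-MNI152NLin6Asym_mode-image_xfm.h5'],
        interpolation='LanczosWindowedSinc',
        input_image_type=3,
        dimension=3,
    )
    # result = warp_t1w.run()
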
warp_t1w_to_template = pe.Node( ApplyTransforms( - interpolation="LanczosWindowedSinc", + interpolation='LanczosWindowedSinc', input_image_type=3, dimension=3, num_threads=config.nipype.omp_nthreads, ), - name="warp_t1w_to_template", + name='warp_t1w_to_template', mem_gb=2, n_procs=config.nipype.omp_nthreads, ) workflow.connect([ (inputnode, warp_t1w_to_template, [ - ("t1w", "input_image"), - ("anat_to_template_xfm", "transforms"), - ("template", "reference_image"), + ('t1w', 'input_image'), + ('anat_to_template_xfm', 'transforms'), + ('template', 'reference_image'), ]), - (warp_t1w_to_template, ds_t1w_std, [("output_image", "in_file")]), + (warp_t1w_to_template, ds_t1w_std, [('output_image', 'in_file')]), ]) # fmt:skip if t2w_available: warp_t2w_to_template = pe.Node( ApplyTransforms( - interpolation="LanczosWindowedSinc", + interpolation='LanczosWindowedSinc', input_image_type=3, dimension=3, num_threads=config.nipype.omp_nthreads, ), - name="warp_t2w_to_template", + name='warp_t2w_to_template', mem_gb=2, n_procs=config.nipype.omp_nthreads, ) workflow.connect([ (inputnode, warp_t2w_to_template, [ - ("t2w", "input_image"), - ("anat_to_template_xfm", "transforms"), - ("template", "reference_image"), + ('t2w', 'input_image'), + ('anat_to_template_xfm', 'transforms'), + ('template', 'reference_image'), ]), - (warp_t2w_to_template, ds_t2w_std, [("output_image", "in_file")]), + (warp_t2w_to_template, ds_t2w_std, [('output_image', 'in_file')]), ]) # fmt:skip if config.workflow.abcc_qc: @@ -205,17 +205,17 @@ def init_postprocess_anat_wf( t2w_available=t2w_available, ) workflow.connect([ - (inputnode, execsummary_anatomical_plots_wf, [("template", "inputnode.template")]), + (inputnode, execsummary_anatomical_plots_wf, [('template', 'inputnode.template')]), ]) # fmt:skip if t1w_available: workflow.connect([ - (ds_t1w_std, execsummary_anatomical_plots_wf, [("out_file", "inputnode.t1w")]), + (ds_t1w_std, execsummary_anatomical_plots_wf, [('out_file', 'inputnode.t1w')]), ]) # fmt:skip if t2w_available: workflow.connect([ - (ds_t2w_std, execsummary_anatomical_plots_wf, [("out_file", "inputnode.t2w")]), + (ds_t2w_std, execsummary_anatomical_plots_wf, [('out_file', 'inputnode.t2w')]), ]) # fmt:skip return workflow diff --git a/xcp_d/workflows/base.py b/xcp_d/workflows/base.py index 90539046f..bd62dbfae 100644 --- a/xcp_d/workflows/base.py +++ b/xcp_d/workflows/base.py @@ -8,7 +8,7 @@ from pathlib import Path import bids -import matplotlib +import matplotlib as mpl import nibabel as nb import nilearn import numpy as np @@ -48,7 +48,7 @@ from xcp_d.workflows.bold.nifti import init_postprocess_nifti_wf from xcp_d.workflows.parcellation import init_load_atlases_wf -LOGGER = logging.getLogger("nipype.workflow") +LOGGER = logging.getLogger('nipype.workflow') def init_xcpd_wf(): @@ -74,14 +74,14 @@ def init_xcpd_wf(): ver = Version(config.environment.version) - xcpd_wf = Workflow(name=f"xcp_d_{ver.major}_{ver.minor}_wf") + xcpd_wf = Workflow(name=f'xcp_d_{ver.major}_{ver.minor}_wf') xcpd_wf.base_dir = config.execution.work_dir for subject_id in config.execution.participant_label: single_subject_wf = init_single_subject_wf(subject_id) - single_subject_wf.config["execution"]["crashdump_dir"] = str( - config.execution.output_dir / f"sub-{subject_id}" / "log" / config.execution.run_uuid + single_subject_wf.config['execution']['crashdump_dir'] = str( + config.execution.output_dir / f'sub-{subject_id}' / 'log' / config.execution.run_uuid ) for node in single_subject_wf._get_all_nodes(): node.config = 
deepcopy(single_subject_wf.config) @@ -90,10 +90,10 @@ def init_xcpd_wf(): # Dump a copy of the config file into the log directory log_dir = ( - config.execution.output_dir / f"sub-{subject_id}" / "log" / config.execution.run_uuid + config.execution.output_dir / f'sub-{subject_id}' / 'log' / config.execution.run_uuid ) log_dir.mkdir(exist_ok=True, parents=True) - config.to_filename(log_dir / "xcp_d.toml") + config.to_filename(log_dir / 'xcp_d.toml') return xcpd_wf @@ -131,9 +131,9 @@ def init_single_subject_wf(subject_id: str): input_type=config.workflow.input_type, file_format=config.workflow.file_format, ) - t1w_available = subj_data["t1w"] is not None - t2w_available = subj_data["t2w"] is not None - anat_mod = "t1w" if t1w_available else "t2w" + t1w_available = subj_data['t1w'] is not None + t2w_available = subj_data['t2w'] is not None + anat_mod = 't1w' if t1w_available else 't2w' mesh_available, standard_space_mesh, software, mesh_files = collect_mesh_data( layout=config.execution.layout, @@ -148,61 +148,61 @@ def init_single_subject_wf(subject_id: str): # determine the appropriate post-processing workflow workflows = { - "nifti": init_postprocess_nifti_wf, - "cifti": init_postprocess_cifti_wf, + 'nifti': init_postprocess_nifti_wf, + 'cifti': init_postprocess_cifti_wf, } init_postprocess_bold_wf = workflows[config.workflow.file_format] - preproc_files = subj_data["bold"] + preproc_files = subj_data['bold'] inputnode = pe.Node( niu.IdentityInterface( fields=[ - "t1w", - "t2w", # optional - "anat_brainmask", # used to estimate head radius and for QC metrics - "template_to_anat_xfm", # not used by cifti workflow - "anat_to_template_xfm", + 't1w', + 't2w', # optional + 'anat_brainmask', # used to estimate head radius and for QC metrics + 'template_to_anat_xfm', # not used by cifti workflow + 'anat_to_template_xfm', # mesh files - "lh_pial_surf", - "rh_pial_surf", - "lh_wm_surf", - "rh_wm_surf", - "lh_subject_sphere", - "rh_subject_sphere", + 'lh_pial_surf', + 'rh_pial_surf', + 'lh_wm_surf', + 'rh_wm_surf', + 'lh_subject_sphere', + 'rh_subject_sphere', # morphometry files - "sulcal_depth", - "sulcal_curv", - "cortical_thickness", - "cortical_thickness_corr", - "myelin", - "myelin_smoothed", + 'sulcal_depth', + 'sulcal_curv', + 'cortical_thickness', + 'cortical_thickness_corr', + 'myelin', + 'myelin_smoothed', ], ), - name="inputnode", + name='inputnode', ) - inputnode.inputs.t1w = subj_data["t1w"] - inputnode.inputs.t2w = subj_data["t2w"] - inputnode.inputs.anat_brainmask = subj_data["anat_brainmask"] - inputnode.inputs.template_to_anat_xfm = subj_data["template_to_anat_xfm"] - inputnode.inputs.anat_to_template_xfm = subj_data["anat_to_template_xfm"] + inputnode.inputs.t1w = subj_data['t1w'] + inputnode.inputs.t2w = subj_data['t2w'] + inputnode.inputs.anat_brainmask = subj_data['anat_brainmask'] + inputnode.inputs.template_to_anat_xfm = subj_data['template_to_anat_xfm'] + inputnode.inputs.anat_to_template_xfm = subj_data['anat_to_template_xfm'] # surface mesh files (required for brainsprite/warp workflows) - inputnode.inputs.lh_pial_surf = mesh_files["lh_pial_surf"] - inputnode.inputs.rh_pial_surf = mesh_files["rh_pial_surf"] - inputnode.inputs.lh_wm_surf = mesh_files["lh_wm_surf"] - inputnode.inputs.rh_wm_surf = mesh_files["rh_wm_surf"] - inputnode.inputs.lh_subject_sphere = mesh_files["lh_subject_sphere"] - inputnode.inputs.rh_subject_sphere = mesh_files["rh_subject_sphere"] + inputnode.inputs.lh_pial_surf = mesh_files['lh_pial_surf'] + inputnode.inputs.rh_pial_surf = 
mesh_files['rh_pial_surf'] + inputnode.inputs.lh_wm_surf = mesh_files['lh_wm_surf'] + inputnode.inputs.rh_wm_surf = mesh_files['rh_wm_surf'] + inputnode.inputs.lh_subject_sphere = mesh_files['lh_subject_sphere'] + inputnode.inputs.rh_subject_sphere = mesh_files['rh_subject_sphere'] # optional surface shape files (used by surface-warping workflow) - inputnode.inputs.sulcal_depth = morphometry_files["sulcal_depth"] - inputnode.inputs.sulcal_curv = morphometry_files["sulcal_curv"] - inputnode.inputs.cortical_thickness = morphometry_files["cortical_thickness"] - inputnode.inputs.cortical_thickness_corr = morphometry_files["cortical_thickness_corr"] - inputnode.inputs.myelin = morphometry_files["myelin"] - inputnode.inputs.myelin_smoothed = morphometry_files["myelin_smoothed"] + inputnode.inputs.sulcal_depth = morphometry_files['sulcal_depth'] + inputnode.inputs.sulcal_curv = morphometry_files['sulcal_curv'] + inputnode.inputs.cortical_thickness = morphometry_files['cortical_thickness'] + inputnode.inputs.cortical_thickness_corr = morphometry_files['cortical_thickness_corr'] + inputnode.inputs.myelin = morphometry_files['myelin'] + inputnode.inputs.myelin_smoothed = morphometry_files['myelin_smoothed'] - workflow = Workflow(name=f"sub_{subject_id}_wf") + workflow = Workflow(name=f'sub_{subject_id}_wf') info_dict = get_preproc_pipeline_info( input_type=config.workflow.input_type, @@ -220,9 +220,9 @@ def init_single_subject_wf(subject_id: str): """ cw_str = ( - "*Connectome Workbench* [@marcus2011informatics], " - if config.workflow.file_format == "cifti" - else "" + '*Connectome Workbench* [@marcus2011informatics], ' + if config.workflow.file_format == 'cifti' + else '' ) workflow.__postdesc__ = f""" @@ -230,7 +230,7 @@ def init_single_subject_wf(subject_id: str): *AFNI* [@cox1996afni;@cox1997software],{cw_str} *ANTS* [@avants2009advanced], *TemplateFlow* version {templateflow.__version__} [@ciric2022templateflow], -*matplotlib* version {matplotlib.__version__} [@hunter2007matplotlib], +*matplotlib* version {mpl.__version__} [@hunter2007matplotlib], *Nibabel* version {nb.__version__} [@brett_matthew_2022_6658382], *Nilearn* version {nilearn.__version__} [@abraham2014machine], *numpy* version {np.__version__} [@harris2020array], @@ -252,33 +252,33 @@ def init_single_subject_wf(subject_id: str): summary = pe.Node( SubjectSummary(subject_id=subject_id, bold=preproc_files), - name="summary", + name='summary', ) about = pe.Node( - AboutSummary(version=__version__, command=" ".join(sys.argv)), - name="about", + AboutSummary(version=__version__, command=' '.join(sys.argv)), + name='about', ) ds_report_summary = pe.Node( DerivativesDataSink( source_file=preproc_files[0], - desc="summary", + desc='summary', ), - name="ds_report_summary", + name='ds_report_summary', ) ds_report_about = pe.Node( DerivativesDataSink( source_file=preproc_files[0], - desc="about", + desc='about', ), - name="ds_report_about", + name='ds_report_about', run_without_submitting=True, ) # Extract target volumetric space for T1w image - target_space = get_entity(subj_data["anat_to_template_xfm"], "to") + target_space = get_entity(subj_data['anat_to_template_xfm'], 'to') postprocess_anat_wf = init_postprocess_anat_wf( t1w_available=t1w_available, @@ -288,9 +288,9 @@ def init_single_subject_wf(subject_id: str): workflow.connect([ (inputnode, postprocess_anat_wf, [ - ("t1w", "inputnode.t1w"), - ("t2w", "inputnode.t2w"), - ("anat_to_template_xfm", "inputnode.anat_to_template_xfm"), + ('t1w', 'inputnode.t1w'), + ('t2w', 'inputnode.t2w'), 
+ ('anat_to_template_xfm', 'inputnode.anat_to_template_xfm'), ]), ]) # fmt:skip @@ -314,28 +314,28 @@ def init_single_subject_wf(subject_id: str): workflow.connect([ (inputnode, postprocess_surfaces_wf, [ - ("lh_pial_surf", "inputnode.lh_pial_surf"), - ("rh_pial_surf", "inputnode.rh_pial_surf"), - ("lh_wm_surf", "inputnode.lh_wm_surf"), - ("rh_wm_surf", "inputnode.rh_wm_surf"), - ("lh_subject_sphere", "inputnode.lh_subject_sphere"), - ("rh_subject_sphere", "inputnode.rh_subject_sphere"), - ("anat_to_template_xfm", "inputnode.anat_to_template_xfm"), - ("template_to_anat_xfm", "inputnode.template_to_anat_xfm"), + ('lh_pial_surf', 'inputnode.lh_pial_surf'), + ('rh_pial_surf', 'inputnode.rh_pial_surf'), + ('lh_wm_surf', 'inputnode.lh_wm_surf'), + ('rh_wm_surf', 'inputnode.rh_wm_surf'), + ('lh_subject_sphere', 'inputnode.lh_subject_sphere'), + ('rh_subject_sphere', 'inputnode.rh_subject_sphere'), + ('anat_to_template_xfm', 'inputnode.anat_to_template_xfm'), + ('template_to_anat_xfm', 'inputnode.template_to_anat_xfm'), ]), ]) # fmt:skip for morph_file in morph_file_types: workflow.connect([ - (inputnode, postprocess_surfaces_wf, [(morph_file, f"inputnode.{morph_file}")]), + (inputnode, postprocess_surfaces_wf, [(morph_file, f'inputnode.{morph_file}')]), ]) # fmt:skip if config.workflow.process_surfaces or standard_space_mesh: # Use standard-space structurals workflow.connect([ (postprocess_anat_wf, postprocess_surfaces_wf, [ - ("outputnode.t1w", "inputnode.t1w"), - ("outputnode.t2w", "inputnode.t2w"), + ('outputnode.t1w', 'inputnode.t1w'), + ('outputnode.t2w', 'inputnode.t2w'), ]), ]) # fmt:skip @@ -343,8 +343,8 @@ def init_single_subject_wf(subject_id: str): # Use native-space structurals workflow.connect([ (inputnode, postprocess_surfaces_wf, [ - ("t1w", "inputnode.t1w"), - ("t2w", "inputnode.t2w"), + ('t1w', 'inputnode.t1w'), + ('t2w', 'inputnode.t2w'), ]), ]) # fmt:skip @@ -357,18 +357,18 @@ def init_single_subject_wf(subject_id: str): for morph_file_type in morph_file_types: workflow.connect([ (inputnode, parcellate_surfaces_wf, [ - (morph_file_type, f"inputnode.{morph_file_type}"), + (morph_file_type, f'inputnode.{morph_file_type}'), ]), ]) # fmt:skip # Estimate head radius, if necessary # Need to warp the standard-space brain mask to the anatomical space to estimate head radius warp_brainmask = ApplyTransforms( - input_image=subj_data["anat_brainmask"], - transforms=[subj_data["template_to_anat_xfm"]], + input_image=subj_data['anat_brainmask'], + transforms=[subj_data['template_to_anat_xfm']], reference_image=subj_data[anat_mod], num_threads=2, - interpolation="GenericLabel", + interpolation='GenericLabel', input_image_type=3, dimension=3, ) @@ -394,23 +394,23 @@ def init_single_subject_wf(subject_id: str): n_task_runs = len(task_files) if config.workflow.combine_runs and (n_task_runs > 1): merge_elements = [ - "name_source", - "preprocessed_bold", - "motion_file", - "temporal_mask", - "denoised_bold", - "denoised_interpolated_bold", - "censored_denoised_bold", - "smoothed_denoised_bold", - "bold_mask", - "boldref", - "timeseries", - "timeseries_ciftis", + 'name_source', + 'preprocessed_bold', + 'motion_file', + 'temporal_mask', + 'denoised_bold', + 'denoised_interpolated_bold', + 'censored_denoised_bold', + 'smoothed_denoised_bold', + 'bold_mask', + 'boldref', + 'timeseries', + 'timeseries_ciftis', ] merge_dict = { io_name: pe.Node( niu.Merge(n_task_runs, no_flatten=True), - name=f"collect_{io_name}_{ent_set}", + name=f'collect_{io_name}_{ent_set}', ) for io_name in merge_elements } @@ 
-429,14 +429,14 @@ def init_single_subject_wf(subject_id: str): derivatives_datasets=config.execution.datasets, confound_spec=yaml.safe_load(config.execution.confounds_config.read_text()), ) - run_data["confounds"] = confounds_dict + run_data['confounds'] = confounds_dict else: - run_data["confounds"] = None + run_data['confounds'] = None post_scrubbing_duration = flag_bad_run( - motion_file=run_data["motion_file"], + motion_file=run_data['motion_file'], dummy_scans=config.workflow.dummy_scans, - TR=run_data["bold_metadata"]["RepetitionTime"], + TR=run_data['bold_metadata']['RepetitionTime'], motion_filter_type=config.workflow.motion_filter_type, motion_filter_order=config.workflow.motion_filter_order, band_stop_min=config.workflow.band_stop_min, @@ -449,10 +449,10 @@ def init_single_subject_wf(subject_id: str): post_scrubbing_duration < config.workflow.min_time ): LOGGER.warning( - f"Less than {config.workflow.min_time} seconds in " - f"{os.path.basename(bold_file)} survive " - f"high-motion outlier scrubbing ({post_scrubbing_duration}). " - "This run will not be processed." + f'Less than {config.workflow.min_time} seconds in ' + f'{os.path.basename(bold_file)} survive ' + f'high-motion outlier scrubbing ({post_scrubbing_duration}). ' + 'This run will not be processed.' ) continue @@ -462,7 +462,7 @@ def init_single_subject_wf(subject_id: str): exact_scans = calculate_exact_scans( exact_times=config.workflow.dcan_correlation_lengths, scan_length=post_scrubbing_duration, - t_r=run_data["bold_metadata"]["RepetitionTime"], + t_r=run_data['bold_metadata']['RepetitionTime'], bold_file=bold_file, ) @@ -474,82 +474,82 @@ def init_single_subject_wf(subject_id: str): t2w_available=t2w_available, n_runs=n_runs, exact_scans=exact_scans, - name=f"postprocess_{run_counter}_wf", + name=f'postprocess_{run_counter}_wf', ) run_counter += 1 workflow.connect([ (postprocess_anat_wf, postprocess_bold_wf, [ - ("outputnode.t1w", "inputnode.t1w"), - ("outputnode.t2w", "inputnode.t2w"), + ('outputnode.t1w', 'inputnode.t1w'), + ('outputnode.t2w', 'inputnode.t2w'), ]), ]) # fmt:skip - if (config.workflow.file_format == "cifti") and ( + if (config.workflow.file_format == 'cifti') and ( config.workflow.process_surfaces or (config.workflow.abcc_qc and mesh_available) ): workflow.connect([ (postprocess_surfaces_wf, postprocess_bold_wf, [ - ("outputnode.lh_midthickness", "inputnode.lh_midthickness"), - ("outputnode.rh_midthickness", "inputnode.rh_midthickness"), + ('outputnode.lh_midthickness', 'inputnode.lh_midthickness'), + ('outputnode.rh_midthickness', 'inputnode.rh_midthickness'), ]), ]) # fmt:skip if config.execution.atlases: workflow.connect([ (load_atlases_wf, postprocess_bold_wf, [ - ("outputnode.atlas_names", "inputnode.atlases"), - ("outputnode.atlas_files", "inputnode.atlas_files"), - ("outputnode.atlas_labels_files", "inputnode.atlas_labels_files"), + ('outputnode.atlas_names', 'inputnode.atlases'), + ('outputnode.atlas_files', 'inputnode.atlas_files'), + ('outputnode.atlas_labels_files', 'inputnode.atlas_labels_files'), ]), ]) # fmt:skip - if config.workflow.file_format == "nifti": + if config.workflow.file_format == 'nifti': workflow.connect([ (inputnode, postprocess_bold_wf, [ - ("anat_brainmask", "inputnode.anat_brainmask"), - ("template_to_anat_xfm", "inputnode.template_to_anat_xfm"), + ('anat_brainmask', 'inputnode.anat_brainmask'), + ('template_to_anat_xfm', 'inputnode.template_to_anat_xfm'), ]), ]) # fmt:skip # The post-processing workflow needs a native anatomical-space image as a reference 
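As a rough sketch of the min_time check above: a run is skipped when too little data would survive framewise-displacement censoring. The column name, thresholds, and file name below are illustrative, and the real flag_bad_run also applies the motion filter before recomputing FD:

    import pandas as pd

    TR = 0.8          # seconds, hypothetical
    fd_thresh = 0.3   # mm, hypothetical
    dummy_scans = 4
    min_time = 100    # seconds, hypothetical

    motion = pd.read_table('sub-01_task-rest_motion.tsv')
    fd = motion['framewise_displacement'].iloc[dummy_scans:]
    post_scrubbing_duration = (fd <= fd_thresh).sum() * TR
    run_is_bad = post_scrubbing_duration < min_time
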
workflow.connect([ - (inputnode, postprocess_bold_wf, [(anat_mod, "inputnode.anat_native")]), + (inputnode, postprocess_bold_wf, [(anat_mod, 'inputnode.anat_native')]), ]) # fmt:skip if config.workflow.combine_runs and (n_task_runs > 1): for io_name, node in merge_dict.items(): workflow.connect([ - (postprocess_bold_wf, node, [(f"outputnode.{io_name}", f"in{j_run + 1}")]), + (postprocess_bold_wf, node, [(f'outputnode.{io_name}', f'in{j_run + 1}')]), ]) # fmt:skip if config.workflow.combine_runs and (n_task_runs > 1): concatenate_data_wf = init_concatenate_data_wf( TR=TR, head_radius=head_radius, - name=f"concatenate_entity_set_{ent_set}_wf", + name=f'concatenate_entity_set_{ent_set}_wf', ) workflow.connect([ (inputnode, concatenate_data_wf, [ - ("anat_brainmask", "inputnode.anat_brainmask"), - ("template_to_anat_xfm", "inputnode.template_to_anat_xfm"), - (anat_mod, "inputnode.anat_native"), + ('anat_brainmask', 'inputnode.anat_brainmask'), + ('template_to_anat_xfm', 'inputnode.template_to_anat_xfm'), + (anat_mod, 'inputnode.anat_native'), ]), ]) # fmt:skip for io_name, node in merge_dict.items(): - workflow.connect([(node, concatenate_data_wf, [("out", f"inputnode.{io_name}")])]) + workflow.connect([(node, concatenate_data_wf, [('out', f'inputnode.{io_name}')])]) if run_counter == 0: raise RuntimeError( - f"No runs survived high-motion outlier scrubbing for subject {subject_id}. " - "Quitting workflow." + f'No runs survived high-motion outlier scrubbing for subject {subject_id}. ' + 'Quitting workflow.' ) workflow.connect([ - (summary, ds_report_summary, [("out_report", "in_file")]), - (about, ds_report_about, [("out_report", "in_file")]), + (summary, ds_report_summary, [('out_report', 'in_file')]), + (about, ds_report_about, [('out_report', 'in_file')]), ]) # fmt:skip return clean_datasinks(workflow) @@ -558,12 +558,12 @@ def init_single_subject_wf(subject_id: str): def clean_datasinks(workflow): """Clean DerivativesDataSinks in a workflow.""" for node in workflow.list_node_names(): - node_name = node.split(".")[-1] - if node_name.startswith("ds_"): - workflow.get_node(node).interface.out_path_base = "" + node_name = node.split('.')[-1] + if node_name.startswith('ds_'): + workflow.get_node(node).interface.out_path_base = '' workflow.get_node(node).interface.inputs.base_directory = config.execution.output_dir - if node_name.startswith("ds_report_"): - workflow.get_node(node).interface.inputs.datatype = "figures" + if node_name.startswith('ds_report_'): + workflow.get_node(node).interface.inputs.datatype = 'figures' return workflow diff --git a/xcp_d/workflows/bold/__init__.py b/xcp_d/workflows/bold/__init__.py index f202068cf..4bcd047ab 100644 --- a/xcp_d/workflows/bold/__init__.py +++ b/xcp_d/workflows/bold/__init__.py @@ -12,12 +12,12 @@ ) __all__ = [ - "cifti", - "concatenation", - "connectivity", - "metrics", - "nifti", - "outputs", - "plotting", - "postprocessing", + 'cifti', + 'concatenation', + 'connectivity', + 'metrics', + 'nifti', + 'outputs', + 'plotting', + 'postprocessing', ] diff --git a/xcp_d/workflows/bold/cifti.py b/xcp_d/workflows/bold/cifti.py index 04da7bd4e..0fae2c50e 100644 --- a/xcp_d/workflows/bold/cifti.py +++ b/xcp_d/workflows/bold/cifti.py @@ -25,7 +25,7 @@ init_prepare_confounds_wf, ) -LOGGER = logging.getLogger("nipype.workflow") +LOGGER = logging.getLogger('nipype.workflow') @fill_doc @@ -37,7 +37,7 @@ def init_postprocess_cifti_wf( t2w_available, n_runs, exact_scans, - name="cifti_postprocess_wf", + name='cifti_postprocess_wf', ): """Organize the cifti 
processing workflow. @@ -133,37 +133,37 @@ def init_postprocess_cifti_wf( dummy_scans = config.workflow.dummy_scans despike = config.workflow.despike - TR = run_data["bold_metadata"]["RepetitionTime"] + TR = run_data['bold_metadata']['RepetitionTime'] inputnode = pe.Node( niu.IdentityInterface( fields=[ - "bold_file", - "boldref", - "t1w", - "t2w", - "motion_file", - "motion_json", - "confounds_files", - "dummy_scans", + 'bold_file', + 'boldref', + 't1w', + 't2w', + 'motion_file', + 'motion_json', + 'confounds_files', + 'dummy_scans', # if parcellation is performed - "atlases", - "atlas_files", - "atlas_labels_files", + 'atlases', + 'atlas_files', + 'atlas_labels_files', # CIFTI only # for plotting, if the anatomical workflow was used - "lh_midthickness", - "rh_midthickness", + 'lh_midthickness', + 'rh_midthickness', ], ), - name="inputnode", + name='inputnode', ) inputnode.inputs.bold_file = bold_file - inputnode.inputs.boldref = run_data["boldref"] - inputnode.inputs.motion_file = run_data["motion_file"] - inputnode.inputs.motion_json = run_data["motion_json"] - inputnode.inputs.confounds_files = run_data["confounds"] + inputnode.inputs.boldref = run_data['boldref'] + inputnode.inputs.motion_file = run_data['motion_file'] + inputnode.inputs.motion_json = run_data['motion_json'] + inputnode.inputs.confounds_files = run_data['confounds'] inputnode.inputs.dummy_scans = dummy_scans workflow.__desc__ = f""" @@ -178,38 +178,38 @@ def init_postprocess_cifti_wf( outputnode = pe.Node( niu.IdentityInterface( fields=[ - "name_source", - "preprocessed_bold", - "motion_file", - "temporal_mask", - "denoised_bold", - "denoised_interpolated_bold", - "censored_denoised_bold", - "smoothed_denoised_bold", - "boldref", - "bold_mask", # will not be defined + 'name_source', + 'preprocessed_bold', + 'motion_file', + 'temporal_mask', + 'denoised_bold', + 'denoised_interpolated_bold', + 'censored_denoised_bold', + 'smoothed_denoised_bold', + 'boldref', + 'bold_mask', # will not be defined # if parcellation is performed - "timeseries", - "timeseries_ciftis", + 'timeseries', + 'timeseries_ciftis', ], ), - name="outputnode", + name='outputnode', ) mem_gbx = _create_mem_gb(bold_file) downcast_data = pe.Node( ConvertTo32(), - name="downcast_data", - mem_gb=mem_gbx["timeseries"], + name='downcast_data', + mem_gb=mem_gbx['timeseries'], ) workflow.connect([ (inputnode, outputnode, [ - ("bold_file", "name_source"), - ("boldref", "boldref"), + ('bold_file', 'name_source'), + ('boldref', 'boldref'), ]), - (inputnode, downcast_data, [("bold_file", "bold_file")]), + (inputnode, downcast_data, [('bold_file', 'bold_file')]), ]) # fmt:skip prepare_confounds_wf = init_prepare_confounds_wf( @@ -220,14 +220,14 @@ def init_postprocess_cifti_wf( workflow.connect([ (inputnode, prepare_confounds_wf, [ - ("bold_file", "inputnode.name_source"), - ("motion_file", "inputnode.motion_file"), - ("motion_json", "inputnode.motion_json"), - ("confounds_files", "inputnode.confounds_files"), + ('bold_file', 'inputnode.name_source'), + ('motion_file', 'inputnode.motion_file'), + ('motion_json', 'inputnode.motion_json'), + ('confounds_files', 'inputnode.confounds_files'), ]), - (downcast_data, prepare_confounds_wf, [("bold_file", "inputnode.preprocessed_bold")]), + (downcast_data, prepare_confounds_wf, [('bold_file', 'inputnode.preprocessed_bold')]), (prepare_confounds_wf, outputnode, [ - ("outputnode.preprocessed_bold", "preprocessed_bold"), + ('outputnode.preprocessed_bold', 'preprocessed_bold'), ]), ]) # fmt:skip @@ -235,13 +235,13 @@ def 
init_postprocess_cifti_wf( workflow.connect([ (prepare_confounds_wf, denoise_bold_wf, [ - ("outputnode.temporal_mask", "inputnode.temporal_mask"), - ("outputnode.confounds_tsv", "inputnode.confounds_tsv"), - ("outputnode.confounds_images", "inputnode.confounds_images"), + ('outputnode.temporal_mask', 'inputnode.temporal_mask'), + ('outputnode.confounds_tsv', 'inputnode.confounds_tsv'), + ('outputnode.confounds_images', 'inputnode.confounds_images'), ]), (denoise_bold_wf, outputnode, [ - ("outputnode.denoised_interpolated_bold", "denoised_interpolated_bold"), - ("outputnode.censored_denoised_bold", "censored_denoised_bold"), + ('outputnode.denoised_interpolated_bold', 'denoised_interpolated_bold'), + ('outputnode.censored_denoised_bold', 'censored_denoised_bold'), ]), ]) # fmt:skip @@ -250,17 +250,17 @@ def init_postprocess_cifti_wf( workflow.connect([ (prepare_confounds_wf, despike_wf, [ - ("outputnode.preprocessed_bold", "inputnode.bold_file"), + ('outputnode.preprocessed_bold', 'inputnode.bold_file'), ]), (despike_wf, denoise_bold_wf, [ - ("outputnode.bold_file", "inputnode.preprocessed_bold"), + ('outputnode.bold_file', 'inputnode.preprocessed_bold'), ]), ]) # fmt:skip else: workflow.connect([ (prepare_confounds_wf, denoise_bold_wf, [ - ("outputnode.preprocessed_bold", "inputnode.preprocessed_bold"), + ('outputnode.preprocessed_bold', 'inputnode.preprocessed_bold'), ]), ]) # fmt:skip @@ -269,14 +269,14 @@ def init_postprocess_cifti_wf( workflow.connect([ (inputnode, alff_wf, [ - ("lh_midthickness", "inputnode.lh_midthickness"), - ("rh_midthickness", "inputnode.rh_midthickness"), + ('lh_midthickness', 'inputnode.lh_midthickness'), + ('rh_midthickness', 'inputnode.rh_midthickness'), ]), (prepare_confounds_wf, alff_wf, [ - ("outputnode.temporal_mask", "inputnode.temporal_mask"), + ('outputnode.temporal_mask', 'inputnode.temporal_mask'), ]), (denoise_bold_wf, alff_wf, [ - ("outputnode.denoised_interpolated_bold", "inputnode.denoised_bold"), + ('outputnode.denoised_interpolated_bold', 'inputnode.denoised_bold'), ]), ]) # fmt:skip @@ -284,75 +284,75 @@ def init_postprocess_cifti_wf( workflow.connect([ (inputnode, reho_wf, [ - ("lh_midthickness", "inputnode.lh_midthickness"), - ("rh_midthickness", "inputnode.rh_midthickness"), + ('lh_midthickness', 'inputnode.lh_midthickness'), + ('rh_midthickness', 'inputnode.rh_midthickness'), ]), (denoise_bold_wf, reho_wf, [ - ("outputnode.censored_denoised_bold", "inputnode.denoised_bold"), + ('outputnode.censored_denoised_bold', 'inputnode.denoised_bold'), ]), ]) # fmt:skip qc_report_wf = init_qc_report_wf( TR=TR, head_radius=head_radius, - name="qc_report_wf", + name='qc_report_wf', ) workflow.connect([ - (inputnode, qc_report_wf, [("bold_file", "inputnode.name_source")]), + (inputnode, qc_report_wf, [('bold_file', 'inputnode.name_source')]), (prepare_confounds_wf, qc_report_wf, [ - ("outputnode.preprocessed_bold", "inputnode.preprocessed_bold"), - ("outputnode.dummy_scans", "inputnode.dummy_scans"), - ("outputnode.motion_file", "inputnode.motion_file"), - ("outputnode.temporal_mask", "inputnode.temporal_mask"), + ('outputnode.preprocessed_bold', 'inputnode.preprocessed_bold'), + ('outputnode.dummy_scans', 'inputnode.dummy_scans'), + ('outputnode.motion_file', 'inputnode.motion_file'), + ('outputnode.temporal_mask', 'inputnode.temporal_mask'), ]), (denoise_bold_wf, qc_report_wf, [ - ("outputnode.denoised_interpolated_bold", "inputnode.denoised_interpolated_bold"), - ("outputnode.censored_denoised_bold", "inputnode.censored_denoised_bold"), + 
('outputnode.denoised_interpolated_bold', 'inputnode.denoised_interpolated_bold'), + ('outputnode.censored_denoised_bold', 'inputnode.censored_denoised_bold'), ]), ]) # fmt:skip postproc_derivatives_wf = init_postproc_derivatives_wf( name_source=bold_file, - source_metadata=run_data["bold_metadata"], + source_metadata=run_data['bold_metadata'], exact_scans=exact_scans, ) workflow.connect([ (inputnode, postproc_derivatives_wf, [ - ("motion_file", "inputnode.preproc_confounds_file"), - ("atlas_files", "inputnode.atlas_files"), - ("atlases", "inputnode.atlas_names"), + ('motion_file', 'inputnode.preproc_confounds_file'), + ('atlas_files', 'inputnode.atlas_files'), + ('atlases', 'inputnode.atlas_names'), ]), (denoise_bold_wf, postproc_derivatives_wf, [ - ("outputnode.denoised_bold", "inputnode.denoised_bold"), - ("outputnode.smoothed_denoised_bold", "inputnode.smoothed_denoised_bold"), + ('outputnode.denoised_bold', 'inputnode.denoised_bold'), + ('outputnode.smoothed_denoised_bold', 'inputnode.smoothed_denoised_bold'), ]), - (qc_report_wf, postproc_derivatives_wf, [("outputnode.qc_file", "inputnode.qc_file")]), + (qc_report_wf, postproc_derivatives_wf, [('outputnode.qc_file', 'inputnode.qc_file')]), (prepare_confounds_wf, postproc_derivatives_wf, [ - ("outputnode.confounds_tsv", "inputnode.confounds_tsv"), - ("outputnode.confounds_metadata", "inputnode.confounds_metadata"), - ("outputnode.motion_file", "inputnode.motion_file"), - ("outputnode.motion_metadata", "inputnode.motion_metadata"), - ("outputnode.temporal_mask", "inputnode.temporal_mask"), - ("outputnode.temporal_mask_metadata", "inputnode.temporal_mask_metadata"), + ('outputnode.confounds_tsv', 'inputnode.confounds_tsv'), + ('outputnode.confounds_metadata', 'inputnode.confounds_metadata'), + ('outputnode.motion_file', 'inputnode.motion_file'), + ('outputnode.motion_metadata', 'inputnode.motion_metadata'), + ('outputnode.temporal_mask', 'inputnode.temporal_mask'), + ('outputnode.temporal_mask_metadata', 'inputnode.temporal_mask_metadata'), ]), - (reho_wf, postproc_derivatives_wf, [("outputnode.reho", "inputnode.reho")]), + (reho_wf, postproc_derivatives_wf, [('outputnode.reho', 'inputnode.reho')]), (postproc_derivatives_wf, outputnode, [ - ("outputnode.motion_file", "motion_file"), - ("outputnode.temporal_mask", "temporal_mask"), - ("outputnode.denoised_bold", "denoised_bold"), - ("outputnode.smoothed_denoised_bold", "smoothed_denoised_bold"), - ("outputnode.timeseries", "timeseries"), - ("outputnode.timeseries_ciftis", "timeseries_ciftis"), + ('outputnode.motion_file', 'motion_file'), + ('outputnode.temporal_mask', 'temporal_mask'), + ('outputnode.denoised_bold', 'denoised_bold'), + ('outputnode.smoothed_denoised_bold', 'smoothed_denoised_bold'), + ('outputnode.timeseries', 'timeseries'), + ('outputnode.timeseries_ciftis', 'timeseries_ciftis'), ]), ]) # fmt:skip if bandpass_filter: workflow.connect([ (alff_wf, postproc_derivatives_wf, [ - ("outputnode.alff", "inputnode.alff"), - ("outputnode.smoothed_alff", "inputnode.smoothed_alff"), + ('outputnode.alff', 'inputnode.alff'), + ('outputnode.smoothed_alff', 'inputnode.smoothed_alff'), ]), ]) # fmt:skip @@ -364,45 +364,45 @@ def init_postprocess_cifti_wf( workflow.connect([ (inputnode, connectivity_wf, [ - ("bold_file", "inputnode.name_source"), - ("atlases", "inputnode.atlases"), - ("atlas_files", "inputnode.atlas_files"), - ("atlas_labels_files", "inputnode.atlas_labels_files"), - ("lh_midthickness", "inputnode.lh_midthickness"), - ("rh_midthickness", "inputnode.rh_midthickness"), + 
('bold_file', 'inputnode.name_source'), + ('atlases', 'inputnode.atlases'), + ('atlas_files', 'inputnode.atlas_files'), + ('atlas_labels_files', 'inputnode.atlas_labels_files'), + ('lh_midthickness', 'inputnode.lh_midthickness'), + ('rh_midthickness', 'inputnode.rh_midthickness'), ]), (prepare_confounds_wf, connectivity_wf, [ - ("outputnode.temporal_mask", "inputnode.temporal_mask"), + ('outputnode.temporal_mask', 'inputnode.temporal_mask'), ]), (denoise_bold_wf, connectivity_wf, [ - ("outputnode.denoised_bold", "inputnode.denoised_bold"), + ('outputnode.denoised_bold', 'inputnode.denoised_bold'), ]), - (reho_wf, connectivity_wf, [("outputnode.reho", "inputnode.reho")]), + (reho_wf, connectivity_wf, [('outputnode.reho', 'inputnode.reho')]), (connectivity_wf, postproc_derivatives_wf, [ - ("outputnode.coverage_ciftis", "inputnode.coverage_ciftis"), - ("outputnode.timeseries_ciftis", "inputnode.timeseries_ciftis"), - ("outputnode.correlation_ciftis", "inputnode.correlation_ciftis"), - ("outputnode.correlation_ciftis_exact", "inputnode.correlation_ciftis_exact"), - ("outputnode.coverage", "inputnode.coverage"), - ("outputnode.timeseries", "inputnode.timeseries"), - ("outputnode.correlations", "inputnode.correlations"), - ("outputnode.correlations_exact", "inputnode.correlations_exact"), - ("outputnode.parcellated_reho", "inputnode.parcellated_reho"), + ('outputnode.coverage_ciftis', 'inputnode.coverage_ciftis'), + ('outputnode.timeseries_ciftis', 'inputnode.timeseries_ciftis'), + ('outputnode.correlation_ciftis', 'inputnode.correlation_ciftis'), + ('outputnode.correlation_ciftis_exact', 'inputnode.correlation_ciftis_exact'), + ('outputnode.coverage', 'inputnode.coverage'), + ('outputnode.timeseries', 'inputnode.timeseries'), + ('outputnode.correlations', 'inputnode.correlations'), + ('outputnode.correlations_exact', 'inputnode.correlations_exact'), + ('outputnode.parcellated_reho', 'inputnode.parcellated_reho'), ]), ]) # fmt:skip if bandpass_filter: workflow.connect([ - (alff_wf, connectivity_wf, [("outputnode.alff", "inputnode.alff")]), + (alff_wf, connectivity_wf, [('outputnode.alff', 'inputnode.alff')]), (connectivity_wf, postproc_derivatives_wf, [ - ("outputnode.parcellated_alff", "inputnode.parcellated_alff"), + ('outputnode.parcellated_alff', 'inputnode.parcellated_alff'), ]), ]) # fmt:skip if config.workflow.abcc_qc: # executive summary workflow execsummary_functional_plots_wf = init_execsummary_functional_plots_wf( - preproc_nifti=run_data["nifti_file"], + preproc_nifti=run_data['nifti_file'], t1w_available=t1w_available, t2w_available=t2w_available, mem_gb=mem_gbx, @@ -412,9 +412,9 @@ def init_postprocess_cifti_wf( # Use inputnode for executive summary instead of downcast_data because T1w is name # source. (inputnode, execsummary_functional_plots_wf, [ - ("boldref", "inputnode.boldref"), - ("t1w", "inputnode.t1w"), - ("t2w", "inputnode.t2w"), + ('boldref', 'inputnode.boldref'), + ('t1w', 'inputnode.t1w'), + ('t2w', 'inputnode.t2w'), ]), ]) # fmt:skip diff --git a/xcp_d/workflows/bold/concatenation.py b/xcp_d/workflows/bold/concatenation.py index 4d750958f..e4742bcbe 100644 --- a/xcp_d/workflows/bold/concatenation.py +++ b/xcp_d/workflows/bold/concatenation.py @@ -18,7 +18,7 @@ @fill_doc -def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): +def init_concatenate_data_wf(TR, head_radius, name='concatenate_data_wf'): """Concatenate postprocessed data across runs and directions. 
Workflow Graph @@ -86,69 +86,69 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): inputnode = pe.Node( niu.IdentityInterface( fields=[ - "name_source", - "preprocessed_bold", - "motion_file", - "temporal_mask", - "denoised_bold", - "denoised_interpolated_bold", - "censored_denoised_bold", - "smoothed_denoised_bold", - "bold_mask", # only for niftis, from postproc workflows - "boldref", # only for niftis, from postproc workflows - "anat_native", # only for niftis, from data collection - "anat_brainmask", # only for niftis, from data collection - "template_to_anat_xfm", # only for niftis, from data collection - "timeseries", - "timeseries_ciftis", # only for ciftis, from postproc workflows + 'name_source', + 'preprocessed_bold', + 'motion_file', + 'temporal_mask', + 'denoised_bold', + 'denoised_interpolated_bold', + 'censored_denoised_bold', + 'smoothed_denoised_bold', + 'bold_mask', # only for niftis, from postproc workflows + 'boldref', # only for niftis, from postproc workflows + 'anat_native', # only for niftis, from data collection + 'anat_brainmask', # only for niftis, from data collection + 'template_to_anat_xfm', # only for niftis, from data collection + 'timeseries', + 'timeseries_ciftis', # only for ciftis, from postproc workflows ], ), - name="inputnode", + name='inputnode', ) clean_name_source = pe.Node( CleanNameSource(), - name="clean_name_source", + name='clean_name_source', ) - workflow.connect([(inputnode, clean_name_source, [("name_source", "name_source")])]) + workflow.connect([(inputnode, clean_name_source, [('name_source', 'name_source')])]) filter_runs = pe.Node( FilterOutFailedRuns(), - name="filter_runs", + name='filter_runs', ) workflow.connect([ (inputnode, filter_runs, [ - ("preprocessed_bold", "preprocessed_bold"), - ("motion_file", "motion_file"), - ("temporal_mask", "temporal_mask"), - ("denoised_bold", "denoised_bold"), - ("denoised_interpolated_bold", "denoised_interpolated_bold"), - ("censored_denoised_bold", "censored_denoised_bold"), - ("smoothed_denoised_bold", "smoothed_denoised_bold"), - ("bold_mask", "bold_mask"), - ("boldref", "boldref"), - ("timeseries", "timeseries"), - ("timeseries_ciftis", "timeseries_ciftis"), + ('preprocessed_bold', 'preprocessed_bold'), + ('motion_file', 'motion_file'), + ('temporal_mask', 'temporal_mask'), + ('denoised_bold', 'denoised_bold'), + ('denoised_interpolated_bold', 'denoised_interpolated_bold'), + ('censored_denoised_bold', 'censored_denoised_bold'), + ('smoothed_denoised_bold', 'smoothed_denoised_bold'), + ('bold_mask', 'bold_mask'), + ('boldref', 'boldref'), + ('timeseries', 'timeseries'), + ('timeseries_ciftis', 'timeseries_ciftis'), ]) ]) # fmt:skip concatenate_inputs = pe.Node( ConcatenateInputs(), - name="concatenate_inputs", + name='concatenate_inputs', ) workflow.connect([ (filter_runs, concatenate_inputs, [ - ("preprocessed_bold", "preprocessed_bold"), - ("motion_file", "motion_file"), - ("temporal_mask", "temporal_mask"), - ("denoised_bold", "denoised_bold"), - ("denoised_interpolated_bold", "denoised_interpolated_bold"), - ("censored_denoised_bold", "censored_denoised_bold"), - ("smoothed_denoised_bold", "smoothed_denoised_bold"), - ("timeseries", "timeseries"), - ("timeseries_ciftis", "timeseries_ciftis"), + ('preprocessed_bold', 'preprocessed_bold'), + ('motion_file', 'motion_file'), + ('temporal_mask', 'temporal_mask'), + ('denoised_bold', 'denoised_bold'), + ('denoised_interpolated_bold', 'denoised_interpolated_bold'), + ('censored_denoised_bold', 'censored_denoised_bold'), 
+ ('smoothed_denoised_bold', 'smoothed_denoised_bold'), + ('timeseries', 'timeseries'), + ('timeseries_ciftis', 'timeseries_ciftis'), ]), ]) # fmt:skip @@ -156,29 +156,29 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): qc_report_wf = init_qc_report_wf( TR=TR, head_radius=head_radius, - name="concat_qc_report_wf", + name='concat_qc_report_wf', ) qc_report_wf.inputs.inputnode.dummy_scans = 0 workflow.connect([ (inputnode, qc_report_wf, [ - ("template_to_anat_xfm", "inputnode.template_to_anat_xfm"), - ("anat_native", "inputnode.anat"), - ("anat_brainmask", "inputnode.anat_brainmask"), + ('template_to_anat_xfm', 'inputnode.template_to_anat_xfm'), + ('anat_native', 'inputnode.anat'), + ('anat_brainmask', 'inputnode.anat_brainmask'), ]), - (clean_name_source, qc_report_wf, [("name_source", "inputnode.name_source")]), + (clean_name_source, qc_report_wf, [('name_source', 'inputnode.name_source')]), (filter_runs, qc_report_wf, [ # nifti-only inputs - (("bold_mask", _select_first), "inputnode.bold_mask"), - (("boldref", _select_first), "inputnode.boldref"), + (('bold_mask', _select_first), 'inputnode.bold_mask'), + (('boldref', _select_first), 'inputnode.boldref'), ]), (concatenate_inputs, qc_report_wf, [ - ("preprocessed_bold", "inputnode.preprocessed_bold"), - ("denoised_interpolated_bold", "inputnode.denoised_interpolated_bold"), - ("censored_denoised_bold", "inputnode.censored_denoised_bold"), - ("motion_file", "inputnode.motion_file"), - ("temporal_mask", "inputnode.temporal_mask"), - ("run_index", "inputnode.run_index"), + ('preprocessed_bold', 'inputnode.preprocessed_bold'), + ('denoised_interpolated_bold', 'inputnode.denoised_interpolated_bold'), + ('censored_denoised_bold', 'inputnode.censored_denoised_bold'), + ('motion_file', 'inputnode.motion_file'), + ('temporal_mask', 'inputnode.temporal_mask'), + ('run_index', 'inputnode.run_index'), ]), ]) # fmt:skip @@ -188,25 +188,25 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): dataset_links=config.execution.dataset_links, out_dir=str(output_dir), ), - name="motion_src", + name='motion_src', run_without_submitting=True, ) - workflow.connect([(filter_runs, motion_src, [("motion_file", "in1")])]) + workflow.connect([(filter_runs, motion_src, [('motion_file', 'in1')])]) ds_motion_file = pe.Node( DerivativesDataSink( - dismiss_entities=["segmentation", "den", "res", "space", "cohort", "desc"], - suffix="motion", - extension=".tsv", + dismiss_entities=['segmentation', 'den', 'res', 'space', 'cohort', 'desc'], + suffix='motion', + extension='.tsv', ), - name="ds_motion_file", + name='ds_motion_file', run_without_submitting=True, mem_gb=1, ) workflow.connect([ - (clean_name_source, ds_motion_file, [("name_source", "source_file")]), - (concatenate_inputs, ds_motion_file, [("motion_file", "in_file")]), - (motion_src, ds_motion_file, [("out", "Sources")]), + (clean_name_source, ds_motion_file, [('name_source', 'source_file')]), + (concatenate_inputs, ds_motion_file, [('motion_file', 'in_file')]), + (motion_src, ds_motion_file, [('out', 'Sources')]), ]) # fmt:skip if fd_thresh > 0: @@ -216,36 +216,36 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): dataset_links=config.execution.dataset_links, out_dir=str(output_dir), ), - name="temporal_mask_src", + name='temporal_mask_src', run_without_submitting=True, ) - workflow.connect([(filter_runs, temporal_mask_src, [("temporal_mask", "in1")])]) + workflow.connect([(filter_runs, temporal_mask_src, [('temporal_mask', 'in1')])]) 
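For orientation, the concatenated temporal mask written out below is essentially the per-run outlier TSVs stacked in order, with the per-run lengths retained so run boundaries can be recovered (roughly what ConcatenateInputs does; file names here are hypothetical):

    import pandas as pd

    run_masks = [
        pd.read_table('sub-01_task-rest_run-1_outliers.tsv'),
        pd.read_table('sub-01_task-rest_run-2_outliers.tsv'),
    ]
    run_index = [len(df) for df in run_masks]  # volumes per run, marks run boundaries
    concatenated = pd.concat(run_masks, axis=0, ignore_index=True)
    concatenated.to_csv('sub-01_task-rest_outliers.tsv', sep='\t', index=False)
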
ds_temporal_mask = pe.Node( DerivativesDataSink( - dismiss_entities=["segmentation", "den", "res", "space", "cohort", "desc"], - suffix="outliers", - extension=".tsv", + dismiss_entities=['segmentation', 'den', 'res', 'space', 'cohort', 'desc'], + suffix='outliers', + extension='.tsv', ), - name="ds_temporal_mask", + name='ds_temporal_mask', run_without_submitting=True, mem_gb=1, ) workflow.connect([ - (clean_name_source, ds_temporal_mask, [("name_source", "source_file")]), - (concatenate_inputs, ds_temporal_mask, [("temporal_mask", "in_file")]), - (temporal_mask_src, ds_temporal_mask, [("out", "Sources")]), + (clean_name_source, ds_temporal_mask, [('name_source', 'source_file')]), + (concatenate_inputs, ds_temporal_mask, [('temporal_mask', 'in_file')]), + (temporal_mask_src, ds_temporal_mask, [('out', 'Sources')]), ]) # fmt:skip - if file_format == "cifti": + if file_format == 'cifti': ds_denoised_bold = pe.Node( DerivativesDataSink( - dismiss_entities=["den"], - desc="denoised", - den="91k", - extension=".dtseries.nii", + dismiss_entities=['den'], + desc='denoised', + den='91k', + extension='.dtseries.nii', ), - name="ds_denoised_bold", + name='ds_denoised_bold', run_without_submitting=True, mem_gb=2, ) @@ -253,12 +253,12 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): if smoothing: ds_smoothed_denoised_bold = pe.Node( DerivativesDataSink( - dismiss_entities=["den"], - desc="denoisedSmoothed", - den="91k", - extension=".dtseries.nii", + dismiss_entities=['den'], + desc='denoisedSmoothed', + den='91k', + extension='.dtseries.nii', ), - name="ds_smoothed_denoised_bold", + name='ds_smoothed_denoised_bold', run_without_submitting=True, mem_gb=2, ) @@ -266,11 +266,11 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): else: ds_denoised_bold = pe.Node( DerivativesDataSink( - desc="denoised", - extension=".nii.gz", + desc='denoised', + extension='.nii.gz', compression=True, ), - name="ds_denoised_bold", + name='ds_denoised_bold', run_without_submitting=True, mem_gb=2, ) @@ -278,11 +278,11 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): if smoothing: ds_smoothed_denoised_bold = pe.Node( DerivativesDataSink( - desc="denoisedSmoothed", - extension=".nii.gz", + desc='denoisedSmoothed', + extension='.nii.gz', compression=True, ), - name="ds_smoothed_denoised_bold", + name='ds_smoothed_denoised_bold', run_without_submitting=True, mem_gb=2, ) @@ -293,15 +293,15 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): dataset_links=config.execution.dataset_links, out_dir=str(output_dir), ), - name="denoised_bold_src", + name='denoised_bold_src', run_without_submitting=True, ) - workflow.connect([(filter_runs, denoised_bold_src, [("denoised_bold", "in1")])]) + workflow.connect([(filter_runs, denoised_bold_src, [('denoised_bold', 'in1')])]) workflow.connect([ - (clean_name_source, ds_denoised_bold, [("name_source", "source_file")]), - (concatenate_inputs, ds_denoised_bold, [("denoised_bold", "in_file")]), - (denoised_bold_src, ds_denoised_bold, [("out", "Sources")]), + (clean_name_source, ds_denoised_bold, [('name_source', 'source_file')]), + (concatenate_inputs, ds_denoised_bold, [('denoised_bold', 'in_file')]), + (denoised_bold_src, ds_denoised_bold, [('out', 'Sources')]), ]) # fmt:skip if smoothing: @@ -311,16 +311,16 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): dataset_links=config.execution.dataset_links, out_dir=str(output_dir), ), - name="smoothed_src", + 
name='smoothed_src', run_without_submitting=True, ) workflow.connect([ - (filter_runs, smoothed_src, [("smoothed_denoised_bold", "in1")]), - (clean_name_source, ds_smoothed_denoised_bold, [("name_source", "source_file")]), + (filter_runs, smoothed_src, [('smoothed_denoised_bold', 'in1')]), + (clean_name_source, ds_smoothed_denoised_bold, [('name_source', 'source_file')]), (concatenate_inputs, ds_smoothed_denoised_bold, [ - ("smoothed_denoised_bold", "in_file"), + ('smoothed_denoised_bold', 'in_file'), ]), - (smoothed_src, ds_smoothed_denoised_bold, [("out", "Sources")]), + (smoothed_src, ds_smoothed_denoised_bold, [('out', 'Sources')]), ]) # fmt:skip # Functional connectivity outputs @@ -333,46 +333,46 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): ), run_without_submitting=True, mem_gb=1, - name="make_timeseries_dict", - iterfield=["in1"], + name='make_timeseries_dict', + iterfield=['in1'], ) workflow.connect([ - (filter_runs, make_timeseries_dict, [(("timeseries", _transpose_lol), "in1")]), + (filter_runs, make_timeseries_dict, [(('timeseries', _transpose_lol), 'in1')]), ]) # fmt:skip ds_timeseries = pe.MapNode( DerivativesDataSink( - dismiss_entities=["desc", "den", "res"], - statistic="mean", - suffix="timeseries", - extension=".tsv", + dismiss_entities=['desc', 'den', 'res'], + statistic='mean', + suffix='timeseries', + extension='.tsv', # Metadata - SamplingFrequency="TR", + SamplingFrequency='TR', ), - name="ds_timeseries", + name='ds_timeseries', run_without_submitting=True, mem_gb=1, - iterfield=["segmentation", "in_file", "meta_dict"], + iterfield=['segmentation', 'in_file', 'meta_dict'], ) ds_timeseries.inputs.segmentation = atlases workflow.connect([ - (clean_name_source, ds_timeseries, [("name_source", "source_file")]), - (concatenate_inputs, ds_timeseries, [("timeseries", "in_file")]), - (make_timeseries_dict, ds_timeseries, [("metadata", "meta_dict")]), + (clean_name_source, ds_timeseries, [('name_source', 'source_file')]), + (concatenate_inputs, ds_timeseries, [('timeseries', 'in_file')]), + (make_timeseries_dict, ds_timeseries, [('metadata', 'meta_dict')]), ]) # fmt:skip correlate_timeseries = pe.MapNode( TSVConnect(), run_without_submitting=True, mem_gb=1, - name="correlate_timeseries", - iterfield=["timeseries"], + name='correlate_timeseries', + iterfield=['timeseries'], ) workflow.connect([ (concatenate_inputs, correlate_timeseries, [ - ("timeseries", "timeseries"), - ("temporal_mask", "temporal_mask"), + ('timeseries', 'timeseries'), + ('temporal_mask', 'temporal_mask'), ]), ]) # fmt:skip @@ -384,32 +384,32 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): ), run_without_submitting=True, mem_gb=1, - name="make_correlations_dict", - iterfield=["in1"], + name='make_correlations_dict', + iterfield=['in1'], ) - workflow.connect([(ds_timeseries, make_correlations_dict, [("out_file", "in1")])]) + workflow.connect([(ds_timeseries, make_correlations_dict, [('out_file', 'in1')])]) ds_correlations = pe.MapNode( DerivativesDataSink( - dismiss_entities=["desc"], - statistic="pearsoncorrelation", - suffix="relmat", - extension=".tsv", + dismiss_entities=['desc'], + statistic='pearsoncorrelation', + suffix='relmat', + extension='.tsv', ), - name="ds_correlations", + name='ds_correlations', run_without_submitting=True, mem_gb=1, - iterfield=["segmentation", "in_file", "meta_dict"], + iterfield=['segmentation', 'in_file', 'meta_dict'], ) ds_correlations.inputs.segmentation = atlases workflow.connect([ - (clean_name_source, 
ds_correlations, [("name_source", "source_file")]), - (correlate_timeseries, ds_correlations, [("correlations", "in_file")]), - (make_correlations_dict, ds_correlations, [("metadata", "meta_dict")]), + (clean_name_source, ds_correlations, [('name_source', 'source_file')]), + (correlate_timeseries, ds_correlations, [('correlations', 'in_file')]), + (make_correlations_dict, ds_correlations, [('metadata', 'meta_dict')]), ]) # fmt:skip - if file_format == "cifti": + if file_format == 'cifti': cifti_ts_src = pe.MapNode( BIDSURI( numinputs=1, @@ -418,33 +418,33 @@ def init_concatenate_data_wf(TR, head_radius, name="concatenate_data_wf"): ), run_without_submitting=True, mem_gb=1, - name="cifti_ts_src", - iterfield=["in1"], + name='cifti_ts_src', + iterfield=['in1'], ) workflow.connect([ - (filter_runs, cifti_ts_src, [(("timeseries_ciftis", _transpose_lol), "in1")]), + (filter_runs, cifti_ts_src, [(('timeseries_ciftis', _transpose_lol), 'in1')]), ]) # fmt:skip ds_cifti_ts = pe.MapNode( DerivativesDataSink( check_hdr=False, - dismiss_entities=["desc", "den"], - den="91k", - statistic="mean", - suffix="timeseries", - extension=".ptseries.nii", + dismiss_entities=['desc', 'den'], + den='91k', + statistic='mean', + suffix='timeseries', + extension='.ptseries.nii', ), - name="ds_cifti_ts", + name='ds_cifti_ts', run_without_submitting=True, mem_gb=1, - iterfield=["segmentation", "in_file", "meta_dict"], + iterfield=['segmentation', 'in_file', 'meta_dict'], ) ds_cifti_ts.inputs.segmentation = atlases workflow.connect([ - (clean_name_source, ds_cifti_ts, [("name_source", "source_file")]), - (concatenate_inputs, ds_cifti_ts, [("timeseries_ciftis", "in_file")]), - (cifti_ts_src, ds_cifti_ts, [("metadata", "meta_dict")]), + (clean_name_source, ds_cifti_ts, [('name_source', 'source_file')]), + (concatenate_inputs, ds_cifti_ts, [('timeseries_ciftis', 'in_file')]), + (cifti_ts_src, ds_cifti_ts, [('metadata', 'meta_dict')]), ]) # fmt:skip return workflow diff --git a/xcp_d/workflows/bold/connectivity.py b/xcp_d/workflows/bold/connectivity.py index 9d5db3638..49da4c53f 100644 --- a/xcp_d/workflows/bold/connectivity.py +++ b/xcp_d/workflows/bold/connectivity.py @@ -13,11 +13,11 @@ from xcp_d.utils.doc import fill_doc from xcp_d.workflows.parcellation import init_parcellate_cifti_wf -LOGGER = logging.getLogger("nipype.workflow") +LOGGER = logging.getLogger('nipype.workflow') @fill_doc -def init_functional_connectivity_nifti_wf(mem_gb, name="connectivity_wf"): +def init_functional_connectivity_nifti_wf(mem_gb, name='connectivity_wf'): """Extract BOLD time series and compute functional connectivity. 
Workflow Graph @@ -84,131 +84,131 @@ def init_functional_connectivity_nifti_wf(mem_gb, name="connectivity_wf"): inputnode = pe.Node( niu.IdentityInterface( fields=[ - "name_source", - "bold_mask", - "denoised_bold", - "temporal_mask", - "alff", # may be Undefined - "reho", - "atlases", - "atlas_files", - "atlas_labels_files", + 'name_source', + 'bold_mask', + 'denoised_bold', + 'temporal_mask', + 'alff', # may be Undefined + 'reho', + 'atlases', + 'atlas_files', + 'atlas_labels_files', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( niu.IdentityInterface( fields=[ - "coverage", - "timeseries", - "correlations", - "correlations_exact", - "parcellated_alff", - "parcellated_reho", + 'coverage', + 'timeseries', + 'correlations', + 'correlations_exact', + 'parcellated_alff', + 'parcellated_reho', ], ), - name="outputnode", + name='outputnode', ) parcellate_data = pe.MapNode( NiftiParcellate(min_coverage=min_coverage), - name="parcellate_data", - iterfield=["atlas", "atlas_labels"], - mem_gb=mem_gb["timeseries"], + name='parcellate_data', + iterfield=['atlas', 'atlas_labels'], + mem_gb=mem_gb['timeseries'], ) workflow.connect([ (inputnode, parcellate_data, [ - ("denoised_bold", "filtered_file"), - ("bold_mask", "mask"), - ("atlas_files", "atlas"), - ("atlas_labels_files", "atlas_labels"), + ('denoised_bold', 'filtered_file'), + ('bold_mask', 'mask'), + ('atlas_files', 'atlas'), + ('atlas_labels_files', 'atlas_labels'), ]), (parcellate_data, outputnode, [ - ("coverage", "coverage"), - ("timeseries", "timeseries"), + ('coverage', 'coverage'), + ('timeseries', 'timeseries'), ]), ]) # fmt:skip if config.workflow.output_correlations: functional_connectivity = pe.MapNode( TSVConnect(), - name="functional_connectivity", - iterfield=["timeseries"], - mem_gb=mem_gb["timeseries"], + name='functional_connectivity', + iterfield=['timeseries'], + mem_gb=mem_gb['timeseries'], ) workflow.connect([ - (inputnode, functional_connectivity, [("temporal_mask", "temporal_mask")]), - (parcellate_data, functional_connectivity, [("timeseries", "timeseries")]), + (inputnode, functional_connectivity, [('temporal_mask', 'temporal_mask')]), + (parcellate_data, functional_connectivity, [('timeseries', 'timeseries')]), (functional_connectivity, outputnode, [ - ("correlations", "correlations"), - ("correlations_exact", "correlations_exact"), + ('correlations', 'correlations'), + ('correlations_exact', 'correlations_exact'), ]), ]) # fmt:skip connectivity_plot = pe.Node( ConnectPlot(), - name="connectivity_plot", - mem_gb=mem_gb["resampled"], + name='connectivity_plot', + mem_gb=mem_gb['resampled'], ) workflow.connect([ (inputnode, connectivity_plot, [ - ("atlases", "atlases"), - ("atlas_labels_files", "atlas_tsvs"), + ('atlases', 'atlases'), + ('atlas_labels_files', 'atlas_tsvs'), ]), - (functional_connectivity, connectivity_plot, [("correlations", "correlations_tsv")]), + (functional_connectivity, connectivity_plot, [('correlations', 'correlations_tsv')]), ]) # fmt:skip ds_report_connectivity_plot = pe.Node( DerivativesDataSink( - desc="connectivityplot", + desc='connectivityplot', ), - name="ds_report_connectivity_plot", + name='ds_report_connectivity_plot', run_without_submitting=False, ) workflow.connect([ - (inputnode, ds_report_connectivity_plot, [("name_source", "source_file")]), - (connectivity_plot, ds_report_connectivity_plot, [("connectplot", "in_file")]), + (inputnode, ds_report_connectivity_plot, [('name_source', 'source_file')]), + (connectivity_plot, ds_report_connectivity_plot, 
[('connectplot', 'in_file')]), ]) # fmt:skip parcellate_reho = pe.MapNode( NiftiParcellate(min_coverage=min_coverage), - name="parcellate_reho", - iterfield=["atlas", "atlas_labels"], - mem_gb=mem_gb["resampled"], + name='parcellate_reho', + iterfield=['atlas', 'atlas_labels'], + mem_gb=mem_gb['resampled'], ) workflow.connect([ (inputnode, parcellate_reho, [ - ("reho", "filtered_file"), - ("bold_mask", "mask"), - ("atlas_files", "atlas"), - ("atlas_labels_files", "atlas_labels"), + ('reho', 'filtered_file'), + ('bold_mask', 'mask'), + ('atlas_files', 'atlas'), + ('atlas_labels_files', 'atlas_labels'), ]), - (parcellate_reho, outputnode, [("timeseries", "parcellated_reho")]), + (parcellate_reho, outputnode, [('timeseries', 'parcellated_reho')]), ]) # fmt:skip if bandpass_filter: parcellate_alff = pe.MapNode( NiftiParcellate(min_coverage=min_coverage), - name="parcellate_alff", - iterfield=["atlas", "atlas_labels"], - mem_gb=mem_gb["resampled"], + name='parcellate_alff', + iterfield=['atlas', 'atlas_labels'], + mem_gb=mem_gb['resampled'], ) workflow.connect([ (inputnode, parcellate_alff, [ - ("alff", "filtered_file"), - ("bold_mask", "mask"), - ("atlas_files", "atlas"), - ("atlas_labels_files", "atlas_labels"), + ('alff', 'filtered_file'), + ('bold_mask', 'mask'), + ('atlas_files', 'atlas'), + ('atlas_labels_files', 'atlas_labels'), ]), - (parcellate_alff, outputnode, [("timeseries", "parcellated_alff")]), + (parcellate_alff, outputnode, [('timeseries', 'parcellated_alff')]), ]) # fmt:skip return workflow @fill_doc -def init_functional_connectivity_cifti_wf(mem_gb, exact_scans, name="connectivity_wf"): +def init_functional_connectivity_cifti_wf(mem_gb, exact_scans, name='connectivity_wf'): """Extract CIFTI time series. This will parcellate the CIFTI file using the selected atlases and compute functional @@ -292,116 +292,116 @@ def init_functional_connectivity_cifti_wf(mem_gb, exact_scans, name="connectivit inputnode = pe.Node( niu.IdentityInterface( fields=[ - "name_source", - "denoised_bold", - "temporal_mask", - "alff", # may be Undefined - "reho", - "atlases", - "atlas_files", - "atlas_labels_files", + 'name_source', + 'denoised_bold', + 'temporal_mask', + 'alff', # may be Undefined + 'reho', + 'atlases', + 'atlas_files', + 'atlas_labels_files', # for plotting, if the anatomical workflow is enabled - "lh_midthickness", - "rh_midthickness", + 'lh_midthickness', + 'rh_midthickness', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( niu.IdentityInterface( fields=[ - "coverage_ciftis", - "timeseries_ciftis", - "correlation_ciftis", - "correlation_ciftis_exact", - "coverage", - "timeseries", - "correlations", - "correlations_exact", - "parcellated_alff", - "parcellated_reho", + 'coverage_ciftis', + 'timeseries_ciftis', + 'correlation_ciftis', + 'correlation_ciftis_exact', + 'coverage', + 'timeseries', + 'correlations', + 'correlations_exact', + 'parcellated_alff', + 'parcellated_reho', ], ), - name="outputnode", + name='outputnode', ) parcellate_bold_wf = init_parcellate_cifti_wf( mem_gb=mem_gb, compute_mask=True, - name="parcellate_bold_wf", + name='parcellate_bold_wf', ) workflow.connect([ (inputnode, parcellate_bold_wf, [ - ("denoised_bold", "inputnode.in_file"), - ("atlas_files", "inputnode.atlas_files"), - ("atlas_labels_files", "inputnode.atlas_labels_files"), + ('denoised_bold', 'inputnode.in_file'), + ('atlas_files', 'inputnode.atlas_files'), + ('atlas_labels_files', 'inputnode.atlas_labels_files'), ]), (parcellate_bold_wf, outputnode, [ - 
("outputnode.parcellated_cifti", "timeseries_ciftis"), - ("outputnode.parcellated_tsv", "timeseries"), - ("outputnode.coverage_cifti", "coverage_ciftis"), - ("outputnode.coverage_tsv", "coverage"), + ('outputnode.parcellated_cifti', 'timeseries_ciftis'), + ('outputnode.parcellated_tsv', 'timeseries'), + ('outputnode.coverage_cifti', 'coverage_ciftis'), + ('outputnode.coverage_tsv', 'coverage'), ]), ]) # fmt:skip # Filter out subcortical atlases - cortical_atlases = select_atlases(atlases=config.execution.atlases, subset="cortical") + cortical_atlases = select_atlases(atlases=config.execution.atlases, subset='cortical') if cortical_atlases: plot_coverage = pe.Node( PlotCiftiParcellation( - base_desc="coverage", + base_desc='coverage', cortical_atlases=cortical_atlases, vmin=0, vmax=1, ), - name="plot_coverage", - mem_gb=mem_gb["resampled"], + name='plot_coverage', + mem_gb=mem_gb['resampled'], ) workflow.connect([ (inputnode, plot_coverage, [ - ("atlases", "labels"), - ("lh_midthickness", "lh_underlay"), - ("rh_midthickness", "rh_underlay"), + ('atlases', 'labels'), + ('lh_midthickness', 'lh_underlay'), + ('rh_midthickness', 'rh_underlay'), ]), - (parcellate_bold_wf, plot_coverage, [("outputnode.coverage_cifti", "in_files")]), + (parcellate_bold_wf, plot_coverage, [('outputnode.coverage_cifti', 'in_files')]), ]) # fmt:skip ds_report_coverage = pe.Node( DerivativesDataSink(), - name="ds_report_coverage", + name='ds_report_coverage', run_without_submitting=False, ) workflow.connect([ - (inputnode, ds_report_coverage, [("name_source", "source_file")]), + (inputnode, ds_report_coverage, [('name_source', 'source_file')]), (plot_coverage, ds_report_coverage, [ - ("out_file", "in_file"), - ("desc", "desc"), + ('out_file', 'in_file'), + ('desc', 'desc'), ]), ]) # fmt:skip # Reduce the CIFTI before calculating correlations parcellated_bold_buffer = pe.MapNode( - niu.IdentityInterface(fields=["parcellated_cifti"]), - name="parcellated_bold_buffer", - iterfield=["parcellated_cifti"], + niu.IdentityInterface(fields=['parcellated_cifti']), + name='parcellated_bold_buffer', + iterfield=['parcellated_cifti'], ) if config.workflow.output_interpolated: # If we want interpolated time series, the parcellated CIFTI will have interpolated values, # but the correlation matrices should only include low-motion volumes. 
remove_outliers = pe.MapNode( - Censor(column="framewise_displacement"), - name="remove_outliers", - iterfield=["in_file"], + Censor(column='framewise_displacement'), + name='remove_outliers', + iterfield=['in_file'], ) workflow.connect([ - (inputnode, remove_outliers, [("temporal_mask", "temporal_mask")]), - (parcellate_bold_wf, remove_outliers, [("outputnode.parcellated_cifti", "in_file")]), - (remove_outliers, parcellated_bold_buffer, [("out_file", "parcellated_cifti")]), + (inputnode, remove_outliers, [('temporal_mask', 'temporal_mask')]), + (parcellate_bold_wf, remove_outliers, [('outputnode.parcellated_cifti', 'in_file')]), + (remove_outliers, parcellated_bold_buffer, [('out_file', 'parcellated_cifti')]), ]) # fmt:skip else: workflow.connect([ (parcellate_bold_wf, parcellated_bold_buffer, [ - ("outputnode.parcellated_cifti", "parcellated_cifti"), + ('outputnode.parcellated_cifti', 'parcellated_cifti'), ]), ]) # fmt:skip @@ -409,154 +409,154 @@ def init_functional_connectivity_cifti_wf(mem_gb, exact_scans, name="connectivit # Correlate the parcellated data correlate_bold = pe.MapNode( CiftiCorrelation(num_threads=config.nipype.omp_nthreads), - name="correlate_bold", - iterfield=["in_file"], + name='correlate_bold', + iterfield=['in_file'], n_procs=config.nipype.omp_nthreads, ) workflow.connect([ - (parcellated_bold_buffer, correlate_bold, [("parcellated_cifti", "in_file")]), - (correlate_bold, outputnode, [("out_file", "correlation_ciftis")]), + (parcellated_bold_buffer, correlate_bold, [('parcellated_cifti', 'in_file')]), + (correlate_bold, outputnode, [('out_file', 'correlation_ciftis')]), ]) # fmt:skip # Convert correlation pconn file to TSV dconn_to_tsv = pe.MapNode( CiftiToTSV(), - name="dconn_to_tsv", - iterfield=["in_file", "atlas_labels"], + name='dconn_to_tsv', + iterfield=['in_file', 'atlas_labels'], ) workflow.connect([ - (inputnode, dconn_to_tsv, [("atlas_labels_files", "atlas_labels")]), - (correlate_bold, dconn_to_tsv, [("out_file", "in_file")]), - (dconn_to_tsv, outputnode, [("out_file", "correlations")]), + (inputnode, dconn_to_tsv, [('atlas_labels_files', 'atlas_labels')]), + (correlate_bold, dconn_to_tsv, [('out_file', 'in_file')]), + (dconn_to_tsv, outputnode, [('out_file', 'correlations')]), ]) # fmt:skip # Plot up to four connectivity matrices connectivity_plot = pe.Node( ConnectPlot(), - name="connectivity_plot", - mem_gb=mem_gb["resampled"], + name='connectivity_plot', + mem_gb=mem_gb['resampled'], ) workflow.connect([ (inputnode, connectivity_plot, [ - ("atlases", "atlases"), - ("atlas_labels_files", "atlas_tsvs"), + ('atlases', 'atlases'), + ('atlas_labels_files', 'atlas_tsvs'), ]), - (dconn_to_tsv, connectivity_plot, [("out_file", "correlations_tsv")]), + (dconn_to_tsv, connectivity_plot, [('out_file', 'correlations_tsv')]), ]) # fmt:skip ds_report_connectivity = pe.Node( DerivativesDataSink( - desc="connectivityplot", + desc='connectivityplot', ), - name="ds_report_connectivity", + name='ds_report_connectivity', run_without_submitting=False, mem_gb=0.1, ) workflow.connect([ - (inputnode, ds_report_connectivity, [("name_source", "source_file")]), - (connectivity_plot, ds_report_connectivity, [("connectplot", "in_file")]), + (inputnode, ds_report_connectivity, [('name_source', 'source_file')]), + (connectivity_plot, ds_report_connectivity, [('connectplot', 'in_file')]), ]) # fmt:skip # Perform exact-time correlations if exact_scans: collect_exact_ciftis = pe.Node( niu.Merge(len(exact_scans)), - name="collect_exact_ciftis", + name='collect_exact_ciftis', ) 
workflow.connect([ - (collect_exact_ciftis, outputnode, [("out", "correlation_ciftis_exact")]), + (collect_exact_ciftis, outputnode, [('out', 'correlation_ciftis_exact')]), ]) # fmt:skip collect_exact_tsvs = pe.Node( niu.Merge(len(exact_scans)), - name="collect_exact_tsvs", + name='collect_exact_tsvs', ) - workflow.connect([(collect_exact_tsvs, outputnode, [("out", "correlations_exact")])]) + workflow.connect([(collect_exact_tsvs, outputnode, [('out', 'correlations_exact')])]) for i_exact_scan, exact_scan in enumerate(exact_scans): reduce_exact_bold = pe.MapNode( - Censor(column=f"exact_{exact_scan}"), - name=f"reduce_bold_{exact_scan}volumes", - iterfield=["in_file"], + Censor(column=f'exact_{exact_scan}'), + name=f'reduce_bold_{exact_scan}volumes', + iterfield=['in_file'], ) workflow.connect([ - (inputnode, reduce_exact_bold, [("temporal_mask", "temporal_mask")]), - (parcellated_bold_buffer, reduce_exact_bold, [("parcellated_cifti", "in_file")]), + (inputnode, reduce_exact_bold, [('temporal_mask', 'temporal_mask')]), + (parcellated_bold_buffer, reduce_exact_bold, [('parcellated_cifti', 'in_file')]), ]) # fmt:skip # Correlate the parcellated data correlate_exact_bold = pe.MapNode( CiftiCorrelation(), - name=f"correlate_bold_{exact_scan}volumes", - iterfield=["in_file"], + name=f'correlate_bold_{exact_scan}volumes', + iterfield=['in_file'], ) workflow.connect([ - (reduce_exact_bold, correlate_exact_bold, [("out_file", "in_file")]), + (reduce_exact_bold, correlate_exact_bold, [('out_file', 'in_file')]), (correlate_exact_bold, collect_exact_ciftis, [ - ("out_file", f"in{i_exact_scan + 1}"), + ('out_file', f'in{i_exact_scan + 1}'), ]), ]) # fmt:skip # Convert correlation pconn file to TSV exact_dconn_to_tsv = pe.MapNode( CiftiToTSV(), - name=f"dconn_to_tsv_{exact_scan}volumes", - iterfield=["in_file", "atlas_labels"], + name=f'dconn_to_tsv_{exact_scan}volumes', + iterfield=['in_file', 'atlas_labels'], ) workflow.connect([ - (inputnode, exact_dconn_to_tsv, [("atlas_labels_files", "atlas_labels")]), - (correlate_exact_bold, exact_dconn_to_tsv, [("out_file", "in_file")]), - (exact_dconn_to_tsv, collect_exact_tsvs, [("out_file", f"in{i_exact_scan + 1}")]), + (inputnode, exact_dconn_to_tsv, [('atlas_labels_files', 'atlas_labels')]), + (correlate_exact_bold, exact_dconn_to_tsv, [('out_file', 'in_file')]), + (exact_dconn_to_tsv, collect_exact_tsvs, [('out_file', f'in{i_exact_scan + 1}')]), ]) # fmt:skip parcellate_reho_wf = init_parcellate_cifti_wf( mem_gb=mem_gb, compute_mask=False, - name="parcellate_reho_wf", + name='parcellate_reho_wf', ) workflow.connect([ (inputnode, parcellate_reho_wf, [ - ("reho", "inputnode.in_file"), - ("atlas_files", "inputnode.atlas_files"), - ("atlas_labels_files", "inputnode.atlas_labels_files"), + ('reho', 'inputnode.in_file'), + ('atlas_files', 'inputnode.atlas_files'), + ('atlas_labels_files', 'inputnode.atlas_labels_files'), ]), (parcellate_bold_wf, parcellate_reho_wf, [ - ("outputnode.vertexwise_coverage", "inputnode.vertexwise_coverage"), - ("outputnode.coverage_cifti", "inputnode.coverage_cifti"), + ('outputnode.vertexwise_coverage', 'inputnode.vertexwise_coverage'), + ('outputnode.coverage_cifti', 'inputnode.coverage_cifti'), ]), - (parcellate_reho_wf, outputnode, [("outputnode.parcellated_tsv", "parcellated_reho")]), + (parcellate_reho_wf, outputnode, [('outputnode.parcellated_tsv', 'parcellated_reho')]), ]) # fmt:skip if cortical_atlases: plot_parcellated_reho = pe.Node( PlotCiftiParcellation( - base_desc="reho", + base_desc='reho', 
cortical_atlases=cortical_atlases, ), - name="plot_parcellated_reho", - mem_gb=mem_gb["resampled"], + name='plot_parcellated_reho', + mem_gb=mem_gb['resampled'], ) workflow.connect([ (inputnode, plot_parcellated_reho, [ - ("atlases", "labels"), - ("lh_midthickness", "lh_underlay"), - ("rh_midthickness", "rh_underlay"), + ('atlases', 'labels'), + ('lh_midthickness', 'lh_underlay'), + ('rh_midthickness', 'rh_underlay'), ]), (parcellate_reho_wf, plot_parcellated_reho, [ - ("outputnode.parcellated_cifti", "in_files"), + ('outputnode.parcellated_cifti', 'in_files'), ]), ]) # fmt:skip ds_report_reho = pe.Node( DerivativesDataSink(), - name="ds_report_reho", + name='ds_report_reho', run_without_submitting=False, ) workflow.connect([ - (inputnode, ds_report_reho, [("name_source", "source_file")]), + (inputnode, ds_report_reho, [('name_source', 'source_file')]), (plot_parcellated_reho, ds_report_reho, [ - ("desc", "desc"), - ("out_file", "in_file"), + ('desc', 'desc'), + ('out_file', 'in_file'), ]), ]) # fmt:skip @@ -564,51 +564,51 @@ def init_functional_connectivity_cifti_wf(mem_gb, exact_scans, name="connectivit parcellate_alff_wf = init_parcellate_cifti_wf( mem_gb=mem_gb, compute_mask=False, - name="parcellate_alff_wf", + name='parcellate_alff_wf', ) workflow.connect([ (inputnode, parcellate_alff_wf, [ - ("alff", "inputnode.in_file"), - ("atlas_files", "inputnode.atlas_files"), - ("atlas_labels_files", "inputnode.atlas_labels_files"), + ('alff', 'inputnode.in_file'), + ('atlas_files', 'inputnode.atlas_files'), + ('atlas_labels_files', 'inputnode.atlas_labels_files'), ]), (parcellate_bold_wf, parcellate_alff_wf, [ - ("outputnode.vertexwise_coverage", "inputnode.vertexwise_coverage"), - ("outputnode.coverage_cifti", "inputnode.coverage_cifti"), + ('outputnode.vertexwise_coverage', 'inputnode.vertexwise_coverage'), + ('outputnode.coverage_cifti', 'inputnode.coverage_cifti'), ]), - (parcellate_alff_wf, outputnode, [("outputnode.parcellated_tsv", "parcellated_alff")]), + (parcellate_alff_wf, outputnode, [('outputnode.parcellated_tsv', 'parcellated_alff')]), ]) # fmt:skip if cortical_atlases: plot_parcellated_alff = pe.Node( PlotCiftiParcellation( - base_desc="alff", + base_desc='alff', cortical_atlases=cortical_atlases, ), - name="plot_parcellated_alff", - mem_gb=mem_gb["resampled"], + name='plot_parcellated_alff', + mem_gb=mem_gb['resampled'], ) workflow.connect([ (inputnode, plot_parcellated_alff, [ - ("atlases", "labels"), - ("lh_midthickness", "lh_underlay"), - ("rh_midthickness", "rh_underlay"), + ('atlases', 'labels'), + ('lh_midthickness', 'lh_underlay'), + ('rh_midthickness', 'rh_underlay'), ]), (parcellate_alff_wf, plot_parcellated_alff, [ - ("outputnode.parcellated_cifti", "in_files"), + ('outputnode.parcellated_cifti', 'in_files'), ]), ]) # fmt:skip ds_report_alff = pe.Node( DerivativesDataSink(), - name="ds_report_alff", + name='ds_report_alff', run_without_submitting=False, ) workflow.connect([ - (inputnode, ds_report_alff, [("name_source", "source_file")]), + (inputnode, ds_report_alff, [('name_source', 'source_file')]), (plot_parcellated_alff, ds_report_alff, [ - ("out_file", "in_file"), - ("desc", "desc"), + ('out_file', 'in_file'), + ('desc', 'desc'), ]), ]) # fmt:skip diff --git a/xcp_d/workflows/bold/metrics.py b/xcp_d/workflows/bold/metrics.py index c764c4049..28e6e5785 100644 --- a/xcp_d/workflows/bold/metrics.py +++ b/xcp_d/workflows/bold/metrics.py @@ -28,7 +28,7 @@ def init_alff_wf( name_source, TR, mem_gb, - name="alff_wf", + name='alff_wf', ): """Compute alff for both nifti 
and cifti. @@ -101,11 +101,11 @@ def init_alff_wf( smoothing = config.workflow.smoothing file_format = config.workflow.file_format - periodogram_desc = "" + periodogram_desc = '' if fd_thresh > 0: periodogram_desc = ( - " using the Lomb-Scargle periodogram " - "[@lomb1976least;@scargle1982studies;@townsend2010fast;@taylorlomb]" + ' using the Lomb-Scargle periodogram ' + '[@lomb1976least;@scargle1982studies;@townsend2010fast;@taylorlomb]' ) workflow.__desc__ = f""" \ @@ -123,19 +123,19 @@ def init_alff_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "denoised_bold", - "bold_mask", - "temporal_mask", + 'denoised_bold', + 'bold_mask', + 'temporal_mask', # only used for CIFTI data if the anatomical workflow is enabled - "lh_midthickness", - "rh_midthickness", + 'lh_midthickness', + 'rh_midthickness', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( - niu.IdentityInterface(fields=["alff", "smoothed_alff"]), - name="outputnode", + niu.IdentityInterface(fields=['alff', 'smoothed_alff']), + name='outputnode', ) # compute alff @@ -146,17 +146,17 @@ def init_alff_wf( high_pass=high_pass, n_threads=config.nipype.omp_nthreads, ), - mem_gb=mem_gb["resampled"], - name="alff_compt", + mem_gb=mem_gb['resampled'], + name='alff_compt', n_procs=config.nipype.omp_nthreads, ) workflow.connect([ (inputnode, alff_compt, [ - ("denoised_bold", "in_file"), - ("bold_mask", "mask"), - ("temporal_mask", "temporal_mask"), + ('denoised_bold', 'in_file'), + ('bold_mask', 'mask'), + ('temporal_mask', 'temporal_mask'), ]), - (alff_compt, outputnode, [("alff", "alff")]) + (alff_compt, outputnode, [('alff', 'alff')]) ]) # fmt:skip # Plot the ALFF map @@ -164,89 +164,89 @@ def init_alff_wf( DerivativesDataSink( source_file=name_source, ), - name="ds_report_alff", + name='ds_report_alff', run_without_submitting=False, ) - if file_format == "cifti": + if file_format == 'cifti': alff_plot = pe.Node( - PlotDenseCifti(base_desc="alff"), - name="alff_plot", + PlotDenseCifti(base_desc='alff'), + name='alff_plot', ) workflow.connect([ (inputnode, alff_plot, [ - ("lh_midthickness", "lh_underlay"), - ("rh_midthickness", "rh_underlay"), + ('lh_midthickness', 'lh_underlay'), + ('rh_midthickness', 'rh_underlay'), ]), - (alff_plot, ds_report_alff, [("desc", "desc")]), + (alff_plot, ds_report_alff, [('desc', 'desc')]), ]) # fmt:skip else: alff_plot = pe.Node( PlotNifti(name_source=name_source), - name="alff_plot", + name='alff_plot', ) - ds_report_alff.inputs.desc = "alffVolumetricPlot" + ds_report_alff.inputs.desc = 'alffVolumetricPlot' workflow.connect([ - (alff_compt, alff_plot, [("alff", "in_file")]), - (alff_plot, ds_report_alff, [("out_file", "in_file")]), + (alff_compt, alff_plot, [('alff', 'in_file')]), + (alff_plot, ds_report_alff, [('out_file', 'in_file')]), ]) # fmt:skip if smoothing: # If we want to smooth - if file_format == "nifti": + if file_format == 'nifti': workflow.__desc__ = workflow.__desc__ + ( - " The ALFF maps were smoothed with Nilearn using a Gaussian kernel " - f"(FWHM={str(smoothing)} mm)." + ' The ALFF maps were smoothed with Nilearn using a Gaussian kernel ' + f'(FWHM={str(smoothing)} mm).' 
) # Smooth via Nilearn smooth_data = pe.Node( Smooth(fwhm=smoothing), - name="niftismoothing", + name='niftismoothing', ) workflow.connect([ - (alff_compt, smooth_data, [("alff", "in_file")]), - (smooth_data, outputnode, [("out_file", "smoothed_alff")]) + (alff_compt, smooth_data, [('alff', 'in_file')]), + (smooth_data, outputnode, [('out_file', 'smoothed_alff')]) ]) # fmt:skip else: # If cifti workflow.__desc__ = workflow.__desc__ + ( - " The ALFF maps were smoothed with the Connectome Workbench using a Gaussian " - f"kernel (FWHM={str(smoothing)} mm)." + ' The ALFF maps were smoothed with the Connectome Workbench using a Gaussian ' + f'kernel (FWHM={str(smoothing)} mm).' ) # Smooth via Connectome Workbench sigma_lx = fwhm2sigma(smoothing) # Convert fwhm to standard deviation # Get templates for each hemisphere lh_midthickness = str( - get_template("fsLR", hemi="L", suffix="sphere", density="32k")[0] + get_template('fsLR', hemi='L', suffix='sphere', density='32k')[0] ) rh_midthickness = str( - get_template("fsLR", hemi="R", suffix="sphere", density="32k")[0] + get_template('fsLR', hemi='R', suffix='sphere', density='32k')[0] ) smooth_data = pe.Node( CiftiSmooth( sigma_surf=sigma_lx, sigma_vol=sigma_lx, - direction="COLUMN", + direction='COLUMN', right_surf=rh_midthickness, left_surf=lh_midthickness, num_threads=config.nipype.omp_nthreads, ), - name="ciftismoothing", - mem_gb=mem_gb["resampled"], + name='ciftismoothing', + mem_gb=mem_gb['resampled'], n_procs=config.nipype.omp_nthreads, ) # Always check the intent code in CiftiSmooth's output file fix_cifti_intent = pe.Node( FixCiftiIntent(), - name="fix_cifti_intent", - mem_gb=mem_gb["resampled"], + name='fix_cifti_intent', + mem_gb=mem_gb['resampled'], ) workflow.connect([ - (alff_compt, smooth_data, [("alff", "in_file")]), - (smooth_data, fix_cifti_intent, [("out_file", "in_file")]), - (fix_cifti_intent, outputnode, [("out_file", "smoothed_alff")]), + (alff_compt, smooth_data, [('alff', 'in_file')]), + (smooth_data, fix_cifti_intent, [('out_file', 'in_file')]), + (fix_cifti_intent, outputnode, [('out_file', 'smoothed_alff')]), ]) # fmt:skip return workflow @@ -256,7 +256,7 @@ def init_alff_wf( def init_reho_cifti_wf( name_source, mem_gb, - name="cifti_reho_wf", + name='cifti_reho_wf', ): """Compute ReHo from surface+volumetric (CIFTI) data. 
@@ -314,81 +314,81 @@ def init_reho_cifti_wf( """ inputnode = pe.Node( - niu.IdentityInterface(fields=["denoised_bold", "lh_midthickness", "rh_midthickness"]), - name="inputnode", + niu.IdentityInterface(fields=['denoised_bold', 'lh_midthickness', 'rh_midthickness']), + name='inputnode', ) outputnode = pe.Node( - niu.IdentityInterface(fields=["reho"]), - name="outputnode", + niu.IdentityInterface(fields=['reho']), + name='outputnode', ) # Extract left and right hemispheres via Connectome Workbench lh_surf = pe.Node( CiftiSeparateMetric( - metric="CORTEX_LEFT", - direction="COLUMN", + metric='CORTEX_LEFT', + direction='COLUMN', num_threads=config.nipype.omp_nthreads, ), - name="separate_lh", - mem_gb=mem_gb["resampled"], + name='separate_lh', + mem_gb=mem_gb['resampled'], n_procs=config.nipype.omp_nthreads, ) rh_surf = pe.Node( CiftiSeparateMetric( - metric="CORTEX_RIGHT", - direction="COLUMN", + metric='CORTEX_RIGHT', + direction='COLUMN', num_threads=config.nipype.omp_nthreads, ), - name="separate_rh", - mem_gb=mem_gb["resampled"], + name='separate_rh', + mem_gb=mem_gb['resampled'], n_procs=config.nipype.omp_nthreads, ) subcortical_nifti = pe.Node( CiftiSeparateVolumeAll( - direction="COLUMN", + direction='COLUMN', num_threads=config.nipype.omp_nthreads, ), - name="separate_subcortical", - mem_gb=mem_gb["resampled"], + name='separate_subcortical', + mem_gb=mem_gb['resampled'], n_procs=config.nipype.omp_nthreads, ) - # Calculate the reho by hemipshere + # Calculate the reho by hemisphere lh_reho = pe.Node( - SurfaceReHo(surf_hemi="L"), - name="reho_lh", - mem_gb=mem_gb["resampled"], + SurfaceReHo(surf_hemi='L'), + name='reho_lh', + mem_gb=mem_gb['resampled'], ) rh_reho = pe.Node( - SurfaceReHo(surf_hemi="R"), - name="reho_rh", - mem_gb=mem_gb["resampled"], + SurfaceReHo(surf_hemi='R'), + name='reho_rh', + mem_gb=mem_gb['resampled'], ) subcortical_reho = pe.Node( - ReHoNamePatch(neighborhood="vertices"), - name="reho_subcortical", - mem_gb=mem_gb["resampled"], + ReHoNamePatch(neighborhood='vertices'), + name='reho_subcortical', + mem_gb=mem_gb['resampled'], ) # Merge the surfaces and subcortical structures back into a CIFTI merge_cifti = pe.Node( CiftiCreateDenseFromTemplate( from_cropped=True, - out_file="reho.dscalar.nii", + out_file='reho.dscalar.nii', num_threads=config.nipype.omp_nthreads, ), - name="merge_cifti", - mem_gb=mem_gb["resampled"], + name='merge_cifti', + mem_gb=mem_gb['resampled'], n_procs=config.nipype.omp_nthreads, ) reho_plot = pe.Node( - PlotDenseCifti(base_desc="reho"), - name="reho_cifti_plot", + PlotDenseCifti(base_desc='reho'), + name='reho_cifti_plot', ) workflow.connect([ (inputnode, reho_plot, [ - ("lh_midthickness", "lh_underlay"), - ("rh_midthickness", "rh_underlay"), + ('lh_midthickness', 'lh_underlay'), + ('rh_midthickness', 'rh_underlay'), ]), ]) # fmt:skip @@ -396,27 +396,27 @@ def init_reho_cifti_wf( DerivativesDataSink( source_file=name_source, ), - name="ds_report_reho", + name='ds_report_reho', run_without_submitting=False, ) # Write out results workflow.connect([ - (inputnode, lh_surf, [("denoised_bold", "in_file")]), - (inputnode, rh_surf, [("denoised_bold", "in_file")]), - (inputnode, subcortical_nifti, [("denoised_bold", "in_file")]), - (lh_surf, lh_reho, [("out_file", "surf_bold")]), - (rh_surf, rh_reho, [("out_file", "surf_bold")]), - (subcortical_nifti, subcortical_reho, [("out_file", "in_file")]), - (inputnode, merge_cifti, [("denoised_bold", "template_cifti")]), - (lh_reho, merge_cifti, [("surf_gii", "left_metric")]), - (rh_reho, merge_cifti, 
[("surf_gii", "right_metric")]), - (subcortical_reho, merge_cifti, [("out_file", "volume_all")]), - (merge_cifti, outputnode, [("out_file", "reho")]), - (merge_cifti, reho_plot, [("out_file", "in_file")]), + (inputnode, lh_surf, [('denoised_bold', 'in_file')]), + (inputnode, rh_surf, [('denoised_bold', 'in_file')]), + (inputnode, subcortical_nifti, [('denoised_bold', 'in_file')]), + (lh_surf, lh_reho, [('out_file', 'surf_bold')]), + (rh_surf, rh_reho, [('out_file', 'surf_bold')]), + (subcortical_nifti, subcortical_reho, [('out_file', 'in_file')]), + (inputnode, merge_cifti, [('denoised_bold', 'template_cifti')]), + (lh_reho, merge_cifti, [('surf_gii', 'left_metric')]), + (rh_reho, merge_cifti, [('surf_gii', 'right_metric')]), + (subcortical_reho, merge_cifti, [('out_file', 'volume_all')]), + (merge_cifti, outputnode, [('out_file', 'reho')]), + (merge_cifti, reho_plot, [('out_file', 'in_file')]), (reho_plot, ds_report_reho, [ - ("out_file", "in_file"), - ("desc", "desc"), + ('out_file', 'in_file'), + ('desc', 'desc'), ]), ]) # fmt:skip @@ -424,7 +424,7 @@ def init_reho_cifti_wf( @fill_doc -def init_reho_nifti_wf(name_source, mem_gb, name="reho_nifti_wf"): +def init_reho_nifti_wf(name_source, mem_gb, name='reho_nifti_wf'): """Compute ReHo on volumetric (NIFTI) data. Workflow Graph @@ -472,42 +472,42 @@ def init_reho_nifti_wf(name_source, mem_gb, name="reho_nifti_wf"): """ inputnode = pe.Node( - niu.IdentityInterface(fields=["denoised_bold", "bold_mask"]), - name="inputnode", + niu.IdentityInterface(fields=['denoised_bold', 'bold_mask']), + name='inputnode', ) - outputnode = pe.Node(niu.IdentityInterface(fields=["reho"]), name="outputnode") + outputnode = pe.Node(niu.IdentityInterface(fields=['reho']), name='outputnode') # Run AFNI'S 3DReHo on the data compute_reho = pe.Node( - ReHoNamePatch(neighborhood="vertices"), - name="reho_3d", - mem_gb=mem_gb["resampled"], + ReHoNamePatch(neighborhood='vertices'), + name='reho_3d', + mem_gb=mem_gb['resampled'], n_procs=1, ) # Get the svg reho_plot = pe.Node( PlotNifti(name_source=name_source), - name="reho_nifti_plot", + name='reho_nifti_plot', ) ds_report_reho = pe.Node( DerivativesDataSink( source_file=name_source, - desc="rehoVolumetricPlot", + desc='rehoVolumetricPlot', ), - name="ds_report_reho", + name='ds_report_reho', run_without_submitting=False, ) # Write the results out workflow.connect([ (inputnode, compute_reho, [ - ("denoised_bold", "in_file"), - ("bold_mask", "mask_file"), + ('denoised_bold', 'in_file'), + ('bold_mask', 'mask_file'), ]), - (compute_reho, outputnode, [("out_file", "reho")]), - (compute_reho, reho_plot, [("out_file", "in_file")]), - (reho_plot, ds_report_reho, [("out_file", "in_file")]), + (compute_reho, outputnode, [('out_file', 'reho')]), + (compute_reho, reho_plot, [('out_file', 'in_file')]), + (reho_plot, ds_report_reho, [('out_file', 'in_file')]), ]) # fmt:skip return workflow diff --git a/xcp_d/workflows/bold/nifti.py b/xcp_d/workflows/bold/nifti.py index 8162dac5f..cc8953882 100644 --- a/xcp_d/workflows/bold/nifti.py +++ b/xcp_d/workflows/bold/nifti.py @@ -25,7 +25,7 @@ init_prepare_confounds_wf, ) -LOGGER = logging.getLogger("nipype.workflow") +LOGGER = logging.getLogger('nipype.workflow') @fill_doc @@ -37,7 +37,7 @@ def init_postprocess_nifti_wf( t2w_available, n_runs, exact_scans, - name="bold_postprocess_wf", + name='bold_postprocess_wf', ): """Organize the bold processing workflow. 
@@ -144,39 +144,39 @@ def init_postprocess_nifti_wf( dummy_scans = config.workflow.dummy_scans despike = config.workflow.despike - TR = run_data["bold_metadata"]["RepetitionTime"] + TR = run_data['bold_metadata']['RepetitionTime'] inputnode = pe.Node( niu.IdentityInterface( fields=[ - "bold_file", - "boldref", - "t1w", - "t2w", - "motion_file", - "motion_json", - "confounds_files", - "dummy_scans", + 'bold_file', + 'boldref', + 't1w', + 't2w', + 'motion_file', + 'motion_json', + 'confounds_files', + 'dummy_scans', # if parcellation is performed - "atlases", - "atlas_files", - "atlas_labels_files", + 'atlases', + 'atlas_files', + 'atlas_labels_files', # NIfTI only - "bold_mask", - "template_to_anat_xfm", - "anat_native", - "anat_brainmask", + 'bold_mask', + 'template_to_anat_xfm', + 'anat_native', + 'anat_brainmask', ], ), - name="inputnode", + name='inputnode', ) inputnode.inputs.bold_file = bold_file - inputnode.inputs.boldref = run_data["boldref"] - inputnode.inputs.bold_mask = run_data["boldmask"] - inputnode.inputs.motion_file = run_data["motion_file"] - inputnode.inputs.motion_json = run_data["motion_json"] - inputnode.inputs.confounds_files = run_data["confounds"] + inputnode.inputs.boldref = run_data['boldref'] + inputnode.inputs.bold_mask = run_data['boldmask'] + inputnode.inputs.motion_file = run_data['motion_file'] + inputnode.inputs.motion_json = run_data['motion_json'] + inputnode.inputs.confounds_files = run_data['confounds'] inputnode.inputs.dummy_scans = dummy_scans workflow.__desc__ = f""" @@ -191,42 +191,42 @@ def init_postprocess_nifti_wf( outputnode = pe.Node( niu.IdentityInterface( fields=[ - "name_source", - "preprocessed_bold", - "motion_file", - "temporal_mask", - "denoised_bold", - "denoised_interpolated_bold", - "censored_denoised_bold", - "smoothed_denoised_bold", - "boldref", - "bold_mask", + 'name_source', + 'preprocessed_bold', + 'motion_file', + 'temporal_mask', + 'denoised_bold', + 'denoised_interpolated_bold', + 'censored_denoised_bold', + 'smoothed_denoised_bold', + 'boldref', + 'bold_mask', # if parcellation is performed - "timeseries", - "timeseries_ciftis", # will not be defined + 'timeseries', + 'timeseries_ciftis', # will not be defined ], ), - name="outputnode", + name='outputnode', ) mem_gbx = _create_mem_gb(bold_file) downcast_data = pe.Node( ConvertTo32(), - name="downcast_data", - mem_gb=mem_gbx["timeseries"], + name='downcast_data', + mem_gb=mem_gbx['timeseries'], ) workflow.connect([ - (inputnode, outputnode, [("bold_file", "name_source")]), + (inputnode, outputnode, [('bold_file', 'name_source')]), (inputnode, downcast_data, [ - ("bold_file", "bold_file"), - ("boldref", "boldref"), - ("bold_mask", "bold_mask"), + ('bold_file', 'bold_file'), + ('boldref', 'boldref'), + ('bold_mask', 'bold_mask'), ]), (downcast_data, outputnode, [ - ("bold_mask", "bold_mask"), - ("boldref", "boldref"), + ('bold_mask', 'bold_mask'), + ('boldref', 'boldref'), ]), ]) # fmt:skip @@ -238,29 +238,29 @@ def init_postprocess_nifti_wf( workflow.connect([ (inputnode, prepare_confounds_wf, [ - ("bold_file", "inputnode.name_source"), - ("motion_file", "inputnode.motion_file"), - ("motion_json", "inputnode.motion_json"), - ("confounds_files", "inputnode.confounds_files"), + ('bold_file', 'inputnode.name_source'), + ('motion_file', 'inputnode.motion_file'), + ('motion_json', 'inputnode.motion_json'), + ('confounds_files', 'inputnode.confounds_files'), ]), - (downcast_data, prepare_confounds_wf, [("bold_file", "inputnode.preprocessed_bold")]), + (downcast_data, 
prepare_confounds_wf, [('bold_file', 'inputnode.preprocessed_bold')]), (prepare_confounds_wf, outputnode, [ - ("outputnode.preprocessed_bold", "preprocessed_bold"), + ('outputnode.preprocessed_bold', 'preprocessed_bold'), ]), ]) # fmt:skip denoise_bold_wf = init_denoise_bold_wf(TR=TR, mem_gb=mem_gbx) workflow.connect([ - (downcast_data, denoise_bold_wf, [("bold_mask", "inputnode.mask")]), + (downcast_data, denoise_bold_wf, [('bold_mask', 'inputnode.mask')]), (prepare_confounds_wf, denoise_bold_wf, [ - ("outputnode.temporal_mask", "inputnode.temporal_mask"), - ("outputnode.confounds_tsv", "inputnode.confounds_tsv"), - ("outputnode.confounds_images", "inputnode.confounds_images"), + ('outputnode.temporal_mask', 'inputnode.temporal_mask'), + ('outputnode.confounds_tsv', 'inputnode.confounds_tsv'), + ('outputnode.confounds_images', 'inputnode.confounds_images'), ]), (denoise_bold_wf, outputnode, [ - ("outputnode.denoised_interpolated_bold", "denoised_interpolated_bold"), - ("outputnode.censored_denoised_bold", "censored_denoised_bold"), + ('outputnode.denoised_interpolated_bold', 'denoised_interpolated_bold'), + ('outputnode.censored_denoised_bold', 'censored_denoised_bold'), ]), ]) # fmt:skip @@ -269,17 +269,17 @@ def init_postprocess_nifti_wf( workflow.connect([ (prepare_confounds_wf, despike_wf, [ - ("outputnode.preprocessed_bold", "inputnode.bold_file"), + ('outputnode.preprocessed_bold', 'inputnode.bold_file'), ]), (despike_wf, denoise_bold_wf, [ - ("outputnode.bold_file", "inputnode.preprocessed_bold"), + ('outputnode.bold_file', 'inputnode.preprocessed_bold'), ]), ]) # fmt:skip else: workflow.connect([ (prepare_confounds_wf, denoise_bold_wf, [ - ("outputnode.preprocessed_bold", "inputnode.preprocessed_bold"), + ('outputnode.preprocessed_bold', 'inputnode.preprocessed_bold'), ]), ]) # fmt:skip @@ -287,91 +287,91 @@ def init_postprocess_nifti_wf( alff_wf = init_alff_wf(name_source=bold_file, TR=TR, mem_gb=mem_gbx) workflow.connect([ - (downcast_data, alff_wf, [("bold_mask", "inputnode.bold_mask")]), + (downcast_data, alff_wf, [('bold_mask', 'inputnode.bold_mask')]), (prepare_confounds_wf, alff_wf, [ - ("outputnode.temporal_mask", "inputnode.temporal_mask"), + ('outputnode.temporal_mask', 'inputnode.temporal_mask'), ]), (denoise_bold_wf, alff_wf, [ - ("outputnode.denoised_interpolated_bold", "inputnode.denoised_bold"), + ('outputnode.denoised_interpolated_bold', 'inputnode.denoised_bold'), ]), ]) # fmt:skip reho_wf = init_reho_nifti_wf(name_source=bold_file, mem_gb=mem_gbx) workflow.connect([ - (downcast_data, reho_wf, [("bold_mask", "inputnode.bold_mask")]), + (downcast_data, reho_wf, [('bold_mask', 'inputnode.bold_mask')]), (denoise_bold_wf, reho_wf, [ - ("outputnode.censored_denoised_bold", "inputnode.denoised_bold"), + ('outputnode.censored_denoised_bold', 'inputnode.denoised_bold'), ]), ]) # fmt:skip qc_report_wf = init_qc_report_wf( TR=TR, head_radius=head_radius, - name="qc_report_wf", + name='qc_report_wf', ) workflow.connect([ (inputnode, qc_report_wf, [ - ("bold_file", "inputnode.name_source"), - ("boldref", "inputnode.boldref"), - ("bold_mask", "inputnode.bold_mask"), - ("anat_native", "inputnode.anat"), - ("anat_brainmask", "inputnode.anat_brainmask"), - ("template_to_anat_xfm", "inputnode.template_to_anat_xfm"), + ('bold_file', 'inputnode.name_source'), + ('boldref', 'inputnode.boldref'), + ('bold_mask', 'inputnode.bold_mask'), + ('anat_native', 'inputnode.anat'), + ('anat_brainmask', 'inputnode.anat_brainmask'), + ('template_to_anat_xfm', 'inputnode.template_to_anat_xfm'), 
]), (prepare_confounds_wf, qc_report_wf, [ - ("outputnode.preprocessed_bold", "inputnode.preprocessed_bold"), - ("outputnode.dummy_scans", "inputnode.dummy_scans"), - ("outputnode.motion_file", "inputnode.motion_file"), - ("outputnode.temporal_mask", "inputnode.temporal_mask"), + ('outputnode.preprocessed_bold', 'inputnode.preprocessed_bold'), + ('outputnode.dummy_scans', 'inputnode.dummy_scans'), + ('outputnode.motion_file', 'inputnode.motion_file'), + ('outputnode.temporal_mask', 'inputnode.temporal_mask'), ]), (denoise_bold_wf, qc_report_wf, [ - ("outputnode.denoised_interpolated_bold", "inputnode.denoised_interpolated_bold"), - ("outputnode.censored_denoised_bold", "inputnode.censored_denoised_bold"), + ('outputnode.denoised_interpolated_bold', 'inputnode.denoised_interpolated_bold'), + ('outputnode.censored_denoised_bold', 'inputnode.censored_denoised_bold'), ]), ]) # fmt:skip postproc_derivatives_wf = init_postproc_derivatives_wf( name_source=bold_file, - source_metadata=run_data["bold_metadata"], + source_metadata=run_data['bold_metadata'], exact_scans=exact_scans, ) workflow.connect([ (inputnode, postproc_derivatives_wf, [ - ("motion_file", "inputnode.preproc_confounds_file"), - ("atlas_files", "inputnode.atlas_files"), - ("atlases", "inputnode.atlas_names"), + ('motion_file', 'inputnode.preproc_confounds_file'), + ('atlas_files', 'inputnode.atlas_files'), + ('atlases', 'inputnode.atlas_names'), ]), (prepare_confounds_wf, postproc_derivatives_wf, [ - ("outputnode.confounds_tsv", "inputnode.confounds_tsv"), - ("outputnode.confounds_metadata", "inputnode.confounds_metadata"), - ("outputnode.motion_file", "inputnode.motion_file"), - ("outputnode.motion_metadata", "inputnode.motion_metadata"), - ("outputnode.temporal_mask", "inputnode.temporal_mask"), - ("outputnode.temporal_mask_metadata", "inputnode.temporal_mask_metadata"), + ('outputnode.confounds_tsv', 'inputnode.confounds_tsv'), + ('outputnode.confounds_metadata', 'inputnode.confounds_metadata'), + ('outputnode.motion_file', 'inputnode.motion_file'), + ('outputnode.motion_metadata', 'inputnode.motion_metadata'), + ('outputnode.temporal_mask', 'inputnode.temporal_mask'), + ('outputnode.temporal_mask_metadata', 'inputnode.temporal_mask_metadata'), ]), (denoise_bold_wf, postproc_derivatives_wf, [ - ("outputnode.denoised_bold", "inputnode.denoised_bold"), - ("outputnode.smoothed_denoised_bold", "inputnode.smoothed_denoised_bold"), + ('outputnode.denoised_bold', 'inputnode.denoised_bold'), + ('outputnode.smoothed_denoised_bold', 'inputnode.smoothed_denoised_bold'), ]), - (qc_report_wf, postproc_derivatives_wf, [("outputnode.qc_file", "inputnode.qc_file")]), - (reho_wf, postproc_derivatives_wf, [("outputnode.reho", "inputnode.reho")]), + (qc_report_wf, postproc_derivatives_wf, [('outputnode.qc_file', 'inputnode.qc_file')]), + (reho_wf, postproc_derivatives_wf, [('outputnode.reho', 'inputnode.reho')]), (postproc_derivatives_wf, outputnode, [ - ("outputnode.motion_file", "motion_file"), - ("outputnode.temporal_mask", "temporal_mask"), - ("outputnode.denoised_bold", "denoised_bold"), - ("outputnode.smoothed_denoised_bold", "smoothed_denoised_bold"), - ("outputnode.timeseries", "timeseries"), + ('outputnode.motion_file', 'motion_file'), + ('outputnode.temporal_mask', 'temporal_mask'), + ('outputnode.denoised_bold', 'denoised_bold'), + ('outputnode.smoothed_denoised_bold', 'smoothed_denoised_bold'), + ('outputnode.timeseries', 'timeseries'), ]), ]) # fmt:skip if bandpass_filter: workflow.connect([ (alff_wf, postproc_derivatives_wf, [ - 
("outputnode.alff", "inputnode.alff"), - ("outputnode.smoothed_alff", "inputnode.smoothed_alff"), + ('outputnode.alff', 'inputnode.alff'), + ('outputnode.smoothed_alff', 'inputnode.smoothed_alff'), ]), ]) # fmt:skip @@ -380,33 +380,33 @@ def init_postprocess_nifti_wf( workflow.connect([ (inputnode, connectivity_wf, [ - ("bold_file", "inputnode.name_source"), - ("atlases", "inputnode.atlases"), - ("atlas_files", "inputnode.atlas_files"), - ("atlas_labels_files", "inputnode.atlas_labels_files"), + ('bold_file', 'inputnode.name_source'), + ('atlases', 'inputnode.atlases'), + ('atlas_files', 'inputnode.atlas_files'), + ('atlas_labels_files', 'inputnode.atlas_labels_files'), ]), - (downcast_data, connectivity_wf, [("bold_mask", "inputnode.bold_mask")]), + (downcast_data, connectivity_wf, [('bold_mask', 'inputnode.bold_mask')]), (prepare_confounds_wf, connectivity_wf, [ - ("outputnode.temporal_mask", "inputnode.temporal_mask"), + ('outputnode.temporal_mask', 'inputnode.temporal_mask'), ]), (denoise_bold_wf, connectivity_wf, [ - ("outputnode.denoised_bold", "inputnode.denoised_bold"), + ('outputnode.denoised_bold', 'inputnode.denoised_bold'), ]), - (reho_wf, connectivity_wf, [("outputnode.reho", "inputnode.reho")]), + (reho_wf, connectivity_wf, [('outputnode.reho', 'inputnode.reho')]), (connectivity_wf, postproc_derivatives_wf, [ - ("outputnode.coverage", "inputnode.coverage"), - ("outputnode.timeseries", "inputnode.timeseries"), - ("outputnode.correlations", "inputnode.correlations"), - ("outputnode.correlations_exact", "inputnode.correlations_exact"), - ("outputnode.parcellated_reho", "inputnode.parcellated_reho"), + ('outputnode.coverage', 'inputnode.coverage'), + ('outputnode.timeseries', 'inputnode.timeseries'), + ('outputnode.correlations', 'inputnode.correlations'), + ('outputnode.correlations_exact', 'inputnode.correlations_exact'), + ('outputnode.parcellated_reho', 'inputnode.parcellated_reho'), ]), ]) # fmt:skip if bandpass_filter: workflow.connect([ - (alff_wf, connectivity_wf, [("outputnode.alff", "inputnode.alff")]), + (alff_wf, connectivity_wf, [('outputnode.alff', 'inputnode.alff')]), (connectivity_wf, postproc_derivatives_wf, [ - ("outputnode.parcellated_alff", "inputnode.parcellated_alff"), + ('outputnode.parcellated_alff', 'inputnode.parcellated_alff'), ]), ]) # fmt:skip @@ -423,9 +423,9 @@ def init_postprocess_nifti_wf( # Use inputnode for executive summary instead of downcast_data # because T1w is used as name source. (inputnode, execsummary_functional_plots_wf, [ - ("boldref", "inputnode.boldref"), - ("t1w", "inputnode.t1w"), - ("t2w", "inputnode.t2w"), + ('boldref', 'inputnode.boldref'), + ('t1w', 'inputnode.t1w'), + ('t2w', 'inputnode.t2w'), ]), ]) # fmt:skip diff --git a/xcp_d/workflows/bold/outputs.py b/xcp_d/workflows/bold/outputs.py index d30dbf4b0..b82ba0acf 100644 --- a/xcp_d/workflows/bold/outputs.py +++ b/xcp_d/workflows/bold/outputs.py @@ -17,7 +17,7 @@ def init_postproc_derivatives_wf( name_source, source_metadata, exact_scans, - name="postproc_derivatives_wf", + name='postproc_derivatives_wf', ): """Write out the xcp_d derivatives in BIDS format. 
@@ -90,38 +90,38 @@ def init_postproc_derivatives_wf( niu.IdentityInterface( fields=[ # preprocessing files to use as sources - "preproc_confounds_file", + 'preproc_confounds_file', # postprocessed outputs - "atlas_files", # for Sources - "confounds_tsv", - "confounds_metadata", - "coverage", - "timeseries", - "correlations", - "correlations_exact", - "qc_file", - "denoised_bold", - "smoothed_denoised_bold", - "alff", - "parcellated_alff", - "smoothed_alff", - "reho", - "parcellated_reho", - "motion_file", - "motion_metadata", - "temporal_mask", - "temporal_mask_metadata", - "dummy_scans", + 'atlas_files', # for Sources + 'confounds_tsv', + 'confounds_metadata', + 'coverage', + 'timeseries', + 'correlations', + 'correlations_exact', + 'qc_file', + 'denoised_bold', + 'smoothed_denoised_bold', + 'alff', + 'parcellated_alff', + 'smoothed_alff', + 'reho', + 'parcellated_reho', + 'motion_file', + 'motion_metadata', + 'temporal_mask', + 'temporal_mask_metadata', + 'dummy_scans', # cifti-only inputs - "coverage_ciftis", - "timeseries_ciftis", - "correlation_ciftis", - "correlation_ciftis_exact", + 'coverage_ciftis', + 'timeseries_ciftis', + 'correlation_ciftis', + 'correlation_ciftis_exact', # info for filenames - "atlas_names", + 'atlas_names', ], ), - name="inputnode", + name='inputnode', ) # Outputs that may be used by the concatenation workflow, in which case we want the actual @@ -129,16 +129,16 @@ def init_postproc_derivatives_wf( outputnode = pe.Node( niu.IdentityInterface( fields=[ - "motion_file", - "temporal_mask", - "denoised_bold", - "censored_denoised_bold", - "smoothed_denoised_bold", - "timeseries", - "timeseries_ciftis", + 'motion_file', + 'temporal_mask', + 'denoised_bold', + 'censored_denoised_bold', + 'smoothed_denoised_bold', + 'timeseries', + 'timeseries_ciftis', ], ), - name="outputnode", + name='outputnode', ) bold_sources = pe.Node( @@ -147,7 +147,7 @@ def init_postproc_derivatives_wf( dataset_links=config.execution.dataset_links, out_dir=str(output_dir), ), - name="sources", + name='sources', ) bold_sources.inputs.in1 = name_source confound_sources = pe.Node( @@ -156,9 +156,9 @@ def init_postproc_derivatives_wf( dataset_links=config.execution.dataset_links, out_dir=str(output_dir), ), - name="confounds", + name='confounds', ) - workflow.connect([(inputnode, confound_sources, [("preproc_confounds_file", "in1")])]) + workflow.connect([(inputnode, confound_sources, [('preproc_confounds_file', 'in1')])]) # Create dictionary of basic information cleaned_data_dictionary = { @@ -168,43 +168,43 @@ def init_postproc_derivatives_wf( if bandpass_filter: software_filters = {} if low_pass > 0 and high_pass > 0: - software_filters["Bandpass filter"] = { - "Low-pass cutoff (Hz)": low_pass, - "High-pass cutoff (Hz)": high_pass, - "Filter order": bpf_order, + software_filters['Bandpass filter'] = { + 'Low-pass cutoff (Hz)': low_pass, + 'High-pass cutoff (Hz)': high_pass, + 'Filter order': bpf_order, } elif high_pass > 0: - software_filters["High-pass filter"] = { - "cutoff (Hz)": high_pass, - "Filter order": bpf_order, + software_filters['High-pass filter'] = { + 'cutoff (Hz)': high_pass, + 'Filter order': bpf_order, } elif low_pass > 0: - software_filters["Low-pass filter"] = { - "cutoff (Hz)": low_pass, - "Filter order": bpf_order, + software_filters['Low-pass filter'] = { + 'cutoff (Hz)': low_pass, + 'Filter order': bpf_order, } # Determine cohort (if there is one) in the original data - cohort = get_entity(name_source, "cohort") + cohort = get_entity(name_source, 'cohort') ds_motion = 
pe.Node( DerivativesDataSink( source_file=name_source, - dismiss_entities=["segmentation", "den", "res", "space", "cohort", "desc"], - suffix="motion", - extension=".tsv", + dismiss_entities=['segmentation', 'den', 'res', 'space', 'cohort', 'desc'], + suffix='motion', + extension='.tsv', ), - name="ds_motion", + name='ds_motion', run_without_submitting=True, mem_gb=1, ) workflow.connect([ (inputnode, ds_motion, [ - ("motion_metadata", "meta_dict"), - ("motion_file", "in_file"), + ('motion_metadata', 'meta_dict'), + ('motion_file', 'in_file'), ]), - (confound_sources, ds_motion, [("out", "Sources")]), - (ds_motion, outputnode, [("out_file", "motion_file")]), + (confound_sources, ds_motion, [('out', 'Sources')]), + (ds_motion, outputnode, [('out_file', 'motion_file')]), ]) # fmt:skip merge_dense_src = pe.Node( @@ -212,16 +212,16 @@ def init_postproc_derivatives_wf( numinputs=( 1 + (1 if fd_thresh > 0 else 0) - + (1 if config.execution.confounds_config != "none" else 0) + + (1 if config.execution.confounds_config != 'none' else 0) ), dataset_links=config.execution.dataset_links, out_dir=str(output_dir), ), - name="merge_dense_src", + name='merge_dense_src', run_without_submitting=True, mem_gb=1, ) - workflow.connect([(bold_sources, merge_dense_src, [("out", "in1")])]) + workflow.connect([(bold_sources, merge_dense_src, [('out', 'in1')])]) if fd_thresh > 0: motion_src = pe.Node( @@ -230,32 +230,32 @@ def init_postproc_derivatives_wf( dataset_links=config.execution.dataset_links, out_dir=str(output_dir), ), - name="motion_src", + name='motion_src', ) - workflow.connect([(ds_motion, motion_src, [("out_file", "in1")])]) + workflow.connect([(ds_motion, motion_src, [('out_file', 'in1')])]) ds_temporal_mask = pe.Node( DerivativesDataSink( - dismiss_entities=["segmentation", "den", "res", "space", "cohort", "desc"], - suffix="outliers", - extension=".tsv", + dismiss_entities=['segmentation', 'den', 'res', 'space', 'cohort', 'desc'], + suffix='outliers', + extension='.tsv', source_file=name_source, # Metadata Threshold=fd_thresh, ), - name="ds_temporal_mask", + name='ds_temporal_mask', run_without_submitting=True, mem_gb=1, ) workflow.connect([ (inputnode, ds_temporal_mask, [ - ("temporal_mask_metadata", "meta_dict"), - ("temporal_mask", "in_file"), + ('temporal_mask_metadata', 'meta_dict'), + ('temporal_mask', 'in_file'), ]), - (motion_src, ds_temporal_mask, [("out", "Sources")]), - (ds_temporal_mask, outputnode, [("out_file", "temporal_mask")]), - (ds_temporal_mask, merge_dense_src, [("out_file", "in2")]), + (motion_src, ds_temporal_mask, [('out', 'Sources')]), + (ds_temporal_mask, outputnode, [('out_file', 'temporal_mask')]), + (ds_temporal_mask, merge_dense_src, [('out_file', 'in2')]), ]) # fmt:skip if config.execution.confounds_config is not None: @@ -267,70 +267,70 @@ def init_postproc_derivatives_wf( dataset_links=config.execution.dataset_links, out_dir=str(output_dir), ), - name="confounds_src", + name='confounds_src', run_without_submitting=True, mem_gb=1, ) - workflow.connect([(inputnode, confounds_src, [("confounds_metadata", "metadata")])]) + workflow.connect([(inputnode, confounds_src, [('confounds_metadata', 'metadata')])]) if fd_thresh > 0: - workflow.connect([(ds_temporal_mask, confounds_src, [("out_file", "in2")])]) + workflow.connect([(ds_temporal_mask, confounds_src, [('out_file', 'in2')])]) ds_confounds = pe.Node( DerivativesDataSink( source_file=name_source, - dismiss_entities=["space", "cohort", "den", "res"], - datatype="func", - suffix="design", - extension=".tsv", + 
dismiss_entities=['space', 'cohort', 'den', 'res'], + datatype='func', + suffix='design', + extension='.tsv', ), - name="ds_confounds", + name='ds_confounds', run_without_submitting=False, ) workflow.connect([ - (inputnode, ds_confounds, [("confounds_tsv", "in_file")]), - (confounds_src, ds_confounds, [("metadata", "meta_dict")]), - (ds_confounds, merge_dense_src, [("out_file", f"in{3 if fd_thresh > 0 else 2}")]), + (inputnode, ds_confounds, [('confounds_tsv', 'in_file')]), + (confounds_src, ds_confounds, [('metadata', 'meta_dict')]), + (ds_confounds, merge_dense_src, [('out_file', f'in{3 if fd_thresh > 0 else 2}')]), ]) # fmt:skip # Write out derivatives via DerivativesDataSink ds_denoised_bold = pe.Node( DerivativesDataSink( source_file=name_source, - dismiss_entities=["den"], + dismiss_entities=['den'], cohort=cohort, - desc="denoised", - den="91k" if file_format == "cifti" else None, - extension=".dtseries.nii" if file_format == "cifti" else ".nii.gz", + desc='denoised', + den='91k' if file_format == 'cifti' else None, + extension='.dtseries.nii' if file_format == 'cifti' else '.nii.gz', # Metadata meta_dict=cleaned_data_dictionary, SoftwareFilters=software_filters, ), - name="ds_denoised_bold", + name='ds_denoised_bold', run_without_submitting=True, mem_gb=2, ) workflow.connect([ - (inputnode, ds_denoised_bold, [("denoised_bold", "in_file")]), - (merge_dense_src, ds_denoised_bold, [("out", "Sources")]), - (ds_denoised_bold, outputnode, [("out_file", "denoised_bold")]), + (inputnode, ds_denoised_bold, [('denoised_bold', 'in_file')]), + (merge_dense_src, ds_denoised_bold, [('out', 'Sources')]), + (ds_denoised_bold, outputnode, [('out_file', 'denoised_bold')]), ]) # fmt:skip if config.workflow.linc_qc: ds_qc_file = pe.Node( DerivativesDataSink( source_file=name_source, - dismiss_entities=["desc", "den", "res"], + dismiss_entities=['desc', 'den', 'res'], cohort=cohort, - den="91k" if file_format == "cifti" else None, - desc="linc", - suffix="qc", - extension=".tsv", + den='91k' if file_format == 'cifti' else None, + desc='linc', + suffix='qc', + extension='.tsv', ), - name="ds_qc_file", + name='ds_qc_file', run_without_submitting=True, mem_gb=1, ) - workflow.connect([(inputnode, ds_qc_file, [("qc_file", "in_file")])]) + workflow.connect([(inputnode, ds_qc_file, [('qc_file', 'in_file')])]) if smoothing: smoothed_bold_src = pe.Node( @@ -339,34 +339,34 @@ def init_postproc_derivatives_wf( dataset_links=config.execution.dataset_links, out_dir=str(output_dir), ), - name="smoothed_bold_src", + name='smoothed_bold_src', run_without_submitting=True, mem_gb=1, ) - workflow.connect([(ds_denoised_bold, smoothed_bold_src, [("out_file", "in1")])]) + workflow.connect([(ds_denoised_bold, smoothed_bold_src, [('out_file', 'in1')])]) # Write out derivatives via DerivativesDataSink ds_smoothed_bold = pe.Node( DerivativesDataSink( source_file=name_source, - dismiss_entities=["den"], + dismiss_entities=['den'], cohort=cohort, - den="91k" if file_format == "cifti" else None, - desc="denoisedSmoothed", - extension=".dtseries.nii" if file_format == "cifti" else ".nii.gz", + den='91k' if file_format == 'cifti' else None, + desc='denoisedSmoothed', + extension='.dtseries.nii' if file_format == 'cifti' else '.nii.gz', check_hdr=False, # Metadata SoftwareFilters=software_filters, FWHM=smoothing, ), - name="ds_smoothed_bold", + name='ds_smoothed_bold', run_without_submitting=True, mem_gb=2, ) workflow.connect([ - (inputnode, ds_smoothed_bold, [("smoothed_denoised_bold", "in_file")]), - (smoothed_bold_src, 
ds_smoothed_bold, [("out", "Sources")]), - (ds_smoothed_bold, outputnode, [("out_file", "smoothed_denoised_bold")]), + (inputnode, ds_smoothed_bold, [('smoothed_denoised_bold', 'in_file')]), + (smoothed_bold_src, ds_smoothed_bold, [('out', 'Sources')]), + (ds_smoothed_bold, outputnode, [('out_file', 'smoothed_denoised_bold')]), ]) # fmt:skip # Connectivity workflow outputs @@ -379,10 +379,10 @@ def init_postproc_derivatives_wf( ), run_without_submitting=True, mem_gb=1, - name="make_atlas_dict", - iterfield=["in1"], + name='make_atlas_dict', + iterfield=['in1'], ) - workflow.connect([(inputnode, make_atlas_dict, [("atlas_files", "in1")])]) + workflow.connect([(inputnode, make_atlas_dict, [('atlas_files', 'in1')])]) # Convert Sources to a dictionary, to play well with parcellation MapNodes. add_denoised_to_src = pe.MapNode( @@ -393,35 +393,35 @@ def init_postproc_derivatives_wf( ), run_without_submitting=True, mem_gb=1, - name="add_denoised_to_src", - iterfield=["metadata"], + name='add_denoised_to_src', + iterfield=['metadata'], ) workflow.connect([ - (make_atlas_dict, add_denoised_to_src, [("metadata", "metadata")]), - (ds_denoised_bold, add_denoised_to_src, [("out_file", "in1")]), + (make_atlas_dict, add_denoised_to_src, [('metadata', 'metadata')]), + (ds_denoised_bold, add_denoised_to_src, [('out_file', 'in1')]), ]) # fmt:skip # TODO: Add brain mask to Sources (for NIfTIs). ds_coverage = pe.MapNode( DerivativesDataSink( source_file=name_source, - dismiss_entities=["desc", "den", "res"], + dismiss_entities=['desc', 'den', 'res'], cohort=cohort, - statistic="coverage", - suffix="bold", - extension=".tsv", + statistic='coverage', + suffix='bold', + extension='.tsv', ), - name="ds_coverage", + name='ds_coverage', run_without_submitting=True, mem_gb=1, - iterfield=["segmentation", "in_file", "meta_dict"], + iterfield=['segmentation', 'in_file', 'meta_dict'], ) workflow.connect([ (inputnode, ds_coverage, [ - ("atlas_names", "segmentation"), - ("coverage", "in_file"), + ('atlas_names', 'segmentation'), + ('coverage', 'in_file'), ]), - (make_atlas_dict, ds_coverage, [("metadata", "meta_dict")]), + (make_atlas_dict, ds_coverage, [('metadata', 'meta_dict')]), ]) # fmt:skip add_coverage_to_src = pe.MapNode( @@ -432,37 +432,37 @@ def init_postproc_derivatives_wf( ), run_without_submitting=True, mem_gb=1, - name="add_coverage_to_src", - iterfield=["metadata", "in1"], + name='add_coverage_to_src', + iterfield=['metadata', 'in1'], ) workflow.connect([ - (add_denoised_to_src, add_coverage_to_src, [("metadata", "metadata")]), - (ds_coverage, add_coverage_to_src, [("out_file", "in1")]), + (add_denoised_to_src, add_coverage_to_src, [('metadata', 'metadata')]), + (ds_coverage, add_coverage_to_src, [('out_file', 'in1')]), ]) # fmt:skip ds_timeseries = pe.MapNode( DerivativesDataSink( source_file=name_source, - dismiss_entities=["desc", "den", "res"], + dismiss_entities=['desc', 'den', 'res'], cohort=cohort, - statistic="mean", - suffix="timeseries", - extension=".tsv", + statistic='mean', + suffix='timeseries', + extension='.tsv', # Metadata - SamplingFrequency="TR", + SamplingFrequency='TR', ), - name="ds_timeseries", + name='ds_timeseries', run_without_submitting=True, mem_gb=1, - iterfield=["segmentation", "in_file", "meta_dict"], + iterfield=['segmentation', 'in_file', 'meta_dict'], ) workflow.connect([ (inputnode, ds_timeseries, [ - ("atlas_names", "segmentation"), - ("timeseries", "in_file"), + ('atlas_names', 'segmentation'), + ('timeseries', 'in_file'), ]), - (add_coverage_to_src, ds_timeseries, 
[("metadata", "meta_dict")]), - (ds_timeseries, outputnode, [("out_file", "timeseries")]), + (add_coverage_to_src, ds_timeseries, [('metadata', 'meta_dict')]), + (ds_timeseries, outputnode, [('out_file', 'timeseries')]), ]) # fmt:skip if config.workflow.output_correlations: @@ -474,78 +474,78 @@ def init_postproc_derivatives_wf( ), run_without_submitting=True, mem_gb=1, - name="make_corrs_meta_dict1", - iterfield=["in1"], + name='make_corrs_meta_dict1', + iterfield=['in1'], ) - workflow.connect([(ds_timeseries, make_corrs_meta_dict1, [("out_file", "in1")])]) + workflow.connect([(ds_timeseries, make_corrs_meta_dict1, [('out_file', 'in1')])]) make_corrs_meta_dict2 = pe.MapNode( BIDSURI( numinputs=1, dataset_links=config.execution.dataset_links, out_dir=str(output_dir), - field="NodeFiles", + field='NodeFiles', ), run_without_submitting=True, mem_gb=1, - name="make_corrs_meta_dict2", - iterfield=["in1", "metadata"], + name='make_corrs_meta_dict2', + iterfield=['in1', 'metadata'], ) workflow.connect([ - (inputnode, make_corrs_meta_dict2, [("atlas_files", "in1")]), - (make_corrs_meta_dict1, make_corrs_meta_dict2, [("metadata", "metadata")]), + (inputnode, make_corrs_meta_dict2, [('atlas_files', 'in1')]), + (make_corrs_meta_dict1, make_corrs_meta_dict2, [('metadata', 'metadata')]), ]) # fmt:skip ds_correlations = pe.MapNode( DerivativesDataSink( source_file=name_source, - dismiss_entities=["desc", "den", "res"], + dismiss_entities=['desc', 'den', 'res'], cohort=cohort, - statistic="pearsoncorrelation", - suffix="relmat", - extension=".tsv", + statistic='pearsoncorrelation', + suffix='relmat', + extension='.tsv', # Metadata - RelationshipMeasure="Pearson correlation coefficient", + RelationshipMeasure='Pearson correlation coefficient', Weighted=True, Directed=False, ValidDiagonal=False, - StorageFormat="Full", + StorageFormat='Full', ), - name="ds_correlations", + name='ds_correlations', run_without_submitting=True, mem_gb=1, - iterfield=["segmentation", "in_file", "meta_dict"], + iterfield=['segmentation', 'in_file', 'meta_dict'], ) workflow.connect([ (inputnode, ds_correlations, [ - ("atlas_names", "segmentation"), - ("correlations", "in_file"), + ('atlas_names', 'segmentation'), + ('correlations', 'in_file'), ]), - (make_corrs_meta_dict2, ds_correlations, [("metadata", "meta_dict")]), + (make_corrs_meta_dict2, ds_correlations, [('metadata', 'meta_dict')]), ]) # fmt:skip - if file_format == "cifti": + if file_format == 'cifti': ds_coverage_ciftis = pe.MapNode( DerivativesDataSink( source_file=name_source, check_hdr=False, - dismiss_entities=["desc"], + dismiss_entities=['desc'], cohort=cohort, - statistic="coverage", - suffix="boldmap", - extension=".pscalar.nii", + statistic='coverage', + suffix='boldmap', + extension='.pscalar.nii', ), - name="ds_coverage_ciftis", + name='ds_coverage_ciftis', run_without_submitting=True, mem_gb=1, - iterfield=["segmentation", "in_file", "meta_dict"], + iterfield=['segmentation', 'in_file', 'meta_dict'], ) workflow.connect([ (inputnode, ds_coverage_ciftis, [ - ("atlas_names", "segmentation"), - ("coverage_ciftis", "in_file"), + ('atlas_names', 'segmentation'), + ('coverage_ciftis', 'in_file'), ]), - (add_denoised_to_src, ds_coverage_ciftis, [("metadata", "meta_dict")]), + (add_denoised_to_src, ds_coverage_ciftis, [('metadata', 'meta_dict')]), ]) # fmt:skip add_ccoverage_to_src = pe.MapNode( @@ -556,37 +556,37 @@ def init_postproc_derivatives_wf( ), run_without_submitting=True, mem_gb=1, - name="add_ccoverage_to_src", - iterfield=["metadata", "in1"], + 
name='add_ccoverage_to_src', + iterfield=['metadata', 'in1'], ) workflow.connect([ - (add_denoised_to_src, add_ccoverage_to_src, [("metadata", "metadata")]), - (ds_coverage_ciftis, add_ccoverage_to_src, [("out_file", "in1")]), + (add_denoised_to_src, add_ccoverage_to_src, [('metadata', 'metadata')]), + (ds_coverage_ciftis, add_ccoverage_to_src, [('out_file', 'in1')]), ]) # fmt:skip ds_timeseries_ciftis = pe.MapNode( DerivativesDataSink( source_file=name_source, check_hdr=False, - dismiss_entities=["desc", "den"], + dismiss_entities=['desc', 'den'], cohort=cohort, - den="91k" if file_format == "cifti" else None, - statistic="mean", - suffix="timeseries", - extension=".ptseries.nii", + den='91k' if file_format == 'cifti' else None, + statistic='mean', + suffix='timeseries', + extension='.ptseries.nii', ), - name="ds_timeseries_ciftis", + name='ds_timeseries_ciftis', run_without_submitting=True, mem_gb=1, - iterfield=["segmentation", "in_file", "meta_dict"], + iterfield=['segmentation', 'in_file', 'meta_dict'], ) workflow.connect([ (inputnode, ds_timeseries_ciftis, [ - ("atlas_names", "segmentation"), - ("timeseries_ciftis", "in_file"), + ('atlas_names', 'segmentation'), + ('timeseries_ciftis', 'in_file'), ]), - (add_ccoverage_to_src, ds_timeseries_ciftis, [("metadata", "meta_dict")]), - (ds_timeseries_ciftis, outputnode, [("out_file", "timeseries_ciftis")]), + (add_ccoverage_to_src, ds_timeseries_ciftis, [('metadata', 'meta_dict')]), + (ds_timeseries_ciftis, outputnode, [('out_file', 'timeseries_ciftis')]), ]) # fmt:skip if config.workflow.output_correlations: @@ -598,11 +598,11 @@ def init_postproc_derivatives_wf( ), run_without_submitting=True, mem_gb=1, - name="make_ccorrs_meta_dict1", - iterfield=["in1"], + name='make_ccorrs_meta_dict1', + iterfield=['in1'], ) workflow.connect([ - (ds_timeseries_ciftis, make_ccorrs_meta_dict1, [("out_file", "in1")]), + (ds_timeseries_ciftis, make_ccorrs_meta_dict1, [('out_file', 'in1')]), ]) # fmt:skip make_ccorrs_meta_dict2 = pe.MapNode( @@ -610,76 +610,76 @@ def init_postproc_derivatives_wf( numinputs=1, dataset_links=config.execution.dataset_links, out_dir=str(output_dir), - field="NodeFiles", + field='NodeFiles', ), run_without_submitting=True, mem_gb=1, - name="make_ccorrs_meta_dict2", - iterfield=["in1", "metadata"], + name='make_ccorrs_meta_dict2', + iterfield=['in1', 'metadata'], ) workflow.connect([ - (inputnode, make_ccorrs_meta_dict2, [("atlas_files", "in1")]), - (make_ccorrs_meta_dict1, make_ccorrs_meta_dict2, [("metadata", "metadata")]), + (inputnode, make_ccorrs_meta_dict2, [('atlas_files', 'in1')]), + (make_ccorrs_meta_dict1, make_ccorrs_meta_dict2, [('metadata', 'metadata')]), ]) # fmt:skip ds_correlation_ciftis = pe.MapNode( DerivativesDataSink( source_file=name_source, check_hdr=False, - dismiss_entities=["desc", "den"], + dismiss_entities=['desc', 'den'], cohort=cohort, - den="91k" if file_format == "cifti" else None, - statistic="pearsoncorrelation", - suffix="boldmap", - extension=".pconn.nii", + den='91k' if file_format == 'cifti' else None, + statistic='pearsoncorrelation', + suffix='boldmap', + extension='.pconn.nii', # Metadata - RelationshipMeasure="Pearson correlation coefficient", + RelationshipMeasure='Pearson correlation coefficient', Weighted=True, Directed=False, ValidDiagonal=False, - StorageFormat="Full", + StorageFormat='Full', ), - name="ds_correlation_ciftis", + name='ds_correlation_ciftis', run_without_submitting=True, mem_gb=1, - iterfield=["segmentation", "in_file", "meta_dict"], + iterfield=['segmentation', 
'in_file', 'meta_dict'], ) workflow.connect([ (inputnode, ds_correlation_ciftis, [ - ("atlas_names", "segmentation"), - ("correlation_ciftis", "in_file"), + ('atlas_names', 'segmentation'), + ('correlation_ciftis', 'in_file'), ]), - (make_ccorrs_meta_dict2, ds_correlation_ciftis, [("metadata", "meta_dict")]), + (make_ccorrs_meta_dict2, ds_correlation_ciftis, [('metadata', 'meta_dict')]), ]) # fmt:skip for i_exact_scan, exact_scan in enumerate(exact_scans): select_exact_scan_files = pe.MapNode( niu.Select(index=i_exact_scan), - name=f"select_exact_scan_files_{i_exact_scan}", - iterfield=["inlist"], + name=f'select_exact_scan_files_{i_exact_scan}', + iterfield=['inlist'], ) workflow.connect([ - (inputnode, select_exact_scan_files, [("correlations_exact", "inlist")]), + (inputnode, select_exact_scan_files, [('correlations_exact', 'inlist')]), ]) # fmt:skip ds_correlations_exact = pe.MapNode( DerivativesDataSink( source_file=name_source, - dismiss_entities=["desc", "den", "res"], + dismiss_entities=['desc', 'den', 'res'], cohort=cohort, - statistic="pearsoncorrelation", - desc=f"{exact_scan}volumes", - suffix="relmat", - extension=".tsv", + statistic='pearsoncorrelation', + desc=f'{exact_scan}volumes', + suffix='relmat', + extension='.tsv', ), - name=f"ds_correlations_exact_{i_exact_scan}", + name=f'ds_correlations_exact_{i_exact_scan}', run_without_submitting=True, mem_gb=1, - iterfield=["segmentation", "in_file"], + iterfield=['segmentation', 'in_file'], ) workflow.connect([ - (inputnode, ds_correlations_exact, [("atlas_names", "segmentation")]), - (select_exact_scan_files, ds_correlations_exact, [("out", "in_file")]), + (inputnode, ds_correlations_exact, [('atlas_names', 'segmentation')]), + (select_exact_scan_files, ds_correlations_exact, [('out', 'in_file')]), ]) # fmt:skip # Resting state metric outputs @@ -689,33 +689,33 @@ def init_postproc_derivatives_wf( dataset_links=config.execution.dataset_links, out_dir=str(output_dir), ), - name="denoised_src", + name='denoised_src', run_without_submitting=True, mem_gb=1, ) - workflow.connect([(ds_denoised_bold, denoised_src, [("out_file", "in1")])]) + workflow.connect([(ds_denoised_bold, denoised_src, [('out_file', 'in1')])]) ds_reho = pe.Node( DerivativesDataSink( source_file=name_source, check_hdr=False, - dismiss_entities=["desc", "den"], + dismiss_entities=['desc', 'den'], cohort=cohort, - den="91k" if file_format == "cifti" else None, - statistic="reho", - suffix="boldmap", - extension=".dscalar.nii" if file_format == "cifti" else ".nii.gz", + den='91k' if file_format == 'cifti' else None, + statistic='reho', + suffix='boldmap', + extension='.dscalar.nii' if file_format == 'cifti' else '.nii.gz', # Metadata SoftwareFilters=software_filters, - Neighborhood="vertices", + Neighborhood='vertices', ), - name="ds_reho", + name='ds_reho', run_without_submitting=True, mem_gb=1, ) workflow.connect([ - (inputnode, ds_reho, [("reho", "in_file")]), - (denoised_src, ds_reho, [("out", "Sources")]), + (inputnode, ds_reho, [('reho', 'in_file')]), + (denoised_src, ds_reho, [('out', 'Sources')]), ]) # fmt:skip if config.execution.atlases: @@ -727,37 +727,37 @@ def init_postproc_derivatives_wf( ), run_without_submitting=True, mem_gb=1, - name="add_reho_to_src", - iterfield=["metadata"], + name='add_reho_to_src', + iterfield=['metadata'], ) workflow.connect([ - (make_atlas_dict, add_reho_to_src, [("metadata", "metadata")]), - (ds_reho, add_reho_to_src, [("out_file", "in1")]), + (make_atlas_dict, add_reho_to_src, [('metadata', 'metadata')]), + (ds_reho, 
add_reho_to_src, [('out_file', 'in1')]), ]) # fmt:skip ds_parcellated_reho = pe.MapNode( DerivativesDataSink( source_file=name_source, - dismiss_entities=["desc", "den", "res"], + dismiss_entities=['desc', 'den', 'res'], cohort=cohort, - statistic="reho", - suffix="bold", - extension=".tsv", + statistic='reho', + suffix='bold', + extension='.tsv', # Metadata SoftwareFilters=software_filters, - Neighborhood="vertices", + Neighborhood='vertices', ), - name="ds_parcellated_reho", + name='ds_parcellated_reho', run_without_submitting=True, mem_gb=1, - iterfield=["segmentation", "in_file", "meta_dict"], + iterfield=['segmentation', 'in_file', 'meta_dict'], ) workflow.connect([ (inputnode, ds_parcellated_reho, [ - ("atlas_names", "segmentation"), - ("parcellated_reho", "in_file"), + ('atlas_names', 'segmentation'), + ('parcellated_reho', 'in_file'), ]), - (add_reho_to_src, ds_parcellated_reho, [("metadata", "meta_dict")]), + (add_reho_to_src, ds_parcellated_reho, [('metadata', 'meta_dict')]), ]) # fmt:skip if bandpass_filter: @@ -765,22 +765,22 @@ def init_postproc_derivatives_wf( DerivativesDataSink( source_file=name_source, check_hdr=False, - dismiss_entities=["desc", "den"], + dismiss_entities=['desc', 'den'], cohort=cohort, - den="91k" if file_format == "cifti" else None, - statistic="alff", - suffix="boldmap", - extension=".dscalar.nii" if file_format == "cifti" else ".nii.gz", + den='91k' if file_format == 'cifti' else None, + statistic='alff', + suffix='boldmap', + extension='.dscalar.nii' if file_format == 'cifti' else '.nii.gz', # Metadata SoftwareFilters=software_filters, ), - name="ds_alff", + name='ds_alff', run_without_submitting=True, mem_gb=1, ) workflow.connect([ - (inputnode, ds_alff, [("alff", "in_file")]), - (denoised_src, ds_alff, [("out", "Sources")]), + (inputnode, ds_alff, [('alff', 'in_file')]), + (denoised_src, ds_alff, [('out', 'Sources')]), ]) # fmt:skip if smoothing: @@ -790,34 +790,34 @@ def init_postproc_derivatives_wf( dataset_links=config.execution.dataset_links, out_dir=str(output_dir), ), - name="alff_src", + name='alff_src', run_without_submitting=True, mem_gb=1, ) - workflow.connect([(ds_alff, alff_src, [("out_file", "in1")])]) + workflow.connect([(ds_alff, alff_src, [('out_file', 'in1')])]) ds_smoothed_alff = pe.Node( DerivativesDataSink( source_file=name_source, - dismiss_entities=["den"], + dismiss_entities=['den'], cohort=cohort, - desc="smooth", - den="91k" if file_format == "cifti" else None, - statistic="alff", - suffix="boldmap", - extension=".dscalar.nii" if file_format == "cifti" else ".nii.gz", + desc='smooth', + den='91k' if file_format == 'cifti' else None, + statistic='alff', + suffix='boldmap', + extension='.dscalar.nii' if file_format == 'cifti' else '.nii.gz', check_hdr=False, # Metadata SoftwareFilters=software_filters, FWHM=smoothing, ), - name="ds_smoothed_alff", + name='ds_smoothed_alff', run_without_submitting=True, mem_gb=1, ) workflow.connect([ - (inputnode, ds_smoothed_alff, [("smoothed_alff", "in_file")]), - (alff_src, ds_smoothed_alff, [("out", "Sources")]), + (inputnode, ds_smoothed_alff, [('smoothed_alff', 'in_file')]), + (alff_src, ds_smoothed_alff, [('out', 'Sources')]), ]) # fmt:skip if config.execution.atlases: @@ -829,34 +829,34 @@ def init_postproc_derivatives_wf( ), run_without_submitting=True, mem_gb=1, - name="add_alff_to_src", - iterfield=["metadata"], + name='add_alff_to_src', + iterfield=['metadata'], ) workflow.connect([ - (make_atlas_dict, add_alff_to_src, [("metadata", "metadata")]), - (ds_alff, add_alff_to_src, 
[("out_file", "in1")]), + (make_atlas_dict, add_alff_to_src, [('metadata', 'metadata')]), + (ds_alff, add_alff_to_src, [('out_file', 'in1')]), ]) # fmt:skip ds_parcellated_alff = pe.MapNode( DerivativesDataSink( source_file=name_source, - dismiss_entities=["desc", "den", "res"], + dismiss_entities=['desc', 'den', 'res'], cohort=cohort, - statistic="alff", - suffix="bold", - extension=".tsv", + statistic='alff', + suffix='bold', + extension='.tsv', ), - name="ds_parcellated_alff", + name='ds_parcellated_alff', run_without_submitting=True, mem_gb=1, - iterfield=["segmentation", "in_file", "meta_dict"], + iterfield=['segmentation', 'in_file', 'meta_dict'], ) workflow.connect([ (inputnode, ds_parcellated_alff, [ - ("atlas_names", "segmentation"), - ("parcellated_alff", "in_file"), + ('atlas_names', 'segmentation'), + ('parcellated_alff', 'in_file'), ]), - (add_alff_to_src, ds_parcellated_alff, [("metadata", "meta_dict")]), + (add_alff_to_src, ds_parcellated_alff, [('metadata', 'meta_dict')]), ]) # fmt:skip return workflow diff --git a/xcp_d/workflows/bold/plotting.py b/xcp_d/workflows/bold/plotting.py index 6464a1ceb..125de0e6c 100644 --- a/xcp_d/workflows/bold/plotting.py +++ b/xcp_d/workflows/bold/plotting.py @@ -20,14 +20,14 @@ from xcp_d.utils.utils import get_bold2std_and_t1w_xfms, get_std2bold_xfms from xcp_d.workflows.plotting import init_plot_overlay_wf -LOGGER = logging.getLogger("nipype.workflow") +LOGGER = logging.getLogger('nipype.workflow') @fill_doc def init_qc_report_wf( TR, head_radius, - name="qc_report_wf", + name='qc_report_wf', ): """Generate quality control figures and a QC file. @@ -88,86 +88,86 @@ def init_qc_report_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "name_source", - "preprocessed_bold", - "denoised_interpolated_bold", - "censored_denoised_bold", - "dummy_scans", - "motion_file", - "temporal_mask", - "run_index", # will only be set for concatenated data + 'name_source', + 'preprocessed_bold', + 'denoised_interpolated_bold', + 'censored_denoised_bold', + 'dummy_scans', + 'motion_file', + 'temporal_mask', + 'run_index', # will only be set for concatenated data # nifti-only inputs - "bold_mask", - "anat", # T1w/T2w image in anatomical space - "anat_brainmask", - "boldref", - "template_to_anat_xfm", + 'bold_mask', + 'anat', # T1w/T2w image in anatomical space + 'anat_brainmask', + 'boldref', + 'template_to_anat_xfm', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( niu.IdentityInterface( fields=[ - "qc_file", + 'qc_file', ], ), - name="outputnode", + name='outputnode', ) nlin2009casym_brain_mask = str( get_template( - "MNI152NLin2009cAsym", + 'MNI152NLin2009cAsym', resolution=2, - desc="brain", - suffix="mask", - extension=[".nii", ".nii.gz"], + desc='brain', + suffix='mask', + extension=['.nii', '.nii.gz'], ) ) - if config.workflow.file_format == "nifti": + if config.workflow.file_format == 'nifti': # We need the BOLD mask in T1w and standard spaces for QC metric calculation. # This is only possible for nifti inputs. 
get_native2space_transforms = pe.Node( Function( - input_names=["bold_file", "template_to_anat_xfm"], + input_names=['bold_file', 'template_to_anat_xfm'], output_names=[ - "bold_to_std_xfms", - "bold_to_std_xfms_invert", - "bold_to_t1w_xfms", - "bold_to_t1w_xfms_invert", + 'bold_to_std_xfms', + 'bold_to_std_xfms_invert', + 'bold_to_t1w_xfms', + 'bold_to_t1w_xfms_invert', ], function=get_bold2std_and_t1w_xfms, ), - name="get_native2space_transforms", + name='get_native2space_transforms', ) workflow.connect([ (inputnode, get_native2space_transforms, [ - ("name_source", "bold_file"), - ("template_to_anat_xfm", "template_to_anat_xfm"), + ('name_source', 'bold_file'), + ('template_to_anat_xfm', 'template_to_anat_xfm'), ]), ]) # fmt:skip warp_boldmask_to_t1w = pe.Node( ApplyTransforms( dimension=3, - interpolation="NearestNeighbor", + interpolation='NearestNeighbor', num_threads=config.nipype.omp_nthreads, ), - name="warp_boldmask_to_t1w", + name='warp_boldmask_to_t1w', mem_gb=1, n_procs=config.nipype.omp_nthreads, ) workflow.connect([ (inputnode, warp_boldmask_to_t1w, [ - ("bold_mask", "input_image"), - ("anat", "reference_image"), + ('bold_mask', 'input_image'), + ('anat', 'reference_image'), ]), (get_native2space_transforms, warp_boldmask_to_t1w, [ - ("bold_to_t1w_xfms", "transforms"), - ("bold_to_t1w_xfms_invert", "invert_transform_flags"), + ('bold_to_t1w_xfms', 'transforms'), + ('bold_to_t1w_xfms_invert', 'invert_transform_flags'), ]), ]) # fmt:skip @@ -175,18 +175,18 @@ def init_qc_report_wf( ApplyTransforms( dimension=3, reference_image=nlin2009casym_brain_mask, - interpolation="NearestNeighbor", + interpolation='NearestNeighbor', num_threads=config.nipype.omp_nthreads, ), - name="warp_boldmask_to_mni", + name='warp_boldmask_to_mni', mem_gb=1, n_procs=config.nipype.omp_nthreads, ) workflow.connect([ - (inputnode, warp_boldmask_to_mni, [("bold_mask", "input_image")]), + (inputnode, warp_boldmask_to_mni, [('bold_mask', 'input_image')]), (get_native2space_transforms, warp_boldmask_to_mni, [ - ("bold_to_std_xfms", "transforms"), - ("bold_to_std_xfms_invert", "invert_transform_flags"), + ('bold_to_std_xfms', 'transforms'), + ('bold_to_std_xfms_invert', 'invert_transform_flags'), ]), ]) # fmt:skip @@ -194,21 +194,21 @@ def init_qc_report_wf( warp_anatmask_to_t1w = pe.Node( ApplyTransforms( dimension=3, - interpolation="NearestNeighbor", + interpolation='NearestNeighbor', num_threads=config.nipype.omp_nthreads, ), - name="warp_anatmask_to_t1w", + name='warp_anatmask_to_t1w', mem_gb=1, n_procs=config.nipype.omp_nthreads, ) workflow.connect([ (inputnode, warp_anatmask_to_t1w, [ - ("bold_mask", "input_image"), - ("anat", "reference_image"), + ('bold_mask', 'input_image'), + ('anat', 'reference_image'), ]), (get_native2space_transforms, warp_anatmask_to_t1w, [ - ("bold_to_t1w_xfms", "transforms"), - ("bold_to_t1w_xfms_invert", "invert_transform_flags"), + ('bold_to_t1w_xfms', 'transforms'), + ('bold_to_t1w_xfms_invert', 'invert_transform_flags'), ]), ]) # fmt:skip @@ -216,24 +216,24 @@ def init_qc_report_wf( # Get the set of transforms from MNI152NLin2009cAsym (the dseg) to the BOLD space. 
get_mni_to_bold_xfms = pe.Node( Function( - input_names=["bold_file", "source_file", "source_space"], - output_names=["transforms"], + input_names=['bold_file', 'source_file', 'source_space'], + output_names=['transforms'], function=get_std2bold_xfms, ), - name="get_std2native_transform", + name='get_std2native_transform', ) get_mni_to_bold_xfms.inputs.source_file = None - get_mni_to_bold_xfms.inputs.source_space = "MNI152NLin2009cAsym" - workflow.connect([(inputnode, get_mni_to_bold_xfms, [("name_source", "bold_file")])]) + get_mni_to_bold_xfms.inputs.source_space = 'MNI152NLin2009cAsym' + workflow.connect([(inputnode, get_mni_to_bold_xfms, [('name_source', 'bold_file')])]) # Use MNI152NLin2009cAsym tissue-type segmentation file for carpet plots. dseg_file = str( get_template( - "MNI152NLin2009cAsym", + 'MNI152NLin2009cAsym', resolution=1, - desc="carpet", - suffix="dseg", - extension=[".nii", ".nii.gz"], + desc='carpet', + suffix='dseg', + extension=['.nii', '.nii.gz'], ) ) @@ -242,16 +242,16 @@ def init_qc_report_wf( ApplyTransforms( dimension=3, input_image=dseg_file, - interpolation="GenericLabel", + interpolation='GenericLabel', num_threads=config.nipype.omp_nthreads, ), - name="warp_dseg_to_bold", + name='warp_dseg_to_bold', mem_gb=3, n_procs=config.nipype.omp_nthreads, ) workflow.connect([ - (inputnode, warp_dseg_to_bold, [("boldref", "reference_image")]), - (get_mni_to_bold_xfms, warp_dseg_to_bold, [("transforms", "transforms")]), + (inputnode, warp_dseg_to_bold, [('boldref', 'reference_image')]), + (get_mni_to_bold_xfms, warp_dseg_to_bold, [('transforms', 'transforms')]), ]) # fmt:skip if config.workflow.linc_qc: @@ -261,27 +261,27 @@ def init_qc_report_wf( head_radius=head_radius, template_mask=nlin2009casym_brain_mask, ), - name="make_linc_qc", + name='make_linc_qc', mem_gb=2, ) workflow.connect([ (inputnode, make_linc_qc, [ - ("name_source", "name_source"), - ("preprocessed_bold", "bold_file"), - ("censored_denoised_bold", "cleaned_file"), - ("motion_file", "motion_file"), - ("temporal_mask", "temporal_mask"), - ("dummy_scans", "dummy_scans"), + ('name_source', 'name_source'), + ('preprocessed_bold', 'bold_file'), + ('censored_denoised_bold', 'cleaned_file'), + ('motion_file', 'motion_file'), + ('temporal_mask', 'temporal_mask'), + ('dummy_scans', 'dummy_scans'), ]), - (make_linc_qc, outputnode, [("qc_file", "qc_file")]), + (make_linc_qc, outputnode, [('qc_file', 'qc_file')]), ]) # fmt:skip - if config.workflow.file_format == "nifti": + if config.workflow.file_format == 'nifti': workflow.connect([ - (inputnode, make_linc_qc, [("bold_mask", "bold_mask_inputspace")]), - (warp_boldmask_to_t1w, make_linc_qc, [("output_image", "bold_mask_anatspace")]), - (warp_boldmask_to_mni, make_linc_qc, [("output_image", "bold_mask_stdspace")]), - (warp_anatmask_to_t1w, make_linc_qc, [("output_image", "anat_mask_anatspace")]), + (inputnode, make_linc_qc, [('bold_mask', 'bold_mask_inputspace')]), + (warp_boldmask_to_t1w, make_linc_qc, [('output_image', 'bold_mask_anatspace')]), + (warp_boldmask_to_mni, make_linc_qc, [('output_image', 'bold_mask_stdspace')]), + (warp_anatmask_to_t1w, make_linc_qc, [('output_image', 'anat_mask_anatspace')]), ]) # fmt:skip else: make_linc_qc.inputs.bold_mask_inputspace = None @@ -289,80 +289,80 @@ def init_qc_report_wf( ds_qc_metadata = pe.Node( DerivativesDataSink( dismiss_entities=list(DerivativesDataSink._allowed_entities), - allowed_entities=["desc"], - desc="linc", - suffix="qc", - extension=".json", + allowed_entities=['desc'], + desc='linc', + suffix='qc', + 
extension='.json', ), - name="ds_qc_metadata", + name='ds_qc_metadata', run_without_submitting=True, ) workflow.connect([ - (inputnode, ds_qc_metadata, [("name_source", "source_file")]), - (make_linc_qc, ds_qc_metadata, [("qc_metadata", "in_file")]), + (inputnode, ds_qc_metadata, [('name_source', 'source_file')]), + (make_linc_qc, ds_qc_metadata, [('qc_metadata', 'in_file')]), ]) # fmt:skip make_qc_plots_nipreps = pe.Node( QCPlots(TR=TR, head_radius=head_radius), - name="make_qc_plots_nipreps", + name='make_qc_plots_nipreps', mem_gb=2, ) workflow.connect([ (inputnode, make_qc_plots_nipreps, [ - ("preprocessed_bold", "bold_file"), - ("censored_denoised_bold", "cleaned_file"), - ("motion_file", "motion_file"), - ("temporal_mask", "temporal_mask"), + ('preprocessed_bold', 'bold_file'), + ('censored_denoised_bold', 'cleaned_file'), + ('motion_file', 'motion_file'), + ('temporal_mask', 'temporal_mask'), ]), ]) # fmt:skip - if config.workflow.file_format == "nifti": + if config.workflow.file_format == 'nifti': workflow.connect([ - (inputnode, make_qc_plots_nipreps, [("bold_mask", "mask_file")]), - (warp_dseg_to_bold, make_qc_plots_nipreps, [("output_image", "seg_file")]), + (inputnode, make_qc_plots_nipreps, [('bold_mask', 'mask_file')]), + (warp_dseg_to_bold, make_qc_plots_nipreps, [('output_image', 'seg_file')]), ]) # fmt:skip else: make_qc_plots_nipreps.inputs.mask_file = None ds_report_preproc_qc_nipreps = pe.Node( - DerivativesDataSink(desc="preprocessing"), - name="ds_report_preproc_qc_nipreps", + DerivativesDataSink(desc='preprocessing'), + name='ds_report_preproc_qc_nipreps', run_without_submitting=False, ) workflow.connect([ - (inputnode, ds_report_preproc_qc_nipreps, [("name_source", "source_file")]), - (make_qc_plots_nipreps, ds_report_preproc_qc_nipreps, [("raw_qcplot", "in_file")]), + (inputnode, ds_report_preproc_qc_nipreps, [('name_source', 'source_file')]), + (make_qc_plots_nipreps, ds_report_preproc_qc_nipreps, [('raw_qcplot', 'in_file')]), ]) # fmt:skip ds_report_postproc_qc_nipreps = pe.Node( - DerivativesDataSink(desc="postprocessing"), - name="ds_report_postproc_qc_nipreps", + DerivativesDataSink(desc='postprocessing'), + name='ds_report_postproc_qc_nipreps', run_without_submitting=False, ) workflow.connect([ - (inputnode, ds_report_postproc_qc_nipreps, [("name_source", "source_file")]), - (make_qc_plots_nipreps, ds_report_postproc_qc_nipreps, [("clean_qcplot", "in_file")]), + (inputnode, ds_report_postproc_qc_nipreps, [('name_source', 'source_file')]), + (make_qc_plots_nipreps, ds_report_postproc_qc_nipreps, [('clean_qcplot', 'in_file')]), ]) # fmt:skip functional_qc = pe.Node( FunctionalSummary(TR=TR), - name="qcsummary", + name='qcsummary', run_without_submitting=False, mem_gb=2, ) workflow.connect([ - (inputnode, functional_qc, [("name_source", "bold_file")]), - (make_linc_qc, functional_qc, [("qc_file", "qc_file")]), + (inputnode, functional_qc, [('name_source', 'bold_file')]), + (make_linc_qc, functional_qc, [('qc_file', 'qc_file')]), ]) # fmt:skip ds_report_qualitycontrol = pe.Node( - DerivativesDataSink(desc="qualitycontrol"), - name="ds_report_qualitycontrol", + DerivativesDataSink(desc='qualitycontrol'), + name='ds_report_qualitycontrol', run_without_submitting=False, ) workflow.connect([ - (inputnode, ds_report_qualitycontrol, [("name_source", "source_file")]), - (functional_qc, ds_report_qualitycontrol, [("out_report", "in_file")]), + (inputnode, ds_report_qualitycontrol, [('name_source', 'source_file')]), + (functional_qc, ds_report_qualitycontrol, 
[('out_report', 'in_file')]), ]) # fmt:skip else: # Need to explicitly add the outputnode to the workflow, since it's not set otherwise. @@ -371,72 +371,72 @@ def init_qc_report_wf( if config.workflow.abcc_qc: make_abcc_qc = pe.Node( ABCCQC(TR=TR), - name="make_abcc_qc", + name='make_abcc_qc', mem_gb=2, ) - workflow.connect([(inputnode, make_abcc_qc, [("motion_file", "motion_file")])]) + workflow.connect([(inputnode, make_abcc_qc, [('motion_file', 'motion_file')])]) ds_abcc_qc = pe.Node( DerivativesDataSink( - datatype="func", - desc="abcc", - suffix="qc", - extension="hdf5", + datatype='func', + desc='abcc', + suffix='qc', + extension='hdf5', ), - name="ds_abcc_qc", + name='ds_abcc_qc', run_without_submitting=True, ) workflow.connect([ - (inputnode, ds_abcc_qc, [("name_source", "source_file")]), - (make_abcc_qc, ds_abcc_qc, [("qc_file", "in_file")]), + (inputnode, ds_abcc_qc, [('name_source', 'source_file')]), + (make_abcc_qc, ds_abcc_qc, [('qc_file', 'in_file')]), ]) # fmt:skip # Generate preprocessing and postprocessing carpet plots. make_qc_plots_es = pe.Node( QCPlotsES(TR=TR, standardize=config.execution.confounds_config is None), - name="make_qc_plots_es", + name='make_qc_plots_es', mem_gb=2, ) workflow.connect([ (inputnode, make_qc_plots_es, [ - ("preprocessed_bold", "preprocessed_bold"), - ("denoised_interpolated_bold", "denoised_interpolated_bold"), - ("motion_file", "motion_file"), - ("temporal_mask", "temporal_mask"), - ("run_index", "run_index"), + ('preprocessed_bold', 'preprocessed_bold'), + ('denoised_interpolated_bold', 'denoised_interpolated_bold'), + ('motion_file', 'motion_file'), + ('temporal_mask', 'temporal_mask'), + ('run_index', 'run_index'), ]), ]) # fmt:skip - if config.workflow.file_format == "nifti": + if config.workflow.file_format == 'nifti': workflow.connect([ - (inputnode, make_qc_plots_es, [("bold_mask", "mask")]), - (warp_dseg_to_bold, make_qc_plots_es, [("output_image", "seg_data")]), + (inputnode, make_qc_plots_es, [('bold_mask', 'mask')]), + (warp_dseg_to_bold, make_qc_plots_es, [('output_image', 'seg_data')]), ]) # fmt:skip ds_report_preproc_qc_es = pe.Node( DerivativesDataSink( - dismiss_entities=["den"], - desc="preprocESQC", + dismiss_entities=['den'], + desc='preprocESQC', ), - name="ds_report_preproc_qc_es", + name='ds_report_preproc_qc_es', run_without_submitting=True, ) workflow.connect([ - (inputnode, ds_report_preproc_qc_es, [("name_source", "source_file")]), - (make_qc_plots_es, ds_report_preproc_qc_es, [("before_process", "in_file")]), + (inputnode, ds_report_preproc_qc_es, [('name_source', 'source_file')]), + (make_qc_plots_es, ds_report_preproc_qc_es, [('before_process', 'in_file')]), ]) # fmt:skip ds_report_postproc_qc_es = pe.Node( DerivativesDataSink( - dismiss_entities=["den"], - desc="postprocESQC", + dismiss_entities=['den'], + desc='postprocESQC', ), - name="ds_report_postproc_qc_es", + name='ds_report_postproc_qc_es', run_without_submitting=True, ) workflow.connect([ - (inputnode, ds_report_postproc_qc_es, [("name_source", "source_file")]), - (make_qc_plots_es, ds_report_postproc_qc_es, [("after_process", "in_file")]), + (inputnode, ds_report_postproc_qc_es, [('name_source', 'source_file')]), + (make_qc_plots_es, ds_report_postproc_qc_es, [('after_process', 'in_file')]), ]) # fmt:skip return workflow @@ -448,7 +448,7 @@ def init_execsummary_functional_plots_wf( t1w_available, t2w_available, mem_gb, - name="execsummary_functional_plots_wf", + name='execsummary_functional_plots_wf', ): """Generate the functional figures for an 
executive summary. @@ -502,17 +502,17 @@ def init_execsummary_functional_plots_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "preproc_nifti", - "boldref", # a nifti boldref - "t1w", - "t2w", # optional + 'preproc_nifti', + 'boldref', # a nifti boldref + 't1w', + 't2w', # optional ], ), - name="inputnode", + name='inputnode', ) if not preproc_nifti: raise ValueError( - "No preprocessed NIfTI found. Executive summary figures cannot be generated." + 'No preprocessed NIfTI found. Executive summary figures cannot be generated.' ) inputnode.inputs.preproc_nifti = preproc_nifti @@ -520,126 +520,126 @@ def init_execsummary_functional_plots_wf( # Get bb_registration_file prefix from fmriprep # TODO: Replace with interfaces. current_bold_file = os.path.basename(preproc_nifti) - if "_space" in current_bold_file: - bb_register_prefix = current_bold_file.split("_space")[0] + if '_space' in current_bold_file: + bb_register_prefix = current_bold_file.split('_space')[0] else: - bb_register_prefix = current_bold_file.split("_desc")[0] + bb_register_prefix = current_bold_file.split('_desc')[0] # TODO: Switch to interface bold_t1w_registration_files = layout.get( - desc=["bbregister", "coreg", "bbr", "flirtbbr", "flirtnobbr"], - extension=".svg", - suffix="bold", - return_type="file", + desc=['bbregister', 'coreg', 'bbr', 'flirtbbr', 'flirtnobbr'], + extension='.svg', + suffix='bold', + return_type='file', ) bold_t1w_registration_files = fnmatch.filter( bold_t1w_registration_files, - f"*/{bb_register_prefix}*", + f'*/{bb_register_prefix}*', ) if not bold_t1w_registration_files: - LOGGER.warning("No coregistration figure found in preprocessing derivatives.") + LOGGER.warning('No coregistration figure found in preprocessing derivatives.') else: bold_t1w_registration_file = bold_t1w_registration_files[0] ds_report_registration = pe.Node( DerivativesDataSink( in_file=bold_t1w_registration_file, - dismiss_entities=["den"], - desc="bbregister", + dismiss_entities=['den'], + desc='bbregister', ), - name="ds_report_registration", + name='ds_report_registration', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(inputnode, ds_report_registration, [("preproc_nifti", "source_file")])]) + workflow.connect([(inputnode, ds_report_registration, [('preproc_nifti', 'source_file')])]) # Calculate the mean bold image calculate_mean_bold = pe.Node( - BinaryMath(expression="np.mean(img, axis=3)"), - name="calculate_mean_bold", - mem_gb=mem_gb["timeseries"], + BinaryMath(expression='np.mean(img, axis=3)'), + name='calculate_mean_bold', + mem_gb=mem_gb['timeseries'], ) - workflow.connect([(inputnode, calculate_mean_bold, [("preproc_nifti", "in_file")])]) + workflow.connect([(inputnode, calculate_mean_bold, [('preproc_nifti', 'in_file')])]) # Plot the mean bold image - plot_meanbold = pe.Node(AnatomicalPlot(), name="plot_meanbold") - workflow.connect([(calculate_mean_bold, plot_meanbold, [("out_file", "in_file")])]) + plot_meanbold = pe.Node(AnatomicalPlot(), name='plot_meanbold') + workflow.connect([(calculate_mean_bold, plot_meanbold, [('out_file', 'in_file')])]) # Write out the figures. 
ds_report_meanbold = pe.Node( DerivativesDataSink( - dismiss_entities=["den"], - desc="mean", + dismiss_entities=['den'], + desc='mean', ), - name="ds_report_meanbold", + name='ds_report_meanbold', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ - (inputnode, ds_report_meanbold, [("preproc_nifti", "source_file")]), - (plot_meanbold, ds_report_meanbold, [("out_file", "in_file")]), + (inputnode, ds_report_meanbold, [('preproc_nifti', 'source_file')]), + (plot_meanbold, ds_report_meanbold, [('out_file', 'in_file')]), ]) # fmt:skip # Plot the reference bold image - plot_boldref = pe.Node(AnatomicalPlot(), name="plot_boldref") - workflow.connect([(inputnode, plot_boldref, [("boldref", "in_file")])]) + plot_boldref = pe.Node(AnatomicalPlot(), name='plot_boldref') + workflow.connect([(inputnode, plot_boldref, [('boldref', 'in_file')])]) # Write out the figures. ds_report_boldref = pe.Node( DerivativesDataSink( - dismiss_entities=["den"], - desc="boldref", + dismiss_entities=['den'], + desc='boldref', ), - name="ds_report_boldref", + name='ds_report_boldref', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ - (inputnode, ds_report_boldref, [("preproc_nifti", "source_file")]), - (plot_boldref, ds_report_boldref, [("out_file", "in_file")]), + (inputnode, ds_report_boldref, [('preproc_nifti', 'source_file')]), + (plot_boldref, ds_report_boldref, [('out_file', 'in_file')]), ]) # fmt:skip # Start plotting the overlay figures # T1 in Task, Task in T1, Task in T2, T2 in Task - anatomicals = (["t1w"] if t1w_available else []) + (["t2w"] if t2w_available else []) + anatomicals = (['t1w'] if t1w_available else []) + (['t2w'] if t2w_available else []) for anat in anatomicals: # Resample BOLD to match resolution of T1w/T2w data resample_bold_to_anat = pe.Node( ResampleToImage(), - name=f"resample_bold_to_{anat}", - mem_gb=mem_gb["resampled"], + name=f'resample_bold_to_{anat}', + mem_gb=mem_gb['resampled'], ) workflow.connect([ - (inputnode, resample_bold_to_anat, [(anat, "target_file")]), - (calculate_mean_bold, resample_bold_to_anat, [("out_file", "in_file")]), + (inputnode, resample_bold_to_anat, [(anat, 'target_file')]), + (calculate_mean_bold, resample_bold_to_anat, [('out_file', 'in_file')]), ]) # fmt:skip plot_anat_on_task_wf = init_plot_overlay_wf( - desc=f"{anat[0].upper()}{anat[1:]}OnTask", - name=f"plot_{anat}_on_task_wf", + desc=f'{anat[0].upper()}{anat[1:]}OnTask', + name=f'plot_{anat}_on_task_wf', ) workflow.connect([ (inputnode, plot_anat_on_task_wf, [ - ("preproc_nifti", "inputnode.name_source"), - (anat, "inputnode.overlay_file"), + ('preproc_nifti', 'inputnode.name_source'), + (anat, 'inputnode.overlay_file'), ]), (resample_bold_to_anat, plot_anat_on_task_wf, [ - ("out_file", "inputnode.underlay_file"), + ('out_file', 'inputnode.underlay_file'), ]), ]) # fmt:skip plot_task_on_anat_wf = init_plot_overlay_wf( - desc=f"TaskOn{anat[0].upper()}{anat[1:]}", - name=f"plot_task_on_{anat}_wf", + desc=f'TaskOn{anat[0].upper()}{anat[1:]}', + name=f'plot_task_on_{anat}_wf', ) workflow.connect([ (inputnode, plot_task_on_anat_wf, [ - ("preproc_nifti", "inputnode.name_source"), - (anat, "inputnode.underlay_file"), + ('preproc_nifti', 'inputnode.name_source'), + (anat, 'inputnode.underlay_file'), ]), (resample_bold_to_anat, plot_task_on_anat_wf, [ - ("out_file", "inputnode.overlay_file"), + ('out_file', 'inputnode.overlay_file'), ]), ]) # fmt:skip diff --git a/xcp_d/workflows/bold/postprocessing.py 
b/xcp_d/workflows/bold/postprocessing.py index 7785850a8..874baabdc 100644 --- a/xcp_d/workflows/bold/postprocessing.py +++ b/xcp_d/workflows/bold/postprocessing.py @@ -37,7 +37,7 @@ def init_prepare_confounds_wf( TR, exact_scans, head_radius, - name="prepare_confounds_wf", + name='prepare_confounds_wf', ): """Prepare confounds. @@ -106,20 +106,20 @@ def init_prepare_confounds_wf( motion_filter_order = config.workflow.motion_filter_order fd_thresh = config.workflow.fd_thresh - dummy_scans_str = "" - if dummy_scans == "auto": + dummy_scans_str = '' + if dummy_scans == 'auto': dummy_scans_str = ( - "Non-steady-state volumes were extracted from the preprocessed confounds " - "and were discarded from both the BOLD data and nuisance regressors. " + 'Non-steady-state volumes were extracted from the preprocessed confounds ' + 'and were discarded from both the BOLD data and nuisance regressors. ' ) elif dummy_scans == 1: dummy_scans_str = ( - "The first volume of both the BOLD data and nuisance " + 'The first volume of both the BOLD data and nuisance ' "regressors was discarded as a non-steady-state volume, or 'dummy scan'. " ) elif dummy_scans > 1: dummy_scans_str = ( - f"The first {num2words(dummy_scans)} volumes of both the BOLD data and nuisance " + f'The first {num2words(dummy_scans)} volumes of both the BOLD data and nuisance ' "regressors were discarded as non-steady-state volumes, or 'dummy scans'. " ) @@ -139,7 +139,7 @@ def init_prepare_confounds_wf( exact_scans=exact_scans, ) else: - censoring_description = "" + censoring_description = '' if config.execution.confounds_config is None: confounds_config = None @@ -157,39 +157,39 @@ def init_prepare_confounds_wf( ) workflow.__desc__ = ( - f" {dummy_scans_str}{motion_description} {censoring_description} {confounds_description}" + f' {dummy_scans_str}{motion_description} {censoring_description} {confounds_description}' ) inputnode = pe.Node( niu.IdentityInterface( fields=[ - "name_source", - "preprocessed_bold", - "dummy_scans", - "motion_file", - "motion_json", - "confounds_files", + 'name_source', + 'preprocessed_bold', + 'dummy_scans', + 'motion_file', + 'motion_json', + 'confounds_files', ], ), - name="inputnode", + name='inputnode', ) inputnode.inputs.dummy_scans = dummy_scans outputnode = pe.Node( niu.IdentityInterface( fields=[ - "preprocessed_bold", - "dummy_scans", - "motion_file", - "motion_metadata", - "temporal_mask", - "temporal_mask_metadata", - "confounds_tsv", - "confounds_images", - "confounds_metadata", + 'preprocessed_bold', + 'dummy_scans', + 'motion_file', + 'motion_metadata', + 'temporal_mask', + 'temporal_mask_metadata', + 'confounds_tsv', + 'confounds_images', + 'confounds_metadata', ], ), - name="outputnode", + name='outputnode', ) # Filter motion parameters and calculate FD for censoring @@ -203,17 +203,17 @@ def init_prepare_confounds_wf( fd_thresh=fd_thresh, head_radius=head_radius, ), - name="process_motion", + name='process_motion', mem_gb=1, n_procs=1, ) workflow.connect([ (inputnode, process_motion, [ - ("motion_file", "motion_file"), - ("motion_json", "motion_json"), + ('motion_file', 'motion_file'), + ('motion_json', 'motion_json'), ]), - (process_motion, outputnode, [("motion_metadata", "motion_metadata")]), + (process_motion, outputnode, [('motion_metadata', 'motion_metadata')]), ]) # fmt:skip if config.execution.confounds_config is not None: @@ -230,153 +230,153 @@ def init_prepare_confounds_wf( motion_filter_type=motion_filter_type, motion_filter_order=motion_filter_order, ), - name="generate_confounds", 
+ name='generate_confounds', mem_gb=2, ) # Load and filter confounds workflow.connect([ (inputnode, generate_confounds, [ - ("name_source", "in_file"), - ("confounds_files", "confounds_files"), + ('name_source', 'in_file'), + ('confounds_files', 'confounds_files'), ]), - (generate_confounds, outputnode, [("confounds_metadata", "confounds_metadata")]), + (generate_confounds, outputnode, [('confounds_metadata', 'confounds_metadata')]), ]) # fmt:skip # A buffer node to hold either the original files or the files with the first N vols removed. dummy_scan_buffer = pe.Node( niu.IdentityInterface( fields=[ - "preprocessed_bold", - "dummy_scans", - "confounds_tsv", - "confounds_images", - "motion_file", - "temporal_mask", + 'preprocessed_bold', + 'dummy_scans', + 'confounds_tsv', + 'confounds_images', + 'motion_file', + 'temporal_mask', ] ), - name="dummy_scan_buffer", + name='dummy_scan_buffer', ) if dummy_scans: remove_dummy_scans = pe.Node( RemoveDummyVolumes(), - name="remove_dummy_scans", + name='remove_dummy_scans', mem_gb=4, ) workflow.connect([ (inputnode, remove_dummy_scans, [ - ("preprocessed_bold", "bold_file"), - ("dummy_scans", "dummy_scans"), + ('preprocessed_bold', 'bold_file'), + ('dummy_scans', 'dummy_scans'), ]), (process_motion, remove_dummy_scans, [ - ("motion_file", "motion_file"), - ("temporal_mask", "temporal_mask"), + ('motion_file', 'motion_file'), + ('temporal_mask', 'temporal_mask'), ]), (remove_dummy_scans, dummy_scan_buffer, [ - ("bold_file_dropped_TR", "preprocessed_bold"), - ("confounds_tsv_dropped_TR", "confounds_tsv"), - ("confounds_images_dropped_TR", "confounds_images"), - ("motion_file_dropped_TR", "motion_file"), - ("temporal_mask_dropped_TR", "temporal_mask"), - ("dummy_scans", "dummy_scans"), + ('bold_file_dropped_TR', 'preprocessed_bold'), + ('confounds_tsv_dropped_TR', 'confounds_tsv'), + ('confounds_images_dropped_TR', 'confounds_images'), + ('motion_file_dropped_TR', 'motion_file'), + ('temporal_mask_dropped_TR', 'temporal_mask'), + ('dummy_scans', 'dummy_scans'), ]), ]) # fmt:skip if config.execution.confounds_config is not None: workflow.connect([ (generate_confounds, remove_dummy_scans, [ - ("confounds_tsv", "confounds_tsv"), - ("confounds_images", "confounds_images"), + ('confounds_tsv', 'confounds_tsv'), + ('confounds_images', 'confounds_images'), ]), ]) # fmt:skip else: workflow.connect([ (inputnode, dummy_scan_buffer, [ - ("dummy_scans", "dummy_scans"), - ("preprocessed_bold", "preprocessed_bold"), + ('dummy_scans', 'dummy_scans'), + ('preprocessed_bold', 'preprocessed_bold'), ]), (process_motion, dummy_scan_buffer, [ - ("motion_file", "motion_file"), - ("temporal_mask", "temporal_mask"), + ('motion_file', 'motion_file'), + ('temporal_mask', 'temporal_mask'), ]), ]) # fmt:skip if config.execution.confounds_config is not None: workflow.connect([ (generate_confounds, dummy_scan_buffer, [ - ("confounds_tsv", "confounds_tsv"), - ("confounds_images", "confounds_images"), + ('confounds_tsv', 'confounds_tsv'), + ('confounds_images', 'confounds_images'), ]), ]) # fmt:skip workflow.connect([ (dummy_scan_buffer, outputnode, [ - ("preprocessed_bold", "preprocessed_bold"), - ("dummy_scans", "dummy_scans"), - ("motion_file", "motion_file"), + ('preprocessed_bold', 'preprocessed_bold'), + ('dummy_scans', 'dummy_scans'), + ('motion_file', 'motion_file'), ]), ]) # fmt:skip if config.execution.confounds_config is not None: workflow.connect([ (dummy_scan_buffer, outputnode, [ - ("confounds_tsv", "confounds_tsv"), - ("confounds_images", "confounds_images"), + 
('confounds_tsv', 'confounds_tsv'), + ('confounds_images', 'confounds_images'), ]), ]) # fmt:skip if config.workflow.dcan_correlation_lengths: random_censor = pe.Node( RandomCensor(exact_scans=exact_scans, random_seed=config.seeds.master), - name="random_censor", + name='random_censor', ) workflow.connect([ (process_motion, random_censor, [ - ("temporal_mask_metadata", "temporal_mask_metadata"), + ('temporal_mask_metadata', 'temporal_mask_metadata'), ]), - (dummy_scan_buffer, random_censor, [("temporal_mask", "temporal_mask")]), + (dummy_scan_buffer, random_censor, [('temporal_mask', 'temporal_mask')]), (random_censor, outputnode, [ - ("temporal_mask", "temporal_mask"), - ("temporal_mask_metadata", "temporal_mask_metadata"), + ('temporal_mask', 'temporal_mask'), + ('temporal_mask_metadata', 'temporal_mask_metadata'), ]), ]) # fmt:skip else: workflow.connect([ - (process_motion, outputnode, [("temporal_mask_metadata", "temporal_mask_metadata")]), - (dummy_scan_buffer, outputnode, [("temporal_mask", "temporal_mask")]), + (process_motion, outputnode, [('temporal_mask_metadata', 'temporal_mask_metadata')]), + (dummy_scan_buffer, outputnode, [('temporal_mask', 'temporal_mask')]), ]) # fmt:skip if config.execution.confounds_config is not None: plot_design_matrix = pe.Node( niu.Function( - input_names=["design_matrix", "temporal_mask"], - output_names=["design_matrix_figure"], + input_names=['design_matrix', 'temporal_mask'], + output_names=['design_matrix_figure'], function=_plot_design_matrix, ), - name="plot_design_matrix", + name='plot_design_matrix', ) workflow.connect([ - (dummy_scan_buffer, plot_design_matrix, [("confounds_tsv", "design_matrix")]), - (outputnode, plot_design_matrix, [("temporal_mask", "temporal_mask")]), + (dummy_scan_buffer, plot_design_matrix, [('confounds_tsv', 'design_matrix')]), + (outputnode, plot_design_matrix, [('temporal_mask', 'temporal_mask')]), ]) # fmt:skip ds_report_design_matrix = pe.Node( DerivativesDataSink( - dismiss_entities=["space", "res", "den", "desc"], - suffix="design", - extension=".svg", + dismiss_entities=['space', 'res', 'den', 'desc'], + suffix='design', + extension='.svg', ), - name="ds_report_design_matrix", + name='ds_report_design_matrix', run_without_submitting=False, ) workflow.connect([ - (inputnode, ds_report_design_matrix, [("name_source", "source_file")]), - (plot_design_matrix, ds_report_design_matrix, [("design_matrix_figure", "in_file")]), + (inputnode, ds_report_design_matrix, [('name_source', 'source_file')]), + (plot_design_matrix, ds_report_design_matrix, [('design_matrix_figure', 'in_file')]), ]) # fmt:skip censor_report = pe.Node( @@ -386,37 +386,37 @@ def init_prepare_confounds_wf( fd_thresh=fd_thresh, head_radius=head_radius, ), - name="censor_report", + name='censor_report', mem_gb=2, ) workflow.connect([ # use the full version of the confounds, for dummy scans in the figure - (process_motion, censor_report, [("motion_file", "motion_file")]), - (dummy_scan_buffer, censor_report, [("dummy_scans", "dummy_scans")]), - (outputnode, censor_report, [("temporal_mask", "temporal_mask")]), + (process_motion, censor_report, [('motion_file', 'motion_file')]), + (dummy_scan_buffer, censor_report, [('dummy_scans', 'dummy_scans')]), + (outputnode, censor_report, [('temporal_mask', 'temporal_mask')]), ]) # fmt:skip ds_report_censoring = pe.Node( DerivativesDataSink( - desc="censoring", - suffix="motion", - extension=".svg", + desc='censoring', + suffix='motion', + extension='.svg', ), - name="ds_report_censoring", + 
name='ds_report_censoring', run_without_submitting=False, ) workflow.connect([ - (inputnode, ds_report_censoring, [("name_source", "source_file")]), - (censor_report, ds_report_censoring, [("out_file", "in_file")]), + (inputnode, ds_report_censoring, [('name_source', 'source_file')]), + (censor_report, ds_report_censoring, [('out_file', 'in_file')]), ]) # fmt:skip return workflow @fill_doc -def init_despike_wf(TR, name="despike_wf"): +def init_despike_wf(TR, name='despike_wf'): """Despike BOLD data with AFNI's 3dDespike. Despiking truncates large spikes in the BOLD times series. @@ -461,17 +461,17 @@ def init_despike_wf(TR, name="despike_wf"): file_format = config.workflow.file_format omp_nthreads = config.nipype.omp_nthreads - inputnode = pe.Node(niu.IdentityInterface(fields=["bold_file"]), name="inputnode") - outputnode = pe.Node(niu.IdentityInterface(fields=["bold_file"]), name="outputnode") + inputnode = pe.Node(niu.IdentityInterface(fields=['bold_file']), name='inputnode') + outputnode = pe.Node(niu.IdentityInterface(fields=['bold_file']), name='outputnode') despike3d = pe.Node( - DespikePatch(outputtype="NIFTI_GZ", args="-nomask -NEW"), - name="despike3d", + DespikePatch(outputtype='NIFTI_GZ', args='-nomask -NEW'), + name='despike3d', mem_gb=4, n_procs=omp_nthreads, ) - if file_format == "cifti": + if file_format == 'cifti': workflow.__desc__ = """ The BOLD data were converted to NIfTI format, despiked with *AFNI*'s *3dDespike*, and converted back to CIFTI format. @@ -479,27 +479,27 @@ def init_despike_wf(TR, name="despike_wf"): # first, convert the cifti to a nifti convert_to_nifti = pe.Node( - CiftiConvert(target="to", num_threads=config.nipype.omp_nthreads), - name="convert_to_nifti", + CiftiConvert(target='to', num_threads=config.nipype.omp_nthreads), + name='convert_to_nifti', mem_gb=4, n_procs=config.nipype.omp_nthreads, ) workflow.connect([ - (inputnode, convert_to_nifti, [("bold_file", "in_file")]), - (convert_to_nifti, despike3d, [("out_file", "in_file")]), + (inputnode, convert_to_nifti, [('bold_file', 'in_file')]), + (convert_to_nifti, despike3d, [('out_file', 'in_file')]), ]) # fmt:skip # finally, convert the despiked nifti back to cifti convert_to_cifti = pe.Node( - CiftiConvert(target="from", TR=TR, num_threads=config.nipype.omp_nthreads), - name="convert_to_cifti", + CiftiConvert(target='from', TR=TR, num_threads=config.nipype.omp_nthreads), + name='convert_to_cifti', mem_gb=4, n_procs=config.nipype.omp_nthreads, ) workflow.connect([ - (inputnode, convert_to_cifti, [("bold_file", "cifti_template")]), - (despike3d, convert_to_cifti, [("out_file", "in_file")]), - (convert_to_cifti, outputnode, [("out_file", "bold_file")]), + (inputnode, convert_to_cifti, [('bold_file', 'cifti_template')]), + (despike3d, convert_to_cifti, [('out_file', 'in_file')]), + (convert_to_cifti, outputnode, [('out_file', 'bold_file')]), ]) # fmt:skip else: @@ -507,15 +507,15 @@ def init_despike_wf(TR, name="despike_wf"): The BOLD data were despiked with *AFNI*'s *3dDespike*. """ workflow.connect([ - (inputnode, despike3d, [("bold_file", "in_file")]), - (despike3d, outputnode, [("out_file", "bold_file")]), + (inputnode, despike3d, [('bold_file', 'in_file')]), + (despike3d, outputnode, [('out_file', 'bold_file')]), ]) # fmt:skip return workflow @fill_doc -def init_denoise_bold_wf(TR, mem_gb, name="denoise_bold_wf"): +def init_denoise_bold_wf(TR, mem_gb, name='denoise_bold_wf'): """Denoise BOLD data. 
Workflow Graph @@ -573,72 +573,72 @@ def init_denoise_bold_wf(TR, mem_gb, name="denoise_bold_wf"): """ if fd_thresh > 0: workflow.__desc__ += ( - "Any volumes censored earlier in the workflow were first cubic spline interpolated in " - "the BOLD data. " - "Outlier volumes at the beginning or end of the time series were replaced with the " + 'Any volumes censored earlier in the workflow were first cubic spline interpolated in ' + 'the BOLD data. ' + 'Outlier volumes at the beginning or end of the time series were replaced with the ' "closest low-motion volume's values, " - "as cubic spline interpolation can produce extreme extrapolations. " + 'as cubic spline interpolation can produce extreme extrapolations. ' ) if bandpass_filter: if low_pass > 0 and high_pass > 0: - btype = "band-pass" - preposition = "between" - filt_input = f"{high_pass}-{low_pass}" + btype = 'band-pass' + preposition = 'between' + filt_input = f'{high_pass}-{low_pass}' elif high_pass > 0: - btype = "high-pass" - preposition = "above" - filt_input = f"{high_pass}" + btype = 'high-pass' + preposition = 'above' + filt_input = f'{high_pass}' elif low_pass > 0: - btype = "low-pass" - preposition = "below" - filt_input = f"{low_pass}" + btype = 'low-pass' + preposition = 'below' + filt_input = f'{low_pass}' workflow.__desc__ += ( - f"The timeseries were {btype} filtered using a(n) " - f"{num2words(bpf_order, ordinal=True)}-order Butterworth filter, " - f"in order to retain signals {preposition} {filt_input} Hz. " - "The same filter was applied to the confounds." + f'The timeseries were {btype} filtered using a(n) ' + f'{num2words(bpf_order, ordinal=True)}-order Butterworth filter, ' + f'in order to retain signals {preposition} {filt_input} Hz. ' + 'The same filter was applied to the confounds.' ) if fd_thresh > 0: workflow.__desc__ += ( - " The resulting time series were then denoised via linear regression, " - "in which the low-motion volumes from the BOLD time series and confounds were used to " - "calculate parameter estimates, and then the interpolated time series were denoised " - "using the low-motion parameter estimates. " - "The interpolated time series were then censored using the temporal mask." + ' The resulting time series were then denoised via linear regression, ' + 'in which the low-motion volumes from the BOLD time series and confounds were used to ' + 'calculate parameter estimates, and then the interpolated time series were denoised ' + 'using the low-motion parameter estimates. ' + 'The interpolated time series were then censored using the temporal mask.' ) else: workflow.__desc__ += ( - " The resulting time series were then denoised using linear regression. " + ' The resulting time series were then denoised using linear regression. 
' ) inputnode = pe.Node( niu.IdentityInterface( fields=[ - "preprocessed_bold", - "temporal_mask", - "confounds_tsv", - "confounds_images", - "mask", # only used for NIFTIs + 'preprocessed_bold', + 'temporal_mask', + 'confounds_tsv', + 'confounds_images', + 'mask', # only used for NIFTIs ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( niu.IdentityInterface( fields=[ - "denoised_interpolated_bold", - "censored_denoised_bold", - "smoothed_denoised_bold", - "denoised_bold", + 'denoised_interpolated_bold', + 'censored_denoised_bold', + 'smoothed_denoised_bold', + 'denoised_bold', ], ), - name="outputnode", + name='outputnode', ) - denoising_interface = DenoiseCifti if (file_format == "cifti") else DenoiseNifti + denoising_interface = DenoiseCifti if (file_format == 'cifti') else DenoiseNifti regress_and_filter_bold = pe.Node( denoising_interface( TR=TR, @@ -647,62 +647,62 @@ def init_denoise_bold_wf(TR, mem_gb, name="denoise_bold_wf"): filter_order=bpf_order, bandpass_filter=bandpass_filter, ), - name="regress_and_filter_bold", - mem_gb=mem_gb["timeseries"], + name='regress_and_filter_bold', + mem_gb=mem_gb['timeseries'], ) workflow.connect([ (inputnode, regress_and_filter_bold, [ - ("preprocessed_bold", "preprocessed_bold"), - ("confounds_tsv", "confounds_tsv"), - ("confounds_images", "confounds_images"), - ("temporal_mask", "temporal_mask"), + ('preprocessed_bold', 'preprocessed_bold'), + ('confounds_tsv', 'confounds_tsv'), + ('confounds_images', 'confounds_images'), + ('temporal_mask', 'temporal_mask'), ]), (regress_and_filter_bold, outputnode, [ - ("denoised_interpolated_bold", "denoised_interpolated_bold"), + ('denoised_interpolated_bold', 'denoised_interpolated_bold'), ]), ]) # fmt:skip - if file_format == "nifti": - workflow.connect([(inputnode, regress_and_filter_bold, [("mask", "mask")])]) + if file_format == 'nifti': + workflow.connect([(inputnode, regress_and_filter_bold, [('mask', 'mask')])]) censor_interpolated_data = pe.Node( - Censor(column="framewise_displacement"), - name="censor_interpolated_data", - mem_gb=mem_gb["resampled"], + Censor(column='framewise_displacement'), + name='censor_interpolated_data', + mem_gb=mem_gb['resampled'], ) workflow.connect([ - (inputnode, censor_interpolated_data, [("temporal_mask", "temporal_mask")]), + (inputnode, censor_interpolated_data, [('temporal_mask', 'temporal_mask')]), (regress_and_filter_bold, censor_interpolated_data, [ - ("denoised_interpolated_bold", "in_file"), + ('denoised_interpolated_bold', 'in_file'), ]), - (censor_interpolated_data, outputnode, [("out_file", "censored_denoised_bold")]), + (censor_interpolated_data, outputnode, [('out_file', 'censored_denoised_bold')]), ]) # fmt:skip denoised_bold_buffer = pe.Node( - niu.IdentityInterface(fields=["denoised_bold"]), - name="denoised_bold_buffer", + niu.IdentityInterface(fields=['denoised_bold']), + name='denoised_bold_buffer', ) if config.workflow.output_interpolated: workflow.connect([ (regress_and_filter_bold, denoised_bold_buffer, [ - ("denoised_interpolated_bold", "denoised_bold"), + ('denoised_interpolated_bold', 'denoised_bold'), ]), ]) # fmt:skip else: workflow.connect([ - (censor_interpolated_data, denoised_bold_buffer, [("out_file", "denoised_bold")]), + (censor_interpolated_data, denoised_bold_buffer, [('out_file', 'denoised_bold')]), ]) # fmt:skip - workflow.connect([(denoised_bold_buffer, outputnode, [("denoised_bold", "denoised_bold")])]) + workflow.connect([(denoised_bold_buffer, outputnode, [('denoised_bold', 'denoised_bold')])]) if 
smoothing: resd_smoothing_wf = init_resd_smoothing_wf(mem_gb=mem_gb) workflow.connect([ - (denoised_bold_buffer, resd_smoothing_wf, [("denoised_bold", "inputnode.bold_file")]), + (denoised_bold_buffer, resd_smoothing_wf, [('denoised_bold', 'inputnode.bold_file')]), (resd_smoothing_wf, outputnode, [ - ("outputnode.smoothed_bold", "smoothed_denoised_bold"), + ('outputnode.smoothed_bold', 'smoothed_denoised_bold'), ]), ]) # fmt:skip @@ -710,7 +710,7 @@ def init_denoise_bold_wf(TR, mem_gb, name="denoise_bold_wf"): @fill_doc -def init_resd_smoothing_wf(mem_gb, name="resd_smoothing_wf"): +def init_resd_smoothing_wf(mem_gb, name='resd_smoothing_wf'): """Smooth BOLD residuals. Workflow Graph @@ -744,12 +744,12 @@ def init_resd_smoothing_wf(mem_gb, name="resd_smoothing_wf"): smoothing = config.workflow.smoothing file_format = config.workflow.file_format - inputnode = pe.Node(niu.IdentityInterface(fields=["bold_file"]), name="inputnode") - outputnode = pe.Node(niu.IdentityInterface(fields=["smoothed_bold"]), name="outputnode") + inputnode = pe.Node(niu.IdentityInterface(fields=['bold_file']), name='inputnode') + outputnode = pe.Node(niu.IdentityInterface(fields=['smoothed_bold']), name='outputnode') # Turn specified FWHM (Full-Width at Half Maximum) to standard deviation. sigma_lx = fwhm2sigma(smoothing) - if file_format == "cifti": + if file_format == 'cifti': workflow.__desc__ = f""" \ The denoised BOLD was then smoothed using *Connectome Workbench* with a Gaussian kernel (FWHM={str(smoothing)} mm). @@ -760,44 +760,44 @@ def init_resd_smoothing_wf(mem_gb, name="resd_smoothing_wf"): CiftiSmooth( sigma_surf=sigma_lx, # the size of the surface kernel sigma_vol=sigma_lx, # the volume of the surface kernel - direction="COLUMN", # which direction to smooth along@ + direction='COLUMN', # which direction to smooth along@ # pull out atlases for each hemisphere right_surf=str( get_template( - template="fsLR", + template='fsLR', space=None, - hemi="R", - density="32k", + hemi='R', + density='32k', desc=None, - suffix="sphere", + suffix='sphere', ) ), left_surf=str( get_template( - template="fsLR", + template='fsLR', space=None, - hemi="L", - density="32k", + hemi='L', + density='32k', desc=None, - suffix="sphere", + suffix='sphere', ) ), num_threads=config.nipype.omp_nthreads, ), - name="cifti_smoothing", - mem_gb=mem_gb["timeseries"], + name='cifti_smoothing', + mem_gb=mem_gb['timeseries'], n_procs=config.nipype.omp_nthreads, ) # Always check the intent code in CiftiSmooth's output file fix_cifti_intent = pe.Node( FixCiftiIntent(), - name="fix_cifti_intent", + name='fix_cifti_intent', mem_gb=1, ) workflow.connect([ - (smooth_data, fix_cifti_intent, [("out_file", "in_file")]), - (fix_cifti_intent, outputnode, [("out_file", "smoothed_bold")]), + (smooth_data, fix_cifti_intent, [('out_file', 'in_file')]), + (fix_cifti_intent, outputnode, [('out_file', 'smoothed_bold')]), ]) # fmt:skip else: @@ -807,11 +807,11 @@ def init_resd_smoothing_wf(mem_gb, name="resd_smoothing_wf"): # Use nilearn to smooth the image smooth_data = pe.Node( Smooth(fwhm=smoothing), # FWHM = kernel size - name="nifti_smoothing", - mem_gb=mem_gb["timeseries"], + name='nifti_smoothing', + mem_gb=mem_gb['timeseries'], ) - workflow.connect([(smooth_data, outputnode, [("out_file", "smoothed_bold")])]) + workflow.connect([(smooth_data, outputnode, [('out_file', 'smoothed_bold')])]) - workflow.connect([(inputnode, smooth_data, [("bold_file", "in_file")])]) + workflow.connect([(inputnode, smooth_data, [('bold_file', 'in_file')])]) return workflow 
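# The smoothing workflow above converts the user-specified FWHM into a Gaussian
# sigma via xcp_d's fwhm2sigma helper (sigma_lx = fwhm2sigma(smoothing)) before
# passing it to Connectome Workbench or nilearn. The helper itself is not part of
# this diff; the snippet below is only a minimal sketch of the standard
# FWHM-to-sigma conversion it presumably performs,
# sigma = FWHM / (2 * sqrt(2 * ln 2)) ~= FWHM / 2.3548.
import math


def fwhm_to_sigma(fwhm_mm: float) -> float:
    """Convert a Gaussian kernel's full width at half maximum to its standard deviation."""
    return fwhm_mm / (2 * math.sqrt(2 * math.log(2)))


# Example: a 6 mm FWHM kernel corresponds to a sigma of roughly 2.548 mm.
print(fwhm_to_sigma(6.0))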
diff --git a/xcp_d/workflows/parcellation.py b/xcp_d/workflows/parcellation.py index 6eedce96e..7e5190d74 100644 --- a/xcp_d/workflows/parcellation.py +++ b/xcp_d/workflows/parcellation.py @@ -14,11 +14,11 @@ from xcp_d.utils.doc import fill_doc from xcp_d.utils.utils import get_std2bold_xfms -LOGGER = logging.getLogger("nipype.workflow") +LOGGER = logging.getLogger('nipype.workflow') @fill_doc -def init_load_atlases_wf(name="load_atlases_wf"): +def init_load_atlases_wf(name='load_atlases_wf'): """Load atlases and warp them to the same space as the BOLD file. Workflow Graph @@ -66,12 +66,12 @@ def init_load_atlases_wf(name="load_atlases_wf"): atlas_names, atlas_files, atlas_labels_files, atlas_metadata = [], [], [], [] atlas_datasets = [] for atlas, atlas_dict in atlases.items(): - config.loggers.workflow.info(f"Loading atlas: {atlas}") + config.loggers.workflow.info(f'Loading atlas: {atlas}') atlas_names.append(atlas) - atlas_datasets.append(atlas_dict["dataset"]) - atlas_files.append(atlas_dict["image"]) - atlas_labels_files.append(atlas_dict["labels"]) - atlas_metadata.append(atlas_dict["metadata"]) + atlas_datasets.append(atlas_dict['dataset']) + atlas_files.append(atlas_dict['image']) + atlas_labels_files.append(atlas_dict['labels']) + atlas_metadata.append(atlas_dict['metadata']) # Write a description atlas_str = describe_atlases(atlas_names) @@ -84,16 +84,16 @@ def init_load_atlases_wf(name="load_atlases_wf"): inputnode = pe.Node( niu.IdentityInterface( fields=[ - "name_source", - "bold_file", - "atlas_names", - "atlas_datasets", - "atlas_files", - "atlas_labels_files", - "atlas_metadata", + 'name_source', + 'bold_file', + 'atlas_names', + 'atlas_datasets', + 'atlas_files', + 'atlas_labels_files', + 'atlas_metadata', ], ), - name="inputnode", + name='inputnode', ) inputnode.inputs.atlas_names = atlas_names inputnode.inputs.atlas_datasets = atlas_datasets @@ -104,73 +104,73 @@ def init_load_atlases_wf(name="load_atlases_wf"): outputnode = pe.Node( niu.IdentityInterface( fields=[ - "atlas_names", - "atlas_files", - "atlas_labels_files", + 'atlas_names', + 'atlas_files', + 'atlas_labels_files', ], ), - name="outputnode", + name='outputnode', ) - workflow.connect([(inputnode, outputnode, [("atlas_names", "atlas_names")])]) + workflow.connect([(inputnode, outputnode, [('atlas_names', 'atlas_names')])]) atlas_buffer = pe.Node( niu.IdentityInterface( - fields=["atlas_file"], + fields=['atlas_file'], ), - name="atlas_buffer", + name='atlas_buffer', ) - if config.workflow.file_format == "nifti": + if config.workflow.file_format == 'nifti': workflow.__desc__ += ( - " Each atlas was warped to the same space and resolution as the BOLD file." + ' Each atlas was warped to the same space and resolution as the BOLD file.' ) get_xfms_to_bold_space = pe.MapNode( Function( - input_names=["bold_file", "source_file", "source_space"], - output_names=["transforms"], + input_names=['bold_file', 'source_file', 'source_space'], + output_names=['transforms'], function=get_std2bold_xfms, ), - name="get_xfms_to_bold_space", - iterfield=["source_file"], + name='get_xfms_to_bold_space', + iterfield=['source_file'], ) workflow.connect([ (inputnode, get_xfms_to_bold_space, [ - ("bold_file", "bold_file"), - ("atlas_files", "source_file"), + ('bold_file', 'bold_file'), + ('atlas_files', 'source_file'), ]), ]) # fmt:skip # ApplyTransforms needs a 3D image for the reference image. 
grab_first_volume = pe.Node( IndexImage(index=0), - name="grab_first_volume", + name='grab_first_volume', ) - workflow.connect([(inputnode, grab_first_volume, [("bold_file", "in_file")])]) + workflow.connect([(inputnode, grab_first_volume, [('bold_file', 'in_file')])]) # Using the generated transforms, apply them to get everything in the correct MNI form warp_atlases_to_bold_space = pe.MapNode( ApplyTransforms( - interpolation="GenericLabel", + interpolation='GenericLabel', input_image_type=3, dimension=3, num_threads=config.nipype.omp_nthreads, ), - name="warp_atlases_to_bold_space", - iterfield=["input_image", "transforms"], + name='warp_atlases_to_bold_space', + iterfield=['input_image', 'transforms'], mem_gb=2, n_procs=config.nipype.omp_nthreads, ) workflow.connect([ - (inputnode, warp_atlases_to_bold_space, [("atlas_files", "input_image")]), - (grab_first_volume, warp_atlases_to_bold_space, [("out_file", "reference_image")]), - (get_xfms_to_bold_space, warp_atlases_to_bold_space, [("transforms", "transforms")]), - (warp_atlases_to_bold_space, atlas_buffer, [("output_image", "atlas_file")]), + (inputnode, warp_atlases_to_bold_space, [('atlas_files', 'input_image')]), + (grab_first_volume, warp_atlases_to_bold_space, [('out_file', 'reference_image')]), + (get_xfms_to_bold_space, warp_atlases_to_bold_space, [('transforms', 'transforms')]), + (warp_atlases_to_bold_space, atlas_buffer, [('output_image', 'atlas_file')]), ]) # fmt:skip else: - workflow.connect([(inputnode, atlas_buffer, [("atlas_files", "atlas_file")])]) + workflow.connect([(inputnode, atlas_buffer, [('atlas_files', 'atlas_file')])]) atlas_srcs = pe.MapNode( BIDSURI( @@ -178,42 +178,42 @@ def init_load_atlases_wf(name="load_atlases_wf"): dataset_links=config.execution.dataset_links, out_dir=str(output_dir), ), - name="atlas_srcs", - iterfield=["in1"], + name='atlas_srcs', + iterfield=['in1'], run_without_submitting=True, ) - workflow.connect([(inputnode, atlas_srcs, [("atlas_files", "in1")])]) + workflow.connect([(inputnode, atlas_srcs, [('atlas_files', 'in1')])]) copy_atlas = pe.MapNode( CopyAtlas(output_dir=output_dir), - name="copy_atlas", - iterfield=["in_file", "atlas", "meta_dict", "Sources"], + name='copy_atlas', + iterfield=['in_file', 'atlas', 'meta_dict', 'Sources'], run_without_submitting=True, ) workflow.connect([ (inputnode, copy_atlas, [ - ("name_source", "name_source"), - ("atlas_names", "atlas"), - ("atlas_metadata", "meta_dict"), + ('name_source', 'name_source'), + ('atlas_names', 'atlas'), + ('atlas_metadata', 'meta_dict'), ]), - (atlas_buffer, copy_atlas, [("atlas_file", "in_file")]), - (atlas_srcs, copy_atlas, [("out", "Sources")]), - (copy_atlas, outputnode, [("out_file", "atlas_files")]), + (atlas_buffer, copy_atlas, [('atlas_file', 'in_file')]), + (atlas_srcs, copy_atlas, [('out', 'Sources')]), + (copy_atlas, outputnode, [('out_file', 'atlas_files')]), ]) # fmt:skip copy_atlas_labels_file = pe.MapNode( CopyAtlas(output_dir=output_dir), - name="copy_atlas_labels_file", - iterfield=["in_file", "atlas"], + name='copy_atlas_labels_file', + iterfield=['in_file', 'atlas'], run_without_submitting=True, ) workflow.connect([ (inputnode, copy_atlas_labels_file, [ - ("name_source", "name_source"), - ("atlas_names", "atlas"), - ("atlas_labels_files", "in_file"), + ('name_source', 'name_source'), + ('atlas_names', 'atlas'), + ('atlas_labels_files', 'in_file'), ]), - (copy_atlas_labels_file, outputnode, [("out_file", "atlas_labels_files")]), + (copy_atlas_labels_file, outputnode, [('out_file', 
'atlas_labels_files')]), ]) # fmt:skip return workflow @@ -222,7 +222,7 @@ def init_load_atlases_wf(name="load_atlases_wf"): def init_parcellate_cifti_wf( mem_gb, compute_mask=True, - name="parcellate_cifti_wf", + name='parcellate_cifti_wf', ): """Parcellate a CIFTI file using a set of atlases. @@ -301,139 +301,139 @@ def init_parcellate_cifti_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "in_file", - "atlas_files", - "atlas_labels_files", - "vertexwise_coverage", - "coverage_cifti", + 'in_file', + 'atlas_files', + 'atlas_labels_files', + 'vertexwise_coverage', + 'coverage_cifti', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( niu.IdentityInterface( fields=[ - "parcellated_cifti", - "parcellated_tsv", - "vertexwise_coverage", - "coverage_cifti", - "coverage_tsv", + 'parcellated_cifti', + 'parcellated_tsv', + 'vertexwise_coverage', + 'coverage_cifti', + 'coverage_tsv', ], ), - name="outputnode", + name='outputnode', ) # Replace vertices with all zeros with NaNs using Python. coverage_buffer = pe.Node( - niu.IdentityInterface(fields=["vertexwise_coverage", "coverage_cifti"]), - name="coverage_buffer", + niu.IdentityInterface(fields=['vertexwise_coverage', 'coverage_cifti']), + name='coverage_buffer', ) if compute_mask: # Write out a vertex-wise binary coverage map using Python. vertexwise_coverage = pe.Node( CiftiVertexMask(), - name="vertexwise_coverage", + name='vertexwise_coverage', ) workflow.connect([ - (inputnode, vertexwise_coverage, [("in_file", "in_file")]), - (vertexwise_coverage, coverage_buffer, [("mask_file", "vertexwise_coverage")]), - (vertexwise_coverage, outputnode, [("mask_file", "vertexwise_coverage")]), + (inputnode, vertexwise_coverage, [('in_file', 'in_file')]), + (vertexwise_coverage, coverage_buffer, [('mask_file', 'vertexwise_coverage')]), + (vertexwise_coverage, outputnode, [('mask_file', 'vertexwise_coverage')]), ]) # fmt:skip parcellate_coverage = pe.MapNode( CiftiParcellateWorkbench( - direction="COLUMN", + direction='COLUMN', only_numeric=True, - out_file="parcellated_atlas.pscalar.nii", + out_file='parcellated_atlas.pscalar.nii', num_threads=config.nipype.omp_nthreads, ), - name="parcellate_coverage", - iterfield=["atlas_label"], + name='parcellate_coverage', + iterfield=['atlas_label'], n_procs=config.nipype.omp_nthreads, ) workflow.connect([ - (inputnode, parcellate_coverage, [("atlas_files", "atlas_label")]), - (vertexwise_coverage, parcellate_coverage, [("mask_file", "in_file")]), - (parcellate_coverage, coverage_buffer, [("out_file", "coverage_cifti")]), - (parcellate_coverage, outputnode, [("out_file", "coverage_cifti")]), + (inputnode, parcellate_coverage, [('atlas_files', 'atlas_label')]), + (vertexwise_coverage, parcellate_coverage, [('mask_file', 'in_file')]), + (parcellate_coverage, coverage_buffer, [('out_file', 'coverage_cifti')]), + (parcellate_coverage, outputnode, [('out_file', 'coverage_cifti')]), ]) # fmt:skip coverage_to_tsv = pe.MapNode( CiftiToTSV(), - name="coverage_to_tsv", - iterfield=["in_file", "atlas_labels"], + name='coverage_to_tsv', + iterfield=['in_file', 'atlas_labels'], ) workflow.connect([ - (inputnode, coverage_to_tsv, [("atlas_labels_files", "atlas_labels")]), - (parcellate_coverage, coverage_to_tsv, [("out_file", "in_file")]), - (coverage_to_tsv, outputnode, [("out_file", "coverage_tsv")]), + (inputnode, coverage_to_tsv, [('atlas_labels_files', 'atlas_labels')]), + (parcellate_coverage, coverage_to_tsv, [('out_file', 'in_file')]), + (coverage_to_tsv, outputnode, [('out_file', 
'coverage_tsv')]), ]) # fmt:skip else: workflow.connect([ (inputnode, coverage_buffer, [ - ("vertexwise_coverage", "vertexwise_coverage"), - ("coverage_cifti", "coverage_cifti"), + ('vertexwise_coverage', 'vertexwise_coverage'), + ('coverage_cifti', 'coverage_cifti'), ]), ]) # fmt:skip # Parcellate the data file using the vertex-wise coverage. parcellate_data = pe.MapNode( CiftiParcellateWorkbench( - direction="COLUMN", + direction='COLUMN', only_numeric=True, out_file=f"parcellated_data.{'ptseries' if compute_mask else 'pscalar'}.nii", num_threads=config.nipype.omp_nthreads, ), - name="parcellate_data", - iterfield=["atlas_label"], - mem_gb=mem_gb["resampled"], + name='parcellate_data', + iterfield=['atlas_label'], + mem_gb=mem_gb['resampled'], n_procs=config.nipype.omp_nthreads, ) workflow.connect([ (inputnode, parcellate_data, [ - ("in_file", "in_file"), - ("atlas_files", "atlas_label"), + ('in_file', 'in_file'), + ('atlas_files', 'atlas_label'), ]), - (coverage_buffer, parcellate_data, [("vertexwise_coverage", "cifti_weights")]), + (coverage_buffer, parcellate_data, [('vertexwise_coverage', 'cifti_weights')]), ]) # fmt:skip # Threshold node coverage values based on coverage threshold. threshold_coverage = pe.MapNode( CiftiMath( - expression=f"data > {config.workflow.min_coverage}", + expression=f'data > {config.workflow.min_coverage}', num_threads=config.nipype.omp_nthreads, ), - name="threshold_coverage", - iterfield=["data"], - mem_gb=mem_gb["resampled"], + name='threshold_coverage', + iterfield=['data'], + mem_gb=mem_gb['resampled'], n_procs=config.nipype.omp_nthreads, ) - workflow.connect([(coverage_buffer, threshold_coverage, [("coverage_cifti", "data")])]) + workflow.connect([(coverage_buffer, threshold_coverage, [('coverage_cifti', 'data')])]) # Mask out uncovered nodes from parcellated denoised data mask_parcellated_data = pe.MapNode( CiftiMask(), - name="mask_parcellated_data", - iterfield=["in_file", "mask"], - mem_gb=mem_gb["resampled"], + name='mask_parcellated_data', + iterfield=['in_file', 'mask'], + mem_gb=mem_gb['resampled'], ) workflow.connect([ - (parcellate_data, mask_parcellated_data, [("out_file", "in_file")]), - (threshold_coverage, mask_parcellated_data, [("out_file", "mask")]), - (mask_parcellated_data, outputnode, [("out_file", "parcellated_cifti")]), + (parcellate_data, mask_parcellated_data, [('out_file', 'in_file')]), + (threshold_coverage, mask_parcellated_data, [('out_file', 'mask')]), + (mask_parcellated_data, outputnode, [('out_file', 'parcellated_cifti')]), ]) # fmt:skip # Convert the parcellated CIFTI to a TSV file cifti_to_tsv = pe.MapNode( CiftiToTSV(), - name="cifti_to_tsv", - iterfield=["in_file", "atlas_labels"], + name='cifti_to_tsv', + iterfield=['in_file', 'atlas_labels'], ) workflow.connect([ - (inputnode, cifti_to_tsv, [("atlas_labels_files", "atlas_labels")]), - (mask_parcellated_data, cifti_to_tsv, [("out_file", "in_file")]), - (cifti_to_tsv, outputnode, [("out_file", "parcellated_tsv")]), + (inputnode, cifti_to_tsv, [('atlas_labels_files', 'atlas_labels')]), + (mask_parcellated_data, cifti_to_tsv, [('out_file', 'in_file')]), + (cifti_to_tsv, outputnode, [('out_file', 'parcellated_tsv')]), ]) # fmt:skip return workflow diff --git a/xcp_d/workflows/plotting.py b/xcp_d/workflows/plotting.py index e6843c80a..6af9300b7 100644 --- a/xcp_d/workflows/plotting.py +++ b/xcp_d/workflows/plotting.py @@ -13,7 +13,7 @@ from xcp_d.utils.doc import fill_doc -def init_plot_overlay_wf(desc, name="plot_overlay_wf"): +def init_plot_overlay_wf(desc, 
name='plot_overlay_wf'): """Use the default slices from slicesdir to make a plot.""" from xcp_d.interfaces.plotting import SlicesDir @@ -22,62 +22,62 @@ def init_plot_overlay_wf(desc, name="plot_overlay_wf"): inputnode = pe.Node( niu.IdentityInterface( fields=[ - "underlay_file", - "overlay_file", - "name_source", + 'underlay_file', + 'overlay_file', + 'name_source', ], ), - name="inputnode", + name='inputnode', ) plot_overlay_figure = pe.Node( - SlicesDir(out_extension=".png"), - name="plot_overlay_figure", + SlicesDir(out_extension='.png'), + name='plot_overlay_figure', mem_gb=1, ) workflow.connect([ (inputnode, plot_overlay_figure, [ - ("underlay_file", "in_files"), - ("overlay_file", "outline_image"), + ('underlay_file', 'in_files'), + ('overlay_file', 'outline_image'), ]), ]) # fmt:skip ds_report_overlay = pe.Node( DerivativesDataSink( - dismiss_entities=["den"], + dismiss_entities=['den'], desc=desc, - extension=".png", + extension='.png', ), - name="ds_report_overlay", + name='ds_report_overlay', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ - (inputnode, ds_report_overlay, [("name_source", "source_file")]), - (plot_overlay_figure, ds_report_overlay, [("out_files", "in_file")]), + (inputnode, ds_report_overlay, [('name_source', 'source_file')]), + (plot_overlay_figure, ds_report_overlay, [('out_files', 'in_file')]), ]) # fmt:skip - reformat_for_brain_swipes = pe.Node(FormatForBrainSwipes(), name="reformat_for_brain_swipes") + reformat_for_brain_swipes = pe.Node(FormatForBrainSwipes(), name='reformat_for_brain_swipes') workflow.connect([ - (plot_overlay_figure, reformat_for_brain_swipes, [("slicewise_files", "in_files")]), + (plot_overlay_figure, reformat_for_brain_swipes, [('slicewise_files', 'in_files')]), ]) # fmt:skip ds_report_reformatted_figure = pe.Node( DerivativesDataSink( - dismiss_entities=["den"], - desc=f"{desc}BrainSwipes", - extension=".png", + dismiss_entities=['den'], + desc=f'{desc}BrainSwipes', + extension='.png', ), - name="ds_report_reformatted_figure", + name='ds_report_reformatted_figure', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ - (inputnode, ds_report_reformatted_figure, [("name_source", "source_file")]), - (reformat_for_brain_swipes, ds_report_reformatted_figure, [("out_file", "in_file")]), + (inputnode, ds_report_reformatted_figure, [('name_source', 'source_file')]), + (reformat_for_brain_swipes, ds_report_reformatted_figure, [('out_file', 'in_file')]), ]) # fmt:skip return workflow @@ -86,7 +86,7 @@ def init_plot_overlay_wf(desc, name="plot_overlay_wf"): @fill_doc def init_plot_custom_slices_wf( desc, - name="plot_custom_slices_wf", + name='plot_custom_slices_wf', ): """Plot a custom selection of slices with Slicer. @@ -120,7 +120,7 @@ def init_plot_custom_slices_wf( name_source """ # NOTE: These slices are almost certainly specific to a given MNI template and resolution. 
- SINGLE_SLICES = ["x", "x", "x", "y", "y", "y", "z", "z", "z"] + SINGLE_SLICES = ['x', 'x', 'x', 'y', 'y', 'y', 'z', 'z', 'z'] SLICE_NUMBERS = [36, 45, 52, 43, 54, 65, 23, 33, 39] workflow = Workflow(name=name) @@ -128,58 +128,58 @@ def init_plot_custom_slices_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "underlay_file", - "overlay_file", - "name_source", + 'underlay_file', + 'overlay_file', + 'name_source', ], ), - name="inputnode", + name='inputnode', ) # slices/slicer does not do well trying to make the red outline when it # cannot find the edges, so cannot use the ROI files with some low intensities. binarize_edges = pe.Node( - BinaryMath(expression="img.astype(bool).astype(int)"), - name="binarize_edges", + BinaryMath(expression='img.astype(bool).astype(int)'), + name='binarize_edges', mem_gb=1, ) - workflow.connect([(inputnode, binarize_edges, [("overlay_file", "in_file")])]) + workflow.connect([(inputnode, binarize_edges, [('overlay_file', 'in_file')])]) make_image = pe.MapNode( fsl.Slicer(show_orientation=True, label_slices=True), - name="make_image", - iterfield=["single_slice", "slice_number"], + name='make_image', + iterfield=['single_slice', 'slice_number'], mem_gb=1, ) make_image.inputs.single_slice = SINGLE_SLICES make_image.inputs.slice_number = SLICE_NUMBERS workflow.connect([ - (inputnode, make_image, [("underlay_file", "in_file")]), - (binarize_edges, make_image, [("out_file", "image_edges")]), + (inputnode, make_image, [('underlay_file', 'in_file')]), + (binarize_edges, make_image, [('out_file', 'image_edges')]), ]) # fmt:skip combine_images = pe.Node( - PNGAppend(out_file="out.png"), - name="combine_images", + PNGAppend(out_file='out.png'), + name='combine_images', mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) - workflow.connect([(make_image, combine_images, [("out_file", "in_files")])]) + workflow.connect([(make_image, combine_images, [('out_file', 'in_files')])]) ds_report_overlay = pe.Node( DerivativesDataSink( - dismiss_entities=["den"], + dismiss_entities=['den'], desc=desc, - extension=".png", + extension='.png', ), - name="ds_report_overlay", + name='ds_report_overlay', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) workflow.connect([ - (inputnode, ds_report_overlay, [("name_source", "source_file")]), - (combine_images, ds_report_overlay, [("out_file", "in_file")]), + (inputnode, ds_report_overlay, [('name_source', 'source_file')]), + (combine_images, ds_report_overlay, [('out_file', 'in_file')]), ]) # fmt:skip return workflow
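# The custom-slices workflow above binarizes the ROI overlay before handing it to
# FSL's slicer, because slicer has trouble drawing the red outline when the ROI
# contains low, non-binary intensities. The binarize_edges node applies the
# expression 'img.astype(bool).astype(int)' through xcp_d's BinaryMath interface;
# the snippet below is a standalone sketch of that same operation with nibabel
# (file names here are hypothetical, not taken from the workflow).
import nibabel as nib
import numpy as np

roi_img = nib.load('overlay_roi.nii.gz')
data = np.asanyarray(roi_img.dataobj)

# Any nonzero voxel becomes 1, so the overlay has a single, well-defined edge.
binarized = data.astype(bool).astype(np.int16)

nib.save(nib.Nifti1Image(binarized, roi_img.affine), 'overlay_roi_bin.nii.gz')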