From 4538b7c3077a93e098b86521c26f2cb960490183 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 14 Jun 2023 15:50:21 -0400 Subject: [PATCH 01/28] Fix description of signal denoising method in docs (#898) --- .circleci/config.yml | 2 +- CHANGES.md => docs/changes.md | 7 +++++-- docs/changes.rst | 7 ------- docs/conf.py | 1 + docs/usage.rst | 23 +++++++++++------------ xcp_d/workflows/connectivity.py | 2 +- 6 files changed, 19 insertions(+), 23 deletions(-) rename CHANGES.md => docs/changes.md (99%) delete mode 100644 docs/changes.rst diff --git a/.circleci/config.yml b/.circleci/config.yml index fea90d3dd..a8fb98164 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -316,7 +316,7 @@ jobs: no_output_timeout: 3h command: | # Get version, update files. - THISVERSION=$(python -c "from aslprep import __version__; print(__version__)") + THISVERSION=$(python3 -c "from xcp_d import __version__; print(__version__)") sed -i "s/title = {xcp_d}/title = {xcp_d ${CIRCLE_TAG:-$THISVERSION}}/" xcp_d/data/boilerplate.bib # Build docker image e=1 && for i in {1..5}; do diff --git a/CHANGES.md b/docs/changes.md similarity index 99% rename from CHANGES.md rename to docs/changes.md index eda8fb7a8..6636ebd84 100644 --- a/CHANGES.md +++ b/docs/changes.md @@ -1,3 +1,5 @@ +# What's New + ## 0.4.0 ### 🛠 Breaking Changes @@ -163,7 +165,8 @@ Two big breaking changes are (1) there is a new `--dcan-qc` flag that determines **Full Changelog**: https://github.com/PennLINC/xcp_d/compare/0.2.0...0.3.0 -# 0.2.2 +## 0.2.2 + This is a patch release for the 0.2 series. The main bug being fixed is that using `--dummytime` was causing crashes in the executive summary workflow. ### 🎉 Exciting New Features @@ -304,7 +307,7 @@ There is a full list of the changes made between 0.1.3 and 0.2.0 below. However, * [TEST] Add tests for outstanding modules by @kahinimehta in https://github.com/PennLINC/xcp_d/pull/529 * [FIX] Fcon workflow tests are incompatible with changes from main by @kahinimehta in https://github.com/PennLINC/xcp_d/pull/584 -## New Contributors +### New Contributors * @tsalo made their first contribution in https://github.com/PennLINC/xcp_d/pull/457 **Full Changelog**: https://github.com/PennLINC/xcp_d/compare/0.1.3...0.2.0 diff --git a/docs/changes.rst b/docs/changes.rst deleted file mode 100644 index ab149c726..000000000 --- a/docs/changes.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. include:: links.rst - -########## -What's new -########## - -.. mdinclude:: ../CHANGES.md diff --git a/docs/conf.py b/docs/conf.py index 3663ae734..2a151acc7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -412,6 +412,7 @@ def setup(app): + """Add extra formatting files.""" app.add_css_file("theme_overrides.css") # We need this for the boilerplate script app.add_js_file("https://cdn.rawgit.com/chrisfilo/zenodo.js/v0.1/zenodo.js") diff --git a/docs/usage.rst b/docs/usage.rst index 1f8225c63..1031a4459 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -294,21 +294,20 @@ The individual confounds files should be tab-delimited, with one column for each and one row for each volume in the data being denoised. -Signal Confounds for Non-Aggressive Denoising -============================================= +Including Signal Regressors +=========================== Let's say you have some nuisance regressors that are not necessarily orthogonal to some associated -regressors that are ostensibly noise. +regressors that are ostensibly signal. 
For example, if you ran `tedana `_ on multi-echo data, you would have a series of "rejected" (noise) and "accepted" (signal) ICA components. Because tedana uses a spatial ICA, these components' time series are not necessarily independent, and there can be shared variance between them. If you want to properly denoise your data using the noise components, -you need to perform "non-aggressive" denoising so that variance from the signal components is not -removed as well. -In non-aggressive denoising, you fit a GLM using both the noise and signal regressors, -then reconstruct the predicted data using just the noise regressors, -and finally remove that predicted data from the real data. +you need to account for the variance they share with the signal components. + +XCP-D allows users to include the signal regressors in their custom confounds file, +so that the noise regressors can be orthogonalized with respect to the signal regressors. For more information about different types of denoising, see `tedana's documentation `_, @@ -317,15 +316,15 @@ and/or `Pruim et al. (2015) `_ So how do we implement this in XCP-D? In order to define regressors that should be treated as signal, -and thus use non-aggressive denoising instead of the default aggressive denoising, +and thus orthogonalize the noise regressors with respect to known signals instead of regressing +them without modification, you should include those regressors in your custom confounds file, with column names starting with ``signal__`` (lower-case "signal", followed by two underscores). .. important:: - XCP-D will automatically perform non-aggressive denoising with any nuisance-regressor option - that uses AROMA regressors - (e.g., ``aroma`` or ``aroma_gsr``). + XCP-D will automatically orthogonalize noise regressors with respect to signal regressors + with any nuisance-regressor option that uses AROMA regressors (e.g., ``aroma`` or ``aroma_gsr``). 
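For intuition, the orthogonalization can be sketched in a few lines of NumPy.
This is a minimal illustration with made-up ``signal`` and ``noise`` arrays
(the shapes and names here are hypothetical, and XCP-D's internal implementation
may differ):

.. code-block:: python

    import numpy as np

    rng = np.random.default_rng(0)
    signal = rng.standard_normal((200, 3))  # columns named with the signal__ prefix
    noise = rng.standard_normal((200, 5))   # the remaining nuisance columns

    # Regress the signal regressors out of the noise regressors, keeping
    # only the part of the noise that is orthogonal to the signal.
    betas, *_ = np.linalg.lstsq(signal, noise, rcond=None)
    noise_orth = noise - signal @ betas

Denoising the data with ``noise_orth`` then removes noise variance without also
removing the variance that the noise regressors share with the signal regressors.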
Task Regression diff --git a/xcp_d/workflows/connectivity.py b/xcp_d/workflows/connectivity.py index 1eb3a2267..6b2e4fb34 100644 --- a/xcp_d/workflows/connectivity.py +++ b/xcp_d/workflows/connectivity.py @@ -264,7 +264,7 @@ def init_parcellate_surfaces_wf( wf = init_parcellate_surfaces_wf( output_dir=".", - files_to_parcellate=["sulc", "curv", "thickness"], + files_to_parcellate=["sulcal_depth", "sulcal_curv", "cortical_thickness"], min_coverage=0.5, mem_gb=0.1, omp_nthreads=1, From aa6c09175306a35f19f319c7ef07dd9581e4946c Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Fri, 16 Jun 2023 11:42:08 -0400 Subject: [PATCH 02/28] Improve QC sidecar contents (#900) --- xcp_d/data/boilerplate.bib | 12 ++++ xcp_d/interfaces/plotting.py | 56 +++++++-------- xcp_d/utils/qcmetrics.py | 128 ++++++++++++++++++++++++++++------- xcp_d/workflows/plotting.py | 14 ++-- 4 files changed, 153 insertions(+), 57 deletions(-) diff --git a/xcp_d/data/boilerplate.bib b/xcp_d/data/boilerplate.bib index 022aa2ead..2f33c9c1a 100644 --- a/xcp_d/data/boilerplate.bib +++ b/xcp_d/data/boilerplate.bib @@ -658,3 +658,15 @@ @article{jiang2016regional year={2016}, publisher={Sage Publications Sage CA: Los Angeles, CA} } + +@article{vijaymeena2016survey, + title={A survey on similarity measures in text mining}, + author={Vijaymeena, MK and Kavitha, K}, + journal={Machine Learning and Applications: An International Journal}, + volume={3}, + number={2}, + pages={19--28}, + year={2016}, + url={https://doi.org/10.5121/mlaij.2016.3103}, + doi={10.5121/mlaij.2016.3103} +} diff --git a/xcp_d/interfaces/plotting.py b/xcp_d/interfaces/plotting.py index 2a4fde365..66c87eb96 100644 --- a/xcp_d/interfaces/plotting.py +++ b/xcp_d/interfaces/plotting.py @@ -334,6 +334,16 @@ def _run_interface(self, runtime): bbox_inches="tight", ) + # Get the different components in the bold file name + # eg: ['sub-colornest001', 'ses-1'], etc. + _, bold_file_name = os.path.split(self.inputs.name_source) + bold_file_name_components = bold_file_name.split("_") + + # Fill out dictionary with entities from filename + qc_values_dict = {} + for entity in bold_file_name_components[:-1]: + qc_values_dict[entity.split("-")[0]] = entity.split("-")[1] + # Calculate QC measures mean_fd = np.mean(preproc_fd_timeseries) mean_rms = np.nanmean(rmsd_censored) # first value can be NaN if no dummy scans @@ -344,19 +354,21 @@ def _run_interface(self, runtime): rmsd_max_value = np.nanmax(rmsd_censored) # A summary of all the values - qc_values = { - "meanFD": [mean_fd], - "relMeansRMSMotion": [mean_rms], - "relMaxRMSMotion": [rmsd_max_value], - "meanDVInit": [mean_dvars_before_processing], - "meanDVFinal": [mean_dvars_after_processing], - "num_censored_volumes": [num_censored_volumes], - "nVolsRemoved": [dummy_scans], - "motionDVCorrInit": [motionDVCorrInit], - "motionDVCorrFinal": [motionDVCorrFinal], - } + qc_values_dict.update( + { + "meanFD": [mean_fd], + "relMeansRMSMotion": [mean_rms], + "relMaxRMSMotion": [rmsd_max_value], + "meanDVInit": [mean_dvars_before_processing], + "meanDVFinal": [mean_dvars_after_processing], + "num_censored_volumes": [num_censored_volumes], + "nVolsRemoved": [dummy_scans], + "motionDVCorrInit": [motionDVCorrInit], + "motionDVCorrFinal": [motionDVCorrFinal], + } + ) - QC_METADATA = { + qc_metadata = { "meanFD": { "LongName": "Mean Framewise Displacement", "Description": ( @@ -434,29 +446,19 @@ def _run_interface(self, runtime): }, } - # Get the different components in the bold file name - # eg: ['sub-colornest001', 'ses-1'], etc. 
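        # For illustration only (hypothetical filename, not from any real dataset):
        #   "sub-01_ses-1_task-rest_bold.nii.gz".split("_")[:-1]
        #       -> ["sub-01", "ses-1", "task-rest"]
        # and splitting each entity on "-" gives {"sub": "01", "ses": "1", ...},
        # which is how qc_values_dict is seeded with filename entities above.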
-        _, bold_file_name = os.path.split(self.inputs.name_source)
-        bold_file_name_components = bold_file_name.split("_")
-
-        # Fill out dictionary with entities from filename
-        qc_dictionary = {}
-        for entity in bold_file_name_components[:-1]:
-            qc_dictionary.update({entity.split("-")[0]: entity.split("-")[1]})
-
-        qc_dictionary.update(qc_values)
         if self.inputs.bold2T1w_mask:  # If a bold mask in T1w is provided
             # Compute quality of registration
-            registration_qc = compute_registration_qc(
+            registration_qc, registration_metadata = compute_registration_qc(
                 bold2t1w_mask=self.inputs.bold2T1w_mask,
                 anat_brainmask=self.inputs.anat_brainmask,
                 bold2template_mask=self.inputs.bold2temp_mask,
                 template_mask=self.inputs.template_mask,
             )
-            qc_dictionary.update(registration_qc)  # Add values to dictionary
+            qc_values_dict.update(registration_qc)  # Add values to dictionary
+            qc_metadata.update(registration_metadata)

         # Convert dictionary to df and write out the qc file
-        df = pd.DataFrame(qc_dictionary)
+        df = pd.DataFrame(qc_values_dict)
         self._results["qc_file"] = fname_presuffix(
             self.inputs.cleaned_file,
             suffix="qc_bold.csv",
             newpath=runtime.cwd,
             use_ext=False,
         )
         with open(self._results["qc_metadata"], "w") as fo:
-            json.dump(QC_METADATA, fo, indent=4, sort_keys=True)
+            json.dump(qc_metadata, fo, indent=4, sort_keys=True)

         return runtime

diff --git a/xcp_d/utils/qcmetrics.py b/xcp_d/utils/qcmetrics.py
index fd8a19705..842996ff0 100644
--- a/xcp_d/utils/qcmetrics.py
+++ b/xcp_d/utils/qcmetrics.py
@@ -37,6 +37,8 @@ def compute_registration_qc(bold2t1w_mask, anat_brainmask, bold2template_mask, t
     -------
     reg_qc : dict
         Quality control measures between different inputs.
+    qc_metadata : dict
+        Metadata describing the QC measures.
     """
     bold2t1w_mask_arr = nb.load(bold2t1w_mask).get_fdata()
     t1w_mask_arr = nb.load(anat_brainmask).get_fdata()

     reg_qc = {
         "coregDice": [dice(bold2t1w_mask_arr, t1w_mask_arr)],
         "coregPearson": [pearson(bold2t1w_mask_arr, t1w_mask_arr)],
-        "coregCoverage": [coverage(bold2t1w_mask_arr, t1w_mask_arr)],
+        "coregCoverage": [overlap(bold2t1w_mask_arr, t1w_mask_arr)],
         "normDice": [dice(bold2template_mask_arr, template_mask_arr)],
         "normPearson": [pearson(bold2template_mask_arr, template_mask_arr)],
-        "normCoverage": [coverage(bold2template_mask_arr, template_mask_arr)],
+        "normCoverage": [overlap(bold2template_mask_arr, template_mask_arr)],
     }
-    return reg_qc
+    qc_metadata = {
+        "coregDice": {
+            "LongName": "Coregistration Sørensen-Dice Coefficient",
+            "Description": (
+                "The Sørensen-Dice coefficient calculated between the binary brain masks from the "
+                "coregistered anatomical and functional images. "
+                "Values are bounded between 0 and 1, "
+                "with higher values indicating better coregistration."
+            ),
+            "Term URL": "https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient",
+        },
+        "coregPearson": {
+            "LongName": "Coregistration Pearson Correlation",
+            "Description": (
+                "The Pearson correlation coefficient calculated between the binary brain masks "
+                "from the coregistered anatomical and functional images. "
+                "Values are bounded between -1 and 1, "
+                "with higher values indicating better coregistration."
+            ),
+            "Term URL": "https://en.wikipedia.org/wiki/Pearson_correlation_coefficient",
+        },
+        "coregCoverage": {
+            "LongName": "Coregistration Overlap Coefficient",
+            "Description": (
+                "The Szymkiewicz-Simpson overlap coefficient calculated between the binary brain "
+                "masks from the coregistered anatomical and functional images. "
+                "Higher values indicate better coregistration."
+            ),
+            "Term URL": "https://en.wikipedia.org/wiki/Overlap_coefficient",
+        },
+        "normDice": {
+            "LongName": "Normalization Sørensen-Dice Coefficient",
+            "Description": (
+                "The Sørensen-Dice coefficient calculated between the binary brain masks from the "
+                "normalized functional image and the associated template. "
+                "Values are bounded between 0 and 1, "
+                "with higher values indicating better normalization."
+            ),
+            "Term URL": "https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient",
+        },
+        "normPearson": {
+            "LongName": "Normalization Pearson Correlation",
+            "Description": (
+                "The Pearson correlation coefficient calculated between the binary brain masks "
+                "from the normalized functional image and the associated template. "
+                "Values are bounded between -1 and 1, "
+                "with higher values indicating better normalization."
+            ),
+            "Term URL": "https://en.wikipedia.org/wiki/Pearson_correlation_coefficient",
+        },
+        "normCoverage": {
+            "LongName": "Normalization Overlap Coefficient",
+            "Description": (
+                "The Szymkiewicz-Simpson overlap coefficient calculated between the binary brain "
+                "masks from the normalized functional image and the associated template. "
+                "Higher values indicate better normalization."
+            ),
+            "Term URL": "https://en.wikipedia.org/wiki/Overlap_coefficient",
+        },
+    }
+    return reg_qc, qc_metadata


 def dice(input1, input2):

     Returns
     -------
-    dice : :obj:`float`
+    coef : :obj:`float`
         The Dice coefficient between ``input1`` and ``input2``.
         It ranges from 0 (no overlap) to 1 (perfect overlap).

     size_i1 = np.count_nonzero(input1)
     size_i2 = np.count_nonzero(input2)

-    try:
-        dsi = (2 * intersection) / (size_i1 + size_i2)
-    except ZeroDivisionError:
-        dsi = 0
+    if (size_i1 + size_i2) == 0:
+        coef = 0
+    else:
+        coef = (2 * intersection) / (size_i1 + size_i2)

-    return dsi
+    return coef


 def pearson(input1, input2):

     Returns
     -------
-    corr : :obj:`float`
+    coef : :obj:`float`
         Correlation between the two images.
     """
     input1 = np.atleast_1d(input1.astype(bool)).flatten()
     input2 = np.atleast_1d(input2.astype(bool)).flatten()

-    corr = np.corrcoef(input1, input2)[0][1]
+    return np.corrcoef(input1, input2)[0, 1]
+
+
+def overlap(input1, input2):
+    r"""Calculate overlap coefficient between two images.

-    return corr
+    The metric is defined as

+    .. math::

-def coverage(input1, input2):
-    """Estimate the coverage between two masks.
+        O = \frac{|A \cap B|}{\min(|A|, |B|)}
+
+    where :math:`A` is the first and :math:`B` the second set of samples (here: binary objects).
+
+    The overlap coefficient is also known as the Szymkiewicz-Simpson coefficient
+    :footcite:p:`vijaymeena2016survey`.

     Parameters
     ----------
@@ -137,19 +208,20 @@ def coverage(input1, input2):

     Returns
     -------
-    cov : :obj:`float`
+    coef : :obj:`float`
         Overlap coefficient between the two images.
+
+    References
+    ----------
+    .. 
footbibliography:: """ input1 = np.atleast_1d(input1.astype(bool)) input2 = np.atleast_1d(input2.astype(bool)) intersection = np.count_nonzero(input1 & input2) - smallv = np.minimum(np.sum(input1), np.sum(input2)) - cov = intersection / smallv - - return cov + return intersection / smallv def compute_dvars(datat): @@ -173,7 +245,7 @@ def compute_dvars(datat): return np.sqrt(datax_ss) -def _make_dcan_qc_file(filtered_motion, TR): +def make_dcan_qc_file(filtered_motion, TR): """Make DCAN HDF5 file from single motion file. NOTE: This is a Node function. @@ -240,15 +312,25 @@ def make_dcan_df(filtered_motion, name, TR): for thresh in np.linspace(0, 1, 101): thresh = np.around(thresh, 2) - dcan.create_dataset(f"/dcan_motion/fd_{thresh}/skip", data=0, dtype="float") + dcan.create_dataset( + f"/dcan_motion/fd_{thresh}/skip", + data=0, + dtype="float", + ) dcan.create_dataset( f"/dcan_motion/fd_{thresh}/binary_mask", data=(fd > thresh).astype(int), dtype="float", ) - dcan.create_dataset(f"/dcan_motion/fd_{thresh}/threshold", data=thresh, dtype="float") dcan.create_dataset( - f"/dcan_motion/fd_{thresh}/total_frame_count", data=len(fd), dtype="float" + f"/dcan_motion/fd_{thresh}/threshold", + data=thresh, + dtype="float", + ) + dcan.create_dataset( + f"/dcan_motion/fd_{thresh}/total_frame_count", + data=len(fd), + dtype="float", ) dcan.create_dataset( f"/dcan_motion/fd_{thresh}/remaining_total_frame_count", diff --git a/xcp_d/workflows/plotting.py b/xcp_d/workflows/plotting.py index 80fe8da45..576350699 100644 --- a/xcp_d/workflows/plotting.py +++ b/xcp_d/workflows/plotting.py @@ -10,7 +10,7 @@ from xcp_d.interfaces.plotting import QCPlots, QCPlotsES from xcp_d.interfaces.report import FunctionalSummary from xcp_d.utils.doc import fill_doc -from xcp_d.utils.qcmetrics import _make_dcan_qc_file +from xcp_d.utils.qcmetrics import make_dcan_qc_file from xcp_d.utils.utils import get_bold2std_and_t1w_xfms, get_std2bold_xfms @@ -320,19 +320,19 @@ def init_qc_report_wf( # fmt:on if dcan_qc: - make_dcan_qc_file = pe.Node( + make_dcan_qc_file_node = pe.Node( Function( input_names=["filtered_motion", "TR"], output_names=["dcan_df_file"], - function=_make_dcan_qc_file, + function=make_dcan_qc_file, ), - name="make_dcan_qc_file", + name="make_dcan_qc_file_node", ) - make_dcan_qc_file.inputs.TR = TR + make_dcan_qc_file_node.inputs.TR = TR # fmt:off workflow.connect([ - (inputnode, make_dcan_qc_file, [("filtered_motion", "filtered_motion")]), + (inputnode, make_dcan_qc_file_node, [("filtered_motion", "filtered_motion")]), ]) # fmt:on @@ -351,7 +351,7 @@ def init_qc_report_wf( # fmt:off workflow.connect([ (inputnode, ds_dcan_qc, [("name_source", "source_file")]), - (make_dcan_qc_file, ds_dcan_qc, [("dcan_df_file", "in_file")]), + (make_dcan_qc_file_node, ds_dcan_qc, [("dcan_df_file", "in_file")]), ]) # fmt:on From 6618c1441bd9303c548ea8ecc4ff7dfb7ad951b7 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Tue, 20 Jun 2023 09:23:23 -0400 Subject: [PATCH 03/28] Refactor dcan/hcp ingestion and add more surface files (#887) --- xcp_d/interfaces/utils.py | 10 +- xcp_d/utils/bids.py | 66 ++++++-- xcp_d/utils/dcan2fmriprep.py | 286 +++++++++-------------------------- xcp_d/utils/hcp2fmriprep.py | 286 ++++++++++------------------------- xcp_d/utils/ingestion.py | 186 ++++++++++++++++++++++- 5 files changed, 394 insertions(+), 440 deletions(-) diff --git a/xcp_d/interfaces/utils.py b/xcp_d/interfaces/utils.py index c73b88565..56a5be733 100644 --- a/xcp_d/interfaces/utils.py +++ b/xcp_d/interfaces/utils.py @@ -3,10 +3,10 @@ 
from nipype.interfaces.base import ( BaseInterfaceInputSpec, File, - InputMultiObject, OutputMultiObject, SimpleInterface, TraitedSpec, + Undefined, traits, traits_extension, ) @@ -118,8 +118,12 @@ def _run_interface(self, runtime): class _FilterUndefinedInputSpec(BaseInterfaceInputSpec): - inlist = InputMultiObject( - traits.Str, + inlist = traits.List( + traits.Either( + traits.Str, + None, + Undefined, + ), mandatory=True, desc="List of objects to filter.", ) diff --git a/xcp_d/utils/bids.py b/xcp_d/utils/bids.py index 2add840c7..9a35acc69 100644 --- a/xcp_d/utils/bids.py +++ b/xcp_d/utils/bids.py @@ -349,7 +349,7 @@ def collect_data( return layout, subj_data -def _find_standard_space_surfaces(layout, participant_label, queries): +def _find_standard_space_surfaces(layout, participant_label, queries, require_all): """Find standard-space surfaces for a given set of queries. Parameters @@ -364,7 +364,7 @@ def _find_standard_space_surfaces(layout, participant_label, queries): standard_space_surfaces : bool out_surface_files : dict """ - standard_space_surfaces = True + standard_space_surfaces = require_all for name, query in queries.items(): # First, try to grab the first base surface file in standard space. # If it's not available, switch to native T1w-space data. @@ -378,9 +378,11 @@ def _find_standard_space_surfaces(layout, participant_label, queries): ) if len(temp_files) == 0: LOGGER.info("No standard-space surfaces found.") - standard_space_surfaces = False - elif len(temp_files) > 1: - LOGGER.warning(f"{name}: More than one standard-space surface found.") + standard_space_surfaces = False if require_all else standard_space_surfaces + elif temp_files: + standard_space_surfaces = True if not require_all else standard_space_surfaces + if len(temp_files) > 1: + LOGGER.warning(f"{name}: More than one standard-space surface found.") # Now that we know if there are standard-space surfaces available, we can grab the files. 
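    # (Informal sketch of the flag semantics, inferred from the branches above:
    # with require_all=True the flag starts True and any query with no
    # standard-space hit flips it to False, i.e. "all surfaces are standard-space";
    # with require_all=False it starts False and any hit flips it to True,
    # i.e. "at least one surface is standard-space".)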
if standard_space_surfaces: @@ -407,17 +409,18 @@ def _find_standard_space_surfaces(layout, participant_label, queries): } out_surface_files = {} - surface_files_found = True + surface_files_found = require_all for dtype, surface_files_ in surface_files.items(): if len(surface_files_) == 1: + surface_files_found = True if not require_all else surface_files_found out_surface_files[dtype] = surface_files_[0] elif len(surface_files_) == 0: - surface_files_found = False + surface_files_found = False if require_all else surface_files_found out_surface_files[dtype] = None else: - surface_files_found = False + surface_files_found = False if require_all else surface_files_found surface_str = "\n\t".join(surface_files_) raise ValueError( "More than one surface found.\n" @@ -490,6 +493,7 @@ def collect_surface_data(layout, participant_label): layout, participant_label, mesh_queries, + require_all=True, ) shape_queries = { @@ -529,12 +533,49 @@ def collect_surface_data(layout, participant_label): "suffix": "thickness", "extension": ".shape.gii", }, + "lh_cortical_thickness_corr": { + "hemi": "L", + "desc": "corrected", + "suffix": "thickness", + "extension": ".shape.gii", + }, + "rh_cortical_thickness_corr": { + "hemi": "R", + "desc": "corrected", + "suffix": "thickness", + "extension": ".shape.gii", + }, + "lh_myelin": { + "hemi": "L", + "desc": None, + "suffix": "myelinw", + "extension": ".func.gii", + }, + "rh_myelin": { + "hemi": "R", + "desc": None, + "suffix": "myelinw", + "extension": ".func.gii", + }, + "lh_myelin_smoothed": { + "hemi": "L", + "desc": "smoothed", + "suffix": "myelinw", + "extension": ".func.gii", + }, + "rh_myelin_smoothed": { + "hemi": "R", + "desc": "smoothed", + "suffix": "myelinw", + "extension": ".func.gii", + }, } _, _, shape_files = _find_standard_space_surfaces( layout, participant_label, shape_queries, + require_all=False, ) morphometry_files = [k for k, v in shape_files.items() if v is not None] morphometry_files_reduced = [f for f in morphometry_files if f.startswith("lh_")] @@ -666,13 +707,12 @@ def write_dataset_description(fmri_dir, xcpd_dir): orig_dset_description = os.path.join(fmri_dir, "dataset_description.json") if not os.path.isfile(orig_dset_description): - dset_desc = {} + raise FileNotFoundError(f"Dataset description DNE: {orig_dset_description}") - else: - with open(orig_dset_description, "r") as fo: - dset_desc = json.load(fo) + with open(orig_dset_description, "r") as fo: + dset_desc = json.load(fo) - assert dset_desc["DatasetType"] == "derivative" + assert dset_desc["DatasetType"] == "derivative" # Update dataset description dset_desc["Name"] = "XCP-D: A Robust Postprocessing Pipeline of fMRI data" diff --git a/xcp_d/utils/dcan2fmriprep.py b/xcp_d/utils/dcan2fmriprep.py index 497bddff2..0710c7372 100644 --- a/xcp_d/utils/dcan2fmriprep.py +++ b/xcp_d/utils/dcan2fmriprep.py @@ -2,17 +2,23 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Functions for converting DCAN-format derivatives to fMRIPrep format.""" import glob -import logging import os import re import nibabel as nb -import numpy as np import pandas as pd +from nipype import logging from pkg_resources import resource_filename as pkgrf from xcp_d.utils.filemanip import ensure_list -from xcp_d.utils.ingestion import copy_file, extract_mean_signal, plot_bbreg, write_json +from xcp_d.utils.ingestion import ( + collect_anatomical_files, + collect_confounds, + collect_surfaces, + copy_files_in_dict, + plot_bbreg, + write_json, +) LOGGER = logging.getLogger("nipype.utils") @@ -86,7 +92,7 @@ def 
convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): individual transforms available in the DCAN derivatives. """ assert isinstance(in_dir, str) - assert os.path.isdir(in_dir) + assert os.path.isdir(in_dir), f"Folder DNE: {in_dir}" assert isinstance(out_dir, str) assert isinstance(sub_ent, str) @@ -99,12 +105,15 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): RES_ENT = "res-2" subject_dir_fmriprep = os.path.join(out_dir, sub_ent) + os.makedirs(subject_dir_fmriprep, exist_ok=True) # get session ids session_folders = sorted(glob.glob(os.path.join(in_dir, sub_ent, "s*"))) ses_entities = [ os.path.basename(ses_dir) for ses_dir in session_folders if os.path.isdir(ses_dir) ] + if not ses_entities: + raise FileNotFoundError(f"No session volumes found in {os.path.join(in_dir, sub_ent)}") # A dictionary of mappings from HCP derivatives to fMRIPrep derivatives. # Values will be lists, to allow one-to-many mappings. @@ -116,6 +125,7 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): for ses_ent in ses_entities: LOGGER.info(f"Processing {ses_ent}") + subses_ents = f"{sub_ent}_{ses_ent}" session_dir_fmriprep = os.path.join(subject_dir_fmriprep, ses_ent) if os.path.isdir(session_dir_fmriprep): @@ -124,10 +134,6 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): anat_dir_orig = os.path.join(in_dir, sub_ent, ses_ent, "files", "MNINonLinear") anat_dir_fmriprep = os.path.join(session_dir_fmriprep, "anat") - - # NOTE: Why *was* this set to the *first* session only? (I fixed it) - # AFAICT, this would copy the first session's files from DCAN into *every* - # session of the output directory. func_dir_orig = os.path.join(anat_dir_orig, "Results") func_dir_fmriprep = os.path.join(session_dir_fmriprep, "func") work_dir = os.path.join(subject_dir_fmriprep, "work") @@ -136,94 +142,40 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): os.makedirs(func_dir_fmriprep, exist_ok=True) os.makedirs(work_dir, exist_ok=True) - # We don't actually use any transforms, so we don't need the xfms directory. - # xforms_dir_orig = os.path.join(anat_dir_orig, "xfms") - - # Collect anatomical files to copy - t1w_orig = os.path.join(anat_dir_orig, "T1w.nii.gz") - t1w_fmriprep = os.path.join( - anat_dir_fmriprep, - f"{sub_ent}_{ses_ent}_{volspace_ent}_desc-preproc_T1w.nii.gz", - ) - copy_dictionary[t1w_orig] = [t1w_fmriprep] - - brainmask_orig = os.path.join(anat_dir_orig, "brainmask_fs.nii.gz") - brainmask_fmriprep = os.path.join( - anat_dir_fmriprep, - f"{sub_ent}_{ses_ent}_{volspace_ent}_desc-brain_mask.nii.gz", - ) - copy_dictionary[brainmask_orig] = [brainmask_fmriprep] - - # NOTE: What is this file for? 
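        # (This appears to be FreeSurfer's cortical-ribbon segmentation; later in
        # this script the same ribbon file is used as the contour image for the
        # bbregister figures.)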
- ribbon_orig = os.path.join(anat_dir_orig, "ribbon.nii.gz") - ribbon_fmriprep = os.path.join( - anat_dir_fmriprep, - f"{sub_ent}_{ses_ent}_{volspace_ent}_desc-ribbon_T1w.nii.gz", - ) - copy_dictionary[ribbon_orig] = [ribbon_fmriprep] - - dseg_orig = os.path.join(anat_dir_orig, "aparc+aseg.nii.gz") - dseg_fmriprep = os.path.join( - anat_dir_fmriprep, - f"{sub_ent}_{ses_ent}_{volspace_ent}_desc-aparcaseg_dseg.nii.gz", - ) - copy_dictionary[dseg_orig] = [dseg_fmriprep] - - # Grab transforms - # t1w_to_template_orig = os.path.join(xforms_dir_orig, "ANTS_CombinedWarp.nii.gz") + # Create identity-based transforms t1w_to_template_fmriprep = os.path.join( anat_dir_fmriprep, - f"{sub_ent}_{ses_ent}_from-T1w_to-{VOLSPACE}_mode-image_xfm.txt", + f"{subses_ents}_from-T1w_to-{VOLSPACE}_mode-image_xfm.txt", ) copy_dictionary[identity_xfm].append(t1w_to_template_fmriprep) - # template_to_t1w_orig = os.path.join(xforms_dir_orig, "ANTS_CombinedInvWarp.nii.gz") template_to_t1w_fmriprep = os.path.join( anat_dir_fmriprep, - f"{sub_ent}_{ses_ent}_from-{VOLSPACE}_to-T1w_mode-image_xfm.txt", + f"{subses_ents}_from-{VOLSPACE}_to-T1w_mode-image_xfm.txt", ) copy_dictionary[identity_xfm].append(template_to_t1w_fmriprep) - # Grab surface morphometry files - fsaverage_dir_orig = os.path.join(anat_dir_orig, "fsaverage_LR32k") - - SURFACE_DICT = { - "R.midthickness.32k_fs_LR.surf.gii": "hemi-R_desc-hcp_midthickness.surf.gii", - "L.midthickness.32k_fs_LR.surf.gii": "hemi-L_desc-hcp_midthickness.surf.gii", - "R.inflated.32k_fs_LR.surf.gii": "hemi-R_desc-hcp_inflated.surf.gii", - "L.inflated.32k_fs_LR.surf.gii": "hemi-L_desc-hcp_inflated.surf.gii", - "R.very_inflated.32k_fs_LR.surf.gii": "hemi-R_desc-hcp_vinflated.surf.gii", - "L.very_inflated.32k_fs_LR.surf.gii": "hemi-L_desc-hcp_vinflated.surf.gii", - "R.pial.32k_fs_LR.surf.gii": "hemi-R_pial.surf.gii", - "L.pial.32k_fs_LR.surf.gii": "hemi-L_pial.surf.gii", - "R.white.32k_fs_LR.surf.gii": "hemi-R_smoothwm.surf.gii", - "L.white.32k_fs_LR.surf.gii": "hemi-L_smoothwm.surf.gii", - "R.corrThickness.32k_fs_LR.shape.gii": "hemi-R_thickness.shape.gii", - "L.corrThickness.32k_fs_LR.shape.gii": "hemi-L_thickness.shape.gii", - "R.curvature.32k_fs_LR.shape.gii": "hemi-R_curv.shape.gii", - "L.curvature.32k_fs_LR.shape.gii": "hemi-L_curv.shape.gii", - "R.sulc.32k_fs_LR.shape.gii": "hemi-R_sulc.shape.gii", - "L.sulc.32k_fs_LR.shape.gii": "hemi-L_sulc.shape.gii", - } - - for in_str, out_str in SURFACE_DICT.items(): - surf_orig = os.path.join(fsaverage_dir_orig, f"{sub_id}.{in_str}") - surf_fmriprep = os.path.join( - anat_dir_fmriprep, - f"{sub_ent}_{ses_ent}_space-fsLR_den-32k_{out_str}", - ) - copy_dictionary[surf_orig] = [surf_fmriprep] + # Collect anatomical files to copy + base_anatomical_ents = f"{subses_ents}_{volspace_ent}_{RES_ENT}" + anat_dict = collect_anatomical_files( + anat_dir_orig, + anat_dir_fmriprep, + base_anatomical_ents, + ) + copy_dictionary = {**copy_dictionary, **anat_dict} + # Collect surface files to copy + surfaces_dict = collect_surfaces(anat_dir_orig, anat_dir_fmriprep, sub_id, subses_ents) + copy_dictionary = {**copy_dictionary, **surfaces_dict} LOGGER.info("Finished collecting anatomical files") - # get masks and transforms - wmmask = os.path.join(anat_dir_orig, f"wm_2mm_{sub_id}_mask_eroded.nii.gz") - csfmask = os.path.join(anat_dir_orig, f"vent_2mm_{sub_id}_mask_eroded.nii.gz") + # Get masks to be used to extract confounds + wm_mask = os.path.join(anat_dir_orig, f"wm_2mm_{sub_id}_mask_eroded.nii.gz") + csf_mask = os.path.join(anat_dir_orig, 
f"vent_2mm_{sub_id}_mask_eroded.nii.gz") # Collect functional files to copy task_dirs_orig = sorted(glob.glob(os.path.join(func_dir_orig, f"{ses_ent}_task-*"))) - task_dirs_orig = [task_dir for task_dir in task_dirs_orig if os.path.isdir(task_dir)] - task_names = [os.path.basename(task_dir) for task_dir in task_dirs_orig] + task_names = [os.path.basename(f) for f in task_dirs_orig if os.path.isdir(f)] for base_task_name in task_names: LOGGER.info(f"Processing {base_task_name}") @@ -240,20 +192,17 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): continue task_id, run_id = found_task_info[0] - run_ent = f"run-{run_id}" task_ent = f"task-{task_id}" + run_ent = f"run-{run_id}" task_dir_orig = os.path.join(func_dir_orig, base_task_name) # Find original task files - # This file is the anatomical brain mask downsampled to 2mm3. - brainmask_orig_temp = os.path.join(task_dir_orig, "brainmask_fs.2.0.nii.gz") - sbref_orig = os.path.join(task_dir_orig, f"{base_task_name}_SBRef.nii.gz") boldref_fmriprep = os.path.join( func_dir_fmriprep, ( - f"{sub_ent}_{ses_ent}_{task_ent}_{run_ent}_{volspace_ent}_" + f"{subses_ents}_{task_ent}_{run_ent}_{volspace_ent}_" f"{RES_ENT}_boldref.nii.gz" ), ) @@ -263,7 +212,7 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): bold_nifti_fmriprep = os.path.join( func_dir_fmriprep, ( - f"{sub_ent}_{ses_ent}_{task_ent}_{run_ent}_" + f"{subses_ents}_{task_ent}_{run_ent}_" f"{volspace_ent}_{RES_ENT}_desc-preproc_bold.nii.gz" ), ) @@ -272,160 +221,79 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): bold_cifti_orig = os.path.join(task_dir_orig, f"{base_task_name}_Atlas.dtseries.nii") bold_cifti_fmriprep = os.path.join( func_dir_fmriprep, - f"{sub_ent}_{ses_ent}_{task_ent}_{run_ent}_space-fsLR_den-91k_bold.dtseries.nii", + f"{subses_ents}_{task_ent}_{run_ent}_space-fsLR_den-91k_bold.dtseries.nii", ) copy_dictionary[bold_cifti_orig] = [bold_cifti_fmriprep] - # native_to_t1w_orig = os.path.join(xforms_dir_orig, f"{task_ent}2T1w.nii.gz") + # More identity transforms native_to_t1w_fmriprep = os.path.join( func_dir_fmriprep, - ( - f"{sub_ent}_{ses_ent}_{task_ent}_{run_ent}_" - "from-scanner_to-T1w_mode-image_xfm.txt" - ), + f"{subses_ents}_{task_ent}_{run_ent}_from-scanner_to-T1w_mode-image_xfm.txt", ) copy_dictionary[identity_xfm].append(native_to_t1w_fmriprep) - # t1w_to_native_orig = os.path.join(xforms_dir_orig, f"T1w2{task_ent}.nii.gz") t1w_to_native_fmriprep = os.path.join( func_dir_fmriprep, - ( - f"{sub_ent}_{ses_ent}_{task_ent}_{run_ent}_" - "from-T1w_to-scanner_mode-image_xfm.txt" - ), + f"{subses_ents}_{task_ent}_{run_ent}_from-T1w_to-scanner_mode-image_xfm.txt", ) copy_dictionary[identity_xfm].append(t1w_to_native_fmriprep) # Extract metadata for JSON files - TR = nb.load(bold_nifti_orig).header.get_zooms()[-1] # repetition time - bold_nifti_json_dict = { - "RepetitionTime": float(TR), + bold_metadata = { + "RepetitionTime": float(nb.load(bold_nifti_orig).header.get_zooms()[-1]), "TaskName": task_id, } bold_nifti_json_fmriprep = os.path.join( func_dir_fmriprep, ( - f"{sub_ent}_{ses_ent}_{task_ent}_{run_ent}_{volspace_ent}_" + f"{subses_ents}_{task_ent}_{run_ent}_{volspace_ent}_" f"{RES_ENT}_desc-preproc_bold.json" ), ) - write_json(bold_nifti_json_dict, bold_nifti_json_fmriprep) - - bold_cifti_json_dict = { - "RepetitionTime": float(TR), - "TaskName": task_id, - "grayordinates": "91k", - "space": "HCP grayordinates", - "surface": "fsLR", - "surface_density": "32k", - "volume": "MNI152NLin6Asym", - } + 
write_json(bold_metadata, bold_nifti_json_fmriprep) + + bold_metadata.update( + { + "grayordinates": "91k", + "space": "HCP grayordinates", + "surface": "fsLR", + "surface_density": "32k", + "volume": "MNI152NLin6Asym", + }, + ) bold_cifti_json_fmriprep = os.path.join( func_dir_fmriprep, - f"{sub_ent}_{ses_ent}_{task_ent}_{run_ent}_space-fsLR_den-91k_bold.dtseries.json", + f"{subses_ents}_{task_ent}_{run_ent}_space-fsLR_den-91k_bold.dtseries.json", ) - - write_json(bold_cifti_json_dict, bold_cifti_json_fmriprep) + write_json(bold_metadata, bold_cifti_json_fmriprep) # Create confound regressors - mvreg = pd.read_csv( - os.path.join(task_dir_orig, "Movement_Regressors.txt"), - header=None, - delimiter=r"\s+", - ) - - # Only use the first six columns - mvreg = mvreg.iloc[:, 0:6] - mvreg.columns = ["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"] - - # convert rotations from degrees to radians - rot_columns = [c for c in mvreg.columns if c.startswith("rot")] - for col in rot_columns: - mvreg[col] = mvreg[col] * np.pi / 180 - - # get derivatives of motion columns - columns = mvreg.columns.tolist() - for col in columns: - mvreg[f"{col}_derivative1"] = mvreg[col].diff() - - # get powers - columns = mvreg.columns.tolist() - for col in columns: - mvreg[f"{col}_power2"] = mvreg[col] ** 2 - - # Use dummy column for framewise displacement, which will be recalculated by XCP-D. - mvreg["framewise_displacement"] = 0 - - # use masks: brain, csf, and wm mask to extract timeseries - gsreg = extract_mean_signal( - mask=brainmask_orig_temp, - nifti=bold_nifti_orig, - work_dir=work_dir, - ) - csfreg = extract_mean_signal( - mask=csfmask, - nifti=bold_nifti_orig, - work_dir=work_dir, - ) - wmreg = extract_mean_signal( - mask=wmmask, - nifti=bold_nifti_orig, - work_dir=work_dir, - ) - rsmd = np.loadtxt(os.path.join(task_dir_orig, "Movement_AbsoluteRMS.txt")) - - brainreg = pd.DataFrame( - {"global_signal": gsreg, "white_matter": wmreg, "csf": csfreg, "rmsd": rsmd} - ) - - # get derivatives and powers - brainreg["global_signal_derivative1"] = brainreg["global_signal"].diff() - brainreg["white_matter_derivative1"] = brainreg["white_matter"].diff() - brainreg["csf_derivative1"] = brainreg["csf"].diff() - - brainreg["global_signal_derivative1_power2"] = ( - brainreg["global_signal_derivative1"] ** 2 - ) - brainreg["global_signal_power2"] = brainreg["global_signal"] ** 2 - - brainreg["white_matter_derivative1_power2"] = brainreg["white_matter_derivative1"] ** 2 - brainreg["white_matter_power2"] = brainreg["white_matter"] ** 2 - - brainreg["csf_derivative1_power2"] = brainreg["csf_derivative1"] ** 2 - brainreg["csf_power2"] = brainreg["csf"] ** 2 - - # Merge the two DataFrames - regressors = pd.concat([mvreg, brainreg], axis=1) - - # write out the confounds - regressors_file_base = ( - f"{sub_ent}_{ses_ent}_task-{task_id}_{run_ent}_desc-confounds_timeseries" - ) - regressors_tsv_fmriprep = os.path.join( + collect_confounds( + task_dir_orig, func_dir_fmriprep, - f"{regressors_file_base}.tsv", - ) - regressors.to_csv(regressors_tsv_fmriprep, sep="\t", index=False) - - # NOTE: Is this JSON any good? - regressors_json_fmriprep = os.path.join( - func_dir_fmriprep, - f"{regressors_file_base}.json", + f"{subses_ents}_{task_ent}_{run_ent}", + work_dir=work_dir, + bold_file=bold_nifti_orig, + # This file is the anatomical brain mask downsampled to 2 mm3. 
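                # (extract_mean_signal averages the BOLD series within this brain
                # mask -- and within the CSF and WM masks passed below -- to build
                # the global_signal, csf, and white_matter confound columns.)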
+ brainmask_file=os.path.join(task_dir_orig, "brainmask_fs.2.0.nii.gz"), + csf_mask_file=csf_mask, + wm_mask_file=wm_mask, ) - write_json(bold_cifti_json_dict, regressors_json_fmriprep) # Make figures figdir = os.path.join(subject_dir_fmriprep, "figures") os.makedirs(figdir, exist_ok=True) bbref_fig_fmriprep = os.path.join( figdir, - f"{sub_ent}_{ses_ent}_{task_ent}_{run_ent}_desc-bbregister_bold.svg", + f"{subses_ents}_{task_ent}_{run_ent}_desc-bbregister_bold.svg", ) + t1w = os.path.join(anat_dir_orig, "T1w.nii.gz") + ribbon = os.path.join(anat_dir_orig, "ribbon.nii.gz") bbref_fig_fmriprep = plot_bbreg( - fixed_image=t1w_orig, + fixed_image=t1w, moving_image=sbref_orig, out_file=bbref_fig_fmriprep, - contour=ribbon_orig, + contour=ribbon, ) LOGGER.info(f"Finished {base_task_name}") @@ -434,18 +302,10 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): # Copy ABCD files to fMRIPrep folder LOGGER.info("Copying files") - for file_orig, files_fmriprep in copy_dictionary.items(): - if not isinstance(files_fmriprep, list): - raise ValueError( - f"Entry for {file_orig} should be a list, but is a {type(files_fmriprep)}" - ) - - if len(files_fmriprep) > 1: - LOGGER.warning(f"File used for more than one output: {file_orig}") - - for file_fmriprep in files_fmriprep: - copy_file(file_orig, file_fmriprep) + copy_files_in_dict(copy_dictionary) + LOGGER.info("Finished copying files") + # Write the dataset description out last dataset_description_dict = { "Name": "ABCD-DCAN", "BIDSVersion": "1.4.0", @@ -470,6 +330,6 @@ def convert_dcan_to_bids_single_subject(in_dir, out_dir, sub_ent): scans_tuple = tuple(scans_dict.items()) scans_df = pd.DataFrame(scans_tuple, columns=["filename", "source_file"]) - scans_tsv = os.path.join(subject_dir_fmriprep, f"{sub_ent}_scans.tsv") + scans_tsv = os.path.join(subject_dir_fmriprep, f"{subses_ents}_scans.tsv") scans_df.to_csv(scans_tsv, sep="\t", index=False) LOGGER.info("Conversion completed") diff --git a/xcp_d/utils/hcp2fmriprep.py b/xcp_d/utils/hcp2fmriprep.py index 807b2a596..6ad7050ca 100644 --- a/xcp_d/utils/hcp2fmriprep.py +++ b/xcp_d/utils/hcp2fmriprep.py @@ -5,13 +5,19 @@ import os import nibabel as nb -import numpy as np import pandas as pd from nipype import logging from pkg_resources import resource_filename as pkgrf from xcp_d.utils.filemanip import ensure_list -from xcp_d.utils.ingestion import copy_file, extract_mean_signal, plot_bbreg, write_json +from xcp_d.utils.ingestion import ( + collect_anatomical_files, + collect_confounds, + collect_surfaces, + copy_files_in_dict, + plot_bbreg, + write_json, +) LOGGER = logging.getLogger("nipype.utils") @@ -116,22 +122,21 @@ def convert_hcp_to_bids_single_subject(in_dir, out_dir, sub_ent): individual transforms available in the DCAN derivatives. """ assert isinstance(in_dir, str) - assert os.path.isdir(in_dir) + assert os.path.isdir(in_dir), f"Folder DNE: {in_dir}" assert isinstance(out_dir, str) assert isinstance(sub_ent, str) sub_id = sub_ent.replace("sub-", "") # Reset the subject entity in case the sub- prefix wasn't included originally. sub_ent = f"sub-{sub_id}" + subses_ents = sub_ent VOLSPACE = "MNI152NLin6Asym" volspace_ent = f"space-{VOLSPACE}" RES_ENT = "res-2" - # The identity xform is used in place of any actual ones. 
- identity_xfm = pkgrf("xcp_d", "/data/transform/itkIdentityTransform.txt") - anat_dir_orig = os.path.join(in_dir, sub_id, "MNINonLinear") + func_dir_orig = os.path.join(anat_dir_orig, "Results") subject_dir_fmriprep = os.path.join(out_dir, sub_ent) anat_dir_fmriprep = os.path.join(subject_dir_fmriprep, "anat") func_dir_fmriprep = os.path.join(subject_dir_fmriprep, "func") @@ -145,7 +150,7 @@ def convert_hcp_to_bids_single_subject(in_dir, out_dir, sub_ent): os.makedirs(func_dir_fmriprep, exist_ok=True) os.makedirs(work_dir, exist_ok=True) - # Get necessary files + # Get masks to be used to extract confounds csf_mask = pkgrf("xcp_d", f"/data/masks/{volspace_ent}_{RES_ENT}_label-CSF_mask.nii.gz") wm_mask = pkgrf("xcp_d", f"/data/masks/{volspace_ent}_{RES_ENT}_label-WM_mask.nii.gz") @@ -153,276 +158,149 @@ def convert_hcp_to_bids_single_subject(in_dir, out_dir, sub_ent): # Values will be lists, to allow one-to-many mappings. copy_dictionary = {} - # Collect anatomical files to copy - t1w_orig = os.path.join(anat_dir_orig, "T1w_restore.nii.gz") - t1w_fmriprep = os.path.join( - anat_dir_fmriprep, - f"{sub_ent}_{volspace_ent}_{RES_ENT}_desc-preproc_T1w.nii.gz", - ) - copy_dictionary[t1w_orig] = [t1w_fmriprep] - - brainmask_orig = os.path.join(anat_dir_orig, "brainmask_fs.nii.gz") - brainmask_fmriprep = os.path.join( - anat_dir_fmriprep, - f"{sub_ent}_{volspace_ent}_{RES_ENT}_desc-brain_mask.nii.gz", - ) - copy_dictionary[brainmask_orig] = [brainmask_fmriprep] - - # NOTE: What is this file for? - ribbon_orig = os.path.join(anat_dir_orig, "ribbon.nii.gz") - ribbon_fmriprep = os.path.join( - anat_dir_fmriprep, - f"{sub_ent}_{volspace_ent}_{RES_ENT}_desc-ribbon_T1w.nii.gz", - ) - copy_dictionary[ribbon_orig] = [ribbon_fmriprep] - - dseg_orig = os.path.join(anat_dir_orig, "aparc.a2009s+aseg.nii.gz") - dseg_fmriprep = os.path.join( - anat_dir_fmriprep, - f"{sub_ent}_{volspace_ent}_{RES_ENT}_desc-aparcaseg_dseg.nii.gz", - ) - copy_dictionary[dseg_orig] = [dseg_fmriprep] + # The identity xform is used in place of any actual ones. 
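    # (HCP-YA derivatives are already in MNI152NLin6Asym volume space and fsLR
    # surface space, so a plain-text identity transform is presumably enough to
    # satisfy XCP-D's transform inputs without resampling anything.)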
+ identity_xfm = pkgrf("xcp_d", "/data/transform/itkIdentityTransform.txt") + copy_dictionary[identity_xfm] = [] - # Grab transforms t1w_to_template_fmriprep = os.path.join( anat_dir_fmriprep, - f"{sub_ent}_from-T1w_to-{VOLSPACE}_mode-image_xfm.txt", + f"{subses_ents}_from-T1w_to-{VOLSPACE}_mode-image_xfm.txt", ) - copy_dictionary[identity_xfm] = [t1w_to_template_fmriprep] + copy_dictionary[identity_xfm].append(t1w_to_template_fmriprep) template_to_t1w_fmriprep = os.path.join( anat_dir_fmriprep, - f"{sub_ent}_from-{VOLSPACE}_to-T1w_mode-image_xfm.txt", + f"{subses_ents}_from-{VOLSPACE}_to-T1w_mode-image_xfm.txt", ) copy_dictionary[identity_xfm].append(template_to_t1w_fmriprep) - # Grab surface morphometry files - fsaverage_dir_orig = os.path.join(anat_dir_orig, "fsaverage_LR32k") - - SURFACE_DICT = { - "R.midthickness": "hemi-R_desc-hcp_midthickness", - "L.midthickness": "hemi-L_desc-hcp_midthickness", - "R.inflated": "hemi-R_desc-hcp_inflated", - "L.inflated": "hemi-L_desc-hcp_inflated", - "R.very_inflated": "hemi-R_desc-hcp_vinflated", - "L.very_inflated": "hemi-L_desc-hcp_vinflated", - "R.pial": "hemi-R_pial", - "L.pial": "hemi-L_pial", - "R.white": "hemi-R_smoothwm", - "L.white": "hemi-L_smoothwm", - } - for in_str, out_str in SURFACE_DICT.items(): - surf_orig = os.path.join( - fsaverage_dir_orig, - f"{sub_id}.{in_str}.32k_fs_LR.surf.gii", - ) - surf_fmriprep = os.path.join( - anat_dir_fmriprep, - f"{sub_ent}_space-fsLR_den-32k_{out_str}.surf.gii", - ) - copy_dictionary[surf_orig] = [surf_fmriprep] + # Collect anatomical files to copy + base_anatomical_ents = f"{subses_ents}_{volspace_ent}_{RES_ENT}" + anat_dict = collect_anatomical_files(anat_dir_orig, anat_dir_fmriprep, base_anatomical_ents) + copy_dictionary = {**copy_dictionary, **anat_dict} + # Collect surface files to copy + surfaces_dict = collect_surfaces(anat_dir_orig, anat_dir_fmriprep, sub_id, subses_ents) + copy_dictionary = {**copy_dictionary, **surfaces_dict} LOGGER.info("Finished collecting anatomical files") # Collect functional files to copy - subject_task_folders = sorted( - glob.glob(os.path.join(in_dir, sub_id, "MNINonLinear", "Results", "*")) - ) - subject_task_folders = [ - task for task in subject_task_folders if task.endswith("RL") or task.endswith("LR") + task_dirs_orig = sorted(glob.glob(os.path.join(func_dir_orig, "*"))) + task_names = [ + os.path.basename(f) for f in task_dirs_orig if f.endswith("RL") or f.endswith("LR") ] - for subject_task_folder in subject_task_folders: - LOGGER.info(f"Processing {subject_task_folder}") + + for base_task_name in task_names: + LOGGER.info(f"Processing {base_task_name}") # NOTE: What is the first element in the folder name? 
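        # (For a hypothetical HCP-YA folder name like "tfMRI_WM_RL", the split
        # yields ("tfMRI", "WM", "RL"); the first element is the rfMRI/tfMRI
        # modality prefix, which is discarded here.)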
- _, task_id, dir_id = os.path.basename(subject_task_folder).split("_") + _, task_id, dir_id = base_task_name.split("_") task_ent = f"task-{task_id}" dir_ent = f"dir-{dir_id}" - # TODO: Rename variable - run_foldername = os.path.basename(subject_task_folder) - # Find original task files - brainmask_orig_temp = os.path.join(subject_task_folder, "brainmask_fs.2.nii.gz") + task_dir_orig = os.path.join(func_dir_orig, base_task_name) - bold_nifti_orig = os.path.join(subject_task_folder, f"{run_foldername}.nii.gz") - bold_nifti_fmriprep = os.path.join( + # Find original task files + sbref_orig = os.path.join(task_dir_orig, "SBRef_dc.nii.gz") + boldref_fmriprep = os.path.join( func_dir_fmriprep, - f"{sub_ent}_{task_ent}_{dir_ent}_{volspace_ent}_{RES_ENT}_desc-preproc_bold.nii.gz", + f"{subses_ents}_{task_ent}_{dir_ent}_{volspace_ent}_{RES_ENT}_boldref.nii.gz", ) - copy_dictionary[bold_nifti_orig] = [bold_nifti_fmriprep] + copy_dictionary[sbref_orig] = [boldref_fmriprep] - sbref_orig = os.path.join(subject_task_folder, "SBRef_dc.nii.gz") - boldref_fmriprep = os.path.join( + bold_nifti_orig = os.path.join(task_dir_orig, f"{base_task_name}.nii.gz") + bold_nifti_fmriprep = os.path.join( func_dir_fmriprep, - f"{sub_ent}_{task_ent}_{dir_ent}_{volspace_ent}_{RES_ENT}_boldref.nii.gz", + ( + f"{subses_ents}_{task_ent}_{dir_ent}_{volspace_ent}_{RES_ENT}_" + "desc-preproc_bold.nii.gz" + ), ) - copy_dictionary[sbref_orig] = [boldref_fmriprep] + copy_dictionary[bold_nifti_orig] = [bold_nifti_fmriprep] bold_cifti_orig = os.path.join( - subject_task_folder, - f"{run_foldername}_Atlas_MSMAll.dtseries.nii", + task_dir_orig, + f"{base_task_name}_Atlas_MSMAll.dtseries.nii", ) bold_cifti_fmriprep = os.path.join( func_dir_fmriprep, - f"{sub_ent}_{task_ent}_{dir_ent}_space-fsLR_den-91k_bold.dtseries.nii", + f"{subses_ents}_{task_ent}_{dir_ent}_space-fsLR_den-91k_bold.dtseries.nii", ) copy_dictionary[bold_cifti_orig] = [bold_cifti_fmriprep] - # Grab transforms + # More identity transforms native_to_t1w_fmriprep = os.path.join( func_dir_fmriprep, - f"{sub_ent}_{task_ent}_{dir_ent}_from-scanner_to-T1w_mode-image_xfm.txt", + f"{subses_ents}_{task_ent}_{dir_ent}_from-scanner_to-T1w_mode-image_xfm.txt", ) copy_dictionary[identity_xfm].append(native_to_t1w_fmriprep) t1w_to_native_fmriprep = os.path.join( func_dir_fmriprep, - f"{sub_ent}_{task_ent}_{dir_ent}_from-T1w_to-scanner_mode-image_xfm.txt", + f"{subses_ents}_{task_ent}_{dir_ent}_from-T1w_to-scanner_mode-image_xfm.txt", ) copy_dictionary[identity_xfm].append(t1w_to_native_fmriprep) # Extract metadata for JSON files - TR = nb.load(bold_nifti_orig).header.get_zooms()[-1] # repetition time - bold_nifti_json_dict = { - "RepetitionTime": float(TR), + bold_metadata = { + "RepetitionTime": float(nb.load(bold_nifti_orig).header.get_zooms()[-1]), "TaskName": task_id, } bold_nifti_json_fmriprep = os.path.join( func_dir_fmriprep, - f"{sub_ent}_{task_ent}_{dir_ent}_{volspace_ent}_{RES_ENT}_desc-preproc_bold.json", + f"{subses_ents}_{task_ent}_{dir_ent}_{volspace_ent}_{RES_ENT}_desc-preproc_bold.json", ) - write_json(bold_nifti_json_dict, bold_nifti_json_fmriprep) + write_json(bold_metadata, bold_nifti_json_fmriprep) - bold_cifti_json_dict = { - "RepetitionTime": float(TR), - "TaskName": task_id, - "grayordinates": "91k", - "space": "HCP grayordinates", - "surface": "fsLR", - "surface_density": "32k", - "volume": "MNI152NLin6Asym", - } + bold_metadata.update( + { + "grayordinates": "91k", + "space": "HCP grayordinates", + "surface": "fsLR", + "surface_density": "32k", + "volume": 
"MNI152NLin6Asym", + }, + ) bold_cifti_json_fmriprep = os.path.join( func_dir_fmriprep, - f"{sub_ent}_{task_ent}_{dir_ent}_space-fsLR_den-91k_bold.dtseries.json", + f"{subses_ents}_{task_ent}_{dir_ent}_space-fsLR_den-91k_bold.dtseries.json", ) - write_json(bold_cifti_json_dict, bold_cifti_json_fmriprep) + write_json(bold_metadata, bold_cifti_json_fmriprep) # Create confound regressors - mvreg = pd.read_csv( - os.path.join(subject_task_folder, "Movement_Regressors.txt"), - header=None, - delimiter=r"\s+", - ) - mvreg.columns = [ - "trans_x", - "trans_y", - "trans_z", - "rot_x", - "rot_y", - "rot_z", - "trans_x_derivative1", - "trans_y_derivative1", - "trans_z_derivative1", - "rot_x_derivative1", - "rot_y_derivative1", - "rot_z_derivative1", - ] - # convert rotations from degrees to radians - rot_columns = [c for c in mvreg.columns if c.startswith("rot")] - for col in rot_columns: - mvreg[col] = mvreg[col] * np.pi / 180 - - # set first row of derivative columns to nan, for fMRIPrep compatibility - deriv_columns = [c for c in mvreg.columns if c.endswith("derivative1")] - for col in deriv_columns: - mvreg.loc[0, col] = None - - # get powers - columns = mvreg.columns.tolist() - for col in columns: - mvreg[f"{col}_power2"] = mvreg[col] ** 2 - - # Use dummy column for framewise displacement, which will be recalculated by XCP-D. - mvreg["framewise_displacement"] = 0 - - # use masks: brain, csf, and wm mask to extract timeseries - gsreg = extract_mean_signal( - mask=brainmask_orig_temp, - nifti=bold_nifti_orig, - work_dir=work_dir, - ) - csfreg = extract_mean_signal(mask=csf_mask, nifti=bold_nifti_orig, work_dir=work_dir) - wmreg = extract_mean_signal(mask=wm_mask, nifti=bold_nifti_orig, work_dir=work_dir) - rmsd = np.loadtxt(os.path.join(subject_task_folder, "Movement_AbsoluteRMS.txt")) - - brainreg = pd.DataFrame( - {"global_signal": gsreg, "white_matter": wmreg, "csf": csfreg, "rmsd": rmsd} - ) - - # get derivatives and powers - brainreg["global_signal_derivative1"] = brainreg["global_signal"].diff() - brainreg["white_matter_derivative1"] = brainreg["white_matter"].diff() - brainreg["csf_derivative1"] = brainreg["csf"].diff() - - brainreg["global_signal_derivative1_power2"] = brainreg["global_signal_derivative1"] ** 2 - brainreg["global_signal_power2"] = brainreg["global_signal"] ** 2 - - brainreg["white_matter_derivative1_power2"] = brainreg["white_matter_derivative1"] ** 2 - brainreg["white_matter_power2"] = brainreg["white_matter"] ** 2 - - brainreg["csf_derivative1_power2"] = brainreg["csf_derivative1"] ** 2 - brainreg["csf_power2"] = brainreg["csf"] ** 2 - - # Merge the two DataFrames - regressors = pd.concat([mvreg, brainreg], axis=1) - - # write out the confounds - regressors_file_base = f"{sub_ent}_{task_ent}_{dir_ent}_desc-confounds_timeseries" - regressors_tsv_fmriprep = os.path.join( + collect_confounds( + task_dir_orig, func_dir_fmriprep, - f"{regressors_file_base}.tsv", - ) - regressors.to_csv(regressors_tsv_fmriprep, index=False, sep="\t", na_rep="n/a") - - # NOTE: Is this JSON any good? 
- regressors_json_fmriprep = os.path.join( - func_dir_fmriprep, - f"{regressors_file_base}.json", + f"{subses_ents}_{task_ent}_{dir_ent}", + work_dir=work_dir, + bold_file=bold_nifti_orig, + brainmask_file=os.path.join(task_dir_orig, "brainmask_fs.2.nii.gz"), + csf_mask_file=csf_mask, + wm_mask_file=wm_mask, ) - regressors.to_json(regressors_json_fmriprep) # Make figures figdir = os.path.join(subject_dir_fmriprep, "figures") os.makedirs(figdir, exist_ok=True) bbref_fig_fmriprep = os.path.join( figdir, - f"{sub_ent}_{task_ent}_{dir_ent}_desc-bbregister_bold.svg", + f"{subses_ents}_{task_ent}_{dir_ent}_desc-bbregister_bold.svg", ) + t1w = os.path.join(anat_dir_orig, "T1w.nii.gz") + ribbon = os.path.join(anat_dir_orig, "ribbon.nii.gz") bbref_fig_fmriprep = plot_bbreg( - fixed_image=t1w_orig, + fixed_image=t1w, moving_image=sbref_orig, out_file=bbref_fig_fmriprep, - contour=ribbon_orig, + contour=ribbon, ) - LOGGER.info(f"Finished {subject_task_folder}") + LOGGER.info(f"Finished {base_task_name}") LOGGER.info("Finished collecting functional files") # Copy HCP files to fMRIPrep folder LOGGER.info("Copying files") - for file_orig, files_fmriprep in copy_dictionary.items(): - if not isinstance(files_fmriprep, list): - raise ValueError( - f"Entry for {file_orig} should be a list, but is a {type(files_fmriprep)}" - ) - - if len(files_fmriprep) > 1: - LOGGER.warning(f"File used for more than one output: {file_orig}") - - for file_fmriprep in files_fmriprep: - copy_file(file_orig, file_fmriprep) - + copy_files_in_dict(copy_dictionary) LOGGER.info("Finished copying files") # Write the dataset description out last @@ -449,6 +327,6 @@ def convert_hcp_to_bids_single_subject(in_dir, out_dir, sub_ent): scans_tuple = tuple(scans_dict.items()) scans_df = pd.DataFrame(scans_tuple, columns=["filename", "source_file"]) - scans_tsv = os.path.join(subject_dir_fmriprep, f"{sub_ent}_scans.tsv") + scans_tsv = os.path.join(subject_dir_fmriprep, f"{subses_ents}_scans.tsv") scans_df.to_csv(scans_tsv, sep="\t", index=False) LOGGER.info("Conversion completed") diff --git a/xcp_d/utils/ingestion.py b/xcp_d/utils/ingestion.py index 9433ffa46..48f3caa92 100644 --- a/xcp_d/utils/ingestion.py +++ b/xcp_d/utils/ingestion.py @@ -5,7 +5,10 @@ import os import numpy as np -from nilearn import maskers +from nilearn import image, maskers +from nipype import logging + +LOGGER = logging.getLogger("nipype.utils") def copy_file(src, dst): @@ -21,6 +24,161 @@ def copy_file(src, dst): shutil.copyfile(src, dst) +def collect_anatomical_files(anat_dir_orig, anat_dir_fmriprep, base_anatomical_ents): + """Collect anatomical files from ABCD or HCP-YA derivatives.""" + ANAT_DICT = { + # XXX: Why have T1w here and T1w_restore for HCP? 
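        # (Presumably because DCAN-style outputs name the bias-corrected T1
        # "T1w.nii.gz", whereas HCP-YA's MNINonLinear folder stores the
        # equivalent bias-corrected volume as "T1w_restore.nii.gz".)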
+ "T1w.nii.gz": "desc-preproc_T1w.nii.gz", + "brainmask_fs.nii.gz": "desc-brain_mask.nii.gz", + "ribbon.nii.gz": "desc-ribbon_T1w.nii.gz", + "aparc+aseg.nii.gz": "desc-aparcaseg_dseg.nii.gz", + } + copy_dictionary = {} + + for in_str, out_str in ANAT_DICT.items(): + anat_orig = os.path.join(anat_dir_orig, in_str) + anat_fmriprep = os.path.join(anat_dir_fmriprep, f"{base_anatomical_ents}_{out_str}") + if os.path.isfile(anat_orig): + copy_dictionary[anat_orig] = [anat_fmriprep] + else: + LOGGER.warning(f"File DNE: {anat_orig}") + + return copy_dictionary + + +def collect_surfaces(anat_dir_orig, anat_dir_fmriprep, sub_id, subses_ents): + """Collect surface files from ABCD or HCP-YA derivatives.""" + SURFACE_DICT = { + "{hemi}.midthickness.32k_fs_LR.surf.gii": "hemi-{hemi}_desc-hcp_midthickness.surf.gii", + "{hemi}.inflated.32k_fs_LR.surf.gii": "hemi-{hemi}_desc-hcp_inflated.surf.gii", + "{hemi}.very_inflated.32k_fs_LR.surf.gii": "hemi-{hemi}_desc-hcp_vinflated.surf.gii", + "{hemi}.pial.32k_fs_LR.surf.gii": "hemi-{hemi}_pial.surf.gii", + "{hemi}.white.32k_fs_LR.surf.gii": "hemi-{hemi}_smoothwm.surf.gii", + "{hemi}.thickness.32k_fs_LR.shape.gii": "hemi-{hemi}_thickness.shape.gii", + "{hemi}.corrThickness.32k_fs_LR.shape.gii": ( + "hemi-{hemi}_desc-corrected_thickness.shape.gii" + ), + "{hemi}.curvature.32k_fs_LR.shape.gii": "hemi-{hemi}_curv.shape.gii", + "{hemi}.sulc.32k_fs_LR.shape.gii": "hemi-{hemi}_sulc.shape.gii", + "{hemi}.MyelinMap.32k_fs_LR.func.gii": "hemi-{hemi}_myelinw.func.gii", + "{hemi}.SmoothedMyelinMap.32k_fs_LR.func.gii": ( + "hemi-{hemi}_desc-smoothed_myelinw.func.gii" + ), + } + + fsaverage_dir_orig = os.path.join(anat_dir_orig, "fsaverage_LR32k") + copy_dictionary = {} + for in_str, out_str in SURFACE_DICT.items(): + for hemi in ["L", "R"]: + hemi_in_str = in_str.format(hemi=hemi) + hemi_out_str = out_str.format(hemi=hemi) + surf_orig = os.path.join(fsaverage_dir_orig, f"{sub_id}.{hemi_in_str}") + surf_fmriprep = os.path.join( + anat_dir_fmriprep, + f"{subses_ents}_space-fsLR_den-32k_{hemi_out_str}", + ) + if os.path.isfile(surf_orig): + copy_dictionary[surf_orig] = [surf_fmriprep] + else: + LOGGER.warning(f"File DNE: {surf_orig}") + + return copy_dictionary + + +def collect_confounds( + task_dir_orig, + func_dir_fmriprep, + base_task_ents, + work_dir, + bold_file, + brainmask_file, + csf_mask_file, + wm_mask_file, +): + """Create confound regressors.""" + import pandas as pd + + mvreg_file = os.path.join(task_dir_orig, "Movement_Regressors.txt") + rmsd_file = os.path.join(task_dir_orig, "Movement_AbsoluteRMS.txt") + + mvreg = pd.read_csv(mvreg_file, header=None, delimiter=r"\s+") + + # Only use the first six columns + mvreg = mvreg.iloc[:, 0:6] + mvreg.columns = ["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"] + + # convert rotations from degrees to radians + rot_columns = [c for c in mvreg.columns if c.startswith("rot")] + for col in rot_columns: + mvreg[col] = mvreg[col] * np.pi / 180 + + # get derivatives of motion columns + columns = mvreg.columns.tolist() + for col in columns: + mvreg[f"{col}_derivative1"] = mvreg[col].diff() + + # get powers + columns = mvreg.columns.tolist() + for col in columns: + mvreg[f"{col}_power2"] = mvreg[col] ** 2 + + # Use dummy column for framewise displacement, which will be recalculated by XCP-D. 
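    # (At this point mvreg holds the standard 24-parameter motion expansion --
    # 6 realignment parameters, their temporal derivatives, and the squares of
    # all twelve -- to which the dummy FD column below is appended.)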
+ mvreg["framewise_displacement"] = 0 + + # use masks: brain, csf, and wm mask to extract timeseries + mean_gs = extract_mean_signal( + mask=brainmask_file, + nifti=bold_file, + work_dir=work_dir, + ) + mean_csf = extract_mean_signal( + mask=csf_mask_file, + nifti=bold_file, + work_dir=work_dir, + ) + mean_wm = extract_mean_signal( + mask=wm_mask_file, + nifti=bold_file, + work_dir=work_dir, + ) + rsmd = np.loadtxt(rmsd_file) + + brainreg = pd.DataFrame( + {"global_signal": mean_gs, "white_matter": mean_wm, "csf": mean_csf, "rmsd": rsmd} + ) + + # get derivatives and powers + brainreg["global_signal_derivative1"] = brainreg["global_signal"].diff() + brainreg["white_matter_derivative1"] = brainreg["white_matter"].diff() + brainreg["csf_derivative1"] = brainreg["csf"].diff() + + brainreg["global_signal_derivative1_power2"] = brainreg["global_signal_derivative1"] ** 2 + brainreg["global_signal_power2"] = brainreg["global_signal"] ** 2 + + brainreg["white_matter_derivative1_power2"] = brainreg["white_matter_derivative1"] ** 2 + brainreg["white_matter_power2"] = brainreg["white_matter"] ** 2 + + brainreg["csf_derivative1_power2"] = brainreg["csf_derivative1"] ** 2 + brainreg["csf_power2"] = brainreg["csf"] ** 2 + + # Merge the two DataFrames + confounds_df = pd.concat([mvreg, brainreg], axis=1) + + # write out the confounds + regressors_tsv_fmriprep = os.path.join( + func_dir_fmriprep, + f"{base_task_ents}_desc-confounds_timeseries.tsv", + ) + confounds_df.to_csv(regressors_tsv_fmriprep, sep="\t", index=False) + + # NOTE: Is this JSON any good? + regressors_json_fmriprep = os.path.join( + func_dir_fmriprep, + f"{base_task_ents}_desc-confounds_timeseries.json", + ) + confounds_df.to_json(regressors_json_fmriprep) + + def extract_mean_signal(mask, nifti, work_dir): """Extract mean signal within mask from NIFTI.""" assert os.path.isfile(mask), f"File DNE: {mask}" @@ -40,17 +198,16 @@ def write_json(data, outfile): def plot_bbreg(fixed_image, moving_image, contour, out_file="report.svg"): """Plot bbref_fig_fmriprep results.""" import numpy as np - from nilearn.image import load_img, resample_img, threshold_img from niworkflows.viz.utils import compose_view, cuts_from_bbox, plot_registration - fixed_image_nii = load_img(fixed_image) - moving_image_nii = load_img(moving_image) - moving_image_nii = resample_img( + fixed_image_nii = image.load_img(fixed_image) + moving_image_nii = image.load_img(moving_image) + moving_image_nii = image.resample_img( moving_image_nii, target_affine=np.eye(3), interpolation="nearest" ) - contour_nii = load_img(contour) if contour is not None else None + contour_nii = image.load_img(contour) if contour is not None else None - mask_nii = threshold_img(fixed_image_nii, 1e-3) + mask_nii = image.threshold_img(fixed_image_nii, 1e-3) n_cuts = 7 if contour_nii: @@ -80,3 +237,18 @@ def plot_bbreg(fixed_image, moving_image, contour, out_file="report.svg"): out_file=out_file, ) return out_file + + +def copy_files_in_dict(copy_dictionary): + """Copy files in dictionary.""" + for file_orig, files_fmriprep in copy_dictionary.items(): + if not isinstance(files_fmriprep, list): + raise ValueError( + f"Entry for {file_orig} should be a list, but is a {type(files_fmriprep)}" + ) + + if len(files_fmriprep) > 1: + LOGGER.warning(f"File used for more than one output: {file_orig}") + + for file_fmriprep in files_fmriprep: + copy_file(file_orig, file_fmriprep) From 2f31b5d94b0508b6b3f93776a7d829e04f4cd67f Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Tue, 20 Jun 2023 12:15:46 -0400 Subject: 
[PATCH 04/28] Rename the parcellated ALFF/ReHo outputs (#902) --- docs/outputs.rst | 8 +- xcp_d/data/xcp_d_bids_config.json | 4 +- .../data/test_ds001419_cifti_outputs.txt | 52 ++++++------- .../test_ds001419_cifti_t2wonly_outputs.txt | 52 ++++++------- .../data/test_ds001419_nifti_outputs.txt | 78 +++++++++---------- ...st_fmriprep_without_freesurfer_outputs.txt | 52 ++++++------- xcp_d/tests/data/test_nibabies_outputs.txt | 52 ++++++------- xcp_d/workflows/outputs.py | 6 +- 8 files changed, 151 insertions(+), 153 deletions(-) diff --git a/docs/outputs.rst b/docs/outputs.rst index ce66b50df..a9ca6e52f 100644 --- a/docs/outputs.rst +++ b/docs/outputs.rst @@ -230,15 +230,15 @@ data. _space-