From da6d866558c2f7415fc54db6a7366b3b4b6a1b62 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 20 Sep 2023 09:32:19 -0400 Subject: [PATCH] Fix resting-state plots in executive summary (#941) --- .../executive_summary.html.jinja | 17 ++--- .../task_static_plots.html.jinja | 14 ++-- xcp_d/interfaces/execsummary.py | 76 +++++++++++++------ 3 files changed, 70 insertions(+), 37 deletions(-) diff --git a/xcp_d/data/executive_summary_templates/executive_summary.html.jinja b/xcp_d/data/executive_summary_templates/executive_summary.html.jinja index 52d4dd0c4..4570bbdf7 100644 --- a/xcp_d/data/executive_summary_templates/executive_summary.html.jinja +++ b/xcp_d/data/executive_summary_templates/executive_summary.html.jinja @@ -138,16 +138,15 @@ #} {% include "anatomical_registration_plots.html.jinja" %} - {# Carpet/line plot for pre- and post-regression, concatenate across runs. #} - {% include "concatenated_task_static_plots.html.jinja" %} - {# - Task static plots. One section per run of each task. - 1. Task in T1 - 2. T1 in Task - 3. BOLD mean(?) image on the left - 4. BOLD reference image on the left - 3/4. Pre and post regression carpet/line plots on right. + "Functional Data" section, with BOLD figures. + 1. Concatenated resting-state carpet plots. + 2. One section per run of each task. + 1. Task in T1 + 2. T1 in Task + 3. BOLD mean(?) image on the left + 4. BOLD reference image on the left + 3/4. Pre and post regression carpet/line plots on right. #} {% include "task_static_plots.html.jinja" %} diff --git a/xcp_d/data/executive_summary_templates/task_static_plots.html.jinja b/xcp_d/data/executive_summary_templates/task_static_plots.html.jinja index df985f533..e0b9fa49b 100644 --- a/xcp_d/data/executive_summary_templates/task_static_plots.html.jinja +++ b/xcp_d/data/executive_summary_templates/task_static_plots.html.jinja @@ -2,8 +2,9 @@ Start the tasks section and put in the column headings for the task-specific data. 
Inputs: - - task_files[]{"task"} - - task_files[]{"run"} + - concatenated_rest_files{"preproc_carpet"} + - concatenated_rest_files{"postproc_carpet"} + - task_files[]{"key"} - task_files[]{"registration_files"} - task_files[]{"registration_titles"} - task_files[]{"bold"} @@ -22,11 +23,14 @@

Functional Data

+ + {# Carpet/line plot for pre- and post-regression, concatenated across runs. #} + {% include "concatenated_task_static_plots.html.jinja" %} +
{% for run_dict in task_files %} - {% set task = run_dict["task"] %} - {% set run = run_dict["run"] %} + {% set key = run_dict["key"] %} {% set registration_files = run_dict["registration_files"] %} {% set registration_titles = run_dict["registration_titles"] %} {% set bold = run_dict["bold"] %} @@ -38,7 +42,7 @@ Add the task name for the next few rows. #}
-
task-{{ task }} run-{{ run }}:
+
{{ key }}:
{# Full rows for registration files #} diff --git a/xcp_d/interfaces/execsummary.py b/xcp_d/interfaces/execsummary.py index 6f8bb3c42..2dccd93c6 100644 --- a/xcp_d/interfaces/execsummary.py +++ b/xcp_d/interfaces/execsummary.py @@ -4,6 +4,8 @@ import re from pathlib import Path +import numpy as np +import pandas as pd from bids.layout import BIDSLayout, Query from bs4 import BeautifulSoup from jinja2 import Environment, FileSystemLoader, Markup @@ -144,6 +146,24 @@ def collect_inputs(self): self.structural_files_ = structural_files + # Collect figures for concatenated resting-state data (if any) + concatenated_rest_files = {} + + query = { + "subject": self.subject_id, + "task": "rest", + "run": Query.NONE, + "desc": "preprocESQC", + "suffix": "bold", + "extension": ".svg", + } + concatenated_rest_files["preproc_carpet"] = self._get_bids_file(query) + + query["desc"] = "postprocESQC" + concatenated_rest_files["postproc_carpet"] = self._get_bids_file(query) + + self.concatenated_rest_files_ = concatenated_rest_files + # Determine the unique entity-sets for the task data. 
postproc_files = self.layout.get( subject=self.subject_id, @@ -166,34 +186,42 @@ def collect_inputs(self): task_entity_sets = [] for entity_set in unique_entity_sets: for entity in ORDERING: - entity_set[entity] = entity_set.get(entity, Query.NONE) + entity_set[entity] = entity_set.get(entity, np.nan) task_entity_sets.append(entity_set) - concatenated_rest_files = {} + # Now sort the entity sets by each entity + task_entity_sets = pd.DataFrame(task_entity_sets) + task_entity_sets = task_entity_sets.sort_values(by=task_entity_sets.columns.tolist()) + task_entity_sets = task_entity_sets.fillna(Query.NONE) - query = { - "subject": self.subject_id, - "task": "rest", - "run": Query.NONE, - "desc": "preprocESQC", - "suffix": "bold", - "extension": ".svg", - } - concatenated_rest_files["preproc_carpet"] = self._get_bids_file(query) - - query["desc"] = "postcarpetplot" - concatenated_rest_files["postproc_carpet"] = self._get_bids_file(query) + # Extract entities with variability + # This lets us name the sections based on multiple entities (not just task and run) + nunique = task_entity_sets.nunique() + nunique.loc["task"] = 2 # ensure we keep task + nunique.loc["run"] = 2 # ensure we keep run + cols_to_drop = nunique[nunique == 1].index + task_entity_namer = task_entity_sets.drop(cols_to_drop, axis=1) - self.concatenated_rest_files_ = concatenated_rest_files + # Convert back to dictionary + task_entity_sets = task_entity_sets.to_dict(orient="records") + task_entity_namer = task_entity_namer.to_dict(orient="records") task_files = [] - for task_entity_set in task_entity_sets: - task_file_figures = task_entity_set.copy() - task_file_figures[ - "key" - ] = f"task-{task_entity_set['task']}_run-{task_entity_set.get('run', 0)}" + for i_set, task_entity_set in enumerate(task_entity_sets): + task_file_figures = {} + + # Convert any floats in the name to ints + temp_dict = {} + for k, v in task_entity_namer[i_set].items(): + try: + temp_dict[k] = int(v) + except (ValueError, 
TypeError): + temp_dict[k] = v + + # String used for subsection headers + task_file_figures["key"] = " ".join([f"{k}-{v}" for k, v in temp_dict.items()]) query = { "subject": self.subject_id, @@ -222,10 +250,12 @@ def collect_inputs(self): task_file_figures["registration_files"].append(found_file) - task_files.append(task_file_figures) + # If there is no mean BOLD figure, then the "run" was made by the concatenation workflow. + # Skip the concatenated resting-state scan, since it has its own section. + if query["task"] == "rest" and not task_file_figures["bold"]: + continue - # Sort the files by the desired key - task_files = sorted(task_files, key=lambda d: d["key"]) + task_files.append(task_file_figures) self.task_files_ = task_files