diff --git a/narps_open/pipelines/team_T54A.py b/narps_open/pipelines/team_T54A.py index a680f042..9cedd408 100644 --- a/narps_open/pipelines/team_T54A.py +++ b/narps_open/pipelines/team_T54A.py @@ -6,7 +6,7 @@ from os.path import join from itertools import product -from nipype import Workflow, Node +from nipype import Workflow, Node, MapNode from nipype.interfaces.utility import IdentityInterface, Function from nipype.interfaces.io import SelectFiles, DataSink from nipype.interfaces.fsl import ( @@ -17,6 +17,8 @@ from narps_open.pipelines import Pipeline from narps_open.data.task import TaskInformation +from narps_open.data.participants import get_group +from narps_open.core.common import remove_file, list_intersection, elements_in_string, clean_list # Setup FSL FSLCommand.set_default_output_type('NIFTI_GZ') @@ -105,7 +107,7 @@ def get_parameters_file(filepath, subject_id, run_id, working_dir): Create a tsv file with only desired parameters per subject per run. Parameters : - - filepath : path to subject parameters file (i.e. one per run) + - filepath : path to the subject parameters file (i.e. 
one per run) - subject_id : subject for whom the 1st level analysis is made - run_id: run for which the 1st level analysis is made - working_dir: str, name of the directory for intermediate results @@ -132,7 +134,7 @@ def get_parameters_file(filepath, subject_id, run_id, working_dir): retained_parameters = DataFrame(transpose(temp_list)) parameters_file = join(working_dir, 'parameters_file', - f'parameters_file_sub-{subject_id}_run{run_id}.tsv') + f'parameters_file_sub-{subject_id}_run-{run_id}.tsv') makedirs(join(working_dir, 'parameters_file'), exist_ok = True) @@ -142,30 +144,6 @@ def get_parameters_file(filepath, subject_id, run_id, working_dir): return parameters_file - def remove_smoothed_files(_, subject_id, run_id, working_dir): - """ - This method is used in a Function node to fully remove - the files generated by the smoothing node, once they aren't needed anymore. - - Parameters: - - _: Node input only used for triggering the Node - - subject_id: str, id of the subject from which to remove the file - - run_id: str, id of the run from which to remove the file - - working_dir: str, path to the working dir - """ - from shutil import rmtree - from os.path import join - - try: - rmtree(join( - working_dir, 'l1_analysis', - f'_run_id_{run_id}_subject_id_{subject_id}', 'smooth') - ) - except OSError as error: - print(error) - else: - print('The directory is deleted successfully') - def get_run_level_analysis(self): """ Create the run level analysis workflow. 
@@ -248,10 +226,9 @@ def get_run_level_analysis(self): # Function node remove_smoothed_files - remove output of the smooth node - remove_smoothed_files = Node(Function( - function = self.remove_smoothed_files, - input_names = ['_', 'subject_id', 'run_id', 'working_dir']), - name = 'remove_smoothed_files') - remove_smoothed_files.inputs.working_dir = self.directories.working_dir + remove_smoothed_files = MapNode(Function( + function = remove_file, + input_names = ['_', 'file_name']), + name = 'remove_smoothed_files', iterfield = 'file_name') # Create l1 analysis workflow and connect its nodes run_level_analysis = Workflow( @@ -280,16 +257,14 @@ def get_run_level_analysis(self): (model_generation, model_estimate, [ ('con_file', 'tcon_file'), ('design_file', 'design_file')]), - (information_source, remove_smoothed_files, [ - ('subject_id', 'subject_id'), - ('run_id', 'run_id')]), + (smoothing_func, remove_smoothed_files, [('out_file', 'file_name')]), (model_estimate, remove_smoothed_files, [('results_dir', '_')]), (model_estimate, data_sink, [('results_dir', 'run_level_analysis.@results')]), (model_generation, data_sink, [ ('design_file', 'run_level_analysis.@design_file'), ('design_image', 'run_level_analysis.@design_img')]), (skull_stripping_func, data_sink, [('mask_file', 'run_level_analysis.@skullstriped')]) - ]) + ]) return run_level_analysis @@ -452,65 +427,6 @@ def get_subject_level_outputs(self): return [template.format(**dict(zip(parameters.keys(), parameter_values)))\ for parameter_values in parameter_sets] - def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_file: str): - """ - Return the file list containing only the files belonging to subject in the wanted group. 
- - Parameters : - - copes: original file list selected by select_files node - - varcopes: original file list selected by select_files node - - subject_list: list of subject IDs that are analyzed - - participants_file: file containing participants characteristics - - Returns : - - copes_equal_indifference : a subset of copes corresponding to subjects - in the equalIndifference group - - copes_equal_range : a subset of copes corresponding to subjects - in the equalRange group - - varcopes_equal_indifference : a subset of varcopes corresponding to subjects - in the equalIndifference group - - varcopes_equal_range : a subset of varcopes corresponding to subjects - in the equalRange group - - equal_indifference_ids : a list of subject ids in the equalIndifference group - - equal_range_ids : a list of subject ids in the equalRange group - """ - - subject_list_sub_ids = [] # ids as written in the participants file - equal_range_ids = [] # ids as 3-digit string - equal_indifference_ids = [] # ids as 3-digit string - equal_range_sub_ids = [] # ids as written in the participants file - equal_indifference_sub_ids = [] # ids as written in the participants file - - # Reading file containing participants IDs and groups - with open(participants_file, 'rt') as file: - next(file) # skip the header - - for line in file: - info = line.strip().split() - subject_id = info[0][-3:] - subject_group = info[1] - - # Check if the participant ID was selected and sort depending on group - if subject_id in subject_list: - subject_list_sub_ids.append(info[0]) - if subject_group == 'equalIndifference': - equal_indifference_ids.append(subject_id) - equal_indifference_sub_ids.append(info[0]) - elif subject_group == 'equalRange': - equal_range_ids.append(subject_id) - equal_range_sub_ids.append(info[0]) - - - # Return sorted selected copes and varcopes by group, and corresponding ids - return \ - [c for c in copes if any(i in c for i in equal_indifference_sub_ids)],\ - [c for c in copes if any(i in 
c for i in equal_range_sub_ids)],\ - [c for c in copes if any(i in c for i in subject_list_sub_ids)],\ - [v for v in varcopes if any(i in v for i in equal_indifference_sub_ids)],\ - [v for v in varcopes if any(i in v for i in equal_range_sub_ids)],\ - [v for v in varcopes if any(i in v for i in subject_list_sub_ids)],\ - equal_indifference_ids, equal_range_ids - def get_one_sample_t_test_regressors(subject_list: list) -> dict: """ Create dictionary of regressors for one sample t-test group analysis. @@ -592,14 +508,14 @@ def get_group_level_analysis_sub_workflow(self, method): # SelectFiles Node - select necessary files templates = { 'cope' : join(self.directories.output_dir, - 'l2_analysis', '_contrast_id_{contrast_id}_subject_id_{subject_id}', + 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_{subject_id}', 'cope1.nii.gz'), 'varcope' : join(self.directories.output_dir, - 'l2_analysis', '_contrast_id_{contrast_id}_subject_id_{subject_id}', + 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_{subject_id}', 'varcope1.nii.gz'), 'participants' : join(self.directories.dataset_dir, 'participants.tsv'), 'mask': join(self.directories.output_dir, - 'l1_analysis', '_run_id_*_subject_id_{subject_id}', + 'run_level_analysis', '_run_id_*_subject_id_{subject_id}', 'sub-{subject_id}_task-MGT_run-*_bold_space-MNI152NLin2009cAsym_preproc_brain_mask.nii.gz') } select_files = Node(SelectFiles(templates), name = 'select_files') @@ -617,50 +533,6 @@ def get_group_level_analysis_sub_workflow(self, method): merge_varcopes = Node(Merge(), name = 'merge_varcopes') merge_varcopes.inputs.dimension = 't' - # Function Node get_one_sample_t_test_regressors - # Get regressors in the equalRange and equalIndifference method case - regressors_one_sample = Node( - Function( - function = self.get_one_sample_t_test_regressors, - input_names = ['subject_list'], - output_names = ['regressors'] - ), - name = 'regressors_one_sample', - ) - - # Function Node 
get_two_sample_t_test_regressors - # Get regressors in the groupComp method case - regressors_two_sample = Node( - Function( - function = self.get_two_sample_t_test_regressors, - input_names = [ - 'equal_range_ids', - 'equal_indifference_ids', - 'subject_list', - ], - output_names = ['regressors', 'groups'] - ), - name = 'regressors_two_sample', - ) - regressors_two_sample.inputs.subject_list = self.subject_list - - # Function Node get_subgroups_contrasts - Get the contrast files for each subgroup - get_contrasts = Node(Function( - function = self.get_subgroups_contrasts, - input_names = ['copes', 'varcopes', 'subject_list', 'participants_file'], - output_names = [ - 'copes_equal_indifference', - 'copes_equal_range', - 'copes_global', - 'varcopes_equal_indifference', - 'varcopes_equal_range', - 'varcopes_global', - 'equal_indifference_id', - 'equal_range_id' - ] - ), - name = 'get_contrasts') - # MultipleRegressDesign Node - Specify model specify_model = Node(MultipleRegressDesign(), name = 'specify_model') @@ -685,74 +557,157 @@ def get_group_level_analysis_sub_workflow(self, method): name = f'group_level_analysis_{method}_nsub_{nb_subjects}') group_level_analysis.connect([ (information_source, select_files, [('contrast_id', 'contrast_id')]), - (information_source, get_contrasts, [('subject_list', 'subject_list')]), - (select_files, get_contrasts, [ - ('cope', 'copes'), - ('varcope', 'varcopes'), - ('participants', 'participants_file')]), (select_files, estimate_model, [('mask', 'mask_file')]), - (select_files, randomise, [('mask', 'mask')]) + (select_files, randomise, [('mask', 'mask')]), + (merge_copes, estimate_model, [('merged_file', 'cope_file')]), + (merge_varcopes, estimate_model, [('merged_file', 'var_cope_file')]), + (specify_model, estimate_model, [ + ('design_mat', 'design_file'), + ('design_con', 't_con_file'), + ('design_grp', 'cov_split_file') + ]), + (merge_copes, randomise, [('merged_file', 'in_file')]), + (specify_model, randomise, [ + 
('design_mat', 'design_mat'), + ('design_con', 'tcon') + ]), + (randomise, data_sink, [ + ('t_corrected_p_files', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tcorpfile'), + ('tstat_files', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstat') + ]), + (estimate_model, data_sink, [ + ('zstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@zstats'), + ('tstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstats') + ]) ]) if method in ('equalIndifference', 'equalRange'): + + # Setup a one sample t-test specify_model.inputs.contrasts = [ ['group_mean', 'T', ['group_mean'], [1]], ['group_mean_neg', 'T', ['group_mean'], [-1]] ] - group_level_analysis.connect([ - (regressors_one_sample, specify_model, [('regressors', 'regressors')]) - ]) + # Function Node get_group_subjects - Get subjects in the group and in the subject_list + get_group_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_group_subjects' + ) + get_group_subjects.inputs.list_1 = get_group(method) + get_group_subjects.inputs.list_2 = self.subject_list + + # Function Node elements_in_string + # Get contrast of parameter estimates (cope) for these subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_copes = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] + ), + name = 'get_copes', iterfield = 'input_str' + ) - if method == 'equalIndifference': - group_level_analysis.connect([ - (get_contrasts, merge_copes, [('copes_equal_indifference', 'in_files')]), - (get_contrasts, merge_varcopes,[('varcopes_equal_indifference', 'in_files')]), - (get_contrasts, regressors_one_sample, [ - ('equal_indifference_id', 'subject_list') - ]) - ]) + # Function Node elements_in_string + # Get variance of the estimated copes (varcope) for these subjects + # 
Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_varcopes = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] + ), + name = 'get_varcopes', iterfield = 'input_str' + ) - elif method == 'equalRange': - group_level_analysis.connect([ - (get_contrasts, merge_copes, [('copes_equal_range', 'in_files')]), - (get_contrasts, merge_varcopes, [('varcopes_equal_range', 'in_files')]), - (get_contrasts, regressors_one_sample, [('equal_range_id', 'equal_range_id')]) - ]) + # Function Node get_one_sample_t_test_regressors + # Get regressors in the equalRange and equalIndifference method case + regressors_one_sample = Node( + Function( + function = self.get_one_sample_t_test_regressors, + input_names = ['subject_list'], + output_names = ['regressors'] + ), + name = 'regressors_one_sample', + ) + + # Add missing connections + group_level_analysis.connect([ + (select_files, get_copes, [('cope', 'input_str')]), + (select_files, get_varcopes, [('varcope', 'input_str')]), + (get_group_subjects, get_copes, [('out_list', 'elements')]), + (get_group_subjects, get_varcopes, [('out_list', 'elements')]), + (get_copes, merge_copes, [(('out_list', clean_list), 'in_files')]), + (get_varcopes, merge_varcopes,[(('out_list', clean_list), 'in_files')]), + (get_group_subjects, regressors_one_sample, [('out_list', 'subject_list')]), + (regressors_one_sample, specify_model, [('regressors', 'regressors')]) + ]) elif method == 'groupComp': + + # Setup a two sample t-test specify_model.inputs.contrasts = [ ['equalRange_sup', 'T', ['equalRange', 'equalIndifference'], [1, -1]] ] + + # Function Node get_equal_range_subjects + # Get subjects in the equalRange group and in the subject_list + get_equal_range_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 
'get_equal_range_subjects' + ) + get_equal_range_subjects.inputs.list_1 = get_group('equalRange') + get_equal_range_subjects.inputs.list_2 = self.subject_list + + # Function Node get_equal_indifference_subjects + # Get subjects in the equalIndifference group and in the subject_list + get_equal_indifference_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_equal_indifference_subjects' + ) + get_equal_indifference_subjects.inputs.list_1 = get_group('equalIndifference') + get_equal_indifference_subjects.inputs.list_2 = self.subject_list + + # Function Node get_two_sample_t_test_regressors + # Get regressors in the groupComp method case + regressors_two_sample = Node( + Function( + function = self.get_two_sample_t_test_regressors, + input_names = [ + 'equal_range_ids', + 'equal_indifference_ids', + 'subject_list', + ], + output_names = ['regressors', 'groups'] + ), + name = 'regressors_two_sample', + ) + regressors_two_sample.inputs.subject_list = self.subject_list + + # Add missing connections group_level_analysis.connect([ - (get_contrasts, merge_copes, [('copes_global', 'in_files')]), - (get_contrasts, merge_varcopes,[('varcopes_global', 'in_files')]), - (get_contrasts, regressors_two_sample, [ - ('equal_range_id', 'equal_range_id'), - ('equal_indifference_id', 'equal_indifference_id')]), + (select_files, merge_copes, [('cope', 'in_files')]), + (select_files, merge_varcopes,[('varcope', 'in_files')]), + (get_equal_range_subjects, regressors_two_sample, [ + ('out_list', 'equal_range_ids') + ]), + (get_equal_indifference_subjects, regressors_two_sample, [ + ('out_list', 'equal_indifference_ids') + ]), + (regressors_two_sample, specify_model, [ ('regressors', 'regressors'), ('groups', 'groups')]) - ]) - - group_level_analysis.connect([ - (merge_copes, estimate_model, [('merged_file', 'cope_file')]), - (merge_varcopes, estimate_model, [('merged_file', 'var_cope_file')]), - 
(specify_model, estimate_model, [ - ('design_mat', 'design_file'), - ('design_con', 't_con_file'), - ('design_grp', 'cov_split_file')]), - (merge_copes, randomise, [('merged_file', 'in_file')]), - (specify_model, randomise, [ - ('design_mat', 'design_mat'), - ('design_con', 'tcon')]), - (randomise, data_sink, [ - ('t_corrected_p_files', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tcorpfile'), - ('tstat_files', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstat')]), - (estimate_model, data_sink, [ - ('zstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@zstats'), - ('tstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstats')]), - ]) + ]) return group_level_analysis @@ -761,7 +716,7 @@ def get_group_level_outputs(self): # Handle equalRange and equalIndifference parameters = { - 'contrast_id': ['ploss', 'pgain'], + 'contrast_id': self.contrast_list, 'method': ['equalRange', 'equalIndifference'], 'file': [ 'randomise_tfce_corrp_tstat1.nii.gz', @@ -797,7 +752,7 @@ def get_group_level_outputs(self): return_list += [join( self.directories.output_dir, f'group_level_analysis_groupComp_nsub_{len(self.subject_list)}', - '_contrast_id_ploss', f'{file}') for file in files] + '_contrast_id_2', f'{file}') for file in files] return return_list @@ -807,43 +762,40 @@ def get_hypotheses_outputs(self): nb_sub = len(self.subject_list) files = [ join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), + '_contrast_id_1', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_pgain', 'zstat1.nii.gz'), + '_contrast_id_1', 'zstat1.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), + '_contrast_id_1', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_pgain', 'zstat1.nii.gz'), + '_contrast_id_1', 
'zstat1.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), + '_contrast_id_1', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_pgain', 'zstat1.nii.gz'), + '_contrast_id_1', 'zstat1.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), + '_contrast_id_1', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_pgain', 'zstat1.nii.gz'), + '_contrast_id_1', 'zstat1.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_ploss', 'randomise_tfce_corrp_tstat2.nii.gz'), + '_contrast_id_2', 'randomise_tfce_corrp_tstat2.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_ploss', 'zstat2.nii.gz'), + '_contrast_id_2', 'zstat2.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_ploss', 'randomise_tfce_corrp_tstat2.nii.gz'), + '_contrast_id_2', 'randomise_tfce_corrp_tstat2.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_ploss', 'zstat2.nii.gz'), + '_contrast_id_2', 'zstat2.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'), + '_contrast_id_2', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_ploss', 'zstat1.nii.gz'), + '_contrast_id_2', 'zstat1.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'), + '_contrast_id_2', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_ploss', 'zstat1.nii.gz'), + '_contrast_id_2', 'zstat1.nii.gz'), join(f'group_level_analysis_groupComp_nsub_{nb_sub}', - 
'_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'), + '_contrast_id_2', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_groupComp_nsub_{nb_sub}', - '_contrast_id_ploss', 'zstat1.nii.gz') + '_contrast_id_2', 'zstat1.nii.gz') ] return [join(self.directories.output_dir, f) for f in files] - -##### TODO : what is this ? -#system('export PATH=$PATH:/local/egermani/ICA-AROMA') diff --git a/tests/pipelines/__init__.py b/tests/pipelines/__init__.py deleted file mode 100644 index 9ede3dbc..00000000 --- a/tests/pipelines/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -""" -Configuration for testing of the narps_open.pipelines modules. -""" - -from pytest import helpers - -@helpers.register -def mock_event_data(mocker): - """ Mocks the return of the open function with the contents of a fake event file """ - - fake_event_data = 'onset duration\tgain\tloss\tRT\tparticipant_response\n' - fake_event_data += '4.071\t4\t14\t6\t2.388\tweakly_accept\n' - fake_event_data += '11.834\t4\t34\t14\t2.289\tstrongly_accept\n' - fake_event_data += '19.535\t4\t38\t19\t0\tNoResp\n' - fake_event_data += '27.535\t4\t10\t15\t2.08\tstrongly_reject\n' - fake_event_data += '36.435\t4\t16\t17\t2.288\tweakly_reject\n' - - mocker.patch('builtins.open', mocker.mock_open(read_data = fake_event_data)) - -@helpers.register -def mock_participants_data(mocker): - """ Mocks the return of the open function with the contents of a fake participants file """ - - fake_participants_data = 'participant_id\tgroup\tgender\tage\n' - fake_participants_data += 'sub-001\tequalIndifference\tM\t24\n' - fake_participants_data += 'sub-002\tequalRange\tM\t25\n' - fake_participants_data += 'sub-003\tequalIndifference\tF\t27\n' - fake_participants_data += 'sub-004\tequalRange\tM\t25\n' - - mocker.patch('builtins.open', mocker.mock_open(read_data = fake_participants_data)) diff --git a/tests/pipelines/test_team_T54A.py b/tests/pipelines/test_team_T54A.py index 
e8b1f3a3..b585d9b0 100644 --- a/tests/pipelines/test_team_T54A.py +++ b/tests/pipelines/test_team_T54A.py @@ -10,13 +10,28 @@ pytest -q test_team_T54A.py pytest -q test_team_T54A.py -k """ +from os import mkdir +from os.path import exists, join +from shutil import rmtree -from pytest import helpers, mark +from pytest import helpers, mark, fixture from numpy import isclose from nipype import Workflow from nipype.interfaces.base import Bunch from narps_open.pipelines.team_T54A import PipelineTeamT54A +from narps_open.utils.configuration import Configuration + +TEMPORARY_DIR = join(Configuration()['directories']['test_runs'], 'test_T54A') + +@fixture +def remove_test_dir(): + """ A fixture to remove temporary directory created by tests """ + + rmtree(TEMPORARY_DIR, ignore_errors = True) + mkdir(TEMPORARY_DIR) + yield # test runs here + rmtree(TEMPORARY_DIR, ignore_errors = True) class TestPipelinesTeamT54A: """ A class that contains all the unit tests for the PipelineTeamT54A class.""" @@ -70,12 +85,13 @@ def test_outputs(): @staticmethod @mark.unit_test - def test_subject_information(mocker): + def test_subject_information(): """ Test the get_subject_information method """ - helpers.mock_event_data(mocker) + event_file_path = join( + Configuration()['directories']['test_data'], 'pipelines', 'events.tsv') - information = PipelineTeamT54A.get_subject_information('fake_event_file_path')[0] + information = PipelineTeamT54A.get_subject_information(event_file_path)[0] assert isinstance(information, Bunch) assert information.conditions == [ @@ -122,31 +138,25 @@ def test_subject_information(mocker): @staticmethod @mark.unit_test - def test_parameters_file(mocker): + def test_parameters_file(remove_test_dir): """ Test the get_parameters_file method """ - @staticmethod - @mark.unit_test - def test_subgroups_contrasts(mocker): - """ Test the get_subgroups_contrasts method """ - - helpers.mock_participants_data(mocker) + confounds_file_path = join( + 
Configuration()['directories']['test_data'], 'pipelines', 'confounds.tsv') - cei, cer, cg, vei, ver, vg, eii, eri = PipelineTeamT54A.get_subgroups_contrasts( - ['sub-001/_contrast_id_1/cope1.nii.gz', 'sub-001/_contrast_id_2/cope1.nii.gz', 'sub-002/_contrast_id_1/cope1.nii.gz', 'sub-002/_contrast_id_2/cope1.nii.gz', 'sub-003/_contrast_id_1/cope1.nii.gz', 'sub-003/_contrast_id_2/cope1.nii.gz', 'sub-004/_contrast_id_1/cope1.nii.gz', 'sub-004/_contrast_id_2/cope1.nii.gz'], # copes - ['sub-001/_contrast_id_1/varcope1.nii.gz', 'sub-001/_contrast_id_2/varcope1.nii.gz', 'sub-002/_contrast_id_1/varcope1.nii.gz', 'sub-002/_contrast_id_2/varcope1.nii.gz', 'sub-003/_contrast_id_1/varcope1.nii.gz', 'sub-003/_contrast_id_2/varcope1.nii.gz', 'sub-004/_contrast_id_1/varcope1.nii.gz', 'sub-004/_contrast_id_2/varcope1.nii.gz'], # varcopes - ['001', '002', '003', '004'], # subject_list - ['fake_participants_file_path'] # participants file + PipelineTeamT54A.get_parameters_file( + confounds_file_path, + 'fake_subject_id', + 'fake_run_id', + TEMPORARY_DIR ) - assert cei == ['sub-001/_contrast_id_1/cope1.nii.gz', 'sub-001/_contrast_id_2/cope1.nii.gz', 'sub-003/_contrast_id_1/cope1.nii.gz', 'sub-003/_contrast_id_2/cope1.nii.gz'] - assert cer == ['sub-002/_contrast_id_1/cope1.nii.gz', 'sub-002/_contrast_id_2/cope1.nii.gz', 'sub-004/_contrast_id_1/cope1.nii.gz', 'sub-004/_contrast_id_2/cope1.nii.gz'] - assert cg == ['sub-001/_contrast_id_1/cope1.nii.gz', 'sub-001/_contrast_id_2/cope1.nii.gz', 'sub-002/_contrast_id_1/cope1.nii.gz', 'sub-002/_contrast_id_2/cope1.nii.gz', 'sub-003/_contrast_id_1/cope1.nii.gz', 'sub-003/_contrast_id_2/cope1.nii.gz', 'sub-004/_contrast_id_1/cope1.nii.gz', 'sub-004/_contrast_id_2/cope1.nii.gz'] - assert vei == ['sub-001/_contrast_id_1/varcope1.nii.gz', 'sub-001/_contrast_id_2/varcope1.nii.gz', 'sub-003/_contrast_id_1/varcope1.nii.gz', 'sub-003/_contrast_id_2/varcope1.nii.gz'] - assert ver == ['sub-002/_contrast_id_1/varcope1.nii.gz', 
'sub-002/_contrast_id_2/varcope1.nii.gz', 'sub-004/_contrast_id_1/varcope1.nii.gz', 'sub-004/_contrast_id_2/varcope1.nii.gz'] - assert vg == ['sub-001/_contrast_id_1/varcope1.nii.gz', 'sub-001/_contrast_id_2/varcope1.nii.gz', 'sub-002/_contrast_id_1/varcope1.nii.gz', 'sub-002/_contrast_id_2/varcope1.nii.gz', 'sub-003/_contrast_id_1/varcope1.nii.gz', 'sub-003/_contrast_id_2/varcope1.nii.gz', 'sub-004/_contrast_id_1/varcope1.nii.gz', 'sub-004/_contrast_id_2/varcope1.nii.gz'] - assert eii == ['001', '003'] - assert eri == ['002', '004'] + # Check parameter file was created + assert exists(join( + TEMPORARY_DIR, + 'parameters_file', + 'parameters_file_sub-fake_subject_id_run-fake_run_id.tsv') + ) @staticmethod @mark.unit_test diff --git a/tests/test_data/pipelines/confounds.tsv b/tests/test_data/pipelines/confounds.tsv new file mode 100644 index 00000000..f49d4fea --- /dev/null +++ b/tests/test_data/pipelines/confounds.tsv @@ -0,0 +1,4 @@ +CSF WhiteMatter GlobalSignal stdDVARS non-stdDVARS vx-wisestdDVARS FramewiseDisplacement tCompCor00 tCompCor01 tCompCor02 tCompCor03 tCompCor04 tCompCor05 aCompCor00 aCompCor01 aCompCor02 aCompCor03 aCompCor04 aCompCor05 Cosine00 Cosine01 Cosine02 Cosine03 Cosine04 Cosine05 NonSteadyStateOutlier00 X Y Z RotX RotY RotZ +6551.281999999999 6476.4653 9874.576 n/a n/a n/a n/a 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 -0.0 0.0 +6484.7285 6473.4890000000005 9830.212 1.09046686 52.78273392 1.05943739 0.13527900930999998 0.0263099209 -0.0673065879 0.0934882554 -0.0079328884 0.0338007737 -0.011491083999999999 -0.042411347099999996 0.027736422900000002 0.0453303087 -0.07022609490000001 0.0963618709 -0.0200867957 0.0665186088 0.0665174038 0.0665153954 0.0665125838 0.0665089688 0.06650455059999999 0.0 -0.00996895 -0.0313444 -3.00931e-06 0.00132687 -0.000384193 -0.00016819 +6441.5337 6485.7256 9821.212 1.07520139 52.04382706 1.03821933 0.12437666391 -0.0404820317 0.034150583 0.13661184210000002 
0.0745358691 -0.0054829985999999995 -0.0217322686 0.046214115199999996 0.005774624 -0.043909359800000006 -0.075619539 0.17546891539999998 -0.0345256763 0.0665153954 0.06650455059999999 0.06648647719999999 0.0664611772 0.0664286533 0.0663889091 0.0 -2.56954e-05 -0.00923735 0.0549667 0.000997278 -0.00019745 -0.000398988 diff --git a/tests/test_data/pipelines/events.tsv b/tests/test_data/pipelines/events.tsv new file mode 100644 index 00000000..4b8f04e6 --- /dev/null +++ b/tests/test_data/pipelines/events.tsv @@ -0,0 +1,6 @@ +onset duration gain loss RT participant_response +4.071 4 14 6 2.388 weakly_accept +11.834 4 34 14 2.289 strongly_accept +19.535 4 38 19 0 NoResp +27.535 4 10 15 2.08 strongly_reject +36.435 4 16 17 2.288 weakly_reject \ No newline at end of file diff --git a/tests/test_data/pipelines/participants.tsv b/tests/test_data/pipelines/participants.tsv new file mode 100644 index 00000000..312dbcde --- /dev/null +++ b/tests/test_data/pipelines/participants.tsv @@ -0,0 +1,5 @@ +participant_id group gender age +sub-001 equalIndifference M 24 +sub-002 equalRange M 25 +sub-003 equalIndifference F 27 +sub-004 equalRange M 25 \ No newline at end of file