diff --git a/bin/5ttgen b/bin/5ttgen deleted file mode 100755 index ace4ff2df2..0000000000 --- a/bin/5ttgen +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Script that generates a five-tissue-type (5TT) segmented image: the format appropriate for ACT -# -# In this script, major stages of processing can be performed in one of two ways: -# - Using FSL tools: BET for brain extraction, FAST for tissue segmentation, FIRST for sub-cortical grey matter segmentation -# - Using segmentations from FreeSurfer -# Alternative algorithms for performing this conversion can be added by creating a new file in lib/mrtrix3/_5ttgen/ and -# defining the appropriate functions; 5ttgen will automatically make that algorithm available at the command-line - - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import algorithm #pylint: disable=no-name-in-module, import-outside-toplevel - - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Generate a 5TT image suitable for ACT') - cmdline.add_citation('Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. Anatomically-constrained tractography: Improved diffusion MRI streamlines tractography through effective use of anatomical information. NeuroImage, 2012, 62, 1924-1938') - cmdline.add_description('5ttgen acts as a \'master\' script for generating a five-tissue-type (5TT) segmented tissue image suitable for use in Anatomically-Constrained Tractography (ACT). A range of different algorithms are available for completing this task. When using this script, the name of the algorithm to be used must appear as the first argument on the command-line after \'5ttgen\'. The subsequent compulsory arguments and options available depend on the particular algorithm being invoked.') - cmdline.add_description('Each algorithm available also has its own help page, including necessary references; e.g. 
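The comment block above describes how alternative algorithms are added: a new module under lib/mrtrix3/_5ttgen/ that defines the hooks this wrapper invokes from execute() further down (check_output_paths, get_inputs, execute). A minimal sketch of such a module follows; the module name, the usage() signature and the internal commands are assumptions for illustration only, not the actual MRtrix3 interface.

# lib/mrtrix3/_5ttgen/myseg.py -- hypothetical example module (sketch only)

def usage(base_parser, subparsers):  # signature assumed, by analogy with other algorithm modules
  parser = subparsers.add_parser('myseg', parents=[base_parser])
  parser.set_author('Example Author (example@example.org)')
  parser.set_synopsis('Hypothetical 5TT segmentation algorithm')
  parser.add_argument('input', help='The input anatomical image')
  parser.add_argument('output', help='The output 5TT image')

def check_output_paths():
  from mrtrix3 import app
  app.check_output_path(app.ARGS.output)

def get_inputs():
  from mrtrix3 import app, path, run
  # Copy the input image into the scratch directory before processing begins
  run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('input.mif'))

def execute():
  from mrtrix3 import app, path, run
  # ... segmentation would happen here, producing result.mif in the scratch
  # directory, which 5ttgen's own execute() then verifies with 5ttcheck ...
  run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), force=app.FORCE_OVERWRITE)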
to see the help page of the \'fsl\' algorithm, type \'5ttgen fsl\'.') - - common_options = cmdline.add_argument_group('Options common to all 5ttgen algorithms') - common_options.add_argument('-nocrop', action='store_true', default=False, help='Do NOT crop the resulting 5TT image to reduce its size (keep the same dimensions as the input image)') - common_options.add_argument('-sgm_amyg_hipp', action='store_true', default=False, help='Represent the amygdalae and hippocampi as sub-cortical grey matter in the 5TT image') - - # Import the command-line settings for all algorithms found in the relevant directory - algorithm.usage(cmdline) - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import algorithm, app, run #pylint: disable=no-name-in-module, import-outside-toplevel - - # Find out which algorithm the user has requested - alg = algorithm.get_module(app.ARGS.algorithm) - - alg.check_output_paths() - - app.make_scratch_dir() - alg.get_inputs() - app.goto_scratch_dir() - - alg.execute() - - stderr = run.command('5ttcheck result.mif').stderr - if '[WARNING]' in stderr: - app.warn('Generated image does not perfectly conform to 5TT format:') - for line in stderr.splitlines(): - app.warn(line) - - - -# Execute the script -import mrtrix3 -mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/blend b/bin/blend deleted file mode 100755 index 656c9c3d9a..0000000000 --- a/bin/blend +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python2 - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# note: deal with these warnings properly when we drop support for Python 2: -# pylint: disable=consider-using-f-string - -import os -import sys - -if len(sys.argv) <= 1: - sys.stderr.write('A script to blend two sets of movie frames together with a desired overlap.\n') - sys.stderr.write('The input arguments are two folders containing the movie frames (eg. 
output from the MRview screenshot tool), and the desired number of overlapping frames.\n') - sys.stderr.write('eg: blend folder1 folder2 20 output_folder\n') - sys.exit(1) - -INPUT_FOLDER_1 = sys.argv[1] -INPUT_FOLDER_2 = sys.argv[2] -FILE_LIST_1 = sorted(os.listdir(INPUT_FOLDER_1)) -FILE_LIST_2 = sorted(os.listdir(INPUT_FOLDER_2)) -NUM_OVERLAP = int(sys.argv[3]) -OUTPUT_FOLDER = sys.argv[4] - -if not os.path.exists(OUTPUT_FOLDER): - os.mkdir(OUTPUT_FOLDER) - -NUM_OUTPUT_FRAMES = len(FILE_LIST_1) + len(FILE_LIST_2) - NUM_OVERLAP -for i in range(NUM_OUTPUT_FRAMES): - file_name = 'frame' + '%0*d' % (5, i) + '.png' - if i <= len(FILE_LIST_1) - NUM_OVERLAP: - os.system('cp -L ' + INPUT_FOLDER_1 + '/' + FILE_LIST_1[i] + ' ' + OUTPUT_FOLDER + '/' + file_name) - if len(FILE_LIST_1) - NUM_OVERLAP < i < len(FILE_LIST_1): - i2 = i - (len(FILE_LIST_1) - NUM_OVERLAP) - 1 - blend_amount = 100 * float(i2 + 1) / float(NUM_OVERLAP) - os.system('convert ' + INPUT_FOLDER_1 + '/' + FILE_LIST_1[i] + ' ' + INPUT_FOLDER_2 + '/' + FILE_LIST_2[i2] + ' -alpha on -compose blend -define compose:args=' + str(blend_amount) + ' -gravity South -composite ' + OUTPUT_FOLDER + '/' + file_name) - if i >= (len(FILE_LIST_1)): - i2 = i - (len(FILE_LIST_1) - NUM_OVERLAP) - 1 - os.system('cp -L ' + INPUT_FOLDER_2 + '/' + FILE_LIST_2[i2] + ' ' + OUTPUT_FOLDER + '/' + file_name) diff --git a/bin/convert_bruker b/bin/convert_bruker deleted file mode 100755 index 854f2e462c..0000000000 --- a/bin/convert_bruker +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. 
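The core of the blend script above is the indexing of its three regions: frames copied verbatim from the first folder, an overlap region cross-faded with ImageMagick (the blend percentage ramping linearly across the overlap), and frames copied from the second folder. A small self-contained sketch of that frame plan, useful for checking the arithmetic; boundary conventions here are illustrative and may differ slightly from the script itself.

# Sketch: plan the output frames for cross-fading two frame sequences with
# `overlap` shared frames.  Pure bookkeeping; no files are touched.

def frame_plan(n1, n2, overlap):
  plan = []
  for i in range(n1 + n2 - overlap):
    if i < n1 - overlap:
      plan.append(('copy_from_1', i))
    elif i < n1:
      j = i - (n1 - overlap)                    # position 0..overlap-1 within the overlap
      weight = 100.0 * (j + 1) / (overlap + 1)  # percentage weight given to folder 2's frame
      plan.append(('blend', i, j, weight))      # blend folder-1 frame i with folder-2 frame j
    else:
      plan.append(('copy_from_2', i - (n1 - overlap)))
  return plan

# frame_plan(5, 5, 2) -> 8 output frames: three copies, two blends, three copies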
- -# note: deal with these warnings properly when we drop support for Python 2: -# pylint: disable=unspecified-encoding - -import sys, os.path - -if len (sys.argv) != 3: - sys.stderr.write("usage: convert_bruker 2dseq header.mih\n") - sys.exit (0) - - -#if os.path.basename (sys.argv[1]) != '2dseq': - #print ("expected '2dseq' file as first argument") - #sys.exit (1) - -if not sys.argv[2].endswith ('.mih'): - sys.stderr.write("expected .mih suffix as the second argument\n") - sys.exit (1) - - - -def main(): - - with open (os.path.join (os.path.dirname (sys.argv[1]), 'reco')) as file_reco: - lines = file_reco.read().split ('##$') - - with open (os.path.join (os.path.dirname (sys.argv[1]), '../../acqp')) as file_acqp: - lines += file_acqp.read().split ('##$') - - with open (os.path.join (os.path.dirname (sys.argv[1]), '../../method')) as file_method: - lines += file_method.read().split ('##$') - - - for line in lines: - line = line.lower() - if line.startswith ('reco_size='): - mat_size = line.splitlines()[1].split() - print ('mat_size', mat_size) - elif line.startswith ('nslices='): - nslices = line.split('=')[1].split()[0] - print ('nslices', nslices) - elif line.startswith ('acq_time_points='): - nacq = len (line.split('\n',1)[1].split()) - print ('nacq', nacq) - elif line.startswith ('reco_wordtype='): - wtype = line.split('=')[1].split()[0] - print ('wtype', wtype) - elif line.startswith ('reco_byte_order='): - byteorder = line.split('=')[1].split()[0] - print ('byteorder', byteorder) - elif line.startswith ('pvm_spatresol='): - res = line.splitlines()[1].split() - print ('res', res) - elif line.startswith ('pvm_spackarrslicedistance='): - slicethick = line.splitlines()[1].split()[0] - print ('slicethick', slicethick) - elif line.startswith ('pvm_dweffbval='): - bval = line.split('\n',1)[1].split() - print ('bval', bval) - elif line.startswith ('pvm_dwgradvec='): - bvec = line.split('\n',1)[1].split() - print ('bvec', bvec) - - - with open (sys.argv[2], 'w') as file_out: - file_out.write ('mrtrix image\ndim: ' + mat_size[0] + ',' + mat_size[1]) - if len(mat_size) > 2: - file_out.write (',' + str(mat_size[2])) - else: - try: - nslices #pylint: disable=pointless-statement - file_out.write (',' + str(nslices)) - except: - pass - - try: - nacq #pylint: disable=pointless-statement - file_out.write (',' + str(nacq)) - except: - pass - - file_out.write ('\nvox: ' + str(res[0]) + ',' + str(res[1])) - if len(res) > 2: - file_out.write (',' + str(res[2])) - else: - try: - slicethick #pylint: disable=pointless-statement - file_out.write (',' + str(slicethick)) - except: - pass - try: - nacq #pylint: disable=pointless-statement - file_out.write (',') - except: - pass - - file_out.write ('\ndatatype: ') - if wtype == '_16bit_sgn_int': - file_out.write ('int16') - elif wtype == '_32bit_sgn_int': - file_out.write ('int32') - - if byteorder=='littleendian': - file_out.write ('le') - else: - file_out.write ('be') - - file_out.write ('\nlayout: +0,+1') - try: - nslices #pylint: disable=pointless-statement - file_out.write (',+2') - except: - pass - try: - nacq #pylint: disable=pointless-statement - file_out.write (',+3') - except: - pass - - file_out.write ('\nfile: ' + sys.argv[1] + '\n') - - try: - assert len(bvec) == 3*len(bval) - bvec = [ bvec[n:n+3] for n in range(0,len(bvec),3) ] - for direction, value in zip(bvec, bval): - file_out.write ('dw_scheme: ' + direction[0] + ',' + direction[1] + ',' + str(-float(direction[2])) + ',' + value + '\n') - except: - pass - -main() diff --git a/bin/dwi2response 
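convert_bruker's final step above regroups the flattened pvm_dwgradvec list into direction triplets and pairs each with its pvm_dweffbval entry, flipping the sign of the third component, to emit dw_scheme lines. A stand-alone sketch of that regrouping, with made-up values:

# Sketch: regroup a flattened Bruker gradient vector list into (x, y, z)
# triplets and pair each with its b-value; values below are illustrative only.

bvec_flat = ['0', '0', '0', '0.707', '0', '0.707']   # from pvm_dwgradvec
bvals     = ['0', '1000']                            # from pvm_dweffbval

assert len(bvec_flat) == 3 * len(bvals)
directions = [bvec_flat[n:n + 3] for n in range(0, len(bvec_flat), 3)]
for (x, y, z), b in zip(directions, bvals):
  # Sign of the third component is flipped, as in the script above
  print('dw_scheme: ' + x + ',' + y + ',' + str(-float(z)) + ',' + b)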
b/bin/dwi2response deleted file mode 100755 index 8269dcd5ba..0000000000 --- a/bin/dwi2response +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Script for estimating response functions for spherical deconvolution -# A number of different approaches are available within this script for performing response function estimation. - - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import algorithm, app #pylint: disable=no-name-in-module, import-outside-toplevel - - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Thijs Dhollander (thijs.dhollander@gmail.com)') - cmdline.set_synopsis('Estimate response function(s) for spherical deconvolution') - cmdline.add_description('dwi2response offers different algorithms for performing various types of response function estimation. The name of the algorithm must appear as the first argument on the command-line after \'dwi2response\'. The subsequent arguments and options depend on the particular algorithm being invoked.') - cmdline.add_description('Each algorithm available has its own help page, including necessary references; e.g. 
to see the help page of the \'fa\' algorithm, type \'dwi2response fa\'.') - - # General options - common_options = cmdline.add_argument_group('General dwi2response options') - common_options.add_argument('-mask', help='Provide an initial mask for response voxel selection') - common_options.add_argument('-voxels', help='Output an image showing the final voxel selection(s)') - common_options.add_argument('-shells', help='The b-value(s) to use in response function estimation (comma-separated list in case of multiple b-values, b=0 must be included explicitly)') - common_options.add_argument('-lmax', help='The maximum harmonic degree(s) for response function estimation (comma-separated list in case of multiple b-values)') - app.add_dwgrad_import_options(cmdline) - - # Import the command-line settings for all algorithms found in the relevant directory - algorithm.usage(cmdline) - - - - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - - # Find out which algorithm the user has requested - alg = algorithm.get_module(app.ARGS.algorithm) - - # Check for prior existence of output files, and grab any input files, used by the particular algorithm - if app.ARGS.voxels: - app.check_output_path(app.ARGS.voxels) - alg.check_output_paths() - - # Sanitise some inputs, and get ready for data import - if app.ARGS.lmax: - try: - lmax = [ int(x) for x in app.ARGS.lmax.split(',') ] - if any(lmax_value%2 for lmax_value in lmax): - raise MRtrixError('Value of lmax must be even') - except: - raise MRtrixError('Parameter lmax must be a number') - if alg.needs_single_shell() and not len(lmax) == 1: - raise MRtrixError('Can only specify a single lmax value for single-shell algorithms') - shells_option = '' - if app.ARGS.shells: - try: - shells_values = [ int(round(float(x))) for x in app.ARGS.shells.split(',') ] - except: - raise MRtrixError('-shells option should provide a comma-separated list of b-values') - if alg.needs_single_shell() and not len(shells_values) == 1: - raise MRtrixError('Can only specify a single b-value shell for single-shell algorithms') - shells_option = ' -shells ' + app.ARGS.shells - singleshell_option = '' - if alg.needs_single_shell(): - singleshell_option = ' -singleshell -no_bzero' - - grad_import_option = app.read_dwgrad_import_options() - if not grad_import_option and 'dw_scheme' not in image.Header(path.from_user(app.ARGS.input, False)).keyval(): - raise MRtrixError('Script requires diffusion gradient table: either in image header, or using -grad / -fslgrad option') - - app.make_scratch_dir() - - # Get standard input data into the scratch directory - if alg.needs_single_shell() or shells_option: - app.console('Importing DWI data (' + path.from_user(app.ARGS.input) + ') and selecting b-values...') - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' - -strides 0,0,0,1' + grad_import_option + ' | dwiextract - ' + path.to_scratch('dwi.mif') + shells_option + singleshell_option, show=False) - else: # Don't discard b=0 in multi-shell algorithms - app.console('Importing DWI data (' + path.from_user(app.ARGS.input) + ')...') - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('dwi.mif') + ' -strides 0,0,0,1' + grad_import_option, show=False) - if app.ARGS.mask: - app.console('Importing mask (' + path.from_user(app.ARGS.mask) + ')...') - 
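In the -lmax handling just above, the even-value check sits inside the same try block as the integer conversion, so the bare except catches the 'Value of lmax must be even' error and re-reports it as 'Parameter lmax must be a number'. A hedged sketch of an equivalent parse that keeps the two failure modes separate:

# Sketch only: parse a comma-separated lmax list while reporting numeric
# and even-value failures separately.

from mrtrix3 import MRtrixError  # same exception class used by the script

def parse_lmax(text):
  try:
    values = [int(entry) for entry in text.split(',')]
  except ValueError:
    raise MRtrixError('Parameter lmax must be a comma-separated list of integers')
  if any(value % 2 for value in values):
    raise MRtrixError('Values of lmax must be even')
  return values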
run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit', show=False) - - alg.get_inputs() - - app.goto_scratch_dir() - - if alg.supports_mask(): - if app.ARGS.mask: - # Check that the brain mask is appropriate - mask_header = image.Header('mask.mif') - if mask_header.size()[:3] != image.Header('dwi.mif').size()[:3]: - raise MRtrixError('Dimensions of provided mask image do not match DWI') - if not (len(mask_header.size()) == 3 or (len(mask_header.size()) == 4 and mask_header.size()[3] == 1)): - raise MRtrixError('Provided mask image needs to be a 3D image') - else: - app.console('Computing brain mask (dwi2mask)...') - run.command('dwi2mask dwi.mif mask.mif', show=False) - - if not image.statistics('mask.mif', mask='mask.mif').count: - raise MRtrixError(('Provided' if app.ARGS.mask else 'Generated') + ' mask image does not contain any voxels') - - # From here, the script splits depending on what estimation algorithm is being used - alg.execute() - - - - - - -# Execute the script -import mrtrix3 -mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/dwibiascorrect b/bin/dwibiascorrect deleted file mode 100755 index 75946b7e39..0000000000 --- a/bin/dwibiascorrect +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Script that performs B1 field inhomogeneity correction for a DWI volume series -# Bias field is typically estimated using the mean b=0 image, and subsequently used to correct all volumes - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import algorithm, app #pylint: disable=no-name-in-module, import-outside-toplevel - cmdline.set_author('Robert E. 
Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Perform B1 field inhomogeneity correction for a DWI volume series') - common_options = cmdline.add_argument_group('Options common to all dwibiascorrect algorithms') - common_options.add_argument('-mask', metavar='image', help='Manually provide a mask image for bias field estimation') - common_options.add_argument('-bias', metavar='image', help='Output the estimated bias field') - app.add_dwgrad_import_options(cmdline) - - # Import the command-line settings for all algorithms found in the relevant directory - algorithm.usage(cmdline) - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - - # Find out which algorithm the user has requested - alg = algorithm.get_module(app.ARGS.algorithm) - - app.check_output_path(app.ARGS.output) - app.check_output_path(app.ARGS.bias) - alg.check_output_paths() - - app.make_scratch_dir() - - grad_import_option = app.read_dwgrad_import_options() - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('in.mif') + grad_import_option) - if app.ARGS.mask: - run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') - - alg.get_inputs() - - app.goto_scratch_dir() - - # Make sure it's actually a DWI that's been passed - dwi_header = image.Header('in.mif') - if len(dwi_header.size()) != 4: - raise MRtrixError('Input image must be a 4D image') - if 'dw_scheme' not in dwi_header.keyval(): - raise MRtrixError('No valid DW gradient scheme provided or present in image header') - if len(dwi_header.keyval()['dw_scheme']) != dwi_header.size()[3]: - raise MRtrixError('DW gradient scheme contains different number of entries (' + str(len(dwi_header.keyval()['dw_scheme'])) + ' to number of volumes in DWIs (' + dwi_header.size()[3] + ')') - - # Generate a brain mask if required, or check the mask if provided by the user - if app.ARGS.mask: - if not image.match('in.mif', 'mask.mif', up_to_dim=3): - raise MRtrixError('Provided mask image does not match input DWI') - else: - run.command('dwi2mask in.mif mask.mif') - - # From here, the script splits depending on what estimation algorithm is being used - alg.execute() - - - - - -# Execute the script -import mrtrix3 -mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/dwicat b/bin/dwicat deleted file mode 100755 index 4f547f5779..0000000000 --- a/bin/dwicat +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. 
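The header validation in dwibiascorrect's execute() above checks that the image is 4D and that the number of dw_scheme rows equals the number of volumes; note that its error message concatenates the integer dwi_header.size()[3] directly into a string, which would itself fail. A sketch of the same checks with the counts formatted explicitly:

# Sketch: the same DWI header sanity checks, with explicit formatting of the
# counts in the error message.  Assumes an image.Header-like object as above.

from mrtrix3 import MRtrixError

def check_dwi_header(dwi_header):
  if len(dwi_header.size()) != 4:
    raise MRtrixError('Input image must be a 4D image')
  scheme = dwi_header.keyval().get('dw_scheme')
  if scheme is None:
    raise MRtrixError('No valid DW gradient scheme provided or present in image header')
  num_volumes = dwi_header.size()[3]
  if len(scheme) != num_volumes:
    raise MRtrixError('DW gradient scheme contains different number of entries (%d) '
                      'to number of volumes in DWIs (%d)' % (len(scheme), num_volumes))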
- -# note: deal with these warnings properly when we drop support for Python 2: -# pylint: disable=unspecified-encoding - - - -import json, shutil - - - -def usage(cmdline): #pylint: disable=unused-variable - cmdline.set_author('Lena Dorfschmidt (ld548@cam.ac.uk) and Jakub Vohryzek (jakub.vohryzek@queens.ox.ac.uk) and Robert E. Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Concatenating multiple DWI series accounting for differential intensity scaling') - cmdline.add_description('This script concatenates two or more 4D DWI series, accounting for the ' - 'fact that there may be differences in intensity scaling between those series. ' - 'This intensity scaling is corrected by determining scaling factors that will ' - 'make the overall image intensities in the b=0 volumes of each series approximately ' - 'equivalent.') - cmdline.add_argument('inputs', nargs='+', help='Multiple input diffusion MRI series') - cmdline.add_argument('output', help='The output image series (all DWIs concatenated)') - cmdline.add_argument('-mask', metavar='image', help='Provide a binary mask within which image intensities will be matched') - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - - num_inputs = len(app.ARGS.inputs) - if num_inputs < 2: - raise MRtrixError('Script requires at least two input image series') - - # check input data - def check_header(header): - if len(header.size()) > 4: - raise MRtrixError('Image "' + header.name() + '" contains more than 4 dimensions') - if not 'dw_scheme' in header.keyval(): - raise MRtrixError('Image "' + header.name() + '" does not contain a gradient table') - dw_scheme = header.keyval()['dw_scheme'] - try: - if isinstance(dw_scheme[0], list): - num_grad_lines = len(dw_scheme) - elif (isinstance(dw_scheme[0], ( int, float))) and len(dw_scheme) >= 4: - num_grad_lines = 1 - else: - raise MRtrixError - except (IndexError, MRtrixError): - raise MRtrixError('Image "' + header.name() + '" contains gradient table of unknown format') - if len(header.size()) == 4: - num_volumes = header.size()[3] - if num_grad_lines != num_volumes: - raise MRtrixError('Number of lines in gradient table for image "' + header.name() + '" (' + str(num_grad_lines) + ') does not match number of volumes (' + str(num_volumes) + ')') - elif not (num_grad_lines == 1 and len(dw_scheme) >= 4 and dw_scheme[3] <= float(CONFIG.get('BZeroThreshold', 10.0))): - raise MRtrixError('Image "' + header.name() + '" is 3D, and cannot be validated as a b=0 volume') - - first_header = image.Header(path.from_user(app.ARGS.inputs[0], False)) - check_header(first_header) - warn_protocol_mismatch = False - for filename in app.ARGS.inputs[1:]: - this_header = image.Header(path.from_user(filename, False)) - check_header(this_header) - if this_header.size()[0:3] != first_header.size()[0:3]: - raise MRtrixError('Spatial dimensions of image "' + filename + '" do not match those of first image "' + first_header.name() + '"') - for field_name in [ 'EchoTime', 'RepetitionTime', 'FlipAngle' ]: - first_value = first_header.keyval().get(field_name) - this_value = this_header.keyval().get(field_name) - if first_value and this_value and first_value != this_value: - warn_protocol_mismatch = True - if warn_protocol_mismatch: - app.warn('Mismatched protocol acquisition parameters detected between input images; ' + \ - 
'the assumption of equivalent intensities between b=0 volumes of different inputs underlying operation of this script may not be valid') - if app.ARGS.mask: - mask_header = image.Header(path.from_user(app.ARGS.mask, False)) - if mask_header.size()[0:3] != first_header.size()[0:3]: - raise MRtrixError('Spatial dimensions of mask image "' + app.ARGS.mask + '" do not match those of first image "' + first_header.name() + '"') - - # check output path - app.check_output_path(path.from_user(app.ARGS.output, False)) - - # import data to scratch directory - app.make_scratch_dir() - for index, filename in enumerate(app.ARGS.inputs): - run.command('mrconvert ' + path.from_user(filename) + ' ' + path.to_scratch(str(index) + 'in.mif')) - if app.ARGS.mask: - run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') - app.goto_scratch_dir() - - # extract b=0 volumes within each input series - for index in range(0, num_inputs): - infile = str(index) + 'in.mif' - outfile = str(index) + 'b0.mif' - if len(image.Header(infile).size()) > 3: - run.command('dwiextract ' + infile + ' ' + outfile + ' -bzero') - else: - run.function(shutil.copyfile, infile, outfile) - - mask_option = ' -mask_input mask.mif -mask_target mask.mif' if app.ARGS.mask else '' - - # for all but the first image series: - # - find multiplicative factor to match b=0 images to those of the first image - # - apply multiplicative factor to whole image series - # It would be better to not preferentially treat one of the inputs differently to any other: - # - compare all inputs to all other inputs - # - determine one single appropriate scaling factor for each image based on all results - # can't do a straight geometric average: e.g. if run for 2 images, each would map to - # the the input intensoty of the other image, and so the two corrected images would not match - # should be some mathematical theorem providing the optimal scaling factor for each image - # based on the resulting matrix of optimal scaling factors - filelist = [ '0in.mif' ] - for index in range(1, num_inputs): - stderr_text = run.command('mrhistmatch scale ' + str(index) + 'b0.mif 0b0.mif ' + str(index) + 'rescaledb0.mif' + mask_option).stderr - scaling_factor = None - for line in stderr_text.splitlines(): - if 'Estimated scale factor is' in line: - try: - scaling_factor = float(line.split()[-1]) - except ValueError: - raise MRtrixError('Unable to convert scaling factor from mrhistmatch output to floating-point number') - break - if scaling_factor is None: - raise MRtrixError('Unable to extract scaling factor from mrhistmatch output') - filename = str(index) + 'rescaled.mif' - run.command('mrcalc ' + str(index) + 'in.mif ' + str(scaling_factor) + ' -mult ' + filename) - filelist.append(filename) - - # concatenate all series together - run.command('mrcat ' + ' '.join(filelist) + ' - -axis 3 | ' + \ - 'mrconvert - result.mif -json_export result_init.json -strides 0,0,0,1') - - # remove current contents of command_history, since there's no sensible - # way to choose from which input image the contents should be taken; - # we do however want to keep other contents of keyval (e.g. 
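The comment block above (just before the mrhistmatch loop) notes that a more even-handed approach would compare every input against every other and derive one factor per series from the resulting matrix of pairwise scaling factors. One possible way to do that, not what dwicat implements, is a log-domain average anchored to the first series: when the pairwise factors are mutually consistent it reproduces them exactly, and with noisy estimates it acts as a compromise across all comparisons. A minimal sketch, assuming the pairwise matrix has already been measured:

import math

# Sketch only: one global scale factor per series from a full matrix of
# pairwise factors.  pairwise[i][j] is the factor mapping series j's
# intensities onto series i's; the diagonal is 1.

def global_scale_factors(pairwise):
  n = len(pairwise)
  log_f = [sum(math.log(pairwise[i][j]) for i in range(n)) / n for j in range(n)]
  # Anchor the solution so that the first series keeps its original intensities
  return [math.exp(value - log_f[0]) for value in log_f]

# With mutually consistent factors the first row is reproduced:
# global_scale_factors([[1.0, 2.0, 4.0],
#                       [0.5, 1.0, 2.0],
#                       [0.25, 0.5, 1.0]]) -> [1.0, 2.0, 4.0]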
gradient table) - with open('result_init.json', 'r') as input_json_file: - keyval = json.load(input_json_file) - keyval.pop('command_history', None) - with open('result_final.json', 'w') as output_json_file: - json.dump(keyval, output_json_file) - - run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval='result_final.json', force=app.FORCE_OVERWRITE) - - - - -# Execute the script -import mrtrix3 -mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/dwifslpreproc b/bin/dwifslpreproc deleted file mode 100755 index 30624cb188..0000000000 --- a/bin/dwifslpreproc +++ /dev/null @@ -1,1328 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Script for performing DWI pre-processing using FSL 5.0 (onwards) tools eddy / topup / applytopup - -# note: deal with these warnings properly when we drop support for Python 2: -# pylint: disable=unspecified-encoding - - - -import itertools, json, math, os, shutil, sys -from distutils.spawn import find_executable - - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Perform diffusion image pre-processing using FSL\'s eddy tool; including inhomogeneity distortion correction using FSL\'s topup tool if possible') - cmdline.add_description('This script is intended to provide convenience of use of the FSL software tools topup and eddy for performing DWI pre-processing, by encapsulating some of the surrounding image data and metadata processing steps. It is intended to simply these processing steps for most commonly-used DWI acquisition strategies, whilst also providing support for some more exotic acquisitions. The "example usage" section demonstrates the ways in which the script can be used based on the (compulsory) -rpe_* command-line options.') - cmdline.add_description('The "-topup_options" and "-eddy_options" command-line options allow the user to pass desired command-line options directly to the FSL commands topup and eddy. The available options for those commands may vary between versions of FSL; users can interrogate such by querying the help pages of the installed software, and/or the FSL online documentation: (topup) https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/TopupUsersGuide ; (eddy) https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/UsersGuide') - cmdline.add_description('The script will attempt to run the CUDA version of eddy; if this does not succeed for any reason, or is not present on the system, the CPU version will be attempted instead. 
By default, the CUDA eddy binary found that indicates compilation against the most recent version of CUDA will be attempted; this can be over-ridden by providing a soft-link "eddy_cuda" within your path that links to the binary you wish to be executed.') - cmdline.add_description('Note that this script does not perform any explicit registration between images provided to topup via the -se_epi option, and the DWI volumes provided to eddy. In some instances (motion between acquisitions) this can result in erroneous application of the inhomogeneity field during distortion correction. Use of the -align_seepi option is advocated in this scenario, which ensures that the first volume in the series provided to topup is also the first volume in the series provided to eddy, guaranteeing alignment. But a prerequisite for this approach is that the image contrast within the images provided to the -se_epi option must match the b=0 volumes present within the input DWI series: this means equivalent TE, TR and flip angle (note that differences in multi-band factors between two acquisitions may lead to differences in TR).') - cmdline.add_example_usage('A basic DWI acquisition, where all image volumes are acquired in a single protocol with fixed phase encoding', - 'dwifslpreproc DWI_in.mif DWI_out.mif -rpe_none -pe_dir ap -readout_time 0.55', - 'Due to use of a single fixed phase encoding, no EPI distortion correction can be applied in this case.') - cmdline.add_example_usage('DWIs all acquired with a single fixed phase encoding; but additionally a pair of b=0 images with reversed phase encoding to estimate the inhomogeneity field', - 'mrcat b0_ap.mif b0_pa.mif b0_pair.mif -axis 3; dwifslpreproc DWI_in.mif DWI_out.mif -rpe_pair -se_epi b0_pair.mif -pe_dir ap -readout_time 0.72 -align_seepi', - 'Here the two individual b=0 volumes are concatenated into a single 4D image series, and this is provided to the script via the -se_epi option. Note that with the -rpe_pair option used here, which indicates that the SE-EPI image series contains one or more pairs of b=0 images with reversed phase encoding, the FIRST HALF of the volumes in the SE-EPI series must possess the same phase encoding as the input DWI series, while the second half are assumed to contain the opposite phase encoding direction but identical total readout time. Use of the -align_seepi option is advocated as long as its use is valid (more information in the Description section).') - cmdline.add_example_usage('All DWI directions & b-values are acquired twice, with the phase encoding direction of the second acquisition protocol being reversed with respect to the first', - 'mrcat DWI_lr.mif DWI_rl.mif DWI_all.mif -axis 3; dwifslpreproc DWI_all.mif DWI_out.mif -rpe_all -pe_dir lr -readout_time 0.66', - 'Here the two acquisition protocols are concatenated into a single DWI series containing all acquired volumes. The direction indicated via the -pe_dir option should be the direction of phase encoding used in acquisition of the FIRST HALF of volumes in the input DWI series; ie. the first of the two files that was provided to the mrcat command. In this usage scenario, the output DWI series will contain the same number of image volumes as ONE of the acquired DWI series (ie. 
half of the number in the concatenated series); this is because the script will identify pairs of volumes that possess the same diffusion sensitisation but reversed phase encoding, and perform explicit recombination of those volume pairs in such a way that image contrast in regions of inhomogeneity is determined from the stretched rather than the compressed image.') - cmdline.add_example_usage('Any acquisition scheme that does not fall into one of the example usages above', - 'mrcat DWI_*.mif DWI_all.mif -axis 3; mrcat b0_*.mif b0_all.mif -axis 3; dwifslpreproc DWI_all.mif DWI_out.mif -rpe_header -se_epi b0_all.mif -align_seepi', - 'With this usage, the relevant phase encoding information is determined entirely based on the contents of the relevant image headers, and dwifslpreproc prepares all metadata for the executed FSL commands accordingly. This can therefore be used if the particular DWI acquisition strategy used does not correspond to one of the simple examples as described in the prior examples. This usage is predicated on the headers of the input files containing appropriately-named key-value fields such that MRtrix3 tools identify them as such. In some cases, conversion from DICOM using MRtrix3 commands will automatically extract and embed this information; however this is not true for all scanner vendors and/or software versions. In the latter case it may be possible to manually provide these metadata; either using the -json_import command-line option of dwifslpreproc, or the -json_import or one of the -import_pe_* command-line options of MRtrix3\'s mrconvert command (and saving in .mif format) prior to running dwifslpreproc.') - cmdline.add_citation('Andersson, J. L. & Sotiropoulos, S. N. An integrated approach to correction for off-resonance effects and subject movement in diffusion MR imaging. NeuroImage, 2015, 125, 1063-1078', is_external=True) - cmdline.add_citation('Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', is_external=True) - cmdline.add_citation('Skare, S. & Bammer, R. Jacobian weighting of distortion corrected EPI data. Proceedings of the International Society for Magnetic Resonance in Medicine, 2010, 5063', condition='If performing recombination of diffusion-weighted volume pairs with opposing phase encoding directions', is_external=True) - cmdline.add_citation('Andersson, J. L.; Skare, S. & Ashburner, J. How to correct susceptibility distortions in spin-echo echo-planar images: application to diffusion tensor imaging. NeuroImage, 2003, 20, 870-888', condition='If performing EPI susceptibility distortion correction', is_external=True) - cmdline.add_citation('Andersson, J. L. R.; Graham, M. S.; Zsoldos, E. & Sotiropoulos, S. N. Incorporating outlier detection and replacement into a non-parametric framework for movement and distortion correction of diffusion MR images. NeuroImage, 2016, 141, 556-572', condition='If including "--repol" in -eddy_options input', is_external=True) - cmdline.add_citation('Andersson, J. L. R.; Graham, M. S.; Drobnjak, I.; Zhang, H.; Filippini, N. & Bastiani, M. Towards a comprehensive framework for movement and distortion correction of diffusion MR images: Within volume movement. 
NeuroImage, 2017, 152, 450-466', condition='If including "--mporder" in -eddy_options input', is_external=True) - cmdline.add_citation('Bastiani, M.; Cottaar, M.; Fitzgibbon, S.P.; Suri, S.; Alfaro-Almagro, F.; Sotiropoulos, S.N.; Jbabdi, S.; Andersson, J.L.R. Automated quality control for within and between studies diffusion MRI data using a non-parametric framework for movement and distortion correction. NeuroImage, 2019, 184, 801-812', condition='If using -eddyqc_test or -eddyqc_all option and eddy_quad is installed', is_external=True) - cmdline.add_argument('input', help='The input DWI series to be corrected') - cmdline.add_argument('output', help='The output corrected image series') - cmdline.add_argument('-pe_dir', metavar=('PE'), help='Manually specify the phase encoding direction of the input series; can be a signed axis number (e.g. -0, 1, +2), an axis designator (e.g. RL, PA, IS), or NIfTI axis codes (e.g. i-, j, k)') - cmdline.add_argument('-readout_time', metavar=('time'), type=float, help='Manually specify the total readout time of the input series (in seconds)') - cmdline.add_argument('-se_epi', metavar=('image'), help='Provide an additional image series consisting of spin-echo EPI images, which is to be used exclusively by topup for estimating the inhomogeneity field (i.e. it will not form part of the output image series)') - cmdline.add_argument('-align_seepi', action='store_true', help='Achieve alignment between the SE-EPI images used for inhomogeneity field estimation, and the DWIs (more information in Description section)') - cmdline.add_argument('-json_import', metavar=('file'), help='Import image header information from an associated JSON file (may be necessary to determine phase encoding information)') - cmdline.add_argument('-topup_options', metavar=('" TopupOptions"'), help='Manually provide additional command-line options to the topup command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to topup)') - cmdline.add_argument('-eddy_options', metavar=('" EddyOptions"'), help='Manually provide additional command-line options to the eddy command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to eddy)') - cmdline.add_argument('-eddy_mask', metavar=('image'), help='Provide a processing mask to use for eddy, instead of having dwifslpreproc generate one internally using dwi2mask') - cmdline.add_argument('-eddy_slspec', metavar=('file'), help='Provide a file containing slice groupings for eddy\'s slice-to-volume registration') - cmdline.add_argument('-eddyqc_text', metavar=('directory'), help='Copy the various text-based statistical outputs generated by eddy, and the output of eddy_qc (if installed), into an output directory') - cmdline.add_argument('-eddyqc_all', metavar=('directory'), help='Copy ALL outputs generated by eddy (including images), and the output of eddy_qc (if installed), into an output directory') - app.add_dwgrad_export_options(cmdline) - app.add_dwgrad_import_options(cmdline) - rpe_options = cmdline.add_argument_group('Options for specifying the acquisition phase-encoding design; note that one of the -rpe_* options MUST be provided') - rpe_options.add_argument('-rpe_none', action='store_true', help='Specify that no reversed phase-encoding image data is being provided; eddy will perform eddy current and motion correction only') - rpe_options.add_argument('-rpe_pair', action='store_true', help='Specify 
that a set of images (typically b=0 volumes) will be provided for use in inhomogeneity field estimation only (using the -se_epi option)') - rpe_options.add_argument('-rpe_all', action='store_true', help='Specify that ALL DWIs have been acquired with opposing phase-encoding') - rpe_options.add_argument('-rpe_header', action='store_true', help='Specify that the phase-encoding information can be found in the image header(s), and that this is the information that the script should use') - cmdline.flag_mutually_exclusive_options( [ 'rpe_none', 'rpe_pair', 'rpe_all', 'rpe_header' ], True ) - cmdline.flag_mutually_exclusive_options( [ 'rpe_none', 'se_epi' ], False ) # May still technically provide -se_epi even with -rpe_all - cmdline.flag_mutually_exclusive_options( [ 'rpe_header', 'pe_dir' ], False ) # Can't manually provide phase-encoding direction if expecting it to be in the header - cmdline.flag_mutually_exclusive_options( [ 'rpe_header', 'readout_time' ], False ) # Can't manually provide readout time if expecting it to be in the header - cmdline.flag_mutually_exclusive_options( [ 'eddyqc_text', 'eddyqc_all' ], False ) - - - - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, fsl, image, matrix, path, phaseencoding, run, utils #pylint: disable=no-name-in-module, import-outside-toplevel - - if utils.is_windows(): - raise MRtrixError('Script cannot run on Windows due to FSL dependency') - - image.check_3d_nonunity(path.from_user(app.ARGS.input, False)) - - pe_design = '' - if app.ARGS.rpe_none: - pe_design = 'None' - elif app.ARGS.rpe_pair: - pe_design = 'Pair' - if not app.ARGS.se_epi: - raise MRtrixError('If using the -rpe_pair option, the -se_epi option must be used to provide the spin-echo EPI data to be used by topup') - elif app.ARGS.rpe_all: - pe_design = 'All' - elif app.ARGS.rpe_header: - pe_design = 'Header' - else: - raise MRtrixError('Must explicitly specify phase-encoding acquisition design (even if none)') - - if app.ARGS.align_seepi and not app.ARGS.se_epi: - raise MRtrixError('-align_seepi option is only applicable when the -se_epi option is also used') - - fsl_path = os.environ.get('FSLDIR', '') - if not fsl_path: - raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') - - if not pe_design == 'None': - topup_config_path = os.path.join(fsl_path, 'etc', 'flirtsch', 'b02b0.cnf') - if not os.path.isfile(topup_config_path): - raise MRtrixError('Could not find necessary default config file for FSL topup command (expected location: ' + topup_config_path + ')') - topup_cmd = fsl.exe_name('topup') - applytopup_cmd = fsl.exe_name('applytopup') - - if not fsl.eddy_binary(True) and not fsl.eddy_binary(False): - raise MRtrixError('Could not find any version of FSL eddy command') - fsl_suffix = fsl.suffix() - app.check_output_path(app.ARGS.output) - - # Export the gradient table to the path requested by the user if necessary - grad_export_option = app.read_dwgrad_export_options() - - - eddyqc_path = None - eddyqc_files = [ 'eddy_parameters', 'eddy_movement_rms', 'eddy_restricted_movement_rms', \ - 'eddy_post_eddy_shell_alignment_parameters', 'eddy_post_eddy_shell_PE_translation_parameters', \ - 'eddy_outlier_report', 'eddy_outlier_map', 'eddy_outlier_n_stdev_map', 'eddy_outlier_n_sqr_stdev_map', \ - 'eddy_movement_over_time' ] - if app.ARGS.eddyqc_text: - eddyqc_path = path.from_user(app.ARGS.eddyqc_text, 
False) - elif app.ARGS.eddyqc_all: - eddyqc_path = path.from_user(app.ARGS.eddyqc_all, False) - eddyqc_files.extend([ 'eddy_outlier_free_data.nii.gz', 'eddy_cnr_maps.nii.gz', 'eddy_residuals.nii.gz' ]) - if eddyqc_path: - if os.path.exists(eddyqc_path): - if os.path.isdir(eddyqc_path): - if any(os.path.exists(os.path.join(eddyqc_path, filename)) for filename in eddyqc_files): - if app.FORCE_OVERWRITE: - app.warn('Output eddy QC directory already contains relevant files; these will be overwritten on completion') - else: - raise MRtrixError('Output eddy QC directory already contains relevant files (use -force to override)') - else: - if app.FORCE_OVERWRITE: - app.warn('Target for eddy QC output is not a directory; it will be overwritten on completion') - else: - raise MRtrixError('Target for eddy QC output exists, and is not a directory (use -force to override)') - - - eddy_manual_options = [] - if app.ARGS.eddy_options: - # Initially process as a list; we'll convert back to a string later - eddy_manual_options = app.ARGS.eddy_options.strip().split() - # Check for erroneous usages before we perform any data importing - if any(entry.startswith('--mask=') for entry in eddy_manual_options): - raise MRtrixError('Cannot provide eddy processing mask via -eddy_options "--mask=..." as manipulations are required; use -eddy_mask option instead') - if any(entry.startswith('--slspec=') for entry in eddy_manual_options): - raise MRtrixError('Cannot provide eddy slice specification file via -eddy_options "--slspec=..." as manipulations are required; use -eddy_slspec option instead') - if '--resamp=lsr' in eddy_manual_options: - raise MRtrixError('dwifslpreproc does not currently support least-squares reconstruction; this cannot be simply passed via -eddy_options') - - - # Don't import slspec file directly; just make sure it exists - if app.ARGS.eddy_slspec and not os.path.isfile(path.from_user(app.ARGS.eddy_slspec, False)): - raise MRtrixError('Unable to find file \"' + app.ARGS.eddy_slspec + '\" provided via -eddy_slspec option') - - - # Convert all input images into MRtrix format and store in scratch directory first - app.make_scratch_dir() - - grad_import_option = app.read_dwgrad_import_options() - json_import_option = '' - if app.ARGS.json_import: - json_import_option = ' -json_import ' + path.from_user(app.ARGS.json_import) - json_export_option = ' -json_export ' + path.to_scratch('dwi.json', True) - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('dwi.mif') + grad_import_option + json_import_option + json_export_option) - if app.ARGS.se_epi: - image.check_3d_nonunity(path.from_user(app.ARGS.se_epi, False)) - run.command('mrconvert ' + path.from_user(app.ARGS.se_epi) + ' ' + path.to_scratch('se_epi.mif')) - if app.ARGS.eddy_mask: - run.command('mrconvert ' + path.from_user(app.ARGS.eddy_mask) + ' ' + path.to_scratch('eddy_mask.mif') + ' -datatype bit') - - app.goto_scratch_dir() - - - # Get information on the input images, and check their validity - dwi_header = image.Header('dwi.mif') - if not len(dwi_header.size()) == 4: - raise MRtrixError('Input DWI must be a 4D image') - dwi_num_volumes = dwi_header.size()[3] - app.debug('Number of DWI volumes: ' + str(dwi_num_volumes)) - dwi_num_slices = dwi_header.size()[2] - app.debug('Number of DWI slices: ' + str(dwi_num_slices)) - dwi_pe_scheme = phaseencoding.get_scheme(dwi_header) - if app.ARGS.se_epi: - se_epi_header = image.Header('se_epi.mif') - # This doesn't necessarily apply any more: May be able to combine e.g. 
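Above, the slice-to-volume branch converts a 'SliceEncodingDirection' code such as 'k' or 'k-' into a direction vector via image.axis2dir(), and assumes [0,0,1] when the field is absent. A sketch of that kind of code-to-vector mapping; the real MRtrix3 helper may accept more spellings than shown here.

# Sketch of an axis-code to direction-vector mapping similar in spirit to
# image.axis2dir(): 'i', 'j', 'k' denote the first, second and third image
# axes, and a trailing '-' reverses the direction.

def axis_code_to_direction(code):
  base = {'i': [1, 0, 0], 'j': [0, 1, 0], 'k': [0, 0, 1]}
  axis = code.rstrip('-')
  if axis not in base:
    raise ValueError('Unrecognised axis code: ' + code)
  direction = base[axis]
  return [-value for value in direction] if code.endswith('-') else direction

# axis_code_to_direction('k')  -> [0, 0, 1]
# axis_code_to_direction('k-') -> [0, 0, -1]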
a P>>A from -se_epi with an A>>P b=0 image from the DWIs - # if not len(se_epi_header.size()) == 4: - # raise MRtrixError('File provided using -se_epi option must contain more than one image volume') - se_epi_pe_scheme = phaseencoding.get_scheme(se_epi_header) - if 'dw_scheme' not in dwi_header.keyval(): - raise MRtrixError('No diffusion gradient table found') - grad = dwi_header.keyval()['dw_scheme'] - if not len(grad) == dwi_num_volumes: - raise MRtrixError('Number of lines in gradient table (' + str(len(grad)) + ') does not match input image (' + str(dwi_num_volumes) + ' volumes); check your input data') - - - # Deal with slice timing information for eddy slice-to-volume correction - slice_encoding_axis = 2 - eddy_mporder = any(s.startswith('--mporder') for s in eddy_manual_options) - if eddy_mporder: - if 'SliceEncodingDirection' in dwi_header.keyval(): - slice_encoding_direction = dwi_header.keyval()['SliceEncodingDirection'] - app.debug('Slice encoding direction: ' + slice_encoding_direction) - if not slice_encoding_direction.startswith('k'): - raise MRtrixError('DWI header indicates that 3rd spatial axis is not the slice axis; this is not yet compatible with --mporder option in eddy, nor supported in dwifslpreproc') - slice_encoding_direction = image.axis2dir(slice_encoding_direction) - else: - app.console('No slice encoding direction information present; assuming third axis corresponds to slices') - slice_encoding_direction = [0,0,1] - slice_encoding_axis = [ index for index, value in enumerate(slice_encoding_direction) if value ][0] - slice_groups = [ ] - slice_timing = [ ] - # Since there's a chance that we may need to pad this info, we can't just copy this file - # to the scratch directory... - if app.ARGS.eddy_slspec: - try: - slice_groups = matrix.load_numeric(path.from_user(app.ARGS.eddy_slspec, False), dtype=int) - app.debug('Slice groups: ' + str(slice_groups)) - except ValueError: - try: - slice_timing = matrix.load_numeric(path.from_user(app.ARGS.eddy_slspec, False), dtype=float) - app.debug('Slice timing: ' + str(slice_timing)) - app.warn('\"slspec\" file provided to FSL eddy is supposed to contain slice indices for slice groups; ' - 'contents of file \"' + app.ARGS.eddy_slspec + '\" appears to instead be slice timings; ' - 'these data have been imported and will be converted to the appropriate format') - if len(slice_timing) != dwi_num_slices: - raise MRtrixError('Cannot use slice timing information from file \"' + app.ARGS.eddy_slspec + '\" for slice-to-volume correction: ' + \ - 'number of entries (' + str(len(slice_timing)) + ') does not match number of slices (' + str(dwi_num_slices) + ')') - except ValueError: - raise MRtrixError('Error parsing eddy \"slspec\" file \"' + app.ARGS.eddy_slspec + '\" ' - '(please see FSL eddy help page, specifically the --slspec option)') - else: - if 'SliceTiming' not in dwi_header.keyval(): - raise MRtrixError('Cannot perform slice-to-volume correction in eddy: ' - '-eddy_slspec option not specified, and no slice timing information present in input DWI header') - slice_timing = dwi_header.keyval()['SliceTiming'] - app.debug('Initial slice timing contents from header: ' + str(slice_timing)) - if slice_timing in ['invalid', 'variable']: - raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' - 'data flagged as "' + slice_timing + '"') - # Fudges necessary to maniupulate nature of slice timing data in cases where - # bad JSON formatting has led to the data not being simply a list of 
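When a slice-timing vector is all that is available (whether from a mis-formatted slspec file, as warned about above, or from the 'SliceTiming' header field), it has to be turned into the slice groupings that eddy's --slspec expects: one row per excitation, listing the slices acquired together, in temporal order. The conversion itself is not shown in this excerpt; a hedged sketch of the usual approach:

# Sketch: derive eddy-style slice groups from per-slice acquisition times.
# Slices sharing a timestamp (e.g. multiband) form one group; groups are
# ordered by acquisition time.  Not the script's own implementation.

def slice_timing_to_groups(slice_timing):
  groups = {}
  for slice_index, time in enumerate(slice_timing):
    groups.setdefault(time, []).append(slice_index)
  return [sorted(groups[time]) for time in sorted(groups)]

# An interleaved multiband-2 acquisition over six slices:
# slice_timing_to_groups([0.0, 0.5, 1.0, 0.0, 0.5, 1.0]) -> [[0, 3], [1, 4], [2, 5]]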
floats - # (whether from MRtrix3 DICOM conversion or from anything else) - if isinstance(slice_timing, utils.STRING_TYPES): - slice_timing = slice_timing.split() - if not isinstance(slice_timing, list): - raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' - 'data is not a list') - if len(slice_timing) == 1: - slice_timing = slice_timing[0] - if not isinstance(slice_timing, list): - raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' - 'unexpected data format') - if isinstance(slice_timing[0], list): - if not all(len(entry) == 1 for entry in slice_timing): - raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' - 'data do not appear to be 1D') - slice_timing = [ entry[0] for entry in slice_timing ] - if not all(isinstance(entry, float) for entry in slice_timing): - try: - slice_timing = [ float(entry) for entry in slice_timing ] - except ValueError: - raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' - 'data are not numeric') - app.debug('Re-formatted slice timing contents from header: ' + str(slice_timing)) - if len(slice_timing) != dwi_num_slices: - raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' - 'number of entries (' + str(len(slice_timing)) + ') does not match number of slices (' + str(dwi_header.size()[2]) + ')') - elif app.ARGS.eddy_slspec: - app.warn('-eddy_slspec option provided, but "--mporder=" not provided via -eddy_options; ' - 'slice specification file not imported as it would not be utilised by eddy') - - - # Use new features of dirstat to query the quality of the diffusion acquisition scheme - # Need to know the mean b-value in each shell, and the asymmetry value of each shell - # But don't bother testing / warning the user if they're already controlling for this - if not app.ARGS.eddy_options or not any(s.startswith('--slm=') for s in app.ARGS.eddy_options.split()): - shell_bvalues = [ int(round(float(value))) for value in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] - shell_asymmetries = [ float(value) for value in run.command('dirstat dwi.mif -output asym').stdout.splitlines() ] - # dirstat will skip any b=0 shell by default; therefore for correspondence between - # shell_bvalues and shell_symmetry, need to remove any b=0 from the former - if len(shell_bvalues) == len(shell_asymmetries) + 1: - shell_bvalues = shell_bvalues[1:] - elif len(shell_bvalues) != len(shell_asymmetries): - raise MRtrixError('Number of b-values reported by mrinfo (' + str(len(shell_bvalues)) + ') does not match number of outputs provided by dirstat (' + str(len(shell_asymmetries)) + ')') - for bvalue, asymmetry in zip(shell_bvalues, shell_asymmetries): - if asymmetry >= 0.1: - app.warn('sampling of b=' + str(bvalue) + ' shell is ' + ('strongly' if asymmetry >= 0.4 else 'moderately') + \ - ' asymmetric; distortion correction may benefit from use of: ' + \ - '-eddy_options " ... --slm=linear ... 
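The warning logic above flags a shell as moderately (>= 0.1) or strongly (>= 0.4) asymmetric based on dirstat's 'asym' output. A common measure of such asymmetry is the norm of the mean of the shell's unit direction vectors: zero for a perfectly antipodally balanced set, approaching one when all directions fall in a single hemisphere. Whether dirstat computes exactly this quantity is an assumption here; the sketch below only illustrates the idea.

import math

# Sketch: asymmetry of a set of unit direction vectors as the norm of their mean.

def shell_asymmetry(directions):
  count = len(directions)
  mean = [sum(direction[axis] for direction in directions) / count for axis in range(3)]
  return math.sqrt(sum(component * component for component in mean))

# shell_asymmetry([(0, 0, 1), (0, 0, -1)]) -> 0.0   (antipodal pair)
# shell_asymmetry([(0, 0, 1), (0, 0, 1)])  -> 1.0   (fully one-sided)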
"') - - - # Since we want to access user-defined phase encoding information regardless of whether or not - # such information is present in the header, let's grab it here - manual_pe_dir = None - if app.ARGS.pe_dir: - manual_pe_dir = [ float(i) for i in phaseencoding.direction(app.ARGS.pe_dir) ] - app.debug('Manual PE direction: ' + str(manual_pe_dir)) - manual_trt = None - if app.ARGS.readout_time: - manual_trt = float(app.ARGS.readout_time) - app.debug('Manual readout time: ' + str(manual_trt)) - - - # Utilise the b-value clustering algorithm in src/dwi/shells.* - shell_indices = [ [ int(i) for i in entry.split(',') ] for entry in image.mrinfo('dwi.mif', 'shell_indices').split(' ') ] - shell_bvalues = [ float(f) for f in image.mrinfo('dwi.mif', 'shell_bvalues').split(' ')] - bzero_threshold = float(CONFIG.get('BZeroThreshold', 10.0)) - - # For each volume index, store the index of the shell to which it is attributed - # (this will make it much faster to determine whether or not two volumes belong to the same shell) - vol2shell = [ -1 ] * dwi_num_volumes - for index, volumes in enumerate(shell_indices): - for volume in volumes: - vol2shell[volume] = index - assert all(index >= 0 for index in vol2shell) - - - def grads_match(one, two): - # Are the two volumes assigned to different b-value shells? - if vol2shell[one] != vol2shell[two]: - return False - # Does this shell correspond to b=0? - if shell_bvalues[vol2shell[one]] <= bzero_threshold: - return True - # Dot product between gradient directions - # First, need to check for zero-norm vectors: - # - If both are zero, skip this check - # - If one is zero and the other is not, volumes don't match - # - If neither is zero, test the dot product - if any(grad[one][0:3]): - if not any(grad[two][0:3]): - return False - dot_product = grad[one][0]*grad[two][0] + grad[one][1]*grad[two][1] + grad[one][2]*grad[two][2] - if abs(dot_product) < 0.999: - return False - elif any(grad[two][0:3]): - return False - return True - - - # Manually generate a phase-encoding table for the input DWI based on user input - dwi_manual_pe_scheme = None - se_epi_manual_pe_scheme = None - auto_trt = 0.1 - dwi_auto_trt_warning = False - if manual_pe_dir: - - if manual_trt: - trt = manual_trt - else: - trt = auto_trt - dwi_auto_trt_warning = True - - # Still construct the manual PE scheme even with 'None' or 'Pair': - # there may be information in the header that we need to compare against - if pe_design == 'None': - line = list(manual_pe_dir) - line.append(trt) - dwi_manual_pe_scheme = [ line ] * dwi_num_volumes - app.debug('Manual DWI PE scheme for \'None\' PE design: ' + str(dwi_manual_pe_scheme)) - - # With 'Pair', also need to construct the manual scheme for SE EPIs - elif pe_design == 'Pair': - line = list(manual_pe_dir) - line.append(trt) - dwi_manual_pe_scheme = [ line ] * dwi_num_volumes - app.debug('Manual DWI PE scheme for \'Pair\' PE design: ' + str(dwi_manual_pe_scheme)) - if len(se_epi_header.size()) != 4: - raise MRtrixError('If using -rpe_pair option, image provided using -se_epi must be a 4D image') - se_epi_num_volumes = se_epi_header.size()[3] - if se_epi_num_volumes%2: - raise MRtrixError('If using -rpe_pair option, image provided using -se_epi must contain an even number of volumes') - # Assume that first half of volumes have same direction as series; - # second half have the opposite direction - se_epi_manual_pe_scheme = [ line ] * int(se_epi_num_volumes/2) - line = [ (-i if i else 0.0) for i in manual_pe_dir ] - line.append(trt) - 
se_epi_manual_pe_scheme.extend( [ line ] * int(se_epi_num_volumes/2) ) - app.debug('Manual SEEPI PE scheme for \'Pair\' PE design: ' + str(se_epi_manual_pe_scheme)) - - # If -rpe_all, need to scan through grad and figure out the pairings - # This will be required if relying on user-specified phase encode direction - # It will also be required at the end of the script for the manual recombination - # Update: The possible permutations of volume-matched acquisition is limited within the - # context of the -rpe_all option. In particular, the potential for having more - # than one b=0 volume within each half means that it is not possible to permit - # arbitrary ordering of those pairs, since b=0 volumes would then be matched - # despite having the same phase-encoding direction. Instead, explicitly enforce - # that volumes must be matched between the first and second halves of the DWI data. - elif pe_design == 'All': - if dwi_num_volumes%2: - raise MRtrixError('If using -rpe_all option, input image must contain an even number of volumes') - grads_matched = [ dwi_num_volumes ] * dwi_num_volumes - grad_pairs = [ ] - app.debug('Commencing gradient direction matching; ' + str(dwi_num_volumes) + ' volumes') - for index1 in range(int(dwi_num_volumes/2)): - if grads_matched[index1] == dwi_num_volumes: # As yet unpaired - for index2 in range(int(dwi_num_volumes/2), dwi_num_volumes): - if grads_matched[index2] == dwi_num_volumes: # Also as yet unpaired - if grads_match(index1, index2): - grads_matched[index1] = index2 - grads_matched[index2] = index1 - grad_pairs.append([index1, index2]) - app.debug('Matched volume ' + str(index1) + ' with ' + str(index2) + ': ' + str(grad[index1]) + ' ' + str(grad[index2])) - break - else: - raise MRtrixError('Unable to determine matching reversed phase-encode direction volume for DWI volume ' + str(index1)) - if not len(grad_pairs) == dwi_num_volumes/2: - raise MRtrixError('Unable to determine complete matching DWI volume pairs for reversed phase-encode combination') - # Construct manual PE scheme here: - # Regardless of whether or not there's a scheme in the header, need to have it: - # if there's one in the header, want to compare to the manually-generated one - dwi_manual_pe_scheme = [ ] - for index in range(0, dwi_num_volumes): - line = list(manual_pe_dir) - if index >= int(dwi_num_volumes/2): - line = [ (-i if i else 0.0) for i in line ] - line.append(trt) - dwi_manual_pe_scheme.append(line) - app.debug('Manual DWI PE scheme for \'All\' PE design: ' + str(dwi_manual_pe_scheme)) - - else: # No manual phase encode direction defined - - if not pe_design == 'Header': - raise MRtrixError('If not using -rpe_header, phase encoding direction must be provided using the -pe_dir option') - - - - def scheme_dirs_match(one, two): - for line_one, line_two in zip(one, two): - if not line_one[0:3] == line_two[0:3]: - return False - return True - - def scheme_times_match(one, two): - for line_one, line_two in zip(one, two): - if abs(line_one[3] - line_two[3]) > 5e-3: - return False - return True - - - - # Determine whether or not the phase encoding table generated manually should be used - # (possibly instead of a table present in the image header) - overwrite_dwi_pe_scheme = False - if dwi_pe_scheme: - if manual_pe_dir: - # Compare manual specification to that read from the header; - # overwrite & give warning to user if they differ - # Bear in mind that this could even be the case for -rpe_all; - # relying on earlier code having successfully generated the 'appropriate' - # PE 
scheme for the input volume based on the diffusion gradient table - if not scheme_dirs_match(dwi_pe_scheme, dwi_manual_pe_scheme): - app.warn('User-defined phase-encoding direction design does not match what is stored in DWI image header; proceeding with user specification') - overwrite_dwi_pe_scheme = True - if manual_trt: - # Compare manual specification to that read from the header - if not scheme_times_match(dwi_pe_scheme, dwi_manual_pe_scheme): - app.warn('User-defined total readout time does not match what is stored in DWI image header; proceeding with user specification') - overwrite_dwi_pe_scheme = True - if overwrite_dwi_pe_scheme: - dwi_pe_scheme = dwi_manual_pe_scheme # May be used later for triggering volume recombination - else: - dwi_manual_pe_scheme = None # To guarantee that these generated data are never used - else: - # Nothing in the header; rely entirely on user specification - if pe_design == 'Header': - raise MRtrixError('No phase encoding information found in DWI image header') - if not manual_pe_dir: - raise MRtrixError('No phase encoding information provided either in header or at command-line') - if dwi_auto_trt_warning: - app.console('Total readout time not provided at command-line; assuming sane default of ' + str(auto_trt)) - dwi_pe_scheme = dwi_manual_pe_scheme # May be needed later for triggering volume recombination - - # This may be required by -rpe_all for extracting b=0 volumes while retaining phase-encoding information - import_dwi_pe_table_option = '' - if dwi_manual_pe_scheme: - phaseencoding.save('dwi_manual_pe_scheme.txt', dwi_manual_pe_scheme) - import_dwi_pe_table_option = ' -import_pe_table dwi_manual_pe_scheme.txt' - - - # Find the index of the first DWI volume that is a b=0 volume - # This needs to occur at the outermost loop as it is pertinent information - # not only for the -align_seepi option, but also for when the -se_epi option - # is not provided at all, and the input to topup is extracted solely from the DWIs - dwi_first_bzero_index = 0 - for line in grad: - if line[3] <= bzero_threshold: - break - dwi_first_bzero_index += 1 - app.debug('Index of first b=0 image in DWIs is ' + str(dwi_first_bzero_index)) - - - # Deal with the phase-encoding of the images to be fed to topup (if applicable) - do_topup = (not pe_design == 'None') - overwrite_se_epi_pe_scheme = False - se_epi_path = 'se_epi.mif' - dwi_permvols_preeddy_option = '' - dwi_permvols_posteddy_option = '' - dwi_bzero_added_to_se_epi = False - if app.ARGS.se_epi: - - # Newest version of eddy requires that topup field be on the same grid as the eddy input DWI - if not image.match(dwi_header, se_epi_header, up_to_dim=3): - app.console('DWIs and SE-EPI images used for inhomogeneity field estimation are defined on different image grids; ' - 'the latter will be automatically re-gridded to match the former') - new_se_epi_path = 'se_epi_regrid.mif' - run.command('mrtransform ' + se_epi_path + ' - -reorient_fod no -interp sinc -template dwi.mif | mrcalc - 0.0 -max ' + new_se_epi_path) - app.cleanup(se_epi_path) - se_epi_path = new_se_epi_path - se_epi_header = image.Header(se_epi_path) - - # 3 possible sources of PE information: DWI header, topup image header, command-line - # Any pair of these may conflict, and any one could be absent - - # Have to switch here based on phase-encoding acquisition design - if pe_design == 'Pair': - # Criteria: - # * If present in own header, ignore DWI header entirely - - # - If also provided at command-line, look for conflict & report - # - If not provided 
at command-line, nothing to do - # * If _not_ present in own header: - # - If provided at command-line, infer appropriately - # - If not provided at command-line, but the DWI header has that information, infer appropriately - if se_epi_pe_scheme: - if manual_pe_dir: - if not scheme_dirs_match(se_epi_pe_scheme, se_epi_manual_pe_scheme): - app.warn('User-defined phase-encoding direction design does not match what is stored in SE EPI image header; proceeding with user specification') - overwrite_se_epi_pe_scheme = True - if manual_trt: - if not scheme_times_match(se_epi_pe_scheme, se_epi_manual_pe_scheme): - app.warn('User-defined total readout time does not match what is stored in SE EPI image header; proceeding with user specification') - overwrite_se_epi_pe_scheme = True - if overwrite_se_epi_pe_scheme: - se_epi_pe_scheme = se_epi_manual_pe_scheme - else: - se_epi_manual_pe_scheme = None # To guarantee that these data are never used - else: - overwrite_se_epi_pe_scheme = True - se_epi_pe_scheme = se_epi_manual_pe_scheme - - elif pe_design == 'All': - # Criteria: - # * If present in own header: - # - Nothing to do - # * If _not_ present in own header: - # - Don't have enough information to proceed - # - Is this too harsh? (e.g. Have rules by which it may be inferred from the DWI header / command-line) - if not se_epi_pe_scheme: - raise MRtrixError('If explicitly including SE EPI images when using -rpe_all option, they must come with their own associated phase-encoding information in the image header') - - elif pe_design == 'Header': - # Criteria: - # * If present in own header: - # Nothing to do (-pe_dir option is mutually exclusive) - # * If _not_ present in own header: - # Cannot proceed - if not se_epi_pe_scheme: - raise MRtrixError('No phase-encoding information present in SE-EPI image header') - # If there is no phase encoding contrast within the SE-EPI series, - # try combining it with the DWI b=0 volumes, see if that produces some contrast - # However, this should probably only be permitted if the -align_seepi option is defined - se_epi_pe_scheme_has_contrast = 'pe_scheme' in se_epi_header.keyval() - if not se_epi_pe_scheme_has_contrast: - if app.ARGS.align_seepi: - app.console('No phase-encoding contrast present in SE-EPI images; will examine again after combining with DWI b=0 images') - new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_dwibzeros.mif' - # Don't worry about trying to produce a balanced scheme here - run.command('dwiextract dwi.mif - -bzero | mrcat - ' + se_epi_path + ' ' + new_se_epi_path + ' -axis 3') - se_epi_header = image.Header(new_se_epi_path) - se_epi_pe_scheme_has_contrast = 'pe_scheme' in se_epi_header.keyval() - if se_epi_pe_scheme_has_contrast: - app.cleanup(se_epi_path) - se_epi_path = new_se_epi_path - se_epi_pe_scheme = phaseencoding.get_scheme(se_epi_header) - dwi_bzero_added_to_se_epi = True - # Delay testing appropriateness of the concatenation of these images - # (i.e. 
differences in contrast) to later
- else:
- raise MRtrixError('No phase-encoding contrast present in SE-EPI images, even after concatenating with b=0 images due to -align_seepi option; '
- 'cannot perform inhomogeneity field estimation')
- else:
- raise MRtrixError('No phase-encoding contrast present in SE-EPI images; cannot perform inhomogeneity field estimation')
-
- if app.ARGS.align_seepi:
-
- for field_name, description in { 'EchoTime': 'echo time',
- 'RepetitionTime': 'repetition time',
- 'FlipAngle': 'flip angle' }.items():
- dwi_value = dwi_header.keyval().get(field_name)
- se_epi_value = se_epi_header.keyval().get(field_name)
- if dwi_value and se_epi_value and dwi_value != se_epi_value:
- app.warn('It appears that the spin-echo EPI images used for inhomogeneity field estimation have a different ' + description + ' to the DWIs being corrected. '
- 'This may cause issues in estimation of the field, as the first DWI b=0 volume will be added to the input series to topup '
- 'due to use of the -align_seepi option.')
-
- # If we are using the -se_epi option, and hence the input images to topup have not come from the DWIs themselves,
- # we need to insert the first b=0 DWI volume to the start of the topup input image. Otherwise, the field estimated
- # by topup will not be correctly aligned with the volumes as they are processed by eddy.
- #
- # However, there's also a code path by which we may have already performed this addition.
- # If we have already spliced the b=0 volumes from the DWI input with the SE-EPI image
- # (due to the absence of phase-encoding contrast in the SE-EPI series), we don't want to
- # re-attempt such a concatenation; the fact that the DWI b=0 images were inserted ahead of
- # the SE-EPI images means that the alignment issue has already been dealt with. 
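# A minimal sketch of the balance test applied in the block below: the SE-EPI phase-encoding
# scheme is considered "balanced" if the per-volume PE direction vectors sum to zero, and the
# first DWI b=0 volume can substitute an SE-EPI volume only if one shares its PE direction.
# (Helper name and example values here are hypothetical, for illustration only.)
def balanced_and_replaceable(pe_scheme, bzero_dir):
  # pe_scheme: list of [x, y, z, trt] rows; bzero_dir: 3-vector PE direction of the DWI b=0 volume
  pe_sum = [ sum(row[axis] for row in pe_scheme) for axis in range(3) ]
  match = next((index for index, row in enumerate(pe_scheme) if row[0:3] == bzero_dir[0:3]), None)
  return pe_sum == [0, 0, 0], match

# e.g. a single AP/PA pair is balanced, and its first volume matches an AP-encoded b=0 volume:
# balanced_and_replaceable([[0, -1, 0, 0.05], [0, 1, 0, 0.05]], [0, -1, 0]) -> (True, 0)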
- - if dwi_first_bzero_index == len(grad) and not dwi_bzero_added_to_se_epi: - - app.warn('Unable to find b=0 volume in input DWIs to provide alignment between topup and eddy; script will proceed as though the -align_seepi option were not provided') - - # If b=0 volumes from the DWIs have already been added to the SE-EPI image due to an - # absence of phase-encoding contrast in the latter, we don't need to perform the following - elif not dwi_bzero_added_to_se_epi: - - run.command('mrconvert dwi.mif dwi_first_bzero.mif -coord 3 ' + str(dwi_first_bzero_index) + ' -axes 0,1,2') - dwi_first_bzero_pe = dwi_manual_pe_scheme[dwi_first_bzero_index] if overwrite_dwi_pe_scheme else dwi_pe_scheme[dwi_first_bzero_index] - - se_epi_pe_sum = [ 0, 0, 0 ] - se_epi_volume_to_remove = len(se_epi_pe_scheme) - for index, line in enumerate(se_epi_pe_scheme): - se_epi_pe_sum = [ i + j for i, j in zip(se_epi_pe_sum, line[0:3]) ] - if se_epi_volume_to_remove == len(se_epi_pe_scheme) and line[0:3] == dwi_first_bzero_pe[0:3]: - se_epi_volume_to_remove = index - new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_firstdwibzero.mif' - if (se_epi_pe_sum == [ 0, 0, 0 ]) and (se_epi_volume_to_remove < len(se_epi_pe_scheme)): - app.console('Balanced phase-encoding scheme detected in SE-EPI series; volume ' + str(se_epi_volume_to_remove) + ' will be removed and replaced with first b=0 from DWIs') - run.command('mrconvert ' + se_epi_path + ' - -coord 3 ' + ','.join([str(index) for index in range(len(se_epi_pe_scheme)) if not index == se_epi_volume_to_remove]) + ' | mrcat dwi_first_bzero.mif - ' + new_se_epi_path + ' -axis 3') - # Also need to update the phase-encoding scheme appropriately if it's being set manually - # (if embedded within the image headers, should be updated through the command calls) - if se_epi_manual_pe_scheme: - first_line = list(manual_pe_dir) - first_line.append(trt) - new_se_epi_manual_pe_scheme = [ ] - new_se_epi_manual_pe_scheme.append(first_line) - for index, entry in enumerate(se_epi_manual_pe_scheme): - if not index == se_epi_volume_to_remove: - new_se_epi_manual_pe_scheme.append(entry) - se_epi_manual_pe_scheme = new_se_epi_manual_pe_scheme - else: - if se_epi_pe_sum == [ 0, 0, 0 ] and se_epi_volume_to_remove == len(se_epi_pe_scheme): - app.console('Phase-encoding scheme of -se_epi image is balanced, but could not find appropriate volume with which to substitute first b=0 volume from DWIs; first b=0 DWI volume will be inserted to start of series, resulting in an unbalanced scheme') - else: - app.console('Unbalanced phase-encoding scheme detected in series provided via -se_epi option; first DWI b=0 volume will be inserted to start of series') - run.command('mrcat dwi_first_bzero.mif ' + se_epi_path + ' ' + new_se_epi_path + ' -axis 3') - # Also need to update the phase-encoding scheme appropriately - if se_epi_manual_pe_scheme: - first_line = list(manual_pe_dir) - first_line.append(trt) - se_epi_manual_pe_scheme = [ first_line, se_epi_manual_pe_scheme ] - - # Ended branching based on balanced-ness of PE acquisition scheme within SE-EPI volumes - app.cleanup(se_epi_path) - app.cleanup('dwi_first_bzero.mif') - se_epi_path = new_se_epi_path - - # Ended branching based on: - # - Detection of first b=0 volume in DWIs; or - # - Prior merge of SE-EPI and DWI b=0 volumes due to no phase-encoding contrast in SE-EPI - - # Completed checking for presence of -se_epi option - - elif not pe_design == 'None': # No SE EPI images explicitly provided: In some cases, can extract appropriate b=0 images from 
DWI - - # If using 'All' or 'Header', and haven't been given any topup images, need to extract the b=0 volumes from the series, - # preserving phase-encoding information while doing so - # Preferably also make sure that there's some phase-encoding contrast in there... - # With -rpe_all, need to write inferred phase-encoding to file and import before using dwiextract so that the phase-encoding - # of the extracted b=0's is propagated to the generated b=0 series - run.command('mrconvert dwi.mif' + import_dwi_pe_table_option + ' - | dwiextract - ' + se_epi_path + ' -bzero') - se_epi_header = image.Header(se_epi_path) - - # If there's no contrast remaining in the phase-encoding scheme, it'll be written to - # PhaseEncodingDirection and TotalReadoutTime rather than pe_scheme - # In this scenario, we will be unable to run topup, or volume recombination - if 'pe_scheme' not in se_epi_header.keyval(): - if pe_design == 'All': - raise MRtrixError('DWI header indicates no phase encoding contrast between b=0 images; cannot proceed with volume recombination-based pre-processing') - app.warn('DWI header indicates no phase encoding contrast between b=0 images; proceeding without inhomogeneity field estimation') - do_topup = False - run.function(os.remove, se_epi_path) - se_epi_path = None - se_epi_header = None - - - # If the first b=0 volume in the DWIs is in fact not the first volume (i.e. index zero), we're going to - # manually place it at the start of the DWI volumes when they are input to eddy, so that the - # first input volume to topup and the first input volume to eddy are one and the same. - # Note: If at a later date, the statistical outputs from eddy are considered (e.g. motion, outliers), - # then this volume permutation will need to be taken into account - if dwi_first_bzero_index == len(grad): - app.warn("No image volumes were classified as b=0 by MRtrix3; no permutation of order of DWI volumes can occur " + \ - "(do you need to adjust config file entry BZeroThreshold?)") - elif dwi_first_bzero_index: - app.console('First b=0 volume in input DWIs is volume index ' + str(dwi_first_bzero_index) + '; ' - 'this will be permuted to be the first volume (index 0) when eddy is run') - dwi_permvols_preeddy_option = ' -coord 3 ' + \ - str(dwi_first_bzero_index) + \ - ',0' + \ - (':' + str(dwi_first_bzero_index-1) if dwi_first_bzero_index > 1 else '') + \ - (',' + str(dwi_first_bzero_index+1) if dwi_first_bzero_index < dwi_num_volumes-1 else '') + \ - (':' + str(dwi_num_volumes-1) if dwi_first_bzero_index < dwi_num_volumes-2 else '') - dwi_permvols_posteddy_option = ' -coord 3 1' + \ - (':' + str(dwi_first_bzero_index) if dwi_first_bzero_index > 1 else '') + \ - ',0' + \ - (',' + str(dwi_first_bzero_index+1) if dwi_first_bzero_index < dwi_num_volumes-1 else '') + \ - (':' + str(dwi_num_volumes-1) if dwi_first_bzero_index < dwi_num_volumes-2 else '') - app.debug('mrconvert options for axis permutation:') - app.debug('Pre: ' + str(dwi_permvols_preeddy_option)) - app.debug('Post: ' + str(dwi_permvols_posteddy_option)) - - - - # This may be required when setting up the topup call - se_epi_manual_pe_table_option = '' - if se_epi_manual_pe_scheme: - phaseencoding.save('se_epi_manual_pe_scheme.txt', se_epi_manual_pe_scheme) - se_epi_manual_pe_table_option = ' -import_pe_table se_epi_manual_pe_scheme.txt' - - - # Need gradient table if running dwi2mask after applytopup to derive a brain mask for eddy - run.command('mrinfo dwi.mif -export_grad_mrtrix grad.b') - - - eddy_in_topup_option = '' - 
dwi_post_eddy_crop_option = '' - slice_padded = False - dwi_path = 'dwi.mif' - if do_topup: - - # topup will crash if its input image has a spatial dimension with a non-even size; - # presumably due to a downsampling by a factor of 2 in a multi-resolution scheme - # The newest eddy also requires the output from topup and the input DWIs to have the same size; - # therefore this restriction applies to the DWIs as well - # Rather than crop in this case (which would result in a cropped output image), - # duplicate the last slice on any problematic axis, and then crop that extra - # slice at the output step - # By this point, if the input SE-EPI images and DWIs are not on the same image grid, the - # SE-EPI images have already been re-gridded to DWI image space; - odd_axis_count = 0 - for axis_size in dwi_header.size()[:3]: - if int(axis_size%2): - odd_axis_count += 1 - if odd_axis_count: - app.console(str(odd_axis_count) + ' spatial ' + ('axes of DWIs have' if odd_axis_count > 1 else 'axis of DWIs has') + ' non-even size; ' - 'this will be automatically padded for compatibility with topup, and the extra slice' + ('s' if odd_axis_count > 1 else '') + ' erased afterwards') - for axis, axis_size in enumerate(dwi_header.size()[:3]): - if int(axis_size%2): - new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_pad' + str(axis) + '.mif' - run.command('mrconvert ' + se_epi_path + ' -coord ' + str(axis) + ' ' + str(axis_size-1) + ' - | mrcat ' + se_epi_path + ' - ' + new_se_epi_path + ' -axis ' + str(axis)) - app.cleanup(se_epi_path) - se_epi_path = new_se_epi_path - new_dwi_path = os.path.splitext(dwi_path)[0] + '_pad' + str(axis) + '.mif' - run.command('mrconvert ' + dwi_path + ' -coord ' + str(axis) + ' ' + str(axis_size-1) + ' -clear dw_scheme - | mrcat ' + dwi_path + ' - ' + new_dwi_path + ' -axis ' + str(axis)) - app.cleanup(dwi_path) - dwi_path = new_dwi_path - dwi_post_eddy_crop_option += ' -coord ' + str(axis) + ' 0:' + str(axis_size-1) - if axis == slice_encoding_axis: - slice_padded = True - dwi_num_slices += 1 - # If we are padding the slice axis, and performing slice-to-volume correction, - # then we need to perform the corresponding padding to the slice timing - if eddy_mporder: - # At this point in the script, this information may be encoded either within - # the slice timing vector (as imported from the image header), or as - # slice groups (i.e. in the format expected by eddy). How these data are - # stored affects how the padding is performed. 
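# A minimal sketch of the padding bookkeeping described above (sizes are hypothetical): any
# spatial axis with an odd size has its final slice duplicated before topup / eddy, and a
# matching crop string is accumulated so that the extra slice can be removed from the outputs.
size = [ 96, 96, 55 ]   # example spatial dimensions; axis 2 has an odd size
crop_option = ''
for axis, axis_size in enumerate(size):
  if axis_size % 2:
    # the final slice (index axis_size-1) is duplicated; cropping back to 0:axis_size-1 undoes it
    crop_option += ' -coord ' + str(axis) + ' 0:' + str(axis_size-1)
# crop_option is now ' -coord 2 0:54'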
- if slice_timing: - slice_timing.append(slice_timing[-1]) - elif slice_groups: - # Can't edit in place when looping through the list - new_slice_groups = [ ] - for group in slice_groups: - if axis_size-1 in group: - group.append(axis_size) - new_slice_groups.append(group) - slice_groups = new_slice_groups - - - # Do the conversion in preparation for topup - run.command('mrconvert ' + se_epi_path + ' topup_in.nii' + se_epi_manual_pe_table_option + ' -strides -1,+2,+3,+4 -export_pe_table topup_datain.txt') - app.cleanup(se_epi_path) - - # Run topup - topup_manual_options = '' - if app.ARGS.topup_options: - topup_manual_options = ' ' + app.ARGS.topup_options.strip() - topup_output = run.command(topup_cmd + ' --imain=topup_in.nii --datain=topup_datain.txt --out=field --fout=field_map' + fsl_suffix + ' --config=' + topup_config_path + ' --verbose' + topup_manual_options) - with open('topup_output.txt', 'wb') as topup_output_file: - topup_output_file.write((topup_output.stdout + '\n' + topup_output.stderr + '\n').encode('utf-8', errors='replace')) - if app.VERBOSITY > 1: - app.console('Output of topup command:') - sys.stderr.write(topup_output.stdout + '\n' + topup_output.stderr + '\n') - - # Apply the warp field to the input image series to get an initial corrected volume estimate - # applytopup can't receive the complete DWI input and correct it as a whole, because the phase-encoding - # details may vary between volumes - if dwi_manual_pe_scheme: - run.command('mrconvert ' + dwi_path + import_dwi_pe_table_option + ' - | mrinfo - -export_pe_eddy applytopup_config.txt applytopup_indices.txt') - else: - run.command('mrinfo ' + dwi_path + ' -export_pe_eddy applytopup_config.txt applytopup_indices.txt') - - - # Update: Call applytopup separately for each unique phase-encoding - # This should be the most compatible option with more complex phase-encoding acquisition designs, - # since we don't need to worry about applytopup performing volume recombination - # Plus, recombination doesn't need to be optimal; we're only using this to derive a brain mask - applytopup_image_list = [ ] - index = 1 - applytopup_config = matrix.load_matrix('applytopup_config.txt') - applytopup_indices = matrix.load_vector('applytopup_indices.txt', dtype=int) - applytopup_volumegroups = [ [ index for index, value in enumerate(applytopup_indices) if value == group ] for group in range(1, len(applytopup_config)+1) ] - app.debug('applytopup_config: ' + str(applytopup_config)) - app.debug('applytopup_indices: ' + str(applytopup_indices)) - app.debug('applytopup_volumegroups: ' + str(applytopup_volumegroups)) - for index, group in enumerate(applytopup_volumegroups): - prefix = os.path.splitext(dwi_path)[0] + '_pe_' + str(index) - input_path = prefix + '.nii' - json_path = prefix + '.json' - temp_path = prefix + '_applytopup.nii' - output_path = prefix + '_applytopup.mif' - run.command('mrconvert ' + dwi_path + ' ' + input_path + ' -coord 3 ' + ','.join(str(value) for value in group) + ' -strides -1,+2,+3,+4 -json_export ' + json_path) - run.command(applytopup_cmd + ' --imain=' + input_path + ' --datain=applytopup_config.txt --inindex=' + str(index+1) + ' --topup=field --out=' + temp_path + ' --method=jac') - app.cleanup(input_path) - temp_path = fsl.find_image(temp_path) - run.command('mrconvert ' + temp_path + ' ' + output_path + ' -json_import ' + json_path) - app.cleanup(json_path) - app.cleanup(temp_path) - applytopup_image_list.append(output_path) - index += 1 - - # Use the initial corrected volumes to derive a brain mask 
for eddy - if not app.ARGS.eddy_mask: - if len(applytopup_image_list) == 1: - run.command('dwi2mask ' + applytopup_image_list[0] + ' - | maskfilter - dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') - else: - run.command('mrcat ' + ' '.join(applytopup_image_list) + ' - -axis 3 | dwi2mask - - | maskfilter - dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') - - app.cleanup(applytopup_image_list) - - eddy_in_topup_option = ' --topup=field' - - else: - - # Generate a processing mask for eddy based on the uncorrected input DWIs - if not app.ARGS.eddy_mask: - run.command('dwi2mask ' + dwi_path + ' - | maskfilter - dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') - - - # Use user supplied mask for eddy instead of one derived from the images using dwi2mask - if app.ARGS.eddy_mask: - if image.match('eddy_mask.mif', dwi_path, up_to_dim=3): - run.command('mrconvert eddy_mask.mif eddy_mask.nii -datatype float32 -stride -1,+2,+3') - else: - app.warn('User-provided processing mask for eddy does not match DWI voxel grid; resampling') - run.command('mrtransform eddy_mask.mif - -template ' + dwi_path + ' -interp linear | ' - + 'mrthreshold - -abs 0.5 - | ' - + 'mrconvert - eddy_mask.nii -datatype float32 -stride -1,+2,+3') - app.cleanup('eddy_mask.mif') - - # Generate the text file containing slice timing / grouping information if necessary - if eddy_mporder: - if slice_timing: - # This list contains, for each slice, the timing offset between acquisition of the - # first slice in the volume, and acquisition of that slice - # Eddy however requires a text file where each row contains those slices that were - # acquired with a single readout, in ordered rows from first slice (group) - # acquired to last slice (group) acquired - if sum(slice_encoding_direction) < 0: - slice_timing = reversed(slice_timing) - slice_groups = [ [ x[0] for x in g ] for _, g in itertools.groupby(sorted(enumerate(slice_timing), key=lambda x:x[1]), key=lambda x:x[1]) ] #pylint: disable=unused-variable - app.debug('Slice timing: ' + str(slice_timing)) - app.debug('Resulting slice groups: ' + str(slice_groups)) - # Variable slice_groups may have already been defined in the correct format. - # In that instance, there's nothing to do other than write it to file; - # UNLESS the slice encoding direction is known to be reversed, in which case - # we need to reverse the timings. Would think that this would however be - # rare, given it requires that the slspec text file be provided manually but - # SliceEncodingDirection to be present. 
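# A worked example of the timing-to-slspec conversion performed above, for a hypothetical
# 6-slice acquisition with multiband factor 3 (slices 0, 2, 4 excited at t=0 and slices 1, 3, 5 at t=0.5 s):
import itertools
slice_timing_example = [ 0.0, 0.5, 0.0, 0.5, 0.0, 0.5 ]
groups_example = [ [ x[0] for x in g ]
                   for _, g in itertools.groupby(sorted(enumerate(slice_timing_example), key=lambda x: x[1]),
                                                 key=lambda x: x[1]) ]
# groups_example == [[0, 2, 4], [1, 3, 5]]: each row of slspec.txt lists the slices acquired in one
# excitation, with rows ordered from the earliest excitation to the latest.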
- elif slice_groups and sum(slice_encoding_direction) < 0:
- new_slice_groups = [ ]
- for group in slice_groups:
- new_slice_groups.append([ dwi_num_slices-index for index in group ])
- app.debug('Slice groups reversed due to negative slice encoding direction')
- app.debug('Original: ' + str(slice_groups))
- app.debug('New: ' + str(new_slice_groups))
- slice_groups = new_slice_groups
-
- matrix.save_numeric('slspec.txt', slice_groups, add_to_command_history=False, fmt='%d')
- eddy_manual_options.append('--slspec=slspec.txt')
-
-
- # Revert eddy_manual_options from a list back to a single string
- eddy_manual_options = (' ' + ' '.join(eddy_manual_options)) if eddy_manual_options else ''
-
-
- # Prepare input data for eddy
- run.command('mrconvert ' + dwi_path + import_dwi_pe_table_option + dwi_permvols_preeddy_option + ' eddy_in.nii -strides -1,+2,+3,+4 -export_grad_fsl bvecs bvals -export_pe_eddy eddy_config.txt eddy_indices.txt')
- app.cleanup(dwi_path)
-
- # Run eddy
- # If a CUDA version is in PATH, run that first; if it fails, re-try using the non-CUDA version
- eddy_all_options = '--imain=eddy_in.nii --mask=eddy_mask.nii --acqp=eddy_config.txt --index=eddy_indices.txt --bvecs=bvecs --bvals=bvals' + eddy_in_topup_option + eddy_manual_options + ' --out=dwi_post_eddy --verbose'
- eddy_cuda_cmd = fsl.eddy_binary(True)
- eddy_openmp_cmd = fsl.eddy_binary(False)
- if eddy_cuda_cmd:
- # If running CUDA version, but OpenMP version is also available, don't stop the script if the CUDA version fails
- try:
- eddy_output = run.command(eddy_cuda_cmd + ' ' + eddy_all_options)
- except run.MRtrixCmdError as exception_cuda:
- if not eddy_openmp_cmd:
- raise
- with open('eddy_cuda_failure_output.txt', 'wb') as eddy_output_file:
- eddy_output_file.write(str(exception_cuda).encode('utf-8', errors='replace'))
- app.console('CUDA version of \'eddy\' was not successful; attempting OpenMP version')
- try:
- eddy_output = run.command(eddy_openmp_cmd + ' ' + eddy_all_options)
- except run.MRtrixCmdError as exception_openmp:
- with open('eddy_openmp_failure_output.txt', 'wb') as eddy_output_file:
- eddy_output_file.write(str(exception_openmp).encode('utf-8', errors='replace'))
- # Both have failed; want to combine error messages
- eddy_cuda_header = ('=' * len(eddy_cuda_cmd)) \
- + '\n' \
- + eddy_cuda_cmd \
- + '\n' \
- + ('=' * len(eddy_cuda_cmd)) \
- + '\n'
- eddy_openmp_header = ('=' * len(eddy_openmp_cmd)) \
- + '\n' \
- + eddy_openmp_cmd \
- + '\n' \
- + ('=' * len(eddy_openmp_cmd)) \
- + '\n'
- exception_stdout = eddy_cuda_header \
- + exception_cuda.stdout \
- + '\n\n' \
- + eddy_openmp_header \
- + exception_openmp.stdout \
- + '\n\n'
- exception_stderr = eddy_cuda_header \
- + exception_cuda.stderr \
- + '\n\n' \
- + eddy_openmp_header \
- + exception_openmp.stderr \
- + '\n\n'
- raise run.MRtrixCmdError('eddy* ' + eddy_all_options,
- 1,
- exception_stdout,
- exception_stderr)
-
- else:
- eddy_output = run.command(eddy_openmp_cmd + ' ' + eddy_all_options)
- with open('eddy_output.txt', 'wb') as eddy_output_file:
- eddy_output_file.write((eddy_output.stdout + '\n' + eddy_output.stderr + '\n').encode('utf-8', errors='replace'))
- if app.VERBOSITY > 1:
- app.console('Output of eddy command:')
- sys.stderr.write(eddy_output.stdout + '\n' + eddy_output.stderr + '\n')
- app.cleanup('eddy_in.nii')
-
- eddy_output_image_path = fsl.find_image('dwi_post_eddy')
-
-
- # Check to see whether or not eddy has provided a rotated bvecs file;
- # if it has, import this into the output image
- bvecs_path = 
'dwi_post_eddy.eddy_rotated_bvecs' - if not os.path.isfile(bvecs_path): - app.warn('eddy has not provided rotated bvecs file; using original gradient table. Recommend updating FSL eddy to version 5.0.9 or later.') - bvecs_path = 'bvecs' - - - # Run eddy qc tool QUAD if installed and one of -eddyqc_text or -eddyqc_all is specified - eddyqc_prefix = 'dwi_post_eddy' - if eddyqc_path: - if find_executable('eddy_quad'): - - eddyqc_mask = 'eddy_mask.nii' - eddyqc_fieldmap = fsl.find_image('field_map') if do_topup else None - eddyqc_slspec = 'slspec.txt' if eddy_mporder else None - - # If there was any relevant padding applied, then we want to provide - # the comprehensive set of files to EddyQC with that padding removed - if dwi_post_eddy_crop_option: - progress = app.ProgressBar('Removing image padding prior to running EddyQC', len(eddyqc_files) + 3) - - for eddy_filename in eddyqc_files: - if os.path.isfile('dwi_post_eddy.' + eddy_filename): - if slice_padded and eddy_filename in [ 'eddy_outlier_map', 'eddy_outlier_n_sqr_stdev_map', 'eddy_outlier_n_stdev_map' ]: - with open('dwi_post_eddy.' + eddy_filename, 'r') as f_eddyfile: - eddy_data = f_eddyfile.readlines() - eddy_data_header = eddy_data[0] - eddy_data = eddy_data[1:] - for line in eddy_data: - line = ' '.join(line.strip().split(' ')[:-1]) - with open('dwi_post_eddy_unpad.' + eddy_filename, 'w') as f_eddyfile: - f_eddyfile.write(eddy_data_header + '\n') - f_eddyfile.write('\n'.join(eddy_data) + '\n') - elif eddy_filename.endswith('.nii.gz'): - run.command('mrconvert dwi_post_eddy.' + eddy_filename + ' dwi_post_eddy_unpad.' + eddy_filename + dwi_post_eddy_crop_option) - else: - run.function(os.symlink, 'dwi_post_eddy.' + eddy_filename, 'dwi_post_eddy_unpad.' + eddy_filename) - app.cleanup('dwi_post_eddy.' 
+ eddy_filename) - progress.increment() - - if eddy_mporder and slice_padded: - app.debug('Current slice groups: ' + str(slice_groups)) - app.debug('Slice encoding direction: ' + str(slice_encoding_direction)) - # Remove padded slice from slice_groups, write new slspec - if sum(slice_encoding_direction) < 0: - slice_groups = [ [ index-1 for index in group if index ] for group in slice_groups ] - else: - slice_groups = [ [ index for index in group if index != dwi_num_slices-1 ] for group in slice_groups ] - eddyqc_slspec = 'slspec_unpad.txt' - app.debug('Slice groups after removal: ' + str(slice_groups)) - try: - # After this removal, slspec should now be a square matrix - assert all(len(group) == len(slice_groups[0]) for group in slice_groups[1:]) - matrix.save_matrix(eddyqc_slspec, slice_groups, add_to_command_history=False, fmt='%d') - except AssertionError: - matrix.save_numeric(eddyqc_slspec, slice_groups, add_to_command_history=False, fmt='%d') - raise - - run.command('mrconvert eddy_mask.nii eddy_mask_unpad.nii' + dwi_post_eddy_crop_option) - eddyqc_mask = 'eddy_mask_unpad.nii' - progress.increment() - run.command('mrconvert ' + fsl.find_image('field_map') + ' field_map_unpad.nii' + dwi_post_eddy_crop_option) - eddyqc_fieldmap = 'field_map_unpad.nii' - progress.increment() - run.command('mrconvert ' + eddy_output_image_path + ' dwi_post_eddy_unpad.nii.gz' + dwi_post_eddy_crop_option) - eddyqc_prefix = 'dwi_post_eddy_unpad' - progress.done() - - eddyqc_options = ' -idx eddy_indices.txt -par eddy_config.txt -b bvals -m ' + eddyqc_mask - if os.path.isfile(eddyqc_prefix + '.eddy_residuals.nii.gz'): - eddyqc_options += ' -g ' + bvecs_path - if do_topup: - eddyqc_options += ' -f ' + eddyqc_fieldmap - if eddy_mporder: - eddyqc_options += ' -s ' + eddyqc_slspec - if app.VERBOSITY > 2: - eddyqc_options += ' -v' - try: - run.command('eddy_quad ' + eddyqc_prefix + eddyqc_options) - except run.MRtrixCmdError as exception: - with open('eddy_quad_failure_output.txt', 'wb') as eddy_quad_output_file: - eddy_quad_output_file.write(str(exception).encode('utf-8', errors='replace')) - app.debug(str(exception)) - app.warn('Error running automated EddyQC tool \'eddy_quad\'; QC data written to "' + eddyqc_path + '" will be files from "eddy" only') - # Delete the directory if the script only made it partway through - try: - shutil.rmtree(eddyqc_prefix + '.qc') - except OSError: - pass - else: - app.console('Command \'eddy_quad\' not found in PATH; skipping') - - - # Have to retain these images until after eddyQC is run - # If using -eddyqc_all, also write the mask provided to eddy to the output directory; - # therefore don't delete it yet here - if not app.ARGS.eddyqc_all: - app.cleanup('eddy_mask.nii') - if do_topup: - app.cleanup(fsl.find_image('field_fieldcoef')) - - - # Get the axis strides from the input series, so the output image can be modified to match - stride_option = ' -strides ' + ','.join([str(i) for i in dwi_header.strides()]) - - - # Determine whether or not volume recombination should be performed - # This could be either due to use of -rpe_all option, or just due to the data provided with -rpe_header - # Rather than trying to re-use the code that was used in the case of -rpe_all, run fresh code - # The phase-encoding scheme needs to be checked also - volume_matchings = [ dwi_num_volumes ] * dwi_num_volumes - volume_pairs = [ ] - app.debug('Commencing gradient direction matching; ' + str(dwi_num_volumes) + ' volumes') - for index1 in range(dwi_num_volumes): - if volume_matchings[index1] == 
dwi_num_volumes: # As yet unpaired - for index2 in range(index1+1, dwi_num_volumes): - if volume_matchings[index2] == dwi_num_volumes: # Also as yet unpaired - # Here, need to check both gradient matching and reversed phase-encode direction - if not any(dwi_pe_scheme[index1][i] + dwi_pe_scheme[index2][i] for i in range(0,3)) and grads_match(index1, index2): - volume_matchings[index1] = index2 - volume_matchings[index2] = index1 - volume_pairs.append([index1, index2]) - app.debug('Matched volume ' + str(index1) + ' with ' + str(index2) + '\n' + - 'Phase encoding: ' + str(dwi_pe_scheme[index1]) + ' ' + str(dwi_pe_scheme[index2]) + '\n' + - 'Gradients: ' + str(grad[index1]) + ' ' + str(grad[index2])) - break - - - if len(volume_pairs) != int(dwi_num_volumes/2): - - if do_topup: - app.cleanup('topup_in.nii') - app.cleanup(fsl.find_image('field_map')) - - # Convert the resulting volume to the output image, and re-insert the diffusion encoding - run.command('mrconvert ' + eddy_output_image_path + ' result.mif' + dwi_permvols_posteddy_option + dwi_post_eddy_crop_option + stride_option + ' -fslgrad ' + bvecs_path + ' bvals') - app.cleanup(eddy_output_image_path) - - else: - app.console('Detected matching DWI volumes with opposing phase encoding; performing explicit volume recombination') - - # Perform a manual combination of the volumes output by eddy, since LSR is disabled - - # Generate appropriate bvecs / bvals files - # Particularly if eddy has provided rotated bvecs, since we're combining two volumes into one that - # potentially have subject rotation between them (and therefore the sensitisation direction is - # not precisely equivalent), the best we can do is take the mean of the two vectors. - # Manual recombination of volumes needs to take into account the explicit volume matching - - bvecs = matrix.load_matrix(bvecs_path) - bvecs_combined_transpose = [ ] - bvals_combined = [ ] - - for pair in volume_pairs: - bvec_mean = [ 0.5*(bvecs[0][pair[0]] + bvecs[0][pair[1]]), - 0.5*(bvecs[1][pair[0]] + bvecs[1][pair[1]]), - 0.5*(bvecs[2][pair[0]] + bvecs[2][pair[1]]) ] - norm2 = matrix.dot(bvec_mean, bvec_mean) - - # If one diffusion sensitisation gradient direction is reversed with respect to - # the other, still want to enable their recombination; but need to explicitly - # account for this when averaging the two directions - if norm2 < 0.5: - bvec_mean = [ 0.5*(bvecs[0][pair[0]] - bvecs[0][pair[1]]), - 0.5*(bvecs[1][pair[0]] - bvecs[1][pair[1]]), - 0.5*(bvecs[2][pair[0]] - bvecs[2][pair[1]]) ] - norm2 = matrix.dot(bvec_mean, bvec_mean) - - # Occasionally a b=0 volume can have a zero vector - if norm2: - factor = 1.0 / math.sqrt(norm2) - new_vec = [ bvec_mean[0]*factor, bvec_mean[1]*factor, bvec_mean[2]*factor ] - else: - new_vec = [ 0.0, 0.0, 0.0 ] - bvecs_combined_transpose.append(new_vec) - bvals_combined.append(0.5 * (grad[pair[0]][3] + grad[pair[1]][3])) - - bvecs_combined = matrix.transpose(bvecs_combined_transpose) - matrix.save_matrix('bvecs_combined', bvecs_combined, add_to_command_history=False) - matrix.save_vector('bvals_combined', bvals_combined, add_to_command_history=False) - - # Prior to 5.0.8, a bug resulted in the output field map image from topup having an identity transform, - # regardless of the transform of the input image - # Detect this, and manually replace the transform if necessary - # (even if this doesn't cause an issue with the subsequent mrcalc command, it may in the future, it's better for - # visualising the script intermediate files, and it gives the user a 
warning about an out-of-date FSL) - field_map_image = fsl.find_image('field_map') - field_map_header = image.Header(field_map_image) - if not image.match('topup_in.nii', field_map_header, up_to_dim=3): - app.warn('topup output field image has erroneous header; recommend updating FSL to version 5.0.8 or later') - new_field_map_image = 'field_map_fix.mif' - run.command('mrtransform ' + field_map_image + ' -replace topup_in.nii ' + new_field_map_image) - app.cleanup(field_map_image) - field_map_image = new_field_map_image - # In FSL 6.0.0, field map image is erroneously constructed with the same number of volumes as the input image, - # with all but the first volume containing intensity-scaled duplicates of the uncorrected input images - # The first volume is however the expected field offset image - elif len(field_map_header.size()) == 4: - app.console('Correcting erroneous FSL 6.0.0 field map image output') - new_field_map_image = 'field_map_fix.mif' - run.command('mrconvert ' + field_map_image + ' -coord 3 0 -axes 0,1,2 ' + new_field_map_image) - app.cleanup(field_map_image) - field_map_image = new_field_map_image - app.cleanup('topup_in.nii') - - - # Derive the weight images - # Scaling term for field map is identical to the bandwidth provided in the topup config file - # (converts Hz to pixel count; that way a simple image gradient can be used to get the Jacobians) - # Let mrfilter apply the default 1 voxel size gaussian smoothing filter before calculating the field gradient - # - # The jacobian image may be different for any particular volume pair - # The appropriate PE directions and total readout times can be acquired from the eddy-style config/index files - # eddy_config.txt and eddy_indices.txt - eddy_config = matrix.load_matrix('eddy_config.txt') - eddy_indices = matrix.load_vector('eddy_indices.txt', dtype=int) - app.debug('EDDY config: ' + str(eddy_config)) - app.debug('EDDY indices: ' + str(eddy_indices)) - - # This section derives, for each phase encoding configuration present, the 'weight' to be applied - # to the image during volume recombination, which is based on the Jacobian of the field in the - # phase encoding direction - for index, config in enumerate(eddy_config): - pe_axis = [ i for i, e in enumerate(config[0:3]) if e != 0][0] - sign_multiplier = ' -1.0 -mult' if config[pe_axis] < 0 else '' - field_derivative_path = 'field_deriv_pe_' + str(index+1) + '.mif' - run.command('mrcalc ' + field_map_image + ' ' + str(config[3]) + ' -mult' + sign_multiplier + ' - | mrfilter - gradient - | mrconvert - ' + field_derivative_path + ' -coord 3 ' + str(pe_axis) + ' -axes 0,1,2') - jacobian_path = 'jacobian_' + str(index+1) + '.mif' - run.command('mrcalc 1.0 ' + field_derivative_path + ' -add 0.0 -max ' + jacobian_path) - app.cleanup(field_derivative_path) - run.command('mrcalc ' + jacobian_path + ' ' + jacobian_path + ' -mult weight' + str(index+1) + '.mif') - app.cleanup(jacobian_path) - app.cleanup(field_map_image) - - # If eddy provides its main image output in a compressed format, the code block below will need to - # uncompress that image independently for every volume pair. Instead, if this is the case, let's - # convert it to an uncompressed format before we do anything with it. 
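# The recombination performed further below follows Skare & Bammer (2010): each matched volume
# pair is averaged with weights equal to the squared Jacobian of the susceptibility-induced
# distortion along the respective phase-encoding direction, so the less-compressed volume
# dominates at each voxel. A conceptual NumPy sketch (array names are assumptions of this
# illustration, and the script's pre-smoothing of the field is omitted):
import numpy as np

def pe_jacobian(field_hz, total_readout_time, pe_axis, pe_sign):
  # field (Hz) * total readout time (s) = displacement along the PE axis, in voxels
  shift_vox = pe_sign * field_hz * total_readout_time
  # Jacobian of the distortion: 1 + d(shift)/d(position), clamped at zero as in the mrcalc call
  return np.clip(1.0 + np.gradient(shift_vox, axis=pe_axis), 0.0, None)

def recombine(vol_a, vol_b, jac_a, jac_b):
  # Skare & Bammer (2010) weighted average, likewise clamped at zero
  weight_a, weight_b = jac_a ** 2, jac_b ** 2
  return np.clip((vol_a * weight_a + vol_b * weight_b) / (weight_a + weight_b), 0.0, None)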
- if eddy_output_image_path.endswith('.gz'): - new_eddy_output_image_path = 'dwi_post_eddy_uncompressed.mif' - run.command('mrconvert ' + eddy_output_image_path + ' ' + new_eddy_output_image_path) - app.cleanup(eddy_output_image_path) - eddy_output_image_path = new_eddy_output_image_path - - # If the DWI volumes were permuted prior to running eddy, then the simplest approach is to permute them - # back to their original positions; otherwise, the stored gradient vector directions / phase encode - # directions / matched volume pairs are no longer appropriate - if dwi_permvols_posteddy_option: - new_eddy_output_image_path = os.path.splitext(eddy_output_image_path)[0] + '_volpermuteundo.mif' - run.command('mrconvert ' + eddy_output_image_path + dwi_permvols_posteddy_option + ' ' + new_eddy_output_image_path) - app.cleanup(eddy_output_image_path) - eddy_output_image_path = new_eddy_output_image_path - - # This section extracts the two volumes corresponding to each reversed phase-encoded volume pair, and - # derives a single image volume based on the recombination equation - combined_image_list = [ ] - progress = app.ProgressBar('Performing explicit volume recombination', len(volume_pairs)) - for index, volumes in enumerate(volume_pairs): - pe_indices = [ eddy_indices[i] for i in volumes ] - run.command('mrconvert ' + eddy_output_image_path + ' volume0.mif -coord 3 ' + str(volumes[0])) - run.command('mrconvert ' + eddy_output_image_path + ' volume1.mif -coord 3 ' + str(volumes[1])) - # Volume recombination equation described in Skare and Bammer 2010 - combined_image_path = 'combined' + str(index) + '.mif' - run.command('mrcalc volume0.mif weight' + str(pe_indices[0]) + '.mif -mult volume1.mif weight' + str(pe_indices[1]) + '.mif -mult -add weight' + str(pe_indices[0]) + '.mif weight' + str(pe_indices[1]) + '.mif -add -divide 0.0 -max ' + combined_image_path) - combined_image_list.append(combined_image_path) - run.function(os.remove, 'volume0.mif') - run.function(os.remove, 'volume1.mif') - progress.increment() - progress.done() - - app.cleanup(eddy_output_image_path) - for index in range(0, len(eddy_config)): - app.cleanup('weight' + str(index+1) + '.mif') - - # Finally the recombined volumes must be concatenated to produce the resulting image series - combine_command = ['mrcat', combined_image_list, '-', '-axis', '3', '|', \ - 'mrconvert', '-', 'result.mif', '-fslgrad', 'bvecs_combined', 'bvals_combined'] - if dwi_post_eddy_crop_option: - combine_command.extend(dwi_post_eddy_crop_option.strip().split(' ')) - combine_command.extend(stride_option.strip().split(' ')) - run.command(combine_command) - app.cleanup(combined_image_list) - - - # Grab any relevant files that eddy has created, and copy them to the requested directory - if eddyqc_path: - if app.FORCE_OVERWRITE and os.path.exists(eddyqc_path) and not os.path.isdir(eddyqc_path): - run.function(os.remove, eddyqc_path) - if not os.path.exists(eddyqc_path): - run.function(os.makedirs, eddyqc_path) - for filename in eddyqc_files: - if os.path.exists(eddyqc_prefix + '.' + filename): - # If this is an image, and axis padding was applied, want to undo the padding - if filename.endswith('.nii.gz') and dwi_post_eddy_crop_option: - run.command('mrconvert ' + eddyqc_prefix + '.' + filename + ' ' + path.quote(os.path.join(eddyqc_path, filename)) + dwi_post_eddy_crop_option) - else: - run.function(shutil.copy, eddyqc_prefix + '.' 
+ filename, os.path.join(eddyqc_path, filename)) - # Also grab any files generated by the eddy qc tool QUAD - if os.path.isdir(eddyqc_prefix + '.qc'): - if app.FORCE_OVERWRITE and os.path.exists(os.path.join(eddyqc_path, 'quad')): - run.function(shutil.rmtree, os.path.join(eddyqc_path, 'quad')) - run.function(shutil.copytree, eddyqc_prefix + '.qc', os.path.join(eddyqc_path, 'quad')) - # Also grab the brain mask that was provided to eddy if -eddyqc_all was specified - if app.ARGS.eddyqc_all: - if dwi_post_eddy_crop_option: - run.command('mrconvert eddy_mask.nii ' + path.quote(os.path.join(eddyqc_path, 'eddy_mask.nii')) + dwi_post_eddy_crop_option) - else: - run.function(shutil.copy, 'eddy_mask.nii', os.path.join(eddyqc_path, 'eddy_mask.nii')) - app.cleanup('eddy_mask.nii') - - - - - keys_to_remove = [ 'MultibandAccelerationFactor', 'SliceEncodingDirection', 'SliceTiming' ] - # These keys are still relevant for the output data if no EPI distortion correction was performed - if do_topup: - keys_to_remove.extend([ 'PhaseEncodingDirection', 'TotalReadoutTime', 'pe_scheme' ]) - # Get the header key-value entries from the input DWI, remove those we don't wish to keep, and - # export the result to a new JSON file so that they can be inserted into the output header - with open('dwi.json', 'r') as input_json_file: - keyval = json.load(input_json_file) - for key in keys_to_remove: - keyval.pop(key, None) - # Make sure to use the revised diffusion gradient table rather than that of the input; - # incorporates motion correction, and possibly also the explicit volume recombination - keyval['dw_scheme'] = image.Header('result.mif').keyval()['dw_scheme'] - # 'Stash' the phase encoding scheme of the original uncorrected DWIs, since it still - # may be useful information at some point in the future but is no longer relevant - # for e.g. tracking for different volumes, or performing any geometric corrections - if do_topup: - keyval['prior_pe_scheme'] = dwi_manual_pe_scheme if dwi_manual_pe_scheme else dwi_pe_scheme - with open('output.json', 'w') as output_json_file: - json.dump(keyval, output_json_file) - - - # Finish! - run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output) + grad_export_option, mrconvert_keyval='output.json', force=app.FORCE_OVERWRITE) - - - - - - -# Execute the script -import mrtrix3 -mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/dwigradcheck b/bin/dwigradcheck deleted file mode 100755 index 86d310fa8b..0000000000 --- a/bin/dwigradcheck +++ /dev/null @@ -1,210 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. 
- -# note: deal with these warnings properly when we drop support for Python 2: -# pylint: disable=unspecified-encoding,consider-using-f-string - -import copy, numbers, os, shutil, sys - - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Check the orientation of the diffusion gradient table') - cmdline.add_description('Note that the corrected gradient table can be output using the -export_grad_{mrtrix,fsl} option.') - cmdline.add_citation('Jeurissen, B.; Leemans, A.; Sijbers, J. Automated correction of improperly rotated diffusion gradient orientations in diffusion weighted MRI. Medical Image Analysis, 2014, 18(7), 953-962') - cmdline.add_argument('input', help='The input DWI series to be checked') - cmdline.add_argument('-mask', metavar='image', help='Provide a brain mask image') - cmdline.add_argument('-number', type=int, default=10000, help='Set the number of tracks to generate for each test') - - app.add_dwgrad_export_options(cmdline) - app.add_dwgrad_import_options(cmdline) - - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, image, matrix, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - - image_dimensions = image.Header(path.from_user(app.ARGS.input, False)).size() - if len(image_dimensions) != 4: - raise MRtrixError('Input image must be a 4D image') - if min(image_dimensions) == 1: - raise MRtrixError('Cannot perform tractography on an image with a unity dimension') - num_volumes = image_dimensions[3] - - app.make_scratch_dir() - - # Make sure the image data can be memory-mapped - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('data.mif') + ' -strides 0,0,0,1 -datatype float32') - - if app.ARGS.grad: - shutil.copy(path.from_user(app.ARGS.grad, False), path.to_scratch('grad.b', False)) - elif app.ARGS.fslgrad: - shutil.copy(path.from_user(app.ARGS.fslgrad[0], False), path.to_scratch('bvecs', False)) - shutil.copy(path.from_user(app.ARGS.fslgrad[1], False), path.to_scratch('bvals', False)) - if app.ARGS.mask: - run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') - - app.goto_scratch_dir() - - # Make sure we have gradient table stored externally to header in both MRtrix and FSL formats - if not os.path.isfile('grad.b'): - if os.path.isfile('bvecs'): - run.command('mrinfo data.mif -fslgrad bvecs bvals -export_grad_mrtrix grad.b') - else: - run.command('mrinfo data.mif -export_grad_mrtrix grad.b') - - if not os.path.isfile('bvecs'): - if os.path.isfile('grad.b'): - run.command('mrinfo data.mif -grad grad.b -export_grad_fsl bvecs bvals') - else: - run.command('mrinfo data.mif -export_grad_fsl bvecs bvals') - - # Import both of these into local memory - grad_mrtrix = matrix.load_matrix('grad.b') - grad_fsl = matrix.load_matrix('bvecs') - # Is our gradient table of the correct length? 
- if not len(grad_mrtrix) == num_volumes: - raise MRtrixError('Number of entries in gradient table does not match number of DWI volumes') - if not len(grad_fsl) == 3 or not len(grad_fsl[0]) == num_volumes: - raise MRtrixError('Internal error (inconsistent gradient table storage)') - - - # Generate a brain mask if we weren't provided with one - # Note that gradient table must be explicitly loaded, since there may not - # be one in the image header (user may be relying on -grad or -fslgrad input options) - if not os.path.exists('mask.mif'): - run.command('dwi2mask data.mif mask.mif -grad grad.b') - - # How many tracks are we going to generate? - number_option = ' -select ' + str(app.ARGS.number) - - - # What variations of gradient errors can we conceive? - - # Done: - # * Has an axis been flipped? (none, 0, 1, 2) - # * Have axes been swapped? (012 021 102 120 201 210) - # * For both flips & swaps, it could occur in either scanner or image space... - - # To do: - # * Have the gradients been defined with respect to image space rather than scanner space? - # * After conversion to gradients in image space, are they _then_ defined with respect to scanner space? - # (should the above two be tested independently from the axis flips / permutations?) - - - axis_flips = [ 'none', 0, 1, 2 ] - axis_permutations = [ ( 0, 1, 2 ), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0) ] - grad_basis = [ 'scanner', 'image' ] - total_tests = len(axis_flips) * len(axis_permutations) * len(grad_basis) - - - # List where the first element is the mean length - lengths = [ ] - - progress = app.ProgressBar('Testing gradient table alterations (0 of ' + str(total_tests) + ')', total_tests) - - for flip in axis_flips: - for permutation in axis_permutations: - for basis in grad_basis: - - suffix = '_flip' + str(flip) + '_perm' + ''.join(str(item) for item in permutation) + '_' + basis - - if basis == 'scanner': - - grad = copy.copy(grad_mrtrix) - - # Don't do anything if there aren't any axis flips occurring (flip == 'none') - if isinstance(flip, numbers.Number): - multiplier = [ 1.0, 1.0, 1.0, 1.0 ] - multiplier[flip] = -1.0 - grad = [ [ r*m for r,m in zip(row, multiplier) ] for row in grad ] - - grad = [ [ row[permutation[0]], row[permutation[1]], row[permutation[2]], row[3] ] for row in grad ] - - # Create the gradient table file - grad_path = 'grad' + suffix + '.b' - with open(grad_path, 'w') as grad_file: - for line in grad: - grad_file.write (','.join([str(v) for v in line]) + '\n') - - grad_option = ' -grad ' + grad_path - - elif basis == 'image': - - grad = copy.copy(grad_fsl) - - if isinstance(flip, numbers.Number): - grad[flip] = [ -v for v in grad[flip] ] - - grad = [ grad[permutation[0]], grad[permutation[1]], grad[permutation[2]] ] - - grad_path = 'bvecs' + suffix - with open(grad_path, 'w') as bvecs_file: - for line in grad: - bvecs_file.write (' '.join([str(v) for v in line]) + '\n') - - grad_option = ' -fslgrad ' + grad_path + ' bvals' - - # Run the tracking experiment - run.command('tckgen -algorithm tensor_det data.mif' + grad_option + ' -seed_image mask.mif -mask mask.mif' + number_option + ' -minlength 0 -downsample 5 tracks' + suffix + '.tck') - - # Get the mean track length - meanlength=float(run.command('tckstats tracks' + suffix + '.tck -output mean -ignorezero').stdout) - - # Add to the database - lengths.append([meanlength,flip,permutation,basis]) - - # Increament the progress bar - progress.increment('Testing gradient table alterations (' + str(len(lengths)) + ' of ' + str(total_tests) + ')') 
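# As a concrete illustration of the manipulations tested in the loop above (the numbers are
# arbitrary example values): flipping axis 1 and applying permutation (2, 0, 1) to a single
# scanner-space gradient row leaves the b-value untouched and only negates / rearranges the
# direction components.
row = [ 0.2, -0.5, 0.8, 1000.0 ]
flip_example, permutation_example = 1, (2, 0, 1)
multiplier = [ 1.0, 1.0, 1.0, 1.0 ]
multiplier[flip_example] = -1.0
row = [ r*m for r, m in zip(row, multiplier) ]   # -> [0.2, 0.5, 0.8, 1000.0]
row = [ row[permutation_example[0]], row[permutation_example[1]], row[permutation_example[2]], row[3] ]   # -> [0.8, 0.2, 0.5, 1000.0]
# The configuration whose tensor-based tracking produces the longest mean streamline length is
# reported as the most plausible gradient table.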
- - progress.done() - - # Sort the list to find the best gradient configuration(s) - lengths.sort() - lengths.reverse() - - - # Provide a printout of the mean streamline length of each gradient table manipulation - sys.stderr.write('Mean length Axis flipped Axis permutations Axis basis\n') - for line in lengths: - if isinstance(line[1], numbers.Number): - flip_str = "{:4d}".format(line[1]) - else: - flip_str = line[1] - sys.stderr.write("{:5.2f}".format(line[0]) + ' ' + flip_str + ' ' + str(line[2]) + ' ' + line[3] + '\n') - - - # If requested, extract what has been detected as the best gradient table, and - # export it in the format requested by the user - grad_export_option = app.read_dwgrad_export_options() - if grad_export_option: - best = lengths[0] - suffix = '_flip' + str(best[1]) + '_perm' + ''.join(str(item) for item in best[2]) + '_' + best[3] - if best[3] == 'scanner': - grad_import_option = ' -grad grad' + suffix + '.b' - elif best[3] == 'image': - grad_import_option = ' -fslgrad bvecs' + suffix + ' bvals' - run.command('mrinfo data.mif' + grad_import_option + grad_export_option, force=app.FORCE_OVERWRITE) - - -# Execute the script -import mrtrix3 -mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/dwinormalise b/bin/dwinormalise deleted file mode 100755 index 743bcd3127..0000000000 --- a/bin/dwinormalise +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Script that performs intensity normalisation of DWIs in various ways - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import algorithm #pylint: disable=no-name-in-module, import-outside-toplevel - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Perform various forms of intensity normalisation of DWIs') - cmdline.add_description('This script provides access to different techniques for globally scaling the intensity of diffusion-weighted images. ' - 'The different algorithms have different purposes, and different requirements with respect to the data with which they must be provided & will produce as output. ' - 'Further information on the individual algorithms available can be accessed via their individual help pages; eg. 
"dwinormalise group -help".') - - # Import the command-line settings for all algorithms found in the relevant directory - algorithm.usage(cmdline) - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import algorithm, app #pylint: disable=no-name-in-module, import-outside-toplevel - - # Find out which algorithm the user has requested - alg = algorithm.get_module(app.ARGS.algorithm) - alg.check_output_paths() - - # From here, the script splits depending on what algorithm is being used - alg.execute() - - - - - -# Execute the script -import mrtrix3 -mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/dwishellmath b/bin/dwishellmath deleted file mode 100755 index 689075cf6d..0000000000 --- a/bin/dwishellmath +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# note: deal with these warnings properly when we drop support for Python 2: -# pylint: disable=consider-using-f-string - - -SUPPORTED_OPS = ['mean', 'median', 'sum', 'product', 'rms', 'norm', 'var', 'std', 'min', 'max', 'absmax', 'magmax'] - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel - cmdline.set_author('Daan Christiaens (daan.christiaens@kcl.ac.uk)') - cmdline.set_synopsis('Apply an mrmath operation to each b-value shell in a DWI series') - cmdline.add_description('The output of this command is a 4D image, where ' - 'each volume corresponds to a b-value shell (in order of increasing b-value), and ' - 'the intensities within each volume correspond to the chosen statistic having been computed from across the DWI volumes belonging to that b-value shell.') - cmdline.add_argument('input', help='The input diffusion MRI series') - cmdline.add_argument('operation', choices=SUPPORTED_OPS, help='The operation to be applied to each shell; this must be one of the following: ' + ', '.join(SUPPORTED_OPS)) - cmdline.add_argument('output', help='The output image series') - cmdline.add_example_usage('To compute the mean diffusion-weighted signal in each b-value shell', - 'dwishellmath dwi.mif mean shellmeans.mif') - app.add_dwgrad_import_options(cmdline) - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - # check inputs and outputs - dwi_header = image.Header(path.from_user(app.ARGS.input, False)) - if len(dwi_header.size()) != 4: - raise MRtrixError('Input image must be a 4D image') - gradimport = app.read_dwgrad_import_options() - if not gradimport and 'dw_scheme' not in dwi_header.keyval(): - raise MRtrixError('No diffusion gradient table provided, and none present in image header') - app.check_output_path(app.ARGS.output) - # import data and gradient table 
- app.make_scratch_dir() - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('in.mif') + gradimport + ' -strides 0,0,0,1') - app.goto_scratch_dir() - # run per-shell operations - files = [] - for index, bvalue in enumerate(image.mrinfo('in.mif', 'shell_bvalues').split()): - filename = 'shell-{:02d}.mif'.format(index) - run.command('dwiextract -shells ' + bvalue + ' in.mif - | mrmath -axis 3 - ' + app.ARGS.operation + ' ' + filename) - files.append(filename) - if len(files) > 1: - # concatenate to output file - run.command('mrcat -axis 3 ' + ' '.join(files) + ' out.mif') - run.command('mrconvert out.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) - else: - # make a 4D image with one volume - app.warn('Only one unique b-value present in DWI data; command mrmath with -axis 3 option may be preferable') - run.command('mrconvert ' + files[0] + ' ' + path.from_user(app.ARGS.output) + ' -axes 0,1,2,-1', mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) - - -# Execute the script -import mrtrix3 -mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/for_each b/bin/for_each deleted file mode 100755 index fb41d7b3f7..0000000000 --- a/bin/for_each +++ /dev/null @@ -1,306 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - - -import os, re, sys, threading - - - -# Since we're going to capture everything after the colon character and "hide" it from argparse, -# we need to store the contents from there in a global so as for it to be accessible from execute() -CMDSPLIT = [ ] - - - -def usage(cmdline): #pylint: disable=unused-variable - global CMDSPLIT - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and David Raffelt (david.raffelt@florey.edu.au)') - cmdline.set_synopsis('Perform some arbitrary processing step for each of a set of inputs') - cmdline.add_description('This script greatly simplifies various forms of batch processing by enabling the execution of a command (or set of commands) independently for each of a set of inputs. Part of the way that this is achieved is by providing basic text substitutions, which simplify the formation of valid command strings based on the unique components of the input strings on which the script is instructed to execute. The available substitutions are listed below (note that the -test command-line option can be used to ensure correct command string formation prior to actually executing the commands):') - cmdline.add_description(' - IN: The full matching pattern, including leading folders. For example, if the target list contains a file "folder/image.mif", any occurrence of "IN" will be substituted with "folder/image.mif".') - cmdline.add_description(' - NAME: The basename of the matching pattern. 
For example, if the target list contains a file "folder/image.mif", any occurrence of "NAME" will be substituted with "image.mif".') - cmdline.add_description(' - PRE: The prefix of the input pattern (the basename stripped of its extension). For example, if the target list contains a file "folder/my.image.mif.gz", any occurrence of "PRE" will be substituted with "my.image".') - cmdline.add_description(' - UNI: The unique part of the input after removing any common prefix and common suffix. For example, if the target list contains files: "folder/001dwi.mif", "folder/002dwi.mif", "folder/003dwi.mif", any occurrence of "UNI" will be substituted with "001", "002", "003".') - cmdline.add_description('Note that due to a limitation of the Python "argparse" module, any command-line OPTIONS that the user intends to provide specifically to the for_each script must appear BEFORE providing the list of inputs on which for_each is intended to operate. While command-line options provided as such will be interpreted specifically by the for_each script, any command-line options that are provided AFTER the COLON separator will form part of the executed COMMAND, and will therefore be interpreted as command-line options having been provided to that underlying command.') - cmdline.add_example_usage('Demonstration of basic usage syntax', - 'for_each folder/*.mif : mrinfo IN', - 'This will run the "mrinfo" command for every .mif file present in "folder/". Note that the compulsory colon symbol is used to separate the list of items on which for_each is being instructed to operate, from the command that is intended to be run for each input.') - cmdline.add_example_usage('Multi-threaded use of for_each', - 'for_each -nthreads 4 freesurfer/subjects/* : recon-all -subjid NAME -all', - 'In this example, for_each is instructed to run the FreeSurfer command \'recon-all\' for all subjects within the \'subjects\' directory, with four subjects being processed in parallel at any one time. Whenever processing of one subject is completed, processing for a new unprocessed subject will commence. This technique is useful for improving the efficiency of running single-threaded commands on multi-core systems, as long as the system possesses enough memory to support such parallel processing. Note that in the case of multi-threaded commands (which includes many MRtrix3 commands), it is generally preferable to permit multi-threaded execution of the command on a single input at a time, rather than processing multiple inputs in parallel.') - cmdline.add_example_usage('Excluding specific inputs from execution', - 'for_each *.nii -exclude 001.nii : mrconvert IN PRE.mif', - 'Particularly when a wildcard is used to define the list of inputs for for_each, it is possible in some instances that this list will include one or more strings for which execution should in fact not be performed; for instance, if a command has already been executed for one or more files, and then for_each is being used to execute the same command for all other files. In this case, the -exclude option can be used to effectively remove an item from the list of inputs that would otherwise be included due to the use of a wildcard (and can be used more than once to exclude more than one string). In this particular example, mrconvert is instructed to perform conversions from NIfTI to MRtrix image formats, for all except the first image in the directory. Note that any usages of this option must appear AFTER the list of inputs. 
Note also that the argument following the -exclude option can alternatively be a regular expression, in which case any inputs for which a match to the expression is found will be excluded from processing.') - cmdline.add_example_usage('Testing the command string substitution', - 'for_each -test * : mrconvert IN PRE.mif', - 'By specifying the -test option, the script will print to the terminal the results of text substitutions for all of the specified inputs, but will not actually execute those commands. It can therefore be used to verify that the script is receiving the intended set of inputs, and that the text substitutions on those inputs lead to the intended command strings.') - cmdline.add_example_usage('Utilising shell operators within the command substitution', - 'for_each * : tensor2metric IN/dwi.mif - "|" tensor2metric - -fa IN/fa.mif', - 'In this example, if the double-quotes were NOT placed around the pipe operator, then the shell would take the sum total output of the for_each script and pipe that to a single invocation of the tensor2metric command. Since in this example it is instead desired for the pipe operator to be a part of the command string that is executed multiple times by the for_each script, it must be escaped using double-quotes.') - cmdline.add_argument('inputs', help='Each of the inputs for which processing should be run', nargs='+') - cmdline.add_argument('colon', help='Colon symbol (":") delimiting the for_each inputs & command-line options from the actual command to be executed', type=str, choices=[':']) - cmdline.add_argument('command', help='The command string to run for each input, containing any number of substitutions listed in the Description section', type=str) - cmdline.add_argument('-exclude', help='Exclude one specific input string / all strings matching a regular expression from being processed (see Example Usage)', action='append', metavar='"regex"', nargs=1) - cmdline.add_argument('-test', help='Test the operation of the for_each script, by printing the command strings following string substitution but not actually executing them', action='store_true', default=False) - - # Usage of for_each needs to be handled slightly differently here: - # We want argparse to parse only the contents of the command-line before the colon symbol, - # as these are the items that pertain to the invocation of the for_each script; - # anything after the colon should instead form a part of the command that - # for_each is responsible for executing - try: - index = next(i for i,s in enumerate(sys.argv) if s == ':') - try: - CMDSPLIT = sys.argv[index+1:] - sys.argv = sys.argv[:index+1] - sys.argv.append(' '.join(CMDSPLIT)) - except IndexError: - sys.stderr.write('Erroneous usage: No command specified (colon separator cannot be the last entry provided)\n') - sys.exit(0) - except StopIteration: - if len(sys.argv) > 2: - sys.stderr.write('Erroneous usage: A colon must be used to separate for_each inputs from the command to be executed\n') - sys.exit(0) - - - - - - - - -# These need to be globals in order to be accessible from execute_parallel() -class Shared(object): - def __init__(self): - self._job_index = 0 - self.lock = threading.Lock() - self.stop = False - def next(self, jobs): - job = None - with self.lock: - if self._job_index < len(jobs): - job = jobs[self._job_index] - self._job_index += 1 - self.stop = self._job_index == len(jobs) - return job - -shared = Shared() #pylint: disable=invalid-name - - - -KEYLIST = [ 'IN', 'NAME', 'PRE', 'UNI' ] - - - -def execute(): 
#pylint: disable=unused-variable - from mrtrix3 import ANSI, MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, run #pylint: disable=no-name-in-module, import-outside-toplevel - - inputs = app.ARGS.inputs - app.debug('All inputs: ' + str(inputs)) - app.debug('Command: ' + str(app.ARGS.command)) - app.debug('CMDSPLIT: ' + str(CMDSPLIT)) - - if app.ARGS.exclude: - app.ARGS.exclude = [ exclude[0] for exclude in app.ARGS.exclude ] # To deal with argparse's action=append. Always guaranteed to be only one argument since nargs=1 - app.debug('To exclude: ' + str(app.ARGS.exclude)) - exclude_unmatched = [ ] - to_exclude = [ ] - for exclude in app.ARGS.exclude: - if exclude in inputs: - to_exclude.append(exclude) - else: - try: - re_object = re.compile(exclude) - regex_hits = [ ] - for arg in inputs: - search_result = re_object.search(arg) - if search_result and search_result.group(): - regex_hits.append(arg) - if regex_hits: - app.debug('Inputs excluded via regex "' + exclude + '": ' + str(regex_hits)) - to_exclude.extend(regex_hits) - else: - app.debug('Compiled exclude regex "' + exclude + '" had no hits') - exclude_unmatched.append(exclude) - except re.error: - app.debug('Exclude string "' + exclude + '" did not compile as regex') - exclude_unmatched.append(exclude) - if exclude_unmatched: - app.warn('Item' + ('s' if len(exclude_unmatched) > 1 else '') + ' specified via -exclude did not result in item exclusion, whether by direct match or compilation as regex: ' + str('\'' + exclude_unmatched[0] + '\'' if len(exclude_unmatched) == 1 else exclude_unmatched)) - inputs = [ arg for arg in inputs if arg not in to_exclude ] - if not inputs: - raise MRtrixError('No inputs remaining after application of exclusion criteri' + ('on' if len(app.ARGS.exclude) == 1 else 'a')) - app.debug('Inputs after exclusion: ' + str(inputs)) - - common_prefix = os.path.commonprefix(inputs) - common_suffix = os.path.commonprefix([i[::-1] for i in inputs])[::-1] - app.debug('Common prefix: ' + common_prefix if common_prefix else 'No common prefix') - app.debug('Common suffix: ' + common_suffix if common_suffix else 'No common suffix') - - for entry in CMDSPLIT: - if os.path.exists(entry): - keys_present = [ key for key in KEYLIST if key in entry ] - if keys_present: - app.warn('Performing text substitution of ' + str(keys_present) + ' within command: "' + entry + '"; but the original text exists as a path on the file system... 
is this a problematic filesystem path?') - - try: - next(entry for entry in CMDSPLIT if any(key for key in KEYLIST if key in entry)) - except StopIteration: - raise MRtrixError('None of the unique for_each keys ' + str(KEYLIST) + ' appear in command string "' + app.ARGS.command + '"; no substitution can occur') - - class Entry(object): - def __init__(self, input_text): - self.input_text = input_text - self.sub_in = input_text - self.sub_name = os.path.basename(input_text.rstrip('/')) - self.sub_pre = os.path.splitext(self.sub_name.rstrip('.gz'))[0] - if common_suffix: - self.sub_uni = input_text[len(common_prefix):-len(common_suffix)] - else: - self.sub_uni = input_text[len(common_prefix):] - - self.substitutions = { 'IN': self.sub_in, 'NAME': self.sub_name, 'PRE': self.sub_pre, 'UNI': self.sub_uni } - app.debug('Input text: ' + input_text) - app.debug('Substitutions: ' + str(self.substitutions)) - - self.cmd = [ ] - for entry in CMDSPLIT: - for (key, value) in self.substitutions.items(): - entry = entry.replace(key, value) - if ' ' in entry: - entry = '"' + entry + '"' - self.cmd.append(entry) - app.debug('Resulting command: ' + str(self.cmd)) - - self.outputtext = None - self.returncode = None - - jobs = [ ] - for i in inputs: - jobs.append(Entry(i)) - - if app.ARGS.test: - app.console('Command strings for ' + str(len(jobs)) + ' jobs:') - for job in jobs: - sys.stderr.write(ANSI.execute + 'Input:' + ANSI.clear + ' "' + job.input_text + '"\n') - sys.stderr.write(ANSI.execute + 'Command:' + ANSI.clear + ' ' + ' '.join(job.cmd) + '\n') - return - - parallel = app.NUM_THREADS is not None and app.NUM_THREADS > 1 - - def progress_string(): - text = str(sum(1 if job.returncode is not None else 0 for job in jobs)) + \ - '/' + \ - str(len(jobs)) + \ - ' jobs completed ' + \ - ('across ' + str(app.NUM_THREADS) + ' threads' if parallel else 'sequentially') - fail_count = sum(bool(job.returncode) for job in jobs) - if fail_count: - text += ' (' + str(fail_count) + ' errors)' - return text - - progress = app.ProgressBar(progress_string(), len(jobs)) - - def execute_parallel(): - while not shared.stop: - my_job = shared.next(jobs) - if not my_job: - return - try: - result = run.command(' '.join(my_job.cmd), shell=True) - my_job.outputtext = result.stdout + result.stderr - my_job.returncode = 0 - except run.MRtrixCmdError as exception: - my_job.outputtext = str(exception) - my_job.returncode = exception.returncode - except Exception as exception: # pylint: disable=broad-except - my_job.outputtext = str(exception) - my_job.returncode = 1 - with shared.lock: - progress.increment(progress_string()) - - if parallel: - threads = [ ] - for i in range (1, app.NUM_THREADS): - thread = threading.Thread(target=execute_parallel) - thread.start() - threads.append(thread) - execute_parallel() - for thread in threads: - thread.join() - else: - for job in jobs: - try: - result = run.command(' '.join(job.cmd), shell=True) - job.outputtext = result.stdout + result.stderr - job.returncode = 0 - except run.MRtrixCmdError as exception: - job.outputtext = str(exception) - job.returncode = exception.returncode - except Exception as exception: # pylint: disable=broad-except - job.outputtext = str(exception) - job.returncode = 1 - progress.increment(progress_string()) - - progress.done() - - assert all(job.returncode is not None for job in jobs) - fail_count = sum(bool(job.returncode) for job in jobs) - if fail_count: - app.warn(str(fail_count) + ' of ' + str(len(jobs)) + ' jobs did not complete successfully') - if fail_count 
> 1: - app.warn('Outputs from failed commands:') - sys.stderr.write(app.EXEC_NAME + ':\n') - else: - app.warn('Output from failed command:') - for job in jobs: - if job.returncode: - if job.outputtext: - app.warn('For input "' + job.sub_in + '" (returncode = ' + str(job.returncode) + '):') - for line in job.outputtext.splitlines(): - sys.stderr.write(' ' * (len(app.EXEC_NAME)+2) + line + '\n') - else: - app.warn('No output from command for input "' + job.sub_in + '" (return code = ' + str(job.returncode) + ')') - if fail_count > 1: - sys.stderr.write(app.EXEC_NAME + ':\n') - raise MRtrixError(str(fail_count) + ' of ' + str(len(jobs)) + ' jobs did not complete successfully') - - if app.VERBOSITY > 1: - if any(job.outputtext for job in jobs): - sys.stderr.write(app.EXEC_NAME + ':\n') - for job in jobs: - if job.outputtext: - app.console('Output of command for input "' + job.sub_in + '":') - for line in job.outputtext.splitlines(): - sys.stderr.write(' ' * (len(app.EXEC_NAME)+2) + line + '\n') - else: - app.console('No output from command for input "' + job.sub_in + '"') - sys.stderr.write(app.EXEC_NAME + ':\n') - else: - app.console('No output from command for any inputs') - - app.console('Script reported successful completion for all inputs') - - - - - - -# Execute the script -import mrtrix3 -mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/gen_scheme b/bin/gen_scheme deleted file mode 100755 index 406a7c965b..0000000000 --- a/bin/gen_scheme +++ /dev/null @@ -1,147 +0,0 @@ -#!/bin/bash - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -set -e - -if [ "$#" -eq 0 ]; then - echo " - gen_scheme: part of the MRtrix package - -SYNOPSIS - - gen_scheme numPE [ bvalue ndir ]... - - numPE the number of phase-encoding directions to be included in - the scheme (most scanners will only support a single PE - direction per sequence, so this will typically be 1). - - bvalue the b-value of the shell - - ndir the number of directions to include in the shell - - -DESCRIPTION - - This script generates a diffusion gradient table according to the - parameters specified. For most users, something like the following would be - appropriate: - - gen_scheme 1 0 5 750 20 3000 60 - - which will geneate a multi-shell diffusion gradient table with a single - phase-encode direction, consisting of 5 b=0, 20 b=750, and 60 b=3000 - volumes. - - The gradient table is generated using the following procedure: - - - The directions for each shell are optimally distributed using a bipolar - electrostatic repulsion model (using the command 'dirgen'). - - - These are then split into numPE sets (if numPE != 1) using a brute-force - random search for the most optimally-distributed subsets (using the command - 'dirsplit'). - - - Each of the resulting sets is then rearranged by inversion of individual - directions through the origin (i.e. 
direction vector x => -x) using a - brute-force random search to find the most optimal combination in terms - of unipolar repulsion: this ensures near-uniform distribution over the - sphere to avoid biases in terms of eddy-current distortions, as - recommended for FSL's EDDY command (this step uses the 'dirflip' command). - - - Finally, all the individual subsets are merged (using the 'dirmerge' - command) into a single gradient table, in such a way as to maintain - near-uniformity upon truncation (in as much as is possible), in both - b-value and directional domains. In other words, the approach aims to - ensure that if the acquisition is cut short, the set of volumes acquired - nonetheless contains the same relative proportions of b-values as - specified, with directions that are near-uniformly distributed. - - The primary output of this command is a file called 'dw_scheme.txt', - consisting of a 5-column table, with one line per volume. Each column - consists of [ x y z b PE ], where [ x y z ] is the unit direction vector, b - is the b-value in unit of s/mm², and PE is a integer ID from 1 to numPE. - - The command also retains all of the subsets generated along the way, which - you can safely delete once the command has completed. Since this can - consist of quite a few files, it is recommended to run this command within - its own temporary folder. - - See also the 'dirstat' command to obtain simple metrics of quality for the - set produced. -" - exit 1 -else - - nPE=$1 - if [ $nPE -ne 1 ] && [ $nPE -ne 2 ] && [ $nPE -ne 4 ]; then - echo "ERROR: numPE should be one of 1, 2, 4" - exit 1 - fi - - shift - # store args for re-use: - ARGS=( "$@" ) - - # print parsed info for sanity-checking: - echo "generating scheme with $nPE phase-encode directions, with:" - while [ ! -z "$1" ]; do - echo " b = $1: $2 directions" - shift 2 - done - - perm="" #"-perm 1000" - - # reset args: - set -- "${ARGS[@]}" - merge="" - - while [ ! -z "$1" ]; do - echo "=====================================" - echo "generating directions for b = $1..." - echo "=====================================" - - merge=$merge" "$1 - - dirgen $2 dirs-b$1-$2.txt -force - if [ $nPE -gt 1 ]; then - dirsplit dirs-b$1-$2.txt dirs-b$1-$2-{1..2}.txt -force $perm - if [ $nPE -gt 2 ]; then - dirsplit dirs-b$1-$2-1.txt dirs-b$1-$2-1{1..2}.txt -force $perm - dirsplit dirs-b$1-$2-2.txt dirs-b$1-$2-2{1..2}.txt -force $perm - # TODO: the rest... - for n in dirs-b$1-$2-{1,2}{1,2}.txt; do - dirflip $n ${n%.txt}-flip.txt -force $perm - merge=$merge" "${n%.txt}-flip.txt - done - else - for n in dirs-b$1-$2-{1,2}.txt; do - dirflip $n ${n%.txt}-flip.txt -force $perm - merge=$merge" "${n%.txt}-flip.txt - done - fi - else - dirflip dirs-b$1-$2.txt dirs-b$1-$2-flip.txt -force $perm - merge=$merge" "dirs-b$1-$2-flip.txt - fi - - shift 2 - done - - echo $merge - dirmerge $nPE $merge dw_scheme.txt -force -fi - diff --git a/bin/labelsgmfix b/bin/labelsgmfix deleted file mode 100755 index 7e7d36ac4b..0000000000 --- a/bin/labelsgmfix +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Script for 'repairing' a FreeSurfer parcellation image -# FreeSurfer's sub-cortical structure segmentation has been observed to be highly variable -# under scan-rescan conditions. This introduces unwanted variability into the connectome, -# as the parcellations don't overlap with the sub-cortical segmentations provided by -# FIRST for the sake of Anatomically-Constrained Tractography. This script determines the -# node indices that correspond to these structures, and replaces them with estimates -# derived from FIRST. - - -# note: deal with these warnings properly when we drop support for Python 2: -# pylint: disable=unspecified-encoding - - -import math, os - - - -def usage(cmdline): #pylint: disable=unused-variable - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('In a FreeSurfer parcellation image, replace the sub-cortical grey matter structure delineations using FSL FIRST') - cmdline.add_citation('Patenaude, B.; Smith, S. M.; Kennedy, D. N. & Jenkinson, M. A Bayesian model of shape and appearance for subcortical brain segmentation. NeuroImage, 2011, 56, 907-922', is_external=True) - cmdline.add_citation('Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', is_external=True) - cmdline.add_citation('Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. The effects of SIFT on the reproducibility and biological accuracy of the structural connectome. 
NeuroImage, 2015, 104, 253-265') - cmdline.add_argument('parc', help='The input FreeSurfer parcellation image') - cmdline.add_argument('t1', help='The T1 image to be provided to FIRST') - cmdline.add_argument('lut', help='The lookup table file that the parcellated image is based on') - cmdline.add_argument('output', help='The output parcellation image') - cmdline.add_argument('-premasked', action='store_true', default=False, help='Indicate that brain masking has been applied to the T1 input image') - cmdline.add_argument('-sgm_amyg_hipp', action='store_true', default=False, help='Consider the amygdalae and hippocampi as sub-cortical grey matter structures, and also replace their estimates with those from FIRST') - - - - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, fsl, image, path, run, utils #pylint: disable=no-name-in-module, import-outside-toplevel - - if utils.is_windows(): - raise MRtrixError('Script cannot run on Windows due to FSL dependency') - - app.check_output_path(path.from_user(app.ARGS.output, False)) - image.check_3d_nonunity(path.from_user(app.ARGS.t1, False)) - - fsl_path = os.environ.get('FSLDIR', '') - if not fsl_path: - raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') - - first_cmd = fsl.exe_name('run_first_all') - - first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin') - if not os.path.isdir(first_atlas_path): - raise MRtrixError('Atlases required for FSL\'s FIRST program not installed;\nPlease install fsl-first-data using your relevant package manager') - - # Want a mapping between FreeSurfer node names and FIRST structure names - # Just deal with the 5 that are used in ACT; FreeSurfer's hippocampus / amygdala segmentations look good enough. 
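In addition to this name mapping, the replacement step further below needs the label index associated with each FreeSurfer structure name; those indices come from a FreeSurfer-style lookup table in which every non-comment line begins with an index followed by the structure name. A minimal sketch of that lookup is given here for illustration only (the function name and lut_path argument are hypothetical; the script performs the equivalent parsing of FreeSurferSGM.txt below):

def parse_lut(lut_path):
  # Map structure name -> label index from a FreeSurfer-style lookup table
  name_to_index = {}
  with open(lut_path) as lut_file:
    for line in lut_file:
      line = line.strip()
      if line and not line.startswith('#'):
        tokens = line.split()
        name_to_index[tokens[1]] = tokens[0]   # index kept as a string, as in the script below
  return name_to_index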
- structure_map = { 'L_Accu':'Left-Accumbens-area', 'R_Accu':'Right-Accumbens-area', - 'L_Caud':'Left-Caudate', 'R_Caud':'Right-Caudate', - 'L_Pall':'Left-Pallidum', 'R_Pall':'Right-Pallidum', - 'L_Puta':'Left-Putamen', 'R_Puta':'Right-Putamen', - 'L_Thal':'Left-Thalamus-Proper', 'R_Thal':'Right-Thalamus-Proper' } - if app.ARGS.sgm_amyg_hipp: - structure_map.update({ 'L_Amyg':'Left-Amygdala', 'R_Amyg':'Right-Amygdala', - 'L_Hipp':'Left-Hippocampus', 'R_Hipp':'Right-Hippocampus' }) - - t1_spacing = image.Header(path.from_user(app.ARGS.t1, False)).spacing() - upsample_for_first = False - # If voxel size is 1.25mm or larger, make a guess that the user has erroneously re-gridded their data - if math.pow(t1_spacing[0] * t1_spacing[1] * t1_spacing[2], 1.0/3.0) > 1.225: - app.warn('Voxel size of input T1 image larger than expected for T1-weighted images (' + str(t1_spacing) + '); ' - 'image will be resampled to 1mm isotropic in order to maximise chance of ' - 'FSL FIRST script succeeding') - upsample_for_first = True - - app.make_scratch_dir() - - # Get the parcellation and T1 images into the scratch directory, with conversion of the T1 into the correct format for FSL - run.command('mrconvert ' + path.from_user(app.ARGS.parc) + ' ' + path.to_scratch('parc.mif')) - if upsample_for_first: - run.command('mrgrid ' + path.from_user(app.ARGS.t1) + ' regrid - -voxel 1.0 -interp sinc | mrcalc - 0.0 -max - | mrconvert - ' + path.to_scratch('T1.nii') + ' -strides -1,+2,+3') - else: - run.command('mrconvert ' + path.from_user(app.ARGS.t1) + ' ' + path.to_scratch('T1.nii') + ' -strides -1,+2,+3') - - app.goto_scratch_dir() - - # Run FIRST - first_input_is_brain_extracted = '' - if app.ARGS.premasked: - first_input_is_brain_extracted = ' -b' - run.command(first_cmd + ' -m none -s ' + ','.join(structure_map.keys()) + ' -i T1.nii' + first_input_is_brain_extracted + ' -o first') - fsl.check_first('first', structure_map.keys()) - - # Generate an empty image that will be used to construct the new SGM nodes - run.command('mrcalc parc.mif 0 -min sgm.mif') - - # Read the local connectome LUT file - # This will map a structure name to an index - sgm_lut = {} - sgm_lut_file_name = 'FreeSurferSGM.txt' - sgm_lut_file_path = os.path.join(path.shared_data_path(), path.script_subdir_name(), sgm_lut_file_name) - with open(sgm_lut_file_path) as sgm_lut_file: - for line in sgm_lut_file: - line = line.rstrip() - if line and line[0]!='#': - line = line.split() - sgm_lut[line[1]] = line[0] # This can remain as a string - - # Convert FIRST meshes to node masks - # In this use case, don't want the PVE images; want to threshold at 0.5 - mask_list = [ ] - progress = app.ProgressBar('Generating mask images for SGM structures', len(structure_map)) - for key, value in structure_map.items(): - image_path = key + '_mask.mif' - mask_list.append(image_path) - vtk_in_path = 'first-' + key + '_first.vtk' - run.command('meshconvert ' + vtk_in_path + ' first-' + key + '_transformed.vtk -transform first2real T1.nii') - run.command('mesh2voxel first-' + key + '_transformed.vtk parc.mif - | mrthreshold - ' + image_path + ' -abs 0.5') - # Add to the SGM image; don't worry about overlap for now - node_index = sgm_lut[value] - run.command('mrcalc ' + image_path + ' ' + node_index + ' sgm.mif -if sgm_new.mif') - if not app.CONTINUE_OPTION: - run.function(os.remove, 'sgm.mif') - run.function(os.rename, 'sgm_new.mif', 'sgm.mif') - progress.increment() - progress.done() - - # Detect any overlapping voxels between the SGM masks, and set to zero - 
run.command(['mrmath', mask_list, 'sum', '-', '|', \ - 'mrcalc', '-', '1', '-gt', 'sgm_overlap_mask.mif']) - run.command('mrcalc sgm_overlap_mask.mif 0 sgm.mif -if sgm_masked.mif') - - # Convert the SGM label image to the indices that are required based on the user-provided LUT file - run.command('labelconvert sgm_masked.mif ' + sgm_lut_file_path + ' ' + path.from_user(app.ARGS.lut) + ' sgm_new_labels.mif') - - # For each SGM structure: - # * Figure out what index the structure has been mapped to; this can only be done using mrstats - # * Strip that index from the parcellation image - # * Insert the new delineation of that structure - progress = app.ProgressBar('Replacing SGM parcellations', len(structure_map)) - for struct in structure_map: - image_path = struct + '_mask.mif' - index = int(image.statistics('sgm_new_labels.mif', mask=image_path).median) - run.command('mrcalc parc.mif ' + str(index) + ' -eq 0 parc.mif -if parc_removed.mif') - run.function(os.remove, 'parc.mif') - run.function(os.rename, 'parc_removed.mif', 'parc.mif') - progress.increment() - progress.done() - - # Insert the new delineations of all SGM structures in a single call - # Enforce unsigned integer datatype of output image - run.command('mrcalc sgm_new_labels.mif 0.5 -gt sgm_new_labels.mif parc.mif -if result.mif -datatype uint32') - run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.parc, False), force=app.FORCE_OVERWRITE) - - - - - -# Execute the script -import mrtrix3 -mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/mrtrix3.py b/bin/mrtrix3.py deleted file mode 100644 index 12c0164325..0000000000 --- a/bin/mrtrix3.py +++ /dev/null @@ -1,92 +0,0 @@ - -# Copyright (c) 2008-2019 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -import os, sys - -try: - # since importlib code below only works on Python 3.5+ - # https://stackoverflow.com/a/50395128 - if sys.version_info < (3,5): - raise ImportError - - import importlib.util - - def imported(lib_path): - try: - spec = importlib.util.spec_from_file_location('mrtrix3', os.path.join (lib_path, 'mrtrix3', '__init__.py')) - module = importlib.util.module_from_spec (spec) - sys.modules[spec.name] = module - spec.loader.exec_module (module) - return True - except ImportError: - return False - -except ImportError: - try: - import imp - except ImportError: - print ('failed to import either imp or importlib module!') - sys.exit(1) - - def imported(lib_path): - success = False - fp = None - try: - fp, pathname, description = imp.find_module('mrtrix3', [ lib_path ]) - imp.load_module('mrtrix3', fp, pathname, description) - success = True - except ImportError: - pass - finally: - if fp: - fp.close() - return success - - -# Can the MRtrix3 Python modules be found based on their relative location to this file? 
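The importlib-based branch above amounts to loading the 'mrtrix3' package from an explicitly specified directory; a self-contained sketch of that pattern follows, for illustration only (the function name and the lib_path argument are hypothetical placeholders for a candidate library directory):

import importlib.util, os, sys

def load_mrtrix3_from(lib_path):
  spec = importlib.util.spec_from_file_location('mrtrix3', os.path.join(lib_path, 'mrtrix3', '__init__.py'))
  if spec is None or spec.loader is None:
    return False
  module = importlib.util.module_from_spec(spec)
  sys.modules[spec.name] = module
  try:
    # Execute the package's __init__.py, making it importable as 'mrtrix3'
    spec.loader.exec_module(module)
  except (ImportError, FileNotFoundError):
    return False
  return True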
-# Note that this includes the case where this file is a softlink within an external module, -# which provides a direct link to the core installation -if not imported (os.path.normpath (os.path.join ( \ - os.path.dirname (os.path.realpath (__file__)), os.pardir, 'lib') )): - - # If this file is a duplicate, which has been stored in an external module, - # we may be able to figure out the location of the core library using the - # build script. - - # case 1: build is a symbolic link: - if not imported (os.path.join (os.path.dirname (os.path.realpath ( \ - os.path.join (os.path.dirname(__file__), os.pardir, 'build'))), 'lib')): - - # case 2: build is a file containing the path to the core build script: - try: - with open (os.path.join (os.path.dirname(__file__), os.pardir, 'build')) as fp: - for line in fp: - build_path = line.split ('#',1)[0].strip() - if build_path: - break - except IOError: - pass - - if not imported (os.path.join (os.path.dirname (build_path), 'lib')): - - sys.stderr.write(''' -ERROR: Unable to locate MRtrix3 Python modules - -For detailed instructions, please refer to: -https://mrtrix.readthedocs.io/en/latest/tips_and_tricks/external_modules.html -''') - sys.stderr.flush() - sys.exit(1) diff --git a/bin/mrtrix_cleanup b/bin/mrtrix_cleanup deleted file mode 100755 index bfbe821304..0000000000 --- a/bin/mrtrix_cleanup +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# note: deal with these warnings properly when we drop support for Python 2: -# pylint: disable=unspecified-encoding - - -import math, os, re, shutil - - -POSTFIXES = [ 'B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB' ] - - -def usage(cmdline): #pylint: disable=unused-variable - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Clean up residual temporary files & scratch directories from MRtrix3 commands') - cmdline.add_description('This script will search the file system at the specified location (and in sub-directories thereof) for any temporary files or directories that have been left behind by failed or terminated MRtrix3 commands, and attempt to delete them.') - cmdline.add_description('Note that the script\'s search for temporary items will not extend beyond the user-specified filesystem location. This means that any built-in or user-specified default location for MRtrix3 piped data and scripts will not be automatically searched. Cleanup of such locations should instead be performed explicitly: e.g. 
"mrtrix_cleanup /tmp/" to remove residual piped images from /tmp/.') - cmdline.add_description('This script should not be run while other MRtrix3 commands are being executed: it may delete temporary items during operation that may lead to unexpected behaviour.') - cmdline.add_argument('path', help='Path from which to commence filesystem search') - cmdline.add_argument('-test', action='store_true', help='Run script in test mode: will list identified files / directories, but not attempt to delete them') - cmdline.add_argument('-failed', metavar='file', nargs=1, help='Write list of items that the script failed to delete to a text file') - cmdline.flag_mutually_exclusive_options([ 'test', 'failed' ]) - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import CONFIG #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel - - file_regex = re.compile(r"^mrtrix-tmp-[a-zA-Z0-9]{6}\..*$") - file_config_regex = re.compile(r"^" + CONFIG['TmpFilePrefix'] + r"[a-zA-Z0-9]{6}\..*$") \ - if 'TmpFilePrefix' in CONFIG and CONFIG['TmpFilePrefix'] != 'mrtrix-tmp-' \ - else None - dir_regex = re.compile(r"^\w+-tmp-[a-zA-Z0-9]{6}$") - dir_config_regex = re.compile(r"^" + CONFIG['ScriptScratchPrefix'] + r"[a-zA-Z0-9]{6}$") \ - if 'ScriptScratchPrefix' in CONFIG \ - else None - - files_to_delete = [ ] - dirs_to_delete = [ ] - root_dir = os.path.abspath(app.ARGS.path) - print_search_dir = ('' if os.path.abspath(os.getcwd()) == root_dir else ' from ' + root_dir) - def file_search(regex): - files_to_delete.extend([ os.path.join(dirname, filename) for filename in filter(regex.search, filelist) ]) - def dir_search(regex): - items = set(filter(regex.search, subdirlist)) - if items: - dirs_to_delete.extend([os.path.join(dirname, subdirname) for subdirname in items]) - subdirlist[:] = list(set(subdirlist)-items) - def print_msg(): - return 'Searching' + print_search_dir + ' (found ' + str(len(files_to_delete)) + ' files, ' + str(len(dirs_to_delete)) + ' directories)' - progress = app.ProgressBar(print_msg) - for dirname, subdirlist, filelist in os.walk(root_dir): - file_search(file_regex) - if file_config_regex: - file_search(file_config_regex) - dir_search(dir_regex) - if dir_config_regex: - dir_search(dir_config_regex) - progress.increment() - progress.done() - - if app.ARGS.test: - if files_to_delete: - app.console('Files identified (' + str(len(files_to_delete)) + '):') - for filepath in files_to_delete: - app.console(' ' + filepath) - else: - app.console('No files' + ('' if dirs_to_delete else ' or directories') + ' found') - if dirs_to_delete: - app.console('Directories identified (' + str(len(dirs_to_delete)) + '):') - for dirpath in dirs_to_delete: - app.console(' ' + dirpath) - elif files_to_delete: - app.console('No directories identified') - elif files_to_delete or dirs_to_delete: - progress = app.ProgressBar('Deleting temporaries (' + str(len(files_to_delete)) + ' files, ' + str(len(dirs_to_delete)) + ' directories)', len(files_to_delete) + len(dirs_to_delete)) - except_list = [ ] - size_deleted = 0 - for filepath in files_to_delete: - filesize = 0 - try: - filesize = os.path.getsize(filepath) - os.remove(filepath) - size_deleted += filesize - except OSError: - except_list.append(filepath) - progress.increment() - for dirpath in dirs_to_delete: - dirsize = 0 - try: - for dirname, subdirlist, filelist in os.walk(dirpath): - dirsize += sum(os.path.getsize(filename) for filename in filelist) - except 
OSError: - pass - try: - shutil.rmtree(dirpath) - size_deleted += dirsize - except OSError: - except_list.append(dirpath) - progress.increment() - progress.done() - postfix_index = int(math.floor(math.log(size_deleted, 1024))) if size_deleted else 0 - if postfix_index: - size_deleted = round(size_deleted / math.pow(1024, postfix_index), 2) - def print_freed(): - return ' (' + str(size_deleted) + POSTFIXES[postfix_index] + ' freed)' if size_deleted else '' - if except_list: - app.console(str(len(files_to_delete) + len(dirs_to_delete) - len(except_list)) + ' of ' + str(len(files_to_delete) + len(dirs_to_delete)) + ' items erased' + print_freed()) - if app.ARGS.failed: - with open(app.ARGS.failed, 'w') as outfile: - for item in except_list: - outfile.write(item + '\n') - app.console('List of items script failed to erase written to file "' + app.ARGS.failed + '"') - else: - app.console('Items that could not be erased:') - for item in except_list: - app.console(' ' + item) - else: - app.console('All items deleted successfully' + print_freed()) - else: - app.console('No files or directories found') - - - -# Execute the script -import mrtrix3 -mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/notfound b/bin/notfound deleted file mode 100755 index b9c5be81b4..0000000000 --- a/bin/notfound +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -if [ $# -eq 0 ]; then - cat << 'HELP_PAGE' -USAGE: - $ notfound base_directory search_string - - This is a simple script designed to help identify subjects that do not yet have a specific file generated. For example when adding new patients to a study. It is designed to be used when each patient has a folder containing their images. - - For example: - $ notfound study_folder fod.mif - will identify all subject folders (e.g. study_folder/subject001, study_folder/subject002, ...) that do NOT contain a file fod.mif - - Note that this can be used in combination with the foreach script. For example: - $ foreach $(notfound study_folder fod.mif) : dwi2fod IN/dwi.mif IN/response.txt IN/fod.mif -HELP_PAGE - -exit 1 - -fi - -find ${1} -mindepth 1 -maxdepth 1 \( -type l -o -type d \) '!' -exec test -e "{}/${2}" ';' -print - diff --git a/bin/population_template b/bin/population_template deleted file mode 100755 index ec2c2de67e..0000000000 --- a/bin/population_template +++ /dev/null @@ -1,1451 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Generates an unbiased group-average template via image registration of images to a midway space. - -# note: deal with these warnings properly when we drop support for Python 2: -# pylint: disable=unspecified-encoding,consider-using-f-string - -import json, math, os, re, shutil, sys - -DEFAULT_RIGID_SCALES = [0.3,0.4,0.6,0.8,1.0,1.0] -DEFAULT_RIGID_LMAX = [2,2,2,4,4,4] -DEFAULT_AFFINE_SCALES = [0.3,0.4,0.6,0.8,1.0,1.0] -DEFAULT_AFFINE_LMAX = [2,2,2,4,4,4] - -DEFAULT_NL_SCALES = [0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0] -DEFAULT_NL_NITER = [ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5] -DEFAULT_NL_LMAX = [ 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4] - -REGISTRATION_MODES = ['rigid', 'affine', 'nonlinear', 'rigid_affine', 'rigid_nonlinear', 'affine_nonlinear', 'rigid_affine_nonlinear'] - -AGGREGATION_MODES = ["mean", "median"] - -IMAGEEXT = 'mif nii mih mgh mgz img hdr'.split() - -def usage(cmdline): #pylint: disable=unused-variable - cmdline.set_author('David Raffelt (david.raffelt@florey.edu.au) & Max Pietsch (maximilian.pietsch@kcl.ac.uk) & Thijs Dhollander (thijs.dhollander@gmail.com)') - - cmdline.set_synopsis('Generates an unbiased group-average template from a series of images') - cmdline.add_description('First a template is optimised with linear registration (rigid and/or affine, both by default), then non-linear registration is used to optimise the template further.') - cmdline.add_argument("input_dir", nargs='+', help='Input directory containing all images used to build the template') - cmdline.add_argument("template", help='Corresponding output template image. For multi-contrast registration, provide multiple paired input_dir and template arguments. Example: WM_dir WM_template.mif GM_dir GM_template.mif') - - options = cmdline.add_argument_group('Multi-contrast options') - options.add_argument('-mc_weight_initial_alignment', help='Weight contribution of each contrast to the initial alignment. Comma separated, default: 1.0') - options.add_argument('-mc_weight_rigid', help='Weight contribution of each contrast to the objective of rigid registration. Comma separated, default: 1.0') - options.add_argument('-mc_weight_affine', help='Weight contribution of each contrast to the objective of affine registration. Comma separated, default: 1.0') - options.add_argument('-mc_weight_nl', help='Weight contribution of each contrast to the objective of nonlinear registration. Comma separated, default: 1.0') - - linoptions = cmdline.add_argument_group('Options for the linear registration') - linoptions.add_argument('-linear_no_pause', action='store_true', help='Do not pause the script if a linear registration seems implausible') - linoptions.add_argument('-linear_estimator', help='Specify estimator for intensity difference metric. Valid choices are: l1 (least absolute: |x|), l2 (ordinary least squares), lp (least powers: |x|^1.2), Default: None (no robust estimator used)') - linoptions.add_argument('-rigid_scale', help='Specify the multi-resolution pyramid used to build the rigid template, in the form of a list of scale factors (default: %s). 
This and affine_scale implicitly define the number of template levels' % ','.join([str(x) for x in DEFAULT_RIGID_SCALES]))
-  linoptions.add_argument('-rigid_lmax', help='Specify the lmax used for rigid registration for each scale factor, in the form of a list of integers (default: %s). The list must be the same length as the linear_scale factor list' % ','.join([str(x) for x in DEFAULT_RIGID_LMAX]))
-  linoptions.add_argument('-rigid_niter', help='Specify the number of registration iterations used within each level before updating the template, in the form of a list of integers (default: 50 for each scale). This must be a single number or a list of the same length as the linear_scale factor list')
-  linoptions.add_argument('-affine_scale', help='Specify the multi-resolution pyramid used to build the affine template, in the form of a list of scale factors (default: %s). This and rigid_scale implicitly define the number of template levels' % ','.join([str(x) for x in DEFAULT_AFFINE_SCALES]))
-  linoptions.add_argument('-affine_lmax', help='Specify the lmax used for affine registration for each scale factor, in the form of a list of integers (default: %s). The list must be the same length as the linear_scale factor list' % ','.join([str(x) for x in DEFAULT_AFFINE_LMAX]))
-  linoptions.add_argument('-affine_niter', help='Specify the number of registration iterations used within each level before updating the template, in the form of a list of integers (default: 500 for each scale). This must be a single number or a list of the same length as the linear_scale factor list')
-
-  nloptions = cmdline.add_argument_group('Options for the non-linear registration')
-  nloptions.add_argument('-nl_scale', help='Specify the multi-resolution pyramid used to build the non-linear template, in the form of a list of scale factors (default: %s). This implicitly defines the number of template levels' % ','.join([str(x) for x in DEFAULT_NL_SCALES]))
-  nloptions.add_argument('-nl_lmax', help='Specify the lmax used for non-linear registration for each scale factor, in the form of a list of integers (default: %s). The list must be the same length as the nl_scale factor list' % ','.join([str(x) for x in DEFAULT_NL_LMAX]))
-  nloptions.add_argument('-nl_niter', help='Specify the number of registration iterations used within each level before updating the template, in the form of a list of integers (default: %s). The list must be the same length as the nl_scale factor list' % ','.join([str(x) for x in DEFAULT_NL_NITER]))
-  nloptions.add_argument('-nl_update_smooth', default='2.0', help='Regularise the gradient update field with Gaussian smoothing (standard deviation in voxel units, Default 2.0 x voxel_size)')
-  nloptions.add_argument('-nl_disp_smooth', default='1.0', help='Regularise the displacement field with Gaussian smoothing (standard deviation in voxel units, Default 1.0 x voxel_size)')
-  nloptions.add_argument('-nl_grad_step', default='0.5', help='The gradient step size for non-linear registration (Default: 0.5)')
-
-  options = cmdline.add_argument_group('Input, output and general options')
-  options.add_argument('-type', help='Specify the types of registration stages to perform. Options are "rigid" (perform rigid registration only, which might be useful for intra-subject registration in longitudinal analysis), "affine" (perform affine registration) and "nonlinear" as well as combinations of registration types: %s. 
Default: rigid_affine_nonlinear' % ', '.join('"' + x + '"' for x in REGISTRATION_MODES if "_" in x), default='rigid_affine_nonlinear') - options.add_argument('-voxel_size', help='Define the template voxel size in mm. Use either a single value for isotropic voxels or 3 comma separated values.') - options.add_argument('-initial_alignment', default='mass', help='Method of alignment to form the initial template. Options are "mass" (default), "robust_mass" (requires masks), "geometric" and "none".') - options.add_argument('-mask_dir', help='Optionally input a set of masks inside a single directory, one per input image (with the same file name prefix). Using masks will speed up registration significantly. Note that masks are used for registration, not for aggregation. To exclude areas from aggregation, NaN-mask your input images.') - options.add_argument('-warp_dir', help='Output a directory containing warps from each input to the template. If the folder does not exist it will be created') - options.add_argument('-transformed_dir', help='Output a directory containing the input images transformed to the template. If the folder does not exist it will be created. For multi-contrast registration, provide comma separated list of directories.') - options.add_argument('-linear_transformations_dir', help='Output a directory containing the linear transformations used to generate the template. If the folder does not exist it will be created') - options.add_argument('-template_mask', help='Output a template mask. Only works if -mask_dir has been input. The template mask is computed as the intersection of all subject masks in template space.') - options.add_argument('-noreorientation', action='store_true', help='Turn off FOD reorientation in mrregister. Reorientation is on by default if the number of volumes in the 4th dimension corresponds to the number of coefficients in an antipodally symmetric spherical harmonic series (i.e. 6, 15, 28, 45, 66 etc)') - options.add_argument('-leave_one_out', help='Register each input image to a template that does not contain that image. Valid choices: 0, 1, auto. (Default: auto (true if n_subjects larger than 2 and smaller than 15)) ') - options.add_argument('-aggregate', help='Measure used to aggregate information from transformed images to the template image. Valid choices: %s. Default: mean' % ', '.join(AGGREGATION_MODES)) - options.add_argument('-aggregation_weights', help='Comma separated file containing weights used for weighted image aggregation. Each row must contain the identifiers of the input image and its weight. Note that this weighs intensity values not transformations (shape).') - options.add_argument('-nanmask', action='store_true', help='Optionally apply masks to (transformed) input images using NaN values to specify include areas for registration and aggregation. 
Only works if -mask_dir has been input.') - options.add_argument('-copy_input', action='store_true', help='Copy input images and masks into local scratch directory.') - -# ENH: add option to initialise warps / transformations - -# Binds raw_input() to input() in Python2, so that input() can be used -# and the code will work on both Python 2 and 3 -try: - input = raw_input #pylint: disable=redefined-builtin, invalid-name -except NameError: - pass - - -def abspath(arg, *args): - return os.path.abspath(os.path.join(arg, *args)) - - -def relpath(arg, *args): - from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel - return os.path.relpath(os.path.join(arg, *args), app.WORKING_DIR) - - -def copy(src, dst, follow_symlinks=True): - """Copy data but do not set mode bits. Return the file's destination. - - mimics shutil.copy but without setting mode bits as shutil.copymode can fail on exotic mounts - (observed on cifs with file_mode=0777). - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - if sys.version_info[0] > 2: - shutil.copyfile(src, dst, follow_symlinks=follow_symlinks) # pylint: disable=unexpected-keyword-arg - else: - shutil.copyfile(src, dst) - return dst - - -def check_linear_transformation(transformation, cmd, max_scaling=0.5, max_shear=0.2, max_rot=None, pause_on_warn=True): - from mrtrix3 import app, run, utils #pylint: disable=no-name-in-module, import-outside-toplevel - if max_rot is None: - max_rot = 2 * math.pi - - good = True - run.command('transformcalc ' + transformation + ' decompose ' + transformation + 'decomp') - if not os.path.isfile(transformation + 'decomp'): # does not exist if run with -continue option - app.console(transformation + 'decomp not found. skipping check') - return True - data = utils.load_keyval(transformation + 'decomp') - run.function(os.remove, transformation + 'decomp') - scaling = [float(value) for value in data['scaling']] - if any(a < 0 for a in scaling) or any(a > (1 + max_scaling) for a in scaling) or any( - a < (1 - max_scaling) for a in scaling): - app.warn("large scaling (" + str(scaling) + ") in " + transformation) - good = False - shear = [float(value) for value in data['shear']] - if any(abs(a) > max_shear for a in shear): - app.warn("large shear (" + str(shear) + ") in " + transformation) - good = False - rot_angle = float(data['angle_axis'][0]) - if abs(rot_angle) > max_rot: - app.warn("large rotation (" + str(rot_angle) + ") in " + transformation) - good = False - - if not good: - newcmd = [] - what = '' - init_rotation_found = False - skip = 0 - for element in cmd.split(): - if skip: - skip -= 1 - continue - if '_init_rotation' in element: - init_rotation_found = True - if '_init_matrix' in element: - skip = 1 - continue - if 'affine_scale' in element: - assert what != 'rigid' - what = 'affine' - elif 'rigid_scale' in element: - assert what != 'affine' - what = 'rigid' - newcmd.append(element) - newcmd = " ".join(newcmd) - if not init_rotation_found: - app.console("replacing the transformation obtained with:") - app.console(cmd) - if what: - newcmd += ' -' + what + '_init_translation mass -' + what + '_init_rotation search' - app.console("by the one obtained with:") - app.console(newcmd) - run.command(newcmd, force=True) - return check_linear_transformation(transformation, newcmd, max_scaling, max_shear, max_rot, pause_on_warn=pause_on_warn) - if pause_on_warn: - app.warn("you might want to manually repeat mrregister with different parameters and overwrite the transformation 
file: \n%s" % transformation) - app.console('The command that failed the test was: \n' + cmd) - app.console('Working directory: \n' + os.getcwd()) - input("press enter to continue population_template") - return good - - -def aggregate(inputs, output, contrast_idx, mode, force=True): - from mrtrix3 import MRtrixError, run # pylint: disable=no-name-in-module, import-outside-toplevel - - images = [inp.ims_transformed[contrast_idx] for inp in inputs] - if mode == 'mean': - run.command(['mrmath', images, 'mean', '-keep_unary_axes', output], force=force) - elif mode == 'median': - run.command(['mrmath', images, 'median', '-keep_unary_axes', output], force=force) - elif mode == 'weighted_mean': - weights = [inp.aggregation_weight for inp in inputs] - assert not any(w is None for w in weights), weights - wsum = sum(map(float, weights)) - if wsum <= 0: - raise MRtrixError("the sum of aggregetion weights has to be positive") - cmd = ['mrcalc'] - for weight, image in zip(weights, images): - if float(weight) != 0: - cmd += [image, weight, '-mult'] + (['-add'] if len(cmd) > 1 else []) - cmd += ['%.16f' % wsum, '-div', output] - run.command(cmd, force=force) - else: - raise MRtrixError("aggregation mode %s not understood" % mode) - - -def inplace_nan_mask(images, masks): - from mrtrix3 import run # pylint: disable=no-name-in-module, import-outside-toplevel - assert len(images) == len(masks), (len(images), len(masks)) - for image, mask in zip(images, masks): - target_dir = os.path.split(image)[0] - masked = os.path.join(target_dir, '__' + os.path.split(image)[1]) - run.command("mrcalc " + mask + " " + image + " nan -if " + masked, force=True) - run.function(shutil.move, masked, image) - - -def calculate_isfinite(inputs, contrasts): - from mrtrix3 import run, path # pylint: disable=no-name-in-module, import-outside-toplevel - agg_weights = [float(inp.aggregation_weight) for inp in inputs if inp.aggregation_weight is not None] - for cid in range(contrasts.n_contrasts): - for inp in inputs: - if contrasts.n_volumes[cid] > 0: - cmd = 'mrconvert ' + inp.ims_transformed[cid] + ' -coord 3 0 - | mrcalc - -finite' - else: - cmd = 'mrcalc ' + inp.ims_transformed[cid] + ' -finite' - if inp.aggregation_weight: - cmd += ' %s -mult ' % inp.aggregation_weight - cmd += ' isfinite%s/%s.mif' % (contrasts.suff[cid], inp.uid) - run.command(cmd, force=True) - for cid in range(contrasts.n_contrasts): - cmd = ['mrmath', path.all_in_dir('isfinite%s' % contrasts.suff[cid]), 'sum'] - if agg_weights: - agg_weight_norm = str(float(len(agg_weights)) / sum(agg_weights)) - cmd += ['-', '|', 'mrcalc', '-', agg_weight_norm, '-mult'] - run.command(cmd + [contrasts.isfinite_count[cid]], force=True) - - -def get_common_postfix(file_list): - return os.path.commonprefix([i[::-1] for i in file_list])[::-1] - - -def get_common_prefix(file_list): - return os.path.commonprefix(file_list) - - -class Contrasts(object): - """ - Class that parses arguments and holds information specific to each image contrast - - Attributes - ---------- - suff: list of str - identifiers used for contrast-specific filenames and folders ['_c0', '_c1', ...] 
- - names: list of str - derived from constrast-specific input folder - - templates_out: list of str - full path to output templates - - templates: list of str - holds current template names during registration - - n_volumes: list of int - number of volumes in each contrast - - fod_reorientation: list of bool - whether to perform FOD reorientation with mrtransform - - isfinite_count: list of str - filenames of images holding (weighted) number of finite-valued voxels across all images - - mc_weight_: list of str - contrast-specific weight used during initialisation / registration - - _weight_option: list of str - weight option to be passed to mrregister, = {'initial_alignment', 'rigid', 'affine', 'nl'} - - n_contrasts: int - - """ - - def __init__(self): - from mrtrix3 import MRtrixError, path, app # pylint: disable=no-name-in-module, import-outside-toplevel - - n_contrasts = len(app.ARGS.input_dir) - - self.suff = ["_c" + c for c in map(str, range(n_contrasts))] - self.names = [os.path.relpath(f, os.path.commonprefix(app.ARGS.input_dir)) for f in app.ARGS.input_dir] - - self.templates_out = [path.from_user(t, True) for t in app.ARGS.template] - - self.mc_weight_initial_alignment = [None for _ in range(self.n_contrasts)] - self.mc_weight_rigid = [None for _ in range(self.n_contrasts)] - self.mc_weight_affine = [None for _ in range(self.n_contrasts)] - self.mc_weight_nl = [None for _ in range(self.n_contrasts)] - self.initial_alignment_weight_option = [None for _ in range(self.n_contrasts)] - self.rigid_weight_option = [None for _ in range(self.n_contrasts)] - self.affine_weight_option = [None for _ in range(self.n_contrasts)] - self.nl_weight_option = [None for _ in range(self.n_contrasts)] - - self.isfinite_count = ['isfinite' + c + '.mif' for c in self.suff] - self.templates = [None for _ in range(self.n_contrasts)] - self.n_volumes = [None for _ in range(self.n_contrasts)] - self.fod_reorientation = [None for _ in range(self.n_contrasts)] - - - for mode in ['initial_alignment', 'rigid', 'affine', 'nl']: - opt = app.ARGS.__dict__.get('mc_weight_' + mode, None) - if opt: - if n_contrasts == 1: - raise MRtrixError('mc_weight_' + mode+' requires multiple input contrasts') - opt = opt.split(',') - if len(opt) != n_contrasts: - raise MRtrixError('mc_weight_' + mode+' needs to be defined for each contrast') - else: - opt = ["1"] * n_contrasts - self.__dict__['mc_weight_%s' % mode] = opt - self.__dict__['%s_weight_option' % mode] = ' -mc_weights '+','.join(opt)+' ' if n_contrasts > 1 else '' - - if len(self.templates_out) != n_contrasts: - raise MRtrixError('number of templates (%i) does not match number of input directories (%i)' % - (len(self.templates_out), n_contrasts)) - - @property - def n_contrasts(self): - return len(self.suff) - - def __repr__(self, *args, **kwargs): - text = '' - for cid in range(self.n_contrasts): - text += '\tcontrast: %s, template: %s, suffix: %s\n' % (self.names[cid], self.templates_out[cid], self.suff[cid]) - return text - - -class Input(object): - """ - Class that holds input information specific to a single image (multiple contrasts) - - Attributes - ---------- - uid: str - unique identifier for these input image(s), does not contain spaces - - ims_path: list of str - full path to input images, shell quoted OR paths to cached file if cache_local was called - - msk_path: str - full path to input mask, shell quoted OR path to cached file if cache_local was called - - ims_filenames : list of str - for each contrast the input file paths stripped of their respective 
directories. Used for final output only. - - msk_filename: str - as ims_filenames - - ims_transformed: list of str - input_transformed/.mif - - msk_transformed: list of str - mask_transformed/.mif - - aggregation_weight: float - weights used in image aggregation that forms the template. Has to be normalised across inputs. - - _im_directories : list of str - full path to user-provided input directories containing the input images, one for each contrast - - _msk_directory: str - full path to user-provided mask directory - - _local_ims: list of str - path to cached input images - - _local_msk: str - path to cached input mask - - Methods - ------- - cache_local() - copy files into folders in current working directory. modifies _local_ims and _local_msk - - """ - def __init__(self, uid, filenames, directories, contrasts, mask_filename='', mask_directory=''): - self.contrasts = contrasts - - self.uid = uid - assert self.uid, "UID empty" - assert self.uid.count(' ') == 0, 'UID "%s" contains whitespace' % self.uid - - assert len(directories) == len(filenames) - self.ims_filenames = filenames - self._im_directories = directories - - self.msk_filename = mask_filename - self._msk_directory = mask_directory - - n_contrasts = len(contrasts) - - self.ims_transformed = [os.path.join('input_transformed'+contrasts[cid], uid + '.mif') for cid in range(n_contrasts)] - self.msk_transformed = os.path.join('mask_transformed', uid + '.mif') - - self.aggregation_weight = None - - self._local_ims = [] - self._local_msk = None - - def __repr__(self, *args, **kwargs): - text = '\nInput [' - for key in sorted([k for k in self.__dict__ if not k.startswith('_')]): - text += '\n\t' + str(key) + ': ' + str(self.__dict__[key]) - text += '\n]' - return text - - def info(self): - message = ['input: ' + self.uid] - if self.aggregation_weight: - message += ['agg weight: ' + self.aggregation_weight] - for csuff, fname in zip(self.contrasts, self.ims_filenames): - message += [((csuff + ': ') if csuff else '') + '"' + fname + '"'] - if self.msk_filename: - message += ['mask: ' + self.msk_filename] - return ', '.join(message) - - def cache_local(self): - from mrtrix3 import run, path # pylint: disable=no-name-in-module, import-outside-toplevel - contrasts = self.contrasts - for cid, csuff in enumerate(contrasts): - if not os.path.isdir('input' + csuff): - path.make_dir('input' + csuff) - run.command('mrconvert ' + self.ims_path[cid] + ' ' + os.path.join('input' + csuff, self.uid + '.mif')) - self._local_ims = [os.path.join('input' + csuff, self.uid + '.mif') for csuff in contrasts] - if self.msk_filename: - if not os.path.isdir('mask'): - path.make_dir('mask') - run.command('mrconvert ' + self.msk_path + ' ' + os.path.join('mask', self.uid + '.mif')) - self._local_msk = os.path.join('mask', self.uid + '.mif') - - def get_ims_path(self, quoted=True): - """ return path to input images """ - from mrtrix3 import path # pylint: disable=no-name-in-module, import-outside-toplevel - if self._local_ims: - return self._local_ims - return [path.from_user(abspath(d, f), quoted) for d, f in zip(self._im_directories, self.ims_filenames)] - ims_path = property(get_ims_path) - - def get_msk_path(self, quoted=True): - """ return path to input mask """ - from mrtrix3 import path # pylint: disable=no-name-in-module, import-outside-toplevel - if self._local_msk: - return self._local_msk - return path.from_user(os.path.join(self._msk_directory, self.msk_filename), quoted) if self.msk_filename else None - msk_path = property(get_msk_path) - - -def 
parse_input_files(in_files, mask_files, contrasts, f_agg_weight=None, whitespace_repl='_'): - """ - matches input images across contrasts and pair them with masks. - extracts unique identifiers from mask and image filenames by stripping common pre and postfix (per contrast and for masks) - unique identifiers contain ASCII letters, numbers and '_' but no whitespace which is replaced by whitespace_repl - - in_files: list of lists - the inner list holds filenames specific to a contrast - - mask_files: - can be empty - - returns list of Input - - checks: 3d_nonunity - TODO check if no common grid & trafo across contrasts (only relevant for robust init?) - - """ - from mrtrix3 import MRtrixError, app, path, image # pylint: disable=no-name-in-module, import-outside-toplevel - contrasts = contrasts.suff - inputs = [] - def paths_to_file_uids(paths, prefix, postfix): - """ strip pre and postfix from filename, replace whitespace characters """ - uid_path = {} - uids = [] - for path in paths: - uid = re.sub(re.escape(postfix)+'$', '', re.sub('^'+re.escape(prefix), '', os.path.split(path)[1])) - uid = re.sub(r'\s+', whitespace_repl, uid) - if not uid: - raise MRtrixError('No uniquely identifiable part of filename "' + path + '" ' - 'after prefix and postfix substitution ' - 'with prefix "' + prefix + '" and postfix "' + postfix + '"') - app.debug('UID mapping: "' + path + '" --> "' + uid + '"') - if uid in uid_path: - raise MRtrixError('unique file identifier is not unique: "' + uid + '" mapped to "' + path + '" and "' + uid_path[uid] +'"') - uid_path[uid] = path - uids.append(uid) - return uids - - # mask uids - mask_uids = [] - if mask_files: - mask_common_postfix = get_common_postfix(mask_files) - if not mask_common_postfix: - raise MRtrixError('mask filenames do not have a common postfix') - mask_common_prefix = get_common_prefix([os.path.split(m)[1] for m in mask_files]) - mask_uids = paths_to_file_uids(mask_files, mask_common_prefix, mask_common_postfix) - if app.VERBOSITY > 1: - app.console('mask uids:' + str(mask_uids)) - - # images uids - common_postfix = [get_common_postfix(files) for files in in_files] - common_prefix = [get_common_prefix(files) for files in in_files] - # xcontrast_xsubject_pre_postfix: prefix and postfix of the common part across contrasts and subjects, - # without image extensions and leading or trailing '_' or '-' - xcontrast_xsubject_pre_postfix = [get_common_postfix(common_prefix).lstrip('_-'), - get_common_prefix([re.sub('.('+'|'.join(IMAGEEXT)+')(.gz)?$', '', pfix).rstrip('_-') for pfix in common_postfix])] - if app.VERBOSITY > 1: - app.console("common_postfix: " + str(common_postfix)) - app.console("common_prefix: " + str(common_prefix)) - app.console("xcontrast_xsubject_pre_postfix: " + str(xcontrast_xsubject_pre_postfix)) - for ipostfix, postfix in enumerate(common_postfix): - if not postfix: - raise MRtrixError('image filenames do not have a common postfix:\n' + '\n'.join(in_files[ipostfix])) - - c_uids = [] - for cid, files in enumerate(in_files): - c_uids.append(paths_to_file_uids(files, common_prefix[cid], common_postfix[cid])) - - if app.VERBOSITY > 1: - app.console('uids by contrast:' + str(c_uids)) - - # join images and masks - for ifile, fname in enumerate(in_files[0]): - uid = c_uids[0][ifile] - fnames = [fname] - dirs = [abspath(path.from_user(app.ARGS.input_dir[0], False))] - if len(contrasts) > 1: - for cid in range(1, len(contrasts)): - dirs.append(abspath(path.from_user(app.ARGS.input_dir[cid], False))) - 
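# (for each additional contrast: check that the matching image is genuinely 3-dimensional, i.e. has no unity-sized spatial dimensions, and that its UID agrees with the UID derived from the first contrast, before recording its filename)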
image.check_3d_nonunity(os.path.join(dirs[cid], in_files[cid][ifile])) - if uid != c_uids[cid][ifile]: - raise MRtrixError('no matching image was found for image %s and contrasts %s and %s.' % (fname, dirs[0], dirs[cid])) - fnames.append(in_files[cid][ifile]) - - if mask_files: - if uid not in mask_uids: - raise MRtrixError('no matching mask image was found for input image ' + fname + ' with uid "'+uid+'". ' - 'Mask uid candidates: ' + ', '.join(['"%s"' % m for m in mask_uids])) - index = mask_uids.index(uid) - # uid, filenames, directories, contrasts, mask_filename = '', mask_directory = '', agg_weight = None - inputs.append(Input(uid, fnames, dirs, contrasts, - mask_filename=mask_files[index], mask_directory=abspath(path.from_user(app.ARGS.mask_dir, False)))) - else: - inputs.append(Input(uid, fnames, dirs, contrasts)) - - # parse aggregation weights and match to inputs - if f_agg_weight: - import csv # pylint: disable=import-outside-toplevel - try: - with open(f_agg_weight, 'r') as fweights: - agg_weights = dict((row[0].strip(), row[1].strip()) for row in csv.reader(fweights, delimiter=',', quotechar='#')) - except UnicodeDecodeError: - with open(f_agg_weight, 'r') as fweights: - reader = csv.reader(fweights.read().decode('utf-8', errors='replace'), delimiter=',', quotechar='#') - agg_weights = dict((row[0].strip(), row[1].strip()) for row in reader) - pref = '^' + re.escape(get_common_prefix(list(agg_weights.keys()))) - suff = re.escape(get_common_postfix(list(agg_weights.keys()))) + '$' - agg_weights = {re.sub(suff, '', re.sub(pref, '', item[0])):item[1] for item in agg_weights.items()} - for inp in inputs: - if inp.uid not in agg_weights: - raise MRtrixError('aggregation weight not found for input "%s"' % inp.uid) - inp.aggregation_weight = agg_weights[inp.uid] - app.console('Using aggregation weights ' + f_agg_weight) - weights = [float(inp.aggregation_weight) for inp in inputs if inp.aggregation_weight is not None] - if sum(weights) <= 0: - raise MRtrixError('Sum of aggregation weights is not positive: ' + str(weights)) - if any(w < 0 for w in weights): - app.warn('Negative aggregation weights: ' + str(weights)) - - return inputs, xcontrast_xsubject_pre_postfix - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import MRtrixError, app, image, matrix, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - - if not app.ARGS.type in REGISTRATION_MODES: - raise MRtrixError("registration type must be one of %s. 
provided: %s" % (str(REGISTRATION_MODES), app.ARGS.type)) - dorigid = "rigid" in app.ARGS.type - doaffine = "affine" in app.ARGS.type - dolinear = dorigid or doaffine - dononlinear = "nonlinear" in app.ARGS.type - assert (dorigid + doaffine + dononlinear >= 1), "FIXME: registration type not valid" - - - input_output = app.ARGS.input_dir + [app.ARGS.template] - n_contrasts = len(input_output) // 2 - if len(input_output) != 2 * n_contrasts: - raise MRtrixError('expected two arguments per contrast, received %i: %s' % (len(input_output), ', '.join(input_output))) - if n_contrasts > 1: - app.console('Generating population template using multi-contrast registration') - - # reorder arguments for multi-contrast registration as after command line parsing app.ARGS.input_dir holds all but one argument - app.ARGS.input_dir = [] - app.ARGS.template = [] - for i_contrast in range(n_contrasts): - inargs = (input_output[i_contrast*2], input_output[i_contrast*2+1]) - if not os.path.isdir(inargs[0]): - raise MRtrixError('input directory %s not found' % inargs[0]) - app.ARGS.input_dir.append(relpath(inargs[0])) - app.ARGS.template.append(relpath(inargs[1])) - - cns = Contrasts() - app.debug(str(cns)) - - in_files = [sorted(path.all_in_dir(input_dir, dir_path=False)) for input_dir in app.ARGS.input_dir] - if len(in_files[0]) <= 1: - raise MRtrixError('Not enough images found in input directory ' + app.ARGS.input_dir[0] + - '. More than one image is needed to generate a population template') - if n_contrasts > 1: - for cid in range(1, n_contrasts): - if len(in_files[cid]) != len(in_files[0]): - raise MRtrixError('Found %i images in input directory %s ' % (len(app.ARGS.input_dir[0]), app.ARGS.input_dir[0]) + - 'but %i input images in %s.' % (len(app.ARGS.input_dir[cid]), app.ARGS.input_dir[cid])) - else: - app.console('Generating a population-average template from ' + str(len(in_files[0])) + ' input images') - if n_contrasts > 1: - app.console('using ' + str(len(in_files)) + ' contrasts for each input image') - - voxel_size = None - if app.ARGS.voxel_size: - voxel_size = app.ARGS.voxel_size.split(',') - if len(voxel_size) == 1: - voxel_size = voxel_size * 3 - try: - if len(voxel_size) != 3: - raise ValueError - [float(v) for v in voxel_size] #pylint: disable=expression-not-assigned - except ValueError: - raise MRtrixError('voxel size needs to be a single or three comma-separated floating point numbers; received: ' + str(app.ARGS.voxel_size)) - - agg_measure = 'mean' - if app.ARGS.aggregate is not None: - if not app.ARGS.aggregate in AGGREGATION_MODES: - app.error("aggregation type must be one of %s. provided: %s" % (str(AGGREGATION_MODES), app.ARGS.aggregate)) - agg_measure = app.ARGS.aggregate - - agg_weights = app.ARGS.aggregation_weights - if agg_weights is not None: - agg_measure = "weighted_" + agg_measure - if agg_measure != 'weighted_mean': - app.error("aggregation weights require '-aggregate mean' option. 
provided: %s" % (app.ARGS.aggregate)) - if not os.path.isfile(app.ARGS.aggregation_weights): - app.error("aggregation weights file not found: %s" % app.ARGS.aggregation_weights) - - initial_alignment = app.ARGS.initial_alignment - if initial_alignment not in ["mass", "robust_mass", "geometric", "none"]: - raise MRtrixError('initial_alignment must be one of ' + " ".join(["mass", "robust_mass", "geometric", "none"]) + " provided: " + str(initial_alignment)) - - linear_estimator = app.ARGS.linear_estimator - if linear_estimator and not linear_estimator.lower() == 'none': - if not dolinear: - raise MRtrixError('linear_estimator specified when no linear registration is requested') - if linear_estimator not in ["l1", "l2", "lp"]: - raise MRtrixError('linear_estimator must be one of ' + " ".join(["l1", "l2", "lp"]) + " provided: " + str(linear_estimator)) - - use_masks = False - mask_files = [] - if app.ARGS.mask_dir: - use_masks = True - app.ARGS.mask_dir = relpath(app.ARGS.mask_dir) - if not os.path.isdir(app.ARGS.mask_dir): - raise MRtrixError('mask directory not found') - mask_files = sorted(path.all_in_dir(app.ARGS.mask_dir, dir_path=False)) - if len(mask_files) < len(in_files[0]): - raise MRtrixError('there are not enough mask images for the number of images in the input directory') - - if not use_masks: - app.warn('no masks input. Use input masks to reduce computation time and improve robustness') - - if app.ARGS.template_mask and not use_masks: - raise MRtrixError('you cannot output a template mask because no subject masks were input using -mask_dir') - - nanmask_input = app.ARGS.nanmask - if nanmask_input and not use_masks: - raise MRtrixError('you cannot use NaN masking when no subject masks were input using -mask_dir') - - ins, xcontrast_xsubject_pre_postfix = parse_input_files(in_files, mask_files, cns, agg_weights) - - leave_one_out = 'auto' - if app.ARGS.leave_one_out is not None: - leave_one_out = app.ARGS.leave_one_out - if not leave_one_out in ['0', '1', 'auto']: - raise MRtrixError('leave_one_out not understood: ' + str(leave_one_out)) - if leave_one_out == 'auto': - leave_one_out = 2 < len(ins) < 15 - else: - leave_one_out = bool(int(leave_one_out)) - if leave_one_out: - app.console('performing leave-one-out registration') - # check that at sum of weights is positive for any grouping if weighted aggregation is used - weights = [float(inp.aggregation_weight) for inp in ins if inp.aggregation_weight is not None] - if weights and sum(weights) - max(weights) <= 0: - raise MRtrixError('leave-one-out registration requires positive aggregation weights in all groupings') - - noreorientation = app.ARGS.noreorientation - - do_pause_on_warn = True - if app.ARGS.linear_no_pause: - do_pause_on_warn = False - if not dolinear: - raise MRtrixError("linear option set when no linear registration is performed") - - if len(app.ARGS.template) != n_contrasts: - raise MRtrixError('mismatch between number of output templates (%i) ' % len(app.ARGS.template) + - 'and number of contrasts (%i)' % n_contrasts) - for templ in app.ARGS.template: - app.check_output_path(templ) - - if app.ARGS.warp_dir: - app.ARGS.warp_dir = relpath(app.ARGS.warp_dir) - app.check_output_path(app.ARGS.warp_dir) - - if app.ARGS.transformed_dir: - app.ARGS.transformed_dir = [relpath(d) for d in app.ARGS.transformed_dir.split(',')] - if len(app.ARGS.transformed_dir) != n_contrasts: - raise MRtrixError('require multiple comma separated transformed directories if multi-contrast registration is used') - for tdir in 
app.ARGS.transformed_dir: - app.check_output_path(tdir) - - if app.ARGS.linear_transformations_dir: - if not dolinear: - raise MRtrixError("linear option set when no linear registration is performed") - app.ARGS.linear_transformations_dir = relpath(app.ARGS.linear_transformations_dir) - app.check_output_path(app.ARGS.linear_transformations_dir) - - # automatically detect SH series in each contrast - do_fod_registration = False # in any contrast - cns.n_volumes = [] - cns.fod_reorientation = [] - for cid in range(n_contrasts): - header = image.Header(ins[0].get_ims_path(False)[cid]) - image_size = header.size() - if len(image_size) < 3 or len(image_size) > 4: - raise MRtrixError('only 3 and 4 dimensional images can be used to build a template') - if len(image_size) == 4: - cns.fod_reorientation.append(header.is_sh() and not noreorientation) - cns.n_volumes.append(image_size[3]) - do_fod_registration = do_fod_registration or cns.fod_reorientation[-1] - else: - cns.fod_reorientation.append(False) - cns.n_volumes.append(0) - if do_fod_registration: - app.console("SH Series detected, performing FOD registration in contrast: " + - ', '.join(app.ARGS.input_dir[cid] for cid in range(n_contrasts) if cns.fod_reorientation[cid])) - c_mrtransform_reorientation = [' -reorient_fod ' + ('yes' if cns.fod_reorientation[cid] else 'no') + ' ' - for cid in range(n_contrasts)] - - if nanmask_input: - app.console("NaN masking transformed images") - - # rigid options - if app.ARGS.rigid_scale: - rigid_scales = [float(x) for x in app.ARGS.rigid_scale.split(',')] - if not dorigid: - raise MRtrixError("rigid_scales option set when no rigid registration is performed") - else: - rigid_scales = DEFAULT_RIGID_SCALES - if app.ARGS.rigid_lmax: - if not dorigid: - raise MRtrixError("rigid_lmax option set when no rigid registration is performed") - rigid_lmax = [int(x) for x in app.ARGS.rigid_lmax.split(',')] - if do_fod_registration and len(rigid_scales) != len(rigid_lmax): - raise MRtrixError('rigid_scales and rigid_lmax schedules are not equal in length: scales stages: %s, lmax stages: %s' % (len(rigid_scales), len(rigid_lmax))) - else: - rigid_lmax = DEFAULT_RIGID_LMAX - - rigid_niter = [100] * len(rigid_scales) - if app.ARGS.rigid_niter: - if not dorigid: - raise MRtrixError("rigid_niter specified when no rigid registration is performed") - rigid_niter = [int(x) for x in app.ARGS.rigid_niter.split(',')] - if len(rigid_niter) == 1: - rigid_niter = rigid_niter * len(rigid_scales) - elif len(rigid_scales) != len(rigid_niter): - raise MRtrixError('rigid_scales and rigid_niter schedules are not equal in length: scales stages: %s, niter stages: %s' % (len(rigid_scales), len(rigid_niter))) - - # affine options - if app.ARGS.affine_scale: - affine_scales = [float(x) for x in app.ARGS.affine_scale.split(',')] - if not doaffine: - raise MRtrixError("affine_scale option set when no affine registration is performed") - else: - affine_scales = DEFAULT_AFFINE_SCALES - if app.ARGS.affine_lmax: - if not doaffine: - raise MRtrixError("affine_lmax option set when no affine registration is performed") - affine_lmax = [int(x) for x in app.ARGS.affine_lmax.split(',')] - if do_fod_registration and len(affine_scales) != len(affine_lmax): - raise MRtrixError('affine_scales and affine_lmax schedules are not equal in length: scales stages: %s, lmax stages: %s' % (len(affine_scales), len(affine_lmax))) - else: - affine_lmax = DEFAULT_AFFINE_LMAX - - affine_niter = [500] * len(affine_scales) - if app.ARGS.affine_niter: - if not doaffine: - raise 
MRtrixError("affine_niter specified when no affine registration is performed") - affine_niter = [int(x) for x in app.ARGS.affine_niter.split(',')] - if len(affine_niter) == 1: - affine_niter = affine_niter * len(affine_scales) - elif len(affine_scales) != len(affine_niter): - raise MRtrixError('affine_scales and affine_niter schedules are not equal in length: scales stages: %s, niter stages: %s' % (len(affine_scales), len(affine_niter))) - - linear_scales = [] - linear_lmax = [] - linear_niter = [] - linear_type = [] - if dorigid: - linear_scales += rigid_scales - linear_lmax += rigid_lmax - linear_niter += rigid_niter - linear_type += ['rigid'] * len(rigid_scales) - - if doaffine: - linear_scales += affine_scales - linear_lmax += affine_lmax - linear_niter += affine_niter - linear_type += ['affine'] * len(affine_scales) - - assert len(linear_type) == len(linear_scales) - assert len(linear_scales) == len(linear_niter) - if do_fod_registration: - if len(linear_lmax) != len(linear_niter): - mismatch = [] - if len(rigid_lmax) != len(rigid_niter): - mismatch += ['rigid: lmax stages: %s, niter stages: %s' % (len(rigid_lmax), len(rigid_niter))] - if len(affine_lmax) != len(affine_niter): - mismatch += ['affine: lmax stages: %s, niter stages: %s' % (len(affine_lmax), len(affine_niter))] - raise MRtrixError('linear registration: lmax and niter schedules are not equal in length: %s' % (', '.join(mismatch))) - app.console('-' * 60) - app.console('initial alignment of images: %s' % initial_alignment) - app.console('-' * 60) - if n_contrasts > 1: - for cid in range(n_contrasts): - app.console('\tcontrast "%s": %s, ' % (cns.suff[cid], cns.names[cid]) + - 'objective weight: %s' % cns.mc_weight_initial_alignment[cid]) - - if dolinear: - app.console('-' * 60) - app.console('linear registration stages:') - app.console('-' * 60) - if n_contrasts > 1: - for cid in range(n_contrasts): - msg = '\tcontrast "%s": %s' % (cns.suff[cid], cns.names[cid]) - if 'rigid' in linear_type: - msg += ', objective weight rigid: %s' % cns.mc_weight_rigid[cid] - if 'affine' in linear_type: - msg += ', objective weight affine: %s' % cns.mc_weight_affine[cid] - app.console(msg) - - if do_fod_registration: - for istage, [tpe, scale, lmax, niter] in enumerate(zip(linear_type, linear_scales, linear_lmax, linear_niter)): - app.console('(%02i) %s scale: %.4f, niter: %i, lmax: %i' % (istage, tpe.ljust(9), scale, niter, lmax)) - else: - for istage, [tpe, scale, niter] in enumerate(zip(linear_type, linear_scales, linear_niter)): - app.console('(%02i) %s scale: %.4f, niter: %i, no reorientation' % (istage, tpe.ljust(9), scale, niter)) - - datatype_option = ' -datatype float32' - outofbounds_option = ' -nan' - - if not dononlinear: - nl_scales = [] - nl_lmax = [] - nl_niter = [] - if app.ARGS.warp_dir: - raise MRtrixError('warp_dir specified when no nonlinear registration is performed') - else: - nl_scales = [float(x) for x in app.ARGS.nl_scale.split(',')] if app.ARGS.nl_scale else DEFAULT_NL_SCALES - nl_niter = [int(x) for x in app.ARGS.nl_niter.split(',')] if app.ARGS.nl_niter else DEFAULT_NL_NITER - nl_lmax = [int(x) for x in app.ARGS.nl_lmax.split(',')] if app.ARGS.nl_lmax else DEFAULT_NL_LMAX - - if len(nl_scales) != len(nl_niter): - raise MRtrixError('nl_scales and nl_niter schedules are not equal in length: scales stages: %s, niter stages: %s' % (len(nl_scales), len(nl_niter))) - - app.console('-' * 60) - app.console('nonlinear registration stages:') - app.console('-' * 60) - if n_contrasts > 1: - for cid in range(n_contrasts): - 
app.console('\tcontrast "%s": %s, objective weight: %s' % (cns.suff[cid], cns.names[cid], cns.mc_weight_nl[cid])) - - if do_fod_registration: - if len(nl_scales) != len(nl_lmax): - raise MRtrixError('nl_scales and nl_lmax schedules are not equal in length: scales stages: %s, lmax stages: %s' % (len(nl_scales), len(nl_lmax))) - - if do_fod_registration: - for istage, [scale, lmax, niter] in enumerate(zip(nl_scales, nl_lmax, nl_niter)): - app.console('(%02i) nonlinear scale: %.4f, niter: %i, lmax: %i' % (istage, scale, niter, lmax)) - else: - for istage, [scale, niter] in enumerate(zip(nl_scales, nl_niter)): - app.console('(%02i) nonlinear scale: %.4f, niter: %i, no reorientation' % (istage, scale, niter)) - - app.console('-' * 60) - app.console('input images:') - app.console('-' * 60) - for inp in ins: - app.console('\t' + inp.info()) - - app.make_scratch_dir() - app.goto_scratch_dir() - - for contrast in cns.suff: - path.make_dir('input_transformed' + contrast) - - for contrast in cns.suff: - path.make_dir('isfinite' + contrast) - - path.make_dir('linear_transforms_initial') - path.make_dir('linear_transforms') - for level in range(0, len(linear_scales)): - path.make_dir('linear_transforms_%02i' % level) - for level in range(0, len(nl_scales)): - path.make_dir('warps_%02i' % level) - - if use_masks: - path.make_dir('mask_transformed') - write_log = (app.VERBOSITY >= 2) - if write_log: - path.make_dir('log') - - if initial_alignment == 'robust_mass': - if not use_masks: - raise MRtrixError('robust_mass initial alignment requires masks') - path.make_dir('robust') - - if app.ARGS.copy_input: - app.console('Copying images into scratch directory') - for inp in ins: - inp.cache_local() - - # Make initial template in average space using first contrast - app.console('Generating initial template') - input_filenames = [inp.get_ims_path(False)[0] for inp in ins] - if voxel_size is None: - run.command(['mraverageheader', input_filenames, 'average_header.mif', '-fill']) - else: - run.command(['mraverageheader', '-fill', input_filenames, '-', '|', - 'mrgrid', '-', 'regrid', '-voxel', ','.join(map(str, voxel_size)), 'average_header.mif']) - - # crop average space to extent defined by original masks - if use_masks: - progress = app.ProgressBar('Importing input masks to average space for template cropping', len(ins)) - for inp in ins: - run.command('mrtransform ' + inp.msk_path + ' -interp nearest -template average_header.mif ' + inp.msk_transformed) - progress.increment() - progress.done() - run.command(['mrmath', [inp.msk_transformed for inp in ins], 'max', 'mask_initial.mif']) - run.command('mrgrid average_header.mif crop -mask mask_initial.mif average_header_cropped.mif') - run.function(os.remove, 'mask_initial.mif') - run.function(os.remove, 'average_header.mif') - run.function(shutil.move, 'average_header_cropped.mif', 'average_header.mif') - progress = app.ProgressBar('Erasing temporary mask images', len(ins)) - for inp in ins: - run.function(os.remove, inp.msk_transformed) - progress.increment() - progress.done() - - # create average space headers for other contrasts - if n_contrasts > 1: - avh3d = 'average_header3d.mif' - avh4d = 'average_header4d.mif' - if len(image.Header('average_header.mif').size()) == 3: - run.command('mrconvert average_header.mif ' + avh3d) - else: - run.command('mrconvert average_header.mif -coord 3 0 -axes 0,1,2 ' + avh3d) - run.command('mrconvert ' + avh3d + ' -axes 0,1,2,-1 ' + avh4d) - for cid in range(n_contrasts): - if cns.n_volumes[cid] == 0: - run.function(copy, 
avh3d, 'average_header' + cns.suff[cid] + '.mif') - elif cns.n_volumes[cid] == 1: - run.function(copy, avh4d, 'average_header' + cns.suff[cid] + '.mif') - else: - run.command('mrcat ' + ' '.join([avh3d] * cns.n_volumes[cid]) + ' -axis 3 average_header' + cns.suff[cid] + '.mif') - run.function(os.remove, avh3d) - run.function(os.remove, avh4d) - else: - run.function(shutil.move, 'average_header.mif', 'average_header' + cns.suff[0] + '.mif') - - cns.templates = ['average_header' + csuff + '.mif' for csuff in cns.suff] - - if initial_alignment == 'none': - progress = app.ProgressBar('Resampling input images to template space with no initial alignment', len(ins) * n_contrasts) - for inp in ins: - for cid in range(n_contrasts): - run.command('mrtransform ' + inp.ims_path[cid] + c_mrtransform_reorientation[cid] + ' -interp linear ' + - '-template ' + cns.templates[cid] + ' ' + inp.ims_transformed[cid] + - outofbounds_option + - datatype_option) - progress.increment() - progress.done() - - if use_masks: - progress = app.ProgressBar('Reslicing input masks to average header', len(ins)) - for inp in ins: - run.command('mrtransform ' + inp.msk_path + ' ' + inp.msk_transformed + ' ' + - '-interp nearest -template ' + cns.templates[0] + ' ' + - datatype_option) - progress.increment() - progress.done() - - if nanmask_input: - inplace_nan_mask([inp.ims_transformed[cid] for inp in ins for cid in range(n_contrasts)], - [inp.msk_transformed for inp in ins for cid in range(n_contrasts)]) - - if leave_one_out: - calculate_isfinite(ins, cns) - - if not dolinear: - for inp in ins: - with open(os.path.join('linear_transforms_initial', inp.uid + '.txt'), 'w') as fout: - fout.write('1 0 0 0\n0 1 0 0\n0 0 1 0\n0 0 0 1\n') - - run.function(copy, 'average_header' + cns.suff[0] + '.mif', 'average_header.mif') - - else: - progress = app.ProgressBar('Performing initial rigid registration to template', len(ins)) - mask_option = '' - cid = 0 - lmax_option = ' -rigid_lmax 0 ' if cns.fod_reorientation[cid] else ' -noreorientation ' - contrast_weight_option = cns.initial_alignment_weight_option - for inp in ins: - output_option = ' -rigid ' + os.path.join('linear_transforms_initial', inp.uid + '.txt') - images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, cns.templates)]) - if use_masks: - mask_option = ' -mask1 ' + inp.msk_path - if initial_alignment == 'robust_mass': - if not os.path.isfile('robust/template.mif'): - if cns.n_volumes[cid] > 0: - run.command('mrconvert ' + cns.templates[cid] + ' -coord 3 0 - | mrconvert - -axes 0,1,2 robust/template.mif') - else: - run.command('mrconvert ' + cns.templates[cid] + ' robust/template.mif') - if n_contrasts > 1: - cmd = ['mrcalc', inp.ims_path[cid], cns.mc_weight_initial_alignment[cid], '-mult'] - for cid in range(1, n_contrasts): - cmd += [inp.ims_path[cid], cns.mc_weight_initial_alignment[cid], '-mult', '-add'] - contrast_weight_option = '' - run.command(' '.join(cmd) + - ' - | mrfilter - zclean -zlower 3 -zupper 3 robust/image_' + inp.uid + '.mif' - ' -maskin ' + inp.msk_path + ' -maskout robust/mask_' + inp.uid + '.mif') - else: - run.command('mrfilter ' + inp.ims_path[0] + ' zclean -zlower 3 -zupper 3 robust/image_' + inp.uid + '.mif' + - ' -maskin ' + inp.msk_path + ' -maskout robust/mask_' + inp.uid + '.mif') - images = 'robust/image_' + inp.uid + '.mif robust/template.mif' - mask_option = ' -mask1 ' + 'robust/mask_' + inp.uid + '.mif' - lmax_option = '' - - run.command('mrregister ' + images + - mask_option + - ' -rigid_scale 1 ' + - ' -rigid_niter 0 ' + - ' -type 
rigid ' + - lmax_option + - contrast_weight_option + - ' -rigid_init_translation ' + initial_alignment.replace('robust_', '') + ' ' + - datatype_option + - output_option) - # translate input images to centre of mass without interpolation - for cid in range(n_contrasts): - run.command('mrtransform ' + inp.ims_path[cid] + c_mrtransform_reorientation[cid] + - ' -linear ' + os.path.join('linear_transforms_initial', inp.uid + '.txt') + - ' ' + inp.ims_transformed[cid] + "_translated.mif" + datatype_option) - if use_masks: - run.command('mrtransform ' + inp.msk_path + - ' -linear ' + os.path.join('linear_transforms_initial', inp.uid + '.txt') + - ' ' + inp.msk_transformed + "_translated.mif" + - datatype_option) - progress.increment() - # update average space of first contrast to new extent, delete other average space images - run.command('mraverageheader ' + ' '.join([inp.ims_transformed[cid] + '_translated.mif' for inp in ins]) + ' average_header_tight.mif') - progress.done() - - if voxel_size is None: - run.command('mrgrid average_header_tight.mif pad -uniform 10 average_header.mif', force=True) - else: - run.command('mrgrid average_header_tight.mif pad -uniform 10 - | ' - 'mrgrid - regrid -voxel ' + ','.join(map(str, voxel_size)) + ' average_header.mif', force=True) - run.function(os.remove, 'average_header_tight.mif') - for cid in range(1, n_contrasts): - run.function(os.remove, 'average_header' + cns.suff[cid] + '.mif') - - if use_masks: - # reslice masks - progress = app.ProgressBar('Reslicing input masks to average header', len(ins)) - for inp in ins: - run.command('mrtransform ' + inp.msk_transformed + '_translated.mif' + ' ' + inp.msk_transformed + ' ' + - '-interp nearest -template average_header.mif' + datatype_option) - progress.increment() - progress.done() - # crop average space to extent defined by translated masks - run.command(['mrmath', [inp.msk_transformed for inp in ins], 'max', 'mask_translated.mif']) - run.command('mrgrid average_header.mif crop -mask mask_translated.mif average_header_cropped.mif') - # pad average space to allow for deviation from initial alignment - run.command('mrgrid average_header_cropped.mif pad -uniform 10 average_header.mif', force=True) - run.function(os.remove, 'average_header_cropped.mif') - # reslice masks - progress = app.ProgressBar('Reslicing masks to new padded average header', len(ins)) - for inp in ins: - run.command('mrtransform ' + inp.msk_transformed + '_translated.mif ' + inp.msk_transformed + ' ' + - '-interp nearest -template average_header.mif' + datatype_option, force=True) - run.function(os.remove, inp.msk_transformed + '_translated.mif') - progress.increment() - progress.done() - run.function(os.remove, 'mask_translated.mif') - - # reslice images - progress = app.ProgressBar('Reslicing input images to average header', len(ins) * n_contrasts) - for cid in range(n_contrasts): - for inp in ins: - run.command('mrtransform ' + c_mrtransform_reorientation[cid] + inp.ims_transformed[cid] + '_translated.mif ' + - inp.ims_transformed[cid] + ' ' + - ' -interp linear -template average_header.mif' + - outofbounds_option + - datatype_option) - run.function(os.remove, inp.ims_transformed[cid] + '_translated.mif') - progress.increment() - progress.done() - - if nanmask_input: - inplace_nan_mask([inp.ims_transformed[cid] for inp in ins for cid in range(n_contrasts)], - [inp.msk_transformed for inp in ins for cid in range(n_contrasts)]) - - if leave_one_out: - calculate_isfinite(ins, cns) - - cns.templates = ['initial_template' + contrast + 
'.mif' for contrast in cns.suff] - for cid in range(n_contrasts): - aggregate(ins, 'initial_template' + cns.suff[cid] + '.mif', cid, agg_measure) - if cns.n_volumes[cid] == 1: - run.function(shutil.move, 'initial_template' + cns.suff[cid] + '.mif', 'tmp.mif') - run.command('mrconvert tmp.mif initial_template' + cns.suff[cid] + '.mif -axes 0,1,2,-1') - - # Optimise template with linear registration - if not dolinear: - for inp in ins: - run.function(copy, os.path.join('linear_transforms_initial', inp.uid+'.txt'), - os.path.join('linear_transforms', inp.uid+'.txt')) - else: - level = 0 - regtype = linear_type[0] - def linear_msg(): - return 'Optimising template with linear registration (stage {0} of {1}; {2})'.format(level + 1, len(linear_scales), regtype) - progress = app.ProgressBar(linear_msg, len(linear_scales) * len(ins) * (1 + n_contrasts + int(use_masks))) - for level, (regtype, scale, niter, lmax) in enumerate(zip(linear_type, linear_scales, linear_niter, linear_lmax)): - for inp in ins: - initialise_option = '' - if use_masks: - mask_option = ' -mask1 ' + inp.msk_path - else: - mask_option = '' - lmax_option = ' -noreorientation' - metric_option = '' - mrregister_log_option = '' - if regtype == 'rigid': - scale_option = ' -rigid_scale ' + str(scale) - niter_option = ' -rigid_niter ' + str(niter) - regtype_option = ' -type rigid' - output_option = ' -rigid ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') - contrast_weight_option = cns.rigid_weight_option - if level > 0: - initialise_option = ' -rigid_init_matrix ' + os.path.join('linear_transforms_%02i' % (level - 1), inp.uid + '.txt') - if do_fod_registration: - lmax_option = ' -rigid_lmax ' + str(lmax) - if linear_estimator: - metric_option = ' -rigid_metric.diff.estimator ' + linear_estimator - if app.VERBOSITY >= 2: - mrregister_log_option = ' -info -rigid_log ' + os.path.join('log', inp.uid + contrast[cid] + "_" + str(level) + '.log') - else: - scale_option = ' -affine_scale ' + str(scale) - niter_option = ' -affine_niter ' + str(niter) - regtype_option = ' -type affine' - output_option = ' -affine ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') - contrast_weight_option = cns.affine_weight_option - if level > 0: - initialise_option = ' -affine_init_matrix ' + os.path.join('linear_transforms_%02i' % (level - 1), inp.uid + '.txt') - if do_fod_registration: - lmax_option = ' -affine_lmax ' + str(lmax) - if linear_estimator: - metric_option = ' -affine_metric.diff.estimator ' + linear_estimator - if write_log: - mrregister_log_option = ' -info -affine_log ' + os.path.join('log', inp.uid + contrast[cid] + "_" + str(level) + '.log') - - if leave_one_out: - tmpl = [] - for cid in range(n_contrasts): - isfinite = 'isfinite%s/%s.mif' % (cns.suff[cid], inp.uid) - weight = inp.aggregation_weight if inp.aggregation_weight is not None else '1' - # loo = (template * weighted sum - weight * this) / (weighted sum - weight) - run.command('mrcalc ' + cns.isfinite_count[cid] + ' ' + isfinite + ' -sub - | mrcalc ' + cns.templates[cid] + - ' ' + cns.isfinite_count[cid] + ' -mult ' + inp.ims_transformed[cid] + ' ' + weight + ' -mult ' + - ' -sub - -div loo_%s' % cns.templates[cid], force=True) - tmpl.append('loo_%s' % cns.templates[cid]) - images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, tmpl)]) - else: - images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, cns.templates)]) - command = 'mrregister ' + images + \ - initialise_option + \ - mask_option + \ - scale_option + \ - niter_option + \ 
- lmax_option + \ - regtype_option + \ - metric_option + \ - datatype_option + \ - contrast_weight_option + \ - output_option + \ - mrregister_log_option - run.command(command, force=True) - check_linear_transformation(os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt'), command, - pause_on_warn=do_pause_on_warn) - if leave_one_out: - for im_temp in tmpl: - run.function(os.remove, im_temp) - progress.increment() - - # Here we ensure the template doesn't drift or scale - # TODO matrix avarage might produce a large FOV for large rotations # pylint: disable=fixme - run.command('transformcalc ' + ' '.join(path.all_in_dir('linear_transforms_%02i' % level)) + - ' average linear_transform_average.txt -quiet', force=True) - if linear_type[level] == 'rigid': - run.command('transformcalc linear_transform_average.txt rigid linear_transform_average.txt -quiet', force=True) - run.command('transformcalc linear_transform_average.txt invert linear_transform_average_inv.txt -quiet', force=True) - - average_inv = matrix.load_transform('linear_transform_average_inv.txt') - if average_inv is not None: - for inp in ins: - transform = matrix.dot(matrix.load_transform(os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt')), average_inv) - matrix.save_transform(os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt'), transform, force=True) - - for cid in range(n_contrasts): - for inp in ins: - run.command('mrtransform ' + c_mrtransform_reorientation[cid] + inp.ims_path[cid] + - ' -template ' + cns.templates[cid] + - ' -linear ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') + - ' ' + inp.ims_transformed[cid] + - outofbounds_option + - datatype_option, - force=True) - progress.increment() - - if use_masks: - for inp in ins: - run.command('mrtransform ' + inp.msk_path + - ' -template ' + cns.templates[0] + - ' -interp nearest' + - ' -linear ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') + - ' ' + inp.msk_transformed, - force=True) - progress.increment() - - if nanmask_input: - inplace_nan_mask([inp.ims_transformed[cid] for inp in ins for cid in range(n_contrasts)], - [inp.msk_transformed for inp in ins for cid in range(n_contrasts)]) - - if leave_one_out: - calculate_isfinite(ins, cns) - - for cid in range(n_contrasts): - cns.templates[cid] = 'linear_template%02i%s.mif' % (level, cns.suff[cid]) - aggregate(ins, cns.templates[cid], cid, agg_measure) - if cns.n_volumes[cid] == 1: - run.function(shutil.move, cns.templates[cid], 'tmp.mif') - run.command('mrconvert tmp.mif ' + cns.templates[cid] + ' -axes 0,1,2,-1') - run.function(os.remove, 'tmp.mif') - - for entry in os.listdir('linear_transforms_%02i' % level): - run.function(copy, os.path.join('linear_transforms_%02i' % level, entry), os.path.join('linear_transforms', entry)) - progress.done() - - # Create a template mask for nl registration by taking the intersection of all transformed input masks and dilating - if use_masks and (dononlinear or app.ARGS.template_mask): - run.command(['mrmath', path.all_in_dir('mask_transformed')] + - 'min - | maskfilter - median - | maskfilter - dilate -npass 5 init_nl_template_mask.mif'.split(), force=True) - current_template_mask = 'init_nl_template_mask.mif' - - if dononlinear: - path.make_dir('warps') - level = 0 - def nonlinear_msg(): - return 'Optimising template with non-linear registration (stage {0} of {1})'.format(level + 1, len(nl_scales)) - progress = app.ProgressBar(nonlinear_msg, len(nl_scales) * len(ins)) - for level, (scale, niter, lmax) in 
enumerate(zip(nl_scales, nl_niter, nl_lmax)): - for inp in ins: - if level > 0: - initialise_option = ' -nl_init ' + os.path.join('warps_%02i' % (level - 1), inp.uid + '.mif') - scale_option = '' - else: - scale_option = ' -nl_scale ' + str(scale) - if not doaffine: # rigid or no previous linear stage - initialise_option = ' -rigid_init_matrix ' + os.path.join('linear_transforms', inp.uid + '.txt') - else: - initialise_option = ' -affine_init_matrix ' + os.path.join('linear_transforms', inp.uid + '.txt') - - if use_masks: - mask_option = ' -mask1 ' + inp.msk_path + ' -mask2 ' + current_template_mask - else: - mask_option = '' - - if do_fod_registration: - lmax_option = ' -nl_lmax ' + str(lmax) - else: - lmax_option = ' -noreorientation' - - contrast_weight_option = cns.nl_weight_option - - if leave_one_out: - tmpl = [] - for cid in range(n_contrasts): - isfinite = 'isfinite%s/%s.mif' % (cns.suff[cid], inp.uid) - weight = inp.aggregation_weight if inp.aggregation_weight is not None else '1' - # loo = (template * weighted sum - weight * this) / (weighted sum - weight) - run.command('mrcalc ' + cns.isfinite_count[cid] + ' ' + isfinite + ' -sub - | mrcalc ' + cns.templates[cid] + - ' ' + cns.isfinite_count[cid] + ' -mult ' + inp.ims_transformed[cid] + ' ' + weight + ' -mult ' + - ' -sub - -div loo_%s' % cns.templates[cid], force=True) - tmpl.append('loo_%s' % cns.templates[cid]) - images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, tmpl)]) - else: - images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, cns.templates)]) - run.command('mrregister ' + images + - ' -type nonlinear' + - ' -nl_niter ' + str(nl_niter[level]) + - ' -nl_warp_full ' + os.path.join('warps_%02i' % level, inp.uid + '.mif') + - ' -transformed ' + - ' -transformed '.join([inp.ims_transformed[cid] for cid in range(n_contrasts)]) + ' ' + - ' -nl_update_smooth ' + app.ARGS.nl_update_smooth + - ' -nl_disp_smooth ' + app.ARGS.nl_disp_smooth + - ' -nl_grad_step ' + app.ARGS.nl_grad_step + - initialise_option + - contrast_weight_option + - scale_option + - mask_option + - datatype_option + - outofbounds_option + - lmax_option, - force=True) - - if use_masks: - run.command('mrtransform ' + inp.msk_path + - ' -template ' + cns.templates[0] + - ' -warp_full ' + os.path.join('warps_%02i' % level, inp.uid + '.mif') + - ' ' + inp.msk_transformed + - ' -interp nearest ', - force=True) - - if leave_one_out: - for im_temp in tmpl: - run.function(os.remove, im_temp) - - if level > 0: - run.function(os.remove, os.path.join('warps_%02i' % (level - 1), inp.uid + '.mif')) - - progress.increment(nonlinear_msg()) - - if nanmask_input: - inplace_nan_mask([_inp.ims_transformed[cid] for _inp in ins for cid in range(n_contrasts)], - [_inp.msk_transformed for _inp in ins for cid in range(n_contrasts)]) - - if leave_one_out: - calculate_isfinite(ins, cns) - - for cid in range(n_contrasts): - cns.templates[cid] = 'nl_template%02i%s.mif' % (level, cns.suff[cid]) - aggregate(ins, cns.templates[cid], cid, agg_measure) - if cns.n_volumes[cid] == 1: - run.function(shutil.move, cns.templates[cid], 'tmp.mif') - run.command('mrconvert tmp.mif ' + cns.templates[cid] + ' -axes 0,1,2,-1') - run.function(os.remove, 'tmp.mif') - - if use_masks: - run.command(['mrmath', path.all_in_dir('mask_transformed')] + - 'min - | maskfilter - median - | '.split() + - ('maskfilter - dilate -npass 5 nl_template_mask' + str(level) + '.mif').split()) - current_template_mask = 'nl_template_mask' + str(level) + '.mif' - - if level < len(nl_scales) - 1: - if scale < 
nl_scales[level + 1]: - upsample_factor = nl_scales[level + 1] / scale - for inp in ins: - run.command('mrgrid ' + os.path.join('warps_%02i' % level, inp.uid + '.mif') + - ' regrid -scale %f tmp.mif' % upsample_factor, force=True) - run.function(shutil.move, 'tmp.mif', os.path.join('warps_%02i' % level, inp.uid + '.mif')) - else: - for inp in ins: - run.function(shutil.move, os.path.join('warps_%02i' % level, inp.uid + '.mif'), 'warps') - progress.done() - - for cid in range(n_contrasts): - run.command('mrconvert ' + cns.templates[cid] + ' ' + cns.templates_out[cid], - mrconvert_keyval='NULL', force=app.FORCE_OVERWRITE) - - if app.ARGS.warp_dir: - warp_path = path.from_user(app.ARGS.warp_dir, False) - if os.path.exists(warp_path): - run.function(shutil.rmtree, warp_path) - os.makedirs(warp_path) - progress = app.ProgressBar('Copying non-linear warps to output directory "' + warp_path + '"', len(ins)) - for inp in ins: - keyval = image.Header(os.path.join('warps', inp.uid + '.mif')).keyval() - keyval = dict((k, keyval[k]) for k in ('linear1', 'linear2')) - json_path = os.path.join('warps', inp.uid + '.json') - with open(json_path, 'w') as json_file: - json.dump(keyval, json_file) - run.command('mrconvert ' + os.path.join('warps', inp.uid + '.mif') + ' ' + - path.quote(os.path.join(warp_path, xcontrast_xsubject_pre_postfix[0] + - inp.uid + xcontrast_xsubject_pre_postfix[1] + '.mif')), - mrconvert_keyval=json_path, force=app.FORCE_OVERWRITE) - progress.increment() - progress.done() - - if app.ARGS.linear_transformations_dir: - linear_transformations_path = path.from_user(app.ARGS.linear_transformations_dir, False) - if os.path.exists(linear_transformations_path): - run.function(shutil.rmtree, linear_transformations_path) - os.makedirs(linear_transformations_path) - for inp in ins: - trafo = matrix.load_transform(os.path.join('linear_transforms', inp.uid + '.txt')) - matrix.save_transform(os.path.join(linear_transformations_path, - xcontrast_xsubject_pre_postfix[0] + inp.uid - + xcontrast_xsubject_pre_postfix[1] + '.txt'), - trafo, - force=app.FORCE_OVERWRITE) - - if app.ARGS.transformed_dir: - for cid, trdir in enumerate(app.ARGS.transformed_dir): - transformed_path = path.from_user(trdir, False) - if os.path.exists(transformed_path): - run.function(shutil.rmtree, transformed_path) - os.makedirs(transformed_path) - progress = app.ProgressBar('Copying transformed images to output directory "' + transformed_path + '"', len(ins)) - for inp in ins: - run.command(['mrconvert', inp.ims_transformed[cid], os.path.join(transformed_path, inp.ims_filenames[cid])], - mrconvert_keyval=inp.get_ims_path(False)[cid], force=app.FORCE_OVERWRITE) - progress.increment() - progress.done() - - if app.ARGS.template_mask: - run.command('mrconvert ' + current_template_mask + ' ' + path.from_user(app.ARGS.template_mask, True), - mrconvert_keyval='NULL', force=app.FORCE_OVERWRITE) - - -# Execute the script -import mrtrix3 -mrtrix3.execute() #pylint: disable=no-member diff --git a/bin/responsemean b/bin/responsemean deleted file mode 100755 index 5f5c3c06b4..0000000000 --- a/bin/responsemean +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - - -import math, os, sys - - - -def usage(cmdline): #pylint: disable=unused-variable - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and David Raffelt (david.raffelt@florey.edu.au)') - cmdline.set_synopsis('Calculate the mean response function from a set of text files') - cmdline.add_description('Example usage: ' + os.path.basename(sys.argv[0]) + ' input_response1.txt input_response2.txt input_response3.txt ... output_average_response.txt') - cmdline.add_description('All response function files provided must contain the same number of unique b-values (lines), as well as the same number of coefficients per line.') - cmdline.add_description('As long as the number of unique b-values is identical across all input files, the coefficients will be averaged. This is performed on the assumption that the actual acquired b-values are identical. This is however impossible for the ' + os.path.basename(sys.argv[0]) + ' command to determine based on the data provided; it is therefore up to the user to ensure that this requirement is satisfied.') - cmdline.add_argument('inputs', help='The input response functions', nargs='+') - cmdline.add_argument('output', help='The output mean response function') - cmdline.add_argument('-legacy', action='store_true', help='Use the legacy behaviour of former command \'average_response\': average response function coefficients directly, without compensating for global magnitude differences between input files') - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, matrix #pylint: disable=no-name-in-module, import-outside-toplevel - - app.check_output_path(app.ARGS.output) - - data = [ ] # 3D matrix: Subject, b-value, ZSH coefficient - for filepath in app.ARGS.inputs: - subject = matrix.load_matrix(filepath) - if any(len(line) != len(subject[0]) for line in subject[1:]): - raise MRtrixError('File \'' + filepath + '\' does not contain the same number of entries per line (multi-shell response functions must have the same number of coefficients per b-value; pad the data with zeroes if necessary)') - if data: - if len(subject) != len(data[0]): - raise MRtrixError('File \'' + filepath + '\' contains ' + str(len(subject)) + ' b-value' + ('s' if len(subject) > 1 else '') + ' (line' + ('s' if len(subject) > 1 else '') + '); this differs from the first file read (' + sys.argv[1] + '), which contains ' + str(len(data[0])) + ' line' + ('s' if len(data[0]) > 1 else '')) - if len(subject[0]) != len(data[0][0]): - raise MRtrixError('File \'' + filepath + '\' contains ' + str(len(subject[0])) + ' coefficient' + ('s' if len(subject[0]) > 1 else '') + ' per b-value (line); this differs from the first file read (' + sys.argv[1] + '), which contains ' + str(len(data[0][0])) + ' coefficient' + ('s' if len(data[0][0]) > 1 else '') + ' per line') - data.append(subject) - - app.console('Calculating mean RF across ' + str(len(data)) + ' inputs, with ' + str(len(data[0])) + ' b-value' + ('s' if len(data[0])>1 else '') + ' and lmax=' + 
str(2*(len(data[0][0])-1))) - - # Old approach: Just take the average across all subjects - # New approach: Calculate a multiplier to use for each subject, based on the geometric mean - # scaling factor required to bring the subject toward the group mean l=0 terms (across shells) - - mean_lzero_terms = [ sum(subject[row][0] for subject in data)/len(data) for row in range(len(data[0])) ] - app.debug('Mean l=0 terms: ' + str(mean_lzero_terms)) - - weighted_sum_coeffs = [[0.0] * len(data[0][0]) for _ in range(len(data[0]))] #pylint: disable=unused-variable - for subject in data: - if app.ARGS.legacy: - multiplier = 1.0 - else: - subj_lzero_terms = [line[0] for line in subject] - log_multiplier = 0.0 - for subj_lzero, mean_lzero in zip(subj_lzero_terms, mean_lzero_terms): - log_multiplier += math.log(mean_lzero / subj_lzero) - log_multiplier /= len(data[0]) - multiplier = math.exp(log_multiplier) - app.debug('Subject l=0 terms: ' + str(subj_lzero_terms)) - app.debug('Resulting multiplier: ' + str(multiplier)) - weighted_sum_coeffs = [ [ a + multiplier*b for a, b in zip(linea, lineb) ] for linea, lineb in zip(weighted_sum_coeffs, subject) ] - - mean_coeffs = [ [ f/len(data) for f in line ] for line in weighted_sum_coeffs ] - matrix.save_matrix(app.ARGS.output, mean_coeffs, force=app.FORCE_OVERWRITE) - - - - -# Execute the script -import mrtrix3 -mrtrix3.execute() #pylint: disable=no-member diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index ecb3208cf9..0000000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -sphinx-rtd-theme -sphinx-notfound-page - diff --git a/lib/mrtrix3/_5ttgen/deep_atropos.py b/lib/mrtrix3/_5ttgen/deep_atropos.py new file mode 100644 index 0000000000..665795a238 --- /dev/null +++ b/lib/mrtrix3/_5ttgen/deep_atropos.py @@ -0,0 +1,45 @@ +import os +from mrtrix3 import MRtrixError +from mrtrix3 import app, image, path, run + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('deep_atropos', parents=[base_parser]) + parser.set_author('Lucius S.
Fekonja (lucius.fekonja[at]charite.de)') + parser.set_synopsis('Generate the 5TT image based on a Deep Atropos segmentation image') + parser.add_argument('input', help='The input Deep Atropos segmentation image') + parser.add_argument('output', help='The output 5TT image') + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.output) + +def check_deep_atropos_input(image_path): + dim = image.Header(image_path).size() + if len(dim) != 3: + raise MRtrixError('Image \'' + image_path + '\' does not look like Deep Atropos segmentation (number of spatial dimensions is not 3)') + +def get_inputs(): #pylint: disable=unused-variable + check_deep_atropos_input(path.from_user(app.ARGS.input, False)) + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('input.mif')) + +def execute(): #pylint: disable=unused-variable + # Generate the images related to each tissue + run.command('mrcalc input.mif 1 -eq CSF.mif') + run.command('mrcalc input.mif 2 -eq cGM.mif') + run.command('mrcalc input.mif 3 -eq WM1.mif') + run.command('mrcalc input.mif 5 -eq WM2.mif') + run.command('mrcalc input.mif 6 -eq WM3.mif') + run.command('mrmath WM1.mif WM2.mif WM3.mif sum WM.mif') + run.command('mrcalc input.mif 4 -eq sGM.mif') + + # Create an empty lesion image + run.command('mrcalc WM.mif 0 -mul lsn.mif') + + # Convert into the 5tt format + run.command('mrcat cGM.mif sGM.mif WM.mif CSF.mif lsn.mif 5tt.mif -axis 3') + + if app.ARGS.nocrop: + run.function(os.rename, '5tt.mif', 'result.mif') + else: + run.command('mrmath 5tt.mif sum - -axis 3 | mrthreshold - - -abs 0.5 | mrgrid 5tt.mif crop result.mif -mask -') + + run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/share/mrtrix3/_5ttgen/FreeSurfer2ACT.txt b/share/mrtrix3/_5ttgen/FreeSurfer2ACT.txt deleted file mode 100644 index 5d0e9ca9fb..0000000000 --- a/share/mrtrix3/_5ttgen/FreeSurfer2ACT.txt +++ /dev/null @@ -1,930 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Config file to convert FreeSurfer volumetric segmentation outputs -# into a format that can be used in Anatomically-Constrained Tractography - -# This is in fact a configuration file just like those used in labelconfig -# to convert from a raw parcellation image into an image with contiguous -# node indices, for the purposes of constructing a structural connectome. -# As it turns out, this framework is appropriate here, where many FreeSurfer -# segmentations are mapped to particular tissue types.
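As a concrete illustration of that mapping (a minimal sketch only; none of the names below are part of MRtrix3 or of the deleted file): a config of this kind pairs an ACT tissue index with a FreeSurfer label name, and applying it amounts to a per-voxel lookup from label value, to label name, to one of the tissue indices enumerated immediately below. The parse_config helper, the value-to-name table and the toy voxel data are assumed examples only.

# Minimal sketch, assuming the two-column "tissue-index label-name" format used above;
# parse_config, lut and the voxel values are hypothetical illustrations, not MRtrix3 code.
def parse_config(lines):
  """Build {label name: ACT tissue index} from 'index name' rows, skipping comments/blanks."""
  mapping = {}
  for line in lines:
    line = line.split('#', 1)[0].strip()
    if not line:
      continue
    index, name = line.split(None, 1)
    mapping[name] = int(index)
  return mapping

config = parse_config(['3 Left-Cerebral-White-Matter',
                       '1 Left-Cerebral-Cortex',
                       '2 Left-Thalamus-Proper',
                       '4 Left-Lateral-Ventricle'])

# Assumed FreeSurfer-style lookup from integer voxel value to label name
lut = {2: 'Left-Cerebral-White-Matter', 3: 'Left-Cerebral-Cortex',
       10: 'Left-Thalamus-Proper', 4: 'Left-Lateral-Ventricle'}

# Toy "parcellation": each voxel value becomes a tissue index (0 = no tissue assigned)
voxels = [2, 2, 3, 10, 4, 0]
tissues = [config.get(lut.get(value), 0) for value in voxels]
print(tissues)  # [3, 3, 1, 2, 4, 0]

In MRtrix3 itself this collapse is performed by the labelconfig/labelconvert machinery referred to in the comment above, not by ad-hoc code of this sort.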
- -# ACT tissue types are as follows: -# 1: Cortical grey matter -# 2: Sub-cortical grey matter -# 3: White matter -# 4: CSF -# 5: Pathological tissue -# (0 is skipped, and reserved for non-bran voxels & tissues for which no -# tissue type has been specified) - -# This config file should be able to convert the labels from any of the -# following FreeSurfer output files that are generated by recon-all: -# aparc.a2009s+aseg.mgz -# aparc+aseg.mgz -# aseg.auto.mgz -# aseg.auto_noCCseg.mgz -# aseg.mgz - - -3 Left-Cerebral-White-Matter -1 Left-Cerebral-Cortex -4 Left-Lateral-Ventricle -4 Left-Inf-Lat-Vent -3 Left-Cerebellum-White-Matter -1 Left-Cerebellum-Cortex -2 Left-Thalamus -2 Left-Thalamus-Proper -2 Left-Caudate -2 Left-Putamen -2 Left-Pallidum -4 3rd-Ventricle -4 4th-Ventricle -3 Brain-Stem -1 Left-Hippocampus -1 Left-Amygdala -4 CSF -2 Left-Lesion -2 Left-Accumbens-area -3 Left-VentralDC -4 Left-vessel -4 Left-choroid-plexus - -3 Right-Cerebral-White-Matter -1 Right-Cerebral-Cortex -4 Right-Lateral-Ventricle -4 Right-Inf-Lat-Vent -3 Right-Cerebellum-White-Matter -1 Right-Cerebellum-Cortex -2 Right-Thalamus -2 Right-Thalamus-Proper -2 Right-Caudate -2 Right-Putamen -2 Right-Pallidum -1 Right-Hippocampus -1 Right-Amygdala -2 Right-Lesion -2 Right-Accumbens-area -3 Right-VentralDC -4 Right-vessel -4 Right-choroid-plexus - -4 5th-Ventricle -4 Left-Lateral-Ventricles -4 Right-Lateral-Ventricles -5 WM-hypointensities -5 Left-WM-hypointensities -5 Right-WM-hypointensities -5 non-WM-hypointensities -5 Left-non-WM-hypointensities -5 Right-non-WM-hypointensities -3 Optic-Chiasm -3 Corpus_Callosum -1 Left-Amygdala-Anterior -1 Right-Amygdala-Anterior - -3 Left-IntCapsule-Ant -3 Right-IntCapsule-Ant -3 Left-IntCapsule-Pos -3 Right-IntCapsule-Pos - -3 brainstem -3 DCG -3 Vermis -3 Midbrain -3 Pons -3 Medulla - -5 Left-Cortical-Dysplasia -5 Right-Cortical-Dysplasia - -3 Fornix -3 CC_Posterior -3 CC_Mid_Posterior -3 CC_Central -3 CC_Mid_Anterior -3 CC_Anterior - -1 ctx-lh-unknown -1 ctx-lh-bankssts -1 ctx-lh-caudalanteriorcingulate -1 ctx-lh-caudalmiddlefrontal -1 ctx-lh-corpuscallosum -1 ctx-lh-cuneus -1 ctx-lh-entorhinal -1 ctx-lh-fusiform -1 ctx-lh-inferiorparietal -1 ctx-lh-inferiortemporal -1 ctx-lh-isthmuscingulate -1 ctx-lh-lateraloccipital -1 ctx-lh-lateralorbitofrontal -1 ctx-lh-lingual -1 ctx-lh-medialorbitofrontal -1 ctx-lh-middletemporal -1 ctx-lh-parahippocampal -1 ctx-lh-paracentral -1 ctx-lh-parsopercularis -1 ctx-lh-parsorbitalis -1 ctx-lh-parstriangularis -1 ctx-lh-pericalcarine -1 ctx-lh-postcentral -1 ctx-lh-posteriorcingulate -1 ctx-lh-precentral -1 ctx-lh-precuneus -1 ctx-lh-rostralanteriorcingulate -1 ctx-lh-rostralmiddlefrontal -1 ctx-lh-superiorfrontal -1 ctx-lh-superiorparietal -1 ctx-lh-superiortemporal -1 ctx-lh-supramarginal -1 ctx-lh-frontalpole -1 ctx-lh-temporalpole -1 ctx-lh-transversetemporal -1 ctx-lh-insula - -1 ctx-rh-unknown -1 ctx-rh-bankssts -1 ctx-rh-caudalanteriorcingulate -1 ctx-rh-caudalmiddlefrontal -1 ctx-rh-corpuscallosum -1 ctx-rh-cuneus -1 ctx-rh-entorhinal -1 ctx-rh-fusiform -1 ctx-rh-inferiorparietal -1 ctx-rh-inferiortemporal -1 ctx-rh-isthmuscingulate -1 ctx-rh-lateraloccipital -1 ctx-rh-lateralorbitofrontal -1 ctx-rh-lingual -1 ctx-rh-medialorbitofrontal -1 ctx-rh-middletemporal -1 ctx-rh-parahippocampal -1 ctx-rh-paracentral -1 ctx-rh-parsopercularis -1 ctx-rh-parsorbitalis -1 ctx-rh-parstriangularis -1 ctx-rh-pericalcarine -1 ctx-rh-postcentral -1 ctx-rh-posteriorcingulate -1 ctx-rh-precentral -1 ctx-rh-precuneus -1 ctx-rh-rostralanteriorcingulate 
-1 ctx-rh-rostralmiddlefrontal -1 ctx-rh-superiorfrontal -1 ctx-rh-superiorparietal -1 ctx-rh-superiortemporal -1 ctx-rh-supramarginal -1 ctx-rh-frontalpole -1 ctx-rh-temporalpole -1 ctx-rh-transversetemporal -1 ctx-rh-insula - -3 wm-lh-unknown -3 wm-lh-bankssts -3 wm-lh-caudalanteriorcingulate -3 wm-lh-caudalmiddlefrontal -3 wm-lh-corpuscallosum -3 wm-lh-cuneus -3 wm-lh-entorhinal -3 wm-lh-fusiform -3 wm-lh-inferiorparietal -3 wm-lh-inferiortemporal -3 wm-lh-isthmuscingulate -3 wm-lh-lateraloccipital -3 wm-lh-lateralorbitofrontal -3 wm-lh-lingual -3 wm-lh-medialorbitofrontal -3 wm-lh-middletemporal -3 wm-lh-parahippocampal -3 wm-lh-paracentral -3 wm-lh-parsopercularis -3 wm-lh-parsorbitalis -3 wm-lh-parstriangularis -3 wm-lh-pericalcarine -3 wm-lh-postcentral -3 wm-lh-posteriorcingulate -3 wm-lh-precentral -3 wm-lh-precuneus -3 wm-lh-rostralanteriorcingulate -3 wm-lh-rostralmiddlefrontal -3 wm-lh-superiorfrontal -3 wm-lh-superiorparietal -3 wm-lh-superiortemporal -3 wm-lh-supramarginal -3 wm-lh-frontalpole -3 wm-lh-temporalpole -3 wm-lh-transversetemporal -3 wm-lh-insula - -3 wm-rh-unknown -3 wm-rh-bankssts -3 wm-rh-caudalanteriorcingulate -3 wm-rh-caudalmiddlefrontal -3 wm-rh-corpuscallosum -3 wm-rh-cuneus -3 wm-rh-entorhinal -3 wm-rh-fusiform -3 wm-rh-inferiorparietal -3 wm-rh-inferiortemporal -3 wm-rh-isthmuscingulate -3 wm-rh-lateraloccipital -3 wm-rh-lateralorbitofrontal -3 wm-rh-lingual -3 wm-rh-medialorbitofrontal -3 wm-rh-middletemporal -3 wm-rh-parahippocampal -3 wm-rh-paracentral -3 wm-rh-parsopercularis -3 wm-rh-parsorbitalis -3 wm-rh-parstriangularis -3 wm-rh-pericalcarine -3 wm-rh-postcentral -3 wm-rh-posteriorcingulate -3 wm-rh-precentral -3 wm-rh-precuneus -3 wm-rh-rostralanteriorcingulate -3 wm-rh-rostralmiddlefrontal -3 wm-rh-superiorfrontal -3 wm-rh-superiorparietal -3 wm-rh-superiortemporal -3 wm-rh-supramarginal -3 wm-rh-frontalpole -3 wm-rh-temporalpole -3 wm-rh-transversetemporal -3 wm-rh-insula - -1 ctx-lh-Unknown -1 ctx-lh-Corpus_callosum -1 ctx-lh-G_and_S_Insula_ONLY_AVERAGE -1 ctx-lh-G_cingulate-Isthmus -1 ctx-lh-G_cingulate-Main_part -1 ctx-lh-G_cingulate-caudal_ACC -1 ctx-lh-G_cingulate-rostral_ACC -1 ctx-lh-G_cingulate-posterior -1 ctx-lh-S_cingulate-caudal_ACC -1 ctx-lh-S_cingulate-rostral_ACC -1 ctx-lh-S_cingulate-posterior -1 ctx-lh-S_pericallosal-caudal -1 ctx-lh-S_pericallosal-rostral -1 ctx-lh-S_pericallosal-posterior -1 ctx-lh-G_cuneus -1 ctx-lh-G_frontal_inf-Opercular_part -1 ctx-lh-G_frontal_inf-Orbital_part -1 ctx-lh-G_frontal_inf-Triangular_part -1 ctx-lh-G_frontal_middle -1 ctx-lh-G_frontal_superior -1 ctx-lh-G_frontomarginal -1 ctx-lh-G_insular_long -1 ctx-lh-G_insular_short -1 ctx-lh-G_and_S_occipital_inferior -1 ctx-lh-G_occipital_middle -1 ctx-lh-G_occipital_superior -1 ctx-lh-G_occipit-temp_lat-Or_fusiform -1 ctx-lh-G_occipit-temp_med-Lingual_part -1 ctx-lh-G_occipit-temp_med-Parahippocampal_part -1 ctx-lh-G_orbital -1 ctx-lh-G_paracentral -1 ctx-lh-G_parietal_inferior-Angular_part -1 ctx-lh-G_parietal_inferior-Supramarginal_part -1 ctx-lh-G_parietal_superior -1 ctx-lh-G_postcentral -1 ctx-lh-G_precentral -1 ctx-lh-G_precuneus -1 ctx-lh-G_rectus -1 ctx-lh-G_subcallosal -1 ctx-lh-G_subcentral -1 ctx-lh-G_temporal_inferior -1 ctx-lh-G_temporal_middle -1 ctx-lh-G_temp_sup-G_temp_transv_and_interm_S -1 ctx-lh-G_temp_sup-Lateral_aspect -1 ctx-lh-G_temp_sup-Planum_polare -1 ctx-lh-G_temp_sup-Planum_tempolare -1 ctx-lh-G_and_S_transverse_frontopolar -1 ctx-lh-Lat_Fissure-ant_sgt-ramus_horizontal -1 ctx-lh-Lat_Fissure-ant_sgt-ramus_vertical -1 
ctx-lh-Lat_Fissure-post_sgt -1 ctx-lh-Medial_wall -1 ctx-lh-Pole_occipital -1 ctx-lh-Pole_temporal -1 ctx-lh-S_calcarine -1 ctx-lh-S_central -1 ctx-lh-S_central_insula -1 ctx-lh-S_cingulate-Main_part_and_Intracingulate -1 ctx-lh-S_cingulate-Marginalis_part -1 ctx-lh-S_circular_insula_anterior -1 ctx-lh-S_circular_insula_inferior -1 ctx-lh-S_circular_insula_superior -1 ctx-lh-S_collateral_transverse_ant -1 ctx-lh-S_collateral_transverse_post -1 ctx-lh-S_frontal_inferior -1 ctx-lh-S_frontal_middle -1 ctx-lh-S_frontal_superior -1 ctx-lh-S_frontomarginal -1 ctx-lh-S_intermedius_primus-Jensen -1 ctx-lh-S_intraparietal-and_Parietal_transverse -1 ctx-lh-S_occipital_anterior -1 ctx-lh-S_occipital_middle_and_Lunatus -1 ctx-lh-S_occipital_superior_and_transversalis -1 ctx-lh-S_occipito-temporal_lateral -1 ctx-lh-S_occipito-temporal_medial_and_S_Lingual -1 ctx-lh-S_orbital-H_shapped -1 ctx-lh-S_orbital_lateral -1 ctx-lh-S_orbital_medial-Or_olfactory -1 ctx-lh-S_paracentral -1 ctx-lh-S_parieto_occipital -1 ctx-lh-S_pericallosal -1 ctx-lh-S_postcentral -1 ctx-lh-S_precentral-Inferior-part -1 ctx-lh-S_precentral-Superior-part -1 ctx-lh-S_subcentral_ant -1 ctx-lh-S_subcentral_post -1 ctx-lh-S_suborbital -1 ctx-lh-S_subparietal -1 ctx-lh-S_supracingulate -1 ctx-lh-S_temporal_inferior -1 ctx-lh-S_temporal_superior -1 ctx-lh-S_temporal_transverse - -1 ctx-rh-Unknown -1 ctx-rh-Corpus_callosum -1 ctx-rh-G_and_S_Insula_ONLY_AVERAGE -1 ctx-rh-G_cingulate-Isthmus -1 ctx-rh-G_cingulate-Main_part -1 ctx-rh-G_cuneus -1 ctx-rh-G_frontal_inf-Opercular_part -1 ctx-rh-G_frontal_inf-Orbital_part -1 ctx-rh-G_frontal_inf-Triangular_part -1 ctx-rh-G_frontal_middle -1 ctx-rh-G_frontal_superior -1 ctx-rh-G_frontomarginal -1 ctx-rh-G_insular_long -1 ctx-rh-G_insular_short -1 ctx-rh-G_and_S_occipital_inferior -1 ctx-rh-G_occipital_middle -1 ctx-rh-G_occipital_superior -1 ctx-rh-G_occipit-temp_lat-Or_fusiform -1 ctx-rh-G_occipit-temp_med-Lingual_part -1 ctx-rh-G_occipit-temp_med-Parahippocampal_part -1 ctx-rh-G_orbital -1 ctx-rh-G_paracentral -1 ctx-rh-G_parietal_inferior-Angular_part -1 ctx-rh-G_parietal_inferior-Supramarginal_part -1 ctx-rh-G_parietal_superior -1 ctx-rh-G_postcentral -1 ctx-rh-G_precentral -1 ctx-rh-G_precuneus -1 ctx-rh-G_rectus -1 ctx-rh-G_subcallosal -1 ctx-rh-G_subcentral -1 ctx-rh-G_temporal_inferior -1 ctx-rh-G_temporal_middle -1 ctx-rh-G_temp_sup-G_temp_transv_and_interm_S -1 ctx-rh-G_temp_sup-Lateral_aspect -1 ctx-rh-G_temp_sup-Planum_polare -1 ctx-rh-G_temp_sup-Planum_tempolare -1 ctx-rh-G_and_S_transverse_frontopolar -1 ctx-rh-Lat_Fissure-ant_sgt-ramus_horizontal -1 ctx-rh-Lat_Fissure-ant_sgt-ramus_vertical -1 ctx-rh-Lat_Fissure-post_sgt -1 ctx-rh-Medial_wall -1 ctx-rh-Pole_occipital -1 ctx-rh-Pole_temporal -1 ctx-rh-S_calcarine -1 ctx-rh-S_central -1 ctx-rh-S_central_insula -1 ctx-rh-S_cingulate-Main_part_and_Intracingulate -1 ctx-rh-S_cingulate-Marginalis_part -1 ctx-rh-S_circular_insula_anterior -1 ctx-rh-S_circular_insula_inferior -1 ctx-rh-S_circular_insula_superior -1 ctx-rh-S_collateral_transverse_ant -1 ctx-rh-S_collateral_transverse_post -1 ctx-rh-S_frontal_inferior -1 ctx-rh-S_frontal_middle -1 ctx-rh-S_frontal_superior -1 ctx-rh-S_frontomarginal -1 ctx-rh-S_intermedius_primus-Jensen -1 ctx-rh-S_intraparietal-and_Parietal_transverse -1 ctx-rh-S_occipital_anterior -1 ctx-rh-S_occipital_middle_and_Lunatus -1 ctx-rh-S_occipital_superior_and_transversalis -1 ctx-rh-S_occipito-temporal_lateral -1 ctx-rh-S_occipito-temporal_medial_and_S_Lingual -1 ctx-rh-S_orbital-H_shapped -1 
ctx-rh-S_orbital_lateral -1 ctx-rh-S_orbital_medial-Or_olfactory -1 ctx-rh-S_paracentral -1 ctx-rh-S_parieto_occipital -1 ctx-rh-S_pericallosal -1 ctx-rh-S_postcentral -1 ctx-rh-S_precentral-Inferior-part -1 ctx-rh-S_precentral-Superior-part -1 ctx-rh-S_subcentral_ant -1 ctx-rh-S_subcentral_post -1 ctx-rh-S_suborbital -1 ctx-rh-S_subparietal -1 ctx-rh-S_supracingulate -1 ctx-rh-S_temporal_inferior -1 ctx-rh-S_temporal_superior -1 ctx-rh-S_temporal_transverse - -1 ctx-rh-G_cingulate-caudal_ACC -1 ctx-rh-G_cingulate-rostral_ACC -1 ctx-rh-G_cingulate-posterior -1 ctx-rh-S_cingulate-caudal_ACC -1 ctx-rh-S_cingulate-rostral_ACC -1 ctx-rh-S_cingulate-posterior -1 ctx-rh-S_pericallosal-caudal -1 ctx-rh-S_pericallosal-rostral -1 ctx-rh-S_pericallosal-posterior - -3 wm-lh-Unknown -3 wm-lh-Corpus_callosum -3 wm-lh-G_and_S_Insula_ONLY_AVERAGE -3 wm-lh-G_cingulate-Isthmus -3 wm-lh-G_cingulate-Main_part -3 wm-lh-G_cuneus -3 wm-lh-G_frontal_inf-Opercular_part -3 wm-lh-G_frontal_inf-Orbital_part -3 wm-lh-G_frontal_inf-Triangular_part -3 wm-lh-G_frontal_middle -3 wm-lh-G_frontal_superior -3 wm-lh-G_frontomarginal -3 wm-lh-G_insular_long -3 wm-lh-G_insular_short -3 wm-lh-G_and_S_occipital_inferior -3 wm-lh-G_occipital_middle -3 wm-lh-G_occipital_superior -3 wm-lh-G_occipit-temp_lat-Or_fusiform -3 wm-lh-G_occipit-temp_med-Lingual_part -3 wm-lh-G_occipit-temp_med-Parahippocampal_part -3 wm-lh-G_orbital -3 wm-lh-G_paracentral -3 wm-lh-G_parietal_inferior-Angular_part -3 wm-lh-G_parietal_inferior-Supramarginal_part -3 wm-lh-G_parietal_superior -3 wm-lh-G_postcentral -3 wm-lh-G_precentral -3 wm-lh-G_precuneus -3 wm-lh-G_rectus -3 wm-lh-G_subcallosal -3 wm-lh-G_subcentral -3 wm-lh-G_temporal_inferior -3 wm-lh-G_temporal_middle -3 wm-lh-G_temp_sup-G_temp_transv_and_interm_S -3 wm-lh-G_temp_sup-Lateral_aspect -3 wm-lh-G_temp_sup-Planum_polare -3 wm-lh-G_temp_sup-Planum_tempolare -3 wm-lh-G_and_S_transverse_frontopolar -3 wm-lh-Lat_Fissure-ant_sgt-ramus_horizontal -3 wm-lh-Lat_Fissure-ant_sgt-ramus_vertical -3 wm-lh-Lat_Fissure-post_sgt -3 wm-lh-Medial_wall -3 wm-lh-Pole_occipital -3 wm-lh-Pole_temporal -3 wm-lh-S_calcarine -3 wm-lh-S_central -3 wm-lh-S_central_insula -3 wm-lh-S_cingulate-Main_part_and_Intracingulate -3 wm-lh-S_cingulate-Marginalis_part -3 wm-lh-S_circular_insula_anterior -3 wm-lh-S_circular_insula_inferior -3 wm-lh-S_circular_insula_superior -3 wm-lh-S_collateral_transverse_ant -3 wm-lh-S_collateral_transverse_post -3 wm-lh-S_frontal_inferior -3 wm-lh-S_frontal_middle -3 wm-lh-S_frontal_superior -3 wm-lh-S_frontomarginal -3 wm-lh-S_intermedius_primus-Jensen -3 wm-lh-S_intraparietal-and_Parietal_transverse -3 wm-lh-S_occipital_anterior -3 wm-lh-S_occipital_middle_and_Lunatus -3 wm-lh-S_occipital_superior_and_transversalis -3 wm-lh-S_occipito-temporal_lateral -3 wm-lh-S_occipito-temporal_medial_and_S_Lingual -3 wm-lh-S_orbital-H_shapped -3 wm-lh-S_orbital_lateral -3 wm-lh-S_orbital_medial-Or_olfactory -3 wm-lh-S_paracentral -3 wm-lh-S_parieto_occipital -3 wm-lh-S_pericallosal -3 wm-lh-S_postcentral -3 wm-lh-S_precentral-Inferior-part -3 wm-lh-S_precentral-Superior-part -3 wm-lh-S_subcentral_ant -3 wm-lh-S_subcentral_post -3 wm-lh-S_suborbital -3 wm-lh-S_subparietal -3 wm-lh-S_supracingulate -3 wm-lh-S_temporal_inferior -3 wm-lh-S_temporal_superior -3 wm-lh-S_temporal_transverse - -3 wm-rh-Unknown -3 wm-rh-Corpus_callosum -3 wm-rh-G_and_S_Insula_ONLY_AVERAGE -3 wm-rh-G_cingulate-Isthmus -3 wm-rh-G_cingulate-Main_part -3 wm-rh-G_cuneus -3 wm-rh-G_frontal_inf-Opercular_part -3 
wm-rh-G_frontal_inf-Orbital_part -3 wm-rh-G_frontal_inf-Triangular_part -3 wm-rh-G_frontal_middle -3 wm-rh-G_frontal_superior -3 wm-rh-G_frontomarginal -3 wm-rh-G_insular_long -3 wm-rh-G_insular_short -3 wm-rh-G_and_S_occipital_inferior -3 wm-rh-G_occipital_middle -3 wm-rh-G_occipital_superior -3 wm-rh-G_occipit-temp_lat-Or_fusiform -3 wm-rh-G_occipit-temp_med-Lingual_part -3 wm-rh-G_occipit-temp_med-Parahippocampal_part -3 wm-rh-G_orbital -3 wm-rh-G_paracentral -3 wm-rh-G_parietal_inferior-Angular_part -3 wm-rh-G_parietal_inferior-Supramarginal_part -3 wm-rh-G_parietal_superior -3 wm-rh-G_postcentral -3 wm-rh-G_precentral -3 wm-rh-G_precuneus -3 wm-rh-G_rectus -3 wm-rh-G_subcallosal -3 wm-rh-G_subcentral -3 wm-rh-G_temporal_inferior -3 wm-rh-G_temporal_middle -3 wm-rh-G_temp_sup-G_temp_transv_and_interm_S -3 wm-rh-G_temp_sup-Lateral_aspect -3 wm-rh-G_temp_sup-Planum_polare -3 wm-rh-G_temp_sup-Planum_tempolare -3 wm-rh-G_and_S_transverse_frontopolar -3 wm-rh-Lat_Fissure-ant_sgt-ramus_horizontal -3 wm-rh-Lat_Fissure-ant_sgt-ramus_vertical -3 wm-rh-Lat_Fissure-post_sgt -3 wm-rh-Medial_wall -3 wm-rh-Pole_occipital -3 wm-rh-Pole_temporal -3 wm-rh-S_calcarine -3 wm-rh-S_central -3 wm-rh-S_central_insula -3 wm-rh-S_cingulate-Main_part_and_Intracingulate -3 wm-rh-S_cingulate-Marginalis_part -3 wm-rh-S_circular_insula_anterior -3 wm-rh-S_circular_insula_inferior -3 wm-rh-S_circular_insula_superior -3 wm-rh-S_collateral_transverse_ant -3 wm-rh-S_collateral_transverse_post -3 wm-rh-S_frontal_inferior -3 wm-rh-S_frontal_middle -3 wm-rh-S_frontal_superior -3 wm-rh-S_frontomarginal -3 wm-rh-S_intermedius_primus-Jensen -3 wm-rh-S_intraparietal-and_Parietal_transverse -3 wm-rh-S_occipital_anterior -3 wm-rh-S_occipital_middle_and_Lunatus -3 wm-rh-S_occipital_superior_and_transversalis -3 wm-rh-S_occipito-temporal_lateral -3 wm-rh-S_occipito-temporal_medial_and_S_Lingual -3 wm-rh-S_orbital-H_shapped -3 wm-rh-S_orbital_lateral -3 wm-rh-S_orbital_medial-Or_olfactory -3 wm-rh-S_paracentral -3 wm-rh-S_parieto_occipital -3 wm-rh-S_pericallosal -3 wm-rh-S_postcentral -3 wm-rh-S_precentral-Inferior-part -3 wm-rh-S_precentral-Superior-part -3 wm-rh-S_subcentral_ant -3 wm-rh-S_subcentral_post -3 wm-rh-S_suborbital -3 wm-rh-S_subparietal -3 wm-rh-S_supracingulate -3 wm-rh-S_temporal_inferior -3 wm-rh-S_temporal_superior -3 wm-rh-S_temporal_transverse - -3 Left-UnsegmentedWhiteMatter -3 Right-UnsegmentedWhiteMatter - -1 ctx_lh_Unknown -1 ctx_lh_G_and_S_frontomargin -1 ctx_lh_G_and_S_occipital_inf -1 ctx_lh_G_and_S_paracentral -1 ctx_lh_G_and_S_subcentral -1 ctx_lh_G_and_S_transv_frontopol -1 ctx_lh_G_and_S_cingul-Ant -1 ctx_lh_G_and_S_cingul-Mid-Ant -1 ctx_lh_G_and_S_cingul-Mid-Post -1 ctx_lh_G_cingul-Post-dorsal -1 ctx_lh_G_cingul-Post-ventral -1 ctx_lh_G_cuneus -1 ctx_lh_G_front_inf-Opercular -1 ctx_lh_G_front_inf-Orbital -1 ctx_lh_G_front_inf-Triangul -1 ctx_lh_G_front_middle -1 ctx_lh_G_front_sup -1 ctx_lh_G_Ins_lg_and_S_cent_ins -1 ctx_lh_G_insular_short -1 ctx_lh_G_occipital_middle -1 ctx_lh_G_occipital_sup -1 ctx_lh_G_oc-temp_lat-fusifor -1 ctx_lh_G_oc-temp_med-Lingual -1 ctx_lh_G_oc-temp_med-Parahip -1 ctx_lh_G_orbital -1 ctx_lh_G_pariet_inf-Angular -1 ctx_lh_G_pariet_inf-Supramar -1 ctx_lh_G_parietal_sup -1 ctx_lh_G_postcentral -1 ctx_lh_G_precentral -1 ctx_lh_G_precuneus -1 ctx_lh_G_rectus -1 ctx_lh_G_subcallosal -1 ctx_lh_G_temp_sup-G_T_transv -1 ctx_lh_G_temp_sup-Lateral -1 ctx_lh_G_temp_sup-Plan_polar -1 ctx_lh_G_temp_sup-Plan_tempo -1 ctx_lh_G_temporal_inf -1 ctx_lh_G_temporal_middle -1 
ctx_lh_Lat_Fis-ant-Horizont -1 ctx_lh_Lat_Fis-ant-Vertical -1 ctx_lh_Lat_Fis-post -1 ctx_lh_Medial_wall -1 ctx_lh_Pole_occipital -1 ctx_lh_Pole_temporal -1 ctx_lh_S_calcarine -1 ctx_lh_S_central -1 ctx_lh_S_cingul-Marginalis -1 ctx_lh_S_circular_insula_ant -1 ctx_lh_S_circular_insula_inf -1 ctx_lh_S_circular_insula_sup -1 ctx_lh_S_collat_transv_ant -1 ctx_lh_S_collat_transv_post -1 ctx_lh_S_front_inf -1 ctx_lh_S_front_middle -1 ctx_lh_S_front_sup -1 ctx_lh_S_interm_prim-Jensen -1 ctx_lh_S_intrapariet_and_P_trans -1 ctx_lh_S_oc_middle_and_Lunatus -1 ctx_lh_S_oc_sup_and_transversal -1 ctx_lh_S_occipital_ant -1 ctx_lh_S_oc-temp_lat -1 ctx_lh_S_oc-temp_med_and_Lingual -1 ctx_lh_S_orbital_lateral -1 ctx_lh_S_orbital_med-olfact -1 ctx_lh_S_orbital-H_Shaped -1 ctx_lh_S_parieto_occipital -1 ctx_lh_S_pericallosal -1 ctx_lh_S_postcentral -1 ctx_lh_S_precentral-inf-part -1 ctx_lh_S_precentral-sup-part -1 ctx_lh_S_suborbital -1 ctx_lh_S_subparietal -1 ctx_lh_S_temporal_inf -1 ctx_lh_S_temporal_sup -1 ctx_lh_S_temporal_transverse - -1 ctx_rh_Unknown -1 ctx_rh_G_and_S_frontomargin -1 ctx_rh_G_and_S_occipital_inf -1 ctx_rh_G_and_S_paracentral -1 ctx_rh_G_and_S_subcentral -1 ctx_rh_G_and_S_transv_frontopol -1 ctx_rh_G_and_S_cingul-Ant -1 ctx_rh_G_and_S_cingul-Mid-Ant -1 ctx_rh_G_and_S_cingul-Mid-Post -1 ctx_rh_G_cingul-Post-dorsal -1 ctx_rh_G_cingul-Post-ventral -1 ctx_rh_G_cuneus -1 ctx_rh_G_front_inf-Opercular -1 ctx_rh_G_front_inf-Orbital -1 ctx_rh_G_front_inf-Triangul -1 ctx_rh_G_front_middle -1 ctx_rh_G_front_sup -1 ctx_rh_G_Ins_lg_and_S_cent_ins -1 ctx_rh_G_insular_short -1 ctx_rh_G_occipital_middle -1 ctx_rh_G_occipital_sup -1 ctx_rh_G_oc-temp_lat-fusifor -1 ctx_rh_G_oc-temp_med-Lingual -1 ctx_rh_G_oc-temp_med-Parahip -1 ctx_rh_G_orbital -1 ctx_rh_G_pariet_inf-Angular -1 ctx_rh_G_pariet_inf-Supramar -1 ctx_rh_G_parietal_sup -1 ctx_rh_G_postcentral -1 ctx_rh_G_precentral -1 ctx_rh_G_precuneus -1 ctx_rh_G_rectus -1 ctx_rh_G_subcallosal -1 ctx_rh_G_temp_sup-G_T_transv -1 ctx_rh_G_temp_sup-Lateral -1 ctx_rh_G_temp_sup-Plan_polar -1 ctx_rh_G_temp_sup-Plan_tempo -1 ctx_rh_G_temporal_inf -1 ctx_rh_G_temporal_middle -1 ctx_rh_Lat_Fis-ant-Horizont -1 ctx_rh_Lat_Fis-ant-Vertical -1 ctx_rh_Lat_Fis-post -1 ctx_rh_Medial_wall -1 ctx_rh_Pole_occipital -1 ctx_rh_Pole_temporal -1 ctx_rh_S_calcarine -1 ctx_rh_S_central -1 ctx_rh_S_cingul-Marginalis -1 ctx_rh_S_circular_insula_ant -1 ctx_rh_S_circular_insula_inf -1 ctx_rh_S_circular_insula_sup -1 ctx_rh_S_collat_transv_ant -1 ctx_rh_S_collat_transv_post -1 ctx_rh_S_front_inf -1 ctx_rh_S_front_middle -1 ctx_rh_S_front_sup -1 ctx_rh_S_interm_prim-Jensen -1 ctx_rh_S_intrapariet_and_P_trans -1 ctx_rh_S_oc_middle_and_Lunatus -1 ctx_rh_S_oc_sup_and_transversal -1 ctx_rh_S_occipital_ant -1 ctx_rh_S_oc-temp_lat -1 ctx_rh_S_oc-temp_med_and_Lingual -1 ctx_rh_S_orbital_lateral -1 ctx_rh_S_orbital_med-olfact -1 ctx_rh_S_orbital-H_Shaped -1 ctx_rh_S_parieto_occipital -1 ctx_rh_S_pericallosal -1 ctx_rh_S_postcentral -1 ctx_rh_S_precentral-inf-part -1 ctx_rh_S_precentral-sup-part -1 ctx_rh_S_suborbital -1 ctx_rh_S_subparietal -1 ctx_rh_S_temporal_inf -1 ctx_rh_S_temporal_sup -1 ctx_rh_S_temporal_transverse - -3 wm_lh_Unknown -3 wm_lh_G_and_S_frontomargin -3 wm_lh_G_and_S_occipital_inf -3 wm_lh_G_and_S_paracentral -3 wm_lh_G_and_S_subcentral -3 wm_lh_G_and_S_transv_frontopol -3 wm_lh_G_and_S_cingul-Ant -3 wm_lh_G_and_S_cingul-Mid-Ant -3 wm_lh_G_and_S_cingul-Mid-Post -3 wm_lh_G_cingul-Post-dorsal -3 wm_lh_G_cingul-Post-ventral -3 wm_lh_G_cuneus -3 wm_lh_G_front_inf-Opercular -3 
wm_lh_G_front_inf-Orbital -3 wm_lh_G_front_inf-Triangul -3 wm_lh_G_front_middle -3 wm_lh_G_front_sup -3 wm_lh_G_Ins_lg_and_S_cent_ins -3 wm_lh_G_insular_short -3 wm_lh_G_occipital_middle -3 wm_lh_G_occipital_sup -3 wm_lh_G_oc-temp_lat-fusifor -3 wm_lh_G_oc-temp_med-Lingual -3 wm_lh_G_oc-temp_med-Parahip -3 wm_lh_G_orbital -3 wm_lh_G_pariet_inf-Angular -3 wm_lh_G_pariet_inf-Supramar -3 wm_lh_G_parietal_sup -3 wm_lh_G_postcentral -3 wm_lh_G_precentral -3 wm_lh_G_precuneus -3 wm_lh_G_rectus -3 wm_lh_G_subcallosal -3 wm_lh_G_temp_sup-G_T_transv -3 wm_lh_G_temp_sup-Lateral -3 wm_lh_G_temp_sup-Plan_polar -3 wm_lh_G_temp_sup-Plan_tempo -3 wm_lh_G_temporal_inf -3 wm_lh_G_temporal_middle -3 wm_lh_Lat_Fis-ant-Horizont -3 wm_lh_Lat_Fis-ant-Vertical -3 wm_lh_Lat_Fis-post -3 wm_lh_Medial_wall -3 wm_lh_Pole_occipital -3 wm_lh_Pole_temporal -3 wm_lh_S_calcarine -3 wm_lh_S_central -3 wm_lh_S_cingul-Marginalis -3 wm_lh_S_circular_insula_ant -3 wm_lh_S_circular_insula_inf -3 wm_lh_S_circular_insula_sup -3 wm_lh_S_collat_transv_ant -3 wm_lh_S_collat_transv_post -3 wm_lh_S_front_inf -3 wm_lh_S_front_middle -3 wm_lh_S_front_sup -3 wm_lh_S_interm_prim-Jensen -3 wm_lh_S_intrapariet_and_P_trans -3 wm_lh_S_oc_middle_and_Lunatus -3 wm_lh_S_oc_sup_and_transversal -3 wm_lh_S_occipital_ant -3 wm_lh_S_oc-temp_lat -3 wm_lh_S_oc-temp_med_and_Lingual -3 wm_lh_S_orbital_lateral -3 wm_lh_S_orbital_med-olfact -3 wm_lh_S_orbital-H_Shaped -3 wm_lh_S_parieto_occipital -3 wm_lh_S_pericallosal -3 wm_lh_S_postcentral -3 wm_lh_S_precentral-inf-part -3 wm_lh_S_precentral-sup-part -3 wm_lh_S_suborbital -3 wm_lh_S_subparietal -3 wm_lh_S_temporal_inf -3 wm_lh_S_temporal_sup -3 wm_lh_S_temporal_transverse - -3 wm_rh_Unknown -3 wm_rh_G_and_S_frontomargin -3 wm_rh_G_and_S_occipital_inf -3 wm_rh_G_and_S_paracentral -3 wm_rh_G_and_S_subcentral -3 wm_rh_G_and_S_transv_frontopol -3 wm_rh_G_and_S_cingul-Ant -3 wm_rh_G_and_S_cingul-Mid-Ant -3 wm_rh_G_and_S_cingul-Mid-Post -3 wm_rh_G_cingul-Post-dorsal -3 wm_rh_G_cingul-Post-ventral -3 wm_rh_G_cuneus -3 wm_rh_G_front_inf-Opercular -3 wm_rh_G_front_inf-Orbital -3 wm_rh_G_front_inf-Triangul -3 wm_rh_G_front_middle -3 wm_rh_G_front_sup -3 wm_rh_G_Ins_lg_and_S_cent_ins -3 wm_rh_G_insular_short -3 wm_rh_G_occipital_middle -3 wm_rh_G_occipital_sup -3 wm_rh_G_oc-temp_lat-fusifor -3 wm_rh_G_oc-temp_med-Lingual -3 wm_rh_G_oc-temp_med-Parahip -3 wm_rh_G_orbital -3 wm_rh_G_pariet_inf-Angular -3 wm_rh_G_pariet_inf-Supramar -3 wm_rh_G_parietal_sup -3 wm_rh_G_postcentral -3 wm_rh_G_precentral -3 wm_rh_G_precuneus -3 wm_rh_G_rectus -3 wm_rh_G_subcallosal -3 wm_rh_G_temp_sup-G_T_transv -3 wm_rh_G_temp_sup-Lateral -3 wm_rh_G_temp_sup-Plan_polar -3 wm_rh_G_temp_sup-Plan_tempo -3 wm_rh_G_temporal_inf -3 wm_rh_G_temporal_middle -3 wm_rh_Lat_Fis-ant-Horizont -3 wm_rh_Lat_Fis-ant-Vertical -3 wm_rh_Lat_Fis-post -3 wm_rh_Medial_wall -3 wm_rh_Pole_occipital -3 wm_rh_Pole_temporal -3 wm_rh_S_calcarine -3 wm_rh_S_central -3 wm_rh_S_cingul-Marginalis -3 wm_rh_S_circular_insula_ant -3 wm_rh_S_circular_insula_inf -3 wm_rh_S_circular_insula_sup -3 wm_rh_S_collat_transv_ant -3 wm_rh_S_collat_transv_post -3 wm_rh_S_front_inf -3 wm_rh_S_front_middle -3 wm_rh_S_front_sup -3 wm_rh_S_interm_prim-Jensen -3 wm_rh_S_intrapariet_and_P_trans -3 wm_rh_S_oc_middle_and_Lunatus -3 wm_rh_S_oc_sup_and_transversal -3 wm_rh_S_occipital_ant -3 wm_rh_S_oc-temp_lat -3 wm_rh_S_oc-temp_med_and_Lingual -3 wm_rh_S_orbital_lateral -3 wm_rh_S_orbital_med-olfact -3 wm_rh_S_orbital-H_Shaped -3 wm_rh_S_parieto_occipital -3 wm_rh_S_pericallosal -3 
wm_rh_S_postcentral -3 wm_rh_S_precentral-inf-part -3 wm_rh_S_precentral-sup-part -3 wm_rh_S_suborbital -3 wm_rh_S_subparietal -3 wm_rh_S_temporal_inf -3 wm_rh_S_temporal_sup -3 wm_rh_S_temporal_transverse - diff --git a/share/mrtrix3/_5ttgen/FreeSurfer2ACT_sgm_amyg_hipp.txt b/share/mrtrix3/_5ttgen/FreeSurfer2ACT_sgm_amyg_hipp.txt deleted file mode 100644 index 0aa6d1bf43..0000000000 --- a/share/mrtrix3/_5ttgen/FreeSurfer2ACT_sgm_amyg_hipp.txt +++ /dev/null @@ -1,913 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Config file to convert FreeSurfer volumetric segmentation outputs -# into a format that can be used in Anatomically-Constrained Tractography - -# This file works in an identical way to the file FreeSurfer2ACT.txt. The only -# difference is that in this configuration, the amygdalae and hippocampi -# will be labeled as sub-cortical grey matter, rather than cortical grey -# matter. - - - -3 Left-Cerebral-White-Matter -1 Left-Cerebral-Cortex -4 Left-Lateral-Ventricle -4 Left-Inf-Lat-Vent -3 Left-Cerebellum-White-Matter -1 Left-Cerebellum-Cortex -2 Left-Thalamus -2 Left-Thalamus-Proper -2 Left-Caudate -2 Left-Putamen -2 Left-Pallidum -4 3rd-Ventricle -4 4th-Ventricle -3 Brain-Stem -2 Left-Hippocampus -2 Left-Amygdala -4 CSF -2 Left-Lesion -2 Left-Accumbens-area -3 Left-VentralDC -4 Left-vessel -4 Left-choroid-plexus - -3 Right-Cerebral-White-Matter -1 Right-Cerebral-Cortex -4 Right-Lateral-Ventricle -4 Right-Inf-Lat-Vent -3 Right-Cerebellum-White-Matter -1 Right-Cerebellum-Cortex -2 Right-Thalamus -2 Right-Thalamus-Proper -2 Right-Caudate -2 Right-Putamen -2 Right-Pallidum -2 Right-Hippocampus -2 Right-Amygdala -2 Right-Lesion -2 Right-Accumbens-area -3 Right-VentralDC -4 Right-vessel -4 Right-choroid-plexus - -4 5th-Ventricle -4 Left-Lateral-Ventricles -4 Right-Lateral-Ventricles -5 WM-hypointensities -5 Left-WM-hypointensities -5 Right-WM-hypointensities -5 non-WM-hypointensities -5 Left-non-WM-hypointensities -5 Right-non-WM-hypointensities -3 Optic-Chiasm -3 Corpus_Callosum -1 Left-Amygdala-Anterior -1 Right-Amygdala-Anterior - -3 Left-IntCapsule-Ant -3 Right-IntCapsule-Ant -3 Left-IntCapsule-Pos -3 Right-IntCapsule-Pos - -3 brainstem -3 DCG -3 Vermis -3 Midbrain -3 Pons -3 Medulla - -5 Left-Cortical-Dysplasia -5 Right-Cortical-Dysplasia - -3 Fornix -3 CC_Posterior -3 CC_Mid_Posterior -3 CC_Central -3 CC_Mid_Anterior -3 CC_Anterior - -1 ctx-lh-unknown -1 ctx-lh-bankssts -1 ctx-lh-caudalanteriorcingulate -1 ctx-lh-caudalmiddlefrontal -1 ctx-lh-corpuscallosum -1 ctx-lh-cuneus -1 ctx-lh-entorhinal -1 ctx-lh-fusiform -1 ctx-lh-inferiorparietal -1 ctx-lh-inferiortemporal -1 ctx-lh-isthmuscingulate -1 ctx-lh-lateraloccipital -1 ctx-lh-lateralorbitofrontal -1 ctx-lh-lingual -1 ctx-lh-medialorbitofrontal -1 ctx-lh-middletemporal -1 ctx-lh-parahippocampal -1 ctx-lh-paracentral -1 ctx-lh-parsopercularis -1 ctx-lh-parsorbitalis -1 ctx-lh-parstriangularis -1 
ctx-lh-pericalcarine -1 ctx-lh-postcentral -1 ctx-lh-posteriorcingulate -1 ctx-lh-precentral -1 ctx-lh-precuneus -1 ctx-lh-rostralanteriorcingulate -1 ctx-lh-rostralmiddlefrontal -1 ctx-lh-superiorfrontal -1 ctx-lh-superiorparietal -1 ctx-lh-superiortemporal -1 ctx-lh-supramarginal -1 ctx-lh-frontalpole -1 ctx-lh-temporalpole -1 ctx-lh-transversetemporal -1 ctx-lh-insula - -1 ctx-rh-unknown -1 ctx-rh-bankssts -1 ctx-rh-caudalanteriorcingulate -1 ctx-rh-caudalmiddlefrontal -1 ctx-rh-corpuscallosum -1 ctx-rh-cuneus -1 ctx-rh-entorhinal -1 ctx-rh-fusiform -1 ctx-rh-inferiorparietal -1 ctx-rh-inferiortemporal -1 ctx-rh-isthmuscingulate -1 ctx-rh-lateraloccipital -1 ctx-rh-lateralorbitofrontal -1 ctx-rh-lingual -1 ctx-rh-medialorbitofrontal -1 ctx-rh-middletemporal -1 ctx-rh-parahippocampal -1 ctx-rh-paracentral -1 ctx-rh-parsopercularis -1 ctx-rh-parsorbitalis -1 ctx-rh-parstriangularis -1 ctx-rh-pericalcarine -1 ctx-rh-postcentral -1 ctx-rh-posteriorcingulate -1 ctx-rh-precentral -1 ctx-rh-precuneus -1 ctx-rh-rostralanteriorcingulate -1 ctx-rh-rostralmiddlefrontal -1 ctx-rh-superiorfrontal -1 ctx-rh-superiorparietal -1 ctx-rh-superiortemporal -1 ctx-rh-supramarginal -1 ctx-rh-frontalpole -1 ctx-rh-temporalpole -1 ctx-rh-transversetemporal -1 ctx-rh-insula - -3 wm-lh-unknown -3 wm-lh-bankssts -3 wm-lh-caudalanteriorcingulate -3 wm-lh-caudalmiddlefrontal -3 wm-lh-corpuscallosum -3 wm-lh-cuneus -3 wm-lh-entorhinal -3 wm-lh-fusiform -3 wm-lh-inferiorparietal -3 wm-lh-inferiortemporal -3 wm-lh-isthmuscingulate -3 wm-lh-lateraloccipital -3 wm-lh-lateralorbitofrontal -3 wm-lh-lingual -3 wm-lh-medialorbitofrontal -3 wm-lh-middletemporal -3 wm-lh-parahippocampal -3 wm-lh-paracentral -3 wm-lh-parsopercularis -3 wm-lh-parsorbitalis -3 wm-lh-parstriangularis -3 wm-lh-pericalcarine -3 wm-lh-postcentral -3 wm-lh-posteriorcingulate -3 wm-lh-precentral -3 wm-lh-precuneus -3 wm-lh-rostralanteriorcingulate -3 wm-lh-rostralmiddlefrontal -3 wm-lh-superiorfrontal -3 wm-lh-superiorparietal -3 wm-lh-superiortemporal -3 wm-lh-supramarginal -3 wm-lh-frontalpole -3 wm-lh-temporalpole -3 wm-lh-transversetemporal -3 wm-lh-insula - -3 wm-rh-unknown -3 wm-rh-bankssts -3 wm-rh-caudalanteriorcingulate -3 wm-rh-caudalmiddlefrontal -3 wm-rh-corpuscallosum -3 wm-rh-cuneus -3 wm-rh-entorhinal -3 wm-rh-fusiform -3 wm-rh-inferiorparietal -3 wm-rh-inferiortemporal -3 wm-rh-isthmuscingulate -3 wm-rh-lateraloccipital -3 wm-rh-lateralorbitofrontal -3 wm-rh-lingual -3 wm-rh-medialorbitofrontal -3 wm-rh-middletemporal -3 wm-rh-parahippocampal -3 wm-rh-paracentral -3 wm-rh-parsopercularis -3 wm-rh-parsorbitalis -3 wm-rh-parstriangularis -3 wm-rh-pericalcarine -3 wm-rh-postcentral -3 wm-rh-posteriorcingulate -3 wm-rh-precentral -3 wm-rh-precuneus -3 wm-rh-rostralanteriorcingulate -3 wm-rh-rostralmiddlefrontal -3 wm-rh-superiorfrontal -3 wm-rh-superiorparietal -3 wm-rh-superiortemporal -3 wm-rh-supramarginal -3 wm-rh-frontalpole -3 wm-rh-temporalpole -3 wm-rh-transversetemporal -3 wm-rh-insula - -1 ctx-lh-Unknown -1 ctx-lh-Corpus_callosum -1 ctx-lh-G_and_S_Insula_ONLY_AVERAGE -1 ctx-lh-G_cingulate-Isthmus -1 ctx-lh-G_cingulate-Main_part -1 ctx-lh-G_cingulate-caudal_ACC -1 ctx-lh-G_cingulate-rostral_ACC -1 ctx-lh-G_cingulate-posterior -1 ctx-lh-S_cingulate-caudal_ACC -1 ctx-lh-S_cingulate-rostral_ACC -1 ctx-lh-S_cingulate-posterior -1 ctx-lh-S_pericallosal-caudal -1 ctx-lh-S_pericallosal-rostral -1 ctx-lh-S_pericallosal-posterior -1 ctx-lh-G_cuneus -1 ctx-lh-G_frontal_inf-Opercular_part -1 ctx-lh-G_frontal_inf-Orbital_part -1 
ctx-lh-G_frontal_inf-Triangular_part -1 ctx-lh-G_frontal_middle -1 ctx-lh-G_frontal_superior -1 ctx-lh-G_frontomarginal -1 ctx-lh-G_insular_long -1 ctx-lh-G_insular_short -1 ctx-lh-G_and_S_occipital_inferior -1 ctx-lh-G_occipital_middle -1 ctx-lh-G_occipital_superior -1 ctx-lh-G_occipit-temp_lat-Or_fusiform -1 ctx-lh-G_occipit-temp_med-Lingual_part -1 ctx-lh-G_occipit-temp_med-Parahippocampal_part -1 ctx-lh-G_orbital -1 ctx-lh-G_paracentral -1 ctx-lh-G_parietal_inferior-Angular_part -1 ctx-lh-G_parietal_inferior-Supramarginal_part -1 ctx-lh-G_parietal_superior -1 ctx-lh-G_postcentral -1 ctx-lh-G_precentral -1 ctx-lh-G_precuneus -1 ctx-lh-G_rectus -1 ctx-lh-G_subcallosal -1 ctx-lh-G_subcentral -1 ctx-lh-G_temporal_inferior -1 ctx-lh-G_temporal_middle -1 ctx-lh-G_temp_sup-G_temp_transv_and_interm_S -1 ctx-lh-G_temp_sup-Lateral_aspect -1 ctx-lh-G_temp_sup-Planum_polare -1 ctx-lh-G_temp_sup-Planum_tempolare -1 ctx-lh-G_and_S_transverse_frontopolar -1 ctx-lh-Lat_Fissure-ant_sgt-ramus_horizontal -1 ctx-lh-Lat_Fissure-ant_sgt-ramus_vertical -1 ctx-lh-Lat_Fissure-post_sgt -1 ctx-lh-Medial_wall -1 ctx-lh-Pole_occipital -1 ctx-lh-Pole_temporal -1 ctx-lh-S_calcarine -1 ctx-lh-S_central -1 ctx-lh-S_central_insula -1 ctx-lh-S_cingulate-Main_part_and_Intracingulate -1 ctx-lh-S_cingulate-Marginalis_part -1 ctx-lh-S_circular_insula_anterior -1 ctx-lh-S_circular_insula_inferior -1 ctx-lh-S_circular_insula_superior -1 ctx-lh-S_collateral_transverse_ant -1 ctx-lh-S_collateral_transverse_post -1 ctx-lh-S_frontal_inferior -1 ctx-lh-S_frontal_middle -1 ctx-lh-S_frontal_superior -1 ctx-lh-S_frontomarginal -1 ctx-lh-S_intermedius_primus-Jensen -1 ctx-lh-S_intraparietal-and_Parietal_transverse -1 ctx-lh-S_occipital_anterior -1 ctx-lh-S_occipital_middle_and_Lunatus -1 ctx-lh-S_occipital_superior_and_transversalis -1 ctx-lh-S_occipito-temporal_lateral -1 ctx-lh-S_occipito-temporal_medial_and_S_Lingual -1 ctx-lh-S_orbital-H_shapped -1 ctx-lh-S_orbital_lateral -1 ctx-lh-S_orbital_medial-Or_olfactory -1 ctx-lh-S_paracentral -1 ctx-lh-S_parieto_occipital -1 ctx-lh-S_pericallosal -1 ctx-lh-S_postcentral -1 ctx-lh-S_precentral-Inferior-part -1 ctx-lh-S_precentral-Superior-part -1 ctx-lh-S_subcentral_ant -1 ctx-lh-S_subcentral_post -1 ctx-lh-S_suborbital -1 ctx-lh-S_subparietal -1 ctx-lh-S_supracingulate -1 ctx-lh-S_temporal_inferior -1 ctx-lh-S_temporal_superior -1 ctx-lh-S_temporal_transverse - -1 ctx-rh-Unknown -1 ctx-rh-Corpus_callosum -1 ctx-rh-G_and_S_Insula_ONLY_AVERAGE -1 ctx-rh-G_cingulate-Isthmus -1 ctx-rh-G_cingulate-Main_part -1 ctx-rh-G_cuneus -1 ctx-rh-G_frontal_inf-Opercular_part -1 ctx-rh-G_frontal_inf-Orbital_part -1 ctx-rh-G_frontal_inf-Triangular_part -1 ctx-rh-G_frontal_middle -1 ctx-rh-G_frontal_superior -1 ctx-rh-G_frontomarginal -1 ctx-rh-G_insular_long -1 ctx-rh-G_insular_short -1 ctx-rh-G_and_S_occipital_inferior -1 ctx-rh-G_occipital_middle -1 ctx-rh-G_occipital_superior -1 ctx-rh-G_occipit-temp_lat-Or_fusiform -1 ctx-rh-G_occipit-temp_med-Lingual_part -1 ctx-rh-G_occipit-temp_med-Parahippocampal_part -1 ctx-rh-G_orbital -1 ctx-rh-G_paracentral -1 ctx-rh-G_parietal_inferior-Angular_part -1 ctx-rh-G_parietal_inferior-Supramarginal_part -1 ctx-rh-G_parietal_superior -1 ctx-rh-G_postcentral -1 ctx-rh-G_precentral -1 ctx-rh-G_precuneus -1 ctx-rh-G_rectus -1 ctx-rh-G_subcallosal -1 ctx-rh-G_subcentral -1 ctx-rh-G_temporal_inferior -1 ctx-rh-G_temporal_middle -1 ctx-rh-G_temp_sup-G_temp_transv_and_interm_S -1 ctx-rh-G_temp_sup-Lateral_aspect -1 ctx-rh-G_temp_sup-Planum_polare -1 
ctx-rh-G_temp_sup-Planum_tempolare -1 ctx-rh-G_and_S_transverse_frontopolar -1 ctx-rh-Lat_Fissure-ant_sgt-ramus_horizontal -1 ctx-rh-Lat_Fissure-ant_sgt-ramus_vertical -1 ctx-rh-Lat_Fissure-post_sgt -1 ctx-rh-Medial_wall -1 ctx-rh-Pole_occipital -1 ctx-rh-Pole_temporal -1 ctx-rh-S_calcarine -1 ctx-rh-S_central -1 ctx-rh-S_central_insula -1 ctx-rh-S_cingulate-Main_part_and_Intracingulate -1 ctx-rh-S_cingulate-Marginalis_part -1 ctx-rh-S_circular_insula_anterior -1 ctx-rh-S_circular_insula_inferior -1 ctx-rh-S_circular_insula_superior -1 ctx-rh-S_collateral_transverse_ant -1 ctx-rh-S_collateral_transverse_post -1 ctx-rh-S_frontal_inferior -1 ctx-rh-S_frontal_middle -1 ctx-rh-S_frontal_superior -1 ctx-rh-S_frontomarginal -1 ctx-rh-S_intermedius_primus-Jensen -1 ctx-rh-S_intraparietal-and_Parietal_transverse -1 ctx-rh-S_occipital_anterior -1 ctx-rh-S_occipital_middle_and_Lunatus -1 ctx-rh-S_occipital_superior_and_transversalis -1 ctx-rh-S_occipito-temporal_lateral -1 ctx-rh-S_occipito-temporal_medial_and_S_Lingual -1 ctx-rh-S_orbital-H_shapped -1 ctx-rh-S_orbital_lateral -1 ctx-rh-S_orbital_medial-Or_olfactory -1 ctx-rh-S_paracentral -1 ctx-rh-S_parieto_occipital -1 ctx-rh-S_pericallosal -1 ctx-rh-S_postcentral -1 ctx-rh-S_precentral-Inferior-part -1 ctx-rh-S_precentral-Superior-part -1 ctx-rh-S_subcentral_ant -1 ctx-rh-S_subcentral_post -1 ctx-rh-S_suborbital -1 ctx-rh-S_subparietal -1 ctx-rh-S_supracingulate -1 ctx-rh-S_temporal_inferior -1 ctx-rh-S_temporal_superior -1 ctx-rh-S_temporal_transverse - -1 ctx-rh-G_cingulate-caudal_ACC -1 ctx-rh-G_cingulate-rostral_ACC -1 ctx-rh-G_cingulate-posterior -1 ctx-rh-S_cingulate-caudal_ACC -1 ctx-rh-S_cingulate-rostral_ACC -1 ctx-rh-S_cingulate-posterior -1 ctx-rh-S_pericallosal-caudal -1 ctx-rh-S_pericallosal-rostral -1 ctx-rh-S_pericallosal-posterior - -3 wm-lh-Unknown -3 wm-lh-Corpus_callosum -3 wm-lh-G_and_S_Insula_ONLY_AVERAGE -3 wm-lh-G_cingulate-Isthmus -3 wm-lh-G_cingulate-Main_part -3 wm-lh-G_cuneus -3 wm-lh-G_frontal_inf-Opercular_part -3 wm-lh-G_frontal_inf-Orbital_part -3 wm-lh-G_frontal_inf-Triangular_part -3 wm-lh-G_frontal_middle -3 wm-lh-G_frontal_superior -3 wm-lh-G_frontomarginal -3 wm-lh-G_insular_long -3 wm-lh-G_insular_short -3 wm-lh-G_and_S_occipital_inferior -3 wm-lh-G_occipital_middle -3 wm-lh-G_occipital_superior -3 wm-lh-G_occipit-temp_lat-Or_fusiform -3 wm-lh-G_occipit-temp_med-Lingual_part -3 wm-lh-G_occipit-temp_med-Parahippocampal_part -3 wm-lh-G_orbital -3 wm-lh-G_paracentral -3 wm-lh-G_parietal_inferior-Angular_part -3 wm-lh-G_parietal_inferior-Supramarginal_part -3 wm-lh-G_parietal_superior -3 wm-lh-G_postcentral -3 wm-lh-G_precentral -3 wm-lh-G_precuneus -3 wm-lh-G_rectus -3 wm-lh-G_subcallosal -3 wm-lh-G_subcentral -3 wm-lh-G_temporal_inferior -3 wm-lh-G_temporal_middle -3 wm-lh-G_temp_sup-G_temp_transv_and_interm_S -3 wm-lh-G_temp_sup-Lateral_aspect -3 wm-lh-G_temp_sup-Planum_polare -3 wm-lh-G_temp_sup-Planum_tempolare -3 wm-lh-G_and_S_transverse_frontopolar -3 wm-lh-Lat_Fissure-ant_sgt-ramus_horizontal -3 wm-lh-Lat_Fissure-ant_sgt-ramus_vertical -3 wm-lh-Lat_Fissure-post_sgt -3 wm-lh-Medial_wall -3 wm-lh-Pole_occipital -3 wm-lh-Pole_temporal -3 wm-lh-S_calcarine -3 wm-lh-S_central -3 wm-lh-S_central_insula -3 wm-lh-S_cingulate-Main_part_and_Intracingulate -3 wm-lh-S_cingulate-Marginalis_part -3 wm-lh-S_circular_insula_anterior -3 wm-lh-S_circular_insula_inferior -3 wm-lh-S_circular_insula_superior -3 wm-lh-S_collateral_transverse_ant -3 wm-lh-S_collateral_transverse_post -3 wm-lh-S_frontal_inferior -3 
wm-lh-S_frontal_middle -3 wm-lh-S_frontal_superior -3 wm-lh-S_frontomarginal -3 wm-lh-S_intermedius_primus-Jensen -3 wm-lh-S_intraparietal-and_Parietal_transverse -3 wm-lh-S_occipital_anterior -3 wm-lh-S_occipital_middle_and_Lunatus -3 wm-lh-S_occipital_superior_and_transversalis -3 wm-lh-S_occipito-temporal_lateral -3 wm-lh-S_occipito-temporal_medial_and_S_Lingual -3 wm-lh-S_orbital-H_shapped -3 wm-lh-S_orbital_lateral -3 wm-lh-S_orbital_medial-Or_olfactory -3 wm-lh-S_paracentral -3 wm-lh-S_parieto_occipital -3 wm-lh-S_pericallosal -3 wm-lh-S_postcentral -3 wm-lh-S_precentral-Inferior-part -3 wm-lh-S_precentral-Superior-part -3 wm-lh-S_subcentral_ant -3 wm-lh-S_subcentral_post -3 wm-lh-S_suborbital -3 wm-lh-S_subparietal -3 wm-lh-S_supracingulate -3 wm-lh-S_temporal_inferior -3 wm-lh-S_temporal_superior -3 wm-lh-S_temporal_transverse - -3 wm-rh-Unknown -3 wm-rh-Corpus_callosum -3 wm-rh-G_and_S_Insula_ONLY_AVERAGE -3 wm-rh-G_cingulate-Isthmus -3 wm-rh-G_cingulate-Main_part -3 wm-rh-G_cuneus -3 wm-rh-G_frontal_inf-Opercular_part -3 wm-rh-G_frontal_inf-Orbital_part -3 wm-rh-G_frontal_inf-Triangular_part -3 wm-rh-G_frontal_middle -3 wm-rh-G_frontal_superior -3 wm-rh-G_frontomarginal -3 wm-rh-G_insular_long -3 wm-rh-G_insular_short -3 wm-rh-G_and_S_occipital_inferior -3 wm-rh-G_occipital_middle -3 wm-rh-G_occipital_superior -3 wm-rh-G_occipit-temp_lat-Or_fusiform -3 wm-rh-G_occipit-temp_med-Lingual_part -3 wm-rh-G_occipit-temp_med-Parahippocampal_part -3 wm-rh-G_orbital -3 wm-rh-G_paracentral -3 wm-rh-G_parietal_inferior-Angular_part -3 wm-rh-G_parietal_inferior-Supramarginal_part -3 wm-rh-G_parietal_superior -3 wm-rh-G_postcentral -3 wm-rh-G_precentral -3 wm-rh-G_precuneus -3 wm-rh-G_rectus -3 wm-rh-G_subcallosal -3 wm-rh-G_subcentral -3 wm-rh-G_temporal_inferior -3 wm-rh-G_temporal_middle -3 wm-rh-G_temp_sup-G_temp_transv_and_interm_S -3 wm-rh-G_temp_sup-Lateral_aspect -3 wm-rh-G_temp_sup-Planum_polare -3 wm-rh-G_temp_sup-Planum_tempolare -3 wm-rh-G_and_S_transverse_frontopolar -3 wm-rh-Lat_Fissure-ant_sgt-ramus_horizontal -3 wm-rh-Lat_Fissure-ant_sgt-ramus_vertical -3 wm-rh-Lat_Fissure-post_sgt -3 wm-rh-Medial_wall -3 wm-rh-Pole_occipital -3 wm-rh-Pole_temporal -3 wm-rh-S_calcarine -3 wm-rh-S_central -3 wm-rh-S_central_insula -3 wm-rh-S_cingulate-Main_part_and_Intracingulate -3 wm-rh-S_cingulate-Marginalis_part -3 wm-rh-S_circular_insula_anterior -3 wm-rh-S_circular_insula_inferior -3 wm-rh-S_circular_insula_superior -3 wm-rh-S_collateral_transverse_ant -3 wm-rh-S_collateral_transverse_post -3 wm-rh-S_frontal_inferior -3 wm-rh-S_frontal_middle -3 wm-rh-S_frontal_superior -3 wm-rh-S_frontomarginal -3 wm-rh-S_intermedius_primus-Jensen -3 wm-rh-S_intraparietal-and_Parietal_transverse -3 wm-rh-S_occipital_anterior -3 wm-rh-S_occipital_middle_and_Lunatus -3 wm-rh-S_occipital_superior_and_transversalis -3 wm-rh-S_occipito-temporal_lateral -3 wm-rh-S_occipito-temporal_medial_and_S_Lingual -3 wm-rh-S_orbital-H_shapped -3 wm-rh-S_orbital_lateral -3 wm-rh-S_orbital_medial-Or_olfactory -3 wm-rh-S_paracentral -3 wm-rh-S_parieto_occipital -3 wm-rh-S_pericallosal -3 wm-rh-S_postcentral -3 wm-rh-S_precentral-Inferior-part -3 wm-rh-S_precentral-Superior-part -3 wm-rh-S_subcentral_ant -3 wm-rh-S_subcentral_post -3 wm-rh-S_suborbital -3 wm-rh-S_subparietal -3 wm-rh-S_supracingulate -3 wm-rh-S_temporal_inferior -3 wm-rh-S_temporal_superior -3 wm-rh-S_temporal_transverse - -3 Left-UnsegmentedWhiteMatter -3 Right-UnsegmentedWhiteMatter - -1 ctx_lh_Unknown -1 ctx_lh_G_and_S_frontomargin -1 
ctx_lh_G_and_S_occipital_inf -1 ctx_lh_G_and_S_paracentral -1 ctx_lh_G_and_S_subcentral -1 ctx_lh_G_and_S_transv_frontopol -1 ctx_lh_G_and_S_cingul-Ant -1 ctx_lh_G_and_S_cingul-Mid-Ant -1 ctx_lh_G_and_S_cingul-Mid-Post -1 ctx_lh_G_cingul-Post-dorsal -1 ctx_lh_G_cingul-Post-ventral -1 ctx_lh_G_cuneus -1 ctx_lh_G_front_inf-Opercular -1 ctx_lh_G_front_inf-Orbital -1 ctx_lh_G_front_inf-Triangul -1 ctx_lh_G_front_middle -1 ctx_lh_G_front_sup -1 ctx_lh_G_Ins_lg_and_S_cent_ins -1 ctx_lh_G_insular_short -1 ctx_lh_G_occipital_middle -1 ctx_lh_G_occipital_sup -1 ctx_lh_G_oc-temp_lat-fusifor -1 ctx_lh_G_oc-temp_med-Lingual -1 ctx_lh_G_oc-temp_med-Parahip -1 ctx_lh_G_orbital -1 ctx_lh_G_pariet_inf-Angular -1 ctx_lh_G_pariet_inf-Supramar -1 ctx_lh_G_parietal_sup -1 ctx_lh_G_postcentral -1 ctx_lh_G_precentral -1 ctx_lh_G_precuneus -1 ctx_lh_G_rectus -1 ctx_lh_G_subcallosal -1 ctx_lh_G_temp_sup-G_T_transv -1 ctx_lh_G_temp_sup-Lateral -1 ctx_lh_G_temp_sup-Plan_polar -1 ctx_lh_G_temp_sup-Plan_tempo -1 ctx_lh_G_temporal_inf -1 ctx_lh_G_temporal_middle -1 ctx_lh_Lat_Fis-ant-Horizont -1 ctx_lh_Lat_Fis-ant-Vertical -1 ctx_lh_Lat_Fis-post -1 ctx_lh_Medial_wall -1 ctx_lh_Pole_occipital -1 ctx_lh_Pole_temporal -1 ctx_lh_S_calcarine -1 ctx_lh_S_central -1 ctx_lh_S_cingul-Marginalis -1 ctx_lh_S_circular_insula_ant -1 ctx_lh_S_circular_insula_inf -1 ctx_lh_S_circular_insula_sup -1 ctx_lh_S_collat_transv_ant -1 ctx_lh_S_collat_transv_post -1 ctx_lh_S_front_inf -1 ctx_lh_S_front_middle -1 ctx_lh_S_front_sup -1 ctx_lh_S_interm_prim-Jensen -1 ctx_lh_S_intrapariet_and_P_trans -1 ctx_lh_S_oc_middle_and_Lunatus -1 ctx_lh_S_oc_sup_and_transversal -1 ctx_lh_S_occipital_ant -1 ctx_lh_S_oc-temp_lat -1 ctx_lh_S_oc-temp_med_and_Lingual -1 ctx_lh_S_orbital_lateral -1 ctx_lh_S_orbital_med-olfact -1 ctx_lh_S_orbital-H_Shaped -1 ctx_lh_S_parieto_occipital -1 ctx_lh_S_pericallosal -1 ctx_lh_S_postcentral -1 ctx_lh_S_precentral-inf-part -1 ctx_lh_S_precentral-sup-part -1 ctx_lh_S_suborbital -1 ctx_lh_S_subparietal -1 ctx_lh_S_temporal_inf -1 ctx_lh_S_temporal_sup -1 ctx_lh_S_temporal_transverse - -1 ctx_rh_Unknown -1 ctx_rh_G_and_S_frontomargin -1 ctx_rh_G_and_S_occipital_inf -1 ctx_rh_G_and_S_paracentral -1 ctx_rh_G_and_S_subcentral -1 ctx_rh_G_and_S_transv_frontopol -1 ctx_rh_G_and_S_cingul-Ant -1 ctx_rh_G_and_S_cingul-Mid-Ant -1 ctx_rh_G_and_S_cingul-Mid-Post -1 ctx_rh_G_cingul-Post-dorsal -1 ctx_rh_G_cingul-Post-ventral -1 ctx_rh_G_cuneus -1 ctx_rh_G_front_inf-Opercular -1 ctx_rh_G_front_inf-Orbital -1 ctx_rh_G_front_inf-Triangul -1 ctx_rh_G_front_middle -1 ctx_rh_G_front_sup -1 ctx_rh_G_Ins_lg_and_S_cent_ins -1 ctx_rh_G_insular_short -1 ctx_rh_G_occipital_middle -1 ctx_rh_G_occipital_sup -1 ctx_rh_G_oc-temp_lat-fusifor -1 ctx_rh_G_oc-temp_med-Lingual -1 ctx_rh_G_oc-temp_med-Parahip -1 ctx_rh_G_orbital -1 ctx_rh_G_pariet_inf-Angular -1 ctx_rh_G_pariet_inf-Supramar -1 ctx_rh_G_parietal_sup -1 ctx_rh_G_postcentral -1 ctx_rh_G_precentral -1 ctx_rh_G_precuneus -1 ctx_rh_G_rectus -1 ctx_rh_G_subcallosal -1 ctx_rh_G_temp_sup-G_T_transv -1 ctx_rh_G_temp_sup-Lateral -1 ctx_rh_G_temp_sup-Plan_polar -1 ctx_rh_G_temp_sup-Plan_tempo -1 ctx_rh_G_temporal_inf -1 ctx_rh_G_temporal_middle -1 ctx_rh_Lat_Fis-ant-Horizont -1 ctx_rh_Lat_Fis-ant-Vertical -1 ctx_rh_Lat_Fis-post -1 ctx_rh_Medial_wall -1 ctx_rh_Pole_occipital -1 ctx_rh_Pole_temporal -1 ctx_rh_S_calcarine -1 ctx_rh_S_central -1 ctx_rh_S_cingul-Marginalis -1 ctx_rh_S_circular_insula_ant -1 ctx_rh_S_circular_insula_inf -1 ctx_rh_S_circular_insula_sup -1 ctx_rh_S_collat_transv_ant -1 
ctx_rh_S_collat_transv_post -1 ctx_rh_S_front_inf -1 ctx_rh_S_front_middle -1 ctx_rh_S_front_sup -1 ctx_rh_S_interm_prim-Jensen -1 ctx_rh_S_intrapariet_and_P_trans -1 ctx_rh_S_oc_middle_and_Lunatus -1 ctx_rh_S_oc_sup_and_transversal -1 ctx_rh_S_occipital_ant -1 ctx_rh_S_oc-temp_lat -1 ctx_rh_S_oc-temp_med_and_Lingual -1 ctx_rh_S_orbital_lateral -1 ctx_rh_S_orbital_med-olfact -1 ctx_rh_S_orbital-H_Shaped -1 ctx_rh_S_parieto_occipital -1 ctx_rh_S_pericallosal -1 ctx_rh_S_postcentral -1 ctx_rh_S_precentral-inf-part -1 ctx_rh_S_precentral-sup-part -1 ctx_rh_S_suborbital -1 ctx_rh_S_subparietal -1 ctx_rh_S_temporal_inf -1 ctx_rh_S_temporal_sup -1 ctx_rh_S_temporal_transverse - -3 wm_lh_Unknown -3 wm_lh_G_and_S_frontomargin -3 wm_lh_G_and_S_occipital_inf -3 wm_lh_G_and_S_paracentral -3 wm_lh_G_and_S_subcentral -3 wm_lh_G_and_S_transv_frontopol -3 wm_lh_G_and_S_cingul-Ant -3 wm_lh_G_and_S_cingul-Mid-Ant -3 wm_lh_G_and_S_cingul-Mid-Post -3 wm_lh_G_cingul-Post-dorsal -3 wm_lh_G_cingul-Post-ventral -3 wm_lh_G_cuneus -3 wm_lh_G_front_inf-Opercular -3 wm_lh_G_front_inf-Orbital -3 wm_lh_G_front_inf-Triangul -3 wm_lh_G_front_middle -3 wm_lh_G_front_sup -3 wm_lh_G_Ins_lg_and_S_cent_ins -3 wm_lh_G_insular_short -3 wm_lh_G_occipital_middle -3 wm_lh_G_occipital_sup -3 wm_lh_G_oc-temp_lat-fusifor -3 wm_lh_G_oc-temp_med-Lingual -3 wm_lh_G_oc-temp_med-Parahip -3 wm_lh_G_orbital -3 wm_lh_G_pariet_inf-Angular -3 wm_lh_G_pariet_inf-Supramar -3 wm_lh_G_parietal_sup -3 wm_lh_G_postcentral -3 wm_lh_G_precentral -3 wm_lh_G_precuneus -3 wm_lh_G_rectus -3 wm_lh_G_subcallosal -3 wm_lh_G_temp_sup-G_T_transv -3 wm_lh_G_temp_sup-Lateral -3 wm_lh_G_temp_sup-Plan_polar -3 wm_lh_G_temp_sup-Plan_tempo -3 wm_lh_G_temporal_inf -3 wm_lh_G_temporal_middle -3 wm_lh_Lat_Fis-ant-Horizont -3 wm_lh_Lat_Fis-ant-Vertical -3 wm_lh_Lat_Fis-post -3 wm_lh_Medial_wall -3 wm_lh_Pole_occipital -3 wm_lh_Pole_temporal -3 wm_lh_S_calcarine -3 wm_lh_S_central -3 wm_lh_S_cingul-Marginalis -3 wm_lh_S_circular_insula_ant -3 wm_lh_S_circular_insula_inf -3 wm_lh_S_circular_insula_sup -3 wm_lh_S_collat_transv_ant -3 wm_lh_S_collat_transv_post -3 wm_lh_S_front_inf -3 wm_lh_S_front_middle -3 wm_lh_S_front_sup -3 wm_lh_S_interm_prim-Jensen -3 wm_lh_S_intrapariet_and_P_trans -3 wm_lh_S_oc_middle_and_Lunatus -3 wm_lh_S_oc_sup_and_transversal -3 wm_lh_S_occipital_ant -3 wm_lh_S_oc-temp_lat -3 wm_lh_S_oc-temp_med_and_Lingual -3 wm_lh_S_orbital_lateral -3 wm_lh_S_orbital_med-olfact -3 wm_lh_S_orbital-H_Shaped -3 wm_lh_S_parieto_occipital -3 wm_lh_S_pericallosal -3 wm_lh_S_postcentral -3 wm_lh_S_precentral-inf-part -3 wm_lh_S_precentral-sup-part -3 wm_lh_S_suborbital -3 wm_lh_S_subparietal -3 wm_lh_S_temporal_inf -3 wm_lh_S_temporal_sup -3 wm_lh_S_temporal_transverse - -3 wm_rh_Unknown -3 wm_rh_G_and_S_frontomargin -3 wm_rh_G_and_S_occipital_inf -3 wm_rh_G_and_S_paracentral -3 wm_rh_G_and_S_subcentral -3 wm_rh_G_and_S_transv_frontopol -3 wm_rh_G_and_S_cingul-Ant -3 wm_rh_G_and_S_cingul-Mid-Ant -3 wm_rh_G_and_S_cingul-Mid-Post -3 wm_rh_G_cingul-Post-dorsal -3 wm_rh_G_cingul-Post-ventral -3 wm_rh_G_cuneus -3 wm_rh_G_front_inf-Opercular -3 wm_rh_G_front_inf-Orbital -3 wm_rh_G_front_inf-Triangul -3 wm_rh_G_front_middle -3 wm_rh_G_front_sup -3 wm_rh_G_Ins_lg_and_S_cent_ins -3 wm_rh_G_insular_short -3 wm_rh_G_occipital_middle -3 wm_rh_G_occipital_sup -3 wm_rh_G_oc-temp_lat-fusifor -3 wm_rh_G_oc-temp_med-Lingual -3 wm_rh_G_oc-temp_med-Parahip -3 wm_rh_G_orbital -3 wm_rh_G_pariet_inf-Angular -3 wm_rh_G_pariet_inf-Supramar -3 wm_rh_G_parietal_sup -3 wm_rh_G_postcentral -3 
wm_rh_G_precentral -3 wm_rh_G_precuneus -3 wm_rh_G_rectus -3 wm_rh_G_subcallosal -3 wm_rh_G_temp_sup-G_T_transv -3 wm_rh_G_temp_sup-Lateral -3 wm_rh_G_temp_sup-Plan_polar -3 wm_rh_G_temp_sup-Plan_tempo -3 wm_rh_G_temporal_inf -3 wm_rh_G_temporal_middle -3 wm_rh_Lat_Fis-ant-Horizont -3 wm_rh_Lat_Fis-ant-Vertical -3 wm_rh_Lat_Fis-post -3 wm_rh_Medial_wall -3 wm_rh_Pole_occipital -3 wm_rh_Pole_temporal -3 wm_rh_S_calcarine -3 wm_rh_S_central -3 wm_rh_S_cingul-Marginalis -3 wm_rh_S_circular_insula_ant -3 wm_rh_S_circular_insula_inf -3 wm_rh_S_circular_insula_sup -3 wm_rh_S_collat_transv_ant -3 wm_rh_S_collat_transv_post -3 wm_rh_S_front_inf -3 wm_rh_S_front_middle -3 wm_rh_S_front_sup -3 wm_rh_S_interm_prim-Jensen -3 wm_rh_S_intrapariet_and_P_trans -3 wm_rh_S_oc_middle_and_Lunatus -3 wm_rh_S_oc_sup_and_transversal -3 wm_rh_S_occipital_ant -3 wm_rh_S_oc-temp_lat -3 wm_rh_S_oc-temp_med_and_Lingual -3 wm_rh_S_orbital_lateral -3 wm_rh_S_orbital_med-olfact -3 wm_rh_S_orbital-H_Shaped -3 wm_rh_S_parieto_occipital -3 wm_rh_S_pericallosal -3 wm_rh_S_postcentral -3 wm_rh_S_precentral-inf-part -3 wm_rh_S_precentral-sup-part -3 wm_rh_S_suborbital -3 wm_rh_S_subparietal -3 wm_rh_S_temporal_inf -3 wm_rh_S_temporal_sup -3 wm_rh_S_temporal_transverse - diff --git a/share/mrtrix3/_5ttgen/hsvs/AmygSubfields.txt b/share/mrtrix3/_5ttgen/hsvs/AmygSubfields.txt deleted file mode 100644 index c557294be8..0000000000 --- a/share/mrtrix3/_5ttgen/hsvs/AmygSubfields.txt +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. -2 Lateral-nucleus 72 132 181 0 -2 Basolateral-nucleus 243 243 243 0 -2 Basal-nucleus 207 63 79 0 -2 Centromedial-nucleus 121 20 135 0 -2 Central-nucleus 197 60 248 0 -2 Medial-nucleus 2 149 2 0 -2 Cortical-nucleus 221 249 166 0 -2 Accessory-Basal-nucleus 232 146 35 0 -2 Corticoamygdaloid-transitio 20 60 120 0 -2 Anterior-amygdaloid-area-AAA 250 250 0 0 -2 Fusion-amygdala-HP-FAH 122 187 222 0 -2 Hippocampal-amygdala-transition-HATA 237 12 177 0 -2 Endopiriform-nucleus 10 49 255 0 -3 Lateral-nucleus-olfactory-tract 205 184 144 0 -2 Paralaminar-nucleus 45 205 165 0 -2 Intercalated-nucleus 117 160 175 0 -2 Prepiriform-cortex 221 217 21 0 -2 Periamygdaloid-cortex 20 60 120 0 -2 Envelope-Amygdala 141 21 100 0 -2 Extranuclear-Amydala 225 140 141 0 diff --git a/share/mrtrix3/_5ttgen/hsvs/HippSubfields.txt b/share/mrtrix3/_5ttgen/hsvs/HippSubfields.txt deleted file mode 100644 index bcc500748e..0000000000 --- a/share/mrtrix3/_5ttgen/hsvs/HippSubfields.txt +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -3 Left-hippocampal_fissure -2 Left-CADG-head -2 Left-subiculum -3 Left-fimbria -3 Right-hippocampal_fissure -2 Right-CADG-head -2 Right-subiculum -3 Right-fimbria -2 alveus -2 perforant_pathway -2 parasubiculum -2 presubiculum -2 subiculum -2 CA1 -2 CA2 -2 CA3 -2 CA4 -2 GC-DG -2 HATA -3 fimbria -2 molecular_layer_HP -3 hippocampal_fissure -2 entorhinal_cortex -2 molecular_layer_subiculum -2 HP_tail -2 presubiculum-head -2 presubiculum-body -2 subiculum-head -2 subiculum-body -2 CA1-head -2 CA1-body -2 CA3-head -2 CA3-body -2 CA4-head -2 CA4-body -2 GC-ML-DG-head -2 GC-ML-DG-body -2 molecular_layer_HP-head -2 molecular_layer_HP-body diff --git a/share/mrtrix3/labelconvert/aal.txt b/share/mrtrix3/labelconvert/aal.txt deleted file mode 100644 index e412130d52..0000000000 --- a/share/mrtrix3/labelconvert/aal.txt +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -0 ??? 
Unknown 0 0 0 0 - -1 PREL Precentral_L 243 231 117 255 -2 F1L Frontal_Sup_L 212 91 65 255 -3 F1OL Frontal_Sup_Orb_L 227 141 65 255 -4 F2L Frontal_Mid_L 157 179 83 255 -5 F2OL Frontal_Mid_Orb_L 246 236 164 255 -6 F3OPL Frontal_Inf_Oper_L 192 207 213 255 -7 F3TL Frontal_Inf_Tri_L 220 245 129 255 -8 F3OL Frontal_Inf_Orb_L 183 145 163 255 -9 ROL Rolandic_Oper_L 65 39 86 255 -10 SMAL Supp_Motor_Area_L 56 34 80 255 -11 OCL Olfactory_L 136 45 97 255 -12 F1ML Frontal_Sup_Medial_L 177 194 148 255 -13 F1MOL Frontal_Med_Orb_L 226 142 69 255 -14 GRL Rectus_L 159 179 141 255 -15 INL Insula_L 111 120 63 255 -16 ACINL Cingulum_Ant_L 164 90 126 255 -17 MCINL Cingulum_Mid_L 131 156 62 255 -18 PCINL Cingulum_Post_L 184 161 54 255 -19 HIPL Hippocampus_L 55 34 83 255 -20 PHIPL ParaHippocampal_L 140 66 68 255 -21 AMYGL Amygdala_L 246 224 90 255 -22 V1L Calcarine_L 255 255 255 255 -23 QL Cuneus_L 111 115 68 255 -24 LINGL Lingual_L 155 65 63 255 -25 O1L Occipital_Sup_L 200 117 72 255 -26 O2L Occipital_Mid_L 186 70 94 255 -27 O3L Occipital_Inf_L 163 182 79 255 -28 FUSIL Fusiform_L 203 79 107 255 -29 POSTL Postcentral_L 97 110 64 255 -30 P1L Parietal_Sup_L 138 168 107 255 -31 P2L Parietal_Inf_L 77 78 117 255 -32 SMGL SupraMarginal_L 85 45 94 255 -33 AGL Angular_L 109 93 128 255 -34 PQL Precuneus_L 137 54 72 255 -35 PCLL Paracentral_Lobule_L 151 126 57 255 -36 HESL Heschl_L 172 189 88 255 -37 T1L Temporal_Sup_L 224 117 61 255 -38 T1PL Temporal_Pole_Sup_L 203 84 90 255 -39 T2L Temporal_Mid_L 54 45 97 255 -40 T2PL Temporal_Pole_Mid_L 161 183 97 255 -41 T3L Temporal_Inf_L 132 135 164 255 - -42 CAUL Caudate_L 133 32 87 255 -43 PUTL Putamen_L 88 113 113 255 -44 PALL Pallidum_L 46 37 77 255 -45 THAL Thalamus_L 155 177 115 255 - -46 CERC1L Cerebelum_Crus1_L 116 16 212 255 -47 CERC2L Cerebelum_Crus2_L 30 236 29 255 -48 CER3L Cerebelum_3_L 35 36 242 255 -49 CER45L Cerebelum_4_5_L 36 240 239 255 -50 CER6L Cerebelum_6_L 238 30 238 255 -51 CER7BL Cerebelum_7b_L 150 67 27 255 -52 CER8L Cerebelum_8_L 237 236 30 255 -53 CER9L Cerebelum_9_L 35 157 36 255 -54 CER10L Cerebelum_10_L 226 22 23 255 - -55 VER12 Vermis_1_2 73 126 151 128 -56 VER3 Vermis_3 18 18 121 128 -57 VER45 Vermis_4_5 18 120 120 128 -58 VER6 Vermis_6 119 15 119 128 -59 VER7 Vermis_7 75 33 33 128 -60 VER8 Vermis_8 118 118 15 128 -61 VER9 Vermis_9 18 78 18 128 -62 VER10 Vermis_10 113 11 11 128 - -63 CERC1R Cerebelum_Crus1_R 116 16 212 255 -64 CERC2R Cerebelum_Crus2_R 30 236 29 255 -65 CER3R Cerebelum_3_R 35 36 242 255 -66 CER45R Cerebelum_4_5_R 36 240 239 255 -67 CER6R Cerebelum_6_R 238 30 238 255 -68 CER7BR Cerebelum_7b_R 150 67 27 255 -69 CER8R Cerebelum_8_R 237 236 30 255 -70 CER9R Cerebelum_9_R 35 157 36 255 -71 CER10R Cerebelum_10_R 226 22 23 255 - -72 CAUR Caudate_R 133 32 87 255 -73 PUTR Putamen_R 88 113 113 255 -74 PALR Pallidum_R 46 37 77 255 -75 THAR Thalamus_R 155 177 115 255 - -76 PRER Precentral_R 243 231 117 255 -77 F1R Frontal_Sup_R 212 91 65 255 -78 F1OR Frontal_Sup_Orb_R 227 141 65 255 -79 F2R Frontal_Mid_R 157 179 83 255 -80 F2OR Frontal_Mid_Orb_R 246 236 164 255 -81 F3OPR Frontal_Inf_Oper_R 192 207 213 255 -82 F3TR Frontal_Inf_Tri_R 220 245 129 255 -83 F3OR Frontal_Inf_Orb_R 183 145 163 255 -84 ROR Rolandic_Oper_R 65 39 86 255 -85 SMAR Supp_Motor_Area_R 56 34 80 255 -86 OCR Olfactory_R 136 45 97 255 -87 F1MR Frontal_Sup_Medial_R 177 194 148 255 -88 F1MOR Frontal_Med_Orb_R 226 142 69 255 -89 GRR Rectus_R 159 179 141 255 -90 INR Insula_R 111 120 63 255 -91 ACINR Cingulum_Ant_R 164 90 126 255 -92 MCINR Cingulum_Mid_R 131 156 62 255 -93 PCINR 
Cingulum_Post_R 184 161 54 255 -94 HIPR Hippocampus_R 55 34 83 255 -95 PHIPR ParaHippocampal_R 140 66 68 255 -96 AMYGR Amygdala_R 246 224 90 255 -97 V1R Calcarine_R 255 255 255 255 -98 QR Cuneus_R 111 115 68 255 -99 LINGR Lingual_R 155 65 63 255 -100 O1R Occipital_Sup_R 200 117 72 255 -101 O2R Occipital_Mid_R 186 70 94 255 -102 O3R Occipital_Inf_R 163 182 79 255 -103 FUSIR Fusiform_R 203 79 107 255 -104 POSTR Postcentral_R 97 110 64 255 -105 P1R Parietal_Sup_R 138 168 107 255 -106 P2R Parietal_Inf_R 77 78 117 255 -107 SMGR SupraMarginal_R 85 45 94 255 -108 AGR Angular_R 109 93 128 255 -109 PQR Precuneus_R 137 54 72 255 -110 PCLR Paracentral_Lobule_R 151 126 57 255 -111 HESR Heschl_R 172 189 88 255 -112 T1R Temporal_Sup_R 224 117 61 255 -113 T1PR Temporal_Pole_Sup_R 203 84 90 255 -114 T2R Temporal_Mid_R 54 45 97 255 -115 T2PR Temporal_Pole_Mid_R 161 183 97 255 -116 T3R Temporal_Inf_R 132 135 164 255 - diff --git a/share/mrtrix3/labelconvert/aal2.txt b/share/mrtrix3/labelconvert/aal2.txt deleted file mode 100644 index e34b62af5f..0000000000 --- a/share/mrtrix3/labelconvert/aal2.txt +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -0 ??? 
Unknown 0 0 0 0 - -1 FAL Precentral_L 243 231 117 255 -2 FAR Precentral_R 243 231 117 255 -3 F1_2L Frontal_Sup_2_L 212 91 65 255 -4 F1_2R Frontal_Sup_2_R 212 91 65 255 -5 F2_2L Frontal_Mid_2_L 157 179 83 255 -6 F2_2R Frontal_Mid_2_R 157 179 83 255 -7 F3OPL Frontal_Inf_Oper_L 192 207 213 255 -8 F3OPR Frontal_Inf_Oper_R 192 207 213 255 -9 F3TL Frontal_Inf_Tri_L 220 245 129 255 -10 F3TR Frontal_Inf_Tri_R 220 245 129 255 -11 F3O_2L Frontal_Inf_Orb_2_L 183 145 163 255 -12 F3O_2R Frontal_Inf_Orb_2_R 183 145 163 255 -13 ORL Rolandic_Oper_L 65 39 86 255 -14 ORR Rolandic_Oper_R 65 39 86 255 -15 SMAL Supp_Motor_Area_L 56 34 80 255 -16 SMAR Supp_Motor_Area_R 56 34 80 255 -17 COBL Olfactory_L 136 45 97 255 -18 COBR Olfactory_R 136 45 97 255 -19 FML Frontal_Sup_Medial_L 177 194 148 255 -20 FMR Frontal_Sup_Medial_R 177 194 148 255 -21 FMOL Frontal_Med_Orb_L 226 142 69 255 -22 FMOR Frontal_Med_Orb_R 226 142 69 255 -23 GRL Rectus_L 159 179 141 255 -24 GRR Rectus_R 159 179 141 255 -25 OFCMEDL OFCmed_L 228 63 161 255 -26 OFCMEDR OFCmed_R 228 63 161 255 -27 OFCANTL OFCant_L 252 255 164 255 -28 OFCANTR OFCant_R 252 255 164 255 -29 OFCPOSTL OFCpost_L 225 240 245 255 -30 OFCPOSTR OFCpost_R 225 240 245 255 -31 OFCLATL OFClat_L 190 253 164 255 -32 OFCLATR OFClat_R 190 253 164 255 -33 INL Insula_L 111 120 63 255 -34 INR Insula_R 111 120 63 255 -35 CIAL Cingulate_Ant_L 164 90 126 255 -36 CIAR Cingulate_Ant_R 164 90 126 255 -37 CINML Cingulate_Mid_L 131 156 62 255 -38 CINMR Cingulate_Mid_R 131 156 62 255 -39 CIPL Cingulate_Post_L 184 161 54 255 -40 CIPR Cingulate_Post_R 184 161 54 255 -41 HIPPOL Hippocampus_L 55 34 83 255 -42 HIPPOR Hippocampus_R 55 34 83 255 -43 PARA_HIPPOL ParaHippocampal_L 140 66 68 255 -44 PARA_HIPPOR ParaHippocampal_R 140 66 68 255 -45 AMYGDL Amygdala_L 246 224 90 255 -46 AMYGDR Amygdala_R 246 224 90 255 -47 V1L Calcarine_L 255 255 255 255 -48 V1R Calcarine_R 255 255 255 255 -49 QL Cuneus_L 111 115 68 255 -50 QR Cuneus_R 111 115 68 255 -51 LINGL Lingual_L 155 65 63 255 -52 LINGR Lingual_R 155 65 63 255 -53 O1L Occipital_Sup_L 200 117 72 255 -54 O1R Occipital_Sup_R 200 117 72 255 -55 O2L Occipital_Mid_L 186 70 94 255 -56 O2R Occipital_Mid_R 186 70 94 255 -57 O3L Occipital_Inf_L 163 182 79 255 -58 O3R Occipital_Inf_R 163 182 79 255 -59 FUSIL Fusiform_L 203 79 107 255 -60 FUSIR Fusiform_R 203 79 107 255 -61 PAL Postcentral_L 97 110 64 255 -62 PAR Postcentral_R 97 110 64 255 -63 P1L Parietal_Sup_L 138 168 107 255 -64 P1R Parietal_Sup_R 138 168 107 255 -65 P2L Parietal_Inf_L 77 78 117 255 -66 P2R Parietal_Inf_R 77 78 117 255 -67 GSML SupraMarginal_L 85 45 94 255 -68 GSMR SupraMarginal_R 85 45 94 255 -69 GAL Angular_L 109 93 128 255 -70 GAR Angular_R 109 93 128 255 -71 PQL Precuneus_L 137 54 72 255 -72 PQR Precuneus_R 137 54 72 255 -73 LPCL Paracentral_Lobule_L 151 126 57 255 -74 LPCR Paracentral_Lobule_R 151 126 57 255 -75 NCL Caudate_L 133 32 87 255 -76 NCR Caudate_R 133 32 87 255 -77 NLL Putamen_L 88 113 113 255 -78 NLR Putamen_R 88 113 113 255 -79 PALLL Pallidum_L 46 37 77 255 -80 PALLR Pallidum_R 46 37 77 255 -81 THAL Thalamus_L 155 177 115 255 -82 THAR Thalamus_R 155 177 115 255 -83 HESCHLL Heschl_L 172 189 88 255 -84 HESCHLR Heschl_R 172 189 88 255 -85 T1L Temporal_Sup_L 224 117 61 255 -86 T1R Temporal_Sup_R 224 117 61 255 -87 T1AL Temporal_Pole_Sup_L 203 84 90 255 -88 T1AR Temporal_Pole_Sup_R 203 84 90 255 -89 T2L Temporal_Mid_L 54 45 97 255 -90 T2R Temporal_Mid_R 54 45 97 255 -91 T2AL Temporal_Pole_Mid_L 161 183 97 255 -92 T2AR Temporal_Pole_Mid_R 161 183 97 255 -93 T3L Temporal_Inf_L 132 
135 164 255 -94 T3R Temporal_Inf_R 132 135 164 255 -95 CERCRU1L Cerebelum_Crus1_L 116 16 212 255 -96 CERCRU1R Cerebelum_Crus1_R 116 16 212 255 -97 CERCRU2L Cerebelum_Crus2_L 30 236 29 255 -98 CERCRU2R Cerebelum_Crus2_R 30 236 29 255 -99 CER3L Cerebelum_3_L 35 36 242 255 -100 CER3R Cerebelum_3_R 35 36 242 255 -101 CER4_5L Cerebelum_4_5_L 36 240 239 255 -102 CER4_5R Cerebelum_4_5_R 36 240 239 255 -103 CER6L Cerebelum_6_L 238 30 238 255 -104 CER6R Cerebelum_6_R 238 30 238 255 -105 CER7BL Cerebelum_7b_L 150 67 27 255 -106 CER7BR Cerebelum_7b_R 150 67 27 255 -107 CER8L Cerebelum_8_L 237 236 30 255 -108 CER8R Cerebelum_8_R 237 236 30 255 -109 CER9L Cerebelum_9_L 35 157 36 255 -110 CER9R Cerebelum_9_R 35 157 36 255 -111 CER10L Cerebelum_10_L 226 22 23 255 -112 CER10R Cerebelum_10_R 226 22 23 255 -113 VER1_2 Vermis_1_2 73 126 151 128 -114 VER3 Vermis_3 18 18 121 128 -115 VER4_5 Vermis_4_5 18 120 120 128 -116 VER6 Vermis_6 119 15 119 128 -117 VER7 Vermis_7 75 33 33 128 -118 VER8 Vermis_8 118 118 15 128 -119 VER9 Vermis_9 18 78 18 128 -120 VER10 Vermis_10 113 11 11 128 - diff --git a/share/mrtrix3/labelconvert/fs2lobes_cinginc_convert.txt b/share/mrtrix3/labelconvert/fs2lobes_cinginc_convert.txt deleted file mode 100644 index d4ebf64e37..0000000000 --- a/share/mrtrix3/labelconvert/fs2lobes_cinginc_convert.txt +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. 
- -# Lookup table for MRtrix command labelconvert -# Extracts the relevant grey matter parcellations from the default FreeSurfer segmentation (desikan_killiany), but segmentations are summarised into lobes and subcortical areas -# Cingulate nodes are included in the (approximately) relevant lobes -# This file is intended to be used for the label conversion process; the corresponding lookup table for interpreting the resulting labels is: fs2lobes_cinginc_labels.txt - - -0 Unknown - -4 ctx-lh-bankssts -1 ctx-lh-caudalanteriorcingulate -1 ctx-lh-caudalmiddlefrontal -3 ctx-lh-cuneus -4 ctx-lh-entorhinal -4 ctx-lh-fusiform -2 ctx-lh-inferiorparietal -4 ctx-lh-inferiortemporal -2 ctx-lh-isthmuscingulate -3 ctx-lh-lateraloccipital -1 ctx-lh-lateralorbitofrontal -3 ctx-lh-lingual -1 ctx-lh-medialorbitofrontal -4 ctx-lh-middletemporal -4 ctx-lh-parahippocampal -1 ctx-lh-paracentral -1 ctx-lh-parsopercularis -1 ctx-lh-parsorbitalis -1 ctx-lh-parstriangularis -3 ctx-lh-pericalcarine -2 ctx-lh-postcentral -2 ctx-lh-posteriorcingulate -1 ctx-lh-precentral -2 ctx-lh-precuneus -1 ctx-lh-rostralanteriorcingulate -1 ctx-lh-rostralmiddlefrontal -1 ctx-lh-superiorfrontal -2 ctx-lh-superiorparietal -4 ctx-lh-superiortemporal -2 ctx-lh-supramarginal -1 ctx-lh-frontalpole -4 ctx-lh-temporalpole -4 ctx-lh-transversetemporal -#x ctx-lh-insula - -5 Left-Cerebellum-Cortex - -6 Left-Thalamus -6 Left-Thalamus-Proper -6 Left-Caudate -6 Left-Putamen -6 Left-Pallidum -6 Left-Hippocampus -6 Left-Amygdala -6 Left-Accumbens-area - -7 Right-Thalamus -7 Right-Thalamus-Proper -7 Right-Caudate -7 Right-Putamen -7 Right-Pallidum -7 Right-Hippocampus -7 Right-Amygdala -7 Right-Accumbens-area - -8 Right-Cerebellum-Cortex - -9 ctx-rh-bankssts -12 ctx-rh-caudalanteriorcingulate -12 ctx-rh-caudalmiddlefrontal -10 ctx-rh-cuneus -9 ctx-rh-entorhinal -9 ctx-rh-fusiform -11 ctx-rh-inferiorparietal -9 ctx-rh-inferiortemporal -11 ctx-rh-isthmuscingulate -10 ctx-rh-lateraloccipital -12 ctx-rh-lateralorbitofrontal -10 ctx-rh-lingual -12 ctx-rh-medialorbitofrontal -9 ctx-rh-middletemporal -9 ctx-rh-parahippocampal -12 ctx-rh-paracentral -12 ctx-rh-parsopercularis -12 ctx-rh-parsorbitalis -12 ctx-rh-parstriangularis -10 ctx-rh-pericalcarine -11 ctx-rh-postcentral -11 ctx-rh-posteriorcingulate -12 ctx-rh-precentral -11 ctx-rh-precuneus -12 ctx-rh-rostralanteriorcingulate -12 ctx-rh-rostralmiddlefrontal -12 ctx-rh-superiorfrontal -11 ctx-rh-superiorparietal -9 ctx-rh-superiortemporal -11 ctx-rh-supramarginal -12 ctx-rh-frontalpole -9 ctx-rh-temporalpole -9 ctx-rh-transversetemporal -#x ctx-rh-insula - diff --git a/share/mrtrix3/labelconvert/fs2lobes_cinginc_labels.txt b/share/mrtrix3/labelconvert/fs2lobes_cinginc_labels.txt deleted file mode 100644 index 752468da97..0000000000 --- a/share/mrtrix3/labelconvert/fs2lobes_cinginc_labels.txt +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. 
- -# Lookup table file to accompany conversion file fs2lobes_cinginc_convert.txt -# Following conversion of a label image to the targets as defined in the file fs2lobes_cinginc_convert.txt, this file provides the labels for the resulting image. - - -0 Unknown - -1 L_Frontal_lobe -2 L_Parietal_lobe -3 L_Occipital_lobe -4 L_Temporal_lobe -5 L_Cerebellum -6 L_Subcortical - -7 R_Subcortical -8 R_Cerebellum -9 R_Temporal_lobe -10 R_Occipital_lobe -11 R_Parietal_lobe -12 R_Frontal_lobe - diff --git a/share/mrtrix3/labelconvert/fs2lobes_cingsep_convert.txt b/share/mrtrix3/labelconvert/fs2lobes_cingsep_convert.txt deleted file mode 100644 index 397cb981ba..0000000000 --- a/share/mrtrix3/labelconvert/fs2lobes_cingsep_convert.txt +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Lookup table for MRtrix command labelconvert -# Extracts the relevant grey matter parcellations from the default FreeSurfer segmentation (desikan_killiany), but segmentations are summarised into lobes and subcortical areas -# Cingulate regions are combined to produce their own separate 'cingulate lobe' nodes -# This file is intended to be used for the label conversion process; the corresponding lookup table for interpreting the resulting labels is: fs2lobes_cingsep_labels.txt - - -0 Unknown - -4 ctx-lh-bankssts -5 ctx-lh-caudalanteriorcingulate -1 ctx-lh-caudalmiddlefrontal -3 ctx-lh-cuneus -4 ctx-lh-entorhinal -4 ctx-lh-fusiform -2 ctx-lh-inferiorparietal -4 ctx-lh-inferiortemporal -5 ctx-lh-isthmuscingulate -3 ctx-lh-lateraloccipital -1 ctx-lh-lateralorbitofrontal -3 ctx-lh-lingual -1 ctx-lh-medialorbitofrontal -4 ctx-lh-middletemporal -4 ctx-lh-parahippocampal -1 ctx-lh-paracentral -1 ctx-lh-parsopercularis -1 ctx-lh-parsorbitalis -1 ctx-lh-parstriangularis -3 ctx-lh-pericalcarine -2 ctx-lh-postcentral -5 ctx-lh-posteriorcingulate -1 ctx-lh-precentral -2 ctx-lh-precuneus -5 ctx-lh-rostralanteriorcingulate -1 ctx-lh-rostralmiddlefrontal -1 ctx-lh-superiorfrontal -2 ctx-lh-superiorparietal -4 ctx-lh-superiortemporal -2 ctx-lh-supramarginal -1 ctx-lh-frontalpole -4 ctx-lh-temporalpole -4 ctx-lh-transversetemporal -#x ctx-lh-insula - -6 Left-Cerebellum-Cortex - -7 Left-Thalamus -7 Left-Thalamus-Proper -7 Left-Caudate -7 Left-Putamen -7 Left-Pallidum -7 Left-Hippocampus -7 Left-Amygdala -7 Left-Accumbens-area - -8 Right-Thalamus -8 Right-Thalamus-Proper -8 Right-Caudate -8 Right-Putamen -8 Right-Pallidum -8 Right-Hippocampus -8 Right-Amygdala -8 Right-Accumbens-area - -9 Right-Cerebellum-Cortex - -11 ctx-rh-bankssts -10 ctx-rh-caudalanteriorcingulate -14 ctx-rh-caudalmiddlefrontal -12 ctx-rh-cuneus -11 ctx-rh-entorhinal -11 ctx-rh-fusiform -13 ctx-rh-inferiorparietal -11 ctx-rh-inferiortemporal -10 ctx-rh-isthmuscingulate -12 ctx-rh-lateraloccipital -14 ctx-rh-lateralorbitofrontal -12 ctx-rh-lingual -14 ctx-rh-medialorbitofrontal -11 ctx-rh-middletemporal -11 
ctx-rh-parahippocampal -14 ctx-rh-paracentral -14 ctx-rh-parsopercularis -14 ctx-rh-parsorbitalis -14 ctx-rh-parstriangularis -12 ctx-rh-pericalcarine -13 ctx-rh-postcentral -10 ctx-rh-posteriorcingulate -14 ctx-rh-precentral -13 ctx-rh-precuneus -10 ctx-rh-rostralanteriorcingulate -14 ctx-rh-rostralmiddlefrontal -14 ctx-rh-superiorfrontal -13 ctx-rh-superiorparietal -11 ctx-rh-superiortemporal -13 ctx-rh-supramarginal -14 ctx-rh-frontalpole -11 ctx-rh-temporalpole -11 ctx-rh-transversetemporal -#x ctx-rh-insula - diff --git a/share/mrtrix3/labelconvert/fs2lobes_cingsep_labels.txt b/share/mrtrix3/labelconvert/fs2lobes_cingsep_labels.txt deleted file mode 100644 index a6fd8d5f51..0000000000 --- a/share/mrtrix3/labelconvert/fs2lobes_cingsep_labels.txt +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Lookup table file to accompany conversion file fs2lobes_cingsep_convert.txt -# Following conversion of a label image to the targets as defined in the file fs2lobes_cingsep_convert.txt, this file provides the labels for the resulting image. - - -0 Unknown - -1 L_Frontal_lobe -2 L_Parietal_lobe -3 L_Occipital_lobe -4 L_Temporal_lobe -5 L_Cingulate_lobe -6 L_Cerebellum -7 L_Subcortical - -8 R_Subcortical -9 R_Cerebellum -10 R_Cingulate_lobe -11 R_Temporal_lobe -12 R_Occipital_lobe -13 R_Parietal_lobe -14 R_Frontal_lobe - diff --git a/share/mrtrix3/labelconvert/fs_a2009s.txt b/share/mrtrix3/labelconvert/fs_a2009s.txt deleted file mode 100644 index 34fd2c2591..0000000000 --- a/share/mrtrix3/labelconvert/fs_a2009s.txt +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. 
- -# Lookup table for extracting the relevant grey matter parcellations from the a2009s FreeSurfer segmentation - -0 Unknown 0 0 0 0 - -1 ctx_lh_G_and_S_frontomargin 23 220 60 255 -2 ctx_lh_G_and_S_occipital_inf 23 60 180 255 -3 ctx_lh_G_and_S_paracentral 63 100 60 255 -4 ctx_lh_G_and_S_subcentral 63 20 220 255 -5 ctx_lh_G_and_S_transv_frontopol 13 0 250 255 -6 ctx_lh_G_and_S_cingul-Ant 26 60 0 255 -7 ctx_lh_G_and_S_cingul-Mid-Ant 26 60 75 255 -8 ctx_lh_G_and_S_cingul-Mid-Post 26 60 150 255 -9 ctx_lh_G_cingul-Post-dorsal 25 60 250 255 -10 ctx_lh_G_cingul-Post-ventral 60 25 25 255 -11 ctx_lh_G_cuneus 180 20 20 255 -12 ctx_lh_G_front_inf-Opercular 220 20 100 255 -13 ctx_lh_G_front_inf-Orbital 140 60 60 255 -14 ctx_lh_G_front_inf-Triangul 180 220 140 255 -15 ctx_lh_G_front_middle 140 100 180 255 -16 ctx_lh_G_front_sup 180 20 140 255 -17 ctx_lh_G_Ins_lg_and_S_cent_ins 23 10 10 255 -18 ctx_lh_G_insular_short 225 140 140 255 -19 ctx_lh_G_occipital_middle 180 60 180 255 -20 ctx_lh_G_occipital_sup 20 220 60 255 -21 ctx_lh_G_oc-temp_lat-fusifor 60 20 140 255 -22 ctx_lh_G_oc-temp_med-Lingual 220 180 140 255 -23 ctx_lh_G_oc-temp_med-Parahip 65 100 20 255 -24 ctx_lh_G_orbital 220 60 20 255 -25 ctx_lh_G_pariet_inf-Angular 20 60 220 255 -26 ctx_lh_G_pariet_inf-Supramar 100 100 60 255 -27 ctx_lh_G_parietal_sup 220 180 220 255 -28 ctx_lh_G_postcentral 20 180 140 255 -29 ctx_lh_G_precentral 60 140 180 255 -30 ctx_lh_G_precuneus 25 20 140 255 -31 ctx_lh_G_rectus 20 60 100 255 -32 ctx_lh_G_subcallosal 60 220 20 255 -33 ctx_lh_G_temp_sup-G_T_transv 60 60 220 255 -34 ctx_lh_G_temp_sup-Lateral 220 60 220 255 -35 ctx_lh_G_temp_sup-Plan_polar 65 220 60 255 -36 ctx_lh_G_temp_sup-Plan_tempo 25 140 20 255 -37 ctx_lh_G_temporal_inf 220 220 100 255 -38 ctx_lh_G_temporal_middle 180 60 60 255 -39 ctx_lh_Lat_Fis-ant-Horizont 61 20 220 255 -40 ctx_lh_Lat_Fis-ant-Vertical 61 20 60 255 -41 ctx_lh_Lat_Fis-post 61 60 100 255 -42 ctx_lh_Pole_occipital 140 20 60 255 -43 ctx_lh_Pole_temporal 220 180 20 255 -44 ctx_lh_S_calcarine 63 180 180 255 -45 ctx_lh_S_central 221 20 10 255 -46 ctx_lh_S_cingul-Marginalis 221 20 100 255 -47 ctx_lh_S_circular_insula_ant 221 60 140 255 -48 ctx_lh_S_circular_insula_inf 221 20 220 255 -49 ctx_lh_S_circular_insula_sup 61 220 220 255 -50 ctx_lh_S_collat_transv_ant 100 200 200 255 -51 ctx_lh_S_collat_transv_post 10 200 200 255 -52 ctx_lh_S_front_inf 221 220 20 255 -53 ctx_lh_S_front_middle 141 20 100 255 -54 ctx_lh_S_front_sup 61 220 100 255 -55 ctx_lh_S_interm_prim-Jensen 141 60 20 255 -56 ctx_lh_S_intrapariet_and_P_trans 143 20 220 255 -57 ctx_lh_S_oc_middle_and_Lunatus 101 60 220 255 -58 ctx_lh_S_oc_sup_and_transversal 21 20 140 255 -59 ctx_lh_S_occipital_ant 61 20 180 255 -60 ctx_lh_S_oc-temp_lat 221 140 20 255 -61 ctx_lh_S_oc-temp_med_and_Lingual 141 100 220 255 -62 ctx_lh_S_orbital_lateral 221 100 20 255 -63 ctx_lh_S_orbital_med-olfact 181 200 20 255 -64 ctx_lh_S_orbital-H_Shaped 101 20 20 255 -65 ctx_lh_S_parieto_occipital 101 100 180 255 -66 ctx_lh_S_pericallosal 181 220 20 255 -67 ctx_lh_S_postcentral 21 140 200 255 -68 ctx_lh_S_precentral-inf-part 21 20 240 255 -69 ctx_lh_S_precentral-sup-part 21 20 200 255 -70 ctx_lh_S_suborbital 21 20 60 255 -71 ctx_lh_S_subparietal 101 60 60 255 -72 ctx_lh_S_temporal_inf 21 180 180 255 -73 ctx_lh_S_temporal_sup 223 220 60 255 -74 ctx_lh_S_temporal_transverse 221 60 60 255 - -75 Left-Cerebellum-Cortex 230 148 34 255 - -76 Left-Thalamus 0 118 14 255 -76 Left-Thalamus-Proper 0 118 14 255 -77 Left-Caudate 122 186 220 255 -78 Left-Putamen 236 13 176 255 -79 
Left-Pallidum 12 48 255 255 -80 Left-Hippocampus 220 216 20 255 -81 Left-Amygdala 103 255 255 255 -82 Left-Accumbens-area 255 165 0 255 - -83 Right-Thalamus 0 118 14 255 -83 Right-Thalamus-Proper 0 118 14 255 -84 Right-Caudate 122 186 220 255 -85 Right-Putamen 236 13 176 255 -86 Right-Pallidum 13 48 255 255 -87 Right-Hippocampus 220 216 20 255 -88 Right-Amygdala 103 255 255 255 -89 Right-Accumbens-area 255 165 0 255 - -90 ctx_rh_G_and_S_frontomargin 23 220 60 255 -91 ctx_rh_G_and_S_occipital_inf 23 60 180 255 -92 ctx_rh_G_and_S_paracentral 63 100 60 255 -93 ctx_rh_G_and_S_subcentral 63 20 220 255 -94 ctx_rh_G_and_S_transv_frontopol 13 0 250 255 -95 ctx_rh_G_and_S_cingul-Ant 26 60 0 255 -96 ctx_rh_G_and_S_cingul-Mid-Ant 26 60 75 255 -97 ctx_rh_G_and_S_cingul-Mid-Post 26 60 150 255 -98 ctx_rh_G_cingul-Post-dorsal 25 60 250 255 -99 ctx_rh_G_cingul-Post-ventral 60 25 25 255 -100 ctx_rh_G_cuneus 180 20 20 255 -101 ctx_rh_G_front_inf-Opercular 220 20 100 255 -102 ctx_rh_G_front_inf-Orbital 140 60 60 255 -103 ctx_rh_G_front_inf-Triangul 180 220 140 255 -104 ctx_rh_G_front_middle 140 100 180 255 -105 ctx_rh_G_front_sup 180 20 140 255 -106 ctx_rh_G_Ins_lg_and_S_cent_ins 23 10 10 255 -107 ctx_rh_G_insular_short 225 140 140 255 -108 ctx_rh_G_occipital_middle 180 60 180 255 -109 ctx_rh_G_occipital_sup 20 220 60 255 -110 ctx_rh_G_oc-temp_lat-fusifor 60 20 140 255 -111 ctx_rh_G_oc-temp_med-Lingual 220 180 140 255 -112 ctx_rh_G_oc-temp_med-Parahip 65 100 20 255 -113 ctx_rh_G_orbital 220 60 20 255 -114 ctx_rh_G_pariet_inf-Angular 20 60 220 255 -115 ctx_rh_G_pariet_inf-Supramar 100 100 60 255 -116 ctx_rh_G_parietal_sup 220 180 220 255 -117 ctx_rh_G_postcentral 20 180 140 255 -118 ctx_rh_G_precentral 60 140 180 255 -119 ctx_rh_G_precuneus 25 20 140 255 -120 ctx_rh_G_rectus 20 60 100 255 -121 ctx_rh_G_subcallosal 60 220 20 255 -122 ctx_rh_G_temp_sup-G_T_transv 60 60 220 255 -123 ctx_rh_G_temp_sup-Lateral 220 60 220 255 -124 ctx_rh_G_temp_sup-Plan_polar 65 220 60 255 -125 ctx_rh_G_temp_sup-Plan_tempo 25 140 20 255 -126 ctx_rh_G_temporal_inf 220 220 100 255 -127 ctx_rh_G_temporal_middle 180 60 60 255 -128 ctx_rh_Lat_Fis-ant-Horizont 61 20 220 255 -129 ctx_rh_Lat_Fis-ant-Vertical 61 20 60 255 -130 ctx_rh_Lat_Fis-post 61 60 100 255 -131 ctx_rh_Pole_occipital 140 20 60 255 -132 ctx_rh_Pole_temporal 220 180 20 255 -133 ctx_rh_S_calcarine 63 180 180 255 -134 ctx_rh_S_central 221 20 10 255 -135 ctx_rh_S_cingul-Marginalis 221 20 100 255 -136 ctx_rh_S_circular_insula_ant 221 60 140 255 -137 ctx_rh_S_circular_insula_inf 221 20 220 255 -138 ctx_rh_S_circular_insula_sup 61 220 220 255 -139 ctx_rh_S_collat_transv_ant 100 200 200 255 -140 ctx_rh_S_collat_transv_post 10 200 200 255 -141 ctx_rh_S_front_inf 221 220 20 255 -142 ctx_rh_S_front_middle 141 20 100 255 -143 ctx_rh_S_front_sup 61 220 100 255 -144 ctx_rh_S_interm_prim-Jensen 141 60 20 255 -145 ctx_rh_S_intrapariet_and_P_trans 143 20 220 255 -146 ctx_rh_S_oc_middle_and_Lunatus 101 60 220 255 -147 ctx_rh_S_oc_sup_and_transversal 21 20 140 255 -148 ctx_rh_S_occipital_ant 61 20 180 255 -149 ctx_rh_S_oc-temp_lat 221 140 20 255 -150 ctx_rh_S_oc-temp_med_and_Lingual 141 100 220 255 -151 ctx_rh_S_orbital_lateral 221 100 20 255 -152 ctx_rh_S_orbital_med-olfact 181 200 20 255 -153 ctx_rh_S_orbital-H_Shaped 101 20 20 255 -154 ctx_rh_S_parieto_occipital 101 100 180 255 -155 ctx_rh_S_pericallosal 181 220 20 255 -156 ctx_rh_S_postcentral 21 140 200 255 -157 ctx_rh_S_precentral-inf-part 21 20 240 255 -158 ctx_rh_S_precentral-sup-part 21 20 200 255 -159 ctx_rh_S_suborbital 21 20 60 
255 -160 ctx_rh_S_subparietal 101 60 60 255 -161 ctx_rh_S_temporal_inf 21 180 180 255 -162 ctx_rh_S_temporal_sup 223 220 60 255 -163 ctx_rh_S_temporal_transverse 221 60 60 255 - -164 Right-Cerebellum-Cortex 230 148 34 255 - diff --git a/share/mrtrix3/labelconvert/fs_default.txt b/share/mrtrix3/labelconvert/fs_default.txt deleted file mode 100644 index 8ea6173d00..0000000000 --- a/share/mrtrix3/labelconvert/fs_default.txt +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Lookup table for extracting the relevant grey matter parcellations from the default FreeSurfer segmentation (desikan_killiany) - -0 ??? Unknown 0 0 0 0 - -1 L.BSTS ctx-lh-bankssts 25 100 40 255 -2 L.CACG ctx-lh-caudalanteriorcingulate 125 100 160 255 -3 L.CMFG ctx-lh-caudalmiddlefrontal 100 25 0 255 -4 L.CU ctx-lh-cuneus 220 20 100 255 -5 L.EC ctx-lh-entorhinal 220 20 10 255 -6 L.FG ctx-lh-fusiform 180 220 140 255 -7 L.IPG ctx-lh-inferiorparietal 220 60 220 255 -8 L.ITG ctx-lh-inferiortemporal 180 40 120 255 -9 L.ICG ctx-lh-isthmuscingulate 140 20 140 255 -10 L.LOG ctx-lh-lateraloccipital 20 30 140 255 -11 L.LOFG ctx-lh-lateralorbitofrontal 35 75 50 255 -12 L.LG ctx-lh-lingual 225 140 140 255 -13 L.MOFG ctx-lh-medialorbitofrontal 200 35 75 255 -14 L.MTG ctx-lh-middletemporal 160 100 50 255 -15 L.PHIG ctx-lh-parahippocampal 20 220 60 255 -16 L.PaCG ctx-lh-paracentral 60 220 60 255 -17 L.POP ctx-lh-parsopercularis 220 180 140 255 -18 L.POR ctx-lh-parsorbitalis 20 100 50 255 -19 L.PTR ctx-lh-parstriangularis 220 60 20 255 -20 L.PCAL ctx-lh-pericalcarine 120 100 60 255 -21 L.PoCG ctx-lh-postcentral 220 20 20 255 -22 L.PCG ctx-lh-posteriorcingulate 220 180 220 255 -23 L.PrCG ctx-lh-precentral 60 20 220 255 -24 L.PCU ctx-lh-precuneus 160 140 180 255 -25 L.RACG ctx-lh-rostralanteriorcingulate 80 20 140 255 -26 L.RMFG ctx-lh-rostralmiddlefrontal 75 50 125 255 -27 L.SFG ctx-lh-superiorfrontal 20 220 160 255 -28 L.SPG ctx-lh-superiorparietal 20 180 140 255 -29 L.STG ctx-lh-superiortemporal 140 220 220 255 -30 L.SMG ctx-lh-supramarginal 80 160 20 255 -31 L.FP ctx-lh-frontalpole 100 0 100 255 -32 L.TP ctx-lh-temporalpole 70 70 70 255 -33 L.TTG ctx-lh-transversetemporal 150 150 200 255 -34 L.IN ctx-lh-insula 255 192 32 255 - -35 L.CER Left-Cerebellum-Cortex 230 148 34 255 - -36 L.TH Left-Thalamus 0 118 14 255 -36 L.TH Left-Thalamus-Proper 0 118 14 255 -37 L.CA Left-Caudate 122 186 220 255 -38 L.PU Left-Putamen 236 13 176 255 -39 L.PA Left-Pallidum 12 48 255 255 -40 L.HI Left-Hippocampus 220 216 20 255 -41 L.AM Left-Amygdala 103 255 255 255 -42 L.AC Left-Accumbens-area 255 165 0 255 - -43 R.TH Right-Thalamus 0 118 14 255 -43 R.TH Right-Thalamus-Proper 0 118 14 255 -44 R.CA Right-Caudate 122 186 220 255 -45 R.PU Right-Putamen 236 13 176 255 -46 R.PA Right-Pallidum 13 48 255 255 -47 R.HI Right-Hippocampus 220 216 20 255 -48 R.AM Right-Amygdala 103 255 255 255 -49 R.AC Right-Accumbens-area 
255 165 0 255 - -50 R.BSTS ctx-rh-bankssts 25 100 40 255 -51 R.CACG ctx-rh-caudalanteriorcingulate 125 100 160 255 -52 R.CMFG ctx-rh-caudalmiddlefrontal 100 25 0 255 -53 R.CU ctx-rh-cuneus 220 20 100 255 -54 R.EC ctx-rh-entorhinal 220 20 10 255 -55 R.FG ctx-rh-fusiform 180 220 140 255 -56 R.IPG ctx-rh-inferiorparietal 220 60 220 255 -57 R.ITG ctx-rh-inferiortemporal 180 40 120 255 -58 R.ICG ctx-rh-isthmuscingulate 140 20 140 255 -59 R.LOG ctx-rh-lateraloccipital 20 30 140 255 -60 R.LOFG ctx-rh-lateralorbitofrontal 35 75 50 255 -61 R.LG ctx-rh-lingual 225 140 140 255 -62 R.MOFG ctx-rh-medialorbitofrontal 200 35 75 255 -63 R.MTG ctx-rh-middletemporal 160 100 50 255 -64 R.PHIG ctx-rh-parahippocampal 20 220 60 255 -65 R.PaCG ctx-rh-paracentral 60 220 60 255 -66 R.POP ctx-rh-parsopercularis 220 180 140 255 -67 R.POR ctx-rh-parsorbitalis 20 100 50 255 -68 R.PTR ctx-rh-parstriangularis 220 60 20 255 -69 R.PCAL ctx-rh-pericalcarine 120 100 60 255 -70 R.PoCG ctx-rh-postcentral 220 20 20 255 -71 R.PCG ctx-rh-posteriorcingulate 220 180 220 255 -72 R.PrCG ctx-rh-precentral 60 20 220 255 -73 R.PCU ctx-rh-precuneus 160 140 180 255 -74 R.RACG ctx-rh-rostralanteriorcingulate 80 20 140 255 -75 R.RMFG ctx-rh-rostralmiddlefrontal 75 50 125 255 -76 R.SFG ctx-rh-superiorfrontal 20 220 160 255 -77 R.SPG ctx-rh-superiorparietal 20 180 140 255 -78 R.STG ctx-rh-superiortemporal 140 220 220 255 -79 R.SMG ctx-rh-supramarginal 80 160 20 255 -80 R.FP ctx-rh-frontalpole 100 0 100 255 -81 R.TP ctx-rh-temporalpole 70 70 70 255 -82 R.TTG ctx-rh-transversetemporal 150 150 200 255 -83 R.IN ctx-rh-insula 255 192 32 255 - -84 R.CER Right-Cerebellum-Cortex 230 148 34 255 - diff --git a/share/mrtrix3/labelconvert/hcpmmp1_ordered.txt b/share/mrtrix3/labelconvert/hcpmmp1_ordered.txt deleted file mode 100644 index df6381a8fa..0000000000 --- a/share/mrtrix3/labelconvert/hcpmmp1_ordered.txt +++ /dev/null @@ -1,397 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. 
- -# Lookup table for defining the Glasser parcellation as values incrementing from 1 -# Thanks to Marlene Tahedl (https://github.com/martahedl) -# ID Labelname R G B A - 1 L_V1 63 5 255 255 - 2 L_MST 54 103 129 255 - 3 L_V6 62 78 178 255 - 4 L_V2 23 50 233 255 - 5 L_V3 15 40 226 255 - 6 L_V4 14 28 214 255 - 7 L_V8 26 47 201 255 - 8 L_4 33 178 20 255 - 9 L_3b 35 205 21 255 - 10 L_FEF 134 143 137 255 - 11 L_PEF 141 166 152 255 - 12 L_55b 62 88 47 255 - 13 L_V3A 7 57 246 255 - 14 L_RSC 153 64 133 255 - 15 L_POS2 186 86 188 255 - 16 L_V7 18 70 195 255 - 17 L_IPS1 57 114 160 255 - 18 L_FFC 57 89 117 255 - 19 L_V3B 25 47 206 255 - 20 L_LO1 0 49 184 255 - 21 L_LO2 18 33 181 255 - 22 L_PIT 34 43 171 255 - 23 L_MT 31 86 104 255 - 24 L_A1 235 19 47 255 - 25 L_PSL 122 79 56 255 - 26 L_SFL 31 49 22 255 - 27 L_PCV 138 128 153 255 - 28 L_STV 142 87 76 255 - 29 L_7Pm 149 181 188 255 - 30 L_7m 42 31 44 255 - 31 L_POS1 81 54 98 255 - 32 L_23d 127 56 91 255 - 33 L_v23ab 29 0 14 255 - 34 L_d23ab 63 17 47 255 - 35 L_31pv 54 11 42 255 - 36 L_5m 48 163 48 255 - 37 L_5mv 153 172 136 255 - 38 L_23c 205 190 194 255 - 39 L_5L 64 154 64 255 - 40 L_24dd 69 161 44 255 - 41 L_24dv 119 159 66 255 - 42 L_7AL 115 194 134 255 - 43 L_SCEF 110 146 86 255 - 44 L_6ma 144 179 135 255 - 45 L_7Am 184 243 224 255 - 46 L_7PL 169 246 237 255 - 47 L_7PC 68 177 108 255 - 48 L_LIPv 63 113 158 255 - 49 L_VIP 50 129 137 255 - 50 L_MIP 136 206 204 255 - 51 L_1 20 191 38 255 - 52 L_2 48 176 62 255 - 53 L_3a 47 216 22 255 - 54 L_6d 26 158 32 255 - 55 L_6mp 64 159 38 255 - 56 L_6v 71 168 50 255 - 57 L_p24pr 146 136 107 255 - 58 L_33pr 129 106 103 255 - 59 L_a24pr 176 133 130 255 - 60 L_p32pr 163 142 121 255 - 61 L_a24 67 12 25 255 - 62 L_d32 90 41 50 255 - 63 L_8BM 105 83 81 255 - 64 L_p32 95 32 52 255 - 65 L_10r 25 12 1 255 - 66 L_47m 96 75 66 255 - 67 L_8Av 53 54 43 255 - 68 L_8Ad 63 67 48 255 - 69 L_9m 43 35 18 255 - 70 L_8BL 34 39 20 255 - 71 L_9p 38 43 28 255 - 72 L_10d 38 35 24 255 - 73 L_8C 101 85 84 255 - 74 L_44 88 88 62 255 - 75 L_45 60 63 42 255 - 76 L_47l 51 49 31 255 - 77 L_a47r 77 76 66 255 - 78 L_6r 178 214 151 255 - 79 L_IFJa 126 119 95 255 - 80 L_IFJp 137 160 132 255 - 81 L_IFSp 97 98 87 255 - 82 L_IFSa 188 201 178 255 - 83 L_p9-46v 138 149 134 255 - 84 L_46 207 217 200 255 - 85 L_a9-46v 145 112 128 255 - 86 L_9-46d 140 117 131 255 - 87 L_9a 55 52 45 255 - 88 L_10v 14 35 14 255 - 89 L_a10p 98 75 89 255 - 90 L_10pp 55 62 58 255 - 91 L_11l 129 106 118 255 - 92 L_13l 82 60 69 255 - 93 L_OFC 40 35 31 255 - 94 L_47s 66 35 33 255 - 95 L_LIPd 155 180 202 255 - 96 L_6a 121 192 136 255 - 97 L_i6-8 90 98 83 255 - 98 L_s6-8 88 92 73 255 - 99 L_43 145 138 64 255 -100 L_OP4 114 157 49 255 -101 L_OP1 99 158 35 255 -102 L_OP2-3 145 127 52 255 -103 L_52 208 72 66 255 -104 L_RI 178 61 23 255 -105 L_PFcm 190 144 89 255 -106 L_PoI2 131 124 83 255 -107 L_TA2 130 42 27 255 -108 L_FOP4 182 148 129 255 -109 L_MI 157 116 106 255 -110 L_Pir 90 66 65 255 -111 L_AVI 141 72 83 255 -112 L_AAIC 85 35 44 255 -113 L_FOP1 160 153 88 255 -114 L_FOP3 175 137 99 255 -115 L_FOP2 99 156 51 255 -116 L_PFt 129 222 134 255 -117 L_AIP 136 220 166 255 -118 L_EC 49 48 35 255 -119 L_PreS 86 57 83 255 -120 L_H 60 57 40 255 -121 L_ProS 87 59 176 255 -122 L_PeEc 61 62 51 255 -123 L_STGa 58 42 26 255 -124 L_PBelt 195 30 5 255 -125 L_A5 99 39 6 255 -126 L_PHA1 69 50 75 255 -127 L_PHA3 96 102 113 255 -128 L_STSda 77 51 34 255 -129 L_STSdp 73 57 39 255 -130 L_STSvp 71 59 57 255 -131 L_TGd 35 34 23 255 -132 L_TE1a 11 14 0 255 -133 L_TE1p 96 102 94 255 -134 L_TE2a 67 64 58 255 
-135 L_TF 64 72 56 255 -136 L_TE2p 105 128 117 255 -137 L_PHT 217 252 223 255 -138 L_PH 88 131 163 255 -139 L_TPOJ1 125 90 64 255 -140 L_TPOJ2 156 162 138 255 -141 L_TPOJ3 121 148 145 255 -142 L_DVT 120 112 190 255 -143 L_PGp 155 190 228 255 -144 L_IP2 176 166 174 255 -145 L_IP1 118 121 128 255 -146 L_IP0 112 160 207 255 -147 L_PFop 187 205 145 255 -148 L_PF 255 255 226 255 -149 L_PFm 133 105 114 255 -150 L_PGi 53 62 46 255 -151 L_PGs 62 67 57 255 -152 L_V6A 41 89 166 255 -153 L_VMV1 62 61 155 255 -154 L_VMV3 49 53 159 255 -155 L_PHA2 88 90 89 255 -156 L_V4t 14 79 128 255 -157 L_FST 92 157 153 255 -158 L_V3CD 15 46 182 255 -159 L_LO3 54 94 160 255 -160 L_VMV2 67 62 161 255 -161 L_31pd 56 22 45 255 -162 L_31a 143 105 133 255 -163 L_VVC 53 61 124 255 -164 L_25 36 24 14 255 -165 L_s32 46 35 21 255 -166 L_pOFC 76 48 53 255 -167 L_PoI1 164 104 97 255 -168 L_Ig 110 123 36 255 -169 L_FOP5 163 102 105 255 -170 L_p10p 89 64 76 255 -171 L_p47r 126 123 112 255 -172 L_TGv 46 51 42 255 -173 L_MBelt 187 30 24 255 -174 L_LBelt 233 18 27 255 -175 L_A4 152 39 4 255 -176 L_STSva 40 33 22 255 -177 L_TE1m 59 45 43 255 -178 L_PI 122 50 50 255 -179 L_a32pr 130 63 87 255 -180 L_p24 123 35 74 255 -181 R_V1 63 5 255 255 -182 R_MST 54 103 129 255 -183 R_V6 62 78 178 255 -184 R_V2 23 50 233 255 -185 R_V3 15 40 226 255 -186 R_V4 14 28 214 255 -187 R_V8 26 47 201 255 -188 R_4 33 178 20 255 -189 R_3b 35 205 21 255 -190 R_FEF 134 143 137 255 -191 R_PEF 141 166 152 255 -192 R_55b 62 88 47 255 -193 R_V3A 7 57 246 255 -194 R_RSC 153 64 133 255 -195 R_POS2 186 86 188 255 -196 R_V7 18 70 195 255 -197 R_IPS1 57 114 160 255 -198 R_FFC 57 89 117 255 -199 R_V3B 25 47 206 255 -200 R_LO1 0 49 184 255 -201 R_LO2 18 33 181 255 -202 R_PIT 34 43 171 255 -203 R_MT 31 86 104 255 -204 R_A1 235 19 47 255 -205 R_PSL 122 79 56 255 -206 R_SFL 31 49 22 255 -207 R_PCV 138 128 153 255 -208 R_STV 142 87 76 255 -209 R_7Pm 149 181 188 255 -210 R_7m 42 31 44 255 -211 R_POS1 81 54 98 255 -212 R_23d 127 56 91 255 -213 R_v23ab 29 0 14 255 -214 R_d23ab 63 17 47 255 -215 R_31pv 54 11 42 255 -216 R_5m 48 163 48 255 -217 R_5mv 153 172 136 255 -218 R_23c 205 190 194 255 -219 R_5L 64 154 64 255 -220 R_24dd 69 161 44 255 -221 R_24dv 119 159 66 255 -222 R_7AL 115 194 134 255 -223 R_SCEF 110 146 86 255 -224 R_6ma 144 179 135 255 -225 R_7Am 184 243 224 255 -226 R_7PL 169 246 237 255 -227 R_7PC 68 177 108 255 -228 R_LIPv 63 113 158 255 -229 R_VIP 50 129 137 255 -230 R_MIP 136 206 204 255 -231 R_1 20 191 38 255 -232 R_2 48 176 62 255 -233 R_3a 47 216 22 255 -234 R_6d 26 158 32 255 -235 R_6mp 64 159 38 255 -236 R_6v 71 168 50 255 -237 R_p24pr 146 136 107 255 -238 R_33pr 129 106 103 255 -239 R_a24pr 176 133 130 255 -240 R_p32pr 163 142 121 255 -241 R_a24 67 12 25 255 -242 R_d32 90 41 50 255 -243 R_8BM 105 83 81 255 -244 R_p32 95 32 52 255 -245 R_10r 25 12 1 255 -246 R_47m 96 75 66 255 -247 R_8Av 53 54 43 255 -248 R_8Ad 63 67 48 255 -249 R_9m 43 35 18 255 -250 R_8BL 34 39 20 255 -251 R_9p 38 43 28 255 -252 R_10d 38 35 24 255 -253 R_8C 101 85 84 255 -254 R_44 88 88 62 255 -255 R_45 60 63 42 255 -256 R_47l 51 49 31 255 -257 R_a47r 77 76 66 255 -258 R_6r 178 214 151 255 -259 R_IFJa 126 119 95 255 -260 R_IFJp 137 160 132 255 -261 R_IFSp 97 98 87 255 -262 R_IFSa 188 201 178 255 -263 R_p9-46v 138 149 134 255 -264 R_46 207 217 200 255 -265 R_a9-46v 145 112 128 255 -266 R_9-46d 140 117 131 255 -267 R_9a 55 52 45 255 -268 R_10v 14 35 14 255 -269 R_a10p 98 75 89 255 -270 R_10pp 55 62 58 255 -271 R_11l 129 106 118 255 -272 R_13l 82 60 69 255 -273 R_OFC 40 35 31 255 -274 R_47s 
66 35 33 255 -275 R_LIPd 155 180 202 255 -276 R_6a 121 192 136 255 -277 R_i6-8 90 98 83 255 -278 R_s6-8 88 92 73 255 -279 R_43 145 138 64 255 -280 R_OP4 114 157 49 255 -281 R_OP1 99 158 35 255 -282 R_OP2-3 145 127 52 255 -283 R_52 208 72 66 255 -284 R_RI 178 61 23 255 -285 R_PFcm 190 144 89 255 -286 R_PoI2 131 124 83 255 -287 R_TA2 130 42 27 255 -288 R_FOP4 182 148 129 255 -289 R_MI 157 116 106 255 -290 R_Pir 90 66 65 255 -291 R_AVI 141 72 83 255 -292 R_AAIC 85 35 44 255 -293 R_FOP1 160 153 88 255 -294 R_FOP3 175 137 99 255 -295 R_FOP2 99 156 51 255 -296 R_PFt 129 222 134 255 -297 R_AIP 136 220 166 255 -298 R_EC 49 48 35 255 -299 R_PreS 86 57 83 255 -300 R_H 60 57 40 255 -301 R_ProS 87 59 176 255 -302 R_PeEc 61 62 51 255 -303 R_STGa 58 42 26 255 -304 R_PBelt 195 30 5 255 -305 R_A5 99 39 6 255 -306 R_PHA1 69 50 75 255 -307 R_PHA3 96 102 113 255 -308 R_STSda 77 51 34 255 -309 R_STSdp 73 57 39 255 -310 R_STSvp 71 59 57 255 -311 R_TGd 35 34 23 255 -312 R_TE1a 11 14 0 255 -313 R_TE1p 96 102 94 255 -314 R_TE2a 67 64 58 255 -315 R_TF 64 72 56 255 -316 R_TE2p 105 128 117 255 -317 R_PHT 217 252 223 255 -318 R_PH 88 131 163 255 -319 R_TPOJ1 125 90 64 255 -320 R_TPOJ2 156 162 138 255 -321 R_TPOJ3 121 148 145 255 -322 R_DVT 120 112 190 255 -323 R_PGp 155 190 228 255 -324 R_IP2 176 166 174 255 -325 R_IP1 118 121 128 255 -326 R_IP0 112 160 207 255 -327 R_PFop 187 205 145 255 -328 R_PF 255 255 226 255 -329 R_PFm 133 105 114 255 -330 R_PGi 53 62 46 255 -331 R_PGs 62 67 57 255 -332 R_V6A 41 89 166 255 -333 R_VMV1 62 61 155 255 -334 R_VMV3 49 53 159 255 -335 R_PHA2 88 90 89 255 -336 R_V4t 14 79 128 255 -337 R_FST 92 157 153 255 -338 R_V3CD 15 46 182 255 -339 R_LO3 54 94 160 255 -340 R_VMV2 67 62 161 255 -341 R_31pd 56 22 45 255 -342 R_31a 143 105 133 255 -343 R_VVC 53 61 124 255 -344 R_25 36 24 14 255 -345 R_s32 46 35 21 255 -346 R_pOFC 76 48 53 255 -347 R_PoI1 164 104 97 255 -348 R_Ig 110 123 36 255 -349 R_FOP5 163 102 105 255 -350 R_p10p 89 64 76 255 -351 R_p47r 126 123 112 255 -352 R_TGv 46 51 42 255 -353 R_MBelt 187 30 24 255 -354 R_LBelt 233 18 27 255 -355 R_A4 152 39 4 255 -356 R_STSva 40 33 22 255 -357 R_TE1m 59 45 43 255 -358 R_PI 122 50 50 255 -359 R_a32pr 130 63 87 255 -360 R_p24 123 35 74 255 -361 L_Cerebellum 230 148 34 255 -362 L_Thalamus 0 118 14 255 -363 L_Caudate 122 186 220 255 -364 L_Putamen 236 13 176 255 -365 L_Pallidum 12 48 255 255 -366 L_Hippocampus 220 216 20 255 -367 L_Amygdala 103 255 255 255 -368 L_Accumbens 255 165 0 255 -369 L_VentralDC 165 42 42 255 -370 R_Cerebellum 230 148 34 255 -371 R_Thalamus 0 118 14 255 -372 R_Caudate 122 186 220 255 -373 R_Putamen 236 13 176 255 -374 R_Pallidum 13 48 255 255 -375 R_Hippocampus 220 216 20 255 -376 R_Amygdala 103 255 255 255 -377 R_Accumbens 255 165 0 255 -378 R_VentralDC 165 42 42 255 -379 Brain-Stem 119 159 176 255 diff --git a/share/mrtrix3/labelconvert/hcpmmp1_original.txt b/share/mrtrix3/labelconvert/hcpmmp1_original.txt deleted file mode 100644 index be2f778515..0000000000 --- a/share/mrtrix3/labelconvert/hcpmmp1_original.txt +++ /dev/null @@ -1,406 +0,0 @@ -# Copyright (c) 2008-2024 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Lookup table for grey matter parcellations as provided by the Glasser atlas -# This tableprovides parcel names and colours as they are provded from e.g. the following command: -# mri_aparc2aseg --old-ribbon --annotation glasser --s hmsc001-01 --o glasser.mgz -# Thanks to Marlene Tahedl (https://github.com/martahedl) - -# Subcortical parcellation according to the original FreeSurferColorLUT.txt -# ID Labelname R G B A -8 L_Cerebellum 230 148 34 255 -10 L_Thalamus 0 118 14 255 -11 L_Caudate 122 186 220 255 -12 L_Putamen 236 13 176 255 -13 L_Pallidum 12 48 255 255 -16 Brain-Stem 119 159 176 255 -17 L_Hippocampus 220 216 20 255 -18 L_Amygdala 103 255 255 255 -26 L_Accumbens 255 165 0 255 -28 L_VentralDC 165 42 42 255 -47 R_Cerebellum 230 148 34 255 -49 R_Thalamus 0 118 14 255 -50 R_Caudate 122 186 220 255 -51 R_Putamen 236 13 176 255 -52 R_Pallidum 13 48 255 255 -53 R_Hippocampus 220 216 20 255 -54 R_Amygdala 103 255 255 255 -58 R_Accumbens 255 165 0 255 -60 R_VentralDC 165 42 42 255 - -# Cortical parcellation -# ID Labelname R G B A -1000 UNKNOWN 0 0 0 0 -1001 L_V1 63 5 255 255 -1002 L_MST 54 103 129 255 -1003 L_V6 62 78 178 255 -1004 L_V2 23 50 233 255 -1005 L_V3 15 40 226 255 -1006 L_V4 14 28 214 255 -1007 L_V8 26 47 201 255 -1008 L_4 33 178 20 255 -1009 L_3b 35 205 21 255 -1010 L_FEF 134 143 137 255 -1011 L_PEF 141 166 152 255 -1012 L_55b 62 88 47 255 -1013 L_V3A 7 57 246 255 -1014 L_RSC 153 64 133 255 -1015 L_POS2 186 86 188 255 -1016 L_V7 18 70 195 255 -1017 L_IPS1 57 114 160 255 -1018 L_FFC 57 89 117 255 -1019 L_V3B 25 47 206 255 -1020 L_LO1 0 49 184 255 -1021 L_LO2 18 33 181 255 -1022 L_PIT 34 43 171 255 -1023 L_MT 31 86 104 255 -1024 L_A1 235 19 47 255 -1025 L_PSL 122 79 56 255 -1026 L_SFL 31 49 22 255 -1027 L_PCV 138 128 153 255 -1028 L_STV 142 87 76 255 -1029 L_7Pm 149 181 188 255 -1030 L_7m 42 31 44 255 -1031 L_POS1 81 54 98 255 -1032 L_23d 127 56 91 255 -1033 L_v23ab 29 0 14 255 -1034 L_d23ab 63 17 47 255 -1035 L_31pv 54 11 42 255 -1036 L_5m 48 163 48 255 -1037 L_5mv 153 172 136 255 -1038 L_23c 205 190 194 255 -1039 L_5L 64 154 64 255 -1040 L_24dd 69 161 44 255 -1041 L_24dv 119 159 66 255 -1042 L_7AL 115 194 134 255 -1043 L_SCEF 110 146 86 255 -1044 L_6ma 144 179 135 255 -1045 L_7Am 184 243 224 255 -1046 L_7PL 169 246 237 255 -1047 L_7PC 68 177 108 255 -1048 L_LIPv 63 113 158 255 -1049 L_VIP 50 129 137 255 -1050 L_MIP 136 206 204 255 -1051 L_1 20 191 38 255 -1052 L_2 48 176 62 255 -1053 L_3a 47 216 22 255 -1054 L_6d 26 158 32 255 -1055 L_6mp 64 159 38 255 -1056 L_6v 71 168 50 255 -1057 L_p24pr 146 136 107 255 -1058 L_33pr 129 106 103 255 -1059 L_a24pr 176 133 130 255 -1060 L_p32pr 163 142 121 255 -1061 L_a24 67 12 25 255 -1062 L_d32 90 41 50 255 -1063 L_8BM 105 83 81 255 -1064 L_p32 95 32 52 255 -1065 L_10r 25 12 1 255 -1066 L_47m 96 75 66 255 -1067 L_8Av 53 54 43 255 -1068 L_8Ad 63 67 48 255 -1069 L_9m 43 35 18 255 -1070 L_8BL 34 39 20 255 -1071 L_9p 38 43 28 255 -1072 L_10d 38 35 24 255 -1073 L_8C 101 85 84 255 -1074 L_44 88 88 62 255 -1075 L_45 60 63 42 255 -1076 L_47l 51 49 31 255 -1077 L_a47r 77 76 66 255 -1078 L_6r 178 214 151 255 -1079 L_IFJa 126 119 95 255 
-1080 L_IFJp 137 160 132 255 -1081 L_IFSp 97 98 87 255 -1082 L_IFSa 188 201 178 255 -1083 L_p9-46v 138 149 134 255 -1084 L_46 207 217 200 255 -1085 L_a9-46v 145 112 128 255 -1086 L_9-46d 140 117 131 255 -1087 L_9a 55 52 45 255 -1088 L_10v 14 35 14 255 -1089 L_a10p 98 75 89 255 -1090 L_10pp 55 62 58 255 -1091 L_11l 129 106 118 255 -1092 L_13l 82 60 69 255 -1093 L_OFC 40 35 31 255 -1094 L_47s 66 35 33 255 -1095 L_LIPd 155 180 202 255 -1096 L_6a 121 192 136 255 -1097 L_i6-8 90 98 83 255 -1098 L_s6-8 88 92 73 255 -1099 L_43 145 138 64 255 -1100 L_OP4 114 157 49 255 -1101 L_OP1 99 158 35 255 -1102 L_OP2-3 145 127 52 255 -1103 L_52 208 72 66 255 -1104 L_RI 178 61 23 255 -1105 L_PFcm 190 144 89 255 -1106 L_PoI2 131 124 83 255 -1107 L_TA2 130 42 27 255 -1108 L_FOP4 182 148 129 255 -1109 L_MI 157 116 106 255 -1110 L_Pir 90 66 65 255 -1111 L_AVI 141 72 83 255 -1112 L_AAIC 85 35 44 255 -1113 L_FOP1 160 153 88 255 -1114 L_FOP3 175 137 99 255 -1115 L_FOP2 99 156 51 255 -1116 L_PFt 129 222 134 255 -1117 L_AIP 136 220 166 255 -1118 L_EC 49 48 35 255 -1119 L_PreS 86 57 83 255 -1120 L_H 60 57 40 255 -1121 L_ProS 87 59 176 255 -1122 L_PeEc 61 62 51 255 -1123 L_STGa 58 42 26 255 -1124 L_PBelt 195 30 5 255 -1125 L_A5 99 39 6 255 -1126 L_PHA1 69 50 75 255 -1127 L_PHA3 96 102 113 255 -1128 L_STSda 77 51 34 255 -1129 L_STSdp 73 57 39 255 -1130 L_STSvp 71 59 57 255 -1131 L_TGd 35 34 23 255 -1132 L_TE1a 11 14 0 255 -1133 L_TE1p 96 102 94 255 -1134 L_TE2a 67 64 58 255 -1135 L_TF 64 72 56 255 -1136 L_TE2p 105 128 117 255 -1137 L_PHT 217 252 223 255 -1138 L_PH 88 131 163 255 -1139 L_TPOJ1 125 90 64 255 -1140 L_TPOJ2 156 162 138 255 -1141 L_TPOJ3 121 148 145 255 -1142 L_DVT 120 112 190 255 -1143 L_PGp 155 190 228 255 -1144 L_IP2 176 166 174 255 -1145 L_IP1 118 121 128 255 -1146 L_IP0 112 160 207 255 -1147 L_PFop 187 205 145 255 -1148 L_PF 255 255 226 255 -1149 L_PFm 133 105 114 255 -1150 L_PGi 53 62 46 255 -1151 L_PGs 62 67 57 255 -1152 L_V6A 41 89 166 255 -1153 L_VMV1 62 61 155 255 -1154 L_VMV3 49 53 159 255 -1155 L_PHA2 88 90 89 255 -1156 L_V4t 14 79 128 255 -1157 L_FST 92 157 153 255 -1158 L_V3CD 15 46 182 255 -1159 L_LO3 54 94 160 255 -1160 L_VMV2 67 62 161 255 -1161 L_31pd 56 22 45 255 -1162 L_31a 143 105 133 255 -1163 L_VVC 53 61 124 255 -1164 L_25 36 24 14 255 -1165 L_s32 46 35 21 255 -1166 L_pOFC 76 48 53 255 -1167 L_PoI1 164 104 97 255 -1168 L_Ig 110 123 36 255 -1169 L_FOP5 163 102 105 255 -1170 L_p10p 89 64 76 255 -1171 L_p47r 126 123 112 255 -1172 L_TGv 46 51 42 255 -1173 L_MBelt 187 30 24 255 -1174 L_LBelt 233 18 27 255 -1175 L_A4 152 39 4 255 -1176 L_STSva 40 33 22 255 -1177 L_TE1m 59 45 43 255 -1178 L_PI 122 50 50 255 -1179 L_a32pr 130 63 87 255 -1180 L_p24 123 35 74 255 -2001 R_V1 63 5 255 255 -2002 R_MST 54 103 129 255 -2003 R_V6 62 78 178 255 -2004 R_V2 23 50 233 255 -2005 R_V3 15 40 226 255 -2006 R_V4 14 28 214 255 -2007 R_V8 26 47 201 255 -2008 R_4 33 178 20 255 -2009 R_3b 35 205 21 255 -2010 R_FEF 134 143 137 255 -2011 R_PEF 141 166 152 255 -2012 R_55b 62 88 47 255 -2013 R_V3A 7 57 246 255 -2014 R_RSC 153 64 133 255 -2015 R_POS2 186 86 188 255 -2016 R_V7 18 70 195 255 -2017 R_IPS1 57 114 160 255 -2018 R_FFC 57 89 117 255 -2019 R_V3B 25 47 206 255 -2020 R_LO1 0 49 184 255 -2021 R_LO2 18 33 181 255 -2022 R_PIT 34 43 171 255 -2023 R_MT 31 86 104 255 -2024 R_A1 235 19 47 255 -2025 R_PSL 122 79 56 255 -2026 R_SFL 31 49 22 255 -2027 R_PCV 138 128 153 255 -2028 R_STV 142 87 76 255 -2029 R_7Pm 149 181 188 255 -2030 R_7m 42 31 44 255 -2031 R_POS1 81 54 98 255 -2032 R_23d 127 56 91 255 -2033 R_v23ab 29 0 14 
-2034 R_d23ab 63 17 47 255
-2035 R_31pv 54 11 42 255
-2036 R_5m 48 163 48 255
-2037 R_5mv 153 172 136 255
-2038 R_23c 205 190 194 255
-2039 R_5L 64 154 64 255
-2040 R_24dd 69 161 44 255
-2041 R_24dv 119 159 66 255
-2042 R_7AL 115 194 134 255
-2043 R_SCEF 110 146 86 255
-2044 R_6ma 144 179 135 255
-2045 R_7Am 184 243 224 255
-2046 R_7PL 169 246 237 255
-2047 R_7PC 68 177 108 255
-2048 R_LIPv 63 113 158 255
-2049 R_VIP 50 129 137 255
-2050 R_MIP 136 206 204 255
-2051 R_1 20 191 38 255
-2052 R_2 48 176 62 255
-2053 R_3a 47 216 22 255
-2054 R_6d 26 158 32 255
-2055 R_6mp 64 159 38 255
-2056 R_6v 71 168 50 255
-2057 R_p24pr 146 136 107 255
-2058 R_33pr 129 106 103 255
-2059 R_a24pr 176 133 130 255
-2060 R_p32pr 163 142 121 255
-2061 R_a24 67 12 25 255
-2062 R_d32 90 41 50 255
-2063 R_8BM 105 83 81 255
-2064 R_p32 95 32 52 255
-2065 R_10r 25 12 1 255
-2066 R_47m 96 75 66 255
-2067 R_8Av 53 54 43 255
-2068 R_8Ad 63 67 48 255
-2069 R_9m 43 35 18 255
-2070 R_8BL 34 39 20 255
-2071 R_9p 38 43 28 255
-2072 R_10d 38 35 24 255
-2073 R_8C 101 85 84 255
-2074 R_44 88 88 62 255
-2075 R_45 60 63 42 255
-2076 R_47l 51 49 31 255
-2077 R_a47r 77 76 66 255
-2078 R_6r 178 214 151 255
-2079 R_IFJa 126 119 95 255
-2080 R_IFJp 137 160 132 255
-2081 R_IFSp 97 98 87 255
-2082 R_IFSa 188 201 178 255
-2083 R_p9-46v 138 149 134 255
-2084 R_46 207 217 200 255
-2085 R_a9-46v 145 112 128 255
-2086 R_9-46d 140 117 131 255
-2087 R_9a 55 52 45 255
-2088 R_10v 14 35 14 255
-2089 R_a10p 98 75 89 255
-2090 R_10pp 55 62 58 255
-2091 R_11l 129 106 118 255
-2092 R_13l 82 60 69 255
-2093 R_OFC 40 35 31 255
-2094 R_47s 66 35 33 255
-2095 R_LIPd 155 180 202 255
-2096 R_6a 121 192 136 255
-2097 R_i6-8 90 98 83 255
-2098 R_s6-8 88 92 73 255
-2099 R_43 145 138 64 255
-2100 R_OP4 114 157 49 255
-2101 R_OP1 99 158 35 255
-2102 R_OP2-3 145 127 52 255
-2103 R_52 208 72 66 255
-2104 R_RI 178 61 23 255
-2105 R_PFcm 190 144 89 255
-2106 R_PoI2 131 124 83 255
-2107 R_TA2 130 42 27 255
-2108 R_FOP4 182 148 129 255
-2109 R_MI 157 116 106 255
-2110 R_Pir 90 66 65 255
-2111 R_AVI 141 72 83 255
-2112 R_AAIC 85 35 44 255
-2113 R_FOP1 160 153 88 255
-2114 R_FOP3 175 137 99 255
-2115 R_FOP2 99 156 51 255
-2116 R_PFt 129 222 134 255
-2117 R_AIP 136 220 166 255
-2118 R_EC 49 48 35 255
-2119 R_PreS 86 57 83 255
-2120 R_H 60 57 40 255
-2121 R_ProS 87 59 176 255
-2122 R_PeEc 61 62 51 255
-2123 R_STGa 58 42 26 255
-2124 R_PBelt 195 30 5 255
-2125 R_A5 99 39 6 255
-2126 R_PHA1 69 50 75 255
-2127 R_PHA3 96 102 113 255
-2128 R_STSda 77 51 34 255
-2129 R_STSdp 73 57 39 255
-2130 R_STSvp 71 59 57 255
-2131 R_TGd 35 34 23 255
-2132 R_TE1a 11 14 0 255
-2133 R_TE1p 96 102 94 255
-2134 R_TE2a 67 64 58 255
-2135 R_TF 64 72 56 255
-2136 R_TE2p 105 128 117 255
-2137 R_PHT 217 252 223 255
-2138 R_PH 88 131 163 255
-2139 R_TPOJ1 125 90 64 255
-2140 R_TPOJ2 156 162 138 255
-2141 R_TPOJ3 121 148 145 255
-2142 R_DVT 120 112 190 255
-2143 R_PGp 155 190 228 255
-2144 R_IP2 176 166 174 255
-2145 R_IP1 118 121 128 255
-2146 R_IP0 112 160 207 255
-2147 R_PFop 187 205 145 255
-2148 R_PF 255 255 226 255
-2149 R_PFm 133 105 114 255
-2150 R_PGi 53 62 46 255
-2151 R_PGs 62 67 57 255
-2152 R_V6A 41 89 166 255
-2153 R_VMV1 62 61 155 255
-2154 R_VMV3 49 53 159 255
-2155 R_PHA2 88 90 89 255
-2156 R_V4t 14 79 128 255
-2157 R_FST 92 157 153 255
-2158 R_V3CD 15 46 182 255
-2159 R_LO3 54 94 160 255
-2160 R_VMV2 67 62 161 255
-2161 R_31pd 56 22 45 255
-2162 R_31a 143 105 133 255
-2163 R_VVC 53 61 124 255
-2164 R_25 36 24 14 255
-2165 R_s32 46 35 21 255
-2166 R_pOFC 76 48 53 255
-2167 R_PoI1 164 104 97 255
-2168 R_Ig 110 123 36 255
-2169 R_FOP5 163 102 105 255
-2170 R_p10p 89 64 76 255
-2171 R_p47r 126 123 112 255
-2172 R_TGv 46 51 42 255
-2173 R_MBelt 187 30 24 255
-2174 R_LBelt 233 18 27 255
-2175 R_A4 152 39 4 255
-2176 R_STSva 40 33 22 255
-2177 R_TE1m 59 45 43 255
-2178 R_PI 122 50 50 255
-2179 R_a32pr 130 63 87 255
-2180 R_p24 123 35 74 255
-
diff --git a/share/mrtrix3/labelconvert/lpba40.txt b/share/mrtrix3/labelconvert/lpba40.txt
deleted file mode 100644
index 04491ca150..0000000000
--- a/share/mrtrix3/labelconvert/lpba40.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (c) 2008-2024 the MRtrix3 contributors.
-#
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#
-# Covered Software is provided under this License on an "as is"
-# basis, without warranty of any kind, either expressed, implied, or
-# statutory, including, without limitation, warranties that the
-# Covered Software is free of defects, merchantable, fit for a
-# particular purpose or non-infringing.
-# See the Mozilla Public License v. 2.0 for more details.
-#
-# For more details, see http://www.mrtrix.org/.
-
-# Lookup table for converting LPBA40 parcellation into a numerical scheme appropriate for connectome construction
-
-#Index Name Red Green Blue Alpha
- 0 background 0 0 0 0
- 1 L_superior_frontal_gyrus 255 128 128 255
- 2 L_middle_frontal_gyrus 255 0 0 255
- 3 L_inferior_frontal_gyrus 128 127 0 255
- 4 L_precentral_gyrus 255 255 0 255
- 5 L_middle_orbitofrontal_gyrus 255 0 128 255
- 6 L_lateral_orbitofrontal_gyrus 128 0 255 255
- 7 L_gyrus_rectus 128 128 255 255
- 8 L_postcentral_gyrus 255 0 0 255
- 9 L_superior_parietal_gyrus 127 128 0 255
-10 L_supramarginal_gyrus 255 255 0 255
-11 L_angular_gyrus 255 128 255 255
-12 L_precuneus 128 255 128 255
-13 L_superior_occipital_gyrus 127 127 255 255
-14 L_middle_occipital_gyrus 255 127 128 255
-15 L_inferior_occipital_gyrus 160 0 0 255
-16 L_cuneus 255 255 127 255
-17 L_superior_temporal_gyrus 128 0 64 255
-18 L_middle_temporal_gyrus 92 92 237 255
-19 L_inferior_temporal_gyrus 17 207 255 255
-20 L_parahippocampal_gyrus 105 180 31 255
-21 L_lingual_gyrus 255 255 0 255
-22 L_fusiform_gyrus 35 231 216 255
-23 L_insular_cortex 128 255 0 255
-24 L_cingulate_gyrus 64 128 255 255
-25 L_caudate 48 255 0 255
-26 L_putamen 128 160 48 255
-27 L_hippocampus 133 10 31 255
-28 cerebellum 240 128 48 255
-29 brainstem 128 255 0 255
-30 R_caudate 255 0 128 255
-31 R_putamen 255 128 0 255
-32 R_hippocampus 255 255 0 255
-33 R_superior_frontal_gyrus 255 128 0 255
-34 R_middle_frontal_gyrus 255 128 255 255
-35 R_inferior_frontal_gyrus 255 0 0 255
-36 R_precentral_gyrus 128 0 0 255
-37 R_middle_orbitofrontal_gyrus 128 128 64 255
-38 R_lateral_orbitofrontal_gyrus 255 128 127 255
-39 R_gyrus_rectus 255 255 0 255
-40 R_postcentral_gyrus 128 128 64 255
-41 R_superior_parietal_gyrus 255 255 0 255
-42 R_supramarginal_gyrus 128 128 0 255
-43 R_angular_gyrus 64 0 128 255
-44 R_precuneus 255 0 0 255
-45 R_superior_occipital_gyrus 255 127 0 255
-46 R_middle_occipital_gyrus 127 0 0 255
-47 R_inferior_occipital_gyrus 83 166 166 255
-48 R_cuneus 128 0 0 255
-49 R_superior_temporal_gyrus 128 255 0 255
-50 R_middle_temporal_gyrus 192 128 255 255
-51 R_inferior_temporal_gyrus 255 0 128 255
-52 R_parahippocampal_gyrus 64 255 0 255
-53 R_lingual_gyrus 255 255 0 255
-54 R_fusiform_gyrus 255 0 0 255
-55 R_insular_cortex 128 255 255 255
-56 R_cingulate_gyrus 255 0 255 255
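As its header comment states, the table above is intended for use with MRtrix's `labelconvert` command, which maps the label values of a parcellation image onto the consecutive indices listed in such a file. A typical invocation might look something like the following sketch, in which the image and input-LUT file names are purely illustrative:

    $ labelconvert lpba40_parcellation.mif LPBA40_original_LUT.txt lpba40.txt nodes.mif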
diff --git a/share/mrtrix3/labelsgmfix/FreeSurferSGM.txt b/share/mrtrix3/labelsgmfix/FreeSurferSGM.txt
deleted file mode 100644
index fe73fa32b3..0000000000
--- a/share/mrtrix3/labelsgmfix/FreeSurferSGM.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright (c) 2008-2024 the MRtrix3 contributors.
-#
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#
-# Covered Software is provided under this License on an "as is"
-# basis, without warranty of any kind, either expressed, implied, or
-# statutory, including, without limitation, warranties that the
-# Covered Software is free of defects, merchantable, fit for a
-# particular purpose or non-infringing.
-# See the Mozilla Public License v. 2.0 for more details.
-#
-# For more details, see http://www.mrtrix.org/.
-
-# This file is used to extract sub-cortical structures from FreeSurfer segmentations
-
-# Previously, the labelsgmfix command was compatible with both FreeSurfer LUT files and connectome config files, as the first two columns were identical. Now, this command ideally needs to be compatible with LUT files in any format.
-
-# This is solved in the following way:
-# * Create a new image for sub-cortical structures using the output of FIRST and these indices.
-# * Convert that image to the indices of the original lookup table using labelconvert.
-# * Use mrstats to 'discover' what index each structure has been assigned to.
-# * For each index in the original image, strip out any voxels corresponding to any of these values.
-# * Also zero any voxels that coincide with any of the new SGM estimates.
-# * Add the re-indexed SGM image to the stripped input image.
-
-
-1 Left-Thalamus-Proper
-2 Left-Caudate
-3 Left-Putamen
-4 Left-Pallidum
-5 Left-Accumbens-area
-
-6 Right-Thalamus-Proper
-7 Right-Caudate
-8 Right-Putamen
-9 Right-Pallidum
-10 Right-Accumbens-area
-
-11 Left-Hippocampus
-12 Left-Amygdala
-13 Right-Hippocampus
-14 Right-Amygdala
-
diff --git a/src/doc/build.md b/src/doc/build.md
deleted file mode 100644
index 23c10cb41b..0000000000
--- a/src/doc/build.md
+++ /dev/null
@@ -1,192 +0,0 @@
-The build process {#build_page}
-=================
-
-The procedure used to compile the source code is substantially different from
-that used in most other open-source software. The most common way to compile a
-software project relies on the `make` utility, and the presence of one or
-several `Makefiles` describing which files are to be compiled and linked, and in
-what order. The process of generating the `Makefiles` is often facilitated by
-other utilities such as `autoconf` & `automake`. One disadvantage of this
-approach is that these `Makefiles` must be updated every time changes are made
-to the source code that affect the dependencies and the order of compilation.
-
-In MRtrix, building the software relies on a two-stage process implemented in
-Python. First, the `configure` script should be executed to set the relevant
-architecture-specific variables (see @ref configure_page for details). Next,
-the `build` script is executed, and is responsible for resolving all
-inter-dependencies, then compiling and linking all the relevant files in the
-correct order. This means that any new files added to the source tree will be
-compiled if needed (according to the rules set out below), without any further
-action required. In addition, this script is multi-threaded and will use all
-available CPU cores simultaneously, significantly reducing the time needed to
-build the software on modern multi-core systems.
-
-@note on systems with a large number of cores but a comparatively small amount
-of RAM, the multi-threaded build can run out of memory. In these cases, it may
-be necessary to reduce the number of threads used by the build script by setting
-the `NUMBER_OF_PROCESSORS` environment variable before invoking `./build`.
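To make the two-stage process described above concrete, a build from a clean checkout would typically amount to something like:

    $ ./configure
    $ ./build

and, following the note above, a memory-constrained system might limit the number of build threads along the lines of (the thread count here is only an example):

    $ NUMBER_OF_PROCESSORS=2 ./build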
-
-Using the MRtrix build process {#build_process_usage}
-==============================
-
-The build scripts used to build MRtrix applications are designed to be
-easy to use, with no input required from the user. This does mean that
-developers must follow a few fixed rules when writing software for use
-with MRtrix.
-
-- To create a new executable, place the correspondingly named source file
-  in the `cmd/` folder. For example, if a new application called `myapp`
-  is to be written, write the corresponding code in the `cmd/myapp.cpp`
-  source file, and the build script will attempt to generate the executable
-  `release/bin/myapp` from it. You may want to consult the section @ref
-  command_howto for information on the contents of a command.
-
-- The `lib/` folder should contain only code destined to be included into
-  the MRtrix shared library. This library is intended to provide more
-  generic image access and manipulation routines. Developers should avoid
-  placing more application-specific routines in this folder.
-
-- Code designed for specific applications should be placed in the `src/`
-  folder. The corresponding code will then be linked directly into the
-  executables that make use of these routines, rather than being included
-  into the more general purpose MRtrix shared library.
-
-- Non-inlined function and variable definitions should be placed in
-  appropriately named source files (`*.cpp`), and the corresponding
-  declarations should be placed in a header file with the same name and the
-  appropriate suffix (`*.h`). This is essential if the build script is to resolve
-  the correct dependencies and link the correct object files together.
-
-- MRtrix headers or any header added by the user must be included within
-  quotes, and any system headers within angled brackets. This is critical for
-  the build system to work out the correct dependencies (see @ref include_path
-  for details).
-
-
-The configure script {#configure_section}
-====================
-
-The first step required for building the software is to run the `configure`
-script, which tailors various parameters to the specific system that it is run
-on. This includes checking that a compiler is available and behaves as
-expected, that other required packages are available (such as Eigen), whether
-the system is a 64-bit machine, etc. It is also possible to create distinct
-co-existing configurations, for example to compile either release or debug
-code. For details, see @ref configure_page.
-
-
-The build script {#build_section}
-================
-
-This script is responsible for identifying the targets to be built, resolving
-all their dependencies, compiling all the necessary object files (if they are
-out of date), and linking them together in the correct order. This is done by
-first identifying the desired targets, then building a list of their
-dependencies, and treating these dependencies themselves as targets to be built
-first. A target can only be built once all its dependencies are satisfied (i.e.
-all its required dependencies have been built). At this point, the target is
-built only if one or more of its dependencies is more recent than the target
-itself (or if it doesn't yet exist). This is done by looking at the timestamps
-of the relevant files. In this way, the relevant files are regenerated only
-when and if required.
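The timestamp rule described in the preceding paragraph can be sketched in Python roughly as follows; this is an illustration only, not the actual code of the `build` script, and the function name is invented for the example:

~~~{.py}
import os

def needs_rebuild(target, dependencies):
    # A target that does not yet exist must always be built.
    if not os.path.exists(target):
        return True
    target_time = os.path.getmtime(target)
    # Rebuild if any dependency is more recent than the target itself.
    return any(os.path.getmtime(dep) > target_time for dep in dependencies)
~~~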
-
-The following rules are used for each of these steps:
-
-#### Identifying targets to be built
-
-Specific targets can be specified on the command-line, in which case only
-their minimum required dependencies will be compiled and/or linked. This
-is useful to check that changes made to a particular file compile without
-error, without necessarily re-compiling all other associated files. For
-example:
-
-    $ ./build release/bin/mrconvert
-    $ ./build lib/mrtrix.o lib/app.o
-
-If no specific targets are given, the default target list will be generated,
-consisting of all applications found in the `cmd/` folder. For example, if the
-file `cmd/my_application.cpp` exists, then the corresponding target
-`release/bin/my_application` will be included in the default target list.
-
-#### Special target: _clean_
-
-The special target `clean` can be passed to the `build` script to remove all
-system-generated files, including all object files (`*.o`), all executables
-(i.e. all files in the `release/bin/` folder), and the MRtrix shared library.
-
-#### Resolving dependencies for executables
-
-A target is assumed to correspond to an executable if it resides in the
-`release/bin/` folder (the default target list consists of all executables).
-The dependencies for an example executable \c release/bin/myapp are resolved in
-the following way:
-
-1. the MRtrix library `lib/mrtrix-X_Y_Z.so` is added to the list
-
-2. the object file `cmd/myapp.o` is added to the list
-
-3. a list of all local headers included in the source file `cmd/myapp.cpp` is
-   generated. A header is considered local if it is included using inverted
-   commas rather than angled brackets. For example:
-   ~~~{.cpp}
-   // By default, the lib/ & src/ folders are included in the include search path
-   #include "mrtrix.h"  // the file lib/mrtrix.h exists, and is considered a local header
-   #include <...>       // a system header in angled brackets is not considered local
-   ~~~
-
-4. if a corresponding source file is found for any of these headers, its
-   corresponding object file is added to the list. For example, if
-   `cmd/myapp.cpp` includes the header `src/histogram.h`, and the file
-   `src/histogram.cpp` exists, the object file `src/histogram.o` is added to the
-   list of dependencies. Note that object files in the `lib/` folder are not
-   added to the list of dependencies, since they should already be included
-   in the MRtrix library (see below).
-
-5. all headers included in any of the local headers or their corresponding
-   source files are also considered in the same way, recursively until no new
-   dependencies are found. For example, the file `src/histogram.cpp` might also
-   include the header `src/min_max.h`. Since the source file `src/min_max.cpp`
-   exists, the corresponding object file `src/min_max.o` is added to the list.
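The recursive scan for local headers described in steps 3-5 above might be sketched as follows; this is only an illustration of the rule under the assumptions stated in the comments, not the actual implementation used by the `build` script:

~~~{.py}
import os
import re

# Headers included using inverted commas are considered local.
LOCAL_INCLUDE = re.compile(r'^\s*#\s*include\s+"([^"]+)"', re.MULTILINE)

def local_headers(source_file, search_paths=('lib', 'src'), found=None):
    # Collect the local headers included, directly or indirectly, by source_file.
    if found is None:
        found = set()
    with open(source_file) as f:
        contents = f.read()
    for name in LOCAL_INCLUDE.findall(contents):
        for folder in search_paths:
            header = os.path.join(folder, name)
            if os.path.exists(header) and header not in found:
                found.add(header)
                # Recurse into the header itself ...
                local_headers(header, search_paths, found)
                # ... and into its corresponding source file, if one exists;
                # that source file's object file would become a dependency.
                source = os.path.splitext(header)[0] + '.cpp'
                if os.path.exists(source):
                    local_headers(source, search_paths, found)
    return found
~~~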
-
-#### Resolving dependencies for object files
-
-A target is considered to be an object file if its suffix corresponds to the
-expected suffix for an object file (usually `*.o`). The dependencies for an
-example object file `lib/mycode.o` are resolved as follows:
-
-1. the corresponding source file \c lib/mycode.cpp is added to the list
-
-2. a list of all local headers included in the source file `lib/mycode.cpp` is
-   generated.
-
-3. the list of local headers is expanded by recursively adding all local
-   headers found within the already included local headers, until no new
-   local headers are found.
-
-4. these headers are all added to the list of dependencies.
-
-#### Resolving dependencies for the MRtrix library
-
-The list of dependencies for the MRtrix library is generated by adding the
-corresponding object file for each source file found in the `lib/` folder. For
-example, if the file `lib/image/header.cpp` is found in the `lib/` folder,
-the object file `lib/image/header.o` is added to the list of dependencies.
-
-#### Build rules for each target type
-
-- **executables:** dependencies consist of all relevant object files along with
-  the MRtrix library. These are all linked together to form the
-  executable.
-
-- **object files:** dependencies consist of a single source code file, along
-  with all the included headers. The source code file is compiled to form the
-  corresponding object file.
-
-- **MRtrix library:** dependencies consist of all the object files found in the
-  `lib/` folder. These are all linked together to form the shared library.
-
-- **source & header files:** these have no dependencies, and require no action.
-
diff --git a/testing/binaries/data b/testing/binaries/data
deleted file mode 160000
index f65db971d0..0000000000
--- a/testing/binaries/data
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit f65db971d019675874751c2111877531e8bb5544
diff --git a/testing/scripts/data b/testing/scripts/data
deleted file mode 160000
index b89990f5aa..0000000000
--- a/testing/scripts/data
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit b89990f5aae924e5cf9d61938ebe209ad2152a90