diff --git a/bin/harden_transform_with_slicer.py b/bin/harden_transform_with_slicer.py index c29e33ff..fec1cd25 100644 --- a/bin/harden_transform_with_slicer.py +++ b/bin/harden_transform_with_slicer.py @@ -19,16 +19,16 @@ def harden_transform(polydata, transform, inverse, outdir): check_load, polydata_node = slicer.util.loadFiberBundle(str(polydata), 1) if not check_load: - print('Could not load polydata file:', polydata) + print(f'Could not load polydata file: {polydata}') return check_load, transform_node = slicer.util.loadTransform(str(transform), 1) if not check_load: - print('Could not load transform file:', transform) + print(f'Could not load transform file: {transform}') return if polydata_node.GetPolyData().GetNumberOfCells() == 0: - print('Empty cluster:', polydata) + print(f'Empty cluster: {polydata}') shutil.copyfile(polydata, output_name) return diff --git a/bin/wm_append_clusters.py b/bin/wm_append_clusters.py index 28b0e04c..d668cd48 100755 --- a/bin/wm_append_clusters.py +++ b/bin/wm_append_clusters.py @@ -46,12 +46,12 @@ def main(): inputdir = os.path.abspath(args.inputDirectory) if not os.path.isdir(args.inputDirectory): - print(f"<{os.path.basename(__file__)}> Error: Input directory", args.inputDirectory, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {args.inputDirectory} does not exist.") exit() outdir = os.path.abspath(args.outputDirectory) if not os.path.exists(args.outputDirectory): - print(f"<{os.path.basename(__file__)}> Error: Output directory", args.outputDirectory, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Error: Output directory {args.outputDirectory} does not exist, creating it.") os.makedirs(outdir) if (args.clusterList is not None) and (args.tractMRML is not None): @@ -71,7 +71,7 @@ def main(): for c_idx in args.clusterList: cluster_vtp_filename = 'cluster_' + str(c_idx).zfill(5) + '.vtp' if not os.path.exists(os.path.join(inputdir, cluster_vtp_filename)): - print(f"<{os.path.basename(__file__)}> Error:", cluster_vtp_filename, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: {cluster_vtp_filename} does not exist.") exit() cluster_vtp_list.append(cluster_vtp_filename) @@ -83,7 +83,7 @@ def main(): if idx > 0: cluster_vtp_filename = line[idx-13:idx+4] if not os.path.exists(os.path.join(inputdir, cluster_vtp_filename)): - print(f"<{os.path.basename(__file__)}> Error:", cluster_vtp_filename, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: {cluster_vtp_filename} does not exist.") exit() cluster_vtp_list.append(cluster_vtp_filename) else: @@ -92,9 +92,9 @@ def main(): print("") print(f"<{os.path.basename(__file__)}> Start to append clusters.") print("") - print("=====input directory======\n", inputdir) - print("=====output directory=====\n", outdir) - print("=====clusters to be appended====\n", cluster_vtp_list) + print(f"=====input directory======\n {inputdir}") + print(f"=====output directory=====\n {outdir}") + print(f"=====clusters to be appended====\n {cluster_vtp_list}") print("") appender = vtk.vtkAppendPolyData() @@ -122,9 +122,9 @@ def main(): wma.io.write_polydata(pd_appended_cluster, outputfile) print('') - print(f'<{os.path.basename(__file__)}> Appended fiber tract: number of fibers', pd_appended_cluster.GetNumberOfLines()) + print(f'<{os.path.basename(__file__)}> Appended fiber tract: number of fibers {pd_appended_cluster.GetNumberOfLines()}') print('') - print(f'<{os.path.basename(__file__)}> Save result to', outputfile, '\n') + 
print(f'<{os.path.basename(__file__)}> Save result to {outputfile}\n') if __name__ == '__main__': main() diff --git a/bin/wm_append_clusters_to_anatomical_tracts.py b/bin/wm_append_clusters_to_anatomical_tracts.py index df1d66e8..71d8b609 100755 --- a/bin/wm_append_clusters_to_anatomical_tracts.py +++ b/bin/wm_append_clusters_to_anatomical_tracts.py @@ -67,7 +67,7 @@ def main(): inputdir = os.path.abspath(args.inputDirectory) if not os.path.isdir(args.inputDirectory): - print(f"<{os.path.basename(__file__)}> Error: Input directory", args.inputDirectory, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {args.inputDirectory} does not exist.") exit() inputdir_left = os.path.join(inputdir, 'tracts_left_hemisphere') @@ -75,18 +75,18 @@ def main(): inputdir_comm = os.path.join(inputdir, 'tracts_commissural') if not os.path.isdir(inputdir_left): - print(f"<{os.path.basename(__file__)}> Error: Input directory", inputdir_left, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {inputdir_left} does not exist.") exit() if not os.path.isdir(inputdir_right): - print(f"<{os.path.basename(__file__)}> Error: Input directory", inputdir_right, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {inputdir_right} does not exist.") exit() if not os.path.isdir(inputdir_comm): - print(f"<{os.path.basename(__file__)}> Error: Input directory", inputdir_comm, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {inputdir_comm} does not exist.") exit() atlasdir = os.path.abspath(args.atlasDirectory) if not os.path.isdir(args.atlasDirectory): - print(f"<{os.path.basename(__file__)}> Error: Atlas directory", args.atlasDirectory, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Atlas directory {args.atlasDirectory} does not exist.") exit() def list_mrml_files(input_dir): @@ -100,11 +100,11 @@ def list_mrml_files(input_dir): if len(mrml_files) == 0: print(f"<{os.path.basename(__file__)}> Error: There is no mrml files in the input atlas folder.") else: - print(f"<{os.path.basename(__file__)}>", len(mrml_files)-1, "mrml files are detected.") + print(f"<{os.path.basename(__file__)}> {len(mrml_files)-1} mrml files are detected.") outdir = os.path.abspath(args.outputDirectory) if not os.path.exists(args.outputDirectory): - print(f"<{os.path.basename(__file__)}> Output directory", args.outputDirectory, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {args.outputDirectory} does not exist, creating it.") os.makedirs(outdir) def output_appended_tract(cluster_vtp_list, outputfile): @@ -142,12 +142,12 @@ def output_appended_tract(cluster_vtp_list, outputfile): tract_idx = 1 for tract in hemispheric_tracts: - print(" *", tract_idx, "-", tract) + print(f" * {tract_idx} - {tract}") tract_idx = tract_idx + 1 mrml = os.path.join(atlasdir, tract+".mrml") if not os.path.exists(mrml): - print(f"<{os.path.basename(__file__)}> Error: Cannot locate", mrml) + print(f"<{os.path.basename(__file__)}> Error: Cannot locate {mrml}") exit() cluster_vtp_list_left = list() @@ -160,13 +160,13 @@ def output_appended_tract(cluster_vtp_list, outputfile): cluster_vtp_filename_left = os.path.join(inputdir_left, cluster_vtp_filename); if not os.path.exists(cluster_vtp_filename_left): - print(f"<{os.path.basename(__file__)}> Error:", cluster_vtp_filename_left, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: {cluster_vtp_filename_left} does not exist.")
exit() cluster_vtp_list_left.append(cluster_vtp_filename_left) cluster_vtp_filename_right = os.path.join(inputdir_right, cluster_vtp_filename); if not os.path.exists(cluster_vtp_filename_right): - print(f"<{os.path.basename(__file__)}> Error:", cluster_vtp_filename_right, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: {cluster_vtp_filename_right} does not exist.") exit() cluster_vtp_list_right.append(cluster_vtp_filename_right) @@ -179,13 +179,13 @@ def output_appended_tract(cluster_vtp_list, outputfile): print(f"<{os.path.basename(__file__)}> commissural tracts: ") for tract in commissural_tracts: - print(" *", tract_idx, "-", tract) + print(f" * {tract_idx} - {tract}") tract_idx = tract_idx + 1 mrml = os.path.join(atlasdir, tract+".mrml") if not os.path.exists(mrml): - print(f"<{os.path.basename(__file__)}> Error: Cannot locate", mrml) + print(f"<{os.path.basename(__file__)}> Error: Cannot locate {mrml}") exit() cluster_vtp_list_comm = list() @@ -197,7 +197,7 @@ def output_appended_tract(cluster_vtp_list, outputfile): cluster_vtp_filename_comm = os.path.join(inputdir_comm, cluster_vtp_filename); if not os.path.exists(cluster_vtp_filename_comm): - print(f"<{os.path.basename(__file__)}> Error:", cluster_vtp_filename_comm, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: {cluster_vtp_filename_comm} does not exist.") exit() cluster_vtp_list_comm.append(cluster_vtp_filename_comm) @@ -216,8 +216,8 @@ def list_cluster_files(input_dir): list_tracts= list_cluster_files(outdir) print('') - print(f'<{os.path.basename(__file__)}> Appended tracts can be found at', outdir, '\n') - print(f'<{os.path.basename(__file__)}> A total of', len(list_tracts), 'tracts\n') + print(f'<{os.path.basename(__file__)}> Appended tracts can be found at {outdir}\n') + print(f'<{os.path.basename(__file__)}> A total of {len(list_tracts)} tracts\n') if __name__ == "__main__": diff --git a/bin/wm_append_diffusion_measures_across_subjects.py b/bin/wm_append_diffusion_measures_across_subjects.py index 6ddec935..9854b596 100644 --- a/bin/wm_append_diffusion_measures_across_subjects.py +++ b/bin/wm_append_diffusion_measures_across_subjects.py @@ -40,7 +40,7 @@ def main(): subject_IDs = [os.path.basename(folder) for folder in subject_folders] print() - print('Subject IDs: (n=%d): ' % len(subject_IDs)) + print(f'Subject IDs: (n={len(subject_IDs)}): ') print(subject_IDs) # anatomical tracts @@ -49,22 +49,22 @@ def main(): fields = [col.strip() for col in stats.columns] fields = fields[1:] print() - print('Tract diffusion measures per subject: (n=%d): ' % len(fields)) + print(f'Tract diffusion measures per subject: (n={len(fields)}): ') print(fields) tracts = stats.to_numpy()[:, 0] tracts = [os.path.basename(filepath).replace('.vtp', '').replace('.vtk', '').strip() for filepath in tracts] print() - print('White matter tracts per subject: (n=%d): ' % len(tracts)) + print(f'White matter tracts per subject: (n={len(tracts)}): ') print(tracts) appended_fields = ['subjectkey'] for tract in tracts: for field in fields: - appended_fields.append("%s.%s" % (tract, field)) + appended_fields.append(f"{tract}.{field}") print() - print('Appended tract diffusion measure fields per subject: (n=%d): ' % len(appended_fields)) + print(f'Appended tract diffusion measure fields per subject: (n={len(appended_fields)}): ') print(appended_fields[:10], ' ... 
') data = [] @@ -101,13 +101,13 @@ def main(): fields = [col.strip() for col in stats.columns] fields = fields[1:] print() - print('Fiber cluster diffusion measures per subject: (n=%d): ' % len(fields)) + print(f'Fiber cluster diffusion measures per subject: (n={len(fields)}): ') print(fields) clusters = stats.to_numpy()[:, 0] clusters = [os.path.basename(filepath).replace('.vtp', '').replace('.vtk', '').strip() for filepath in clusters] print() - print('Fiber clusters per subject: (n=%d): ' % len(clusters)) + print(f'Fiber clusters per subject: (n={len(clusters)}): ') if len(clusters) == 800 and len(tracts) == 73: comm_clusters = [3, 8, 33, 40, 46, 52, 56, 57, 62, 68, 69, 73, 86, 91, 109, 110, 114, 142, 144, 145, 146, 159, 163, 250, 251, 252, 257, 262, 263, 268, 271, 305, 311, 314, 322, 330, 334, 338, 342, 350, 363, 371, 375, 403, 410, 437, 440, 448, 456, 465, 468, 475, 484, 485, 488, 519, 522, 525, 543, 545, 549, 557, 576, 577, 582, 587, 591, 597, 601, 614, 620, 623, 627, 633, 645, 653, 658, 663, 670, 677, 683, 702, 770, 781] @@ -132,10 +132,10 @@ def main(): for cluster in clusters_loc: for field in fields: - appended_fields.append("%s.%s.%s" % (loc, cluster, field)) + appended_fields.append(f"{loc}.{cluster}.{field}") print() - print('Appended cluster diffusion measure fields per subject: (n=%d): ' % len(appended_fields)) + print(f'Appended cluster diffusion measure fields per subject: (n={len(appended_fields)}): ') data = [] for s_idx, subject_folder in enumerate(subject_folders): diff --git a/bin/wm_assess_cluster_location_by_hemisphere.py b/bin/wm_assess_cluster_location_by_hemisphere.py index b214efe3..03381175 100755 --- a/bin/wm_assess_cluster_location_by_hemisphere.py +++ b/bin/wm_assess_cluster_location_by_hemisphere.py @@ -140,7 +140,7 @@ def _read_location(_inpd): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print(f"<{os.path.basename(__file__)}> Error: Input directory", args.inputDirectory, "does not exist or is not a directory.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {args.inputDirectory} does not exist or is not a directory.") exit() clusterLocationFile = args.clusterLocationFile @@ -175,7 +175,7 @@ def _read_location(_inpd): if outdir is not None: print(f"<{os.path.basename(__file__)}> Separated clusters will be ouput.") if not os.path.exists(outdir): - print(f"<{os.path.basename(__file__)}> Output directory", outdir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) outdir_right = os.path.join(outdir, 'tracts_right_hemisphere') diff --git a/bin/wm_average_tract_measures.py b/bin/wm_average_tract_measures.py index ec47d893..9ebdecd6 100644 --- a/bin/wm_average_tract_measures.py +++ b/bin/wm_average_tract_measures.py @@ -51,13 +51,13 @@ def main(): print('All available tracts:') all_tracts = [f_name.replace('.Num_Points', '') for f_name in fields if f_name.endswith('.Num_Points')] - print('N = ', str(len(all_tracts)), ':') + print(f'N = {len(all_tracts)} :') print(all_tracts) append_list = [] print('Tracts and related measures to be appended.') for t_idx, tract in enumerate(args.tractList): - print('*', t_idx, '-', tract) + print(f'* {t_idx} - {tract}') indices = [index for index in range(len(fields)) if fields[index].startswith(tract+'.Num_') or re.search(tract+".*.Mean", fields[index])] if len(indices) == 0: print("Error: tract not founded in the input file.") @@ -68,7 +68,7 @@ def main(): append_list = 
numpy.array(append_list) append_measures = [field.replace(args.tractList[-1], '') for field in fields[indices]] - print("Output measures:", append_measures) + print(f"Output measures: {append_measures}") avg_stats = [stats.to_numpy()[:, 0]] for m_idx, m_name in enumerate(append_measures): @@ -125,7 +125,7 @@ def main(): df.to_csv(args.outoutmeasure, index=False) print() - print('Output file at:', os.path.abspath(args.outoutmeasure)) + print(f'Output file at: {os.path.abspath(args.outoutmeasure)}') exit() diff --git a/bin/wm_change_nrrd_dir.py b/bin/wm_change_nrrd_dir.py index e56b59cc..9c354e2e 100644 --- a/bin/wm_change_nrrd_dir.py +++ b/bin/wm_change_nrrd_dir.py @@ -77,7 +77,7 @@ def main(): with open(outputnrrd, 'w') as o: o.write(outstr) - print('Done! New nrrd header: ' + outputnrrd) + print(f'Done! New nrrd header: {outputnrrd}') if __name__ == "__main__": diff --git a/bin/wm_cluster_atlas.py b/bin/wm_cluster_atlas.py index 68f7beae..b5dbe3c7 100755 --- a/bin/wm_cluster_atlas.py +++ b/bin/wm_cluster_atlas.py @@ -102,28 +102,28 @@ def main(): args = parser.parse_args() if not os.path.isdir(args.inputDirectory): - print(f"<{os.path.basename(__file__)}> Error: Input directory", args.inputDirectory, "does not exist or is not a directory.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {args.inputDirectory} does not exist or is not a directory.") exit() outdir = args.outputDirectory if not os.path.exists(outdir): - print(f"<{os.path.basename(__file__)}> Output directory", outdir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) print("\n==========================") print(f"<{os.path.basename(__file__)}> Clustering parameters") - print("input directory:\n", args.inputDirectory) - print("output directory:\n", args.outputDirectory) + print(f"input directory:\n {args.inputDirectory}") + print(f"output directory:\n {args.outputDirectory}") if args.numberOfFibers is not None: - print("fibers to analyze per subject: ", args.numberOfFibers) + print(f"fibers to analyze per subject: {args.numberOfFibers}") number_of_fibers_per_subject = args.numberOfFibers else: number_of_fibers_per_subject = 2000 - print("fibers to analyze per subject: Setting to default", number_of_fibers_per_subject) + print(f"fibers to analyze per subject: Setting to default {number_of_fibers_per_subject}") fiber_length = args.fiberLength - print("minimum length of fibers to analyze (in mm): ", fiber_length) + print(f"minimum length of fibers to analyze (in mm): {fiber_length}") if args.numberOfJobs is not None: number_of_jobs = args.numberOfJobs @@ -132,7 +132,7 @@ def main(): # multiprocessing is not used efficiently in our code and should # be avoided for large clustering problems, for now. number_of_jobs = 1 - print('Using N jobs:', number_of_jobs) + print(f'Using N jobs: {number_of_jobs}') if args.flag_verbose: print("Verbose ON.") @@ -146,46 +146,46 @@ def main(): print("Rendering. 
After clustering, will create colorful jpg images of the group.") render = not args.flag_norender - print("RENDER:", render) + print(f"RENDER: {render}") if args.numberOfClusters is not None: number_of_clusters = args.numberOfClusters else: number_of_clusters = 250 - print("Number of clusters to find: ", number_of_clusters) + print(f"Number of clusters to find: {number_of_clusters}") cluster_outlier_std_threshold = args.clusterOutlierStandardDeviation - print("Standard deviation for fiber outlier removal after clustering (accurate local probability): ", cluster_outlier_std_threshold) + print(f"Standard deviation for fiber outlier removal after clustering (accurate local probability): {cluster_outlier_std_threshold}") outlier_std_threshold = args.outlierStandardDeviation - print("Standard deviation for fiber outlier removal before clustering (approximated total probability): ", outlier_std_threshold) + print(f"Standard deviation for fiber outlier removal before clustering (approximated total probability): {outlier_std_threshold}") subject_percent_threshold = args.subjectPercentToKeepCluster - print("Percent of subjects needed in a cluster to retain that cluster on the next iteration:", subject_percent_threshold) + print(f"Percent of subjects needed in a cluster to retain that cluster on the next iteration: {subject_percent_threshold}") # 20mm works well for cluster-specific outlier removal in conjunction with the mean distance. cluster_local_sigma = args.outlierSigma - print("Local sigma for cluster outlier removal:", cluster_local_sigma) + print(f"Local sigma for cluster outlier removal: {cluster_local_sigma}") cluster_iterations = args.iterations - print("Iterations of clustering and outlier removal:", cluster_iterations) + print(f"Iterations of clustering and outlier removal: {cluster_iterations}") if args.sizeOfNystromSample is not None: number_of_sampled_fibers = args.sizeOfNystromSample else: number_of_sampled_fibers = 2000 - print("Size of Nystrom sample: ", number_of_sampled_fibers) + print(f"Size of Nystrom sample: {number_of_sampled_fibers}") if args.sigma is not None: sigma = args.sigma else: sigma = 60 - print("Sigma in mm: ", sigma) + print(f"Sigma in mm: {sigma}") if args.showNFibersInSlicer is not None: number_of_fibers_to_display = args.showNFibersInSlicer else: number_of_fibers_to_display = 10000.0 - print("Maximum total number of fibers to display in MRML/Slicer: ", number_of_fibers_to_display) + print(f"Maximum total number of fibers to display in MRML/Slicer: {number_of_fibers_to_display}") #if args.flag_remove_outliers: # if args.subjectPercent is not None: @@ -208,7 +208,7 @@ def main(): else: # for across-subjects matching threshold = 2.0 - print("Threshold (in mm) for fiber distances: ", threshold) + print(f"Threshold (in mm) for fiber distances: {threshold}") if args.distanceMethod is not None: @@ -216,7 +216,7 @@ def main(): else: # for across-subjects matching distance_method = 'Mean' - print("Fiber distance or comparison method: ", distance_method) + print(f"Fiber distance or comparison method: {distance_method}") if args.flag_nystrom_off: use_nystrom = False @@ -232,7 +232,7 @@ def main(): testing = False if args.randomSeed is not None: - print("Setting random seed to: ", args.randomSeed) + print(f"Setting random seed to: {args.randomSeed}") random_seed = args.randomSeed if args.flag_pos_def_off: @@ -265,14 +265,14 @@ def main(): input_polydatas = wma.io.list_vtk_files(args.inputDirectory) number_of_subjects = len(input_polydatas) - 
print(f"<{os.path.basename(__file__)}> Found ", number_of_subjects, "subjects in input directory:", args.inputDirectory) + print(f"<{os.path.basename(__file__)}> Found {number_of_subjects} subjects in input directory: {args.inputDirectory}") if number_of_subjects < 1: print("\n Error: No .vtk or .vtp files were found in the input directory.\n") exit() total_number_of_fibers = number_of_fibers_per_subject * number_of_subjects - print("Input number of subjects (number of vtk/vtp files): ", number_of_subjects) + print(f"Input number of subjects (number of vtk/vtp files): {number_of_subjects}") print("==========================\n") print(f"<{os.path.basename(__file__)}> Starting file I/O and computation.") @@ -292,14 +292,14 @@ def main(): outstr += str(number_of_subjects) outstr += '\n' outstr += '\n' - outstr += "Current date: " + time.strftime("%x") + outstr += f"Current date: {time.strftime('%x')}" outstr += '\n' - outstr += "Current time: " + time.strftime("%X") + outstr += f"Current time: {time.strftime('%X')}" outstr += '\n' outstr += '\n' - outstr += "Path to Script: " + os.path.realpath(__file__) + outstr += f"Path to Script: {os.path.realpath(__file__)}" outstr += '\n' - outstr += "Working Directory: " + os.getcwd() + outstr += f"Working Directory: {os.getcwd()}" outstr += '\n' outstr += '\n' outstr += "Description of Outputs\n" @@ -332,17 +332,17 @@ def main(): input_pds = list() for fname in input_polydatas: # read data - print(f"<{os.path.basename(__file__)}> Reading input file:", fname) + print(f"<{os.path.basename(__file__)}> Reading input file: {fname}") pd = wma.io.read_polydata(fname) # preprocessing step: minimum length #print f"<{os.path.basename(__file__)}> Preprocessing by length:", fiber_length, "mm." pd2 = wma.filter.preprocess(pd, fiber_length,verbose=verbose) # preprocessing step: fibers to analyze if number_of_fibers_per_subject is not None: - print(f"<{os.path.basename(__file__)}> Downsampling to", number_of_fibers_per_subject, "fibers from", pd2.GetNumberOfLines(),"fibers over length", fiber_length, ".") + print(f"<{os.path.basename(__file__)}> Downsampling to {number_of_fibers_per_subject} fibers from {pd2.GetNumberOfLines()} fibers over length {fiber_length}.") pd3 = wma.filter.downsample(pd2, number_of_fibers_per_subject, verbose=verbose, random_seed=random_seed) if pd3.GetNumberOfLines() != number_of_fibers_per_subject: - print(f"<{os.path.basename(__file__)}> Fibers found:", pd3.GetNumberOfLines(), "Fibers requested:", number_of_fibers_per_subject) + print(f"<{os.path.basename(__file__)}> Fibers found: {pd3.GetNumberOfLines()} Fibers requested: {number_of_fibers_per_subject}") print("\n ERROR: too few fibers over length threshold in subject:", fname) exit() else: @@ -378,15 +378,15 @@ def main(): # Check there are enough fibers for requested analysis if number_of_sampled_fibers >= input_data.GetNumberOfLines(): print(f"<{os.path.basename(__file__)}> Error: Nystrom sample size is larger than number of fibers available.") - print("number_of_subjects:", number_of_subjects) - print("number_of_fibers_per_subject:", number_of_fibers_per_subject) - print("total_number_of_fibers:", total_number_of_fibers) - print("requested subsample size from above total:", number_of_sampled_fibers) + print(f"number_of_subjects: {number_of_subjects}") + print(f"number_of_fibers_per_subject: {number_of_fibers_per_subject}") + print(f"total_number_of_fibers: {total_number_of_fibers}") + print(f"requested subsample size from above total: {number_of_sampled_fibers}") exit() # Set up 
random seed if random_seed is not None: - print(f"<{os.path.basename(__file__)}> Setting random seed to", random_seed) + print(f"<{os.path.basename(__file__)}> Setting random seed to: {random_seed}") numpy.random.seed(seed=random_seed) # Overall progress/outlier removal info file @@ -399,13 +399,13 @@ def main(): # Run clustering several times, removing outliers each time. for iteration in range(cluster_iterations): # make a directory for the current iteration - dirname = "iteration_%05d" % (iteration) + dirname = f"iteration_{iteration:05d}" outdir_current = os.path.join(outdir, dirname) # Calculate indices of random sample for Nystrom method nystrom_mask = numpy.random.permutation(input_data.GetNumberOfLines()) < number_of_sampled_fibers - print("BEFORE cluster: Polydata size:", input_data.GetNumberOfLines(), "Subject list for fibers:", subject_fiber_list.shape) + print(f"BEFORE cluster: Polydata size: {input_data.GetNumberOfLines()}, Subject list for fibers: {subject_fiber_list.shape}") # Run clustering on the polydata print(f'<{os.path.basename(__file__)}> Starting clustering.') @@ -422,7 +422,7 @@ def main(): # If any fibers were rejected, delete the corresponding entry in this list subject_fiber_list = numpy.delete(subject_fiber_list, reject_idx) - print("After cluster: Polydata size:", output_polydata_s.GetNumberOfLines(), "Subject list for fibers:", subject_fiber_list.shape) + print(f"After cluster: Polydata size: {output_polydata_s.GetNumberOfLines()} Subject list for fibers: {subject_fiber_list.shape}") # Save the output in our atlas format for automatic labeling of full brain datasets. # This is the data used to label a new subject. @@ -433,9 +433,9 @@ def main(): outdir1 = os.path.join(outdir_current, 'initial_clusters') if not os.path.exists(outdir1): - print(f"<{os.path.basename(__file__)}> Output directory", outdir1, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir1} does not exist, creating it.") os.makedirs(outdir1) - print(f'<{os.path.basename(__file__)}> Saving output files in directory:', outdir1) + print(f'<{os.path.basename(__file__)}> Saving output files in directory: {outdir1}') wma.cluster.output_and_quality_control_cluster_atlas(atlas, output_polydata_s, subject_fiber_list, input_polydatas, number_of_subjects, outdir1, cluster_numbers_s, color, embed, number_of_fibers_to_display, testing=testing, verbose=False, render_images=render) # Remove outliers from this iteration and save atlas again @@ -522,11 +522,11 @@ def main(): mask2 = subject_ID_per_fiber == sidx if verbose: tmp = numpy.sum(cluster_similarity[fidx, mask2]) / numpy.sum(mask2) - print("LOO:", total_similarity[-1], "all:", total_similarity_OLD[fidx], "subj:", tmp, "f:", number_fibers_in_cluster, "LOO f:", number_fibers_in_LOO_cluster, "subj f:", numpy.sum(mask2)) + print(f"LOO: {total_similarity[-1]} all: {total_similarity_OLD[fidx]} subj: {tmp} f: {number_fibers_in_cluster} LOO f: {number_fibers_in_LOO_cluster}, subj f: {numpy.sum(mask2)}") total_similarity = numpy.array(total_similarity) if verbose: - print("cluster", c, "tsim:", numpy.min(total_similarity), numpy.mean(total_similarity), numpy.max(total_similarity), "num fibers:", numpy.sum(mask), "num subjects:", subjects_per_cluster) + print(f"cluster {c} tsim: {numpy.min(total_similarity)} {numpy.mean(total_similarity)} {numpy.max(total_similarity)} num fibers: {numpy.sum(mask)} num subjects: {subjects_per_cluster}") # remove outliers with low similarity to their cluster mean_sim = 
numpy.mean(total_similarity) @@ -553,7 +553,7 @@ def main(): mask = cluster_numbers_s == -c -1 cluster_removed_local.append(numpy.sum(mask)) - print("CLUSTER:", c, "/", numpy.max(cluster_indices), "| fibers:", number_fibers_in_cluster, "| subjects:", subjects_per_cluster, "| dist:", numpy.min(dist_mm), numpy.mean(dist_mm), numpy.max(dist_mm), "| sim :", numpy.min(cluster_similarity), numpy.mean(cluster_similarity), numpy.max(cluster_similarity), "| tsim:", numpy.min(total_similarity), numpy.mean(total_similarity), numpy.max(total_similarity), "| local reject total:", cluster_removed_local[-1]) + print(f"CLUSTER: {c} / {numpy.max(cluster_indices)} | fibers: {number_fibers_in_cluster} | subjects: {subjects_per_cluster} | dist: {numpy.min(dist_mm)} {numpy.mean(dist_mm)} {numpy.max(dist_mm)} | sim : {numpy.min(cluster_similarity)} {numpy.mean(cluster_similarity)} {numpy.max(cluster_similarity)} | tsim: {numpy.min(total_similarity)} {numpy.mean(total_similarity)} {numpy.max(total_similarity)} | local reject total: {cluster_removed_local[-1]}") plt.figure(0) # plot the mean per-fiber distance because plotting all distances allocated 20GB @@ -566,7 +566,7 @@ def main(): if len(plot_data) > 0: n, bins, patches = plt.hist(plot_data, histtype='barstacked', range=[0.0,1.0], bins=30, alpha=0.75) plt.setp(patches,'lw', 0.1) - print("Rejecting cluster outlier fibers:", len(reject_idx)) + print(f"Rejecting cluster outlier fibers: {len(reject_idx)}") # in a second pass, also remove outliers whose average fiber # similarity to their cluster is too low compared to the whole brain. @@ -583,7 +583,7 @@ def main(): reject_idx = numpy.array(reject_idx) - print("Rejecting whole-brain cluster outlier fibers:", len(reject_idx)) + print(f"Rejecting whole-brain cluster outlier fibers: {len(reject_idx)}") for cidx in cluster_indices: mask = cluster_numbers_s == -cidx -1 @@ -595,9 +595,9 @@ def main(): outdir2 = os.path.join(outdir_current, 'remove_outliers') if not os.path.exists(outdir2): - print(f"<{os.path.basename(__file__)}> Output directory", outdir2, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir2} does not exist, creating it.") os.makedirs(outdir2) - print(f'<{os.path.basename(__file__)}> Saving output files in directory:', outdir2) + print(f'<{os.path.basename(__file__)}> Saving output files in directory: {outdir2}') plt.figure(0) plt.savefig( os.path.join(outdir2, 'fiber_distances_per_cluster_histogram.pdf')) @@ -633,7 +633,7 @@ def main(): cluster_commissure[cidx], file=clusters_qc_file) clusters_qc_file.close() - print("Before save Polydata size:", output_polydata_s.GetNumberOfLines(), "Subject list for fibers:", subject_fiber_list.shape) + print(f"Before save Polydata size: {output_polydata_s.GetNumberOfLines()} Subject list for fibers: {subject_fiber_list.shape}") # Add outlier information to the atlas atlas.cluster_outlier_std_threshold = cluster_outlier_std_threshold @@ -652,9 +652,9 @@ def main(): # now make the outlier clusters have positive numbers with -cluster_numbers_s so they can be saved also outdir3 = os.path.join(outdir2, 'outlier_tracts') if not os.path.exists(outdir3): - print(f"<{os.path.basename(__file__)}> Output directory", outdir3, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir3} does not exist, creating it.") os.makedirs(outdir3) - print(f'<{os.path.basename(__file__)}> Saving outlier fiber files in directory:', outdir3) + print(f'<{os.path.basename(__file__)}> Saving outlier fiber 
files in directory: {outdir3}') mask = cluster_numbers_s < 0 cluster_numbers_outliers = -numpy.multiply(cluster_numbers_s, mask) - 1 wma.cluster.output_and_quality_control_cluster_atlas(atlas, output_polydata_s, subject_fiber_list, input_polydatas, number_of_subjects, outdir3, cluster_numbers_outliers, color, embed, number_of_fibers_to_display, testing=testing, verbose=False, render_images=False) @@ -671,14 +671,14 @@ def main(): output_number_of_fibers = input_data.GetNumberOfLines() removed_fibers = input_number_of_fibers - output_number_of_fibers - print("End iteration Polydata size:", output_number_of_fibers, "Subject list for fibers:", subject_fiber_list.shape, "TEST:", test.shape) + print(f"End iteration Polydata size: {output_number_of_fibers} Subject list for fibers: {subject_fiber_list.shape} TEST: {test.shape}") log_file = open(fname_progress, 'a') print(iteration,'\t', input_number_of_fibers,'\t', output_number_of_fibers,'\t', removed_fibers, '\t', float(removed_fibers)/original_total_fibers, '\t', numpy.mean(cluster_mean_distances), '\t', numpy.mean(cluster_mean_probabilities), '\t', numpy.mean(cluster_subjects_before), '\t', numpy.mean(cluster_subjects_after), '\t', numpy.mean(cluster_size_before), '\t', numpy.mean(cluster_size_after), file=log_file) log_file.close() print("==========================\n") - print(f'<{os.path.basename(__file__)}> Done clustering atlas. See output in directory:\n ', outdir, '\n') + print(f'<{os.path.basename(__file__)}> Done clustering atlas. See output in directory:\n {outdir} \n') if __name__ == '__main__': main() diff --git a/bin/wm_cluster_from_atlas.py b/bin/wm_cluster_from_atlas.py index 3b4e1ec1..237f0654 100644 --- a/bin/wm_cluster_from_atlas.py +++ b/bin/wm_cluster_from_atlas.py @@ -70,24 +70,24 @@ def main(): exit() if not os.path.isdir(args.atlasDirectory): - print(f"<{os.path.basename(__file__)}> Error: Atlas directory", args.atlasDirectory, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Atlas directory {args.atlasDirectory} does not exist.") exit() outdir = args.outputDirectory if not os.path.exists(outdir): - print(f"<{os.path.basename(__file__)}> Output directory", outdir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) fname = args.inputFile subject_id = os.path.splitext(os.path.basename(fname))[0] outdir = os.path.join(outdir, subject_id) if not os.path.exists(outdir): - print(f"<{os.path.basename(__file__)}> Output directory", outdir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) print("\n==========================") print("input file:", args.inputFile) - print("atlas directory:", args.atlasDirectory) + print(f"atlas directory: {args.atlasDirectory}") print("output directory:", args.outputDirectory) if args.numberOfFibers is not None: @@ -100,7 +100,7 @@ def main(): print("minimum length of fibers to analyze (in mm): ", fiber_length) number_of_jobs = int(args.numberOfJobs) - print('Using N jobs:', number_of_jobs) + print(f'Using N jobs: {number_of_jobs}') if args.flag_verbose: print("Verbose ON.") @@ -130,7 +130,7 @@ def main(): print("==========================\n") # read atlas - print(f"<{os.path.basename(__file__)}> Loading input atlas:", args.atlasDirectory) + print(f"<{os.path.basename(__file__)}> Loading input atlas: {args.atlasDirectory}") atlas = wma.cluster.load_atlas(args.atlasDirectory, 'atlas') # 
read data diff --git a/bin/wm_cluster_remove_outliers.py b/bin/wm_cluster_remove_outliers.py index b7297afa..9c8e1a61 100644 --- a/bin/wm_cluster_remove_outliers.py +++ b/bin/wm_cluster_remove_outliers.py @@ -65,16 +65,16 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print(f"<{os.path.basename(__file__)}> Error: Input subject directory", args.inputDirectory, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input subject directory {args.inputDirectory} does not exist.") exit() if not os.path.isdir(args.atlasDirectory): - print(f"<{os.path.basename(__file__)}> Error: Atlas directory", args.atlasDirectory, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Atlas directory {args.atlasDirectory} does not exist.") exit() outdir = args.outputDirectory if not os.path.exists(outdir): - print(f"<{os.path.basename(__file__)}> Output directory", outdir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) subject_id = os.path.basename(args.inputDirectory) @@ -84,9 +84,9 @@ def main(): else: subject_id = path_split[1] - outdir = os.path.join(outdir, subject_id + '_outlier_removed') + outdir = os.path.join(outdir, f'{subject_id}_outlier_removed') if not os.path.exists(outdir): - print(f"<{os.path.basename(__file__)}> Output directory", outdir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) if args.numberOfJobs is not None: @@ -96,12 +96,12 @@ def main(): # multiprocessing is not used efficiently in our code and should # be avoided for large clustering problems, for now. number_of_jobs = 1 - print('Using N jobs:', number_of_jobs) + print(f'Using N jobs: {number_of_jobs}') print("\n==========================") - print("input directory:", args.inputDirectory) - print("atlas directory:", args.atlasDirectory) - print("output directory:", outdir) + print(f"input directory: {args.inputDirectory}") + print(f"atlas directory: {args.atlasDirectory}") + print(f"output directory: {outdir}") if args.flag_verbose: print("Verbose ON.") @@ -126,7 +126,7 @@ def main(): log_file.close() # read atlas - print(f"<{os.path.basename(__file__)}> Loading input atlas:", args.atlasDirectory) + print(f"<{os.path.basename(__file__)}> Loading input atlas: {args.atlasDirectory}") atlas = wma.cluster.load_atlas(args.atlasDirectory, 'atlas') bilateral = atlas.bilateral @@ -148,10 +148,10 @@ def main(): number_ac = len(atlas_clusters) number_sc = len(subject_clusters) - print("Number of atlas and subject clusters:", number_ac, number_sc) + print(f"Number of atlas and subject clusters: {number_ac} {number_sc}") if number_ac != number_sc: - print(f"<{os.path.basename(__file__)}> Error: Cluster number mismatch. \nAtlas directory (", args.atlasDirectory, ") has", number_ac, "clusters but subject directory (", args.inputDirectory, ") has", number_sc, "clusters.") + print(f"<{os.path.basename(__file__)}> Error: Cluster number mismatch. \nAtlas directory ({args.atlasDirectory}) has {number_ac} clusters but subject directory ({args.inputDirectory}) has {number_sc} clusters.") exit() @@ -160,10 +160,10 @@ def main(): distance_method = 'Mean' # 20mm works well for cluster-specific outlier removal in conjunction with the mean distance. 
cluster_local_sigma = args.outlierSigma - print("Local sigma for cluster outlier removal:", cluster_local_sigma) + print(f"Local sigma for cluster outlier removal: {cluster_local_sigma}") cluster_outlier_std_threshold = args.clusterOutlierStandardDeviation - print("Standard deviation for fiber outlier removal after clustering (accurate local probability): ", cluster_outlier_std_threshold) + print(f"Standard deviation for fiber outlier removal after clustering (accurate local probability): {cluster_outlier_std_threshold}") def outlier_removal_one_cluster(ca, cs, outdir, c): @@ -178,8 +178,8 @@ def outlier_removal_one_cluster(ca, cs, outdir, c): # if either cluster is empty, we have to skip outlier removal if number_fibers_in_subject_cluster == 0: wma.io.write_polydata(pd_subject, pd_out_fname) - print("cluster", c, "empty in subject") - log_str = str(c) + '\t' + str(number_fibers_in_subject_cluster)+'\t 0 \t 0 \t 0 \t 0 \t 0 \t 0 \t 0' + print(f"cluster {c} empty in subject") + log_str = f'{c}\t{number_fibers_in_subject_cluster}\t 0 \t 0 \t 0 \t 0 \t 0 \t 0 \t 0' return log_str @@ -187,9 +187,9 @@ def outlier_removal_one_cluster(ca, cs, outdir, c): # this should not ever happen. Note that "outlier removed" atlas is temporary and should not be used for classification. # (Note the other option is to remove this cluster as it was removed at the end of the iteration for the atlas.) wma.io.write_polydata(pd_subject, pd_out_fname) - print("cluster", c, "empty in atlas") + print(f"cluster {c} empty in atlas") print("ERROR: An atlas should not contain empty clusters. Please use the initial_clusters atlas for this outlier removal script and for subject clustering.") - log_str = str(c) + '\t' + str(number_fibers_in_subject_cluster)+'\t 0 \t 0 \t 0 \t 0 \t 0 \t 0 \t 0' + log_str = f'{c}\t{number_fibers_in_subject_cluster}\t 0 \t 0 \t 0 \t 0 \t 0 \t 0 \t 0' return log_str @@ -231,8 +231,8 @@ def outlier_removal_one_cluster(ca, cs, outdir, c): #print "SHAPE total similarity:", total_similarity_atlas.shape, total_similarity_subject.shape if verbose: - print("cluster", c, "tsim_atlas:", numpy.min(total_similarity_atlas), numpy.mean(total_similarity_atlas), numpy.max(total_similarity_atlas), "num fibers atlas:", number_fibers_in_atlas_cluster) - print("cluster", c, "tsim_subject:", numpy.min(total_similarity_subject), numpy.mean(total_similarity_subject), numpy.max(total_similarity_subject), "num fibers subject:", number_fibers_in_subject_cluster) + print(f"cluster {c} tsim_atlas: {numpy.min(total_similarity_atlas)} {numpy.mean(total_similarity_atlas)} {numpy.max(total_similarity_atlas)} num fibers atlas: {number_fibers_in_atlas_cluster}") + print(f"cluster {c} tsim_subject: {numpy.min(total_similarity_subject)} {numpy.mean(total_similarity_subject)} {numpy.max(total_similarity_subject)} num fibers subject: {number_fibers_in_subject_cluster}") # remove outliers with low similarity to their cluster mean_sim_atlas = numpy.mean(total_similarity_atlas) @@ -248,7 +248,7 @@ def outlier_removal_one_cluster(ca, cs, outdir, c): number_rejected = len(reject_idx) - print("cluster", c, "rejecting cluster outlier fibers:", number_rejected, "/", number_fibers_in_subject_cluster, "=", float(number_rejected)/number_fibers_in_subject_cluster) + print(f"cluster {c} rejecting cluster outlier fibers: {number_rejected} / {number_fibers_in_subject_cluster} = {float(number_rejected)/number_fibers_in_subject_cluster}") # Output a new polydata with the outliers removed. 
mask = numpy.ones([number_fibers_in_subject_cluster]) diff --git a/bin/wm_cluster_volumetric_measurements.py b/bin/wm_cluster_volumetric_measurements.py index 22ca3f53..6db440bc 100644 --- a/bin/wm_cluster_volumetric_measurements.py +++ b/bin/wm_cluster_volumetric_measurements.py @@ -55,24 +55,24 @@ def main(): inputdir = os.path.abspath(args.inputDirectory) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() inputvol = os.path.abspath(args.inputVolumetricMap) if not os.path.exists(inputvol): - print("Error: Input volumetric map", inputvol, "does not exist.") + print(f"Error: Input volumetric map {inputvol} does not exist.") exit() outdir = os.path.abspath(args.outputDirectory) if not os.path.exists(args.outputDirectory): - print("Output directory", args.outputDirectory, "does not exist, creating it.") + print(f"Output directory {args.outputDirectory} does not exist, creating it.") os.makedirs(outdir) input_volume = nibabel.load(inputvol) - print(f'<{os.path.basename(__file__)}> Input volume shape:', input_volume.get_data().shape) + print(f'<{os.path.basename(__file__)}> Input volume shape: {input_volume.get_data().shape}') input_vtk_list = wma.io.list_vtk_files(inputdir) - print(f'<{os.path.basename(__file__)}> Number of input clusters:', len(input_vtk_list)) + print(f'<{os.path.basename(__file__)}> Number of input clusters: {len(input_vtk_list)}') output_stats_file = os.path.join(outdir, args.outputStatFile) @@ -139,7 +139,7 @@ def compute_stat(inpd, volume, sampling_size=None): point_ijk = numpy.rint(point_ijk).astype(numpy.int32) if point_ijk[0] > new_voxel_data.shape[0] or point_ijk[1]> new_voxel_data.shape[1] or point_ijk[2] > new_voxel_data.shape[2]: - print('Warning: point', point_ijk, 'is outside the input volumetric map.') + print(f'Warning: point {point_ijk} is outside the input volumetric map.') continue if new_voxel_data[(point_ijk[0], point_ijk[1], point_ijk[2])] == 0: @@ -171,19 +171,19 @@ def compute_stat(inpd, volume, sampling_size=None): input_vtk = wma.io.read_polydata(input_vtk_path) vtk_file_name = os.path.split(input_vtk_path)[1][:-4] - print(f"<{os.path.basename(__file__)}> Working on", vtk_file_name) + print(f"<{os.path.basename(__file__)}> Working on {vtk_file_name}") new_voxel_data, mean_v, var_v, max_v, min_v, median_v, num_voxels, volume_size = compute_stat(input_vtk, input_volume, args.sampleSize) - str_line = vtk_file_name+','+str(num_voxels)+','+str(volume_size)+','+str(mean_v)+','+str(median_v)+','+str(min_v)+','+str(max_v)+','+str(var_v) - print(' -', str_head) - print(' -', str_line) + str_line = f"{vtk_file_name},{str(num_voxels)},{str(volume_size)},{str(mean_v)},{str(median_v)},{str(min_v)},{str(max_v)},{str(var_v)}" + print(f' - {str_head}') + print(f' - {str_line}') str_out = str_out + '\n' + str_line if args.outputLabelmap: volume_new = nibabel.Nifti1Image(new_voxel_data, input_volume.affine, input_volume.header) - output_labelmap = os.path.join(outdir, vtk_file_name+'.nii.gz') + output_labelmap = os.path.join(outdir, f'{vtk_file_name}.nii.gz') nibabel.save(volume_new, output_labelmap) output_file = open(output_stats_file, 'w') @@ -191,7 +191,7 @@ def compute_stat(inpd, volume, sampling_size=None): output_file.close() print('') - print(f'<{os.path.basename(__file__)}> Done! Result is in', output_stats_file) + print(f'<{os.path.basename(__file__)}> Done! 
Result is in {output_stats_file}') if __name__ == '__main__': main() diff --git a/bin/wm_compare_vtks.py b/bin/wm_compare_vtks.py index a9036b53..65f1cf0c 100755 --- a/bin/wm_compare_vtks.py +++ b/bin/wm_compare_vtks.py @@ -67,19 +67,19 @@ def main(): sz = farray1.fiber_array_r.shape number_of_points = float(sz[0]*sz[1]) - print(100 * numpy.sum(distance < 0.001) / number_of_points, 'percent < 0.001 mm') - print(100 * numpy.sum(distance < 0.01) / number_of_points, 'percent < 0.01 mm') - print(100 * numpy.sum(distance < 0.1) / number_of_points, 'percent < 0.1 mm') - print(100 * numpy.sum(distance < 0.5) / number_of_points, 'percent < 0.5 mm') - print(100 * numpy.sum(distance < 1) / number_of_points, 'percent < 1 mm') - print(100 * numpy.sum(distance < 1.2) / number_of_points, 'percent < 1.2 mm') - print(100 * numpy.sum(distance < 1.3) / number_of_points, 'percent < 1.3 mm') - print(100 * numpy.sum(distance < 1.4) / number_of_points, 'percent < 1.4 mm') - print(100 * numpy.sum(distance < 1.5) / number_of_points, 'percent < 1.5 mm') - print(100 * numpy.sum(distance < 2) / number_of_points, 'percent < 2 mm') - print(100 * numpy.sum(distance < 3) / number_of_points, 'percent < 3 mm') - print(100 * numpy.sum(distance < 4) / number_of_points, 'percent < 4 mm') - print(100 * numpy.sum(distance < 5) / number_of_points, 'percent < 5 mm') + print(f'{100 * numpy.sum(distance < 0.001) / number_of_points} percent < 0.001 mm') + print(f'{100 * numpy.sum(distance < 0.01) / number_of_points} percent < 0.01 mm') + print(f'{100 * numpy.sum(distance < 0.1) / number_of_points} percent < 0.1 mm') + print(f'{100 * numpy.sum(distance < 0.5) / number_of_points} percent < 0.5 mm') + print(f'{100 * numpy.sum(distance < 1) / number_of_points} percent < 1 mm') + print(f'{100 * numpy.sum(distance < 1.2) / number_of_points} percent < 1.2 mm') + print(f'{100 * numpy.sum(distance < 1.3) / number_of_points} percent < 1.3 mm') + print(f'{100 * numpy.sum(distance < 1.4) / number_of_points} percent < 1.4 mm') + print(f'{100 * numpy.sum(distance < 1.5) / number_of_points} percent < 1.5 mm') + print(f'{100 * numpy.sum(distance < 2) / number_of_points} percent < 2 mm') + print(f'{100 * numpy.sum(distance < 3) / number_of_points} percent < 3 mm') + print(f'{100 * numpy.sum(distance < 4) / number_of_points} percent < 4 mm') + print(f'{100 * numpy.sum(distance < 5) / number_of_points} percent < 5 mm') # output a new polydata showing what the distances are in case there is a pattern diff --git a/bin/wm_create_mrml_file.py b/bin/wm_create_mrml_file.py index 11600a63..d3401571 100644 --- a/bin/wm_create_mrml_file.py +++ b/bin/wm_create_mrml_file.py @@ -34,14 +34,14 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print(f"<{os.path.basename(__file__)}> Error: Input directory", args.inputDirectory, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {args.inputDirectory} does not exist.") exit() mrml_filename = "scene.mrml" input_polydatas = wma.io.list_vtk_files(args.inputDirectory) number_of_files = len(input_polydatas) - print(f"<{os.path.basename(__file__)}> Found ", number_of_files, "vtk files in input directory:", args.inputDirectory) + print(f"<{os.path.basename(__file__)}> Found {number_of_files} vtk files in input directory: {args.inputDirectory}") # define R, G, B colors # hack a colormap. 
0..255 values for each diff --git a/bin/wm_diffusion_measurements.py b/bin/wm_diffusion_measurements.py index 868d07c0..a76b164b 100644 --- a/bin/wm_diffusion_measurements.py +++ b/bin/wm_diffusion_measurements.py @@ -41,7 +41,7 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() cli= args.Slicer.split()[-1] @@ -53,22 +53,20 @@ def main(): outdir = os.path.split(args.outputCSV)[0] if not os.path.exists(outdir): - print("Output directory", outdir, "does not exist, creating it.") + print(f"Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) print(f"<{os.path.basename(__file__)}>. Starting scalar measurement extraction.") print("") - print("=====input directory======\n", args.inputDirectory) - print("=====output directory=====\n", outdir) - print("=====3D Slicer====\n", args.Slicer) + print(f"=====input directory======\n {args.inputDirectory}") + print(f"=====output directory=====\n {outdir}") + print(f"=====3D Slicer====\n {args.Slicer}") print("==========================") - os.system(module_FTSM + \ - ' --inputtype Fibers_File_Folder --format Column_Hierarchy --separator Comma ' + \ - ' --inputdirectory ' + args.inputDirectory + \ - ' --outputfile ' + args.outputCSV) + os.system( + f"{module_FTSM} --inputtype Fibers_File_Folder --format Column_Hierarchy --separator Comma --inputdirectory {args.inputDirectory} --outputfile {args.outputCSV}") - print(f"<{os.path.basename(__file__)}> Measurements done at:", args.outputCSV) + print(f"<{os.path.basename(__file__)}> Measurements done at: {args.outputCSV}") if __name__ == '__main__': main() diff --git a/bin/wm_download_anatomically_curated_atlas.py b/bin/wm_download_anatomically_curated_atlas.py index 7b60054a..453ebf32 100755 --- a/bin/wm_download_anatomically_curated_atlas.py +++ b/bin/wm_download_anatomically_curated_atlas.py @@ -96,7 +96,7 @@ def download_file(url, output_file): file_size_dl += len(buffer) output.write(buffer) p = float(file_size_dl) / file_size - status = fr"{file_size_dl} bytes [{p:.2%}]" + status = f"{file_size_dl} bytes [{p:.2%}]" status = status + chr(8)*(len(status)+1) sys.stdout.write(status) @@ -123,13 +123,13 @@ def extract_from_zip(archive_file, output_dir, remove_after_extraction=False): outdir = os.path.abspath(args.outputDirectory) if not os.path.exists(args.outputDirectory): - print(f"<{os.path.basename(__file__)}> Output directory", args.outputDirectory, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {args.outputDirectory} does not exist, creating it.") os.makedirs(outdir) requested_atlas = args.requested_atlas repo = build_download_url(ZENODO_RECORD_ROOT_URL, args.version) - repo = repo + "/" + repo = f"{repo}/" version = ORGAtlasVersion.get_version(ORGAtlasVersion.__getitem__(args.version)) metadata_url = build_download_url(ZENODO_API_RECORD_ROOT_URL, args.version) @@ -137,7 +137,7 @@ def extract_from_zip(archive_file, output_dir, remove_after_extraction=False): metadata_local_file_basename = metadata_local_file_rootname + build_suffix( DataExchangeFormatFileExtension.JSON) - org_atlases_version_folder_name = 'ORG-Atlases-' + version[1:] + org_atlases_version_folder_name = f'ORG-Atlases-{version[1:]}' org_atlases_version_folder = os.path.join(outdir, org_atlases_version_folder_name) if not os.path.exists(org_atlases_version_folder): 
os.makedirs(org_atlases_version_folder) @@ -148,38 +148,38 @@ def extract_from_zip(archive_file, output_dir, remove_after_extraction=False): ) if requested_atlas == 'ORG-800FC-100HCP' or requested_atlas == 'ORG-2000FC-100HCP': - FC_atlas_url = repo + 'files/' + requested_atlas + '.zip?download=1' - REG_atlas_url = repo + 'files/' + 'ORG-RegAtlas-100HCP.zip?download=1' + FC_atlas_url = f'{repo}files/{requested_atlas}.zip?download=1' + REG_atlas_url = f'{repo}files/ORG-RegAtlas-100HCP.zip?download=1' - population_T1_url = repo + 'files/' + '100HCP-population-mean-T1.nii.gz?download=1' - population_T2_url = repo + 'files/' + '100HCP-population-mean-T2.nii.gz?download=1' - population_b0_url = repo + 'files/' + '100HCP-population-mean-b0.nii.gz?download=1' + population_T1_url = f'{repo}files/100HCP-population-mean-T1.nii.gz?download=1' + population_T2_url = f'{repo}files/100HCP-population-mean-T2.nii.gz?download=1' + population_b0_url = f'{repo}files/100HCP-population-mean-b0.nii.gz?download=1' - downloaded_FC_atlas_file = os.path.join(org_atlases_version_folder, requested_atlas + '.zip') + downloaded_FC_atlas_file = os.path.join(org_atlases_version_folder, f'{requested_atlas}.zip') downloaded_Reg_atlas_file = os.path.join(org_atlases_version_folder, 'ORG-RegAtlas-100HCP.zip') downloaded_population_T1_file = os.path.join(org_atlases_version_folder, '100HCP-population-mean-T1.nii.gz') downloaded_population_T2_file = os.path.join(org_atlases_version_folder, '100HCP-population-mean-T2.nii.gz') downloaded_population_b0_file = os.path.join(org_atlases_version_folder, '100HCP-population-mean-b0.nii.gz') else: - print(f'<{os.path.basename(__file__)}> ' + requested_atlas + 'is not available. Please check input.') + print(f'<{os.path.basename(__file__)}> {requested_atlas} is not available. Please check input.') print('') exit() print("") print("===== ") - print("===== Release version : ", version) - print("===== Download from : ", repo) - print("===== Output directory : ", outdir) - print("===== Requested atlas : ", requested_atlas) + print(f"===== Release version : {version}") + print(f"===== Download from : {repo}") + print(f"===== Output directory : {outdir}") + print(f"===== Requested atlas : {requested_atlas}") print("") if requested_atlas == 'ORG-800FC-100HCP': - print(f'<{os.path.basename(__file__)}> The ' +requested_atlas+ ' atlas is an anatomically curated white matter atlas, with annotated anatomical label provided for each cluster. '\ + print(f'<{os.path.basename(__file__)}> The {requested_atlas} atlas is an anatomically curated white matter atlas, with annotated anatomical label provided for each cluster. '\ + 'MRML files that are used to organize fiber clusters that belong to the same anatomical fiber tract are provided in the atlas.') print('') elif requested_atlas == 'ORG-2000FC-100HCP': - print(f'<{os.path.basename(__file__)}> The ' +requested_atlas+ ' atlas is provided for applications that can be benefit from a fine scale white matter parcellation. This an uncurated white matter atlas.') + print(f'<{os.path.basename(__file__)}> The {requested_atlas} atlas is provided for applications that can be benefit from a fine scale white matter parcellation. 
This an uncurated white matter atlas.') print('') if requested_atlas == 'ORG-800FC-100HCP' or requested_atlas == 'ORG-2000FC-100HCP': @@ -197,7 +197,7 @@ def extract_from_zip(archive_file, output_dir, remove_after_extraction=False): download_file(FC_atlas_url, downloaded_FC_atlas_file) extract_from_zip(downloaded_FC_atlas_file, org_atlases_version_folder, remove_after_extraction=True) else: - print(f'<{os.path.basename(__file__)}> Skip downloading: There is an existing fiber clustering atlas at \''+requested_atlas+'\' in the output folder.') + print(f'<{os.path.basename(__file__)}> Skip downloading: There is an existing fiber clustering atlas at \'{requested_atlas}\' in the output folder.') print('') print(f'<{os.path.basename(__file__)}> Population mean T1/T2/b0 images.') @@ -223,7 +223,7 @@ def extract_from_zip(archive_file, output_dir, remove_after_extraction=False): print("") print('') - print(f'<{os.path.basename(__file__)}> Successfully downloaded to', outdir) + print(f'<{os.path.basename(__file__)}> Successfully downloaded to {outdir}') if __name__ == '__main__': main() diff --git a/bin/wm_harden_transform.py b/bin/wm_harden_transform.py index fc45466c..2676bff5 100644 --- a/bin/wm_harden_transform.py +++ b/bin/wm_harden_transform.py @@ -55,17 +55,17 @@ def main(): inputdir = os.path.abspath(args.inputDirectory) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() outdir = os.path.abspath(args.outputDirectory) if not os.path.exists(args.outputDirectory): - print("Output directory", args.outputDirectory, "does not exist, creating it.") + print(f"Output directory {args.outputDirectory} does not exist, creating it.") os.makedirs(outdir) slicer_path = os.path.abspath(args.Slicer) if not os.path.exists(args.Slicer): - print("Error: 3D Slicer", args.Slicer, "does not exist.") + print(f"Error: 3D Slicer {args.Slicer} does not exist.") exit() if (args.transform_file is None and args.transform_folder is None) or \ @@ -76,13 +76,13 @@ def main(): transform_way = 'individual' transform_path = os.path.abspath(args.transform_file) if not os.path.isfile(args.transform_file): - print("Error: Input transform file", args.transform_file, "does not exist or it is not a file.") + print(f"Error: Input transform file {args.transform_file} does not exist or it is not a file.") exit() elif args.transform_folder is not None: transform_way = 'multiple' transform_path = os.path.abspath(args.transform_folder) if not os.path.isdir(args.transform_folder): - print("Error: Input transform file folder ", args.transform_folder, "does not exist.") + print(f"Error: Input transform file folder {args.transform_folder} does not exist.") exit() inverse = args.inverse_transform @@ -99,18 +99,18 @@ def main(): input_transforms = wma.io.list_transform_files(transform_path) number_of_transforms = len(input_transforms) if number_of_polydatas != number_of_transforms: - print("Error: The number of input VTK/VTP files", number_of_polydatas,"should be equal to the number of transform files", number_of_transforms) + print(f"Error: The number of input VTK/VTP files {number_of_polydatas} should be equal to the number of transform files {number_of_transforms}") exit() print(f"<{os.path.basename(__file__)}> Starting harden transforms.") print("") - print("=====input directory======\n", inputdir) - print("=====output directory=====\n", outdir) - print("=====3D Slicer====\n", slicer_path) - 
print("=====Way of transform====\n", transform_way) - print("=====Inverse? ====\n", inverse) - print("=====Transform file(s) path====\n", transform_path) - print("=====Number of jobs:====\n", number_of_jobs) + print(f"=====input directory======\n {inputdir}") + print(f"=====output directory=====\n {outdir}") + print(f"=====3D Slicer====\n {slicer_path}") + print(f"=====Way of transform====\n {transform_way}") + print(f"=====Inverse? ====\n {inverse}") + print(f"=====Transform file(s) path====\n {transform_path}") + print(f"=====Number of jobs:====\n {number_of_jobs}") def command_harden_transform(polydata, transform, inverse, slicer_path, outdir): if inverse: @@ -118,22 +118,20 @@ def command_harden_transform(polydata, transform, inverse, slicer_path, outdir): else: str_inverse = 0 - print(f"<{os.path.basename(__file__)}> Transforming:", polydata) - cmd = slicer_path + " --no-main-window --python-script $(which harden_transform_with_slicer.py) " + \ - polydata + " " + transform + " " + str(str_inverse) + " " + outdir + " --python-code 'slicer.app.quit()' " + \ - ' >> ' + os.path.join(outdir, 'log.txt 2>&1') + print(f"<{os.path.basename(__file__)}> Transforming: {polydata}") + cmd = f"{slicer_path} --no-main-window --python-script $(which harden_transform_with_slicer.py) {polydata} {transform} {str(str_inverse)} {outdir} --python-code 'slicer.app.quit()' >> f{os.path.join(outdir, 'log.txt 2>&1')}" os.system(cmd) if transform_way == 'multiple': for polydata, transform in zip(input_polydatas, input_transforms): - print("======", transform, ' ', polydata) + print(f"====== {transform} {polydata}") print("\n") Parallel(n_jobs=number_of_jobs, verbose=1)( delayed(command_harden_transform)(polydata, transform, inverse, slicer_path, outdir) for polydata, transform in zip(input_polydatas, input_transforms)) else: - print("======", transform_path, "will be applied to all inputs.\n") + print(f"====== {transform_path} will be applied to all inputs.\n") command_harden_transform(inputdir, transform_path, inverse, slicer_path, outdir) @@ -143,7 +141,7 @@ def command_harden_transform(polydata, transform, inverse, slicer_path, outdir): output_polydatas = wma.io.list_vtk_files(outdir) number_of_results = len(output_polydatas) - print(f"<{os.path.basename(__file__)}> Transform were conducted for", number_of_results, "subjects.") + print(f"<{os.path.basename(__file__)}> Transform were conducted for {number_of_results} subjects.") if number_of_results != number_of_polydatas: print("Error: The numbers of inputs and outputs are different. Check log file for errors.") diff --git a/bin/wm_preprocess_all.py b/bin/wm_preprocess_all.py index 0bd0ca37..181d85ab 100755 --- a/bin/wm_preprocess_all.py +++ b/bin/wm_preprocess_all.py @@ -56,36 +56,36 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() outdir = args.outputDirectory if not os.path.exists(outdir): - print("Output directory", outdir, "does not exist, creating it.") + print(f"Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) print(f"{os.path.basename(__file__)}. 
Starting white matter preprocessing.") print("") - print("=====input directory======\n", args.inputDirectory) - print("=====output directory=====\n", args.outputDirectory) + print(f"=====input directory======\n {args.inputDirectory}") + print(f"=====output directory=====\n {args.outputDirectory}") print("==========================") if args.numberOfFibers is not None: - print("fibers to retain per subject: ", args.numberOfFibers) + print(f"fibers to retain per subject: {args.numberOfFibers}") else: print("fibers to retain per subject: ALL") if args.fiberLength is not None: - print("minimum length of fibers to retain (in mm): ", args.fiberLength) + print(f"minimum length of fibers to retain (in mm): {args.fiberLength}") else: print("minimum length of fibers to retain (in mm): 0") - print('CPUs detected:', multiprocessing.cpu_count()) + print(f'CPUs detected: {multiprocessing.cpu_count()}') if args.numberOfJobs is not None: parallel_jobs = args.numberOfJobs else: parallel_jobs = multiprocessing.cpu_count() - print('Using N jobs:', parallel_jobs) + print(f'Using N jobs: {parallel_jobs}') if args.flag_retaindata: print("Retain all data stored along the tractography.") @@ -102,7 +102,7 @@ def main(): # Loop over input DWIs inputPolyDatas = wma.io.list_vtk_files(args.inputDirectory) - print(f"<{os.path.basename(__file__)}> Input number of files: ", len(inputPolyDatas)) + print(f"<{os.path.basename(__file__)}> Input number of files: {len(inputPolyDatas)}") # for testing #inputPolyDatas = inputPolyDatas[0:2] @@ -111,13 +111,13 @@ def pipeline(inputPolyDatas, sidx, args): # get subject identifier from unique input filename # ------------------- subjectID = os.path.splitext(os.path.basename(inputPolyDatas[sidx]))[0] - id_msg = f"<{os.path.basename(__file__)}> ", sidx + 1, "/", len(inputPolyDatas) - msg = "**Starting subject:", subjectID + id_msg = f"<{os.path.basename(__file__)}> {sidx + 1} / {len(inputPolyDatas)}" + msg = f"**Starting subject: {subjectID}" print(id_msg + msg) # read input vtk data # ------------------- - msg = "**Reading input:", subjectID + msg = f"**Reading input: {subjectID}" print(id_msg + msg) wm = wma.io.read_polydata(inputPolyDatas[sidx]) @@ -129,7 +129,7 @@ def pipeline(inputPolyDatas, sidx, args): # ------------------- wm2 = None if args.fiberLength is not None or args.maxFiberLength is not None: - msg = "**Preprocessing:", subjectID + msg = f"**Preprocessing: {subjectID}" print(id_msg + msg) if args.fiberLength is not None: @@ -143,7 +143,7 @@ def pipeline(inputPolyDatas, sidx, args): maxlen = None wm2 = wma.filter.preprocess(wm, minlen, preserve_point_data=retaindata, preserve_cell_data=retaindata, verbose=False, max_length_mm=maxlen) - print("Number of fibers retained (length threshold", args.fiberLength, "): ", wm2.GetNumberOfLines(), "/", num_lines) + print(f"Number of fibers retained (length threshold {args.fiberLength}): {wm2.GetNumberOfLines()} / {num_lines}") if wm2 is None: wm2 = wm @@ -154,12 +154,12 @@ def pipeline(inputPolyDatas, sidx, args): # ------------------- wm3 = None if args.numberOfFibers is not None: - msg = "**Downsampling input:", subjectID, " number of fibers: ", args.numberOfFibers + msg = f"**Downsampling input: {subjectID} number of fibers: {args.numberOfFibers}" print(id_msg + msg) # , preserve_point_data=True needs editing of preprocess function to use mask function wm3 = wma.filter.downsample(wm2, args.numberOfFibers, preserve_point_data=retaindata, preserve_cell_data=retaindata, verbose=False, random_seed=random_seed) - print("Number of 
fibers retained: ", wm3.GetNumberOfLines(), "/", num_lines) + print(f"Number of fibers retained: {wm3.GetNumberOfLines()} / {num_lines}") if wm3 is None: wm3 = wm2 @@ -168,16 +168,16 @@ def pipeline(inputPolyDatas, sidx, args): # outputs # ------------------- - msg = "**Writing output data for subject:", subjectID + msg = f"**Writing output data for subject: {subjectID}" print(id_msg, msg) ext = Path(inputPolyDatas[sidx]).suffixes[0][1:] - fname = os.path.join(args.outputDirectory, subjectID+'_pp'+'.'+ext) + fname = os.path.join(args.outputDirectory, f"{subjectID}_pp.{ext}") try: - print("Writing output polydata", fname, "...") + print(f"Writing output polydata {fname}...") wma.io.write_polydata(wm3, fname) - print("Wrote output", fname, ".") + print(f"Wrote output {fname}.") except: print("Unknown exception in IO") raise diff --git a/bin/wm_quality_control_after_clustering.py b/bin/wm_quality_control_after_clustering.py index 60904115..7066c3e3 100644 --- a/bin/wm_quality_control_after_clustering.py +++ b/bin/wm_quality_control_after_clustering.py @@ -51,16 +51,16 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() output_dir = args.outputDirectory if not os.path.exists(output_dir): - print(f"<{os.path.basename(__file__)}> Output directory", output_dir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {output_dir} does not exist, creating it.") os.makedirs(output_dir) subject_list = os.listdir(args.inputDirectory) - print(f"<{os.path.basename(__file__)}> found", len(subject_list), "subjects.") + print(f"<{os.path.basename(__file__)}> found {len(subject_list)} subjects.") # Check if all subjects have the same number of clusters. # This can also help find the sub folders that are not the fiber clustering results. 
@@ -73,7 +73,7 @@ def main(): input_mask2 = f"{sub_dir}/cluster_*.vtp" cluster_polydatas = glob.glob(input_mask) + glob.glob(input_mask2) cluster_polydatas = sorted(cluster_polydatas) - print(" ", sub, "has", len(cluster_polydatas), "clusters.") + print(f" {sub} has {len(cluster_polydatas)} clusters.") if sidx == 0: num_of_clusters = len(cluster_polydatas) else: @@ -89,7 +89,7 @@ def main(): num_fibers_per_subject = numpy.zeros([num_of_subjects, num_of_clusters]) for sidx in range(0, num_of_subjects): sub = subject_list[sidx] - print(" loading", sub) + print(f" loading {sub}") sub_dir = os.path.join(args.inputDirectory, sub) input_mask = f"{sub_dir}/cluster_*.vtk" input_mask2 = f"{sub_dir}/cluster_*.vtp" diff --git a/bin/wm_quality_control_cluster_measurements.py b/bin/wm_quality_control_cluster_measurements.py index 473d93d9..be5dadce 100755 --- a/bin/wm_quality_control_cluster_measurements.py +++ b/bin/wm_quality_control_cluster_measurements.py @@ -40,10 +40,10 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() - print("\n\n== Testing measurement files in directory:", args.inputDirectory) + print(f"\n\n== Testing measurement files in directory: {args.inputDirectory}") ## output_dir = args.outputDirectory ## if not os.path.exists(output_dir): @@ -52,10 +52,10 @@ def main(): measurement_list = wma.tract_measurement.load_measurement_in_folder(args.inputDirectory, hierarchy = 'Column', separator = 'Tab') - print("\n== Number of subjects data found:", len(measurement_list)) + print(f"\n== Number of subjects data found: {len(measurement_list)}") if len(measurement_list) < 1: - print("ERROR, no measurement files found in directory:", args.inputDirectory) + print(f"ERROR, no measurement files found in directory {args.inputDirectory}") exit() # print the header @@ -70,7 +70,7 @@ def main(): for subject_measured in measurement_list: all_same = all(subject_measured.measurement_header == header) if not all_same: - print("ERROR: Subject does not have same measurement header:", subject_measured.case_id) + print(f"ERROR: Subject does not have same measurement header: {subject_measured.case_id}") test = False if test: print("Passed. All subjects have the same measurement header.") @@ -87,20 +87,20 @@ def main(): print("\n== Testing if all subjects have the same number of clusters.") test = True number_of_clusters = measurement_list[0].measurement_matrix[:,0].shape[0] - print("Group number of clusters (from first subject):", number_of_clusters) + print(f"Group number of clusters (from first subject): {number_of_clusters}") for subject_measured, id in zip(measurement_list, subject_id_list): nc = subject_measured.measurement_matrix[:,0].shape[0] if not nc == number_of_clusters: print(nc, " : ", id) test = False if test: - print("Passed. All subjects have the same number of clusters (", number_of_clusters, ").") + print(f"Passed. All subjects have the same number of clusters ({number_of_clusters}).") else: print("ERROR: All subjects do not have the same number of clusters. 
There was an earlier error in clustering or measurement that must be fixed before analysis.") # sanity check numbers of fibers are ok for all subjects print("\n== Testing if all subjects have reasonable mean fibers per cluster.") - print("Will print any subjects with more than", args.OutlierStandardDeviation, "standard deviations below group mean.") + print(f"Will print any subjects with more than {args.OutlierStandardDeviation} standard deviations below group mean.") test = True vidx = list(header).index('Num_Fibers') mean_fibers_list = [] @@ -111,7 +111,7 @@ def main(): mean_fibers_list = numpy.array(mean_fibers_list) mean_fibers = numpy.mean(mean_fibers_list) std_fibers = numpy.std(mean_fibers_list) - print("Mean and standard deviation of mean fibers per cluster in group:", mean_fibers, "+/-", std_fibers) + print(f"Mean and standard deviation of mean fibers per cluster in group: {mean_fibers} +/- {std_fibers}") threshold = mean_fibers - args.OutlierStandardDeviation * std_fibers ## mean_sorted_idx = numpy.argsort(numpy.array(mean_fibers_list)) ## for idx in mean_sorted_idx: @@ -120,15 +120,15 @@ def main(): for mf, sp in zip(mean_fibers_list[sorted_idx], subject_id_list[sorted_idx]): if mf < threshold: if test: - print("Subject(s) found with mean fibers more than", args.OutlierStandardDeviation, "standard deviations below group mean.") - print(mf, " : ", sp) + print(f"Subject(s) found with mean fibers more than {args.OutlierStandardDeviation} standard deviations below group mean.") + print(f"{mf} : {sp}") test = False if test: print("Passed. All subjects have reasonable mean fibers per cluster.") # sanity check number of empty clusters is not too large print("\n== Testing if all subjects have reasonable numbers of empty clusters.") - print("Will print any subjects with more than", args.OutlierStandardDeviation, "standard deviations above group mean.") + print(f"Will print any subjects with more than {args.OutlierStandardDeviation} standard deviations above group mean.") test = True empty_clusters_list = [] for subject_measured in measurement_list: @@ -141,14 +141,14 @@ def main(): empty_clusters_list = numpy.array(empty_clusters_list) mean_clusters = numpy.mean(empty_clusters_list) std_clusters = numpy.std(empty_clusters_list) - print("Mean and standard deviation of empty cluster number in group:", mean_clusters, "+/-", std_clusters) + print(f"Mean and standard deviation of empty cluster number in group: {mean_clusters} +/- {std_clusters}") threshold = mean_clusters + args.OutlierStandardDeviation * std_clusters sorted_idx = numpy.argsort(empty_clusters_list) for mf, sp in zip(empty_clusters_list[sorted_idx], subject_id_list[sorted_idx]): if mf > threshold: if test: - print("Subject(s) found with empty clusters more than", args.OutlierStandardDeviation, "standard deviations above group mean.") - print(mf, " : ", sp) + print(f"Subject(s) found with empty clusters more than {args.OutlierStandardDeviation} standard deviations above group mean.") + print(f"{mf} : {sp}") test = False if test: print("Passed. 
All subjects have reasonable numbers of empty clusters.") diff --git a/bin/wm_quality_control_tract_overlap.py b/bin/wm_quality_control_tract_overlap.py index ce2a322a..c22e65d7 100755 --- a/bin/wm_quality_control_tract_overlap.py +++ b/bin/wm_quality_control_tract_overlap.py @@ -51,16 +51,16 @@ def main(): print(f"<{os.path.basename(__file__)}> Starting...") if not os.path.exists(args.inputTract1): - print(f"<{os.path.basename(__file__)}> Error: Input tract 1", args.inputTract1, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input tract 1 {args.inputTract1} does not exist.") exit() if not os.path.exists(args.inputTract2): - print(f"<{os.path.basename(__file__)}> Error: Input tract 2", args.inputTract2, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input tract 2 {args.inputTract2} does not exist.") exit() output_dir = args.outputDirectory if not os.path.exists(output_dir): - print(f"<{os.path.basename(__file__)}> Output directory", output_dir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {output_dir} does not exist, creating it.") os.makedirs(output_dir) input_polydatas = [] @@ -77,7 +77,7 @@ def main(): appender = vtk.vtkAppendPolyData() for fname in input_polydatas: subject_id = os.path.splitext(os.path.basename(fname))[0] - print("Subject ", subject_idx, "/", number_of_subjects, "ID:", subject_id) + print(f"Subject {subject_idx} / {number_of_subjects} ID: {subject_id}") # Read data pd = wma.io.read_polydata(fname) diff --git a/bin/wm_quality_control_tractography.py b/bin/wm_quality_control_tractography.py index 2f865f1a..40d01772 100755 --- a/bin/wm_quality_control_tractography.py +++ b/bin/wm_quality_control_tractography.py @@ -52,17 +52,17 @@ def main(): print(f"<{os.path.basename(__file__)}> Starting...") if not os.path.isdir(args.inputDirectory): - print(f"<{os.path.basename(__file__)}> Error: Input directory", args.inputDirectory, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {args.inputDirectory} does not exist.") exit() output_dir = args.outputDirectory if not os.path.exists(output_dir): - print(f"<{os.path.basename(__file__)}> Output directory", output_dir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {output_dir} does not exist, creating it.") os.makedirs(output_dir) input_polydatas = wma.io.list_vtk_files(args.inputDirectory) number_of_subjects = len(input_polydatas) - print(f"<{os.path.basename(__file__)}> Found ", number_of_subjects, "subjects in input directory:", args.inputDirectory) + print(f"<{os.path.basename(__file__)}> Found {number_of_subjects} subjects in input directory: {args.inputDirectory}") if number_of_subjects < 1: print("\n Error: No .vtk or .vtp files were found in the input directory.\n") @@ -87,14 +87,14 @@ def main(): outstr += str(number_of_subjects) outstr += '\n' outstr += '\n' - outstr += "Current date: " + time.strftime("%x") + outstr += f"Current date: {time.strftime('%x')}" outstr += '\n' - outstr += "Current time: " + time.strftime("%X") + outstr += f"Current time: {time.strftime('%X')}" outstr += '\n' outstr += '\n' - outstr += "Path to Script: " + os.path.realpath(__file__) + outstr += f"Path to Script: {os.path.realpath(__file__)}" outstr += '\n' - outstr += "Working Directory: " + os.getcwd() + outstr += f"Working Directory: {os.getcwd()}" outstr += '\n' outstr += '\n' outstr += "Description of Outputs\n" @@ -159,19 +159,19 @@ def main(): outstr = 
"SUBJECT_ID\tFIBER_STEP_SIZE\tTOTAL_POINTS\tMEAN_FIBER_LENGTH\tTOTAL_FIBERS\t" for test_length in fiber_test_lengths[1:]: outstr = outstr + "LEN_" + str(test_length) + '\t' - outstr = outstr + '\n' + outstr = f'{outstr}\n' fibers_qc_file.write(outstr) fibers_qc_file.close() data_qc_file = open(data_qc_fname, 'w') outstr = "SUBJECT_ID\tDATA_INFORMATION (field name, number of components, point or cell data)" - outstr = outstr + '\n' + outstr = f'{outstr}\n' data_qc_file.write(outstr) data_qc_file.close() spatial_qc_file = open(spatial_qc_fname, 'w') outstr = "SUBJECT_ID\tXmin\tXmax\tYmin\tYmax\tZmin\tZmax" - outstr = outstr + '\n' + outstr = f'{outstr}\n' spatial_qc_file.write(outstr) spatial_qc_file.close() @@ -183,7 +183,7 @@ def main(): appender = vtk.vtkAppendPolyData() for fname in input_polydatas: subject_id = os.path.splitext(os.path.basename(fname))[0] - print("Subject ", subject_idx, "/", number_of_subjects, "ID:", subject_id) + print(f"Subject {subject_idx} / {number_of_subjects} ID: {subject_id}") # Read data pd = wma.io.read_polydata(fname) @@ -194,14 +194,14 @@ def main(): # Render individual subject, only including fibers above 5mm. ren = wma.render.render(pd2, 1000, verbose=False) - output_dir_subdir = os.path.join(output_dir, 'tract_QC_' + subject_id) + output_dir_subdir = os.path.join(output_dir, f'tract_QC_{subject_id}') if not os.path.exists(output_dir_subdir): os.makedirs(output_dir_subdir) ren.save_views(output_dir_subdir, subject_id) del ren print('Multiple views for individual subject') - html_individual_multiviews = os.path.join(output_dir_subdir, 'view_multiple_'+subject_id+'.html') + html_individual_multiviews = os.path.join(output_dir_subdir, f'view_multiple_{subject_id}.html') f = open(html_individual_multiviews, 'w') outstr = "\n\n" f.write(outstr) @@ -212,24 +212,24 @@ def main(): outstr += "h1 {text-align: center;\n}\n" outstr += "\n" f.write(outstr) - outstr = "\n

All " + subject_id + " Views

\n" + outstr = f"\n

All {subject_id} Views

\n" f.write(outstr) f.close() for (view, descrip) in zip(html_views, html_views_descrip): f = open(html_individual_multiviews, 'a') - img_fname = os.path.join(view + subject_id + '.jpg') + img_fname = os.path.join(f'{view}{subject_id}.jpg') outstr = "
\n" - outstr+= "\""\n" - outstr+= "

" + descrip + "

\n
\n" + outstr+= f"\"{subject_id}\"\n" + outstr+= f"

{descrip}

\n\n" f.write(outstr) f.close() # Save view information in html file for (fname, view) in zip(html_view_fnames, html_views): f = open(fname, 'a') - img_fname = os.path.join('tract_QC_' + subject_id, view + subject_id + '.jpg') - html_fname = os.path.join('tract_QC_' + subject_id, 'view_multiple_'+subject_id+'.html') + img_fname = os.path.join(f'tract_QC_{subject_id}', f'{view}{subject_id}.jpg') + html_fname = os.path.join(f'tract_QC_{subject_id}', f'view_multiple_{subject_id}.html') print(output_dir_subdir) print(img_fname) print(html_individual_multiviews) @@ -239,8 +239,8 @@ def main(): #outstr+= "
" + subject_id + "
\n" #outstr+= "\n" outstr = "
\n" - outstr+= "\""\n" - outstr+= "

" + subject_id + "

\n
\n" + outstr+= f"\"{subject_id}\"\n" + outstr+= f"

{subject_id}

\n\n" f.write(outstr) f.close() @@ -249,26 +249,26 @@ def main(): pd2, lengths, step_size = wma.filter.preprocess(pd, 20, return_lengths=True, verbose=False) lengths = numpy.array(lengths) fibers_qc_file = open(fibers_qc_fname, 'a') - outstr = str(subject_id) + '\t' - outstr = outstr + f'{step_size:.4f}' + '\t' + outstr = f'{str(subject_id)}\t' + outstr = f'{outstr}{step_size:.4f}\t' # total points in the dataset - outstr = outstr + str(pd.GetNumberOfPoints()) + '\t' + outstr = f'{outstr}{str(pd.GetNumberOfPoints())}\t' # mean fiber length - outstr = outstr + str(numpy.mean(lengths)) + '\t' + outstr = f'{outstr}{str(numpy.mean(lengths))}\t' # total numbers of fibers for test_length in fiber_test_lengths: number_fibers = numpy.count_nonzero(lengths > test_length) - outstr = outstr + str(number_fibers) + '\t' - outstr = outstr + '\n' + outstr = f'{outstr}{str(number_fibers)}\t' + outstr = f'{outstr}\n' fibers_qc_file.write(outstr) fibers_qc_file.close() # Save information about the spatial location of the fiber tracts spatial_qc_file = open(spatial_qc_fname, 'a') - outstr = str(subject_id) + '\t' + outstr = f'{str(subject_id)}\t' for bound in pd.GetBounds(): - outstr = outstr + str(bound) + '\t' - outstr = outstr + '\n' + outstr = f'{outstr}{str(bound)}\t' + outstr = f'{outstr}\n' spatial_qc_file.write(outstr) # Save the subject's fiber lengths @@ -306,20 +306,20 @@ def main(): # Record what scalar/tensor data is present in this subject's file data_qc_file = open(data_qc_fname, 'a') - outstr = str(subject_id) + '\t' + outstr = f'{str(subject_id)} \t' inpointdata = pd.GetPointData() incelldata = pd.GetCellData() if inpointdata.GetNumberOfArrays() > 0: point_data_array_indices = list(range(inpointdata.GetNumberOfArrays())) for idx in point_data_array_indices: array = inpointdata.GetArray(idx) - outstr = outstr + str(array.GetName()) + '\t' + str(array.GetNumberOfComponents()) + '\t' + 'point' + '\t' + outstr = f'{outstr}{str(array.GetName())}\t{str(array.GetNumberOfComponents())}\tpoint\t' if incelldata.GetNumberOfArrays() > 0: cell_data_array_indices = list(range(incelldata.GetNumberOfArrays())) for idx in cell_data_array_indices: array = incelldata.GetArray(idx) - outstr = outstr + str(array.GetName()) + '\t' + str(array.GetNumberOfComponents()) +'\t' + 'cell' + '\t' - outstr = outstr + '\n' + outstr = f'{outstr}{str(array.GetName())}\t{str(array.GetNumberOfComponents())}\tcell\t' + outstr = f'{outstr}\n' data_qc_file.write(outstr) data_qc_file.close() @@ -358,7 +358,7 @@ def main(): # Finish html files for (fname) in html_view_fnames: f = open(fname, 'a') - img_fname = os.path.join(output_dir_subdir, view + subject_id + '.jpg') + img_fname = os.path.join(output_dir_subdir, f'{view}{subject_id}.jpg') outstr = "\n\n\n" f.write(outstr) f.close() diff --git a/bin/wm_register_multisubject_faster.py b/bin/wm_register_multisubject_faster.py index a9304401..a153899a 100755 --- a/bin/wm_register_multisubject_faster.py +++ b/bin/wm_register_multisubject_faster.py @@ -70,33 +70,33 @@ def main(): print("\n\n =========GROUP REGISTRATION============") print(f"<{os.path.basename(__file__)}> Performing unbiased group registration.") - print(f"<{os.path.basename(__file__)}> Input directory: ", args.inputDirectory) - print(f"<{os.path.basename(__file__)}> Output directory: ", args.outputDirectory) + print(f"<{os.path.basename(__file__)}> Input directory: {args.inputDirectory}") + print(f"<{os.path.basename(__file__)}> Output directory: {args.outputDirectory}") print("\n 
============PARAMETERS=================") mode = args.mode - print(f"<{os.path.basename(__file__)}> Registration mode:", mode) + print(f"<{os.path.basename(__file__)}> Registration mode: {mode}") if not os.path.isdir(args.inputDirectory): - print(f"<{os.path.basename(__file__)}> Error: Input directory", args.inputDirectory, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {args.inputDirectory} does not exist.") exit() outdir = args.outputDirectory if not os.path.exists(outdir): - print(f"<{os.path.basename(__file__)}> Output directory", outdir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) number_of_fibers = args.numberOfFibers - print(f"<{os.path.basename(__file__)}> Number of fibers to analyze per subject: ", number_of_fibers) + print(f"<{os.path.basename(__file__)}> Number of fibers to analyze per subject: {number_of_fibers}") fiber_length = args.fiberLength - print(f"<{os.path.basename(__file__)}> Minimum length of fibers to analyze (in mm): ", fiber_length) + print(f"<{os.path.basename(__file__)}> Minimum length of fibers to analyze (in mm): {fiber_length}") fiber_length_max = args.fiberLengthMax - print(f"<{os.path.basename(__file__)}> Maximum length of fibers to analyze (in mm): ", fiber_length_max) + print(f"<{os.path.basename(__file__)}> Maximum length of fibers to analyze (in mm): {fiber_length_max}") parallel_jobs = args.numberOfJobs - print(f"<{os.path.basename(__file__)}> Number of jobs to use:", parallel_jobs) + print(f"<{os.path.basename(__file__)}> Number of jobs to use: {parallel_jobs}") if args.flag_verbose: print(f"<{os.path.basename(__file__)}> Verbose display and intermediate image saving ON.") @@ -121,7 +121,7 @@ def main(): midsag_symmetric = args.flag_midsag_symmetric if args.randomSeed is not None: - print(f"<{os.path.basename(__file__)}> Setting random seed to: ", args.randomSeed) + print(f"<{os.path.basename(__file__)}> Setting random seed to: {args.randomSeed}") random_seed = args.randomSeed # ------------- @@ -287,7 +287,7 @@ def main(): nonrigid = True else: - print("\n Error: Unknown registration mode:", mode) + print(f"\n Error: Unknown registration mode: {mode}") exit() # ------------- @@ -297,7 +297,7 @@ def main(): # Test the input files exist input_polydatas = wma.io.list_vtk_files(args.inputDirectory) number_of_subjects = len(input_polydatas) - print(f"<{os.path.basename(__file__)}> Found ", number_of_subjects, "subjects in input directory:", args.inputDirectory) + print(f"<{os.path.basename(__file__)}> Found {number_of_subjects} subjects in input directory {args.inputDirectory}") if number_of_subjects < 1: print("\n Error: No .vtk or .vtp files were found in the input directory.\n") exit() @@ -342,7 +342,7 @@ def main(): # We have to add polydatas after setting nonrigid in the register object for (pd, id) in zip(input_pds, subject_ids): register.add_polydata(pd, id) - print(f"<{os.path.basename(__file__)}> Number of points for fiber representation: ", points_per_fiber) + print(f"<{os.path.basename(__file__)}> Number of points for fiber representation: {points_per_fiber}") register.points_per_fiber = points_per_fiber # output summary file to save information about what was run @@ -361,14 +361,14 @@ def main(): outstr += str(number_of_subjects) outstr += '\n' outstr += '\n' - outstr += "Current date: " + time.strftime("%x") + outstr += f"Current date: {time.strftime("%x")}" outstr += '\n' - outstr += "Current time: " + 
time.strftime("%X") + outstr += f"Current time: {time.strftime("%X")}" outstr += '\n' outstr += '\n' - outstr += "Path to Script: " + os.path.realpath(__file__) + outstr += f"Path to Script: {os.path.realpath(__file__)}" outstr += '\n' - outstr += "Working Directory: " + os.getcwd() + outstr += f"Working Directory: {os.getcwd()}" outstr += '\n' outstr += '\n' outstr += "Description of Outputs\n" @@ -392,23 +392,23 @@ def main(): outstr += '\n' outstr += "Parameters\n" outstr += '----------------------\n' - outstr += "Registration mode: " + mode + outstr += f"Registration mode: {mode}" outstr += '\n' - outstr += "Number of fibers to analyze per subject: " + str(number_of_fibers) + outstr += f"Number of fibers to analyze per subject: {str(number_of_fibers)}" outstr += '\n' - outstr += "Minimum length of fibers to analyze (in mm): " + str(fiber_length) + outstr += f"Minimum length of fibers to analyze (in mm): {str(fiber_length)}" outstr += '\n' - outstr += "Maximum length of fibers to analyze (in mm): " + str(fiber_length_max) + outstr += f"Maximum length of fibers to analyze (in mm): {str(fiber_length_max)}" outstr += '\n' - outstr += "Number of jobs to use: " + str(parallel_jobs) + outstr += f"Number of jobs to use: {str(parallel_jobs)}" outstr += '\n' - outstr += "verbose: " + str(verbose) + outstr += f"verbose: {str(verbose)}" outstr += '\n' - outstr += "render: " + str(not no_render) + outstr += f"render: {str(not no_render)}" outstr += '\n' - outstr += "midsag_symmetric: " + str(midsag_symmetric) + outstr += f"midsag_symmetric: {str(midsag_symmetric)}" outstr += '\n' - outstr += "random seed: " + str(random_seed) + outstr += f"random seed: {str(random_seed)}" outstr += '\n' outstr += '\n' outstr += "Input Fiber Files\n" @@ -446,8 +446,8 @@ def main(): progress_filename = os.path.join(args.outputDirectory, 'progress.txt') progress_file = open(progress_filename, 'w') print("Beginning registration. Total iterations will be:", total_iterations, file=progress_file) - print("Start date: " + time.strftime("%x"), file=progress_file) - print("Start time: " + time.strftime("%X") + '\n', file=progress_file) + print(f"Start date: {time.strftime("%x")}", file=progress_file) + print(f'Start time: {time.strftime("%X")}\n', file=progress_file) progress_file.close() prev_time = time.time() do_scales = list(range(len(sigma_per_scale))) @@ -468,10 +468,10 @@ def main(): comparisons_this_scale = mean_brain_size_per_scale[scale]*subject_brain_size_per_scale[scale] comparisons_so_far += comparisons_this_scale percent = 100*(float(comparisons_so_far)/total_comparisons) - print("Done iteration", iteration, "/", total_iterations, ". Percent finished approx:", "%.2f" % percent) + print(f"Done iteration {iteration} / {total_iterations}. Percent finished approx: {percent:.2f}") progress_file = open(progress_filename, 'a') curr_time = time.time() - print("Done iteration", iteration, "/", total_iterations, ". Percent finished approx:", "%.2f" % percent, ". Time:", time.strftime("%X"), ". Minutes Elapsed:", (curr_time - prev_time)/60, file=progress_file) + print(f"Done iteration {iteration} / {total_iterations}. Percent finished approx: {percent:.2f}. Time: {time.strftime("%X")}. Minutes Elapsed: {(curr_time - prev_time)/60}", file=progress_file) progress_file.close() prev_time = curr_time @@ -483,12 +483,12 @@ def main(): # Final save when we are done register.save_transformed_polydatas(midsag_symmetric=midsag_symmetric) - print("\nDone registering. 
For more information on the output, please read:", readme_fname, "\n") + print(f"\nDone registering. For more information on the output, please read: {readme_fname}\n") progress_file = open(progress_filename, 'a') print("\nFinished registration.", file=progress_file) - print("End date: " + time.strftime("%x"), file=progress_file) - print("End time: " + time.strftime("%X"), file=progress_file) + print(f"End date: {time.strftime("%x")}", file=progress_file) + print(f"End time: {time.strftime("%X")}", file=progress_file) progress_file.close() diff --git a/bin/wm_register_to_atlas_new.py b/bin/wm_register_to_atlas_new.py index 450efc8f..a9dc57d7 100755 --- a/bin/wm_register_to_atlas_new.py +++ b/bin/wm_register_to_atlas_new.py @@ -83,11 +83,11 @@ def main(): outdir = args.outputDirectory if not os.path.exists(outdir): - print(f"<{os.path.basename(__file__)}> Output directory", outdir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) subject_outdir = os.path.join(outdir, subject_id) if not os.path.exists(subject_outdir): - print(f"<{os.path.basename(__file__)}> Output directory", outdir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir} does not exist, creating it.") os.makedirs(subject_outdir) fiber_length = args.fiberLength @@ -287,7 +287,7 @@ def main(): nonrigid = True else: - print("\n Error: Unknown registration mode:", mode) + print(f"\n Error: Unknown registration mode: {mode}") exit() @@ -320,9 +320,9 @@ def main(): progress_filename = os.path.join(subject_outdir, 'progress.txt') progress_file = open(progress_filename, 'w') - print("Beginning registration. Total iterations will be:", total_iterations, file=progress_file) - print("Start date: " + time.strftime("%x"), file=progress_file) - print("Start time: " + time.strftime("%X") + '\n', file=progress_file) + print(f"Beginning registration. Total iterations will be: {total_iterations}", file=progress_file) + print(f"Start date: {time.strftime("%x")}", file=progress_file) + print(f'Start time: {time.strftime("%X")}\n', file=progress_file) progress_file.close() prev_time = time.time() @@ -349,10 +349,10 @@ def main(): comparisons_this_scale = mean_brain_size_per_scale[scale]*subject_brain_size_per_scale[scale] comparisons_so_far += comparisons_this_scale percent = 100*(float(comparisons_so_far)/total_comparisons) - print("Done iteration", iteration, "/", total_iterations, ". Percent finished approx:", "%.2f" % percent) + print(f"Done iteration {iteration} / {total_iterations}. Percent finished approx: {percent:.2f}") progress_file = open(progress_filename, 'a') curr_time = time.time() - print("Done iteration", iteration, "/", total_iterations, ". Percent finished approx:", "%.2f" % percent, ". Time:", time.strftime("%X"), ". Minutes Elapsed:", (curr_time - prev_time)/60, file=progress_file) + print(f"Done iteration {iteration} / {total_iterations}. Percent finished approx: {percent:.2f}. Time: {time.strftime("%X")}. Minutes Elapsed: {(curr_time - prev_time) / 60}", file=progress_file) progress_file.close() prev_time = curr_time iteration += 1 @@ -363,12 +363,12 @@ def main(): # Final save when we are done register.save_transformed_polydata() - print("Done registering. See output in:", subject_outdir) + print(f"Done registering. 
See output in: {subject_outdir}") progress_file = open(progress_filename, 'a') print("\nFinished registration.", file=progress_file) - print("End date: " + time.strftime("%x"), file=progress_file) - print("End time: " + time.strftime("%X"), file=progress_file) + print(f"End date: {time.strftime('%x')}", file=progress_file) + print(f"End time: {time.strftime('%X')}", file=progress_file) progress_file.close() if __name__ == '__main__': diff --git a/bin/wm_remove_data_along_tracts.py b/bin/wm_remove_data_along_tracts.py index 3229a510..fc97a412 100644 --- a/bin/wm_remove_data_along_tracts.py +++ b/bin/wm_remove_data_along_tracts.py @@ -47,25 +47,25 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() outdir = args.outputDirectory if not os.path.exists(outdir): - print("Output directory", outdir, "does not exist, creating it.") + print(f"Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) print("") - print("=====input directory======\n", args.inputDirectory) - print("=====output directory=====\n", args.outputDirectory) + print(f"=====input directory======\n {args.inputDirectory}") + print(f"=====output directory=====\n {args.outputDirectory}") print("==========================") - print('CPUs detected:', multiprocessing.cpu_count()) + print(f'CPUs detected: {multiprocessing.cpu_count()}') if args.numberOfJobs is not None: parallel_jobs = args.numberOfJobs else: parallel_jobs = multiprocessing.cpu_count() - print('Using N jobs:', parallel_jobs) + print(f'Using N jobs: {parallel_jobs}') print("==========================") @@ -73,7 +73,7 @@ def main(): # Loop over input DWIs inputPolyDatas = wma.io.list_vtk_files(args.inputDirectory) - print(f"<{os.path.basename(__file__)}> Input number of files: ", len(inputPolyDatas)) + print(f"<{os.path.basename(__file__)}> Input number of files: {len(inputPolyDatas)}") # for testing #inputPolyDatas = inputPolyDatas[0:2] @@ -83,7 +83,7 @@ def pipeline(inputPolyDatas, sidx, args): # ------------------- #subjectID = os.path.splitext(os.path.basename(inputPolyDatas[sidx]))[0] fname = os.path.basename(inputPolyDatas[sidx]) - print(f"<{os.path.basename(__file__)}> ", sidx + 1, "/", len(inputPolyDatas)) + print(f"<{os.path.basename(__file__)}> {sidx + 1} / {len(inputPolyDatas)}") # read input vtk data # ------------------- @@ -91,13 +91,13 @@ def pipeline(inputPolyDatas, sidx, args): num_lines = inpd.GetNumberOfLines() fiber_mask = numpy.ones(num_lines) outpd = wma.filter.mask(inpd, fiber_mask, preserve_point_data=False, preserve_cell_data=False, verbose=False) - print("Number of fibers retained: ", outpd.GetNumberOfLines(), "/", num_lines) + print(f"Number of fibers retained: {outpd.GetNumberOfLines()} / {num_lines}") # outputs # ------------------- fname = os.path.join(args.outputDirectory, fname) try: - print("Writing output polydata", fname, "...") + print(f"Writing output polydata {fname}...") wma.io.write_polydata(outpd, fname) except: print("Unknown exception in IO") diff --git a/bin/wm_separate_clusters_by_hemisphere.py b/bin/wm_separate_clusters_by_hemisphere.py index d8bba213..5503be45 100755 --- a/bin/wm_separate_clusters_by_hemisphere.py +++ b/bin/wm_separate_clusters_by_hemisphere.py @@ -54,12 +54,12 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print(f"<{os.path.basename(__file__)}> Error: Input 
directory", args.inputDirectory, "does not exist or is not a directory.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {args.inputDirectory} does not exist or is not a directory.") exit() outdir = args.outputDirectory if not os.path.exists(outdir): - print(f"<{os.path.basename(__file__)}> Output directory", outdir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) outdir_right = os.path.join(outdir, 'tracts_right_hemisphere') @@ -149,8 +149,8 @@ def _read_location(_inpd): print(f"<{os.path.basename(__file__)}> Starting computation.") print("") - print("=====input directory ======\n", args.inputDirectory) - print("=====output directory =====\n", args.outputDirectory) + print(f"=====input directory ======\n {args.inputDirectory}") + print(f"=====output directory =====\n {args.outputDirectory}") print("==========================") print("") @@ -158,7 +158,7 @@ def _read_location(_inpd): number_of_clusters = len(input_polydatas) - print(f"<{os.path.basename(__file__)}> Input number of vtk/vtp files: ", number_of_clusters) + print(f"<{os.path.basename(__file__)}> Input number of vtk/vtp files: {number_of_clusters}") # midsagittal alignment step to get transform to apply to each polydata # alternatively could use the transform into atlas space that is found @@ -172,19 +172,19 @@ def _read_location(_inpd): fname_base = os.path.basename(fname) # read data - print(f"<{os.path.basename(__file__)}> Separating input file:", fname) + print(f"<{os.path.basename(__file__)}> Separating input file: {fname}") pd = wma.io.read_polydata(fname) flag_location, mask_location = read_mask_location_from_vtk(pd) # If HemisphereLocation is not defined in the input vtk file, the location of each fiber in the cluster is decided. 
if not flag_location: - print("Error:", fname, "has no hemisphere location information") + print(f"Error: {fname} has no hemisphere location information") exit() # for sanity check if len(numpy.where(mask_location ==0)[0]) > 1: - print("Error: Not all fibers in", fname, "is labeled with hemisphere location information.") + print(f"Error: Not all fibers in {fname} is labeled with hemisphere location information.") exit() # output separated clusters diff --git a/bin/wm_tract_to_volume.py b/bin/wm_tract_to_volume.py index 7e58b450..7b387a93 100644 --- a/bin/wm_tract_to_volume.py +++ b/bin/wm_tract_to_volume.py @@ -49,7 +49,7 @@ def main(): args = _parse_args(parser) if not os.path.exists(args.inputVTK): - print("Error: Input directory", args.inputVTK, "does not exist.") + print(f"Error: Input directory {args.inputVTK} does not exist.") exit() def convert_cluster_to_volume_with_sz(inpd, volume, sampling_size=0.5): @@ -150,7 +150,7 @@ def convert_cluster_to_volume(inpd, volume, measure=None): return new_voxel_data volume = nibabel.load(args.refvolume) - print(f'<{os.path.basename(__file__)}>', args.refvolume, ', input volume shape: ', volume.get_fdata().shape) + print(f'<{os.path.basename(__file__)}> {args.refvolume} input volume shape: f{volume.get_fdata().shape}') inpd = wma.io.read_polydata(args.inputVTK) @@ -160,7 +160,7 @@ def convert_cluster_to_volume(inpd, volume, measure=None): nibabel.save(volume_new, args.outputVol) - print('Done: save tract map to', args.outputVol) + print(f'Done: save tract map to {args.outputVol}') if __name__ == '__main__': main() diff --git a/bin/wm_vtp2vtk.py b/bin/wm_vtp2vtk.py index b34b3dd6..a6561dd4 100644 --- a/bin/wm_vtp2vtk.py +++ b/bin/wm_vtp2vtk.py @@ -35,12 +35,12 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() outdir = args.outputDirectory if not os.path.exists(args.outputDirectory): - print("Output directory", outdir, "does not exist, creating it.") + print(f"Output directory {outdir} does not exist, creating it.") os.makedirs(args.outputDirectory) # Preserve input directory name which may contain provenance like subject ID or structure, e.g UF @@ -48,14 +48,14 @@ def main(): print(subject_dir) outdir_subject = os.path.join(args.outputDirectory, subject_dir+"_vtk") if not os.path.exists(outdir_subject): - print("Output directory", outdir_subject, "does not exist, creating it.") + print(f"Output directory {outdir_subject} does not exist, creating it.") os.makedirs(outdir_subject) print("") print("wm_vtp2vtk.py: Convert all vtp files in input directory to vtk files in output directory.") - print("=====input directory======\n", args.inputDirectory) - print("=====top-level output directory requested by user=====\n", args.outputDirectory) - print("=====final output directory=====\n", outdir_subject) + print(f"=====input directory======\n {args.inputDirectory}") + print(f"=====top-level output directory requested by user=====\n {args.outputDirectory}") + print(f"=====final output directory=====\n {outdir_subject}") print("==========================") def list_vtp_files(input_dir): @@ -70,7 +70,7 @@ def list_vtp_files(input_dir): # Loop over input vtps inputPolyDatas = list_vtp_files(args.inputDirectory) - print("Input number of vtp files found: ", len(inputPolyDatas), '\n') + print(f'Input number of vtp files found: {len(inputPolyDatas)}\n') if len(inputPolyDatas) < 1: 
print("Warning: no input vtp files found in input directory.") @@ -88,7 +88,7 @@ def list_vtp_files(input_dir): # read input vtk data # ------------------- - print("**Reading input:", pd_fname, "(", subjectID, ")") + print(f"**Reading input: {pd_fname} ({subjectID})") wm = wma.io.read_polydata(pd_fname) @@ -96,7 +96,7 @@ def list_vtp_files(input_dir): # ------------------- fname = os.path.join(outdir_subject, subjectID+'.vtk') try: - print("====> Writing output polydata", fname, "...") + print(f"====> Writing output polydata {fname}...") wma.io.write_polydata(wm, fname) except: print("Unknown exception in IO") diff --git a/utilities/wm_assess_cluster_location.py b/utilities/wm_assess_cluster_location.py index 0429ae69..c9986654 100644 --- a/utilities/wm_assess_cluster_location.py +++ b/utilities/wm_assess_cluster_location.py @@ -48,9 +48,9 @@ def main(): print(f"<{os.path.basename(__file__)}>. Starting processing.") print("") - print("=====input atlas directory======\n", args.inputAtlasDirectory) - print("=====pthresh====\n", args.hemispherePercentThreshold) - print("=====advanced_times_threshold====\n", args.advanced_times_threshold) + print(f"=====input atlas directory======\n {args.inputAtlasDirectory}") + print(f"=====pthresh====\n {args.hemispherePercentThreshold}") + print(f"=====advanced_times_threshold====\n {args.advanced_times_threshold}") def list_cluster_files(input_dir): # Find input files @@ -104,10 +104,9 @@ def list_cluster_files(input_dir): location = 'Not Given' ng_list.append(fname_base) - print("%20s: %10s %10s %10s - %10s" \ - % (fname_base, num_left, num_right, num_comm, location)) + print(f"{fname_base:20s}: {num_left:10s} {num_right:10s} {num_comm:10s} - {location:10s}") - outstr = outstr + fname_base + '\t'+ str(num_left) + '\t'+ str(num_right) + '\t'+ str(num_comm) + '\t'+ location + '\n' + outstr = f'{outstr}{fname_base}\t{str(num_left)}\t{str(num_right)}\t{str(num_comm)}\t{location}\n' output_file.write(outstr) output_file.close() diff --git a/utilities/wm_compute_FA_from_DWIs.py b/utilities/wm_compute_FA_from_DWIs.py index 284a32d3..39e63b9b 100644 --- a/utilities/wm_compute_FA_from_DWIs.py +++ b/utilities/wm_compute_FA_from_DWIs.py @@ -52,16 +52,16 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectoryDWI): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() if not os.path.isdir(args.inputDirectoryMask): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() outdir = args.outputDirectory if not os.path.exists(outdir): - print(f"<{os.path.basename(__file__)}> Output directory", outdir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) # get inputs @@ -71,11 +71,11 @@ def main(): # create FA images for (dwi, mask) in zip(dwi_list, mask_list): subject_id = os.path.splitext(os.path.basename(dwi))[0] - fname_out_dti = os.path.join(args.outputDirectory, subject_id + '_DTI.nhdr') - fname_out_b0 = os.path.join(args.outputDirectory, subject_id + '_B0.nhdr') - fname_out_fa = os.path.join(args.outputDirectory, subject_id + '_FA.nhdr') - print("/Applications/Slicer.app/Contents/lib/Slicer-4.4/cli-modules/DWIToDTIEstimation -m ", mask, dwi, fname_out_dti, fname_out_b0) - 
print("/Applications/Slicer.app/Contents/lib/Slicer-4.4/cli-modules/DiffusionTensorScalarMeasurements", fname_out_dti, fname_out_fa, "-e FractionalAnisotropy &") + fname_out_dti = os.path.join(args.outputDirectory, f'{subject_id}_DTI.nhdr') + fname_out_b0 = os.path.join(args.outputDirectory, f'{subject_id}_B0.nhdr') + fname_out_fa = os.path.join(args.outputDirectory, f'{subject_id}_FA.nhdr') + print(f"/Applications/Slicer.app/Contents/lib/Slicer-4.4/cli-modules/DWIToDTIEstimation -m {mask} {dwi} {fname_out_dti} {fname_out_b0}") + print(f"/Applications/Slicer.app/Contents/lib/Slicer-4.4/cli-modules/DiffusionTensorScalarMeasurements {fname_out_dti} {fname_out_fa} -e FractionalAnisotropy &") if __name__ == "__main__": diff --git a/utilities/wm_compute_TAP.py b/utilities/wm_compute_TAP.py index 19d0c2cb..1626b0a1 100755 --- a/utilities/wm_compute_TAP.py +++ b/utilities/wm_compute_TAP.py @@ -38,7 +38,7 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist or is not a directory.") + print(f"Error: Input directory {args.inputDirectory} does not exist or is not a directory.") exit() def list_vtk_files(input_dir): @@ -94,7 +94,7 @@ def compute_TAP(inpd): r_list = r_list[arg] for r, p in zip(r_list, r_prob): - TAP_str += "'%d:%0.6f'," % (r, p) + TAP_str += f"{r}:{p:0.6f}" return TAP_str @@ -107,7 +107,7 @@ def compute_TAP(inpd): tract_name = os.path.basename(vtk_file).replace(".vtp", "").replace(".vtk", "") pd = wma.io.read_polydata(vtk_file) TAP_str = compute_TAP(pd) - all_TAP_str += tract_name + "," + TAP_str + "\n" + all_TAP_str += f"{tract_name},{TAP_str}\n" print(all_TAP_str) diff --git a/utilities/wm_extract_cluster.py b/utilities/wm_extract_cluster.py index fc3cdf30..e38c56ec 100755 --- a/utilities/wm_extract_cluster.py +++ b/utilities/wm_extract_cluster.py @@ -44,12 +44,12 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print(f"<{os.path.basename(__file__)}> Error: Input directory", args.inputDirectory, "does not exist or is not a directory.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {args.inputDirectory} does not exist or is not a directory.") exit() outdir = args.outputDirectory if not os.path.exists(outdir): - print(f"<{os.path.basename(__file__)}> Output directory", outdir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) # cluster filename we want @@ -70,10 +70,10 @@ def main(): print(dir) fname = os.path.join(dir, fname_c) if os.path.exists(fname): - fname1 = subject_id+'_'+fname_c + fname1 = f'{subject_id}_{fname_c}' fname_list.append(fname1) fname2 = os.path.join(outdir, fname1) - print(fname, "===>>>>", fname2) + print(f"{fname} ===>>>> {fname2}") shutil.copy(fname, fname2) # also output a MRML file to load them all into Slicer diff --git a/utilities/wm_extract_clusters_by_endpoints.py b/utilities/wm_extract_clusters_by_endpoints.py index a24fe6d7..9870ec7f 100755 --- a/utilities/wm_extract_clusters_by_endpoints.py +++ b/utilities/wm_extract_clusters_by_endpoints.py @@ -48,7 +48,7 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() percentage_threshold = args.percentage_threshold @@ -58,11 +58,11 @@ def main(): measurement_list = 
wma.tract_measurement.load_measurement_in_folder(args.inputDirectory, hierarchy = 'Column', separator = 'Tab') if len(measurement_list) < 1: - print("Error: No measurement files found in directory:", args.inputDirectory) + print(f"Error: No measurement files found in directory {args.inputDirectory}") exit() number_of_subjects = len(measurement_list) - print("\n== Number of subjects data found:", number_of_subjects) + print(f"\n== Number of subjects data found: {number_of_subjects}") # print the header print("\n== Region labels found in the measurement file:") @@ -70,7 +70,7 @@ def main(): print(header) region = args.region - print("\n== Input region label:", region) + print(f"\n== Input region label: {region}") # region_exist = False # region_index = 0 @@ -88,7 +88,7 @@ def main(): subject_number_threshold = args.subject_number_threshold if subject_number_threshold < 0 or subject_number_threshold > number_of_subjects: - print("Error: Subject number threshold should be in 0 to number of subject ("+ str(number_of_subjects)+ ")") + print(f"Error: Subject number threshold should be in 0 to number of subject ({str(number_of_subjects)})") exit() # sanity check all measured variables are the same @@ -105,10 +105,10 @@ def main(): region_index = region_index + 1 if not region_exist: - print("\nError: region", region, "does not exist in", subject_measured.case_id) + print(f"\nError: region {region} does not exist in {subject_measured.case_id}") exit() else: - print(subject_measured.case_id, ", total number of regions:",len(subject_measured.measurement_header), ", input region found at position", region_index+1) + print(f"{subject_measured.case_id}, total number of regions: {len(subject_measured.measurement_header)}, input region found at position {region_index + 1}") region_indices.append(region_index) # Make subject id list for reporting of any potential outliers @@ -121,14 +121,14 @@ def main(): print("\n== Testing if all subjects have the same number of clusters.") test = True number_of_clusters = measurement_list[0].measurement_matrix[:,0].shape[0] - print("Group number of clusters (from first subject):", number_of_clusters) + print(f"Group number of clusters (from first subject): {number_of_clusters}") for subject_measured, id in zip(measurement_list, subject_id_list): nc = subject_measured.measurement_matrix[:,0].shape[0] if not nc == number_of_clusters: - print(nc, " : ", id) + print(f"{nc} : {id}") test = False if test: - print("Passed. All subjects have the same number of clusters ("+ str(number_of_clusters) + ").") + print(f"Passed. All subjects have the same number of clusters ({str(number_of_clusters)}).") else: print("ERROR: All subjects do not have the same number of clusters. 
There was an earlier error in clustering or measurement that must be fixed before analysis.") @@ -139,11 +139,11 @@ def main(): subject_percentage_distribution = subject_measured.measurement_matrix[:, region_index] - percent_str = sub_id + '(' + str(subject_measured.measurement_header[region_index]) + ')' - title_str = ("{:<"+str(len(sub_id)+len(region)+2)+"}").format('Percentage:') + percent_str = f'{sub_id}({str(subject_measured.measurement_header[region_index])})' + title_str = f"Percentage: {{:<{str(len(sub_id)+len(region)+2)}}}" for p in percentage_range: - percent_str = percent_str + ', ' + f"{str(sum(subject_percentage_distribution > p)):<4}" - title_str = title_str + ', ' + f"{str(p):<4}" + percent_str = f"{percent_str}, {str(sum(subject_percentage_distribution > p)):<4}" + title_str = f"{title_str}, {str(p):<4}" if not print_title: print(title_str) @@ -161,12 +161,12 @@ def main(): print("\n== Number of clusters that connect to the input region across all subjects.") num_subjects_per_cluster = sum(connected_matrix) for num_idx in range(number_of_subjects+1): - print("Clusters that are connected in", num_idx, '(subject number threshold) subjects: ', sum(num_subjects_per_cluster == num_idx)) + print(f"Clusters that are connected in {num_idx} (subject number threshold) subjects: {sum(num_subjects_per_cluster == num_idx)}") - print("\n== Result clusters that are detected by at least",subject_number_threshold,'subjects') + print(f"\n== Result clusters that are detected by at least {subject_number_threshold} subjects") output_cluster_idx = numpy.where(num_subjects_per_cluster >= subject_number_threshold)[0] output_cluster_idx = output_cluster_idx + 1 - print("Total", len(output_cluster_idx), "clusters.") + print(f"Total {len(output_cluster_idx)} clusters.") print(output_cluster_idx) if not (args.fiber_cluster_folder is None): @@ -178,7 +178,7 @@ def main(): cluster_polydatas = [] for c_idx in output_cluster_idx: - cluster_polydatas.append("cluster_"+str(c_idx).zfill(5)+suffix+".vtp") + cluster_polydatas.append(f"cluster_{str(c_idx).zfill(5)}{suffix}.vtp") number_of_files = len(cluster_polydatas) @@ -194,12 +194,12 @@ def main(): idx += 1 colors = numpy.array(colors) - mrml_filename = "cluster_connecting_region_" + region + ".mrml" + mrml_filename = f"cluster_connecting_region_{region}.mrml" for sub_folder in os.listdir(args.fiber_cluster_folder): wma.mrml.write(cluster_polydatas, colors, os.path.join(args.fiber_cluster_folder+'/'+sub_folder, mrml_filename), ratio=1.0) - print('Done!' 
+ '\n') + print('Done!\n') if __name__ == "__main__": diff --git a/utilities/wm_fix_UKF_trace.py b/utilities/wm_fix_UKF_trace.py index 70e729a4..efaaec23 100644 --- a/utilities/wm_fix_UKF_trace.py +++ b/utilities/wm_fix_UKF_trace.py @@ -13,7 +13,7 @@ def list_files(input_dir,regstr): # Find input files - input_mask = ("{0}/"+regstr).format(input_dir) + input_mask = f"{input_dir}/{regstr}" input_pd_fnames = glob.glob(input_mask) input_pd_fnames = sorted(input_pd_fnames) return(input_pd_fnames) @@ -29,7 +29,7 @@ def fix_trace(inpd): if array.GetName() == 'trace1' or array.GetName() == 'trace2': array_trace_fixed = vtk.vtkFloatArray() - array_trace_fixed.SetName("correct_"+array.GetName()) + array_trace_fixed.SetName(f"correct_{array.GetName()}") inpd.GetLines().InitTraversal() for lidx in range(0, inpd.GetNumberOfLines()): @@ -83,7 +83,7 @@ def main(): vtk_list = list_files(args.inputDirectory, "*.vt*") for vtk_file in vtk_list: - print("Correcting:", vtk_file) + print(f"Correcting: {vtk_file}") pd = wma.io.read_polydata(vtk_file) pd = fix_trace(pd) wma.io.write_polydata(pd, vtk_file) diff --git a/utilities/wm_fix_hemisphere_loc_name_in_tractogram.py b/utilities/wm_fix_hemisphere_loc_name_in_tractogram.py index 027a3702..ebaacd7c 100644 --- a/utilities/wm_fix_hemisphere_loc_name_in_tractogram.py +++ b/utilities/wm_fix_hemisphere_loc_name_in_tractogram.py @@ -78,8 +78,8 @@ def main(): print(f"{os.path.basename(__file__)}. Starting.") print("") - print("=====input filename======\n", args.in_fname) - print("=====output filename=====\n", args.out_fname) + print(f"=====input filename======\n {args.in_fname}") + print(f"=====output filename=====\n {args.out_fname}") print("==========================") polydata = wma.io.read_polydata(args.in_fname) diff --git a/utilities/wm_flatten_length_distribution.py b/utilities/wm_flatten_length_distribution.py index c35131bb..8ee5bbf3 100644 --- a/utilities/wm_flatten_length_distribution.py +++ b/utilities/wm_flatten_length_distribution.py @@ -56,42 +56,42 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() outdir = args.outputDirectory if not os.path.exists(outdir): - print("Output directory", outdir, "does not exist, creating it.") + print(f"Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) print(f"{os.path.basename(__file__)}. 
Starting streamline length distribution flattening.") print("") - print("=====input directory======\n", args.inputDirectory) - print("=====output directory=====\n", args.outputDirectory) + print(f"=====input directory======\n {args.inputDirectory}") + print(f"=====output directory=====\n {args.outputDirectory}") print("==========================") if args.numberOfFibers is not None: - print("fibers to retain per subject: ", args.numberOfFibers) + print(f"fibers to retain per subject: {args.numberOfFibers}") args.fibersPerBin = numpy.divide(args.numberOfFibers,args.numberOfBins) else: print("fibers to retain per subject: ALL") if args.fiberLengthMin is not None: - print("minimum length of fibers to retain (in mm): ", args.fiberLengthMin) + print(f"minimum length of fibers to retain (in mm): {args.fiberLengthMin}") else: - print("minimum length of fibers to retain (in mm): 0") + print(f"minimum length of fibers to retain (in mm): 0") if args.fiberLengthMax is not None: - print("maximum length of fibers to retain (in mm): ", args.fiberLengthMax) + print("maximum length of fibers to retain (in mm): {args.fiberLengthMax}") - print("Bins:", args.numberOfBins) - print("Fibers per bin:", args.fibersPerBin) + print(f"Bins: {args.numberOfBins}") + print(f"Fibers per bin: {args.fibersPerBin}") if args.numberOfJobs is not None: parallel_jobs = args.numberOfJobs else: parallel_jobs = 1 - print('Using N jobs:', parallel_jobs) + print(f'Using N jobs: {parallel_jobs}') print("==========================") @@ -102,7 +102,7 @@ def main(): inputPolyDatas = glob.glob(inputMask1) + glob.glob(inputMask2) - print(f"<{os.path.basename(__file__)}> Input number of files: ", len(inputPolyDatas)) + print(f"<{os.path.basename(__file__)}> Input number of files: {len(inputPolyDatas)}") # for testing #inputPolyDatas = inputPolyDatas[0:2] @@ -111,13 +111,13 @@ def pipeline(inputPolyDatas, sidx, args): # get subject identifier from unique input filename # ------------------- subjectID = os.path.splitext(os.path.basename(inputPolyDatas[sidx]))[0] - id_msg = f"<{os.path.basename(__file__)}> ", sidx + 1, "/", len(inputPolyDatas) - msg = "**Starting subject:", subjectID + id_msg = f"<{os.path.basename(__file__)}> {sidx + 1} / {len(inputPolyDatas)}" + msg = f"**Starting subject: {subjectID}" print(id_msg + msg) # read input vtk data # ------------------- - msg = "**Reading input:", subjectID + msg = f"**Reading input: {subjectID}" print(id_msg + msg) wm = wma.io.read_polydata(inputPolyDatas[sidx]) @@ -129,14 +129,14 @@ def pipeline(inputPolyDatas, sidx, args): # outputs # ------------------- - msg = "**Writing output data for subject:", subjectID + msg = f"**Writing output data for subject: {subjectID}" print(id_msg, msg) - fname = os.path.join(args.outputDirectory, subjectID+'_flat.vtp') + fname = os.path.join(args.outputDirectory, f'{subjectID}_flat.vtp') try: - print("Writing output polydata", fname, "...") + print(f"Writing output polydata {fname}...") wma.io.write_polydata(wm2, fname) - print("Wrote output", fname, ".") + print(f"Wrote output {fname}.") except: print("Unknown exception in IO") raise diff --git a/utilities/wm_laterality_all.py b/utilities/wm_laterality_all.py index 32777abf..7a337a35 100755 --- a/utilities/wm_laterality_all.py +++ b/utilities/wm_laterality_all.py @@ -58,18 +58,18 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") 
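In the wm_flatten_length_distribution.py hunk above, the converted line for the maximum fiber length keeps the {args.fiberLengthMax} placeholder but appears to have lost its f prefix; a placeholder is only interpolated when the string literal carries that prefix, otherwise the braces print verbatim. The width-padded 'Percentage:' header earlier in the patch raises a related point, since a str.format width spec moves inside the braces when converted. A minimal sketch of both forms, using hypothetical stand-in values:

    fiber_length_max = 40   # hypothetical stand-in for args.fiberLengthMax
    width = 20              # hypothetical field width

    # Without the f prefix the braces are literal text:
    print("maximum length of fibers to retain (in mm): {fiber_length_max}")
    # -> maximum length of fibers to retain (in mm): {fiber_length_max}

    # With the prefix the expression is interpolated:
    print(f"maximum length of fibers to retain (in mm): {fiber_length_max}")
    # -> maximum length of fibers to retain (in mm): 40

    # A width spec built as ("{:<" + str(width) + "}").format('Percentage:')
    # becomes a nested format spec inside the braces:
    print(f"{'Percentage:':<{width}}|")
    # -> 'Percentage:' left-aligned in a 20-character field, then '|'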
exit() outdir = args.outputDirectory if not os.path.exists(outdir): - print(f"<{os.path.basename(__file__)}> Output directory", outdir, "does not exist, creating it.") + print(f"<{os.path.basename(__file__)}> Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) print("f<{os.path.basename(__file__)}> Starting white matter laterality computation.") print("") - print("=====input directory======\n", args.inputDirectory) - print("=====output directory=====\n", args.outputDirectory) + print(f"=====input directory======\n {args.inputDirectory}") + print(f"=====output directory=====\n {args.outputDirectory}") print("==========================") if args.numberOfFibers is not None: @@ -97,7 +97,7 @@ def main(): print("Use equal fiber number from each hemisphere is OFF. Using input fiber number.") if args.numberOfFibersPerHem is not None: - print("fibers to analyze per hemisphere: ", args.numberOfFibersPerHem) + print(f"fibers to analyze per hemisphere: {args.numberOfFibersPerHem}") else: print("fibers to analyze per hemisphere: all or equal") @@ -113,13 +113,13 @@ def main(): # get subject identifier from unique input filename # ------------------- subjectID = os.path.splitext(os.path.basename(inputPolyDatas[sidx]))[0] - id_msg = f"<{os.path.basename(__file__)}> ", sidx + 1, "/", len(inputPolyDatas) - msg = "**Starting subject:", subjectID + id_msg = f"<{os.path.basename(__file__)}> {sidx + 1} / {len(inputPolyDatas)}" + msg = f"**Starting subject: {subjectID}" print(id_msg, msg) # read input vtk data # ------------------- - msg = "**Reading input:", subjectID + msg = f"**Reading input: {subjectID}" print(id_msg, msg) wm = wma.io.read_polydata(inputPolyDatas[sidx]) @@ -127,16 +127,16 @@ def main(): # remove short fibers # ------------------- if args.fiberLength is not None: - msg = "**Preprocessing:", subjectID + msg = f"**Preprocessing: {subjectID}" print(id_msg, msg) wm = wma.filter.preprocess(wm, args.fiberLength, remove_u=True, remove_u_endpoint_dist=50, remove_brainstem=True) - print("Number of fibers retained: ", wm.GetNumberOfLines()) + print(f"Number of fibers retained: {wm.GetNumberOfLines()}") # remove outlier fibers # ------------------- if args.flag_removeOutliers: - msg = "**Removing outliers:", subjectID + msg = f"**Removing outliers: {subjectID}" print(id_msg, msg) # if it's huge downsample to twice requested size first @@ -152,7 +152,7 @@ def main(): # downsample if requested # ------------------- if args.numberOfFibers is not None: - msg = "**Downsampling input:", subjectID + msg = f"**Downsampling input: {subjectID}" print(id_msg, msg) wm = wma.filter.downsample(wm, args.numberOfFibers) @@ -162,7 +162,7 @@ def main(): # compute laterality on each dataset # ------------------- - msg = "**Computing laterality:", subjectID + msg = f"**Computing laterality: {subjectID}" print(id_msg, msg) laterality = wma.laterality.WhiteMatterLaterality() @@ -184,7 +184,7 @@ def main(): # outputs # ------------------- - msg = "**Writing output data for subject:", subjectID + msg = f"**Writing output data for subject: {subjectID}" print(id_msg, msg) outdir = os.path.join(args.outputDirectory, subjectID) diff --git a/utilities/wm_measure_all_clusters.py b/utilities/wm_measure_all_clusters.py index c1c7a640..cb385f2b 100755 --- a/utilities/wm_measure_all_clusters.py +++ b/utilities/wm_measure_all_clusters.py @@ -39,13 +39,13 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist.") 
+ print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() print(f"<{os.path.basename(__file__)}> Starting computation.") print("") - print("=====input directory ======\n", args.inputDirectory) - print("=====output file =====\n", args.outputFile) + print(f"=====input directory ======\n {args.inputDirectory}") + print(f"=====output file =====\n {args.outputFile}") print("==========================") print("") @@ -55,7 +55,7 @@ def compute_point_data_stats(pd, array_name): return None # make sure this is a one-component scalar if point_array.GetNumberOfComponents() > 1: - print("Error in compute_point_data_stats: Array", array_name, "has more than one component", point_array.GetNumberOfComponents(), ".") + print(f"Error in compute_point_data_stats: Array {array_name} has more than one component {point_array.GetNumberOfComponents()}.") return None print(point_array) num_points = pd.GetNumberOfPoints() @@ -63,7 +63,7 @@ def compute_point_data_stats(pd, array_name): for pidx in range(0, num_points): if (pidx % 1000) == 0: - print("Point", pidx, '/', num_points) + print(f"Point {pidx} / {num_points}") # this assumes we have scalars here points_copy[pidx] = point_array.GetTuple(pidx)[0] @@ -71,7 +71,7 @@ def compute_point_data_stats(pd, array_name): #points_std = numpy.std(points_copy) #points_median = numpy.median(points_copy) - print("Mean ", array_name, ":", points_mean) + print(f"Mean {array_name} : {points_mean}") return points_mean @@ -82,7 +82,7 @@ def compute_point_data_stats(pd, array_name): input_polydatas = input_polydatas[0:10] - print(f"<{os.path.basename(__file__)}> Input number of vtk/vtp files: ", number_of_clusters) + print(f"<{os.path.basename(__file__)}> Input number of vtk/vtp files: {number_of_clusters}") scalars = ['FA', 'Trace', 'FA1', 'FA2', 'Trace1', 'Trace2'] @@ -93,10 +93,10 @@ def compute_point_data_stats(pd, array_name): for fname in input_polydatas: print(fname) # read data - print(f"<{os.path.basename(__file__)}> Reading input file:", fname) + print(f"<{os.path.basename(__file__)}> Reading input file: {fname}") pd = wma.io.read_polydata(fname) # preprocessing step: minimum length - print(f"<{os.path.basename(__file__)}> Computing stats for input file:", fname) + print(f"<{os.path.basename(__file__)}> Computing stats for input file: {fname}") output_row = list() output_row.append(fname) for sc in scalars: @@ -113,9 +113,9 @@ def compute_point_data_stats(pd, array_name): outstr = '' for item in row: if outstr != '': - outstr = outstr + '\t' - outstr = outstr + str(item) - outstr = outstr + '\n' + outstr = f'{outstr}\t' + outstr = f'{outstr}{str(item)}' + outstr = f'{outstr}\n' f.write(outstr) f.close() diff --git a/utilities/wm_measure_endpoint_overlap.py b/utilities/wm_measure_endpoint_overlap.py index a2b61cfa..c57d2f48 100755 --- a/utilities/wm_measure_endpoint_overlap.py +++ b/utilities/wm_measure_endpoint_overlap.py @@ -49,15 +49,15 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputTractDirectory): - print("Error: Input directory", args.inputTractDirectory, "does not exist.") + print(f"Error: Input directory {args.inputTractDirectory} does not exist.") exit() if not os.path.isdir(args.inputLabelMapDirectory): - print("Error: Input label map directory", args.inputLabelMapDirectory, "does not exist.") + print(f"Error: Input label map directory {args.inputLabelMapDirectory} does not exist.") exit() if not os.path.exists(args.modulePath): - print("Error: FiberEndPointFromLabelMap", args.modulePath, "does not exist.") + 
print(f"Error: FiberEndPointFromLabelMap {args.modulePath} does not exist.") exit() if args.numberOfJobs is not None: @@ -66,21 +66,21 @@ def main(): number_of_jobs = 1 if not os.path.exists(args.outputDirectory): - print("Output directory", args.outputDirectory, "does not exist, creating it.") + print(f"Output directory {args.outputDirectory} does not exist, creating it.") os.makedirs(args.outputDirectory) print(f"<{os.path.basename(__file__)}>. Starting processing.") print("") - print("=====input fiber cluster directory======\n", args.inputTractDirectory) - print("=====input label map directory======\n", args.inputLabelMapDirectory) - print("=====output directory=====\n", args.outputDirectory) - print("=====module path====\n", args.modulePath) - print('=====using N jobs:', number_of_jobs, "====\n") + print(f"=====input fiber cluster directory======\n {args.inputTractDirectory}") + print(f"=====input label map directory======\n {args.inputLabelMapDirectory}") + print(f"=====output directory=====\n {args.outputDirectory}") + print(f"=====module path====\n {args.modulePath}") + print(f"=====using N jobs: {number_of_jobs} ====\n") tract_dir_list = os.listdir(args.inputTractDirectory) tract_dir_list = sorted(tract_dir_list) - print(f"<{os.path.basename(__file__)}> found", len(tract_dir_list), "subjects.") + print(f"<{os.path.basename(__file__)}> found {len(tract_dir_list)} subjects.") def list_label_map_files(input_dir): # Find input files @@ -92,23 +92,21 @@ def list_label_map_files(input_dir): label_map_file_list = list_label_map_files(args.inputLabelMapDirectory) - print(f"<{os.path.basename(__file__)}> found", len(label_map_file_list), "label maps. \n") + print(f"<{os.path.basename(__file__)}> found {len(label_map_file_list)} label maps.\n") if len(tract_dir_list) != len(label_map_file_list): - print("Error: The number of subjects", len(tract_dir_list), "should be equal to the number of label maps", len(label_map_file_list)) + print(f"Error: The number of subjects {len(tract_dir_list)} should be equal to the number of label maps {len(label_map_file_list)}") exit() def extract_endpoint(tract_dir, lalel_map_file, args): pds = wma.io.list_vtk_files(os.path.join(args.inputTractDirectory, tract_dir)) - print(f"<{os.path.basename(__file__)}> Computing:", os.path.join(args.inputTractDirectory, tract_dir)) - print(" using", lalel_map_file) - print(" with:", len(pds), "vtk/vtp files.") + print(f"<{os.path.basename(__file__)}> Computing: {os.path.join(args.inputTractDirectory, tract_dir)}") + print(f" using {lalel_map_file}") + print(f" with: {len(pds)} vtk/vtp files.") sub_name = os.path.split(tract_dir)[1] - os.system(args.modulePath + ' ' + lalel_map_file + ' ' + os.path.join(args.inputTractDirectory, tract_dir) + ' ' + \ - os.path.join(args.outputDirectory, sub_name+'_endpoint.txt')+ \ - ' > ' + os.path.join(args.outputDirectory, 'log'+sub_name)) + os.system(f"{args.modulePath} {lalel_map_file} {os.path.join(args.inputTractDirectory, tract_dir)} {os.path.join(args.outputDirectory, sub_name + '_endpoint.txt')} > {os.path.join(args.outputDirectory, 'log' + sub_name)}") Parallel(n_jobs=number_of_jobs, verbose=1)( delayed(extract_endpoint)(tract_dir, label_map_file, args) @@ -123,12 +121,12 @@ def list_txt_files(input_dir): return input_fnames endpoint_txt_list = list_txt_files(args.outputDirectory) - print(f"<{os.path.basename(__file__)}> Endpoint analysis were measured for", len(endpoint_txt_list), "subjects.") + print(f"<{os.path.basename(__file__)}> Endpoint analysis were measured for 
{len(endpoint_txt_list)} subjects.") if len(tract_dir_list) != len(endpoint_txt_list): print("Error: The numbers of inputs and outputs are different. Check the log file of each subject.") else: - os.system("rm -rf "+os.path.join(args.outputDirectory, 'log*')) + os.system(f"rm -rf {os.path.join(args.outputDirectory, 'log*')}") if __name__ == "__main__": diff --git a/utilities/wm_statistics.py b/utilities/wm_statistics.py index 68ea3e4e..58d6e116 100644 --- a/utilities/wm_statistics.py +++ b/utilities/wm_statistics.py @@ -54,7 +54,7 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print(f"<{os.path.basename(__file__)}> Error: Input directory", args.inputDirectory, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {args.inputDirectory} does not exist.") exit() if not os.path.exists(args.subjectsInformationFile): @@ -63,7 +63,7 @@ def main(): if args.atlasDirectory: if not os.path.isdir(args.atlasDirectory): - print(f"<{os.path.basename(__file__)}> Error: Input directory", args.atlasDirectory, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {args.atlasDirectory} does not exist.") exit() measurement = args.measure @@ -74,17 +74,17 @@ def main(): # Read and check data measurement_list = wma.tract_measurement.load_measurement_in_folder(args.inputDirectory, hierarchy = 'Column', separator = 'Tab') - print("Measurement directory:", args.inputDirectory) + print(f"Measurement directory {args.inputDirectory}") number_of_subjects = len(measurement_list) - print("Number of subjects data found:", number_of_subjects) + print(f"Number of subjects data found: {number_of_subjects}") if number_of_subjects < 1: - print("ERROR, no measurement files found in directory:", args.inputDirectory) + print(f"ERROR, no measurement files found in directory {args.inputDirectory}") exit() header = measurement_list[0].measurement_header #print "Measurement header is:", header #print "Clusters found:", measurement_list[0].cluster_path number_of_clusters = measurement_list[0].measurement_matrix[:,0].shape[0] - print("Number of measurement regions (clusters and hierarchy groups) is:", number_of_clusters) + print(f"Number of measurement regions (clusters and hierarchy groups) is: {number_of_clusters}") # Read and check subject ID and other information dg = wma.tract_measurement.load_demographics(args.subjectsInformationFile) @@ -105,7 +105,7 @@ def main(): #print subject_measured.case_id, subj_id, group if not str(subj_id) in subject_measured.case_id: print("ERROR: id list and input data mismatch.") - print("ERROR at:", subject_measured.case_id, subj_id, group) + print(f"ERROR at: {subject_measured.case_id} {subj_id} {group}") exit() print("Dataset passed. 
Subject IDs in subject information file match subject IDs in measurement directory.") @@ -115,9 +115,9 @@ def main(): if len(study_groups) > 2: print("ERROR: this code can currently only handle two study groups.") exit() - print("Subjects per group:", numpy.sum(numpy.array(group_id_list)==study_groups[0]), numpy.sum(numpy.array(group_id_list)==study_groups[1])) + print(f"Subjects per group: {numpy.sum(numpy.array(group_id_list) == study_groups[0])} {numpy.sum(numpy.array(group_id_list) == study_groups[1])}") - print("Performing statistics for measure:", measurement) + print(f"Performing statistics for measure: {measurement}") # Note this hierarchy code will be removed/simplified when the Slicer measurement file is simplified # ------ @@ -191,7 +191,7 @@ def main(): # plot the measurement of interest across all subjects and clusters - fname = 'plot_all_data_'+measurement+'.pdf' + fname = f'plot_all_data_{measurement}.pdf' vidx = list(header).index(measurement) plt.figure() for subject_measured, group in zip(measurement_list, group_id_list): @@ -202,10 +202,10 @@ def main(): color = 'r' #plt.plot(numpy.sort(value_list), color) plt.plot(value_list, color+'.') - plt.title(measurement+' measurements by group') + plt.title(f'{measurement} measurements by group') plt.savefig(fname) plt.close() - print("Saved data plot:", fname) + print(f"Saved data plot: {fname}") # get data by group to perform stats on measurement of interest vidx = list(header).index(measurement) @@ -238,11 +238,11 @@ def main(): shape_after = numpy.array([c_data_0.shape[0], c_data_1.shape[0]]) # warn if any cluster is totally absent if len(c_data_0) == 0 | len(c_data_1) == 0: - print("Empty cluster across all subjects indicates data issue:", c) + print(f"Empty cluster across all subjects indicates data issue: {c}") # warn if empty clusters nan_count = shape_before - shape_after if numpy.sum(nan_count) > 0: - print("\tCluster", c, ": Warning. Empty/nan found in :", nan_count, "subjects.") + print(f"\tCluster {c} : Warning. 
Empty/nan found in : {nan_count} subjects.") t, p = scipy.stats.ttest_ind(c_data_0, c_data_1) #print c_data_0.shape, c_data_1.shape if args.one_tailed: @@ -251,7 +251,7 @@ def main(): p_val_list.append(p) uncorrected_significant = numpy.sum(numpy.array(p_val_list) < 0.05) - print("Uncorrected:", uncorrected_significant, "/", number_of_tests, ":", 100*uncorrected_significant/float(number_of_tests), "%") + print(f"Uncorrected: {uncorrected_significant} / {number_of_tests} : {100 * uncorrected_significant / float(number_of_tests)} %") ## if analyze_hierarchy: ## for name, p_val in zip(names_for_stats, p_val_list): @@ -260,13 +260,13 @@ def main(): # bonferroni threshold = 0.05 / number_of_clusters corrected_significant = numpy.sum(numpy.array(p_val_list) < threshold) - print("Bonferroni:", corrected_significant, "/", number_of_tests, ":", 100*corrected_significant/float(number_of_tests), "%") + print(f"Bonferroni: {corrected_significant} / {number_of_tests} : {100 * corrected_significant / float(number_of_tests)} %") # FDR reject_null, corrected_p = statsmodels.sandbox.stats.multicomp.fdrcorrection0(numpy.array(p_val_list), alpha=fdr_q, method='indep') #reject_null, corrected_p = statsmodels.sandbox.stats.multicomp.fdrcorrection0(numpy.array(p_val_list), alpha=fdr_q, method='negcorr') corrected_significant = numpy.sum(reject_null) - print("FDR at alpha/q =", fdr_q, ":", corrected_significant, "/", number_of_tests, ":", 100*corrected_significant/float(number_of_tests), "%") + print(f"FDR at alpha/q = {fdr_q} : {corrected_significant} / {number_of_tests} : {100 * corrected_significant / float(number_of_tests)} %") ## # plot the data we tested, sorted by p-value ## cluster_order = numpy.argsort(numpy.array(p_val_list)) @@ -277,8 +277,8 @@ def main(): ## plt.savefig('tested_'+measurement+'.pdf') ## plt.close() - fname = 'plot_region_means_'+measurement+'.pdf' - print("Plotting mean values per cluster:", fname) + fname = f'plot_region_means_{measurement}.pdf' + print(f"Plotting mean values per cluster: {fname}") cluster_mean_0 = numpy.nanmean(data_0, axis=0) cluster_mean_1 = numpy.nanmean(data_1, axis=0) # Plot the mean values of the groups against each other in each cluster @@ -298,13 +298,13 @@ def main(): # plot the significant ones on top #plt.plot(cluster_mean_0[reject_null], cluster_mean_1[reject_null],'ro',markersize=3) plt.plot(cluster_mean_0[reject_null], cluster_mean_1[reject_null],'ro',markersize=4) - plt.title(measurement+' region means') + plt.title(f'{measurement} region means') plt.axis('equal') plt.savefig(fname) plt.close() - fname = 'plot_boxplot_'+measurement+'.pdf' - print("Plotting complete boxplot of all data:", fname) + fname = f'plot_boxplot_{measurement}.pdf' + print(f"Plotting complete boxplot of all data: {fname}") # Get information to plot all significant and non-significant data in a boxplot. 
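The uncorrected, Bonferroni, and FDR summary lines converted above interpolate the raw percentage float, so the printed precision matches the old comma-separated form. If rounded output were ever wanted, an f-string format spec handles it inline; a small sketch with made-up counts:

    corrected_significant, number_of_tests = 12, 800   # hypothetical counts

    print(f"Bonferroni: {corrected_significant} / {number_of_tests} : "
          f"{100 * corrected_significant / number_of_tests:.1f} %")
    # -> Bonferroni: 12 / 800 : 1.5 %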
label_list = [] text_list = [] @@ -325,7 +325,7 @@ def main(): x_pos_list = list(range(len(data_list))) #plt.boxplot(data_list, labels=label_list, showmeans=True, positions=x_pos_list) plt.boxplot(data_list, showmeans=True, positions=x_pos_list) - plt.title(measurement+' all regions') + plt.title(f'{measurement} all regions') ylim = plt.gca().get_ylim()[1] for text, pos in zip(text_list, text_pos_list): plt.text(pos, ylim*0.95, text, bbox=dict(facecolor='red', alpha=0.5)) @@ -333,8 +333,8 @@ def main(): plt.savefig(fname) plt.close() - fname = 'plot_boxplot_sig_'+measurement+'.pdf' - print("Plotting significant boxplot:", fname) + fname = f'plot_boxplot_sig_{measurement}.pdf' + print(f"Plotting significant boxplot: {fname}") # Now make a boxplot of only the significant ones # The data list length is number of clusters * number of groups, which must be 2 for now significant_data_list = [] @@ -365,7 +365,7 @@ def main(): plt.plot(p, numpy.mean(d), color) # Pad margins so that markers don't get clipped by the axes plt.margins(0.2) - plt.title(measurement+' significant regions') + plt.title(f'{measurement} significant regions') plt.savefig(fname) plt.close() @@ -375,7 +375,7 @@ def main(): if atlas_directory: # save a MRML file with tracts colored by p-value - fname = './test_'+measurement+'.mrml' + fname = f'./test_{measurement}.mrml' print("Saving MRML visualization:", fname) # find clusters in subject and atlas input directories input_mask = f"{atlas_directory}/cluster_*.vtp" @@ -403,7 +403,7 @@ def main(): #wma.mrml.write(atlas_clusters, colors, './test_'+str(vidx)+'.mrml', ratio=1.0) wma.mrml.write(significant_clusters, colors, fname, ratio=1.0) else: - print("Error: atlas directory and measurements have different cluster numbers:", number_of_files, number_of_clusters) + print(f"Error: atlas directory and measurements have different cluster numbers: {number_of_files} {number_of_clusters}") if __name__ == "__main__": diff --git a/utilities/wm_statistics_export_data.py b/utilities/wm_statistics_export_data.py index fe6ad1dd..66f30f07 100644 --- a/utilities/wm_statistics_export_data.py +++ b/utilities/wm_statistics_export_data.py @@ -42,29 +42,29 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print(f"<{os.path.basename(__file__)}> Error: Input directory", args.inputDirectory, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input directory {args.inputDirectory} does not exist.") exit() if not os.path.exists(args.subjectsInformationFile): - print(f"<{os.path.basename(__file__)}> Error: Input file", args.subjectsInformationFile, "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input file {args.subjectsInformationFile} does not exist.") exit() measurements = args.measures # Read and check data measurement_list = wma.tract_measurement.load_measurement_in_folder(args.inputDirectory, hierarchy = 'Column', separator = 'Tab') - print("Measurement directory:", args.inputDirectory) + print(f"Measurement directory {args.inputDirectory}") number_of_subjects = len(measurement_list) - print("Number of subjects data found:", number_of_subjects) + print(f"Number of subjects data found: {number_of_subjects}") if number_of_subjects < 1: - print("ERROR, no measurement files found in directory:", args.inputDirectory) + print(f"ERROR, no measurement files found in directory {args.inputDirectory}") exit() header = measurement_list[0].measurement_header region_list = measurement_list[0].cluster_path #print "Measurement header is:", header 
#print "Clusters found:", measurement_list[0].cluster_path number_of_clusters = measurement_list[0].measurement_matrix[:,0].shape[0] - print("Number of measurement regions (clusters and hierarchy groups) is:", number_of_clusters) + print(f"Number of measurement regions (clusters and hierarchy groups) is: {number_of_clusters}") # Read and check subject ID list dg = wma.tract_measurement.load_demographics(args.subjectsInformationFile) @@ -81,7 +81,7 @@ def main(): for subject_measured, subject_id in zip(measurement_list, case_id_list): if not str(subject_id) in subject_measured.case_id: print("ERROR: id list and input data mismatch.") - print("ERROR at:", subject_measured.case_id, subject_id) + print(f"ERROR at: {subject_measured.case_id} {subject_id}") exit() print("Dataset passed. Subject IDs in subject information excel file match subject IDs in measurement directory.") @@ -89,7 +89,7 @@ def main(): vidx_list = []; # index of values of interest for measure in measurements: vidx_list.append(list(header).index(measure)) - print("Column indices of measures for export:", vidx_list) + print(f"Column indices of measures for export: {vidx_list}") # reformat this information for export. # export format for header is subject ID, region1.measure1, region1.measure2, ..., regionN.measureN diff --git a/utilities/wm_transform_polydata.py b/utilities/wm_transform_polydata.py index 4c9d44f6..bbc22987 100644 --- a/utilities/wm_transform_polydata.py +++ b/utilities/wm_transform_polydata.py @@ -54,25 +54,25 @@ def main(): args = _parse_args(parser) if not os.path.isdir(args.inputDirectory): - print("Error: Input directory", args.inputDirectory, "does not exist.") + print(f"Error: Input directory {args.inputDirectory} does not exist.") exit() outdir = args.outputDirectory if not os.path.exists(outdir): - print("Output directory", outdir, "does not exist, creating it.") + print(f"Output directory {outdir} does not exist, creating it.") os.makedirs(outdir) print("") - print("=====input directory======\n", args.inputDirectory) - print("=====output directory=====\n", args.outputDirectory) + print(f"=====input directory======\n {args.inputDirectory}") + print(f"=====output directory=====\n {args.outputDirectory}") print("==========================") - print('CPUs detected:', multiprocessing.cpu_count()) + print(f'CPUs detected: {multiprocessing.cpu_count()}') if args.numberOfJobs is not None: parallel_jobs = args.numberOfJobs else: parallel_jobs = multiprocessing.cpu_count() - print('Using N jobs:', parallel_jobs) + print(f'Using N jobs: {parallel_jobs}') print("==========================") @@ -80,7 +80,7 @@ def main(): # Loop over input DWIs inputPolyDatas = wma.io.list_vtk_files(args.inputDirectory) - print(f"<{os.path.basename(__file__)}> Input number of files: ", len(inputPolyDatas)) + print(f"<{os.path.basename(__file__)}> Input number of files: {len(inputPolyDatas)}") # Read in the transform to print its contents for the user reader = vtk.vtkMNITransformReader() @@ -94,7 +94,7 @@ def pipeline(inputPolyDatas, sidx, args): # ------------------- #subjectID = os.path.splitext(os.path.basename(inputPolyDatas[sidx]))[0] fname = os.path.basename(inputPolyDatas[sidx]) - print(f"<{os.path.basename(__file__)}> ", sidx + 1, "/", len(inputPolyDatas)) + print(f"<{os.path.basename(__file__)}> {sidx + 1} / {len(inputPolyDatas)}") # read input vtk data # ------------------- @@ -120,7 +120,7 @@ def pipeline(inputPolyDatas, sidx, args): # ------------------- fname = os.path.join(args.outputDirectory, fname) try: - 
print("Writing output polydata", fname, "...") + print(f"Writing output polydata {fname}...") wma.io.write_polydata(outpd, fname) except: print("Unknown exception in IO") diff --git a/whitematteranalysis/cluster.py b/whitematteranalysis/cluster.py index 3a4652aa..fd34e670 100644 --- a/whitematteranalysis/cluster.py +++ b/whitematteranalysis/cluster.py @@ -92,18 +92,18 @@ def save(self, directory, atlas_name): def load(self, directory, atlas_name, verbose=False): if not os.path.isdir(directory): - print("Error: Atlas directory", directory, "does not exist or is not a directory.") + print(f"Error: Atlas directory {directory} does not exist or is not a directory.") raise " I/O error" fname_base = os.path.join(directory,atlas_name) - fname_atlas = fname_base+'.p' - fname_polydata = fname_base+'.vtp' + fname_atlas = f'{fname_base}.p' + fname_polydata = f'{fname_base}.vtp' if not os.path.exists(fname_atlas): - print("Error: Atlas file", fname_atlas, "does not exist.") + print(f"Error: Atlas file {fname_atlas} does not exist.") raise " I/O error" if not os.path.exists(fname_polydata): - print("Error: Atlas file", fname_polydata, "does not exist.") + print(f"Error: Atlas file {fname_polydata} does not exist.") raise " I/O error" try: @@ -111,9 +111,8 @@ def load(self, directory, atlas_name, verbose=False): except UnicodeDecodeError: atlas = pickle.load(open(fname_atlas,'rb'), encoding="latin1") atlas.nystrom_polydata = io.read_polydata(fname_polydata) - print(f"<{os.path.basename(__file__)}> Loaded atlas", atlas_name, "from", directory, ".") - print(f"<{os.path.basename(__file__)}> Atlas Nystrom polydata sample:", atlas.nystrom_polydata.GetNumberOfLines(), \ - "\nAtlas size:", atlas.pinv_A.shape, "\nAtlas number of eigenvectors:", atlas.number_of_eigenvectors) + print(f"<{os.path.basename(__file__)}> Loaded atlas {atlas_name} from {directory}.") + print(f"<{os.path.basename(__file__)}> Atlas Nystrom polydata sample: {atlas.nystrom_polydata.GetNumberOfLines()}\nAtlas size: {atlas.pinv_A.shape}\nAtlas number of eigenvectors: {atlas.number_of_eigenvectors}") atlas.polydata_filename = fname_polydata @@ -198,8 +197,8 @@ def spectral(input_polydata, number_of_clusters=200, # test pd has lines first number_fibers = input_polydata.GetNumberOfLines() print(f"<{os.path.basename(__file__)}> Starting spectral clustering.") - print(f"<{os.path.basename(__file__)}> Number of input fibers:", number_fibers) - print(f"<{os.path.basename(__file__)}> Number of clusters:", number_of_clusters) + print(f"<{os.path.basename(__file__)}> Number of input fibers: {number_fibers}") + print(f"<{os.path.basename(__file__)}> Number of clusters: {number_of_clusters}") if number_fibers == 0: print(f"<{os.path.basename(__file__)}> ERROR: Cannot cluster polydata with 0 fibers.") @@ -243,7 +242,7 @@ def spectral(input_polydata, number_of_clusters=200, atlas.nystrom_polydata = polydata_m polydata_n = filter.mask(input_polydata, nystrom_mask == False, verbose=False) sz = polydata_m.GetNumberOfLines() - print(f'<{os.path.basename(__file__)}> Using Nystrom approximation. Subset size:', sz, '/', number_fibers) + print(f'<{os.path.basename(__file__)}> Using Nystrom approximation. Subset size: {sz} / {number_fibers}') # Determine ordering to get embedding to correspond to original input data. 
reorder_embedding = numpy.concatenate((numpy.where(nystrom_mask)[0], numpy.where(~nystrom_mask)[0])) if landmarks is not None: @@ -261,8 +260,8 @@ def spectral(input_polydata, number_of_clusters=200, sigma, number_of_jobs, landmarks_n, landmarks_m, distance_method, bilateral) # sanity check - print(f"<{os.path.basename(__file__)}> Range of values in A:", numpy.min(A), numpy.max(A)) - print(f"<{os.path.basename(__file__)}> Range of values in B:", numpy.min(B), numpy.max(B)) + print(f"<{os.path.basename(__file__)}> Range of values in A: {numpy.min(A)} {numpy.max(A)}") + print(f"<{os.path.basename(__file__)}> Range of values in B: {numpy.min(B)} {numpy.max(B)}") else: # Calculate all fiber similarities @@ -272,7 +271,7 @@ def spectral(input_polydata, number_of_clusters=200, atlas.nystrom_polydata = input_polydata # sanity check - print(f"<{os.path.basename(__file__)}> Range of values in A:", numpy.min(A), numpy.max(A)) + print(f"<{os.path.basename(__file__)}> Range of values in A: {numpy.min(A)} {numpy.max(A)}") testval = numpy.max(A-A.T) if not testval == 0.0: @@ -280,26 +279,26 @@ def spectral(input_polydata, number_of_clusters=200, print(f"<{os.path.basename(__file__)}> ERROR: A matrix is not symmetric.") raise AssertionError else: - print(f"<{os.path.basename(__file__)}> Maximum of A - A^T:", testval) + print(f"<{os.path.basename(__file__)}> Maximum of A - A^T: {testval}") # Ensure that A is symmetric A = numpy.divide(A+A.T, 2.0) testval = numpy.min(A) if not testval > 0.0: print(f"<{os.path.basename(__file__)}> ERROR: A matrix is not positive.") - print(f"<{os.path.basename(__file__)}> Minimum value in A: ", testval) + print(f"<{os.path.basename(__file__)}> Minimum value in A: {testval}") if testval < 0.0: raise AssertionError # Outliers will have low measured (or estimated) row sums. Detect outliers in A: # to turn off for testing: outlier_std_threshold = numpy.inf row_sum_A_initial = numpy.sum(A, axis=0) + numpy.sum(B.T, axis=0) - print(f"<{os.path.basename(__file__)}> Initial similarity (row) sum A:", numpy.mean(row_sum_A_initial), numpy.std(row_sum_A_initial), numpy.min(row_sum_A_initial)) + print(f"<{os.path.basename(__file__)}> Initial similarity (row) sum A: {numpy.mean(row_sum_A_initial)} {numpy.std(row_sum_A_initial)} {numpy.min(row_sum_A_initial)}") atlas.outlier_std_threshold = outlier_std_threshold atlas.row_sum_threshold_for_rejection = numpy.mean(row_sum_A_initial) - outlier_std_threshold*numpy.std(row_sum_A_initial) bad_idx = numpy.nonzero(row_sum_A_initial < atlas.row_sum_threshold_for_rejection)[0] reject_A = bad_idx - print(f"<{os.path.basename(__file__)}> Rejecting n=", len(bad_idx), "/", sz, "fibers >", outlier_std_threshold, "standard deviations below the mean total fiber similarity") + print(f"<{os.path.basename(__file__)}> Rejecting n= {len(bad_idx)} / {sz} fibers > {outlier_std_threshold} standard deviations below the mean total fiber similarity") A = numpy.delete(A,reject_A,0) A = numpy.delete(A,reject_A,1) @@ -310,13 +309,13 @@ def spectral(input_polydata, number_of_clusters=200, # Ensure that A is positive definite. 
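In the positive-definite check that follows, the eigenvalue-range report for the nearest PSD matrix ends up as {e_val[0]} e{_val[-1]}; with the braces split that way the leading e is literal text and _val would have to exist as a separate name. The sketch below shows the interpolation form the neighboring prints use, with hypothetical eigenvalues:

    import numpy

    e_val = numpy.array([0.1, 0.5, 2.0])   # hypothetical eigenvalues (eigh returns them ascending)

    print(f"Eigenvalue range of nearest PSD matrix to A: {e_val[0]} {e_val[-1]}")
    # -> Eigenvalue range of nearest PSD matrix to A: 0.1 2.0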
if pos_def_approx: e_val, e_vec = numpy.linalg.eigh(A) - print(f"<{os.path.basename(__file__)}> Eigenvalue range of A:", e_val[0], e_val[-1]) + print(f"<{os.path.basename(__file__)}> Eigenvalue range of A: {e_val[0]} {e_val[-1]}") A2 = nearPSD(A) e_val, e_vec = numpy.linalg.eigh(A2) - print(f"<{os.path.basename(__file__)}> Eigenvalue range of nearest PSD matrix to A:", e_val[0], e_val[-1]) + print(f"<{os.path.basename(__file__)}> Eigenvalue range of nearest PSD matrix to A: {e_val[0]} e{_val[-1]}") testval = numpy.max(A-A2) if not testval == 0.0: - print(f"<{os.path.basename(__file__)}> A matrix differs by PSD matrix by maximum of:", testval) + print(f"<{os.path.basename(__file__)}> A matrix differs by PSD matrix by maximum of: {testval}") if testval > 0.25: print(f"<{os.path.basename(__file__)}> ERROR: A matrix changed by more than 0.25.") raise AssertionError @@ -356,7 +355,7 @@ def spectral(input_polydata, number_of_clusters=200, atlas.row_sum_matrix = numpy.dot(numpy.sum(B.T, axis=0), atlas.pinv_A) #test = numpy.sum(B.T, axis=0) #print " B column sums range (should be > 0):", numpy.min(test), numpy.max(test) - print(f"<{os.path.basename(__file__)}> Range of row sum weights:", numpy.min(atlas.row_sum_matrix), numpy.max(atlas.row_sum_matrix)) + print(f"<{os.path.basename(__file__)}> Range of row sum weights: {numpy.min(atlas.row_sum_matrix)} {numpy.max(atlas.row_sum_matrix)}") #print " First 10 entries in weight matrix:", atlas.row_sum_matrix[0:10] #test = numpy.dot(atlas.row_sum_matrix, B) #print " Test partial sum estimation for B:", numpy.min(test), numpy.max(test) @@ -365,17 +364,17 @@ def spectral(input_polydata, number_of_clusters=200, # row sum estimate for current B part of the matrix row_sum_2 = numpy.sum(B, axis=0) + \ numpy.dot(atlas.row_sum_matrix, B) - print(f"<{os.path.basename(__file__)}> Row sum check (min/max, should be > 0) A:", numpy.min(atlas.row_sum_1), numpy.median(atlas.row_sum_1), numpy.max(atlas.row_sum_1), "B:", numpy.min(row_sum_2), numpy.median(row_sum_2), numpy.max(row_sum_2)) + print(f"<{os.path.basename(__file__)}> Row sum check (min/max, should be > 0) A: {numpy.min(atlas.row_sum_1)} {numpy.median(atlas.row_sum_1)} {numpy.max(atlas.row_sum_1)} B: {numpy.min(row_sum_2)} {numpy.median(row_sum_2)} {numpy.max(row_sum_2)}") # reject outliers in B bad_idx = numpy.nonzero(row_sum_2 < atlas.row_sum_threshold_for_rejection)[0] reject_B = bad_idx - print(f"<{os.path.basename(__file__)}> Rejecting n=", len(bad_idx), "/", B.shape[1], "fibers >", outlier_std_threshold, "standard deviations below the mean total fiber similarity") + print(f"<{os.path.basename(__file__)}> Rejecting n= {len(bad_idx)} / {B.shape[1]} fibers > {outlier_std_threshold} standard deviations below the mean total fiber similarity") row_sum_2 = numpy.delete(row_sum_2,reject_B) B = numpy.delete(B,reject_B,1) - print(f"<{os.path.basename(__file__)}> After outlier rejection A:", A.shape, "B:", B.shape) - print(f"<{os.path.basename(__file__)}> Row sum check (min/max, should be > 0) A:", numpy.min(atlas.row_sum_1), numpy.median(atlas.row_sum_1), numpy.max(atlas.row_sum_1), "B:", numpy.min(row_sum_2), numpy.median(row_sum_2), numpy.max(row_sum_2)) + print(f"<{os.path.basename(__file__)}> After outlier rejection A: {A.shape} B: {B.shape}") + print(f"<{os.path.basename(__file__)}> Row sum check (min/max, should be > 0) A: {numpy.min(atlas.row_sum_1)} {numpy.median(atlas.row_sum_1)} {numpy.max(atlas.row_sum_1)} B: {numpy.min(row_sum_2)} {numpy.median(row_sum_2)} {numpy.max(row_sum_2)}") # Separate the 
Nystrom sample and the rest of the data after removing outliers nystrom_mask_2 = nystrom_mask @@ -392,7 +391,7 @@ def spectral(input_polydata, number_of_clusters=200, output_polydata = filter.mask(input_polydata, numpy.add(nystrom_mask_2, not_nystrom_mask),verbose=False) sz = polydata_m.GetNumberOfLines() number_fibers = output_polydata.GetNumberOfLines() - print(f'<{os.path.basename(__file__)}> Using Nystrom approximation. Subset size (A):', sz, '/', number_fibers, "B:", polydata_n.GetNumberOfLines()) + print(f'<{os.path.basename(__file__)}> Using Nystrom approximation. Subset size (A): {sz} / {number_fibers} B: {polydata_n.GetNumberOfLines()}') # Determine ordering to get embedding to correspond to original input data. # reject outliers from masks reject_idx = numpy.concatenate((midxA[reject_A],midxB[reject_B])) @@ -405,7 +404,7 @@ def spectral(input_polydata, number_of_clusters=200, # in case of negative row sum estimation if any(row_sum_2<=0): print(f"<{os.path.basename(__file__)}> Warning: Consider increasing sigma or using the Mean distance. negative row sum approximations.") - print("Number of negative row sums:", numpy.count_nonzero(row_sum_2<=0)) + print(f"Number of negative row sums: {numpy.count_nonzero(row_sum_2 <= 0)}") #row_sum_2[row_sum_2<0] = 0.1 # save for testing @@ -424,7 +423,7 @@ def spectral(input_polydata, number_of_clusters=200, else: # normalized cuts normalization using row (same as column) sums row_sum = numpy.sum(A, axis=0) - print(f"<{os.path.basename(__file__)}> A matrix row sums range (should be > 0):", numpy.min(row_sum), numpy.max(row_sum)) + print(f"<{os.path.basename(__file__)}> A matrix row sums range (should be > 0): {numpy.min(row_sum)} {numpy.max(row_sum)}") dhat = numpy.divide(1, numpy.sqrt(row_sum)) A = \ numpy.multiply(A, numpy.outer(dhat, dhat.T)) @@ -433,11 +432,11 @@ def spectral(input_polydata, number_of_clusters=200, print(f'<{os.path.basename(__file__)}> Calculating eigenvectors of similarity matrix A...') atlas.e_val, atlas.e_vec = numpy.linalg.eigh(A) print(f'<{os.path.basename(__file__)}> Done calculating eigenvectors.') - print(f"<{os.path.basename(__file__)}> Eigenvalue range:", atlas.e_val[0], atlas.e_val[-1]) + print(f"<{os.path.basename(__file__)}> Eigenvalue range: {atlas.e_val[0]} {atlas.e_val[-1]}") # Check how well our chosen number of eigenvectors models the data power = numpy.cumsum(atlas.e_val[::-1]) / numpy.sum(atlas.e_val) - print(f"<{os.path.basename(__file__)}> Power from chosen number of eigenvectors (", number_of_eigenvectors, ')', power[number_of_eigenvectors]) - print(f'<{os.path.basename(__file__)}> Top eigenvalues:', atlas.e_val[::-1][1:number_of_eigenvectors]) + print(f"<{os.path.basename(__file__)}> Power from chosen number of eigenvectors ({number_of_eigenvectors}) {power[number_of_eigenvectors]}") + print(f'<{os.path.basename(__file__)}> Top eigenvalues: {atlas.e_val[::-1][1:number_of_eigenvectors]}') # 4) Compute embedding using eigenvectors print(f'<{os.path.basename(__file__)}> Compute embedding using eigenvectors.') @@ -507,10 +506,10 @@ def spectral(input_polydata, number_of_clusters=200, if 0: # This is extremely slow, but leave code here if ever wanted for testing cluster_metric = metrics.silhouette_score(embed, cluster_idx, metric='sqeuclidean') - print("Silhouette Coefficient: %0.3f" % cluster_metric) + print(f"Silhouette Coefficient: {cluster_metric:0.3f}") else: - print("ERROR: Unknown centroid finder", centroid_finder) + print(f"ERROR: Unknown centroid finder {centroid_finder}") ## # This found fewer 
clusters than we need to represent the anatomy well ## # Leave code here in case wanted in future for more testing. ## print ' Affinity Propagation clustering in embedding space.' @@ -790,9 +789,9 @@ def _pairwise_similarity_matrix(input_polydata, threshold, sigma, test = numpy.min(numpy.diag(similarity_matrix)) == 1.0 if not test: print(f"<{os.path.basename(__file__)}> ERROR: On-diagonal elements are not all 1.0.") - print(" Minimum on-diagonal value:", numpy.min(numpy.diag(similarity_matrix))) - print(" Maximum on-diagonal value:", numpy.max(numpy.diag(similarity_matrix))) - print(" Mean value:", numpy.mean(numpy.diag(similarity_matrix))) + print(f" Minimum on-diagonal value: {numpy.min(numpy.diag(similarity_matrix))}") + print(f" Maximum on-diagonal value: {numpy.max(numpy.diag(similarity_matrix))}") + print(f" Mean value: {numpy.mean(numpy.diag(similarity_matrix))}") raise AssertionError return similarity_matrix @@ -939,7 +938,7 @@ def output_and_quality_control_cluster_atlas(atlas, output_polydata_s, subject_f idx = 1 for fname in input_polydatas: subject_id = os.path.splitext(os.path.basename(fname))[0] - outstr = str(idx) + '\t' + str(subject_id) + '\t' + str(fname) + '\n' + outstr = f'{str(idx)}\t{str(subject_id)}\t{str(fname)}\n' subjects_qc_file.write(outstr) idx += 1 subjects_qc_file.close() @@ -978,11 +977,9 @@ def output_and_quality_control_cluster_atlas(atlas, output_polydata_s, subject_f # Save output quality control information print(f"<{os.path.basename(__file__)}> Saving cluster quality control information file.") clusters_qc_file = open(clusters_qc_fname, 'w') - print('cluster_idx','\t', 'number_subjects','\t', 'percent_subjects','\t', 'mean_length','\t', 'std_length','\t', 'mean_fibers_per_subject','\t', 'std_fibers_per_subject', file=clusters_qc_file) + print('cluster_idx\tnumber_subjects\tpercent_subjects\tmean_length\tstd_length\tmean_fibers_per_subject\tstd_fibers_per_subject', file=clusters_qc_file) for cidx in cluster_indices: - print(cidx + 1,'\t', subjects_per_cluster[cidx],'\t', percent_subjects_per_cluster[cidx] * 100.0,'\t', \ - mean_fiber_len_per_cluster[cidx],'\t', std_fiber_len_per_cluster[cidx],'\t', \ - mean_fibers_per_subject_per_cluster[cidx],'\t', std_fibers_per_subject_per_cluster[cidx], file=clusters_qc_file) + print(f'{cidx + 1}\t{subjects_per_cluster[cidx]}\t{percent_subjects_per_cluster[cidx] * 100.0}\t{mean_fiber_len_per_cluster[cidx]}\t{std_fiber_len_per_cluster[cidx]}\t{mean_fibers_per_subject_per_cluster[cidx]}\t{std_fibers_per_subject_per_cluster[cidx]}', file=clusters_qc_file) clusters_qc_file.close() @@ -1006,7 +1003,7 @@ def output_and_quality_control_cluster_atlas(atlas, output_polydata_s, subject_f # Figure out file name and mean color for each cluster, and write the individual polydatas pd_c_list = mask_all_clusters(output_polydata_s, cluster_numbers_s, len(cluster_indices), preserve_point_data=True, preserve_cell_data=True, verbose=False) - print(f"<{os.path.basename(__file__)}> Beginning to save individual clusters as polydata files. TOTAL CLUSTERS:", len(cluster_indices), end=' ') + print(f"<{os.path.basename(__file__)}> Beginning to save individual clusters as polydata files. TOTAL CLUSTERS: {len(cluster_indices)}", end=' ') fnames = list() cluster_colors = list() cluster_sizes = list() @@ -1050,10 +1047,10 @@ def output_and_quality_control_cluster_atlas(atlas, output_polydata_s, subject_f print(sz, ":", fname) empty_count += 1 if empty_count: - print(f"<{os.path.basename(__file__)}> Warning. 
Empty clusters found:", empty_count) + print(f"<{os.path.basename(__file__)}> Warning. Empty clusters found: {empty_count}") cluster_sizes = numpy.array(cluster_sizes) - print(f"<{os.path.basename(__file__)}> Mean number of fibers per cluster:", numpy.mean(cluster_sizes), "Range:", numpy.min(cluster_sizes), "..", numpy.max(cluster_sizes)) + print(f"<{os.path.basename(__file__)}> Mean number of fibers per cluster: {numpy.mean(cluster_sizes)} Range: {numpy.min(cluster_sizes)}..{numpy.max(cluster_sizes)}") # Estimate subsampling ratio to display approximately number_of_fibers_to_display total fibers in 3D Slicer number_fibers = len(cluster_numbers_s) @@ -1061,7 +1058,7 @@ def output_and_quality_control_cluster_atlas(atlas, output_polydata_s, subject_f ratio = 1.0 else: ratio = number_of_fibers_to_display / number_fibers - print(f"<{os.path.basename(__file__)}> Subsampling ratio for display of", number_of_fibers_to_display, "total fibers estimated as:", ratio) + print(f"<{os.path.basename(__file__)}> Subsampling ratio for display of {number_of_fibers_to_display} total fibers estimated as: {ratio}") # Write the MRML file into the directory where the polydatas were already stored fname = os.path.join(outdir, 'clustered_tracts.mrml') @@ -1080,7 +1077,7 @@ def output_and_quality_control_cluster_atlas(atlas, output_polydata_s, subject_f def mask_all_clusters(inpd, cluster_numbers_s, number_of_clusters, color=None, preserve_point_data=True, preserve_cell_data=False, verbose=True): - print(f'<{os.path.basename(__file__)}> Masking all clusters: total ', number_of_clusters) + print(f'<{os.path.basename(__file__)}> Masking all clusters: total {number_of_clusters}') inpoints = inpd.GetPoints() inpointdata = inpd.GetPointData() @@ -1125,7 +1122,7 @@ def mask_all_clusters(inpd, cluster_numbers_s, number_of_clusters, color=None, p out_array.SetName(array.GetName()) if verbose and c_idx == 0: - print("Cell data array found:", array.GetName(), array.GetNumberOfComponents()) + print(f"Cell data array found: {array.GetName()} {array.GetNumberOfComponents()}") outpd_list[c_idx].GetCellData().AddArray(out_array) @@ -1145,7 +1142,7 @@ def mask_all_clusters(inpd, cluster_numbers_s, number_of_clusters, color=None, p out_array.SetName(array.GetName()) if verbose and c_idx == 0: - print("Point data array found:", array.GetName(), array.GetNumberOfComponents()) + print(f"Point data array found: {array.GetName()} {array.GetNumberOfComponents()}") outpd_list[c_idx].GetPointData().AddArray(out_array) @@ -1183,7 +1180,7 @@ def mask_all_clusters(inpd, cluster_numbers_s, number_of_clusters, color=None, p if not tensors_labeled: if len(tensor_names) > 0: - print("Data has unexpected tensor name(s). Unable to set active for visualization:", tensor_names) + print(f"Data has unexpected tensor name(s). Unable to set active for visualization: {tensor_names}") # now set cell data visualization inactive. 
for c_idx in range(0, number_of_clusters): @@ -1203,7 +1200,7 @@ def mask_all_clusters(inpd, cluster_numbers_s, number_of_clusters, color=None, p if verbose: if lidx % 100 == 0: - print("Line:", lidx, "/", inpd.GetNumberOfLines(), ", belonging to cluster ", c_idx) + print(f"Line: {lidx} / {inpd.GetNumberOfLines()} belonging to cluster {c_idx}") # get points for each ptid and add to output polydata cellptids = vtk.vtkIdList() @@ -1232,6 +1229,6 @@ def mask_all_clusters(inpd, cluster_numbers_s, number_of_clusters, color=None, p outpd_list[c_idx].SetPoints(outpoints_list[c_idx]) if verbose: - print(f"<{os.path.basename(__file__)}> Cluster", c_idx," have fibers ", outpd_list[c_idx].GetNumberOfLines(), "/", inpd.GetNumberOfLines(), ", points ", outpd_list[c_idx].GetNumberOfPoints()) + print(f"<{os.path.basename(__file__)}> Cluster {c_idx} have fibers {outpd_list[c_idx].GetNumberOfLines()} / {inpd.GetNumberOfLines()}, points {outpd_list[c_idx].GetNumberOfPoints()}") return outpd_list diff --git a/whitematteranalysis/congeal_multisubject.py b/whitematteranalysis/congeal_multisubject.py index f0463d5a..d78d1c6a 100644 --- a/whitematteranalysis/congeal_multisubject.py +++ b/whitematteranalysis/congeal_multisubject.py @@ -106,7 +106,7 @@ def update_nonrigid_grid(self): new_transforms.append(newtrans.ravel()) # Update all the relevant variables (the spline transform does not change but all source and target points do) - print("UPDATE NONRIGID GRID: ", self.nonrigid_grid_resolution, len(trans), "==>", len(new_transforms[-1]), "SHAPE:", newtrans.shape, "GRID:", grid) + print(f"UPDATE NONRIGID GRID: {self.nonrigid_grid_resolution} {len(trans)} ==> {len(new_transforms[-1])} SHAPE: {newtrans.shape} GRID: {grid}") self.transforms_as_array = new_transforms def add_polydata(self, polydata, subject_id): @@ -144,14 +144,14 @@ def remove_mean_from_transforms(self): # this means the mean of the source points must equal the target point transforms_array = numpy.array(self.transforms_as_array) res = self.nonrigid_grid_resolution - print("SHAPE of transforms:", transforms_array.shape, "RES:", res, "TOT:", res*res*res*3) + print(f"SHAPE of transforms: {transforms_array.shape} RES: {res} TOT: {res*res*res*3}") meanabs = numpy.mean(numpy.abs(transforms_array), 0) meandisp = numpy.mean(transforms_array, 0) #if self.verbose: #print "MEAN DISPLACEMENT:", meandisp - print("MEAN ABS DISPLACEMENT:", numpy.min(meanabs), numpy.mean(meanabs), numpy.max(meanabs)) - print("NONZERO > 0.5:", numpy.sum(meanabs > 0.5), "> 0.1:", numpy.sum(meanabs > 0.1), "/", res*res*res*3) + print(f"MEAN ABS DISPLACEMENT: {numpy.min(meanabs)} {numpy.mean(meanabs)} {numpy.max(meanabs)}") + print(f"NONZERO > 0.5: {numpy.sum(meanabs > 0.5)} > 0.1: {numpy.sum(meanabs > 0.1)} / {res*res*res*3}") for transform in self.transforms_as_array: transform[:] = transform - meandisp @@ -187,14 +187,14 @@ def iterate(self): if self.total_iterations == 1: self.progress_filename = os.path.join(self.output_directory, 'registration_performance.txt') progress_file = open(self.progress_filename, 'w') - print('iteration','\t', 'sigma', '\t', 'nonrigid', '\t', 'subject_brain_fibers', '\t', 'fibers_per_subject_in_mean_brain','\t', 'mean_brain_fibers','\t', 'maxfun','\t', 'grid_resolution_if_nonrigid','\t', 'initial_step','\t', 'final_step','\t', 'objective_before','\t', 'objective_after', '\t', 'objective_change', '\t', 'objective_percent_change', '\t', 'mean_function_calls_per_subject','\t', 'min_function_calls_per_subject','\t', 'max_function_calls_per_subject','\t', 
'subjects_hitting_maxfun','\t', 'total_subjects','\t', 'subjects_decreased','\t', 'mean_subject_change', '\t', 'mean_subject_decrease_if_decreased', '\t', 'time', file=progress_file) + print('iteration\tsigma\tnonrigid\tsubject_brain_fibers\tfibers_per_subject_in_mean_brain\tmean_brain_fibers\tmaxfun\tgrid_resolution_if_nonrigid\tinitial_step\tfinal_step\tobjective_before\tobjective_after\tobjective_change\tobjective_percent_change\tmean_function_calls_per_subject\tmin_function_calls_per_subject\tmax_function_calls_per_subject\tsubjects_hitting_maxfun\ttotal_subjects\tsubjects_decreased\tmean_subject_change\tmean_subject_decrease_if_decreased\ttime', file=progress_file) progress_file.close() # make a directory for the current iteration if self.mode == "Nonrigid": - dirname = "iteration_%05d_sigma_%03d_grid_%03d" % (self.total_iterations, self.sigma, self.nonrigid_grid_resolution) + dirname = f"iteration_{self.total_iterations:05d}_sigma_{self.sigma:03d}_grid_{self.nonrigid_grid_resolution:03d}" else: - dirname = "iteration_%05d_sigma_%03d" % (self.total_iterations, self.sigma) + dirname = f"iteration_{self.total_iterations:05d}_sigma_{self.sigma:03d}" outdir = os.path.join(self.output_directory, dirname) if not os.path.exists(outdir): @@ -207,7 +207,7 @@ def iterate(self): # Calculate how many fibers are needed to sample from each subject to compute the mean brain at the requested size fibers_per_subject = int(self.mean_brain_size / (len(self.polydatas) - 1)) if self.verbose: - print("Fibers per subject for computing mean brain:", fibers_per_subject, "=", self.mean_brain_size, "/", len(self.polydatas) -1) + print(f"Fibers per subject for computing mean brain: {fibers_per_subject} = {self.mean_brain_size} / {len(self.polydatas) - 1}") # Set up lists of data to pass to the per-subject processes mean_list = list() @@ -283,7 +283,7 @@ def iterate(self): grid_resolution_list.append(self.nonrigid_grid_resolution) # Multiprocess over subjects - print("\nITERATION", self.total_iterations, "STARTING MULTIPROCESSING. NUMBER OF JOBS:", self.parallel_jobs, "\n") + print(f"\nITERATION {self.total_iterations} STARTING MULTIPROCESSING. NUMBER OF JOBS: {self.parallel_jobs}\n") # note we can't pass vtk objects to subprocesses since they can't be pickled. 
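The iteration directory names above replace %05d/%03d interpolation with the :05d and :03d format specs. The two agree for integers, but old-style %d silently truncates a float argument whereas the d spec inside an f-string rejects it, so the conversion is only behavior-preserving if self.sigma and the grid resolution are always ints; that is assumed here, and the sketch below uses a hypothetical non-integer value only to show the difference:

    sigma = 2.5                      # hypothetical non-integer sigma

    old = "%03d" % sigma             # -> '002' (the float is truncated)
    # f"{sigma:03d}"                 # would raise ValueError: Unknown format code 'd' for float
    new = f"{int(sigma):03d}"        # -> '002', reproducing the old behavior explicitly
    assert old == new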
ret = Parallel( @@ -307,13 +307,13 @@ def iterate(self): if HAVE_PLT: plt.close('all') plt.figure(0) - plt.title('Iteration '+str(self.total_iterations)+' Objective Values for All Subjects') + plt.title(f'Iteration {str(self.total_iterations)} Objective Values for All Subjects') plt.xlabel('objective function computations') plt.ylabel('objective value') for (trans, objectives, diff) in ret: self.transforms_as_array.append(trans) - print("Iteration:", self.total_iterations, "Subject:", sidx, "Objective function computations:", len(objectives), "change", diff) + print(f"Iteration: {self.total_iterations} Subject: {sidx} Objective function computations: {len(objectives)} change {diff}") functions_per_subject.append(len(objectives)) # Normalize by the number of fibers so this is comparable across iterations if sigma does not change objectives = numpy.divide(objectives, self.subject_brain_size) @@ -339,18 +339,18 @@ def iterate(self): self.objectives_after.append(objective_total_after) total_change = self.objectives_after[-1] - self.objectives_before[-1] percent_change = total_change / self.objectives_before[-1] - print("Iteration:", self.total_iterations, "TOTAL objective change:", total_change) - print("Iteration:", self.total_iterations, "PERCENT objective change:", percent_change) + print(f"Iteration: {self.total_iterations} TOTAL objective change: {total_change}") + print(f"Iteration: {self.total_iterations} PERCENT objective change: {percent_change}") if HAVE_PLT: plt.figure(0) if self.mode == "Nonrigid": - fname_fig_base = "iteration_%05d_sigma_%03d_grid_%03d" % (self.total_iterations, self.sigma, self.nonrigid_grid_resolution) + fname_fig_base = f"iteration_{self.total_iterations:05d}_sigma_{self.sigma:03d}_grid_{self.nonrigid_grid_resolution:03d}" else: - fname_fig_base = "iteration_%05d_sigma_%03d_" % (self.total_iterations, self.sigma) + fname_fig_base = f"iteration_{self.total_iterations:05d}_sigma_{self.sigma:03d}_" # Place the legend below the plot so it does not overlap it when there are many subjects #lgd = plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), fancybox=False, shadow=False, ncol=1) - fname_fig = 'objectives_per_subject_' + fname_fig_base + '.pdf' + fname_fig = f'objectives_per_subject_{fname_fig_base}.pdf' # save everything even if the legend is long and goes off the plot #plt.savefig(os.path.join(outdir, fname_fig), bbox_extra_artists=(lgd,), bbox_inches='tight') plt.savefig(os.path.join(outdir, fname_fig)) @@ -362,7 +362,7 @@ def iterate(self): mean_decreases = 0.0 else: mean_decreases = numpy.mean(decreases) - print(self.total_iterations,'\t', self.sigma, '\t', self.mode, '\t', self.subject_brain_size, '\t', fibers_per_subject,'\t', self.mean_brain_size,'\t', self.maxfun,'\t', self.nonrigid_grid_resolution,'\t', self.initial_step,'\t', self.final_step,'\t', self.objectives_before[-1],'\t', self.objectives_after[-1],'\t', total_change,'\t', percent_change,'\t', numpy.mean(functions_per_subject), '\t', numpy.min(functions_per_subject), '\t', numpy.max(functions_per_subject), '\t', numpy.sum(functions_per_subject >= self.maxfun), '\t', number_of_subjects,'\t', len(decreases),'\t', numpy.mean(objective_changes_per_subject), '\t', mean_decreases, '\t', elapsed_time, file=progress_file) + 
print(f'{self.total_iterations}\t{self.sigma}\t{self.mode}\t{self.subject_brain_size}\t{fibers_per_subject}\t{self.mean_brain_size}\t{self.maxfun}\t{self.nonrigid_grid_resolution}\t{self.initial_step}\t{self.final_step}\t{self.objectives_before[-1]}\t{self.objectives_after[-1]}\t{total_change}\t{percent_change}\t{numpy.mean(functions_per_subject)}\t{numpy.min(functions_per_subject)}\t{numpy.max(functions_per_subject)}\t{numpy.sum(functions_per_subject >= self.maxfun)}\t{number_of_subjects}\t{len(decreases)}\t{numpy.mean(objective_changes_per_subject)}\t{mean_decreases}\t{elapsed_time}', file=progress_file) progress_file.close() # remove_mean_from_transforms @@ -392,7 +392,7 @@ def save_transformed_polydatas(self, intermediate_save=False, midsag_symmetric=F """ Output polydatas for final or intermediate iterations. """ # this can be slow so notify the user what is happening - print("\nSAVING TRANSFORMED TRACTOGRAPHY FROM ITERATION", self.total_iterations, "\n") + print(f"\nSAVING TRANSFORMED TRACTOGRAPHY FROM ITERATION {self.total_iterations}\n") transform_list = self.transforms subject_id_list = self.subject_ids @@ -404,9 +404,9 @@ def save_transformed_polydatas(self, intermediate_save=False, midsag_symmetric=F # Make a directory for the current iteration. # This directory name must match the one created above in the iteration. if self.mode == "Nonrigid": - dirname = "iteration_%05d_sigma_%03d_grid_%03d" % (self.total_iterations, self.sigma, self.nonrigid_grid_resolution) + dirname = f"iteration_{self.total_iterations:05d}_sigma_{self.sigma:03d}_grid_{self.nonrigid_grid_resolution:03d}" else: - dirname = "iteration_%05d_sigma_%03d" % (self.total_iterations, self.sigma) + dirname = f"iteration_{self.total_iterations:05d}_sigma_{self.sigma:03d}" outdir = os.path.join(self.output_directory, dirname) if not os.path.exists(outdir): os.makedirs(outdir) @@ -460,7 +460,7 @@ def save_transformed_polydatas(self, intermediate_save=False, midsag_symmetric=F start_time = time.time() wma.io.write_transforms_to_itk_format(transform_list, outdir, subject_id_list) elapsed_time = time.time() - start_time - print("WRITE TXFORMS:", elapsed_time) + print(f"WRITE TXFORMS: {elapsed_time}") def congeal_multisubject_inner_loop(mean, subject, initial_transform, mode, sigma, subject_idx, iteration_count, output_directory, step_size, maxfun, render, grid_resolution): @@ -475,7 +475,7 @@ def congeal_multisubject_inner_loop(mean, subject, initial_transform, mode, sigm # Set up registration objects and parameters that are specific to affine vs nonrigid if mode == "Affine" or mode == "Rigid": register = wma.register_two_subjects.RegisterTractography() - register.process_id_string = "_subject_%05d_iteration_%05d_sigma_%03d" % (subject_idx, iteration_count, sigma) + register.process_id_string = f"_subject_{subject_idx:05d}_iteration_{iteration_count:05d}_sigma_{sigma:03d}" if mode == "Rigid": register.mode = [1, 1, 0, 0] # Make sure the initial iterations are performed with Cobyla. 
@@ -487,7 +487,7 @@ def congeal_multisubject_inner_loop(mean, subject, initial_transform, mode, sigm register = wma.register_two_subjects_nonrigid_bsplines.RegisterTractographyNonrigid() register.nonrigid_grid_resolution = grid_resolution register.initialize_nonrigid_grid() - register.process_id_string = "_subject_%05d_iteration_%05d_sigma_%03d_grid_%03d" % (subject_idx, iteration_count, sigma, grid_resolution) + register.process_id_string = f"_subject_{subject_idx:05d}_iteration_{iteration_count:05d}_sigma_{sigma:03d}_grid_{grid_resolution:03d}" else: print("ERROR: Unknown registration mode") diff --git a/whitematteranalysis/congeal_to_atlas.py b/whitematteranalysis/congeal_to_atlas.py index 508dd7ab..c18d201e 100644 --- a/whitematteranalysis/congeal_to_atlas.py +++ b/whitematteranalysis/congeal_to_atlas.py @@ -81,7 +81,7 @@ def update_nonrigid_grid(self): #print displacement_field_vtk.GetPointData().GetArray(0) newtrans = vtk.util.numpy_support.vtk_to_numpy(displacement_field_vtk.GetPointData().GetArray(0)).ravel() #print newtrans.shape - print("UPDATE NONRIGID GRID: ", self.nonrigid_grid_resolution, len(self.transform_as_array), "==>", len(newtrans), end=' ') + print(f"UPDATE NONRIGID GRID: {self.nonrigid_grid_resolution} {len(self.transform_as_array)} ==> {len(newtrans)}", end=' ') self.transform_as_array = newtrans def set_subject(self, polydata, subject_id): @@ -93,7 +93,7 @@ def set_subject(self, polydata, subject_id): vtktrans = wma.register_two_subjects_nonrigid_bsplines.convert_transform_to_vtk(trans) self.transform = vtktrans self.transform_as_array = trans - print("ADD PD:", trans) + print(f"ADD PD: {trans}") else: trans = vtk.vtkTransform() self.transform = trans @@ -108,7 +108,7 @@ def iterate(self): self.total_iterations += 1 # make a directory for the current iteration - dirname = "iteration_%05d_sigma_%05d" % (self.total_iterations, self.sigma) + dirname = f"iteration_{self.total_iterations:05d}_sigma_{self.sigma:05d}" outdir = os.path.join(self.output_directory, dirname) if not os.path.exists(outdir): os.makedirs(outdir) @@ -163,7 +163,7 @@ def save_transformed_polydata(self, intermediate_save=False): if intermediate_save: # make a directory for the current iteration - dirname = "iteration_%05d_sigma_%05d" % (self.total_iterations, self.sigma) + dirname = f"iteration_{self.total_iterations:05d}_sigma_{self.sigma:05d}" outdir = os.path.join(self.output_directory, dirname) if not os.path.exists(outdir): os.makedirs(outdir) @@ -196,8 +196,8 @@ def save_transformed_polydata(self, intermediate_save=False): wma.io.write_transforms_to_itk_format(tx_list, outdir, id_list) def save_transformed_polydata_to_disk(self, outdir): - out_fname = os.path.join(outdir, self.subject_id + '_reg.vtk') - print(self.subject_id, " Transforming ", self.input_polydata_filename, "->", out_fname, "...") + out_fname = os.path.join(outdir, f'{self.subject_id}_reg.vtk') + print(f"{self.subject_id} Transforming {self.input_polydata_filename} -> {out_fname}...") pd = wma.io.read_polydata(self.input_polydata_filename) diff --git a/whitematteranalysis/fibers.py b/whitematteranalysis/fibers.py index 84dfd251..65e5eb9a 100644 --- a/whitematteranalysis/fibers.py +++ b/whitematteranalysis/fibers.py @@ -145,10 +145,7 @@ def __init__(self): self.number_commissure = None def __str__(self): - output = "\n points_per_fiber\t" + str(self.points_per_fiber) \ - + "\n number_of_fibers\t\t" + str(self.number_of_fibers) \ - + "\n fiber_hemisphere\t\t" + str(self.fiber_hemisphere) \ - + "\n verbose\t" + str(self.verbose)
+ output = f"\n points_per_fiber\t{str(self.points_per_fiber)}\n number_of_fibers\t\t{str(self.number_of_fibers)}\n fiber_hemisphere\t\t{str(self.fiber_hemisphere)}\n verbose\t{str(self.verbose)}" return output @@ -340,7 +337,7 @@ def convert_from_polydata(self, input_vtk_polydata, points_per_fiber=None): self.number_of_fibers = input_vtk_polydata.GetNumberOfLines() if self.verbose: - print(f"<{os.path.basename(__file__)}> Converting polydata to array representation. Lines:", self.number_of_fibers) + print(f"<{os.path.basename(__file__)}> Converting polydata to array representation. Lines: {self.number_of_fibers}") # allocate array number of lines by line length self.fiber_array_r = numpy.zeros((self.number_of_fibers, @@ -362,8 +359,8 @@ def convert_from_polydata(self, input_vtk_polydata, points_per_fiber=None): if self.verbose: if lidx % 100 == 0: - print(f"<{os.path.basename(__file__)}> Line:", lidx, "/", self.number_of_fibers) - print(f"<{os.path.basename(__file__)}> number of points:", line_length) + print(f"<{os.path.basename(__file__)}> Line: {lidx} / {self.number_of_fibers}") + print(f"<{os.path.basename(__file__)}> number of points: {line_length}") # loop over the indices that we want and get those points pidx = 0 diff --git a/whitematteranalysis/filter.py b/whitematteranalysis/filter.py index a1137c4a..4ee6b88f 100644 --- a/whitematteranalysis/filter.py +++ b/whitematteranalysis/filter.py @@ -82,7 +82,7 @@ def flatten_length_distribution(inpd, min_length_mm=None, max_length_mm=None, nu bin_ends.append(max_l) max_l += increment if verbose: - print("Bins/length ranges:", bin_ends) + print(f"Bins/length ranges: {bin_ends}") print(bin_ends[0:-1], bin_ends[1:]) @@ -92,7 +92,7 @@ def flatten_length_distribution(inpd, min_length_mm=None, max_length_mm=None, nu pd = preprocess(inpd, bin_low, max_length_mm=bin_hi, verbose=False) pd2 = downsample(pd, fibers_per_bin,verbose=False) if verbose: - print(pd2.GetNumberOfLines(), "fibers in length range:", [bin_low, bin_hi]) + print(f"{pd2.GetNumberOfLines()} fibers in length range [{bin_low}, {bin_hi}]") if (vtk.vtkVersion().GetVTKMajorVersion() >= 6.0): appender.AddInputData(pd2) else: @@ -188,8 +188,7 @@ def preprocess(inpd, min_length_mm, min_length_pts = round(min_length_mm / float(step_size)) if verbose: - print(f"<{os.path.basename(__file__)}> Minimum length", min_length_mm, \ - "mm. Tractography step size * minimum number of points =", step_size, "*", min_length_pts, ")") + print(f"<{os.path.basename(__file__)}> Minimum length {min_length_mm} mm. 
Tractography step size * minimum number of points = {step_size} * {min_length_pts}") # set up processing and output objects ptids = vtk.vtkIdList() @@ -268,7 +267,7 @@ def downsample(inpd, output_number_of_lines, return_indices=False, preserve_poin # use the input random seed every time for code testing experiments if random_seed is not None: if verbose: - print(f"<{os.path.basename(__file__)}> Setting random seed to", random_seed) + print(f"<{os.path.basename(__file__)}> Setting random seed to {random_seed}") numpy.random.seed(seed=random_seed) # randomly pick the lines that we will keep @@ -355,7 +354,7 @@ def mask(inpd, fiber_mask, color=None, preserve_point_data=False, preserve_cell_ out_array.SetNumberOfComponents(array.GetNumberOfComponents()) out_array.SetName(array.GetName()) if verbose: - print("Cell data array found:", array.GetName(), array.GetNumberOfComponents()) + print(f"Cell data array found: {array.GetName()} {array.GetNumberOfComponents()}") outcelldata.AddArray(out_array) # make sure some scalars are active so rendering works #outpd.GetCellData().SetActiveScalars(array.GetName()) @@ -380,7 +379,7 @@ def mask(inpd, fiber_mask, color=None, preserve_point_data=False, preserve_cell_ out_array.SetNumberOfComponents(array.GetNumberOfComponents()) out_array.SetName(array.GetName()) if verbose: - print("Point data array found:", array.GetName(), array.GetNumberOfComponents()) + print(f"Point data array found: {array.GetName()} {array.GetNumberOfComponents()}") outpointdata.AddArray(out_array) # make sure some scalars are active so rendering works #outpd.GetPointData().SetActiveScalars(array.GetName()) @@ -414,7 +413,7 @@ def mask(inpd, fiber_mask, color=None, preserve_point_data=False, preserve_cell_ tensors_labeled = True if not tensors_labeled: if len(tensor_names) > 0: - print("Data has unexpected tensor name(s). Unable to set active for visualization:", tensor_names) + print(f"Data has unexpected tensor name(s). Unable to set active for visualization: {tensor_names}") # now set cell data visualization inactive. 
outpd.GetCellData().SetActiveScalars(None) @@ -429,7 +428,7 @@ def mask(inpd, fiber_mask, color=None, preserve_point_data=False, preserve_cell_ if verbose: if lidx % 100 == 0: - print(f"<{os.path.basename(__file__)}> Line:", lidx, "/", inpd.GetNumberOfLines()) + print(f"<{os.path.basename(__file__)}> Line: {lidx} / {inpd.GetNumberOfLines()}") # get points for each ptid and add to output polydata cellptids = vtk.vtkIdList() @@ -468,7 +467,7 @@ def mask(inpd, fiber_mask, color=None, preserve_point_data=False, preserve_cell_ outpd.GetCellData().SetScalars(outcolors) if verbose: - print(f"<{os.path.basename(__file__)}> Fibers sampled:", outpd.GetNumberOfLines(), "/", inpd.GetNumberOfLines()) + print(f"<{os.path.basename(__file__)}> Fibers sampled: {outpd.GetNumberOfLines()} / {inpd.GetNumberOfLines()}") return outpd @@ -497,14 +496,14 @@ def symmetrize(inpd): # index into end of point array lastidx = outpoints.GetNumberOfPoints() - print(f"<{os.path.basename(__file__)}> Input number of points: ", lastidx) + print(f"<{os.path.basename(__file__)}> Input number of points: {lastidx}") # loop over all lines, insert line and reflected copy into output pd for lidx in range(0, inpd.GetNumberOfLines()): # progress if verbose: if lidx % 100 == 0: - print(f"<{os.path.basename(__file__)}> Line:", lidx, "/", inpd.GetNumberOfLines()) + print(f"<{os.path.basename(__file__)}> Line: {lidx} / {inpd.GetNumberOfLines()}") inpd.GetLines().GetNextCell(ptids) @@ -555,7 +554,7 @@ def remove_hemisphere(inpd, hemisphere=-1): # progress if verbose: if lidx % 100 == 0: - print(f"<{os.path.basename(__file__)}> Line:", lidx, "/", inpd.GetNumberOfLines()) + print(f"<{os.path.basename(__file__)}> Line: {lidx} / {inpd.GetNumberOfLines()}") inpd.GetLines().GetNextCell(ptids) @@ -637,8 +636,8 @@ def remove_outliers(inpd, min_fiber_distance, n_jobs=0, distance_method ='Mean') fiber_mask = mindist < min_fiber_distance if True: - num_fibers = len(numpy.nonzero(fiber_mask)[0]), "/", len(fiber_mask) - print(f"<{os.path.basename(__file__)}> Number retained after outlier removal: ", num_fibers) + num_fibers = f"{len(numpy.nonzero(fiber_mask)[0])} / {len(fiber_mask)}" + print(f"<{os.path.basename(__file__)}> Number retained after outlier removal: {num_fibers}") outpd = mask(inpd, fiber_mask, mindist) outpd_reject = mask(inpd, ~fiber_mask, mindist) @@ -700,7 +699,7 @@ def smooth(inpd, fiber_distance_sigma = 25, points_per_fiber=30, n_jobs=2, upper # gaussian smooth all fibers using local neighborhood for fidx in fiber_indices: if (fidx % 100) == 0: - print(fidx, '/', current_fiber_array.number_of_fibers) + print(f'{fidx} / {current_fiber_array.number_of_fibers}') # find indices of all nearby fibers indices = numpy.nonzero(distances[fidx] < upper_thresh)[0] @@ -786,8 +785,8 @@ def anisotropic_smooth(inpd, fiber_distance_threshold, points_per_fiber=30, n_jo iteration_count = 0 while not converged: - print(f"<{os.path.basename(__file__)}> ITERATION:", iteration_count, "SUM FIBER COUNTS:", numpy.sum(numpy.array(curr_count))) - print(f"<{os.path.basename(__file__)}> number indices", len(curr_indices)) + print(f"<{os.path.basename(__file__)}> ITERATION: {iteration_count} SUM FIBER COUNTS: {numpy.sum(numpy.array(curr_count))}") + print(f"<{os.path.basename(__file__)}> number indices {len(curr_indices)}") # fiber data structures for output of this iteration next_fibers = list() @@ -829,8 +828,7 @@ def anisotropic_smooth(inpd, fiber_distance_threshold, points_per_fiber=30, n_jo distances_flat = distances.flatten() pair_order = 
numpy.argsort(distances_flat) - print(f"<{os.path.basename(__file__)}> DISTANCE MIN:", distances_flat[pair_order[0]], \ - "DISTANCE COUNT:", distances.shape) + print(f"<{os.path.basename(__file__)}> DISTANCE MIN: {distances_flat[pair_order[0]]} DISTANCE COUNT: {distances.shape}") # if the smallest distance is greater or equal to the # threshold, we have converged @@ -904,8 +902,8 @@ def anisotropic_smooth(inpd, fiber_distance_threshold, points_per_fiber=30, n_jo current_fiber_array.fiber_array_s[curr_fidx] = curr_fib.s curr_fidx += 1 - print(f"<{os.path.basename(__file__)}> SUM FIBER COUNTS:", numpy.sum(numpy.array(curr_count)), "SUM DONE FIBERS:", numpy.sum(done)) - print(f"<{os.path.basename(__file__)}> MAX COUNT:" , numpy.max(numpy.array(curr_count)), "AVGS THIS ITER:", number_averages) + print(f"<{os.path.basename(__file__)}> SUM FIBER COUNTS: {numpy.sum(numpy.array(curr_count))} SUM DONE FIBERS: {numpy.sum(done)}") + print(f"<{os.path.basename(__file__)}> MAX COUNT: {numpy.max(numpy.array(curr_count))} AVGS THIS ITER: {number_averages}") # when converged, convert output to polydata outpd = current_fiber_array.convert_to_polydata() @@ -998,7 +996,7 @@ def laplacian_of_gaussian(inpd, fiber_distance_sigma = 25, points_per_fiber=30, # gaussian smooth all fibers using local neighborhood for fidx in fiber_indices: if (fidx % 100) == 0: - print(fidx, '/', fiber_array.number_of_fibers) + print(f'{fidx} / {fiber_array.number_of_fibers}') current_fiber = fiber_list[fidx] @@ -1126,9 +1124,7 @@ def pd_to_array(inpd, dims=225): data_vol = numpy.ndarray([dims,dims,dims]) # loop over lines inpd.GetLines().InitTraversal() - print(f"<{os.path.basename(__file__)}> Input number of points: ",\ - points.GetNumberOfPoints(),\ - "lines:", inpd.GetNumberOfLines()) + print(f"<{os.path.basename(__file__)}> Input number of points: {points.GetNumberOfPoints()} lines: {inpd.GetNumberOfLines()}") # loop over all lines for lidx in range(0, inpd.GetNumberOfLines()): # progress @@ -1168,9 +1164,7 @@ def measure_line_lengths(inpd): output_lengths = numpy.zeros(inpd.GetNumberOfLines()) # loop over lines inpd.GetLines().InitTraversal() - print(f"<{os.path.basename(__file__)}> Input number of points: ",\ - points.GetNumberOfPoints(),\ - "lines:", inpd.GetNumberOfLines()) + print(f"<{os.path.basename(__file__)}> Input number of points: {points.GetNumberOfPoints()} lines: {inpd.GetNumberOfLines()}") # loop over all lines for lidx in range(0, inpd.GetNumberOfLines()): # progress diff --git a/whitematteranalysis/io.py b/whitematteranalysis/io.py index 3ea23055..f18af4be 100644 --- a/whitematteranalysis/io.py +++ b/whitematteranalysis/io.py @@ -38,7 +38,7 @@ def read_polydata(filename): """Read whole-brain tractography as vtkPolyData format.""" if VERBOSE: - print("Reading in data from", filename, "...") + print(f"Reading in data from {filename}...") basename, extension = os.path.splitext(filename) @@ -55,8 +55,8 @@ def read_polydata(filename): outpd = reader.GetOutput() del reader if VERBOSE: - print("Done reading in data from", filename) - print("Number of lines found:", outpd.GetNumberOfLines()) + print(f"Done reading in data from {filename}") + print(f"Number of lines found: {outpd.GetNumberOfLines()}") return outpd @@ -84,8 +84,8 @@ def read_and_preprocess_polydata_directory(input_dir, fiber_length, number_of_fi num_pd = len(input_pd_fnames) print(f"<{os.path.basename(__file__)}> =======================================") - print(f"<{os.path.basename(__file__)}> Reading vtk and vtp files from directory: ", input_dir) - 
print(f"<{os.path.basename(__file__)}> Total number of files found: ", num_pd) + print(f"<{os.path.basename(__file__)}> Reading vtk and vtp files from directory: {input_dir}") + print(f"<{os.path.basename(__file__)}> Total number of files found: {num_pd}") print(f"<{os.path.basename(__file__)}> =======================================") input_pds = list() @@ -95,20 +95,20 @@ def read_and_preprocess_polydata_directory(input_dir, fiber_length, number_of_fi for fname in input_pd_fnames: subject_id = os.path.splitext(os.path.basename(fname))[0] subject_ids.append(subject_id) - print(f"<{os.path.basename(__file__)}> ", sidx + 1, "/", num_pd, subject_id, " Reading ", fname, "...") + print(f"<{os.path.basename(__file__)}> {sidx + 1} / {num_pd} {subject_id} Reading {fname}...") pd = read_polydata(fname) - print(f"<{os.path.basename(__file__)}> ", sidx + 1, "/", num_pd, subject_id, " Input number of fibers:", pd.GetNumberOfLines()) + print(f"<{os.path.basename(__file__)}> {sidx + 1} / {num_pd} {subject_id} Input number of fibers: {pd.GetNumberOfLines()}") pd2 = filter.preprocess(pd, min_length_mm=fiber_length, verbose=False, max_length_mm=fiber_length_max) - print(f"<{os.path.basename(__file__)}> ", sidx + 1, "/", num_pd, subject_id, " Length threshold", fiber_length, "mm. Number of fibers retained:", pd2.GetNumberOfLines()) + print(f"<{os.path.basename(__file__)}> {sidx + 1} / {num_pd} {subject_id} Length threshold {fiber_length} mm. Number of fibers retained: {pd2.GetNumberOfLines()}") pd3 = filter.downsample(pd2, number_of_fibers, verbose=False, random_seed=random_seed) - print(f"<{os.path.basename(__file__)}> ", sidx + 1, "/", num_pd, subject_id, " Downsample to", number_of_fibers, "fibers. Number of fibers retained:", pd3.GetNumberOfLines()) + print(f"<{os.path.basename(__file__)}> {sidx + 1} / num_pd {subject_id} Downsample to {number_of_fibers} fibers. Number of fibers retained: {pd3.GetNumberOfLines()}") input_pds.append(pd3) sidx += 1 print(f"<{os.path.basename(__file__)}> =======================================") print(f"<{os.path.basename(__file__)}> =======================================") - print(f"<{os.path.basename(__file__)}> Done reading vtk and vtp files from directory: ", input_dir) - print(f"<{os.path.basename(__file__)}> Total number of files read: ", len(input_pds)) + print(f"<{os.path.basename(__file__)}> Done reading vtk and vtp files from directory: {input_dir}") + print(f"<{os.path.basename(__file__)}> Total number of files read: {len(input_pds)}") print(f"<{os.path.basename(__file__)}> =======================================") return input_pds, subject_ids @@ -143,11 +143,11 @@ def write_polydata(polydata, filename): del writer if VERBOSE: - print("Done writing ", filename) + print(f"Done writing {filename}") def transform_polydata_from_disk(in_filename, transform_filename, out_filename): # Read it in. - print(f"<{os.path.basename(__file__)}> Transforming ", in_filename, "->", out_filename, "...") + print(f"<{os.path.basename(__file__)}> Transforming {in_filename} -> {out_filename}...") # Read the transform from disk because we cannot pickle it (root, ext) = os.path.splitext(transform_filename) @@ -180,7 +180,7 @@ def transform_polydata_from_disk(in_filename, transform_filename, out_filename): start_time = time.time() pd = read_polydata(in_filename) elapsed_time = time.time() - start_time - print("READ:", elapsed_time) + print(f"READ: {elapsed_time}") # Transform it. 
start_time = time.time() transformer = vtk.vtkTransformPolyDataFilter() @@ -191,14 +191,14 @@ def transform_polydata_from_disk(in_filename, transform_filename, out_filename): transformer.SetTransform(transform) transformer.Update() elapsed_time = time.time() - start_time - print("TXFORM:", elapsed_time) + print(f"TXFORM: {elapsed_time}") # Write it out. start_time = time.time() pd2 = transformer.GetOutput() write_polydata(pd2, out_filename) elapsed_time = time.time() - start_time - print("WRITE:", elapsed_time) + print(f"WRITE: {elapsed_time}") # Clean up. del transformer @@ -208,11 +208,11 @@ def transform_polydata_from_disk(in_filename, transform_filename, out_filename): def transform_polydata_from_disk_using_transform_object(in_filename, transform, out_filename): # Read it in. - print(f"<{os.path.basename(__file__)}> Transforming ", in_filename, "->", out_filename, "...") + print(f"<{os.path.basename(__file__)}> Transforming {in_filename} -> {out_filename}...") start_time = time.time() pd = read_polydata(in_filename) elapsed_time = time.time() - start_time - print("READ:", elapsed_time) + print(f"READ: {elapsed_time}") # Transform it. start_time = time.time() transformer = vtk.vtkTransformPolyDataFilter() @@ -223,14 +223,14 @@ def transform_polydata_from_disk_using_transform_object(in_filename, transform, transformer.SetTransform(transform) transformer.Update() elapsed_time = time.time() - start_time - print("TXFORM:", elapsed_time) + print(f"TXFORM: {elapsed_time}") # Write it out. start_time = time.time() pd2 = transformer.GetOutput() write_polydata(pd2, out_filename) elapsed_time = time.time() - start_time - print("WRITE:", elapsed_time) + print(f"WRITE: {elapsed_time}") # Clean up. del transformer @@ -249,9 +249,9 @@ def transform_polydatas_from_disk(input_dir, transforms, output_dir): input_pd_fnames = list_vtk_files(input_dir) num_pd = len(input_pd_fnames) print(f"<{os.path.basename(__file__)}> =======================================") - print(f"<{os.path.basename(__file__)}> Transforming vtk and vtp files from directory: ", input_dir) - print(f"<{os.path.basename(__file__)}> Total number of files found: ", num_pd) - print(f"<{os.path.basename(__file__)}> Writing output to directory: ", output_dir) + print(f"<{os.path.basename(__file__)}> Transforming vtk and vtp files from directory: {input_dir}") + print(f"<{os.path.basename(__file__)}> Total number of files found: {num_pd}") + print(f"<{os.path.basename(__file__)}> Writing output to directory: {output_dir}") print(f"<{os.path.basename(__file__)}> =======================================") if not os.path.exists(output_dir): @@ -265,7 +265,7 @@ def transform_polydatas_from_disk(input_dir, transforms, output_dir): for idx in range(0, len(input_pd_fnames)): in_filename = input_pd_fnames[idx] subject_id = os.path.splitext(os.path.basename(in_filename))[0] - out_filename = os.path.join(output_dir, subject_id + '_reg.vtk') + out_filename = os.path.join(output_dir, f'{subject_id}_reg.vtk') transform_polydata_from_disk_using_transform_object(in_filename, transforms[idx], out_filename) # This function was faster but is not always safe. 
Could crash due to missing file if any write/disk access issue @@ -281,9 +281,9 @@ def transform_polydatas_from_diskUNSAFE(input_dir, transforms, output_dir, paral input_pd_fnames = list_vtk_files(input_dir) num_pd = len(input_pd_fnames) print(f"<{os.path.basename(__file__)}> =======================================") - print(f"<{os.path.basename(__file__)}> Transforming vtk and vtp files from directory: ", input_dir) - print(f"<{os.path.basename(__file__)}> Total number of files found: ", num_pd) - print(f"<{os.path.basename(__file__)}> Writing output to directory: ", output_dir) + print(f"<{os.path.basename(__file__)}> Transforming vtk and vtp files from directory: {input_dir}") + print(f"<{os.path.basename(__file__)}> Total number of files found: {num_pd}") + print(f"<{os.path.basename(__file__)}> Writing output to directory: {output_dir}") print(f"<{os.path.basename(__file__)}> =======================================") if not os.path.exists(output_dir): @@ -302,14 +302,14 @@ def transform_polydatas_from_diskUNSAFE(input_dir, transforms, output_dir, paral fname = input_pd_fnames[idx] tx = transforms[idx] subject_id = os.path.splitext(os.path.basename(fname))[0] - out_fname = os.path.join(output_dir, subject_id + '_reg.vtk') + out_fname = os.path.join(output_dir, f'{subject_id}_reg.vtk') fname_list.append(fname) out_fname_list.append(out_fname) # save the transform to disk because we cannot pickle it if tx.GetClassName() == 'vtkThinPlateSplineTransform': writer = vtk.vtkMNITransformWriter() writer.AddTransform(tx) - fname = '.tmp_vtk_txform_' + str(subject_id) + '.xfm' + fname = f'.tmp_vtk_txform_{str(subject_id)}.xfm' fname = os.path.join(output_dir, fname) writer.SetFileName(fname) writer.Write() @@ -329,7 +329,7 @@ def transform_polydatas_from_diskUNSAFE(input_dir, transforms, output_dir, paral ## writer.Write() ## del writer else: - fname = '.tmp_vtk_txform_' + str(subject_id) + '.txt' + fname = f'.tmp_vtk_txform_{str(subject_id)}.txt' f = open(fname, 'w') for i in range(0,4): for j in range(0,4): @@ -364,9 +364,9 @@ def transform_polydatas_from_diskOLD(input_dir, transforms, output_dir): input_pd_fnames = list_vtk_files(input_dir) num_pd = len(input_pd_fnames) print(f"<{os.path.basename(__file__)}> =======================================") - print(f"<{os.path.basename(__file__)}> Transforming vtk and vtp files from directory: ", input_dir) - print(f"<{os.path.basename(__file__)}> Total number of files found: ", num_pd) - print(f"<{os.path.basename(__file__)}> Writing output to directory: ", output_dir) + print(f"<{os.path.basename(__file__)}> Transforming vtk and vtp files from directory: {input_dir}") + print(f"<{os.path.basename(__file__)}> Total number of files found: {num_pd}") + print(f"<{os.path.basename(__file__)}> Writing output to directory: {output_dir}") print(f"<{os.path.basename(__file__)}> =======================================") if not os.path.exists(output_dir): @@ -380,8 +380,8 @@ def transform_polydatas_from_diskOLD(input_dir, transforms, output_dir): # Read it in. fname = input_pd_fnames[idx] subject_id = os.path.splitext(os.path.basename(fname))[0] - out_fname = os.path.join(output_dir, subject_id + '_reg.vtk') - print(f"<{os.path.basename(__file__)}> ", idx + 1, "/", num_pd, subject_id, " Transforming ", fname, "->", out_fname, "...") + out_fname = os.path.join(output_dir, f'{subject_id}_reg.vtk') + print(f"<{os.path.basename(__file__)}> {idx + 1} / {num_pd} {subject_id} Transforming {fname} -> {out_fname}...") pd = read_polydata(fname) # Transform it. 
transformer = vtk.vtkTransformPolyDataFilter() @@ -423,7 +423,7 @@ def write_transforms_to_itk_format(transform_list, outdir, subject_ids=None): writer = vtk.vtkMNITransformWriter() writer.AddTransform(tx) if subject_ids is not None: - fname = 'vtk_txform_' + str(subject_ids[idx]) + '.xfm' + fname = f'vtk_txform_{str(subject_ids[idx])}.xfm' else: fname = f'vtk_txform_{idx:05d}.xfm' writer.SetFileName(os.path.join(outdir, fname)) @@ -431,7 +431,7 @@ def write_transforms_to_itk_format(transform_list, outdir, subject_ids=None): # file name for itk transform written below if subject_ids is not None: - fname = 'itk_txform_' + str(subject_ids[idx]) + '.tfm' + fname = f'itk_txform_{str(subject_ids[idx])}.tfm' else: fname = f'itk_txform_{idx:05d}.tfm' fname = os.path.join(outdir, fname) @@ -520,7 +520,7 @@ def write_transforms_to_itk_format(transform_list, outdir, subject_ids=None): displacements_LPS = list() - print("LPS grid for storing transform:", grid_points_LPS[0], grid_points_LPS[-1], grid_spacing) + print(f"LPS grid for storing transform: {grid_points_LPS[0]} {grid_points_LPS[-1]} {grid_spacing}") lps_points = vtk.vtkPoints() lps_points2 = vtk.vtkPoints() @@ -580,7 +580,7 @@ def write_transforms_to_itk_format(transform_list, outdir, subject_ids=None): f.write(f' {origin[0]}') f.write(f' {origin[1]}') f.write(f' {origin[2]}') - f.write(' {0} {0} {0}'.format(grid_spacing)) + f.write(f' {grid_spacing} {grid_spacing} {grid_spacing}') f.write(' 1 0 0 0 1 0 0 0 1\n') f.close() @@ -726,7 +726,7 @@ def read(self, dirname, readpd=False, readdist=False): """Read output (class laterality.LateralityResults) for one subject.""" if not os.path.isdir(dirname): - print(f"<{os.path.basename(__file__)}> error: directory does not exist.", dirname) + print(f"<{os.path.basename(__file__)}> error: directory does not exist {dirname}") if readpd: # input polydata diff --git a/whitematteranalysis/laterality.py b/whitematteranalysis/laterality.py index dd1cbd29..b69131df 100644 --- a/whitematteranalysis/laterality.py +++ b/whitematteranalysis/laterality.py @@ -83,14 +83,7 @@ def __init__(self): self.fibers = FiberArray() def __str__(self): - output = " sigma\t\t\t" + str(self.sigma) \ - + "\n points_per_fiber\t" + str(self.points_per_fiber) \ - + "\n threshold\t\t" + str(self.threshold) \ - + "\n verbose\t\t" + str(self.verbose) \ - + "\n parallel_jobs\t\t" + str(self.parallel_jobs) \ - + "\n parallel_verbose\t" + str(self.parallel_verbose) \ - + "\n fibers\n\t\t\t" \ - + str(self.fibers) + output = f" sigma\t\t\t{str(self.sigma)}\n points_per_fiber\t{str(self.points_per_fiber)}\n threshold\t\t{str(self.threshold)}\n verbose\t\t{str(self.verbose)} \n parallel_jobs\t\t{str(self.parallel_jobs)}\n parallel_verbose\t{str(self.parallel_verbose)}\n fibers\n\t\t\t{str(self.fibers)}" return output @@ -130,7 +123,7 @@ def compute(self, input_vtk_polydata): if self.fibers_per_hemisphere <= num_fibers: num_fibers = self.fibers_per_hemisphere else: - raise Exception("Fibers per hemisphere is set too high for the dataset. Current subject maximum is"+str(num_fibers)) + raise Exception(f"Fibers per hemisphere is set too high for the dataset. Current subject maximum is {str(num_fibers)}") # grab num_fibers fibers from each hemisphere. 
# use the first n since they were randomly sampled from the whole dataset @@ -144,7 +137,7 @@ def compute(self, input_vtk_polydata): # Now convert to array with points and hemispheres as above self.fibers.convert_from_polydata(input_vtk_polydata) if self.verbose: - print(f"<{os.path.basename(__file__)}> Using ", num_fibers , " fibers per hemisphere.") + print(f"<{os.path.basename(__file__)}> Using {num_fibers} fibers per hemisphere.") # square sigma for later Gaussian sigmasq = self.sigma * self.sigma @@ -164,17 +157,13 @@ def compute(self, input_vtk_polydata): # tell user we are doing something if self.verbose: - print(f"<{os.path.basename(__file__)}> Fibers in each hemisphere.", \ - "L:", self.fibers.number_left_hem, \ - "R:", self.fibers.number_right_hem, \ - "/ Total:", self.fibers.number_of_fibers) + print(f"<{os.path.basename(__file__)}> Fibers in each hemisphere. L: {self.fibers.number_left_hem} R: {self.fibers.number_right_hem} / Total: {self.fibers.number_of_fibers}") print(f"<{os.path.basename(__file__)}> Starting to compute laterality indices") # run the computation, either in parallel or not if (USE_PARALLEL & (self.parallel_jobs > 1)): if self.verbose: - print(f"<{os.path.basename(__file__)}> Starting parallel code. Processes:", \ - self.parallel_jobs) + print(f"<{os.path.basename(__file__)}> Starting parallel code. Processes: {self.parallel_jobs}") # compare to right hemisphere (reflect fiber first if in left hem) ret = Parallel( diff --git a/whitematteranalysis/mrml.py b/whitematteranalysis/mrml.py index 04060a48..ffc31a1c 100644 --- a/whitematteranalysis/mrml.py +++ b/whitematteranalysis/mrml.py @@ -29,7 +29,7 @@ def write(self, pd_filenames, colors, filename, ratio=1.0): col = str(colors[cidx,0]/256.0) + " " + str(colors[cidx,1]/256.0) + " " + str(colors[cidx,2]/256.0) color_list.append(col) #print color_list - print(f"<{os.path.basename(__file__)}> Writing", len(pd_filenames), " filenames in MRML scene:", filename) + print(f"<{os.path.basename(__file__)}> Writing {len(pd_filenames)} filenames in MRML scene: {filename}") f = open(filename, "w") f.write(self.header) for pidx in range(len(pd_filenames)): @@ -48,9 +48,9 @@ def write_node(self, pd_fname, color, name, f, ratio): f.write("") f.write("\n") @@ -62,11 +62,11 @@ def write_node(self, pd_fname, color, name, f, ratio): f.write("") f.write("\n") @@ -78,11 +78,11 @@ def write_node(self, pd_fname, color, name, f, ratio): f.write("") f.write("\n") @@ -94,11 +94,11 @@ def write_node(self, pd_fname, color, name, f, ratio): f.write("") f.write("\n") @@ -107,21 +107,21 @@ def write_node(self, pd_fname, color, name, f, ratio): f.write("") + f.write(f"SubsamplingRatio=\"{str(ratio)}\" >") f.write("\n") for prop in props: @@ -134,6 +134,6 @@ def write_prop_node(self, idx, f): f.write("") f.write("\n") diff --git a/whitematteranalysis/register_two_subjects.py b/whitematteranalysis/register_two_subjects.py index 1fc2b761..8cfae20c 100644 --- a/whitematteranalysis/register_two_subjects.py +++ b/whitematteranalysis/register_two_subjects.py @@ -160,7 +160,7 @@ def objective_function(self, current_x): self.objective_function_values.append(obj) if self.verbose: - print("O:", obj, "X:", self._x_opt) + print(f"O: {obj} X: {self._x_opt}") return obj @@ -215,13 +215,13 @@ def compute(self): ren = wma.render.render(pd2, number_of_fibers_fixed, verbose=False) # save low-res images for speed ren.magnification = 3 - ren.save_views(self.output_directory, 'fixed_brain_' + self.process_id_string) + ren.save_views(self.output_directory, 
f'fixed_brain_{self.process_id_string}') del ren self.iterations += 1 if self.verbose: - print(f"<{os.path.basename(__file__)}> Initial value for X:", self.initial_transform) + print(f"<{os.path.basename(__file__)}> Initial value for X: {self.initial_transform}") if self.optimizer == "Cobyla": @@ -267,7 +267,7 @@ def compute(self): maxiter=self.maxfun, disp=1, full_output=True) - print("FLAG:", warnflag) + print(f"FLAG: {warnflag}") else: print("Unknown optimizer.") @@ -299,7 +299,7 @@ def compute(self): self.final_transform[14] = 0.0 tx = self.final_transform - print("TRANS:", tx[0], tx[1], tx[2], "ROT:", tx[3], tx[4], tx[5], "SCALE:", tx[6], tx[7], tx[8], "SHEAR:", tx[9], tx[10], tx[11], tx[12], tx[13], tx[14], "MODE:", self.mode, "MODE0:", self.mode[0]) + print(f"TRANS: {tx[0]} {tx[1]} {tx[2]} ROT: {tx[3]} {tx[4]} {tx[5]} SCALE: {tx[6]} {tx[7]} {tx[8]} SHEAR: {tx[9]} {tx[10]} {tx[11]} {tx[12]} {tx[13]} {tx[14]} MODE: {self.mode} MODE0: {self.mode[0]}") # Return output transform from this iteration return self.final_transform diff --git a/whitematteranalysis/register_two_subjects_nonrigid.py b/whitematteranalysis/register_two_subjects_nonrigid.py index 3f3e3e00..2afe8222 100644 --- a/whitematteranalysis/register_two_subjects_nonrigid.py +++ b/whitematteranalysis/register_two_subjects_nonrigid.py @@ -136,7 +136,7 @@ def initialize_nonrigid_grid(self): grid = self.nonrigid_grid_10 grid_order = self.grid_order_10 else: - print(f"<{os.path.basename(__file__)}> Error: Unknown nonrigid grid mode:", self.nonrigid_grid_resolution) + print(f"<{os.path.basename(__file__)}> Error: Unknown nonrigid grid mode: {self.nonrigid_grid_resolution}") tmp = list() for r in grid: for a in grid: @@ -170,7 +170,7 @@ def objective_function(self, current_x): self.objective_function_values.append(obj) if self.verbose: - print("O:", obj, "X:", current_x) + print(f"O: {obj} X: {current_x}") #print "X:", self._x_opt return obj @@ -255,14 +255,14 @@ def compute(self): ren = wma.render.render(pd2, number_of_fibers_fixed, verbose=False) # save low-res images for speed ren.magnification = 3 - ren.save_views(self.output_directory, 'fixed_brain_' + self.process_id_string) + ren.save_views(self.output_directory, f'fixed_brain_{self.process_id_string}') del ren self.iterations += 1 self.final_transform = numpy.zeros(self.initial_transform.shape) if self.verbose: - print(f"<{os.path.basename(__file__)}> Initial value for X:", self.initial_transform) + print(f"<{os.path.basename(__file__)}> Initial value for X: {self.initial_transform}") if self.optimizer == "Cobyla": @@ -319,7 +319,7 @@ def compute(self): maxiter=self.maxfun, disp=1, full_output=True) - print("TRANS:", self.final_transform, "FLAG:", warnflag) + print(f"TRANS: {self.final_transform} FLAG: {warnflag}") else: print("Unknown optimizer.") diff --git a/whitematteranalysis/register_two_subjects_nonrigid_bsplines.py b/whitematteranalysis/register_two_subjects_nonrigid_bsplines.py index 88c8606e..7b8b7515 100644 --- a/whitematteranalysis/register_two_subjects_nonrigid_bsplines.py +++ b/whitematteranalysis/register_two_subjects_nonrigid_bsplines.py @@ -60,7 +60,7 @@ def constraint(self, x_current): self.total_time = time.time() - self.start_time #print iters, "/", self.maxfun, "Total time:", self.total_time, "Last iters:", elapsed_time, "Per iter:", elapsed_time / print_every, "Memory:", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss progress_file = open(self.progress_filename, 'a') - print(iters, "/", self.maxfun, "Total time:", self.total_time, "Last iters:", 
elapsed_time, "Per iter:", elapsed_time / print_every, "Memory:", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss, file=progress_file) + print(f"{iters} / {self.maxfun} Total time: {self.total_time} Last iters: {elapsed_time} Per iter: {elapsed_time / print_every} Memory: {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss}", file=progress_file) progress_file.close() return penalty @@ -157,7 +157,7 @@ def objective_function(self, current_x): self.constraint(current_x) if self.verbose: - print("O:", obj, "X:", scaled_current_x) + print(f"O: {obj} X: {scaled_current_x}") #print "X:", self._x_opt # Stop the optimizer if needed @@ -213,13 +213,13 @@ def compute(self): compute). Then call compute several times, using different parameters for the class, for example first just for translation.""" - print("OPTIMIZER:", self.optimizer) + print(f"OPTIMIZER: {self.optimizer}") self.start_time = time.time() self.total_time = 0.0 - self.progress_filename = os.path.join(self.output_directory, "log"+self.process_id_string+".log") + self.progress_filename = os.path.join(self.output_directory, f"log{self.process_id_string}log") print(self.progress_filename) progress_file = open(self.progress_filename, 'w') - print("Starting computation, time:", self.start_time, file=progress_file) + print(f"Starting computation, time: {self.start_time}", file=progress_file) progress_file.close() # subject data must be input first. No check here for speed #self.fixed = None @@ -254,18 +254,18 @@ def compute(self): ren = wma.render.render(pd2, number_of_fibers_fixed, verbose=False) # save low-res images for speed ren.magnification = 3 - ren.save_views(self.output_directory, 'fixed_brain_' + self.process_id_string) + ren.save_views(self.output_directory, f'fixed_brain_{self.process_id_string}') del ren self.iterations += 1 self.final_transform = numpy.zeros(self.initial_transform.shape) if self.verbose: - print(f"<{os.path.basename(__file__)}> Initial value for X:", self.initial_transform) + print(f"<{os.path.basename(__file__)}> Initial value for X: {self.initial_transform}") progress_file = open(self.progress_filename, 'a') self.total_time = time.time() - self.start_time - print("INITIAL transform shape", self.initial_transform.shape, "Init time:", self.total_time, file=progress_file) + print(f"INITIAL transform shape {self.initial_transform.shape} Init time: {self.total_time}", file=progress_file) progress_file.close() # initialize time for objective function computations @@ -273,7 +273,7 @@ def compute(self): if self.optimizer == "Cobyla": - print("INITIAL transform shape", self.initial_transform.shape) + print(f"INITIAL transform shape {self.initial_transform.shape}") # Optimize using cobyla. Allows definition of initial and # final step size scales (rhos), as well as constraints. Here # we use the constraints to encourage that the transform stays a transform. @@ -317,7 +317,7 @@ def compute(self): epsilon=self.final_step * self.scaling, iprint=1) except: - print("EXCEPTION WAS CAUGHT, total objectives:", self.objective_computations) + print(f"EXCEPTION WAS CAUGHT, total objectives: {self.objective_computations}") #print f, dict elif self.optimizer == "Powell": @@ -332,18 +332,18 @@ def compute(self): maxiter=self.maxfun, disp=1, full_output=True) - print("TRANS:", self.final_transform, "FLAG:", warnflag) + print(f"TRANS: {self.final_transform} FLAG: {warnflag}") else: print("Unknown optimizer.") progress_file = open(self.progress_filename, 'a') self.total_time = time.time() - self.start_time - print("Done optimizing. 
TOTAL TIME:", self.total_time, file=progress_file) + print(f"Done optimizing. TOTAL TIME: {self.total_time}", file=progress_file) progress_file.close() if self.verbose: - print("O:", self.objective_function_values) + print(f"O: {self.objective_function_values}") # Return output transforms from this iteration return self.final_transform diff --git a/whitematteranalysis/render.py b/whitematteranalysis/render.py index ea622845..93c22737 100644 --- a/whitematteranalysis/render.py +++ b/whitematteranalysis/render.py @@ -23,13 +23,13 @@ def render(input_polydata, number_of_fibers=None, opacity=1, depth_peeling=False if number_of_fibers is not None: if verbose: - print(f"<{os.path.basename(__file__)}> Downsampling vtkPolyData:", number_of_fibers) + print(f"<{os.path.basename(__file__)}> Downsampling vtkPolyData: {number_of_fibers}") # downsample if requested input_polydata = filter.downsample(input_polydata, number_of_fibers, preserve_point_data=True, preserve_cell_data=True, verbose=verbose) if data_name is not None: if verbose: - print(f"<{os.path.basename(__file__)}> Visualizing data:", data_name) + print(f"<{os.path.basename(__file__)}> Visualizing data: {data_name}") if data_mode == "Cell": input_polydata.GetCellData().SetActiveScalars(data_name) if data_mode == "Point": @@ -213,7 +213,7 @@ def render_polydata(self, input_polydata, scalar_range=None, self.render_RGB = True self.renderer.RemoveActor2D(self.scalarbar) if verbose: - print(f"<{os.path.basename(__file__)}> RGB: ", self.render_RGB) + print(f"<{os.path.basename(__file__)}> RGB: f{self.render_RGB}") if data_mode == "Cell": mapper.SetScalarModeToUseCellData() @@ -380,10 +380,10 @@ def save_image(self, filename="test.jpg"): def save_views(self, directory=".", subjectID=None, verbose=True): if verbose: - print(f"<{os.path.basename(__file__)}> Saving rendered views to disk:", directory) + print(f"<{os.path.basename(__file__)}> Saving rendered views to disk: {directory}") if not os.path.isdir(directory): - print(f"<{os.path.basename(__file__)}> ERROR: directory does not exist.", directory) + print(f"<{os.path.basename(__file__)}> ERROR: directory does not exist. 
{directory}") return #ext = ".png" @@ -392,19 +392,19 @@ def save_views(self, directory=".", subjectID=None, verbose=True): # Use subject ID as part of filename for easier visual # identification of problem cases if subjectID is not None: - fname_sup = "view_sup_"+subjectID+ext - fname_inf = "view_inf_"+subjectID+ext - fname_left = "view_left_"+subjectID+ext - fname_right = "view_right_"+subjectID+ext - fname_ant = "view_ant_"+subjectID+ext - fname_post = "view_post_"+subjectID+ext + fname_sup = f"view_sup_{subjectID}{ext}" + fname_inf = f"view_inf_{subjectID}{ext}" + fname_left = f"view_left_{subjectID}{ext}" + fname_right = f"view_right_{subjectID}{ext}" + fname_ant = f"view_ant_{subjectID}{ext}" + fname_post = f"view_post_{subjectID}{ext}" else: - fname_sup = "view_sup"+ext - fname_inf = "view_inf"+ext - fname_left = "view_left"+ext - fname_right = "view_right"+ext - fname_ant = "view_ant"+ext - fname_post = "view_post"+ext + fname_sup = f"view_sup{ext}" + fname_inf = f"view_inf{ext}" + fname_left = f"view_left{ext}" + fname_right = f"view_right{ext}" + fname_ant = f"view_ant{ext}" + fname_post = f"view_post{ext}" # sometimes the model gets clipped, this mostly fixes it self.renderer.ResetCameraClippingRange() diff --git a/whitematteranalysis/tract_measurement.py b/whitematteranalysis/tract_measurement.py index cdfdf13c..a2ce7ed0 100755 --- a/whitematteranalysis/tract_measurement.py +++ b/whitematteranalysis/tract_measurement.py @@ -76,7 +76,7 @@ def check(self): def get_measurements_by_name(self, query_header_name): if not numpy.sum(self.measurement_header == query_header_name) == 1: - print(" Error: Header", query_header_name, "cannot be found. Select from:\n", self.measurement_header) + print(f" Error: Header {query_header_name} cannot be found. Select from:\n {self.measurement_header}") return None header_index = numpy.where(self.measurement_header == query_header_name)[0][0] @@ -84,7 +84,7 @@ def get_measurements_by_name(self, query_header_name): def get_measurements_by_index(self, query_index): if query_index >= self.measurement_header.shape[0]: - print(" Error: Index", query_index, "should range from 0 to", self.measurement_header.shape[0]) + print(f" Error: Index {query_index} should range from 0 to {self.measurement_header.shape[0]}") return self.measurement_matrix[:, query_index] @@ -133,7 +133,7 @@ def __init__(self): def load(self): if not os.path.isfile(self.demographics_file): - print(f"<{os.path.basename(__file__)}> Error: Input file", self.demographics_file , "does not exist.") + print(f"<{os.path.basename(__file__)}> Error: Input file {self.demographics_file} does not exist.") raise AssertionError if os.path.splitext(self.demographics_file)[1] != '.xls' and os.path.splitext(self.demographics_file)[1] != '.xlsx': @@ -145,7 +145,7 @@ def load(self): sh = wb.sheet_by_index(0) self.demographics_header = list(map(str, sh.row_values(0))) except: - print(f"<{os.path.basename(__file__)}> Error: Fail to load:", self.demographics_file) + print(f"<{os.path.basename(__file__)}> Error: Fail to load: {self.demographics_file}") print(" Please make sure the file has the demographics in the first sheet.") raise AssertionError @@ -161,13 +161,12 @@ def check(self): # Simple check if the first two fields are subjectID and groupID header = self.demographics_header if header[0] != 'subjectID' or header[1] != 'groupID': - print(f"<{os.path.basename(__file__)}> Error: Demographics loading failed. 
\n" \ - " First two fields extracted are [", header[0], "] and [", header[1], "], which are expected to be [ subjectID ] and [ groupID ].") + print(f"<{os.path.basename(__file__)}> Error: Demographics loading failed. \nFirst two fields extracted are [{header[0]}] and [{header[1]}] which are expected to be [ subjectID ] and [ groupID ].") raise AssertionError def get_demographics_by_index(self, query_index): if query_index >= len(self.demographics_header): - print(" Error: Index", query_index, "should range from 0 to", len(self.demographics_header)) + print(f" Error: Index {query_index} should range from 0 to {len(self.demographics_header)}") return self.demographics[query_index] @@ -176,7 +175,7 @@ def get_demographics_by_header(self, query_header_name): if query_header_name == self.demographics_header[h]: return self.demographics[h] - print(" Error: Header", query_header_name, "cannot be found. Select from:\n", self.demographics_header) + print(f" Error: Header {query_header_name} cannot be found. Select from:\n{self.demographics_header}") return None def load_demographics(xlsx):