From fc0f9ce81799d78a0652c0a618464edee54ad1f1 Mon Sep 17 00:00:00 2001
From: "Vladimir S. FONOV"
Date: Fri, 4 Aug 2017 17:59:03 -0400
Subject: [PATCH] Moved files from internal repository

---
 .../cerebellum_segment_options.json | 10 +
 examples/add_configuration/cv_cerebellum.json | 3 +
 .../manual_library_cerebellum.json | 46 +
 examples/real_tests/test_grading/cv.json | 8 +
 examples/real_tests/test_grading/cv_results.R | 70 +
 examples/real_tests/test_grading/grade.json | 35 +
 .../test_grading/library_description.json | 50 +
 examples/real_tests/test_grading/run_test.sh | 131 +
 .../real_tests/test_grading/run_test_fast.sh | 136 ++
 .../test_grading/run_test_linear.sh | 131 +
 .../test_grading/run_test_separate.sh | 155 ++
 .../real_tests/test_grading/run_test_slow.sh | 131 +
 .../real_tests/test_segmentation/.gitignore | 11 +
 .../test_segmentation/cv_ants_ln.json | 8 +
 .../test_segmentation/cv_ants_ln3.json | 8 +
 .../test_segmentation/cv_nl_results.R | 32 +
 .../test_segmentation/cv_nl_results_beta.R | 40 +
 .../real_tests/test_segmentation/cv_re.json | 8 +
 .../real_tests/test_segmentation/cv_re_2.json | 9 +
 .../real_tests/test_segmentation/cv_results.R | 33 +
 .../test_segmentation/ec_library_bbox.csv | 8 +
 .../test_segmentation/ec_library_bbox_re.csv | 8 +
 .../test_segmentation/ec_train2.json | 16 +
 .../test_segmentation/ec_train_ants_ln.json | 14 +
 .../test_segmentation/ec_train_ants_ln3.json | 14 +
 .../library_description_ants_ln.json | 53 +
 .../library_description_ants_ln2.json | 53 +
 .../library_description_ants_ln3.json | 53 +
 .../test_segmentation/run_test_adni_ants.sh | 142 ++
 .../run_test_adni_ants2.2.sh | 146 ++
 .../test_segmentation/run_test_adni_ants2.sh | 247 ++
 .../run_test_adni_ants_beta_0.5.sh | 128 +
 .../run_test_adni_ants_beta_1.0.sh | 128 +
 .../run_test_adni_ants_beta_new.sh | 129 +
 .../run_test_adni_ants_beta_none.sh | 129 +
 .../run_test_adni_ants_label_norm.sh | 149 ++
 .../run_test_adni_ants_nm.sh | 162 ++
 .../run_test_adni_ants_nuyl2.sh | 136 ++
 .../test_segmentation/run_test_adni_ec.sh | 131 +
 .../test_segmentation/run_test_adni_ec2.sh | 128 +
 .../test_segmentation/run_test_adni_ec3.sh | 135 ++
 .../run_test_adni_ec_ants.sh | 153 ++
 .../run_test_adni_ec_dumb.sh | 128 +
 .../run_test_adni_ec_dumb2.sh | 128 +
 .../run_test_adni_ec_dumb_ants.sh | 141 ++
 .../test_segmentation/run_test_adni_ec_xgb.sh | 135 ++
 .../run_test_adni_elastix.sh | 145 ++
 .../test_segmentation/run_test_adni_elx.sh | 158 ++
 .../test_segmentation/run_test_adni_elx2.sh | 158 ++
 .../test_segmentation/run_test_adni_elx3.sh | 159 ++
 .../test_segmentation/run_test_adni_elx4.sh | 158 ++
 .../test_segmentation/segment_ants_ln.json | 28 +
 .../test_segmentation/segment_ants_ln3.json | 29 +
 .../synthetic_tests/test_lng_model/.gitignore | 7 +
 .../test_lng_model/log_std.txt | 233 ++
 .../test_lng_model/make_lin_graph.sh | 34 +
 .../test_lng_model/prepare_test_data.sh | 41 +
 .../test_lng_model/prepare_test_data_ldd.sh | 36 +
 .../test_lng_model/prepare_test_data_lin.sh | 41 +
 .../test_lng_model/prepare_test_data_rot.sh | 45 +
 .../test_lng_model/run_all_ldd.sh | 6 +
 .../test_lng_model/scoop_test_ldd.py | 19 +
 .../test_lng_model/subjects_1.lst | 18 +
 .../test_lng_model/subjects_ldd.lst | 9 +
 .../test_lng_model/subjects_ldd_cut.lst | 9 +
 .../test_lng_model/subjects_lin.lst | 18 +
 .../test_lng_model/subjects_lin_cut.lst | 18 +
 .../test_lng_model/subjects_nomask.lst | 18 +
 .../test_lng_model/subjects_rot.lst | 9 +
 .../test_lng_model/subjects_rot_1.lst | 9 +
 .../test_lng_model/subjects_rot_2.lst | 9 +
.../test_lng_model/subjects_rot_2a.lst | 9 + .../test_lng_model/subjects_rot_cut.lst | 9 + .../test_lng_model/test_lng_model.py | 36 + .../test_lng_model/test_lng_model_LCC.py | 39 + .../test_lng_model/test_lng_model_LCC_1.py | 39 + .../test_lng_model/test_lng_model_LCC_ldd.py | 40 + .../test_lng_model/test_lng_model_LCC_lin.py | 40 + .../test_lng_model/test_lng_model_LCC_rot.py | 38 + .../test_lng_model_LCC_rot_m.py | 38 + .../test_lng_model/test_lng_model_lim.py | 32 + .../test_lng_model/test_lng_model_lim_nd.py | 33 + .../test_lng_model/test_lng_model_nomask.py | 26 + .../test_lng_model/test_lng_model_std.py | 52 + .../test_lng_model/test_model_LCC_ldd.py | 38 + .../test_lng_model/test_model_SSD_ldd.py | 38 + .../test_lng_model/test_model_std.py | 33 + .../test_lng_model/test_regression.py | 104 + .../test_model_creation/.gitignore | 7 + .../test_model_creation/big_subjects.lst | 9 + .../test_model_creation/prepare_test_data.sh | 37 + .../prepare_test_data_with_bias.sh | 53 + .../test_model_creation/scoop_test_1.py | 26 + .../test_model_creation/scoop_test_2.py | 26 + .../test_model_creation/scoop_test_bias.py | 16 + .../test_model_creation/scoop_test_bias_n4.py | 17 + .../scoop_test_bias_sym.py | 16 + .../scoop_test_downsample.py | 14 + .../test_model_creation/scoop_test_ldd.py | 24 + .../test_model_creation/scoop_test_ldd_sym.py | 22 + .../test_model_creation/scoop_test_lsq6.py | 12 + .../test_model_creation/scoop_test_nl.py | 19 + .../test_model_creation/scoop_test_nl_ants.py | 34 + .../test_model_creation/scoop_test_nl_dd.py | 36 + .../scoop_test_nl_elastix.py | 34 + .../test_model_creation/scoop_test_nl_sym.py | 17 + .../test_model_creation/scoop_test_nobias.py | 13 + .../test_model_creation/subjects.lst | 9 + .../test_model_creation/test_nl.py | 16 + .../test_model_creation/test_scipy.xfm | 11 + .../test_model_creation/test_std.xfm | 12 + .../test_registration/.gitignore | 5 + .../test_registration/prepare_test_data.sh | 29 + .../test_registration/test_ants.sh | 22 + .../test_registration/test_ldd_reg.py | 47 + .../test_registration/test_nl_reg.py | 143 ++ .../test_segmentation/.gitignore | 6 + .../synthetic_tests/test_segmentation/cv.json | 9 + .../test_segmentation/cv_ants.json | 5 + .../test_segmentation/cv_ants2.json | 9 + .../test_segmentation/cv_nl.json | 5 + .../test_segmentation/cv_triple.json | 9 + .../test_segmentation/dumb_segment.py | 71 + .../test_segmentation/ec_train.json | 14 + .../test_segmentation/ec_train_ants2.json | 20 + .../test_segmentation/ec_train_triple.json | 11 + .../library_description.json | 21 + .../library_description_ants.json | 22 + .../library_description_nl.json | 22 + .../library_description_triple.json | 23 + .../test_segmentation/prepare_test_data.sh | 50 + .../test_segmentation/run_test.sh | 105 + .../test_segmentation/run_test2.sh | 107 + .../test_segmentation/run_test3.sh | 104 + .../test_segmentation/run_test4.sh | 107 + .../test_segmentation/run_test5.sh | 105 + .../test_segmentation/run_test6.sh | 106 + .../test_segmentation/run_test_ants.sh | 103 + .../test_segmentation/run_test_ants2.sh | 120 + .../test_segmentation/run_test_ants3.sh | 121 + .../test_segmentation/run_test_ants4.sh | 129 + .../test_segmentation/run_test_ants5.sh | 128 + .../run_test_ants_elastix.sh | 175 ++ .../test_segmentation/run_test_ext.sh | 85 + .../test_segmentation/run_test_nl.sh | 108 + .../test_segmentation/run_test_nnls.sh | 111 + .../test_segmentation/run_test_python.py | 91 + .../run_test_python_grid_search.py | 157 ++ 
.../test_segmentation/run_test_triple.sh | 107 + .../test_segmentation/run_test_xgb.sh | 107 + .../test_segmentation/seg_subjects.lst | 7 + .../test_segmentation/seg_subjects_triple.lst | 7 + .../test_segmentation/segment.json | 24 + .../test_segmentation/segment_ants.json | 38 + .../test_segmentation/segment_ants2.json | 28 + .../test_segmentation/segment_nl_ext.json | 23 + .../test_segmentation/segment_nnls.json | 28 + .../test_segmentation/segment_triple.json | 25 + .../validation_dataset/run_validation_nc.sh | 50 + ipl/__init__.py | 10 + ipl/ants_registration.py | 667 ++++++ ipl/create_pairwise_registrations.py | 336 +++ ipl/dd_registration.py | 272 +++ ipl/elastix_registration.py | 729 ++++++ ipl/grading/__init__.py | 38 + ipl/grading/analysis.py | 165 ++ ipl/grading/cross_validation.py | 387 +++ ipl/grading/filter.py | 248 ++ ipl/grading/fuse.py | 830 +++++++ ipl/grading/fuse_grading.py | 208 ++ ipl/grading/labels.py | 58 + ipl/grading/library.py | 109 + ipl/grading/model.py | 133 ++ ipl/grading/preselect.py | 119 + ipl/grading/qc.py | 166 ++ ipl/grading/registration.py | 638 +++++ ipl/grading/resample.py | 296 +++ ipl/grading/structures.py | 171 ++ ipl/grading/train.py | 653 +++++ ipl/lp/__init__.py | 10 + ipl/lp/aqc.py | 38 + ipl/lp/iter_pipeline.py | 368 +++ ipl/lp/pipeline.py | 638 +++++ ipl/lp/preprocess.py | 175 ++ ipl/lp/qc.py | 91 + ipl/lp/registration.py | 333 +++ ipl/lp/resample.py | 99 + ipl/lp/segment.py | 82 + ipl/lp/structures.py | 265 +++ ipl/lp/utils.py | 85 + ipl/minc_hl.py | 267 +++ ipl/minc_qc.py | 449 ++++ ipl/minc_tools.py | 2112 +++++++++++++++++ ipl/model/__init__.py | 5 + ipl/model/filter.py | 505 ++++ ipl/model/generate_linear.py | 321 +++ ipl/model/generate_nonlinear.py | 342 +++ ipl/model/registration.py | 858 +++++++ ipl/model/regress.py | 450 ++++ ipl/model/resample.py | 161 ++ ipl/model/structures.py | 180 ++ ipl/model_ldd/__init__.py | 5 + ipl/model_ldd/filter_ldd.py | 453 ++++ ipl/model_ldd/generate_nonlinear_ldd.py | 260 ++ ipl/model_ldd/registration_ldd.py | 311 +++ ipl/model_ldd/regress_ldd.py | 457 ++++ ipl/model_ldd/resample_ldd.py | 118 + ipl/model_ldd/structures_ldd.py | 181 ++ ipl/qc/metric.py | 153 ++ ipl/registration.py | 853 +++++++ ipl/segment/__init__.py | 44 + ipl/segment/analysis.py | 150 ++ ipl/segment/cerebellum_qc_v5.lut | 31 + ipl/segment/cross_validation.py | 504 ++++ ipl/segment/error_correction.py | 793 +++++++ ipl/segment/filter.py | 263 ++ ipl/segment/fuse.py | 906 +++++++ ipl/segment/fuse_segmentations.py | 390 +++ ipl/segment/labels.py | 58 + ipl/segment/library.py | 129 + ipl/segment/model.py | 116 + ipl/segment/preselect.py | 109 + ipl/segment/qc.py | 275 +++ ipl/segment/registration.py | 673 ++++++ ipl/segment/resample.py | 367 +++ ipl/segment/structures.py | 169 ++ ipl/segment/train.py | 672 ++++++ ipl/segment/train_ec.py | 398 ++++ ipl/temp_files.py | 0 ipl/test.jpg | Bin 0 -> 120188 bytes 230 files changed, 32241 insertions(+) create mode 100644 examples/add_configuration/cerebellum_segment_options.json create mode 100644 examples/add_configuration/cv_cerebellum.json create mode 100644 examples/add_configuration/manual_library_cerebellum.json create mode 100644 examples/real_tests/test_grading/cv.json create mode 100644 examples/real_tests/test_grading/cv_results.R create mode 100644 examples/real_tests/test_grading/grade.json create mode 100644 examples/real_tests/test_grading/library_description.json create mode 100755 examples/real_tests/test_grading/run_test.sh create mode 100755 
examples/real_tests/test_grading/run_test_fast.sh create mode 100755 examples/real_tests/test_grading/run_test_linear.sh create mode 100755 examples/real_tests/test_grading/run_test_separate.sh create mode 100755 examples/real_tests/test_grading/run_test_slow.sh create mode 100644 examples/real_tests/test_segmentation/.gitignore create mode 100644 examples/real_tests/test_segmentation/cv_ants_ln.json create mode 100644 examples/real_tests/test_segmentation/cv_ants_ln3.json create mode 100644 examples/real_tests/test_segmentation/cv_nl_results.R create mode 100644 examples/real_tests/test_segmentation/cv_nl_results_beta.R create mode 100644 examples/real_tests/test_segmentation/cv_re.json create mode 100644 examples/real_tests/test_segmentation/cv_re_2.json create mode 100644 examples/real_tests/test_segmentation/cv_results.R create mode 100644 examples/real_tests/test_segmentation/ec_library_bbox.csv create mode 100644 examples/real_tests/test_segmentation/ec_library_bbox_re.csv create mode 100644 examples/real_tests/test_segmentation/ec_train2.json create mode 100644 examples/real_tests/test_segmentation/ec_train_ants_ln.json create mode 100644 examples/real_tests/test_segmentation/ec_train_ants_ln3.json create mode 100644 examples/real_tests/test_segmentation/library_description_ants_ln.json create mode 100644 examples/real_tests/test_segmentation/library_description_ants_ln2.json create mode 100644 examples/real_tests/test_segmentation/library_description_ants_ln3.json create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ants.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ants2.2.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ants2.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ants_beta_0.5.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ants_beta_1.0.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ants_beta_new.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ants_beta_none.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ants_label_norm.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ants_nm.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ants_nuyl2.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ec.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ec2.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ec3.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ec_ants.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ec_dumb.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ec_dumb2.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ec_dumb_ants.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_ec_xgb.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_elastix.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_elx.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_elx2.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_elx3.sh create mode 100755 examples/real_tests/test_segmentation/run_test_adni_elx4.sh create mode 100644 examples/real_tests/test_segmentation/segment_ants_ln.json create mode 100644 examples/real_tests/test_segmentation/segment_ants_ln3.json 
create mode 100644 examples/synthetic_tests/test_lng_model/.gitignore create mode 100644 examples/synthetic_tests/test_lng_model/log_std.txt create mode 100755 examples/synthetic_tests/test_lng_model/make_lin_graph.sh create mode 100755 examples/synthetic_tests/test_lng_model/prepare_test_data.sh create mode 100755 examples/synthetic_tests/test_lng_model/prepare_test_data_ldd.sh create mode 100755 examples/synthetic_tests/test_lng_model/prepare_test_data_lin.sh create mode 100755 examples/synthetic_tests/test_lng_model/prepare_test_data_rot.sh create mode 100755 examples/synthetic_tests/test_lng_model/run_all_ldd.sh create mode 100644 examples/synthetic_tests/test_lng_model/scoop_test_ldd.py create mode 100644 examples/synthetic_tests/test_lng_model/subjects_1.lst create mode 100644 examples/synthetic_tests/test_lng_model/subjects_ldd.lst create mode 100644 examples/synthetic_tests/test_lng_model/subjects_ldd_cut.lst create mode 100644 examples/synthetic_tests/test_lng_model/subjects_lin.lst create mode 100644 examples/synthetic_tests/test_lng_model/subjects_lin_cut.lst create mode 100644 examples/synthetic_tests/test_lng_model/subjects_nomask.lst create mode 100644 examples/synthetic_tests/test_lng_model/subjects_rot.lst create mode 100644 examples/synthetic_tests/test_lng_model/subjects_rot_1.lst create mode 100644 examples/synthetic_tests/test_lng_model/subjects_rot_2.lst create mode 100644 examples/synthetic_tests/test_lng_model/subjects_rot_2a.lst create mode 100644 examples/synthetic_tests/test_lng_model/subjects_rot_cut.lst create mode 100644 examples/synthetic_tests/test_lng_model/test_lng_model.py create mode 100644 examples/synthetic_tests/test_lng_model/test_lng_model_LCC.py create mode 100644 examples/synthetic_tests/test_lng_model/test_lng_model_LCC_1.py create mode 100644 examples/synthetic_tests/test_lng_model/test_lng_model_LCC_ldd.py create mode 100644 examples/synthetic_tests/test_lng_model/test_lng_model_LCC_lin.py create mode 100644 examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot.py create mode 100644 examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot_m.py create mode 100644 examples/synthetic_tests/test_lng_model/test_lng_model_lim.py create mode 100644 examples/synthetic_tests/test_lng_model/test_lng_model_lim_nd.py create mode 100644 examples/synthetic_tests/test_lng_model/test_lng_model_nomask.py create mode 100644 examples/synthetic_tests/test_lng_model/test_lng_model_std.py create mode 100644 examples/synthetic_tests/test_lng_model/test_model_LCC_ldd.py create mode 100644 examples/synthetic_tests/test_lng_model/test_model_SSD_ldd.py create mode 100644 examples/synthetic_tests/test_lng_model/test_model_std.py create mode 100644 examples/synthetic_tests/test_lng_model/test_regression.py create mode 100644 examples/synthetic_tests/test_model_creation/.gitignore create mode 100644 examples/synthetic_tests/test_model_creation/big_subjects.lst create mode 100755 examples/synthetic_tests/test_model_creation/prepare_test_data.sh create mode 100755 examples/synthetic_tests/test_model_creation/prepare_test_data_with_bias.sh create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_1.py create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_2.py create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_bias.py create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_bias_n4.py create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_bias_sym.py create mode 
100644 examples/synthetic_tests/test_model_creation/scoop_test_downsample.py create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_ldd.py create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_ldd_sym.py create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_lsq6.py create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_nl.py create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_nl_ants.py create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_nl_dd.py create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_nl_elastix.py create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_nl_sym.py create mode 100644 examples/synthetic_tests/test_model_creation/scoop_test_nobias.py create mode 100644 examples/synthetic_tests/test_model_creation/subjects.lst create mode 100755 examples/synthetic_tests/test_model_creation/test_nl.py create mode 100644 examples/synthetic_tests/test_model_creation/test_scipy.xfm create mode 100644 examples/synthetic_tests/test_model_creation/test_std.xfm create mode 100644 examples/synthetic_tests/test_registration/.gitignore create mode 100755 examples/synthetic_tests/test_registration/prepare_test_data.sh create mode 100755 examples/synthetic_tests/test_registration/test_ants.sh create mode 100755 examples/synthetic_tests/test_registration/test_ldd_reg.py create mode 100755 examples/synthetic_tests/test_registration/test_nl_reg.py create mode 100644 examples/synthetic_tests/test_segmentation/.gitignore create mode 100644 examples/synthetic_tests/test_segmentation/cv.json create mode 100644 examples/synthetic_tests/test_segmentation/cv_ants.json create mode 100644 examples/synthetic_tests/test_segmentation/cv_ants2.json create mode 100644 examples/synthetic_tests/test_segmentation/cv_nl.json create mode 100644 examples/synthetic_tests/test_segmentation/cv_triple.json create mode 100755 examples/synthetic_tests/test_segmentation/dumb_segment.py create mode 100644 examples/synthetic_tests/test_segmentation/ec_train.json create mode 100644 examples/synthetic_tests/test_segmentation/ec_train_ants2.json create mode 100644 examples/synthetic_tests/test_segmentation/ec_train_triple.json create mode 100644 examples/synthetic_tests/test_segmentation/library_description.json create mode 100644 examples/synthetic_tests/test_segmentation/library_description_ants.json create mode 100644 examples/synthetic_tests/test_segmentation/library_description_nl.json create mode 100644 examples/synthetic_tests/test_segmentation/library_description_triple.json create mode 100755 examples/synthetic_tests/test_segmentation/prepare_test_data.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test2.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test3.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test4.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test5.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test6.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test_ants.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test_ants2.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test_ants3.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test_ants4.sh create mode 100755 
examples/synthetic_tests/test_segmentation/run_test_ants5.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test_ants_elastix.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test_ext.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test_nl.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test_nnls.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test_python.py create mode 100755 examples/synthetic_tests/test_segmentation/run_test_python_grid_search.py create mode 100755 examples/synthetic_tests/test_segmentation/run_test_triple.sh create mode 100755 examples/synthetic_tests/test_segmentation/run_test_xgb.sh create mode 100644 examples/synthetic_tests/test_segmentation/seg_subjects.lst create mode 100644 examples/synthetic_tests/test_segmentation/seg_subjects_triple.lst create mode 100644 examples/synthetic_tests/test_segmentation/segment.json create mode 100644 examples/synthetic_tests/test_segmentation/segment_ants.json create mode 100644 examples/synthetic_tests/test_segmentation/segment_ants2.json create mode 100644 examples/synthetic_tests/test_segmentation/segment_nl_ext.json create mode 100644 examples/synthetic_tests/test_segmentation/segment_nnls.json create mode 100644 examples/synthetic_tests/test_segmentation/segment_triple.json create mode 100755 examples/validation_dataset/run_validation_nc.sh create mode 100755 ipl/__init__.py create mode 100755 ipl/ants_registration.py create mode 100755 ipl/create_pairwise_registrations.py create mode 100644 ipl/dd_registration.py create mode 100755 ipl/elastix_registration.py create mode 100644 ipl/grading/__init__.py create mode 100644 ipl/grading/analysis.py create mode 100644 ipl/grading/cross_validation.py create mode 100644 ipl/grading/filter.py create mode 100644 ipl/grading/fuse.py create mode 100644 ipl/grading/fuse_grading.py create mode 100644 ipl/grading/labels.py create mode 100644 ipl/grading/library.py create mode 100644 ipl/grading/model.py create mode 100644 ipl/grading/preselect.py create mode 100644 ipl/grading/qc.py create mode 100644 ipl/grading/registration.py create mode 100644 ipl/grading/resample.py create mode 100644 ipl/grading/structures.py create mode 100644 ipl/grading/train.py create mode 100644 ipl/lp/__init__.py create mode 100644 ipl/lp/aqc.py create mode 100644 ipl/lp/iter_pipeline.py create mode 100644 ipl/lp/pipeline.py create mode 100644 ipl/lp/preprocess.py create mode 100644 ipl/lp/qc.py create mode 100644 ipl/lp/registration.py create mode 100644 ipl/lp/resample.py create mode 100644 ipl/lp/segment.py create mode 100644 ipl/lp/structures.py create mode 100644 ipl/lp/utils.py create mode 100755 ipl/minc_hl.py create mode 100755 ipl/minc_qc.py create mode 100755 ipl/minc_tools.py create mode 100644 ipl/model/__init__.py create mode 100644 ipl/model/filter.py create mode 100644 ipl/model/generate_linear.py create mode 100644 ipl/model/generate_nonlinear.py create mode 100644 ipl/model/registration.py create mode 100644 ipl/model/regress.py create mode 100644 ipl/model/resample.py create mode 100644 ipl/model/structures.py create mode 100644 ipl/model_ldd/__init__.py create mode 100644 ipl/model_ldd/filter_ldd.py create mode 100644 ipl/model_ldd/generate_nonlinear_ldd.py create mode 100644 ipl/model_ldd/registration_ldd.py create mode 100644 ipl/model_ldd/regress_ldd.py create mode 100644 ipl/model_ldd/resample_ldd.py create mode 100644 ipl/model_ldd/structures_ldd.py create mode 100755 ipl/qc/metric.py 
create mode 100644 ipl/registration.py create mode 100644 ipl/segment/__init__.py create mode 100644 ipl/segment/analysis.py create mode 100644 ipl/segment/cerebellum_qc_v5.lut create mode 100644 ipl/segment/cross_validation.py create mode 100755 ipl/segment/error_correction.py create mode 100644 ipl/segment/filter.py create mode 100644 ipl/segment/fuse.py create mode 100644 ipl/segment/fuse_segmentations.py create mode 100644 ipl/segment/labels.py create mode 100644 ipl/segment/library.py create mode 100644 ipl/segment/model.py create mode 100644 ipl/segment/preselect.py create mode 100644 ipl/segment/qc.py create mode 100644 ipl/segment/registration.py create mode 100644 ipl/segment/resample.py create mode 100644 ipl/segment/structures.py create mode 100644 ipl/segment/train.py create mode 100644 ipl/segment/train_ec.py create mode 100644 ipl/temp_files.py create mode 100644 ipl/test.jpg diff --git a/examples/add_configuration/cerebellum_segment_options.json b/examples/add_configuration/cerebellum_segment_options.json new file mode 100644 index 0000000..96ce75e --- /dev/null +++ b/examples/add_configuration/cerebellum_segment_options.json @@ -0,0 +1,10 @@ +{ + "local_linear_register": true, + "non_linear_pairwise": false, + "simple_fusion": false, + "non_linear_register_level": 2, + "resample_order": 2, + "resample_baa": true, + "library_preselect":10, + "fuse_options": {"patch":1,"search":1,"threshold":0.0,"gco_energy":null} +} \ No newline at end of file diff --git a/examples/add_configuration/cv_cerebellum.json b/examples/add_configuration/cv_cerebellum.json new file mode 100644 index 0000000..56d9731 --- /dev/null +++ b/examples/add_configuration/cv_cerebellum.json @@ -0,0 +1,3 @@ +{ + "library":"manual_library_cerebellum.lst" +} \ No newline at end of file diff --git a/examples/add_configuration/manual_library_cerebellum.json b/examples/add_configuration/manual_library_cerebellum.json new file mode 100644 index 0000000..5abb6c7 --- /dev/null +++ b/examples/add_configuration/manual_library_cerebellum.json @@ -0,0 +1,46 @@ +{ + "reference_model": "../../models/icbm152_model_09c/mni_icbm152_t1_tal_nlin_sym_09c.mnc", + "reference_mask": "../../models/icbm152_model_09c/mni_icbm152_t1_tal_nlin_sym_09c_mask.mnc", + "library":"manual_library_cerebellum.lst", + "build_remap": [ [101,1], + [103,2], + [105,3], + [107,4], + [108,5], + [109,6], + [113,7], + [115,8], + [116,9], + [117,10], + [119,11], + [121,12], + [123,13], + [125,14], + [131,15], + [133,16], + [135,17], + [137,18], + [138,19], + [139,20], + [143,21], + [145,22], + [146,23], + [147,24], + [149,25], + [153,26], + [155,27], + [171,28], + [200,29], + [201,30], + [300,31], + [301,32], + [400,33], + [500,34] ] , + "classes": 35, + "linear_register": false, + "local_linear_register": true, + "non_linear_register": true, + "non_linear_register_level": 2, + "resample_baa": true, + "resample_order": 2 +} diff --git a/examples/real_tests/test_grading/cv.json b/examples/real_tests/test_grading/cv.json new file mode 100644 index 0000000..54e03a0 --- /dev/null +++ b/examples/real_tests/test_grading/cv.json @@ -0,0 +1,8 @@ +{ + "validation_library":"snipe_library.lst", + "iterations":-1, + "cv":1, + "fuse_variant":"fuse", + "cv_variant":"cv", + "regularize_variant":"reg_1" +} diff --git a/examples/real_tests/test_grading/cv_results.R b/examples/real_tests/test_grading/cv_results.R new file mode 100644 index 0000000..8ec3d98 --- /dev/null +++ b/examples/real_tests/test_grading/cv_results.R @@ -0,0 +1,70 @@ +library(ggplot2) +library(jsonlite) 
+library(grid) +library(plyr) +theme_set(theme_bw(base_size = 14, base_family = "Arial")) + +cv_slow<-fromJSON("test_cv/cv_stats.json") +cv_fast<-fromJSON("test_cv_fast/cv_stats.json") +cv_lin<-fromJSON("test_cv_lin/cv_stats.json") +cv_v_slow<-fromJSON("test_cv_slow/cv_stats.json") +cv_v_slow_1<-fromJSON("test_cv_slow_1/cv_stats.json") + +cv_kappa<-data.frame( + cv_slow=cv_slow$gkappa, + cv_fast=cv_fast$gkappa, + cv_lin=cv_lin$gkappa, + cv_v_slow=cv_v_slow$gkappa, + cv_v_slow_1=cv_v_slow_1$gkappa + ) + +cvv<-stack(cv_kappa) + +names(cvv)=c('GenKappa','Method') + +png('cv_kappa.png',width=800,height=400,type='cairo') + +ggplot(data=cvv,aes(x=Method,y=GenKappa))+ + geom_boxplot(notch=T)+ + theme_bw()+ + theme( + axis.text = element_text(face = 'bold', vjust = 0.2, size = 18), + axis.title = element_text(face = 'bold', vjust = 0.2, size = 20), + plot.margin = unit(c(0.2,2.8,0.2,0.2), "cm") + ) + + +slen=length(names(cv_slow$result)) +lcv <- vector(mode = "list", length = slen) + +for(l in seq(slen)) { + i=names(cv_slow$result)[l] + cv_grading<-data.frame( + grad_slow=cv_slow$result[,i]$grad, + grad_fast=cv_fast$result[,i]$grad, + grad_lin=cv_lin$result[,i]$grad, + grad_v_slow=cv_v_slow$result[,i]$grad, + grad_v_slow_1=cv_v_slow_1$result[,i]$grad + ) + lcv[[l]]<-stack(cv_grading) + names(lcv[[l]])=c('Grading','Method') + lcv[[l]]$group=rep(cv_slow$group,length(names(cv_grading))) + lcv[[l]]$struct=rep(i,length(lcv[[l]]$group)) +} + +cvv<-rbind.fill(lcv) +cvv$struct<-as.factor(as.numeric(cvv$struct)) +cvv$group<-as.factor(cvv$group) + +png('cv_grading.png',width=800,height=800,type='cairo') + +ggplot(data=cvv,aes(x=group,y=Grading,colour=Method))+ + geom_boxplot(notch=T)+ + theme_bw()+ + facet_grid(struct~Method)+ + geom_abline(intercept=0,slope=0,colour='red',lty=2)+ + theme( + axis.text = element_text(face = 'bold', vjust = 0.2, size = 18), + axis.title = element_text(face = 'bold', vjust = 0.2, size = 20), + plot.margin = unit(c(0.2,2.8,0.2,0.2), "cm") + ) diff --git a/examples/real_tests/test_grading/grade.json b/examples/real_tests/test_grading/grade.json new file mode 100644 index 0000000..e750095 --- /dev/null +++ b/examples/real_tests/test_grading/grade.json @@ -0,0 +1,35 @@ +{ + "initial_local_register": false, + "non_linear_pairwise": false, + "non_linear_register": true, + "non_linear_register_ants": true, + + "non_linear_register_level": 2, + "non_linear_register_start": 8, + + "non_linear_register_options": { + "conf": {"8":100,"4":40,"2":40,"1": 20 }, + "blur": {"8":8 ,"4":4 ,"2":2, "1": 1 }, + "shrink": {"8":8 ,"4":4 ,"2":2, "1": 1 }, + + "transformation": "SyN[ .25, 1.0 , 1.0 ]", + "use_histogram_matching": true, + "cost_function":"CC", + "cost_function_par":"1,3,Regular,1.0" + }, + + "simple_fusion": false, + "resample_order": 1, + "resample_baa": true, + "library_preselect": -1, + "segment_symmetric": false, + + "fuse_options": + { + "patch": 1, + "search": 1, + "threshold": 0.0, + "top": 4 + } + +} diff --git a/examples/real_tests/test_grading/library_description.json b/examples/real_tests/test_grading/library_description.json new file mode 100644 index 0000000..7f4ac27 --- /dev/null +++ b/examples/real_tests/test_grading/library_description.json @@ -0,0 +1,50 @@ +{ + "reference_model": "snipe_library/NC/T1/ADNI.stx_011_S_0002_m00_bbox_snipe.mnc", + "reference_mask": "snipe_library/whole.mnc", + + "reference_local_model" : null, + "reference_local_mask" : null, + "library":"snipe_library.lst", + + "build_remap": [ [2,1], + [4,2], + [19,3], + [21,4]], + + "build_flip_remap": 
null, + "parts": 0, + "classes": 5, + "groups": 2, + "build_symmetric": false, + "build_symmetric_flip": false, + "symmetric_lut": null, + "denoise": false, + "denoise_beta": null, + + "initial_register": false, + "initial_local_register": false, + + "non_linear_register": true, + "non_linear_register_type": "ants", + + "non_linear_register_level": 2, + "non_linear_register_start": 8, + + "non_linear_register_options": { + "conf": {"8":100,"4":40,"2":40,"1": 20 }, + "blur": {"8":8 ,"4":4 ,"2":2, "1": 1 }, + "shrink": {"8":8 ,"4":4 ,"2":2, "1": 1 }, + + "transformation": "SyN[ .25, 1.0 , 1.0 ]", + "use_histogram_matching": true, + "cost_function":"CC", + "cost_function_par":"1,3,Regular,1.0" + }, + + "resample_order": 1, + "resample_baa": true, + "extend_boundary": 4, + "op_mask": "E[2] D[4]", + + "create_patch_norm_lib": false +} diff --git a/examples/real_tests/test_grading/run_test.sh b/examples/real_tests/test_grading/run_test.sh new file mode 100755 index 0000000..828b1dd --- /dev/null +++ b/examples/real_tests/test_grading/run_test.sh @@ -0,0 +1,131 @@ +#! /bin/sh +set -e + +PREFIX=$(pwd)/../../python + +export PYTHONPATH=$PREFIX:$PYTHONPATH + + +cat - > library_description.json < cv.json < grade.json < library_description.json < cv.json < grade.json < library_description.json < cv.json < grade_lin.json < library_description.json < cv.json < grade.json < library_description.json < cv.json < grade_slow.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < cv_re.json <>ec_library_bbox_re.csv +done + + +if [ ! -e test_cv_nl2_re/cv_2_stats.json ];then + +export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1 +python -m scoop -n $PARALLEL -vvv \ + $PREFIX/iplScoopFusionSegmentation.py \ + --output test_cv_nl2_re \ + --debug \ + --segment test_lib_nl2 \ + --cv cv_re.json \ + --options segment.json \ + --cleanup --ext +fi + + +if [ ! 
-e test_cv_nl2_re_ec/cv_2_stats.json ];then +PARALLEL=3 +export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1 +python -m scoop -n $PARALLEL -vvv \ + $PREFIX/iplScoopFusionSegmentation.py \ + --output test_cv_nl2_re_ec \ + --debug \ + --segment test_lib_nl2 \ + --cv cv_re.json \ + --options segment.json \ + --cleanup --ext \ + --train-ec ec_train.json +fi + +cat - > cv_re_2.json < ec_train2.json < library_description.json < cv.json < segment.json < library_description.json < cv.json < segment.json < library_description.json < cv.json < segment.json < library_description.json < cv.json < segment.json < library_description_${V}.json < cv_${V}.json < segment_${V}.json < ec_train_${V}.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < + int_par_count=3, + File "/home/vfonov/src/nihpd_pipeline/python/ipl/model/regress.py", line 364, in regress_csv + initial_def_model=initial_def_model) + File "/home/vfonov/src/nihpd_pipeline/python/ipl/model/regress.py", line 190, in regress + futures.wait(r, return_when=futures.ALL_COMPLETED) + File "build/bdist.linux-x86_64/egg/scoop/futures.py", line 397, in wait + for _ in _waitAll(*fs): + File "build/bdist.linux-x86_64/egg/scoop/futures.py", line 364, in _waitAll + for f in _waitAny(future): + File "build/bdist.linux-x86_64/egg/scoop/futures.py", line 341, in _waitAny + raise childFuture.exceptionValue +mincError: mincError('ERROR: command ['minctracc', '/tmp/iplMincTools7UZrh7/vglhuzobject_0_0.mnc_int_approx.002_blur_2.0.mnc', '/tmp/iplMincTools7UZrh7/Or3K6Robject_0_0_blur_2.0.mnc', '-clobber', '-nonlinear', 'corrcoeff', '-weight', '1', '-stiffness', '1', '-similarity', '0.3', '-sub_lattice', '6', '-iterations', '10', '-lattice_diam', '12.0', '12.0', '12.0', '-step', '4.0', '4.0', '4.0', '-transformation', '/tmp/iplMincTools0xSAdH/pXWAYninit.xfm', '-source_mask', 'tmp_regress_std/2/object_0_0.mnc_int_approx.002_mask.mnc', '-model_mask', 'data/mask_0_0.mnc', '/tmp/iplMincTools7UZrh7/l8AeXTobject_0_0.mnc_int_approx.002_object_0_0_5.xfm'] failed 255! +Message: Error in minctracc in file /home/vfonov/src/minc-toolkit-itk4/mni_autoreg/minctracc/Volume/init_lattice.c, line 551 +Cannot calculate size of volume 1 +. 
+Traceback (most recent call last): + File "build/bdist.linux-x86_64/egg/scoop/_control.py", line 122, in runFuture + uniqueReference = [cb.groupID for cb in future.callback][0] +IndexError: list index out of range +') +AT:[('build/bdist.linux-x86_64/egg/scoop/_control.py', 127, 'runFuture', 'future.resultValue = future.callable(*future.args, **future.kargs)'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/model/registration.py', 661, 'non_linear_register_step_regress_std', 'downsample=downsample,'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/registration.py', 475, 'non_linear_register_full', 'outputs=[tmp_xfm] )'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/minc_tools.py', 399, 'command', 'raise mincError("ERROR: command {} failed {}!\\nMessage: {}\\n{}".format(str(cmds),str(outvalue),output_stderr,traceback.format_exc()))'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/minc_tools.py', 48, '__init__', 'self.stack = traceback.extract_stack()')] +Traceback (most recent call last): + File "/usr/lib/python2.7/runpy.py", line 162, in _run_module_as_main + "__main__", fname, loader, pkg_name) + File "/usr/lib/python2.7/runpy.py", line 72, in _run_code + exec code in run_globals + File "build/bdist.linux-x86_64/egg/scoop/bootstrap/__main__.py", line 302, in + File "build/bdist.linux-x86_64/egg/scoop/bootstrap/__main__.py", line 92, in main + File "build/bdist.linux-x86_64/egg/scoop/bootstrap/__main__.py", line 290, in run + File "build/bdist.linux-x86_64/egg/scoop/bootstrap/__main__.py", line 271, in futures_startup + File "build/bdist.linux-x86_64/egg/scoop/futures.py", line 64, in _startup + File "build/bdist.linux-x86_64/egg/scoop/_control.py", line 253, in runController +ipl.minc_tools.mincError: mincError('ERROR: command ['minctracc', '/tmp/iplMincTools7UZrh7/vglhuzobject_0_0.mnc_int_approx.002_blur_2.0.mnc', '/tmp/iplMincTools7UZrh7/Or3K6Robject_0_0_blur_2.0.mnc', '-clobber', '-nonlinear', 'corrcoeff', '-weight', '1', '-stiffness', '1', '-similarity', '0.3', '-sub_lattice', '6', '-iterations', '10', '-lattice_diam', '12.0', '12.0', '12.0', '-step', '4.0', '4.0', '4.0', '-transformation', '/tmp/iplMincTools0xSAdH/pXWAYninit.xfm', '-source_mask', 'tmp_regress_std/2/object_0_0.mnc_int_approx.002_mask.mnc', '-model_mask', 'data/mask_0_0.mnc', '/tmp/iplMincTools7UZrh7/l8AeXTobject_0_0.mnc_int_approx.002_object_0_0_5.xfm'] failed 255! +Message: Error in minctracc in file /home/vfonov/src/minc-toolkit-itk4/mni_autoreg/minctracc/Volume/init_lattice.c, line 551 +Cannot calculate size of volume 1 +. 
+Traceback (most recent call last):
+  File "build/bdist.linux-x86_64/egg/scoop/_control.py", line 122, in runFuture
+    uniqueReference = [cb.groupID for cb in future.callback][0]
+IndexError: list index out of range
+')
+AT:[('build/bdist.linux-x86_64/egg/scoop/_control.py', 127, 'runFuture', 'future.resultValue = future.callable(*future.args, **future.kargs)'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/model/registration.py', 661, 'non_linear_register_step_regress_std', 'downsample=downsample,'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/registration.py', 475, 'non_linear_register_full', 'outputs=[tmp_xfm] )'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/minc_tools.py', 399, 'command', 'raise mincError("ERROR: command {} failed {}!\\nMessage: {}\\n{}".format(str(cmds),str(outvalue),output_stderr,traceback.format_exc()))'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/minc_tools.py', 48, '__init__', 'self.stack = traceback.extract_stack()')]
+[2015-10-28 12:39:46,385] launcher (127.0.0.1:60389) INFO Root process is done.
+[2015-10-28 12:39:46,385] launcher (127.0.0.1:60389) INFO Finished cleaning spawned subprocesses.
diff --git a/examples/synthetic_tests/test_lng_model/make_lin_graph.sh b/examples/synthetic_tests/test_lng_model/make_lin_graph.sh
new file mode 100755
index 0000000..ed100e3
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/make_lin_graph.sh
@@ -0,0 +1,34 @@
+#! /bin/sh
+
+tempdir=`mktemp -t testXXXX -d`
+trap "rm -rf $tempdir" 0 1 2 15
+
+rm -f $tempdir/lst
+
+for i in $(seq 0 8);do
+
+  mincpik --image_range 0 100 data_lin/object_0_${i}.mnc $tempdir/object_0_${i}.miff
+
+  echo $tempdir/object_0_${i}.miff >> $tempdir/lst
+  echo $i
+
+  for it in $(seq 2 20);do
+    printf -v in "tmp_regress_LCC_lin_4mm/%d/object_0_%d.mnc_int_approx.%03d.mnc" $it $i ${it}
+    mincpik --image_range 0 100 $in $tempdir/${it}_${i}.miff
+    convert -shave 4x4 $tempdir/${it}_${i}.miff $tempdir/${it}_${i}.miff
+    echo $tempdir/${it}_${i}.miff >> $tempdir/lst
+    echo $it $i
+  done
+done
+
+echo "-label Inp null:" >> $tempdir/lst
+
+for it in $(seq -w 2 20);do
+  mincpik --image_range 0 20 tmp_regress_LCC_lin_4mm/model_intensity.0${it}_RMS.mnc $tempdir/${it}_RMS.miff
+  echo "-label $it $tempdir/${it}_RMS.miff" >> $tempdir/lst
+  echo $it
+done
+
+
+montage -geometry 152x152+1+1 -background black -fill white -tile 20x10 -pointsize 40 $(cat $tempdir/lst) \
+  lin_progression.png
diff --git a/examples/synthetic_tests/test_lng_model/prepare_test_data.sh b/examples/synthetic_tests/test_lng_model/prepare_test_data.sh
new file mode 100755
index 0000000..d40ff13
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/prepare_test_data.sh
@@ -0,0 +1,41 @@
+#! /bin/sh
+
+object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 80 80 80 -step 1 1 1 -start -40 -40 -40"
+
+# make growing "object"
+# 1st group: body growth 4% each step, hand growth 8%
+# 2nd group: body growth 8% each step, hand growth 8%
+
+tempdir=`mktemp -t testXXXX -d`
+trap "rm -rf $tempdir" 0 1 2 15
+
+mkdir -p data
+
+rm -f subjects.lst
+
+for i in $(seq 0 8);do
+
+  main_dim_1=$(echo "10*1.04^${i}"|bc -l)
+  main_dim_2=$(echo "10*1.08^${i}"|bc -l)
+
+  handle_width=$(echo "20*1.08^${i}"|bc -l)
+  handle_height=$(echo "5*1.08^${i}"|bc -l)
+
+  make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_1} ${main_dim_1} ${main_dim_1} $tempdir/ellipse_1.mnc -clob
+  make_phantom $object_opts -ellipse -center 10 0 0 -width ${handle_width} ${handle_height} ${handle_height} $tempdir/ellipse_2.mnc -clob
+  make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_2} ${main_dim_2} ${main_dim_2} $tempdir/ellipse_3.mnc -clob
+
+  mincmath -max $tempdir/ellipse_1.mnc $tempdir/ellipse_2.mnc data/object_0_$i.mnc -clob
+  itk_morph --threshold 1 --exp 'D[2]' data/object_0_$i.mnc $tempdir/mask_0_$i.mnc --clob
+  mincresample -nearest -like data/object_0_$i.mnc $tempdir/mask_0_$i.mnc data/mask_0_$i.mnc
+
+  mincmath -max $tempdir/ellipse_3.mnc $tempdir/ellipse_2.mnc data/object_1_$i.mnc -clob
+  itk_morph --threshold 1 --exp 'D[2]' data/object_1_$i.mnc $tempdir/mask_1_$i.mnc --clob
+  mincresample -nearest -like data/object_1_$i.mnc $tempdir/mask_1_$i.mnc data/mask_1_$i.mnc
+
+  echo data/object_0_$i.mnc,data/mask_0_$i.mnc,1.0,1.0,0,$i >> subjects.lst
+  echo data/object_1_$i.mnc,data/mask_1_$i.mnc,1.0,1.0,1,$i >> subjects.lst
+
+done
+
+cut -d , -f 1,2 subjects.lst > subjects_cut.lst
diff --git a/examples/synthetic_tests/test_lng_model/prepare_test_data_ldd.sh b/examples/synthetic_tests/test_lng_model/prepare_test_data_ldd.sh
new file mode 100755
index 0000000..c2ad94b
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/prepare_test_data_ldd.sh
@@ -0,0 +1,36 @@
+#! /bin/sh
+
+object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 100 100 100 -step 1 1 1 -start -50 -50 -50"
+
+tempdir=`mktemp -t testXXXX -d`
+trap "rm -rf $tempdir" 0 1 2 15
+out=data_ldd
+mkdir -p $out
+
+rm -f subjects_ldd.lst
+
+for i in $(seq 0 8);do
+
+  main_dim_1=50
+  main_dim_2=30
+  pos=$(echo "40-${i}*2"|bc -l)
+
+  make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_1} ${main_dim_1} ${main_dim_1} $tempdir/ellipse_1.mnc -clob
+  make_phantom $object_opts -ellipse -center $pos 0 0 -width ${main_dim_2} ${main_dim_2} ${main_dim_2} $tempdir/ellipse_2.mnc -clob
+
+  minccalc -express 'clamp(A[0]-A[1],0,100)' $tempdir/ellipse_1.mnc $tempdir/ellipse_2.mnc $tempdir/object_0_$i.mnc -clob
+
+  itk_morph --threshold 50 $tempdir/object_0_$i.mnc $tempdir/object_0_${i}_b.mnc
+  itk_distance --signed $tempdir/object_0_${i}_b.mnc $tempdir/object_0_${i}_bd.mnc
+  mincresample -nearest -like $tempdir/object_0_$i.mnc $tempdir/object_0_${i}_bd.mnc $tempdir/object_0_${i}_bd_.mnc -clob
+  minccalc -express '(A[0]<0?sin(A[0]*3.14/3)*10:exp(-A[0]/4))*10+A[1]+1.0' $tempdir/object_0_${i}_bd_.mnc $tempdir/object_0_$i.mnc ${out}/object_0_${i}.mnc
+
+
+  itk_morph --exp 'D[3]' $tempdir/object_0_${i}_b.mnc $tempdir/mask_0_$i.mnc --clob
+  mincresample -nearest -like ${out}/object_0_$i.mnc $tempdir/mask_0_$i.mnc ${out}/mask_0_$i.mnc -clob
+
+  echo ${out}/object_0_$i.mnc,${out}/mask_0_$i.mnc,1.0,1.0,$i >> subjects_ldd.lst
+  #exit
+done
+
+cut -d , -f 1,2 subjects_ldd.lst > subjects_ldd_cut.lst
diff --git a/examples/synthetic_tests/test_lng_model/prepare_test_data_lin.sh b/examples/synthetic_tests/test_lng_model/prepare_test_data_lin.sh
new file mode 100755
index 0000000..65920c8
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/prepare_test_data_lin.sh
@@ -0,0 +1,41 @@
+#! /bin/sh
+
+object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 80 80 80 -step 1 1 1 -start -40 -40 -40"
+
+# make growing "object"
+# 1st group: body growth 4% each step, hand growth 8%
+# 2nd group: body growth 8% each step, hand growth 8%
+
+tempdir=`mktemp -t testXXXX -d`
+trap "rm -rf $tempdir" 0 1 2 15
+out=data_lin
+mkdir -p data_lin
+
+rm -f subjects.lst
+
+for i in $(seq 0 8);do
+
+  main_dim_1=$(echo "10+${i}"|bc -l)
+  main_dim_2=$(echo "10+${i}/2"|bc -l)
+
+  handle_width=$(echo "20+${i}*2"|bc -l)
+  handle_height=$(echo "5+${i}"|bc -l)
+
+  make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_1} ${main_dim_1} ${main_dim_1} $tempdir/ellipse_1.mnc -clob
+  make_phantom $object_opts -ellipse -center 10 0 0 -width ${handle_width} ${handle_height} ${handle_height} $tempdir/ellipse_2.mnc -clob
+  make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_2} ${main_dim_2} ${main_dim_2} $tempdir/ellipse_3.mnc -clob
+
+  mincmath -max $tempdir/ellipse_1.mnc $tempdir/ellipse_2.mnc ${out}/object_0_$i.mnc -clob
+  itk_morph --threshold 1 --exp 'D[2]' ${out}/object_0_$i.mnc $tempdir/mask_0_$i.mnc --clob
+  mincresample -nearest -like ${out}/object_0_$i.mnc $tempdir/mask_0_$i.mnc ${out}/mask_0_$i.mnc
+
+  mincmath -max $tempdir/ellipse_3.mnc $tempdir/ellipse_2.mnc ${out}/object_1_$i.mnc -clob
+  itk_morph --threshold 1 --exp 'D[2]' ${out}/object_1_$i.mnc $tempdir/mask_1_$i.mnc --clob
+  mincresample -nearest -like ${out}/object_1_$i.mnc $tempdir/mask_1_$i.mnc ${out}/mask_1_$i.mnc
+
+  echo ${out}/object_0_$i.mnc,${out}/mask_0_$i.mnc,1.0,1.0,0,$i >> subjects_lin.lst
+  echo ${out}/object_1_$i.mnc,${out}/mask_1_$i.mnc,1.0,1.0,1,$i >> subjects_lin.lst
+
+done
+
+cut -d , -f 1,2 subjects_lin.lst > subjects_lin_cut.lst
diff --git a/examples/synthetic_tests/test_lng_model/prepare_test_data_rot.sh b/examples/synthetic_tests/test_lng_model/prepare_test_data_rot.sh
new file mode 100755
index 0000000..9ee47dc
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/prepare_test_data_rot.sh
@@ -0,0 +1,45 @@
+#! /bin/sh
+
+object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 80 80 80 -step 1 1 1 -start -40 -40 -40"
+
+# make growing "object"
+# 1st group: body growth 4% each step, hand growth 8%
+# 2nd group: body growth 8% each step, hand growth 8%
+
+tempdir=`mktemp -t testXXXX -d`
+trap "rm -rf $tempdir" 0 1 2 15
+out=data_rot
+mkdir -p data_rot
+
+rm -f subjects_rot.lst
+
+main_dim_1=10
+main_dim_2=10
+
+handle_width=20
+handle_height=5
+
+make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_1} ${main_dim_1} ${main_dim_1} $tempdir/ellipse_1.mnc -clob
+make_phantom $object_opts -ellipse -center 10 0 0 -width ${handle_width} ${handle_height} ${handle_height} $tempdir/ellipse_2.mnc -clob
+make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_2} ${main_dim_2} ${main_dim_2} $tempdir/ellipse_3.mnc -clob
+
+mincmath -max $tempdir/ellipse_1.mnc $tempdir/ellipse_2.mnc $tempdir/object_0_.mnc -clob
+minccalc -express 'A[0]+1.0' $tempdir/object_0_.mnc $tempdir/object_0.mnc -clob
+itk_morph --threshold 4 --exp 'D[2]' $tempdir/object_0.mnc $tempdir/mask_0.mnc --clob
+
+mincmath -max $tempdir/ellipse_3.mnc $tempdir/ellipse_2.mnc $tempdir/object_1.mnc -clob
+itk_morph --threshold 4 --exp 'D[2]' $tempdir/object_1.mnc $tempdir/mask_1.mnc --clob
+
+
+for i in $(seq 0 8);do
+
+  param2xfm -rotations 0 0 $(($i*10-40)) $tempdir/rot_$i.xfm
+
+  itk_resample --transform $tempdir/rot_$i.xfm $tempdir/object_0.mnc ${out}/object_0_$i.mnc --clob
+  itk_resample --transform $tempdir/rot_$i.xfm $tempdir/mask_0.mnc ${out}/mask_0_$i.mnc --byte --labels --like ${out}/object_0_$i.mnc --clob
+
+  echo ${out}/object_0_$i.mnc,${out}/mask_0_$i.mnc,1.0,1.0,$(($i-4)) >> subjects_rot.lst
+
+done
+
+cut -d , -f 1,2 subjects_rot.lst > subjects_rot_cut.lst
diff --git a/examples/synthetic_tests/test_lng_model/run_all_ldd.sh b/examples/synthetic_tests/test_lng_model/run_all_ldd.sh
new file mode 100755
index 0000000..8e8119c
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/run_all_ldd.sh
@@ -0,0 +1,6 @@
+#! /bin/sh
+
+python -m scoop -n 4 test_lng_model_LCC_ldd.py
+python -m scoop -n 4 test_model_std.py
+python -m scoop -n 4 test_model_SSD_ldd.py
+python -m scoop -n 4 test_lng_model_LCC_ldd.py
diff --git a/examples/synthetic_tests/test_lng_model/scoop_test_ldd.py b/examples/synthetic_tests/test_lng_model/scoop_test_ldd.py
new file mode 100644
index 0000000..50a1082
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/scoop_test_ldd.py
@@ -0,0 +1,19 @@
+from scoop import futures, shared
+
+import iplScoopGenerateModel as gm
+
+if __name__ == '__main__':
+    # setup data for parallel processing
+    gm.generate_ldd_model_csv('subjects_cut.lst',
+        work_prefix='tmp_ldd',
+        options={'symmetric':False,
+                 'refine':True,
+                 'protocol': [{'iter':4,'level':8},
+                              {'iter':4,'level':4},
+                             ],
+                 'parameters': {'smooth_update':2,
+                                'smooth_field':2,
+                                'conf': { 32:20,16:20,8:20,4:20,2:20,1:20 } }
+                }
+
+    )
diff --git a/examples/synthetic_tests/test_lng_model/subjects_1.lst b/examples/synthetic_tests/test_lng_model/subjects_1.lst
new file mode 100644
index 0000000..6586f68
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/subjects_1.lst
@@ -0,0 +1,18 @@
+data/object_0_0.mnc,data/mask_0_0.mnc,1.0
+data/object_1_0.mnc,data/mask_1_0.mnc,1.0
+data/object_0_1.mnc,data/mask_0_1.mnc,1.0
+data/object_1_1.mnc,data/mask_1_1.mnc,1.0
+data/object_0_2.mnc,data/mask_0_2.mnc,1.0
+data/object_1_2.mnc,data/mask_1_2.mnc,1.0
+data/object_0_3.mnc,data/mask_0_3.mnc,1.0
+data/object_1_3.mnc,data/mask_1_3.mnc,1.0
+data/object_0_4.mnc,data/mask_0_4.mnc,1.0
+data/object_1_4.mnc,data/mask_1_4.mnc,1.0
+data/object_0_5.mnc,data/mask_0_5.mnc,1.0
+data/object_1_5.mnc,data/mask_1_5.mnc,1.0
+data/object_0_6.mnc,data/mask_0_6.mnc,1.0
+data/object_1_6.mnc,data/mask_1_6.mnc,1.0
+data/object_0_7.mnc,data/mask_0_7.mnc,1.0
+data/object_1_7.mnc,data/mask_1_7.mnc,1.0
+data/object_0_8.mnc,data/mask_0_8.mnc,1.0
+data/object_1_8.mnc,data/mask_1_8.mnc,1.0
diff --git a/examples/synthetic_tests/test_lng_model/subjects_ldd.lst b/examples/synthetic_tests/test_lng_model/subjects_ldd.lst
new file mode 100644
index 0000000..a13c941
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/subjects_ldd.lst
@@ -0,0 +1,9 @@
+data_ldd/object_0_0.mnc,data_ldd/mask_0_0.mnc,1.0,0
+data_ldd/object_0_1.mnc,data_ldd/mask_0_1.mnc,1.0,1
+data_ldd/object_0_2.mnc,data_ldd/mask_0_2.mnc,1.0,2
+data_ldd/object_0_3.mnc,data_ldd/mask_0_3.mnc,1.0,3
+data_ldd/object_0_4.mnc,data_ldd/mask_0_4.mnc,1.0,4
+data_ldd/object_0_5.mnc,data_ldd/mask_0_5.mnc,1.0,5
+data_ldd/object_0_6.mnc,data_ldd/mask_0_6.mnc,1.0,6
+data_ldd/object_0_7.mnc,data_ldd/mask_0_7.mnc,1.0,7
+data_ldd/object_0_8.mnc,data_ldd/mask_0_8.mnc,1.0,8
diff --git a/examples/synthetic_tests/test_lng_model/subjects_ldd_cut.lst b/examples/synthetic_tests/test_lng_model/subjects_ldd_cut.lst
new file mode 100644
index 0000000..b83fdef
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/subjects_ldd_cut.lst
@@ -0,0 +1,9 @@
+data_ldd/object_0_0.mnc,data_ldd/mask_0_0.mnc
+data_ldd/object_0_1.mnc,data_ldd/mask_0_1.mnc
+data_ldd/object_0_2.mnc,data_ldd/mask_0_2.mnc
+data_ldd/object_0_3.mnc,data_ldd/mask_0_3.mnc
+data_ldd/object_0_4.mnc,data_ldd/mask_0_4.mnc
+data_ldd/object_0_5.mnc,data_ldd/mask_0_5.mnc
+data_ldd/object_0_6.mnc,data_ldd/mask_0_6.mnc
+data_ldd/object_0_7.mnc,data_ldd/mask_0_7.mnc
+data_ldd/object_0_8.mnc,data_ldd/mask_0_8.mnc
diff --git a/examples/synthetic_tests/test_lng_model/subjects_lin.lst b/examples/synthetic_tests/test_lng_model/subjects_lin.lst
new file mode 100644
index 0000000..e9312c8
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/subjects_lin.lst
@@ -0,0 +1,18 @@
+data_lin/object_0_0.mnc,data_lin/mask_0_0.mnc,1.0,1.0,0,0
+data_lin/object_1_0.mnc,data_lin/mask_1_0.mnc,1.0,1.0,1,0
+data_lin/object_0_1.mnc,data_lin/mask_0_1.mnc,1.0,1.0,0,1
+data_lin/object_1_1.mnc,data_lin/mask_1_1.mnc,1.0,1.0,1,1
+data_lin/object_0_2.mnc,data_lin/mask_0_2.mnc,1.0,1.0,0,2
+data_lin/object_1_2.mnc,data_lin/mask_1_2.mnc,1.0,1.0,1,2
+data_lin/object_0_3.mnc,data_lin/mask_0_3.mnc,1.0,1.0,0,3
+data_lin/object_1_3.mnc,data_lin/mask_1_3.mnc,1.0,1.0,1,3
+data_lin/object_0_4.mnc,data_lin/mask_0_4.mnc,1.0,1.0,0,4
+data_lin/object_1_4.mnc,data_lin/mask_1_4.mnc,1.0,1.0,1,4
+data_lin/object_0_5.mnc,data_lin/mask_0_5.mnc,1.0,1.0,0,5
+data_lin/object_1_5.mnc,data_lin/mask_1_5.mnc,1.0,1.0,1,5
+data_lin/object_0_6.mnc,data_lin/mask_0_6.mnc,1.0,1.0,0,6
+data_lin/object_1_6.mnc,data_lin/mask_1_6.mnc,1.0,1.0,1,6
+data_lin/object_0_7.mnc,data_lin/mask_0_7.mnc,1.0,1.0,0,7
+data_lin/object_1_7.mnc,data_lin/mask_1_7.mnc,1.0,1.0,1,7
+data_lin/object_0_8.mnc,data_lin/mask_0_8.mnc,1.0,1.0,0,8
+data_lin/object_1_8.mnc,data_lin/mask_1_8.mnc,1.0,1.0,1,8
diff --git a/examples/synthetic_tests/test_lng_model/subjects_lin_cut.lst b/examples/synthetic_tests/test_lng_model/subjects_lin_cut.lst
new file mode 100644
index 0000000..eba2b93
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/subjects_lin_cut.lst
@@ -0,0 +1,18 @@
+data_lin/object_0_0.mnc,data_lin/mask_0_0.mnc
+data_lin/object_1_0.mnc,data_lin/mask_1_0.mnc
+data_lin/object_0_1.mnc,data_lin/mask_0_1.mnc
+data_lin/object_1_1.mnc,data_lin/mask_1_1.mnc
+data_lin/object_0_2.mnc,data_lin/mask_0_2.mnc
+data_lin/object_1_2.mnc,data_lin/mask_1_2.mnc
+data_lin/object_0_3.mnc,data_lin/mask_0_3.mnc
+data_lin/object_1_3.mnc,data_lin/mask_1_3.mnc
+data_lin/object_0_4.mnc,data_lin/mask_0_4.mnc
+data_lin/object_1_4.mnc,data_lin/mask_1_4.mnc
+data_lin/object_0_5.mnc,data_lin/mask_0_5.mnc
+data_lin/object_1_5.mnc,data_lin/mask_1_5.mnc
+data_lin/object_0_6.mnc,data_lin/mask_0_6.mnc
+data_lin/object_1_6.mnc,data_lin/mask_1_6.mnc
+data_lin/object_0_7.mnc,data_lin/mask_0_7.mnc
+data_lin/object_1_7.mnc,data_lin/mask_1_7.mnc
+data_lin/object_0_8.mnc,data_lin/mask_0_8.mnc
+data_lin/object_1_8.mnc,data_lin/mask_1_8.mnc
diff --git a/examples/synthetic_tests/test_lng_model/subjects_nomask.lst b/examples/synthetic_tests/test_lng_model/subjects_nomask.lst
new file mode 100644
index 0000000..841997e
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/subjects_nomask.lst
@@ -0,0 +1,18 @@
+data/object_0_0.mnc,,1.0,1.0,0,0
+data/object_1_0.mnc,,1.0,1.0,1,0
+data/object_0_1.mnc,,1.0,1.0,0,1
+data/object_1_1.mnc,,1.0,1.0,1,1
+data/object_0_2.mnc,,1.0,1.0,0,2
+data/object_1_2.mnc,,1.0,1.0,1,2
+data/object_0_3.mnc,,1.0,1.0,0,3
+data/object_1_3.mnc,,1.0,1.0,1,3
+data/object_0_4.mnc,,1.0,1.0,0,4
+data/object_1_4.mnc,,1.0,1.0,1,4
+data/object_0_5.mnc,,1.0,1.0,0,5
+data/object_1_5.mnc,,1.0,1.0,1,5
+data/object_0_6.mnc,,1.0,1.0,0,6
+data/object_1_6.mnc,,1.0,1.0,1,6
+data/object_0_7.mnc,,1.0,1.0,0,7
+data/object_1_7.mnc,,1.0,1.0,1,7
+data/object_0_8.mnc,,1.0,1.0,0,8
+data/object_1_8.mnc,,1.0,1.0,1,8
diff --git a/examples/synthetic_tests/test_lng_model/subjects_rot.lst b/examples/synthetic_tests/test_lng_model/subjects_rot.lst
new file mode 100644
index 0000000..7f433fb
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/subjects_rot.lst
@@ -0,0 +1,9 @@
+data_rot/object_0_0.mnc,data_rot/mask_0_0.mnc,1.0,1.0,-4
+data_rot/object_0_1.mnc,data_rot/mask_0_1.mnc,1.0,1.0,-3
+data_rot/object_0_2.mnc,data_rot/mask_0_2.mnc,1.0,1.0,-2
+data_rot/object_0_3.mnc,data_rot/mask_0_3.mnc,1.0,1.0,-1
+data_rot/object_0_4.mnc,data_rot/mask_0_4.mnc,1.0,1.0,0
+data_rot/object_0_5.mnc,data_rot/mask_0_5.mnc,1.0,1.0,1
+data_rot/object_0_6.mnc,data_rot/mask_0_6.mnc,1.0,1.0,2
+data_rot/object_0_7.mnc,data_rot/mask_0_7.mnc,1.0,1.0,3
+data_rot/object_0_8.mnc,data_rot/mask_0_8.mnc,1.0,1.0,4
diff --git a/examples/synthetic_tests/test_lng_model/subjects_rot_1.lst b/examples/synthetic_tests/test_lng_model/subjects_rot_1.lst
new file mode 100644
index 0000000..ea7ff36
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/subjects_rot_1.lst
@@ -0,0 +1,9 @@
+data_rot/object_0_0.mnc,data_rot/mask_0_0.mnc,1.0,1.0
+data_rot/object_0_1.mnc,data_rot/mask_0_1.mnc,1.0,1.0
+data_rot/object_0_2.mnc,data_rot/mask_0_2.mnc,1.0,1.0
+data_rot/object_0_3.mnc,data_rot/mask_0_3.mnc,1.0,1.0
+data_rot/object_0_4.mnc,data_rot/mask_0_4.mnc,1.0,1.0
+data_rot/object_0_5.mnc,data_rot/mask_0_5.mnc,1.0,1.0
+data_rot/object_0_6.mnc,data_rot/mask_0_6.mnc,1.0,1.0
+data_rot/object_0_7.mnc,data_rot/mask_0_7.mnc,1.0,1.0
+data_rot/object_0_8.mnc,data_rot/mask_0_8.mnc,1.0,1.0
diff --git a/examples/synthetic_tests/test_lng_model/subjects_rot_2.lst b/examples/synthetic_tests/test_lng_model/subjects_rot_2.lst
new file mode 100644
index 0000000..1f4dc65
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/subjects_rot_2.lst
@@ -0,0 +1,9 @@
+data_rot/object_0_0.mnc,data_rot/mask_0_0.mnc,1.0,-4
+data_rot/object_0_1.mnc,data_rot/mask_0_1.mnc,1.0,-3
+data_rot/object_0_2.mnc,data_rot/mask_0_2.mnc,1.0,-2
+data_rot/object_0_3.mnc,data_rot/mask_0_3.mnc,1.0,-1
+data_rot/object_0_4.mnc,data_rot/mask_0_4.mnc,1.0,0
+data_rot/object_0_5.mnc,data_rot/mask_0_5.mnc,1.0,1
+data_rot/object_0_6.mnc,data_rot/mask_0_6.mnc,1.0,2
+data_rot/object_0_7.mnc,data_rot/mask_0_7.mnc,1.0,3
+data_rot/object_0_8.mnc,data_rot/mask_0_8.mnc,1.0,4
diff --git a/examples/synthetic_tests/test_lng_model/subjects_rot_2a.lst b/examples/synthetic_tests/test_lng_model/subjects_rot_2a.lst
new file mode 100644
index 0000000..5a9bb24
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/subjects_rot_2a.lst
@@ -0,0 +1,9 @@
+data_rot/object_0_0.mnc,data_rot/mask_all.mnc,1.0,-4
+data_rot/object_0_1.mnc,data_rot/mask_all.mnc,1.0,-3
+data_rot/object_0_2.mnc,data_rot/mask_all.mnc,1.0,-2
+data_rot/object_0_3.mnc,data_rot/mask_all.mnc,1.0,-1
+data_rot/object_0_4.mnc,data_rot/mask_all.mnc,1.0,0
+data_rot/object_0_5.mnc,data_rot/mask_all.mnc,1.0,1
+data_rot/object_0_6.mnc,data_rot/mask_all.mnc,1.0,2
+data_rot/object_0_7.mnc,data_rot/mask_all.mnc,1.0,3
+data_rot/object_0_8.mnc,data_rot/mask_all.mnc,1.0,4
diff --git a/examples/synthetic_tests/test_lng_model/subjects_rot_cut.lst b/examples/synthetic_tests/test_lng_model/subjects_rot_cut.lst
new file mode 100644
index 0000000..e05215d
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/subjects_rot_cut.lst
@@ -0,0 +1,9 @@
+data_rot/object_0_0.mnc,data_rot/mask_0_0.mnc
+data_rot/object_0_1.mnc,data_rot/mask_0_1.mnc
+data_rot/object_0_2.mnc,data_rot/mask_0_2.mnc
+data_rot/object_0_3.mnc,data_rot/mask_0_3.mnc
+data_rot/object_0_4.mnc,data_rot/mask_0_4.mnc
+data_rot/object_0_5.mnc,data_rot/mask_0_5.mnc
+data_rot/object_0_6.mnc,data_rot/mask_0_6.mnc
+data_rot/object_0_7.mnc,data_rot/mask_0_7.mnc
+data_rot/object_0_8.mnc,data_rot/mask_0_8.mnc
diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model.py b/examples/synthetic_tests/test_lng_model/test_lng_model.py
new file mode 100644
index 0000000..49f07c3
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/test_lng_model.py
@@ -0,0 +1,36 @@
+from scoop import futures, shared
+
+import os
+import iplScoopGenerateModel as gm
+
+if __name__ == '__main__':
+    # setup data for parallel processing
+    os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1'
+
+    gm.regress_ldd_csv(
+        'subjects_fix_vel.lst',
+        work_prefix='tmp_regress_nr_b0',
+        options={
+          'protocol': [
+            {'iter':16, 'level':8, 'blur_int': None, 'blur_vel': 4 },
+            #{'iter':4, 'level':4, 'blur_int': None, 'blur_vel': 2 },
+            #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 },
+            #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 },
+          ],
+
+          'parameters': {'smooth_update':2,
+                         'smooth_field':2,
+                         'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 },
+                         'hist_match':True,
+                         'max_step': 4.0 },
+
+          'start_level':8,
+          'refine': False,
+          'cleanup': False,
+          'debug': True,
+          'debias': True,
+        },
+        #regress_model=['data/object_0_4.mnc'],
+        model='data/object_0_4.mnc',
+        mask='data/mask_0_4.mnc',
+        int_par_count=3,
+        )
diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_LCC.py b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC.py
new file mode 100644
index 0000000..4bfe248
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC.py
@@ -0,0 +1,39 @@
+from scoop import futures, shared
+
+import os
+import iplScoopGenerateModel as gm
+
+if __name__ == '__main__':
+    # setup data for parallel processing
+    os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1'
+
+    gm.regress_ldd_csv(
+        'subjects.lst',
+        work_prefix='tmp_regress_LCC_nr_nd_2',
+        options={
+          'protocol': [
+            {'iter':4,  'level':4, 'blur_int': None, 'blur_vel': None },
+            {'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None },
+            #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 },
+            #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 },
+          ],
+
+          'parameters': {'smooth_update':2,
+                         'smooth_field':2,
+                         'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 },
+                         'hist_match':True,
+                         'max_step': 2.0,
+                         'LCC':True },
+
+          'start_level': 8,
+          'refine': False,
+          'cleanup': False,
+          'debug': True,
+          'debias': False,
+          'qc': True,
+          'incremental': False
+        },
+        #regress_model=['data/object_0_4.mnc'],
+        model='data/object_0_4.mnc',
+        mask='data/mask_0_4.mnc',
+        int_par_count=1,
+        )
diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_1.py b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_1.py
new file mode 100644
index 0000000..7abcde8
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_1.py
@@ -0,0 +1,39 @@
+from scoop import futures, shared
+
+import os
+import iplScoopGenerateModel as gm
+
+if __name__ == '__main__':
+    # setup data for parallel processing
+    os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1'
+
+    gm.regress_ldd_csv(
+        'subjects_1.lst',
+        work_prefix='tmp_regress_LCC_1',
+        options={
+          'protocol': [
+            {'iter':4, 'level':4, 'blur_int': None, 'blur_vel': None },
+            {'iter':4, 'level':2, 'blur_int': None, 'blur_vel': None },
+            {'iter':4, 'level':1, 'blur_int': None, 'blur_vel': None },
+            #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 },
+            #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 },
+          ],
+
+          'parameters': {'smooth_update':2,
+                         'smooth_field':2,
+                         'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 },
+                         'hist_match':False,
+                         'max_step': 2.0,
+                         'LCC':True },
+
+          'start_level': 8,
+          'refine': False,
+          'cleanup': False,
+          'debug': True,
+          'debias': False,
+          'qc': True,
+          'incremental': False
+        },
+        #regress_model=['data/object_0_4.mnc'],
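+        # cf. test_lng_model_nomask.py further below, which passes
+        # regress_model=[...] with mask=None instead of the single
+        # model/mask pair used here
+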
model='data/object_0_4.mnc', + mask='data/mask_0_4.mnc', + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_ldd.py b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_ldd.py new file mode 100644 index 0000000..b5e570f --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_ldd.py @@ -0,0 +1,40 @@ +from scoop import futures, shared +import os +import iplScoopGenerateModel as gm +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + # setup data for parallel processing + + gm.regress_ldd_csv( + 'subjects_ldd.lst', + work_prefix='tmp_regress_LCC_ldd_sym2', + options={ + 'protocol': [ + #{'iter':10, 'level':8, 'blur_int': None, 'blur_vel': None }, + {'iter':10, 'level':4, 'blur_int': None, 'blur_vel': None }, + #{'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:200, 16:200, 8:200, 4:200, 2:40 }, + 'LCC':True }, + + 'start_level': 8, + 'refine': True, + 'cleanup':False, + 'debug': True, + 'debias': False, + 'qc': True, + 'incremental': True, + 'remove0':True, + 'sym':True, + }, + #regress_model=['data/object_0_4.mnc'], + model='data_ldd/object_0_4.mnc', + mask='data_ldd/mask_0_4.mnc', + int_par_count=1, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_lin.py b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_lin.py new file mode 100644 index 0000000..ce6534e --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_lin.py @@ -0,0 +1,40 @@ +from scoop import futures, shared + +import iplScoopGenerateModel as gm +import os + +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + # setup data for parallel processing + + gm.regress_ldd_csv( + 'subjects_lin.lst', + work_prefix='tmp_regress_LCC_lin_4mm', + options={ + 'protocol': [ + {'iter':20, 'level':4, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 }, + 'hist_match':True, + 'max_step': 2.0, + 'LCC':True }, + + 'start_level': 8, + 'refine': True, + 'cleanup': False, + 'debug': True, + 'debias': True, + 'qc': True, + 'incremental': False + }, + #regress_model=['data/object_0_4.mnc'], + model='data/object_0_4.mnc', + mask='data/mask_0_4.mnc', + int_par_count=1, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot.py b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot.py new file mode 100644 index 0000000..42f8775 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot.py @@ -0,0 +1,38 @@ +from scoop import futures, shared +import os +import iplScoopGenerateModel as gm +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + # setup data for parallel processing + + gm.regress_ldd_csv( + 'subjects_rot.lst', + work_prefix='tmp_regress_LCC_rot_inc', + options={ + 'protocol': [ + {'iter':2, 'level':8, 'blur_int': None, 'blur_vel': None }, + {'iter':2, 'level':4, 'blur_int': None, 'blur_vel': None }, + {'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 
'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 }, + 'LCC':True }, + + 'start_level': 8, + 'refine': True, + 'cleanup': False, + 'debug': True, + 'debias': False, + 'qc': True, + 'incremental': True + }, + #regress_model=['data/object_0_4.mnc'], + model='data_rot/object_0_4.mnc', + mask='data_rot/mask_0_4.mnc', + int_par_count=1, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot_m.py b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot_m.py new file mode 100644 index 0000000..837d202 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot_m.py @@ -0,0 +1,38 @@ +from scoop import futures, shared +import os +import iplScoopGenerateModel as gm +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + # setup data for parallel processing + + gm.regress_ldd_csv( + 'subjects_rot_2.lst', + work_prefix='tmp_regress_LCC_rot_inc_2ba_std', + options={ + 'protocol': [ + {'iter':16, 'level':4, 'blur_int': None, 'blur_vel': None }, + #{'iter':2, 'level':4, 'blur_int': None, 'blur_vel': None }, + #{'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 }, + 'LCC':False }, + + 'start_level': 4, + 'refine': True, + 'cleanup': False, + 'debug': True, + 'debias': True, + 'qc': True, + 'incremental': True + }, + #regress_model=['data/object_0_4.mnc'], + model='data_rot/object_0_4.mnc', + mask='data_rot/mask_0_4.mnc', + int_par_count=1, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_lim.py b/examples/synthetic_tests/test_lng_model/test_lng_model_lim.py new file mode 100644 index 0000000..9e060da --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_lim.py @@ -0,0 +1,32 @@ +from scoop import futures, shared + +import iplScoopGenerateModel as gm + +if __name__ == '__main__': + # setup data for parallel processing + gm.regress_ldd_csv('subjects_lim.lst', + work_prefix='tmp_regress_lim_nr_v2', + options={ + 'protocol': [ + {'iter':4, 'level':4, 'blur_int': None, 'blur_vel': 4 }, + {'iter':4, 'level':4, 'blur_int': None, 'blur_vel': 2 }, + {'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + {'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:40, 16:40, 8:40, 4:40, 2:40, 1:40 }, + 'hist_match':True, + 'max_step': 4.0 }, + 'start_level':16, + 'refine': False, + 'cleanup': False, + 'debug': True, + 'debias': True, + 'qc': True, + }, + #regress_model=['data/object_0_4.mnc'], + model='data/object_0_4.mnc', + mask='data/mask_0_4.mnc', + int_par_count=1, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_lim_nd.py b/examples/synthetic_tests/test_lng_model/test_lng_model_lim_nd.py new file mode 100644 index 0000000..45bf85c --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_lim_nd.py @@ -0,0 +1,33 @@ +from scoop import futures, shared + +import iplScoopGenerateModel as gm + +if __name__ == '__main__': + # setup data for parallel processing + gm.regress_ldd_csv('subjects_lim.lst', + work_prefix='tmp_regress_lim_nr_nd', + options={ + 'protocol': [ + {'iter':8, 'level':4 }, + #{'iter':4, 'level':4 }, + #{'iter':4, 'level':2 }, + ], + 'parameters': 
+                       {'smooth_update':2,
+                        'smooth_field':2,
+                        'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 },
+                        'hist_match':True,
+                        'max_step': 4.0 },
+          'start_level':16,
+          'refine': False,
+          'blur_int_model': None,
+          'blur_vel_model': 4,
+          'cleanup': False,
+          'debug': True,
+          'debias': True,
+          'qc': True,
+        },
+        #regress_model=['data/object_0_4.mnc'],
+        model='data/object_0_4.mnc',
+        mask='data/mask_0_4.mnc',
+        int_par_count=1,
+        )
diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_nomask.py b/examples/synthetic_tests/test_lng_model/test_lng_model_nomask.py
new file mode 100644
index 0000000..b7566fa
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/test_lng_model_nomask.py
@@ -0,0 +1,26 @@
+from scoop import futures, shared
+
+import iplScoopGenerateModel as gm
+
+if __name__ == '__main__':
+    # setup data for parallel processing
+    gm.regress_ldd_csv('subjects_nomask.lst',
+        work_prefix='tmp_regress_nomask',
+        options={
+          'protocol': [
+            {'iter':4, 'level':8 },
+            {'iter':4, 'level':4 },
+            #{'iter':4, 'level':2 },
+          ],
+          'parameters': {'smooth_update':2,
+                         'smooth_field':2,
+                         'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 },
+                         'hist_match':True,
+                         'max_step': 4.0 },
+          'start_level':16,
+          'refine':False,
+        },
+        regress_model=['data/object_0_4.mnc'],
+        mask=None,
+        int_par_count=1,
+        )
diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_std.py b/examples/synthetic_tests/test_lng_model/test_lng_model_std.py
new file mode 100644
index 0000000..73d9c0e
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/test_lng_model_std.py
@@ -0,0 +1,52 @@
+from iplMincTools import mincTools,mincError
+import traceback
+import os
+import sys
+
+from scoop import futures, shared
+
+import iplScoopGenerateModel as gm
+# setup data for parallel processing
+# have to be at global level
+
+os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1'
+
+if __name__ == '__main__':
+    try:
+
+        res=gm.regress_csv(
+            'subjects.lst',
+            work_prefix='tmp_regress_std_dd_nr_nd',
+            options={
+              'protocol': [
+                {'iter':4, 'level':4, 'blur_int': None, 'blur_def': None },
+                {'iter':4, 'level':2, 'blur_int': None, 'blur_def': None },
+                {'iter':4, 'level':1, 'blur_int': None, 'blur_def': None },
+              ],
+              'start_level':8,
+              'refine': False,
+              'cleanup': False,
+              'debug': True,
+              'debias': False,
+              'qc': True,
+              'nl_mode': 'dd',
+            },
+            #regress_model=['data/object_0_4.mnc'],
+            model='data/object_0_4.mnc',
+            mask='data/mask_0_4.mnc',
+            int_par_count=1,
+            )
+
+        #
+
+    except mincError as e:
+        print "Exception in regress_csv:{}".format(str(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in regress_csv:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/examples/synthetic_tests/test_lng_model/test_model_LCC_ldd.py b/examples/synthetic_tests/test_lng_model/test_model_LCC_ldd.py
new file mode 100644
index 0000000..760a8fe
--- /dev/null
+++ b/examples/synthetic_tests/test_lng_model/test_model_LCC_ldd.py
@@ -0,0 +1,38 @@
+from scoop import futures, shared
+import os
+import iplScoopGenerateModel as gm
+
+os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1'
+
+if __name__ == '__main__':
+    # setup data for parallel processing
+
+    gm.generate_ldd_model_csv(
+        'subjects_ldd_cut.lst',
+        work_prefix='tmp_avg_LCC_ldd',
+        options={
+          'protocol': [
+            {'iter':4, 'level':8, 'blur_int': None, 'blur_vel': None },
+            {'iter':4, 'level':4, 'blur_int': None, 'blur_vel':
None }, + #{'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 8:100, 4:100, 2:40 }, + 'LCC':True }, + + 'start_level': 8, + 'refine': True, + 'cleanup':False, + 'debug': True, + 'debias': True, + 'qc': True, + 'incremental': True + }, + #regress_model=['data/object_0_4.mnc'], + model='data_ldd/object_0_0.mnc', + mask='data_ldd/mask_0_0.mnc', + ) diff --git a/examples/synthetic_tests/test_lng_model/test_model_SSD_ldd.py b/examples/synthetic_tests/test_lng_model/test_model_SSD_ldd.py new file mode 100644 index 0000000..2dc0d31 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_model_SSD_ldd.py @@ -0,0 +1,38 @@ +from scoop import futures, shared +import os +import iplScoopGenerateModel as gm + +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + # setup data for parallel processing + + gm.generate_ldd_model_csv( + 'subjects_ldd_cut.lst', + work_prefix='tmp_avg_SSD_ldd', + options={ + 'protocol': [ + {'iter':4, 'level':8, 'blur_int': None, 'blur_vel': None }, + {'iter':4, 'level':4, 'blur_int': None, 'blur_vel': None }, + #{'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 8:200, 4:200, 2:40 }, + 'LCC':False }, + + 'start_level': 8, + 'refine': True, + 'cleanup':False, + 'debug': True, + 'debias': True, + 'qc': True, + 'incremental': True + }, + #regress_model=['data/object_0_4.mnc'], + model='data_ldd/object_0_0.mnc', + mask='data_ldd/mask_0_0.mnc', + ) diff --git a/examples/synthetic_tests/test_lng_model/test_model_std.py b/examples/synthetic_tests/test_lng_model/test_model_std.py new file mode 100644 index 0000000..d6ad547 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_model_std.py @@ -0,0 +1,33 @@ +from scoop import futures, shared +import os +import iplScoopGenerateModel as gm + +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + # setup data for parallel processing + + gm.generate_nonlinear_model_csv( + 'subjects_ldd_cut.lst', + work_prefix='tmp_avg_std', + options={ + 'protocol': [ + {'iter':4, 'level':8, }, + {'iter':4, 'level':4, }, + #{'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'start_level': 8, + 'refine': True, + 'cleanup':False, + 'debug': True, + 'debias': True, + 'qc': True, + 'incremental': True + }, + #regress_model=['data/object_0_4.mnc'], + model='data_ldd/object_0_0.mnc', + mask='data_ldd/mask_0_0.mnc', + ) diff --git a/examples/synthetic_tests/test_lng_model/test_regression.py b/examples/synthetic_tests/test_lng_model/test_regression.py new file mode 100644 index 0000000..70df80a --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_regression.py @@ -0,0 +1,104 @@ +#! 
/usr/bin/env python + +import minc +import sys +import os +import pyezminc +import numpy as np + +from sklearn import linear_model + +if __name__ == "__main__": + + inp=pyezminc.parallel_input_iterator() + out=pyezminc.parallel_output_iterator() + + + design_matrix=np.array( [ [ 1, -0.5,-4], + [ 1, 0.5 ,-4], + [ 1, -0.5,-3], + [ 1, 0.5 ,-3], + [ 1, -0.5,-2], + [ 1, 0.5 ,-2], + [ 1, -0.5,-1], + [ 1, 0.5 ,-1], + [ 1, -0.5, 0], + [ 1, 0.5 , 0], + [ 1, -0.5, 1], + [ 1, 0.5 , 1], + [ 1, -0.5, 2], + [ 1, 0.5 , 2], + [ 1, -0.5, 3], + [ 1, 0.5 , 3], + [ 1, -0.5, 4], + [ 1, 0.5 , 4]] ) + + inp.open([ 'tmp_regress/8/object_0_0.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_1.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_2.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_3.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_4.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_5.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_6.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_7.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_8.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_0.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_1.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_2.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_3.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_4.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_5.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_6.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_7.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_8.mnc.008_vel.mnc', + ]) + + out.open(["tmp_regress/fit_{}.mnc".format(i) for i in range(design_matrix.shape[1])], + 'tmp_regress/8/object_0_0.mnc.008_vel.mnc' ) + + out_error=pyezminc.output_iterator_real(None) + out_error.open("tmp_regress/fit_error.mnc",reference_file="tmp_regress/8/object_1_8.mnc.008.mnc") + + inp.begin() + out.begin() + out_error.begin() + + # allocate sum + v1=np.zeros(shape=[design_matrix.shape[0]], dtype=np.float64, order='C') + v2=np.zeros(shape=[design_matrix.shape[0]], dtype=np.float64, order='C') + v3=np.zeros(shape=[design_matrix.shape[0]], dtype=np.float64, order='C') + + # allocate work space + qqq=np.empty_like(v1) + + clf=linear_model.LinearRegression(fit_intercept=False) + + while not inp.last(): + # assume that we are dealing with 3D vectors + # TODO: add check somewhere to make sure it is the case + v1=inp.value(v1);inp.next() + v2=inp.value(v2);inp.next() + v3=inp.value(v3) + + # put things together + y=np.column_stack((v1,v2,v3)) + x=design_matrix + + clf.fit(x,y) + + out.value(np.ravel(clf.coef_[0,:]));out.next() + out.value(np.ravel(clf.coef_[1,:]));out.next() + out.value(np.ravel(clf.coef_[2,:]));out.next() + + out_error.value(clf.score(x,y));out_error.next() + + inp.next() + + print out.progress() + print inp.progress() + print out_error.progress() + + del inp + del out + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on;hl python diff --git a/examples/synthetic_tests/test_model_creation/.gitignore b/examples/synthetic_tests/test_model_creation/.gitignore new file mode 100644 index 0000000..ceff4c0 --- /dev/null +++ b/examples/synthetic_tests/test_model_creation/.gitignore @@ -0,0 +1,7 @@ +*.pyc +*.mnc +tmp +tmp_nl +tmp_sym +tmp_nl_sym +tmp_* \ No newline at end of file diff --git a/examples/synthetic_tests/test_model_creation/big_subjects.lst b/examples/synthetic_tests/test_model_creation/big_subjects.lst new file mode 100644 index 0000000..53af808 --- /dev/null +++ b/examples/synthetic_tests/test_model_creation/big_subjects.lst @@ -0,0 +1,9 @@ +test_data/big_ellipse_1.mnc,test_data/big_mask.mnc 
+test_data/big_ellipse_2.mnc,test_data/big_mask.mnc +test_data/big_ellipse_3.mnc,test_data/big_mask.mnc +test_data/big_ellipse_4.mnc,test_data/big_mask.mnc +test_data/big_ellipse_5.mnc,test_data/big_mask.mnc +test_data/big_ellipse_6.mnc,test_data/big_mask.mnc +test_data/big_ellipse_7.mnc,test_data/big_mask.mnc +test_data/big_ellipse_8.mnc,test_data/big_mask.mnc +test_data/big_ellipse_9.mnc,test_data/big_mask.mnc diff --git a/examples/synthetic_tests/test_model_creation/prepare_test_data.sh b/examples/synthetic_tests/test_model_creation/prepare_test_data.sh new file mode 100755 index 0000000..cadeb68 --- /dev/null +++ b/examples/synthetic_tests/test_model_creation/prepare_test_data.sh @@ -0,0 +1,37 @@ +#! /bin/sh +mkdir -p test_data + +object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 50 50 50 -step 2 2 2 -start -50 -50 -50" +mask_opts="-byte -real_range 0 1 -background 0 -edge_value 1 -fill_value 1 -no_partial -nelements 50 50 50 -step 2 2 2 -start -50 -50 -50" + +# make bunch of ellipses +make_phantom $object_opts -ellipse -center -10 0 0 -width 20 10 10 test_data/ellipse_1.mnc +make_phantom $object_opts -ellipse -center 0 0 0 -width 20 10 10 test_data/ellipse_2.mnc +make_phantom $object_opts -ellipse -center 10 0 0 -width 20 10 10 test_data/ellipse_3.mnc + +make_phantom $object_opts -ellipse -center 0 -10 0 -width 10 20 10 test_data/ellipse_4.mnc +make_phantom $object_opts -ellipse -center 0 0 0 -width 10 20 10 test_data/ellipse_5.mnc +make_phantom $object_opts -ellipse -center 0 10 0 -width 10 20 10 test_data/ellipse_6.mnc + +make_phantom $object_opts -ellipse -center 0 0 -10 -width 10 10 20 test_data/ellipse_7.mnc +make_phantom $object_opts -ellipse -center 0 0 0 -width 10 10 20 test_data/ellipse_8.mnc +make_phantom $object_opts -ellipse -center 0 0 10 -width 10 10 20 test_data/ellipse_9.mnc + +# make mask +make_phantom $mask_opts -rectangle -center 0 0 0 -width 50 50 50 test_data/mask.mnc + + +# make reference +make_phantom $mask_opts -ellipse -center 0 0 0 -width 15 15 15 test_data/ref.mnc + +cat - >subjects.lst <big_subjects.lst <>>> xfmavg tmp_lsq6_downsample/4/ellipse_1.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_2.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_3.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_4.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_5.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_6.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_7.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_8.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_9.mnc_corr.004.xfm test_std.xfm + +Transform_Type = Linear; +Linear_Transform = + 9.99990658e-01 1.41336445e-03 -4.08492676e-03 3.23595092e-01 + -1.47889049e-03 9.99869585e-01 -1.60819040e-02 -3.64787875e-01 + 4.06166498e-03 1.60877979e-02 9.99862333e-01 -5.46621577e-01; diff --git a/examples/synthetic_tests/test_registration/.gitignore b/examples/synthetic_tests/test_registration/.gitignore new file mode 100644 index 0000000..49c63a4 --- /dev/null +++ b/examples/synthetic_tests/test_registration/.gitignore @@ -0,0 +1,5 @@ +# ignore all automatically generated files and outputs +*.xfm +*.mnc +*.txt +*.log diff --git a/examples/synthetic_tests/test_registration/prepare_test_data.sh b/examples/synthetic_tests/test_registration/prepare_test_data.sh new file mode 100755 index 0000000..46478a2 --- /dev/null +++ b/examples/synthetic_tests/test_registration/prepare_test_data.sh @@ -0,0 +1,29 @@ +#! 
/bin/sh + + +#tempdir=`mktemp -t test -d` +#trap "rm -rf $tempdir" 0 1 2 15 +tempdir=data +mkdir -p $tempdir + +object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 97 101 103 -step 4 4 4 -start -200 -200 -200" + +object_opts2="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 97 101 103 -step 1 1 1 -start -50 -50 -50" + + +# make bunch of ellipses +make_phantom $object_opts -ellipse -center 0 0 0 -width 150 150 150 $tempdir/ellipse_0.mnc +make_phantom $object_opts -ellipse -center 0 20 0 -width 100 150 100 $tempdir/ellipse_1.mnc +make_phantom $object_opts -ellipse -center 0 -20 0 -width 100 150 100 $tempdir/ellipse_2.mnc + +for i in $(seq 0 2);do + fast_blur --fwhm 8 $tempdir/ellipse_$i.mnc $tempdir/ellipse_${i}_blur.mnc +done + +make_phantom $object_opts2 -ellipse -center 0 0 0 -width 37 37 37 $tempdir/ellipse_0_.mnc +make_phantom $object_opts2 -ellipse -center 0 5 0 -width 25 37 25 $tempdir/ellipse_1_.mnc +make_phantom $object_opts2 -ellipse -center 0 -5 0 -width 25 37 25 $tempdir/ellipse_2_.mnc + +for i in $(seq 0 2);do + fast_blur --fwhm 4 $tempdir/ellipse_${i}_.mnc $tempdir/ellipse_${i}_blur_.mnc +done diff --git a/examples/synthetic_tests/test_registration/test_ants.sh b/examples/synthetic_tests/test_registration/test_ants.sh new file mode 100755 index 0000000..69a86a9 --- /dev/null +++ b/examples/synthetic_tests/test_registration/test_ants.sh @@ -0,0 +1,22 @@ +#! /bin/sh + +in1=data/ellipse_0_blur.mnc +in2=data/ellipse_2_blur.mnc + +# run Exponential +antsRegistration --collapse-output-transforms 0 -d 3 \ + --float 0 --verbose 1 --minc 1 \ + -c '[1000x1000x1000,1e-7,100]' \ + --transform 'Exponential[0.2,1.0,1.0]' \ + -m "CC[$in1,$in2,1.0,4,Regular,0.1]" \ + -s 8x4x2 -f 8x4x2 \ + -o "[test_exp_,test_exp_in1.mnc,test_exp_in2.mnc]" + +# run SyN +antsRegistration --collapse-output-transforms 0 -d 3 \ + --float 0 --verbose 1 --minc 1 \ + -c '[1000x1000x1000,1e-7,100]' \ + --transform 'SyN[0.2,1.0,1.0]' \ + -m "CC[$in1,$in2,1.0,4,Regular,0.1]" \ + -s 8x4x2 -f 8x4x2 \ + -o "[test_syn_,test_syn_in1.mnc,test_syn_in2.mnc]" \ No newline at end of file diff --git a/examples/synthetic_tests/test_registration/test_ldd_reg.py b/examples/synthetic_tests/test_registration/test_ldd_reg.py new file mode 100755 index 0000000..58c77c7 --- /dev/null +++ b/examples/synthetic_tests/test_registration/test_ldd_reg.py @@ -0,0 +1,47 @@ +#! 
/usr/bin/env python + + +import shutil +import os +import sys +import csv +import traceback +import argparse +import json +import tempfile +import re +import copy +import random + +# MINC stuff +from iplMincTools import mincTools,mincError + +if __name__=='__main__': + with mincTools() as minc: + + + for s in range(0,8): + for g in range(0,8): + par={'conf':{}, + 'smooth_update':s, + 'smooth_field':g, + 'update_rule':1, + 'grad_type':0, + 'max_step':2.0 } + + xfm="test_{}_{}_ldd.xfm".format(s,g) + grid="test_{}_{}_ldd_grid_0.mnc".format(s,g) + grid_m="test_{}_{}_ldd_grid_m.mnc".format(s,g) + test_out="test_{}_{}_ldd_test.mnc".format(s,g) + test_qc="test_{}_{}_ldd_test.jpg".format(s,g) + + minc.non_linear_register_ldd( + "data/ellipse_0_blur.mnc","data/ellipse_1_blur.mnc","test_{}_{}_vel.mnc".format(s,g), + output_xfm=xfm, start=8,level=2,parameters=par) + + minc.grid_magnitude(grid,grid_m) + + minc.resample_smooth("data/ellipse_0_blur.mnc",test_out,transform=xfm) + + minc.qc("data/ellipse_1_blur.mnc",test_qc,mask=test_out,image_range=[0,100],mask_range=[0,100]) + \ No newline at end of file diff --git a/examples/synthetic_tests/test_registration/test_nl_reg.py b/examples/synthetic_tests/test_registration/test_nl_reg.py new file mode 100755 index 0000000..b70dee3 --- /dev/null +++ b/examples/synthetic_tests/test_registration/test_nl_reg.py @@ -0,0 +1,143 @@ +#! /usr/bin/env python + + +import shutil +import os +import sys +import csv +import traceback +import argparse +import json +import tempfile +import re +import copy +import random + +# MINC stuff +from iplMincTools import mincTools,mincError + +elx_par1=""" +(FixedInternalImagePixelType "float") +(MovingInternalImagePixelType "float") +(FixedImageDimension 3) +(MovingImageDimension 3) +(UseDirectionCosines "true") + +(ShowExactMetricValue "false") + +(Registration "MultiResolutionRegistration") +(Interpolator "BSplineInterpolator" ) +(ResampleInterpolator "FinalBSplineInterpolator" ) +(Resampler "DefaultResampler" ) + +(FixedImagePyramid "FixedSmoothingImagePyramid") +(MovingImagePyramid "MovingSmoothingImagePyramid") + +(Optimizer "AdaptiveStochasticGradientDescent") +(Transform "BSplineTransform") +(Metric "AdvancedNormalizedCorrelation") + +(FinalGridSpacingInPhysicalUnits 32) + +(HowToCombineTransforms "Compose") + +(ErodeMask "false") + +(NumberOfResolutions 2) + +(ImagePyramidSchedule 8 8 8 4 4 4 ) + +(MaximumNumberOfIterations 200 200 200 ) +(MaximumNumberOfSamplingAttempts 3) + +(NumberOfSpatialSamples 1024 ) + +(NewSamplesEveryIteration "true") +(ImageSampler "Random" ) + +(BSplineInterpolationOrder 1) + +(FinalBSplineInterpolationOrder 1) + +(DefaultPixelValue 0) + +(WriteResultImage "false") + +// The pixel type and format of the resulting deformed moving image +(ResultImagePixelType "float") +(ResultImageFormat "mnc") +""" + + +elx_par2=""" +(FixedInternalImagePixelType "float") +(MovingInternalImagePixelType "float") +(FixedImageDimension 3) +(MovingImageDimension 3) +(UseDirectionCosines "true") + +(ShowExactMetricValue "false") + +(Registration "MultiResolutionRegistration") +(Interpolator "BSplineInterpolator" ) +(ResampleInterpolator "FinalBSplineInterpolator" ) +(Resampler "DefaultResampler" ) + +(FixedImagePyramid "FixedSmoothingImagePyramid") +(MovingImagePyramid "MovingSmoothingImagePyramid") + +(Optimizer "AdaptiveStochasticGradientDescent") +(Transform "BSplineTransform") +(Metric "AdvancedNormalizedCorrelation") + +(FinalGridSpacingInPhysicalUnits 2) + +(HowToCombineTransforms "Compose") + +(ErodeMask "false") + 
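+// second parameter set: like elx_par1 above but with a finer
+// FinalGridSpacingInPhysicalUnits (2 vs 32) and a 4/2 pyramid schedule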
+(NumberOfResolutions 2) + +(ImagePyramidSchedule 4 4 4 2 2 2 ) + +(MaximumNumberOfIterations 200 200 200 ) +(MaximumNumberOfSamplingAttempts 3) + +(NumberOfSpatialSamples 1024 ) + +(NewSamplesEveryIteration "true") +(ImageSampler "Random" ) + +(BSplineInterpolationOrder 1) + +(FinalBSplineInterpolationOrder 1) + +(DefaultPixelValue 0) + +(WriteResultImage "false") + +// The pixel type and format of the resulting deformed moving image +(ResultImagePixelType "float") +(ResultImageFormat "mnc") +""" + + +if __name__=='__main__': + with mincTools() as minc: + minc.register_elastix("data/ellipse_0_blur.mnc","data/ellipse_1_blur.mnc", + output_par="test_4mm_0_1_par.txt",output_xfm="test_4mm_0_1.xfm",parameters=elx_par1) + + minc.grid_magnitude("test_4mm_0_1_grid_0.mnc","test_4mm_0_1_grid_m.mnc") + + minc.register_elastix("data/ellipse_0_blur.mnc","data/ellipse_1_blur.mnc",output_par="test_4mm_0_1_2_par.txt", + output_xfm="test_4mm_0_1_2.xfm",parameters=elx_par1,init_xfm="test_4mm_0_1.xfm") + + minc.grid_magnitude("test_4mm_0_1_2_grid_0.mnc","test_4mm_0_1_2_grid_m.mnc") + + minc.register_elastix("data/ellipse_0_blur.mnc","data/ellipse_1_blur.mnc",output_par="test_4mm_0_1_3_par.txt", + output_xfm="test_4mm_0_1_3.xfm",parameters=elx_par1,init_par="test_4mm_0_1_par.txt") + + minc.grid_magnitude("test_4mm_0_1_3_grid_0.mnc","test_4mm_0_1_3_grid_m.mnc") + + minc.register_elastix("data/ellipse_0_blur_.mnc","data/ellipse_1_blur_.mnc",output_par="test_1mm_0_1_par.txt",output_xfm="test_1mm_0_1.xfm",parameters=elx_par1) + minc.grid_magnitude("test_1mm_0_1_grid_0.mnc","test_1mm_0_1_grid_m.mnc") \ No newline at end of file diff --git a/examples/synthetic_tests/test_segmentation/.gitignore b/examples/synthetic_tests/test_segmentation/.gitignore new file mode 100644 index 0000000..c30390d --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/.gitignore @@ -0,0 +1,6 @@ +# ignore all automatically generated files and outputs +*.xfm +*.mnc +*.txt +*.log +test_* \ No newline at end of file diff --git a/examples/synthetic_tests/test_segmentation/cv.json b/examples/synthetic_tests/test_segmentation/cv.json new file mode 100644 index 0000000..0eac201 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/cv.json @@ -0,0 +1,9 @@ +{ + "validation_library":"seg_subjects.lst", + "iterations":10, + "cv":2, + "fuse_variant":"fuse_1", + "ec_variant":"ec_xgb", + "cv_variant":"cv_xgb", + "regularize_variant":"reg_1" +} diff --git a/examples/synthetic_tests/test_segmentation/cv_ants.json b/examples/synthetic_tests/test_segmentation/cv_ants.json new file mode 100644 index 0000000..66d71d3 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/cv_ants.json @@ -0,0 +1,5 @@ +{ + "validation_library":"seg_subjects.lst", + "iterations":-1, + "cv":1 +} diff --git a/examples/synthetic_tests/test_segmentation/cv_ants2.json b/examples/synthetic_tests/test_segmentation/cv_ants2.json new file mode 100644 index 0000000..7879706 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/cv_ants2.json @@ -0,0 +1,9 @@ +{ + "validation_library":"seg_subjects.lst", + "iterations":-1, + "cv":1, + "fuse_variant":"fuse", + "ec_variant":"ec2", + "cv_variant":"cv2", + "regularize_variant":"gc" +} diff --git a/examples/synthetic_tests/test_segmentation/cv_nl.json b/examples/synthetic_tests/test_segmentation/cv_nl.json new file mode 100644 index 0000000..66d71d3 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/cv_nl.json @@ -0,0 +1,5 @@ +{ + "validation_library":"seg_subjects.lst", + "iterations":-1, + "cv":1 
+} diff --git a/examples/synthetic_tests/test_segmentation/cv_triple.json b/examples/synthetic_tests/test_segmentation/cv_triple.json new file mode 100644 index 0000000..4ff5132 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/cv_triple.json @@ -0,0 +1,9 @@ +{ + "validation_library":"seg_subjects_triple.lst", + "iterations":10, + "cv":2, + "fuse_variant":"fuse_1", + "ec_variant":"ec_1", + "cv_variant":"cv_1", + "regularize_variant":"reg_1" +} diff --git a/examples/synthetic_tests/test_segmentation/dumb_segment.py b/examples/synthetic_tests/test_segmentation/dumb_segment.py new file mode 100755 index 0000000..6b1b039 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/dumb_segment.py @@ -0,0 +1,71 @@ +#! /usr/bin/env python + +# standard library +import string +import os +import argparse +import pickle +import cPickle +import sys +import json +import csv +# minc +import minc + +# numpy +import numpy as np + +def coords(string): + c=[float(i) for i in string.split(',')] + + if len(c)!=3 : + raise argparse.ArgumentTypeError('Expect three coordinates') + return c + +def parse_options(): + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description='Perform error-correction learning and application') + + parser.add_argument('--center',type=coords, + default=[0.0,0.0,0.0],action = 'store', + help="Center coordinate") + + parser.add_argument('input', + help='Input image') + + parser.add_argument('output', + help='Output image') + + options = parser.parse_args() + + return options + + +def dumb_segment(img, center): + + c=np.mgrid[ 0:img.shape[0] , + 0:img.shape[1] , + 0:img.shape[2] ] + + seg=np.zeros_like( img, dtype=np.int32 ) + + seg=( c[2]>center[0] )*1+\ + ( c[1]>center[1] )*2+\ + ( c[0]>center[2] )*4+ 1 + + seg[ img < 50 ] = 0 + + return np.asarray(seg,dtype=np.int32 ) + +if __name__ == "__main__": + options = parse_options() + print(repr(options)) + input = minc.Image(options.input) + #seg=np.zeros_like( input.data, dtype=np.int32 ) + center_vox=[(options.center[i]-input.start()[i])/input.spacing()[i] for i in xrange(3)] + print(repr(center_vox)) + seg=dumb_segment(input.data, center_vox) + + minc.Label( data=seg ).save(name=options.output, imitate=options.input) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/examples/synthetic_tests/test_segmentation/ec_train.json b/examples/synthetic_tests/test_segmentation/ec_train.json new file mode 100644 index 0000000..7eeb5a1 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/ec_train.json @@ -0,0 +1,14 @@ +{ + "method" : "xgb", + "method2" : "xgb", + + "border_mask": true, + "border_mask_width": 3, + "patch_size": 0, + "use_coord": false, + "use_joint": false, + "use_raw": true, + + "train_rounds": 3, + "train_cv": 2 +} diff --git a/examples/synthetic_tests/test_segmentation/ec_train_ants2.json b/examples/synthetic_tests/test_segmentation/ec_train_ants2.json new file mode 100644 index 0000000..d53f5a9 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/ec_train_ants2.json @@ -0,0 +1,20 @@ +{ + "method" : "AdaBoost", + "method_n" : 200, + "border_mask": true, + "border_mask_width": 2, + "use_coord": true, + "use_joint": true, + "patch_size": 1 , + "use_raw": true, + + "normalize_input": false, + "primary_features": 1, + "max_samples": -1, + "sample_pick": "first", + + "antialias_labels": false, + "blur_labels": 2, + "expit_labels": 2, + "normalize_labels": true +} diff --git 
a/examples/synthetic_tests/test_segmentation/ec_train_triple.json b/examples/synthetic_tests/test_segmentation/ec_train_triple.json new file mode 100644 index 0000000..be61d47 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/ec_train_triple.json @@ -0,0 +1,11 @@ +{ + "method" : "AdaBoost", + "method_n" : 100, + "border_mask": true, + "border_mask_width": 3, + "use_coord": true, + "use_joint": true, + + "train_rounds": 3, + "train_cv": 2 +} diff --git a/examples/synthetic_tests/test_segmentation/library_description.json b/examples/synthetic_tests/test_segmentation/library_description.json new file mode 100644 index 0000000..c6d3590 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/library_description.json @@ -0,0 +1,21 @@ +{ + "reference_model": "data/ellipse_0_blur.mnc", + "reference_mask": "data/ellipse_0_mask.mnc", + "reference_local_model" : null, + "reference_local_mask" : null, + "library":"seg_subjects.lst", + "build_remap": [ [1, 1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,7], [8,8] ], + "build_flip_remap": null, + "parts": 0, + "classes": 9, + "build_symmetric": false, + "build_symmetric_flip": false, + "symmetric_lut": null, + "denoise": false, + "denoise_beta": null, + "linear_register": false, + "local_linear_register": true, + "non_linear_register": false, + "resample_order": 2, + "resample_baa": true +} diff --git a/examples/synthetic_tests/test_segmentation/library_description_ants.json b/examples/synthetic_tests/test_segmentation/library_description_ants.json new file mode 100644 index 0000000..c730e0c --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/library_description_ants.json @@ -0,0 +1,22 @@ +{ + "reference_model": "data/ellipse_0_blur.mnc", + "reference_mask": "data/ellipse_0_mask.mnc", + "reference_local_model" : null, + "reference_local_mask" : null, + "library":"seg_subjects.lst", + "build_remap": [ [1, 1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,7], [8,8] ], + "build_flip_remap": null, + "parts": 0, + "classes": 9, + "build_symmetric": false, + "build_symmetric_flip": false, + "symmetric_lut": null, + "denoise": false, + "denoise_beta": null, + "initial_register": null, + "initial_local_register": null, + "non_linear_register": true, + "resample_order": 2, + "resample_baa": true, + "non_linear_register_ants": true +} diff --git a/examples/synthetic_tests/test_segmentation/library_description_nl.json b/examples/synthetic_tests/test_segmentation/library_description_nl.json new file mode 100644 index 0000000..182f9ff --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/library_description_nl.json @@ -0,0 +1,22 @@ +{ + "reference_model": "data/ellipse_0_blur.mnc", + "reference_mask": "data/ellipse_0_mask.mnc", + "reference_local_model" : null, + "reference_local_mask" : null, + "library":"seg_subjects.lst", + "build_remap": [ [1, 1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,7], [8,8] ], + "build_flip_remap": null, + "parts": 0, + "classes": 9, + "build_symmetric": false, + "build_symmetric_flip": false, + "symmetric_lut": null, + "denoise": false, + "denoise_beta": null, + "linear_register": false, + "local_linear_register": true, + "non_linear_register": false, + "resample_order": 2, + "resample_baa": true, + "non_linear_register_level": 4 +} diff --git a/examples/synthetic_tests/test_segmentation/library_description_triple.json b/examples/synthetic_tests/test_segmentation/library_description_triple.json new file mode 100644 index 0000000..cf1b169 --- /dev/null +++ 
b/examples/synthetic_tests/test_segmentation/library_description_triple.json @@ -0,0 +1,23 @@ +{ + "reference_model": "data/ellipse_0_blur.mnc", + "reference_mask": "data/ellipse_0_mask.mnc", + "reference_add": [ "data/ellipse_0_blur.mnc", "data/ellipse_0_blur.mnc"], + "reference_local_model" : null, + "reference_local_mask" : null, + "library":"seg_subjects_triple.lst", + "build_remap": [ [1, 1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,7], [8,8] ], + "build_flip_remap": null, + "parts": 0, + "classes": 9, + "build_symmetric": false, + "build_symmetric_flip": false, + "symmetric_lut": null, + "denoise": false, + "denoise_beta": null, + "linear_register": false, + "local_linear_register": true, + "non_linear_register": false, + "resample_order": 2, + "resample_baa": true, + "modalities" : 3 +} diff --git a/examples/synthetic_tests/test_segmentation/prepare_test_data.sh b/examples/synthetic_tests/test_segmentation/prepare_test_data.sh new file mode 100755 index 0000000..d307172 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/prepare_test_data.sh @@ -0,0 +1,50 @@ +#! /bin/sh + + +#tempdir=`mktemp -t test -d` +#trap "rm -rf $tempdir" 0 1 2 15 +tempdir=data +mkdir -p $tempdir + +object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 97 101 103 -step 4 4 4 -start -200 -200 -200" + +seg_opts="-byte -real_range 0 1 -background 0 -edge_value 1 -fill_value 1 -no_partial -nelements 100 100 100 -step 4 4 4 -start -200 -200 -200" + + +make_phantom $object_opts -ellipse -center 0 0 0 -width 150 150 150 $tempdir/ellipse_0.mnc + +# make bunch of ellipses +make_phantom $object_opts -ellipse -center -10 0 0 -width 150 100 100 $tempdir/ellipse_1.mnc +make_phantom $object_opts -ellipse -center 10 0 0 -width 150 100 100 $tempdir/ellipse_2.mnc + +make_phantom $object_opts -ellipse -center 0 -10 0 -width 100 150 100 $tempdir/ellipse_3.mnc +make_phantom $object_opts -ellipse -center 0 10 0 -width 100 150 100 $tempdir/ellipse_4.mnc + +make_phantom $object_opts -ellipse -center 0 0 -10 -width 100 100 150 $tempdir/ellipse_5.mnc +make_phantom $object_opts -ellipse -center 0 0 10 -width 100 100 150 $tempdir/ellipse_6.mnc + + +for i in $(seq 0 6);do + fast_blur --fwhm 8 $tempdir/ellipse_$i.mnc $tempdir/ellipse_${i}_blur.mnc +done + +# make segmentations +./dumb_segment.py $tempdir/ellipse_0.mnc $tempdir/ellipse_0_seg.mnc --center 0,0,0 +./dumb_segment.py $tempdir/ellipse_1.mnc $tempdir/ellipse_1_seg.mnc --center " -10,0,0" +./dumb_segment.py $tempdir/ellipse_2.mnc $tempdir/ellipse_2_seg.mnc --center 10,0,0 +./dumb_segment.py $tempdir/ellipse_3.mnc $tempdir/ellipse_3_seg.mnc --center 0,-10,0 +./dumb_segment.py $tempdir/ellipse_4.mnc $tempdir/ellipse_4_seg.mnc --center 0,10,0 +./dumb_segment.py $tempdir/ellipse_5.mnc $tempdir/ellipse_5_seg.mnc --center 0,0,-10 +./dumb_segment.py $tempdir/ellipse_6.mnc $tempdir/ellipse_6_seg.mnc --center 0,0,10 + + +# create reference mask +itk_morph --threshold 10 --exp 'D[2]' $tempdir/ellipse_0_blur.mnc $tempdir/ellipse_0_mask.mnc + +rm -f seg_subjects.lst + +for i in $(seq 0 6);do + echo $tempdir/ellipse_${i}_blur.mnc,$tempdir/ellipse_${i}_seg.mnc >> seg_subjects.lst +done + + diff --git a/examples/synthetic_tests/test_segmentation/run_test.sh b/examples/synthetic_tests/test_segmentation/run_test.sh new file mode 100755 index 0000000..7d06f88 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/run_test.sh @@ -0,0 +1,105 @@ +#! 
/bin/sh + + +PREFIX=$(pwd)/../../python + +export PYTHONPATH=$PREFIX:$PYTHONPATH + + +cat - > library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description_ants.json < cv_ants.json < segment_ants.json < library_description_ants.json < cv_ants2.json < segment_ants2.json < ec_train_ants2.json < library_description_ants.json < cv_ants3.json < segment_ants.json < ec_train_ants3.json < library_description_ants.json < cv_ants4.json < segment_ants.json < ec_train_ants4.json < library_description_ants.json < cv_ants5.json < segment_ants.json < ec_train_ants5.json < library_description_ants.json < cv_ants2.json < elastix_lin.txt < segment_ants2.json < ec_train_ants2.json < library_description_nl.json < cv_nl.json < segment_nl_ext.json < library_description_nl.json < cv_nl.json < segment_nl.json < ec_train_nl.json < library_description.json < cv.json < segment_nnls.json < ec_train.json < library_description_triple.json < cv_triple.json < segment_triple.json < ec_train_triple.json < library_description.json < cv.json < segment.json < ec_train.json < " + echo "Usefull environment variables:" + echo "MNI_DATAPATH - location of MNI datasets ( /opt/minc/share )" + echo " should include icbm152_model_09c and beast-library-1.1" + echo "PARALLEL - number of paralell processes to use" + exit 1 +fi + +# setup variables +MNI_DATAPATH=${MNI_DATAPATH:-/opt/minc/share} +PARALLEL=${PARALLEL:-1} + + +icbm_model_dir=$MNI_DATAPATH/icbm152_model_09c +beast_model_dir=$MNI_DATAPATH/beast-library-1.1 + + +if [ ! -d $icbm_model_dir ];then + echo "Missing $icbm_model_dir" + exit 1 +fi + +if [ ! -d $beast_model_dir ];then + echo "Missing $beast_model_dir" + exit 1 +fi + +pipeline_dir=$(dirname $0)/.. +data_dir=$(dirname $0) + +#export PYTHONPATH=$(realpath $pipeline_dir/python) +export PYTHONPATH=$pipeline_dir/python +#export PATH=$pipeline_dir/bin:$PATH +export OMP_NUM_THREADS=1 +export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1 + +### Run pipeline for NC scan rescan data ### +python -m scoop -vvv -n $PARALLEL $pipeline_dir/python/iplLongitudinalPipeline.py \ + -L \ + -l $data_dir/nc_scan_rescan_validation_list.csv \ + -o $output_dir \ + --model-dir=$icbm_model_dir \ + --model-name=mni_icbm152_t1_tal_nlin_sym_09c \ + --beast-dir=$beast_model_dir diff --git a/ipl/__init__.py b/ipl/__init__.py new file mode 100755 index 0000000..6cfae61 --- /dev/null +++ b/ipl/__init__.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# +# @author Vladimir S. FONOV +# @date 12/10/2014 +# +# Run fusion segmentation + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/ants_registration.py b/ipl/ants_registration.py new file mode 100755 index 0000000..d12417f --- /dev/null +++ b/ipl/ants_registration.py @@ -0,0 +1,667 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date 29/06/2015 +# +# registration tools + + +from __future__ import print_function + +import os +import sys +import shutil +import tempfile +import subprocess +import re +import fcntl +import traceback +import collections +import math + +# command-line interface +import argparse + +# local stuff +import minc_tools + + +# hack to make it work on Python 3 +try: + unicode = unicode +except NameError: + # 'unicode' is undefined, must be Python 3 + str = str + unicode = str + bytes = bytes + basestring = (str,bytes) +else: + # 'unicode' exists, must be Python 2 + str = str + unicode = unicode + bytes = str + basestring = basestring + + +def ants_linear_register( + source, + target, + output_xfm, + parameters=None, + source_mask=None, + target_mask=None, + init_xfm=None, + objective=None, + conf=None, + debug=False, + close=False, + work_dir=None, + downsample=None, + verbose=0 + ): + """perform linear registration with ANTs""" + + # TODO: make use of parameters + + if parameters is None: + parameters={} + + with minc_tools.mincTools(verbose=verbose) as minc: + if not minc.checkfiles(inputs=[source,target], outputs=[output_xfm]): + return + + prev_xfm = None + + s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + + source_lr=source + target_lr=target + + source_mask_lr=source_mask + target_mask_lr=target_mask + # figure out what to do here: + with minc_tools.cache_files(work_dir=work_dir,context='reg') as tmp: + + if downsample is not None: + source_lr=tmp.cache(s_base+'_'+str(downsample)+'.mnc') + target_lr=tmp.cache(t_base+'_'+str(downsample)+'.mnc') + + minc.resample_smooth(source,source_lr,unistep=downsample) + minc.resample_smooth(target,target_lr,unistep=downsample) + + if source_mask is not None: + source_mask_lr=tmp.cache(s_base+'_mask_'+str(downsample)+'.mnc') + minc.resample_labels(source_mask,source_mask_lr,unistep=downsample,datatype='byte') + if target_mask is not None: + target_mask_lr=tmp.cache(s_base+'_mask_'+str(downsample)+'.mnc') + minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte') + + iterations=parameters.get('affine-iterations','10000x10000x10000x10000x10000') + + default_gradient_descent_option='0.5x0.95x1.e-5x1.e-4' + if close:default_gradient_descent_option='0.05x0.5x1.e-4x1.e-4' + gradient_descent_option=parameters.get('gradient_descent_option',default_gradient_descent_option) + + mi_option=parameters.get('mi-option','32x16000') + use_mask=parameters.get('use_mask',True) + use_histogram_matching=parameters.get('use_histogram_matching',False) + affine_metric=parameters.get('metric_type','MI') + affine_rigid=parameters.get('rigid',False) + + cost_function_par='1,4' + + cmd=['ANTS','3'] + + s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + + source_lr=source + target_lr=target + + target_mask_lr=target_mask + + if downsample is not None: + source_lr=tmp.cache(s_base+'_'+str(downsample)+'.mnc') + target_lr=tmp.cache(t_base+'_'+str(downsample)+'.mnc') + + minc.resample_smooth(source,source_lr,unistep=downsample) + minc.resample_smooth(target,target_lr,unistep=downsample) + + if target_mask is not None: + target_mask_lr=tmp.cache(s_base+'_mask_'+str(downsample)+'.mnc') + minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte') + + + cmd.extend(['-m','{}[{},{},{}]'.format('CC',source_lr,target_lr,cost_function_par)]) + 
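+        # '-i 0' requests zero deformable iterations: this ANTS invocation
+        # performs only the affine stage, driven by the CC metric added above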
+        cmd.extend(['-i','0'])
+        cmd.extend(['--number-of-affine-iterations',iterations])
+        cmd.extend(['--affine-gradient-descent-option', gradient_descent_option])
+        cmd.extend(['--MI-option', mi_option])
+        cmd.extend(['--affine-metric-type', affine_metric])
+
+        if affine_rigid:
+            cmd.append('--rigid-affine')
+
+        cmd.extend(['-o',output_xfm])
+
+        inputs=[source_lr,target_lr]
+        if target_mask_lr is not None and use_mask:
+            inputs.append(target_mask_lr)
+            cmd.extend(['-x',target_mask_lr])
+
+        if use_histogram_matching:
+            cmd.append('--use-Histogram-Matching')
+
+        winsorize_intensity=parameters.get('winsorize-image-intensities',None)
+        if winsorize_intensity is not None:
+            if isinstance(winsorize_intensity, dict):
+                cmd.extend(['--winsorize-image-intensities',str(winsorize_intensity.get('low',5)),str(winsorize_intensity.get('high',95))])
+            else:
+                cmd.append('--winsorize-image-intensities')
+
+        if init_xfm is not None:
+            cmd.extend(['--initial-affine',init_xfm])
+
+        outputs=[output_xfm ] # TODO: add inverse xfm ?
+        minc.command(cmd, inputs=inputs, outputs=outputs)
+
+
+def non_linear_register_ants(
+    source, target, output_xfm,
+    target_mask=None,
+    init_xfm  =None,
+    parameters=None,
+    downsample=None,
+    verbose=0
+    ):
+    """perform non-linear registration using ANTs,
+    WARNING: the inverted xfm will be named output_invert.xfm"""
+
+    with minc_tools.mincTools(verbose=verbose) as minc:
+
+        if parameters is None:
+            #print("Using default ANTS parameters")
+            parameters={}
+
+        if not minc.checkfiles(inputs=[source,target],
+                               outputs=[output_xfm ]):
+            return
+
+        cost_function=parameters.get('cost_function','CC')
+        cost_function_par=parameters.get('cost_function_par','1,2')
+
+        reg=parameters.get('regularization','Gauss[2,0.5]')
+        iterations=parameters.get('iter','20x20x0')
+        transformation=parameters.get('transformation','SyN[0.25]')
+        affine_iterations=parameters.get('affine-iterations','0x0x0')
+        use_mask=parameters.get('use_mask',True)
+        use_histogram_matching=parameters.get('use_histogram_matching',False)
+
+        cmd=['ANTS','3']
+
+        s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+        t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+
+        source_lr=source
+        target_lr=target
+
+        target_mask_lr=target_mask
+
+        if downsample is not None:
+            source_lr=minc.tmp(s_base+'_'+str(downsample)+'.mnc')
+            target_lr=minc.tmp(t_base+'_'+str(downsample)+'.mnc')
+
+            minc.resample_smooth(source,source_lr,unistep=downsample)
+            minc.resample_smooth(target,target_lr,unistep=downsample)
+
+            if target_mask is not None:
+                target_mask_lr=minc.tmp(t_base+'_mask_'+str(downsample)+'.mnc')
+                minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte')
+
+        cmd.extend(['-m','{}[{},{},{}]'.format(cost_function,source_lr,target_lr,cost_function_par)])
+        cmd.extend(['-i',iterations])
+        cmd.extend(['-t',transformation])
+        cmd.extend(['-r',reg])
+        cmd.extend(['--number-of-affine-iterations',affine_iterations])
+        cmd.extend(['-o',output_xfm])
+
+        inputs=[source_lr,target_lr]
+        if target_mask_lr is not None and use_mask:
+            inputs.append(target_mask_lr)
+            cmd.extend(['-x',target_mask_lr])
+
+        if use_histogram_matching:
+            cmd.append('--use-Histogram-Matching')
+
+        outputs=[output_xfm ] # TODO: add inverse xfm ?
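+        # only the forward transform is declared as an output here; as the
+        # docstring warns, ANTS also writes an inverted xfm alongside it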
+ + #print(repr(cmd)) + + minc.command(cmd, inputs=inputs, outputs=outputs) + + +def non_linear_register_ants2( + source, target, output_xfm, + target_mask=None, + source_mask=None, + init_xfm =None, + parameters =None, + downsample =None, + start =None, + level =32.0, + verbose =0 + ): + """perform non-linear registration using ANTs, WARNING: will create inverted xfm will be named output_invert.xfm""" + if start is None: + start=level + + with minc_tools.mincTools(verbose=verbose) as minc: + + if parameters is None: + #TODO add more options here + parameters={'conf':{}, + 'blur':{}, + 'shrink':{} + } + else: + if not 'conf' in parameters: parameters['conf'] = {} + if not 'blur' in parameters: parameters['blur'] = {} + if not 'shrink' in parameters: parameters['shrink'] = {} + + prog='' + shrink='' + blur='' + for i in range(int(math.log(start)/math.log(2)),-1,-1): + res=2**i + if res>=level: + prog+= str(parameters['conf']. get(res,parameters['conf']. get(str(res),20))) + shrink+=str(parameters['shrink'].get(res,parameters['shrink'].get(str(res),2**i))) + blur+= str(parameters['blur']. get(res,parameters['blur']. get(str(res),2**i))) + if res>level: + prog+='x' + shrink+='x' + blur+='x' + + if not minc.checkfiles(inputs=[source,target], + outputs=[output_xfm ]): + return + + prog+=','+parameters.get('convergence','1.e-6,10') + + output_base=output_xfm.rsplit('.xfm',1)[0] + + cost_function=parameters.get('cost_function','CC') + cost_function_par=parameters.get('cost_function_par','1,2,Regular,1.0') + + transformation=parameters.get('transformation','SyN[ .25, 2, 0.5 ]') + use_mask=parameters.get('use_mask',True) + use_histogram_matching=parameters.get('use_histogram_matching',False) + use_float=parameters.get('use_float',False) + + winsorize_intensity=parameters.get('winsorize-image-intensities',None) + + cmd=['antsRegistration','--minc','-a','--dimensionality','3'] + + s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + + source_lr=source + target_lr=target + source_mask_lr=source_mask + target_mask_lr=target_mask + + if downsample is not None: + s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + + source_lr=minc.tmp(s_base+'_'+str(downsample)+'.mnc') + target_lr=minc.tmp(t_base+'_'+str(downsample)+'.mnc') + + minc.resample_smooth(source,source_lr,unistep=downsample) + minc.resample_smooth(target,target_lr,unistep=downsample) + + if target_mask is not None: + target_mask_lr=minc.tmp(s_base+'_mask_'+str(downsample)+'.mnc') + minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte') + if target_mask is not None: + target_mask_lr=minc.tmp(s_base+'_mask_'+str(downsample)+'.mnc') + minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte') + + cmd.extend(['--metric','{}[{},{},{}]'.format(cost_function,source_lr,target_lr,cost_function_par)]) + cmd.extend(['--convergence','[{}]'.format(prog)]) + cmd.extend(['--shrink-factors',shrink]) + cmd.extend(['--smoothing-sigmas',blur]) + cmd.extend(['--transform',transformation]) + + cmd.extend(['--output',output_base]) + #cmd.extend(['--save-state',output_xfm]) + + if init_xfm is not None: + cmd.extend(['--initial-fixed-transform',init_xfm]) + + inputs=[source_lr,target_lr] + + if target_mask_lr is not None and source_mask_lr is not None and use_mask: + inputs.extend([source_mask_lr, target_mask_lr]) + 
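+            # '-x [fixed,moving]' below passes both masks to antsRegistration
+            # in a single argument; masking is only active when both masks are
+            # supplied. The schedule built above has one entry per
+            # power-of-two level, e.g. start=32, level=2 with an empty
+            # parameters dict gives
+            #   --convergence [20x20x20x20x20,1.e-6,10]
+            #   --shrink-factors 32x16x8x4x2
+            #   --smoothing-sigmas 32x16x8x4x2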
cmd.extend(['-x','[{},{}]'.format(source_mask_lr, target_mask_lr)]) + + if use_histogram_matching: + cmd.append('--use-histogram-matching') + + if winsorize_intensity is not None: + if isinstance(winsorize_intensity, dict): + cmd.extend(['--winsorize-image-intensities',str(winsorize_intensity.get('low',1)),str(winsorize_intensity.get('high',99))]) + else: + cmd.append('--winsorize-image-intensities') + + if use_float: + cmd.append('--float') + + if verbose>0: + cmd.extend(['--verbose','1']) + + outputs=[output_xfm ] # TODO: add inverse xfm ? + + + print(">>>\n{}\n>>>>".format(' '.join(cmd))) + + minc.command(cmd, inputs=inputs, outputs=outputs) + +def linear_register_ants2( + source, target, output_xfm, + target_mask=None, + source_mask=None, + init_xfm =None, + parameters =None, + downsample =None, + close=False, + verbose=0 + ): + """perform linear registration using ANTs""" + #TODO:implement close + + + with minc_tools.mincTools(verbose=verbose) as minc: + + + if parameters is None: + #TODO add more options here + parameters={ + 'conf':{}, + 'blur':{}, + 'shrink':{} + } + else: + if not 'conf' in parameters: parameters['conf'] = {} + if not 'blur' in parameters: parameters['blur'] = {} + if not 'shrink' in parameters: parameters['shrink'] = {} + + levels=parameters.get('levels',3) + prog='' + shrink='' + blur='' + + for i in range(levels,0,-1): + _i=str(i) + prog+= str(parameters['conf']. get(i,parameters['conf']. get(_i,10000))) + shrink+=str(parameters['shrink'].get(i,parameters['shrink'].get(_i,2**i))) + blur+= str(parameters['blur']. get(i,parameters['blur']. get(_i,2**i))) + + if i>1: + prog+='x' + shrink+='x' + blur+='x' + # TODO: make it a parameter? + prog+=','+parameters.get('convergence','1.e-8,20') + + if not minc.checkfiles(inputs=[source,target], + outputs=[output_xfm ]): + return + + output_base=output_xfm.rsplit('.xfm',1)[0] + + cost_function=parameters.get('cost_function','Mattes') + cost_function_par=parameters.get('cost_function_par','1,32,regular,0.3') + + transformation=parameters.get('transformation','affine[ 0.1 ]') + use_mask=parameters.get('use_mask',True) + use_histogram_matching=parameters.get('use_histogram_matching',False) + winsorize_intensity=parameters.get('winsorize-image-intensities',None) + use_float=parameters.get('use_float',False) + intialize_fixed=parameters.get('initialize_fixed',None) + intialize_moving=parameters.get('intialize_moving',None) + + cmd=['antsRegistration','--collapse-output-transforms', '0', '--minc','-a','--dimensionality','3'] + + s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + + source_lr=source + target_lr=target + source_mask_lr=source_mask + target_mask_lr=target_mask + + if downsample is not None: + s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + + source_lr=minc.tmp(s_base+'_'+str(downsample)+'.mnc') + target_lr=minc.tmp(t_base+'_'+str(downsample)+'.mnc') + + minc.resample_smooth(source,source_lr,unistep=downsample) + minc.resample_smooth(target,target_lr,unistep=downsample) + + if target_mask is not None: + target_mask_lr=minc.tmp(s_base+'_mask_'+str(downsample)+'.mnc') + minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte') + if target_mask is not None: + target_mask_lr=minc.tmp(s_base+'_mask_'+str(downsample)+'.mnc') + 
+                minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte')
+
+        cmd.extend(['--metric','{}[{},{},{}]'.format(cost_function,source_lr,target_lr,cost_function_par)])
+        cmd.extend(['--convergence','[{}]'.format(prog)])
+        cmd.extend(['--shrink-factors',shrink])
+        cmd.extend(['--smoothing-sigmas',blur])
+        cmd.extend(['--transform',transformation])
+        cmd.extend(['--output',output_base])
+        #cmd.extend(['--save-state',output_xfm])
+
+        if init_xfm is not None:
+            cmd.extend(['--initial-fixed-transform',init_xfm])
+            # this is a hack in an attempt to make the initial linear transform
+            # work as expected; currently the center of the transform (i.e. the
+            # center of rotation) appears to be mishandled, which causes lots
+            # of problems
+            cmd.extend(['--initialize-transforms-per-stage','1'])
+        elif intialize_fixed is not None:
+            cmd.extend(['--initial-fixed-transform',"[{},{},{}]".format(source_lr,target_lr,str(intialize_fixed))])
+        elif not close:
+            cmd.extend(['--initial-fixed-transform',"[{},{},{}]".format(source_lr,target_lr,'0')])
+
+        if intialize_moving is not None:
+            cmd.extend(['--initial-moving-transform',"[{},{},{}]".format(source_lr,target_lr,str(intialize_moving))])
+        elif not close:
+            cmd.extend(['--initial-moving-transform',"[{},{},{}]".format(source_lr,target_lr,'0')])
+
+        inputs=[source_lr,target_lr]
+
+        if target_mask_lr is not None and source_mask_lr is not None and use_mask:
+            inputs.extend([source_mask_lr, target_mask_lr])
+            cmd.extend(['-x','[{},{}]'.format(source_mask_lr, target_mask_lr)])
+
+        if use_histogram_matching:
+            cmd.append('--use-histogram-matching')
+
+        if winsorize_intensity is not None:
+            if isinstance(winsorize_intensity, dict):
+                cmd.extend(['--winsorize-image-intensities',str(winsorize_intensity.get('low',1)),str(winsorize_intensity.get('high',99))])
+            else:
+                cmd.append('--winsorize-image-intensities')
+
+        if use_float:
+            cmd.append('--float')
+
+        if verbose>0:
+            cmd.extend(['--verbose','1'])
+
+        outputs=[output_xfm ] # TODO: add inverse xfm ?
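+        # with the defaults above (levels=3) the assembled stages are, for
+        # hypothetical inputs s.mnc/t.mnc:
+        #   --metric Mattes[s.mnc,t.mnc,1,32,regular,0.3]
+        #   --convergence [10000x10000x10000,1.e-8,20]
+        #   --shrink-factors 8x4x2
+        #   --smoothing-sigmas 8x4x2
+        #   --transform affine[ 0.1 ]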
+ minc.command(cmd, inputs=inputs, outputs=outputs,verbose=verbose) + + +def parse_options(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Run ANTs registration" ) + + parser.add_argument("source", + help="Source file") + + parser.add_argument("target", + help="Target file") + + parser.add_argument("--output", + help="Output transformation file, MINC xfm format", + default=None) + + parser.add_argument("--source_mask", + default= None, + help="Source mask") + + parser.add_argument("--target_mask", + default= None, + help="Target mask") + + parser.add_argument("--init", + default = None, + help="Initial transformation, minc format") + + parser.add_argument("--downsample", + default = None, + help="Downsample to given voxel size ", + type=float) + + parser.add_argument("--start", + default = 32, + help="Start level ", + type=float) + + parser.add_argument("--level", + default = 2, + help="Final level ", + type=float) + + parser.add_argument("--iter", + default = '20x20x20x20x20', + help="Non-linear iterations ") + + + parser.add_argument("--cost", + default="Mattes", + help="Cost Function", + choices=[ "Mattes", + "CC", + "MI", + "MeanSquares", + "Demons", + "GC"]) + + parser.add_argument("--par", + default="1,32,regular,0.3", + help="Cost Function parameters", + ) + + parser.add_argument("--nl", + dest="nl", + action="store_true", + help="Use nonlinear mode", + default=False) + + parser.add_argument("--lin", + dest="nl", + action="store_false", + help="Use linear mode", + default=False) + + parser.add_argument("--close", + dest="close", + action="store_true", + help="Start close", + default=False) + + parser.add_argument("--verbose", + default = 0, + help="Verbosity level ", + type=int) + + parser.add_argument("--transform", + default=None, + help="Transform options, default affine[0.1] for linear and SyN[.25,2,0.5] for nonlinear") + + options = parser.parse_args() + return options + + +if __name__ == "__main__": + options = parse_options() + + if options.source is None or options.target is None: + print("Error in arguments, run with --help") + print(repr(options)) + else: + + parameters= { 'conf': {}, + 'blur': {}, + 'shrink':{}, + 'convergence':'1.e-8,20', + 'cost_function':options.cost, + 'cost_function_par':options.par, + 'use_histogram_matching':False, + 'transformation':'affine[ 0.1 ]' + } + + if options.nl: + + conf=options.iter.split('x') + + for (i,j) in zip(range(int(math.log(options.start)/math.log(2)),-1,-1),conf): + res=2**i + if res>=options.level: + parameters['conf'][str(res)]=j + + if options.transform is not None: + parameters['transformation']=options.transform + else: + parameters['transformation']='SyN[.25,2,0.5]' + + non_linear_register_ants2( + options.source, options.target, + options.output, + source_mask= options.source_mask, + target_mask= options.target_mask, + init_xfm = options.init, + parameters = parameters, + downsample = options.downsample, + start = options.start, + level = options.level, + verbose = options.verbose + ) + else: + if options.transform is not None: + parameters['transformation']=options.transform + + linear_register_ants2( + options.source, options.target, + options.output, + source_mask= options.source_mask, + target_mask= options.target_mask, + init_xfm = options.init, + parameters = parameters, + downsample = options.downsample, + close = options.close, + verbose = options.verbose + ) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs 
on;word-wrap-column 80
diff --git a/ipl/create_pairwise_registrations.py b/ipl/create_pairwise_registrations.py
new file mode 100755
index 0000000..7700a85
--- /dev/null
+++ b/ipl/create_pairwise_registrations.py
@@ -0,0 +1,336 @@
+#! /usr/bin/env python
+
+
+import shutil
+import os
+import json
+
+from iplMincTools import mincTools,mincError
+from scoop import futures, shared
+
+def generate_xfm_model(i , j, xfm1, xfm2, mri1, mri2, mask1, mask2, seg1, seg2, output_base,step=2,baa=False):
+    with mincTools(verbose=2) as minc:
+        # all xfms map subjects into the common space, so mapping one subject
+        # onto another is xfm1 * xfm2^-1
+        minc.xfminvert(xfm2,minc.tmp('xfm2_inv.xfm'))
+        # concatenate xfms
+        minc.xfmconcat([xfm1,minc.tmp('xfm2_inv.xfm')],minc.tmp('xfm1_dot_xfm2_inv.xfm'))
+        # normalize xfms
+        minc.xfm_normalize(minc.tmp('xfm1_dot_xfm2_inv.xfm'),mri1,output_base+'_map.xfm',step=step)
+
+        # resample mris
+        minc.resample_smooth(mri2,minc.tmp('mri2.mnc'),transform=output_base+'_map.xfm',invert_transform=True)
+        # resample segs
+        minc.resample_labels(seg2,minc.tmp('seg2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa)
+        minc.resample_labels(mask2,minc.tmp('mask2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa)
+        # calculate intensity-based similarity measures (cc, nmi, ncc, msq)
+        cc  = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='cc')
+        nmi = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='nmi')
+        ncc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='ncc')
+        msq = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='msq')
+        # calculate label overlap
+        gtc = minc.label_similarity(seg1,minc.tmp('seg2.mnc'),method='gtc')
+
+        # write out result
+        with open(output_base+'_similarity.txt','w') as f:
+            f.write("{},{},{},{},{},{},{}\n".format(i,j,cc,ncc,nmi,msq,gtc))
+
+def generate_xfm_direct_minctracc(i , j, mri1, mri2, mask1, mask2, seg1, seg2, output_base,step=2,baa=False):
+    with mincTools(verbose=2) as minc:
+        # direct non-linear registration with minctracc
+        minc.non_linear_register_full(mri1,mri2,output_base+'_map.xfm',level=step,source_mask=mask1,target_mask=mask2)
+
+        # resample mris
+        minc.resample_smooth(mri2,minc.tmp('mri2.mnc'),transform=output_base+'_map.xfm',invert_transform=True)
+        # resample segs
+        minc.resample_labels(seg2,minc.tmp('seg2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa)
+        minc.resample_labels(mask2,minc.tmp('mask2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa)
+        # calculate intensity-based similarity measures (cc, nmi, ncc, msq)
+        cc  = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='cc')
+        nmi = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='nmi')
+        ncc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='ncc')
+        msq = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='msq')
+        # calculate label overlap
+        gtc = minc.label_similarity(seg1,minc.tmp('seg2.mnc'),method='gtc')
+
+        # write out result
+        with open(output_base+'_similarity.txt','w') as f:
+            f.write("{},{},{},{},{},{},{}\n".format(i,j,cc,ncc,nmi,msq,gtc))
+
+
+def generate_xfm_direct_ANTS_CC(i , j, mri1, mri2, mask1, mask2, seg1, seg2, output_base,baa=False,step=2):
+    with mincTools(verbose=2) as minc:
+        # direct non-linear registration with ANTS (CC cost)
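+        # the dict below feeds minc.non_linear_register_ants; 'iter' is the
+        # per-level iteration schedule, coarse to fine (the trailing '00'
+        # runs zero iterations at the finest resolution)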
param_cc={'cost_function':'CC','iter':'40x40x40x00'} + + minc.non_linear_register_ants(mri1,mri2,minc.tmp('transform.xfm'),target_mask=mask2,parameters=param_cc) + minc.xfm_normalize(minc.tmp('transform.xfm'),mri1,output_base+'_map.xfm',step=step) + + # resample mris + minc.resample_smooth(mri2,minc.tmp('mri2.mnc'),transform=output_base+'_map.xfm',invert_transform=True) + # resample segs + minc.resample_labels(seg2,minc.tmp('seg2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + minc.resample_labels(mask2,minc.tmp('mask2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + # calculate CC, MI + cc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='cc') + nmi = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='nmi') + ncc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='ncc') + msq = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='msq') + # calculate label overlap + gtc = minc.label_similarity(seg1,minc.tmp('seg2.mnc'),method='gtc') + + # write out result + with open(output_base+'_similarity.txt','w') as f: + f.write("{},{},{},{},{},{},{}\n".format(i,j,cc,ncc,nmi,msq,gtc)) + + +def generate_xfm_direct_ANTS_MI(i , j, mri1, mri2, mask1, mask2, seg1, seg2, output_base,baa=False,step=2): + with mincTools(verbose=2) as minc: + # normalize xfms + param_mi={'cost_function':'MI','iter':'40x40x40x00','cost_function_par':'1,32'} + + minc.non_linear_register_ants(mri1,mri2,minc.tmp('transform.xfm'),target_mask=mask2,parameters=param_mi) + minc.xfm_normalize(minc.tmp('transform.xfm'),mri1,output_base+'_map.xfm',step=step) + + # resample mris + minc.resample_smooth(mri2,minc.tmp('mri2.mnc'),transform=output_base+'_map.xfm',invert_transform=True) + # resample segs + minc.resample_labels(seg2,minc.tmp('seg2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + minc.resample_labels(mask2,minc.tmp('mask2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + # calculate CC, MI + cc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='cc') + nmi = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='nmi') + ncc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='ncc') + msq = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='msq') + # calculate label overlap + gtc = minc.label_similarity(seg1,minc.tmp('seg2.mnc'),method='gtc') + + # write out result + with open(output_base+'_similarity.txt','w') as f: + f.write("{},{},{},{},{},{},{}\n".format(i,j,cc,ncc,nmi,msq,gtc)) + +def generate_xfm_direct_elastix_cc(i , j, mri1, mri2, mask1, mask2, seg1, seg2, output_base,baa=False,step=2): + with mincTools(verbose=2) as minc: + + param_cc=""" +(FixedInternalImagePixelType "float") +(MovingInternalImagePixelType "float") +(FixedImageDimension 3) +(MovingImageDimension 3) +(UseDirectionCosines "true") + +(ShowExactMetricValue "false") + +(Registration "MultiResolutionRegistration") +(Interpolator "BSplineInterpolator" ) +(ResampleInterpolator "FinalBSplineInterpolator" ) +(Resampler "DefaultResampler" ) + +(FixedImagePyramid "FixedSmoothingImagePyramid") +(MovingImagePyramid "MovingSmoothingImagePyramid") + +(Optimizer "AdaptiveStochasticGradientDescent") 
+(Transform "BSplineTransform") +(Metric "AdvancedNormalizedCorrelation") + +(FinalGridSpacingInPhysicalUnits 4) + +(HowToCombineTransforms "Compose") + +(ErodeMask "false") + +(NumberOfResolutions 3) + +(ImagePyramidSchedule 8 8 8 4 4 4 2 2 2) + +(MaximumNumberOfIterations 2000 2000 2000 ) +(MaximumNumberOfSamplingAttempts 3) + +(NumberOfSpatialSamples 1024 1024 4096 ) + +(NewSamplesEveryIteration "true") +(ImageSampler "Random" ) + +(BSplineInterpolationOrder 1) + +(FinalBSplineInterpolationOrder 3) + +(DefaultPixelValue 0) + +(WriteResultImage "false") + +// The pixel type and format of the resulting deformed moving image +(ResultImagePixelType "float") +(ResultImageFormat "mnc") +""" + # normalize xfms + minc.register_elastix(mri1,mri2,output_xfm=minc.tmp('transform.xfm'),source_mask=mask1,target_mask=mask2,parameters=param_cc) + minc.xfm_normalize(minc.tmp('transform.xfm'),mri1,output_base+'_map.xfm',step=step) + + # resample mris + minc.resample_smooth(mri2,minc.tmp('mri2.mnc'),transform=output_base+'_map.xfm',invert_transform=True) + # resample segs + minc.resample_labels(seg2,minc.tmp('seg2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + minc.resample_labels(mask2,minc.tmp('mask2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + # calculate CC, MI + cc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='cc') + nmi = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='nmi') + ncc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='ncc') + msq = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='msq') + # calculate label overlap + gtc = minc.label_similarity(seg1,minc.tmp('seg2.mnc'),method='gtc') + + # write out result + with open(output_base+'_similarity.txt','w') as f: + f.write("{},{},{},{},{},{},{}\n".format(i,j,cc,ncc,nmi,msq,gtc)) + + +def generate_xfm_direct_elastix_mi(i , j, mri1, mri2, mask1, mask2, seg1, seg2, output_base,baa=False,step=2): + with mincTools(verbose=2) as minc: + + param_mi=""" +(FixedInternalImagePixelType "float") +(MovingInternalImagePixelType "float") +(FixedImageDimension 3) +(MovingImageDimension 3) +(UseDirectionCosines "true") + +(ShowExactMetricValue "false") + +(Registration "MultiResolutionRegistration") +(Interpolator "BSplineInterpolator" ) +(ResampleInterpolator "FinalBSplineInterpolator" ) +(Resampler "DefaultResampler" ) + +(FixedImagePyramid "FixedSmoothingImagePyramid") +(MovingImagePyramid "MovingSmoothingImagePyramid") + +(Optimizer "AdaptiveStochasticGradientDescent") +(Transform "BSplineTransform") +(Metric "AdvancedMattesMutualInformation") + +(FinalGridSpacingInPhysicalUnits 4) + +(HowToCombineTransforms "Compose") + +(ErodeMask "false") + +(NumberOfResolutions 3) + +(ImagePyramidSchedule 8 8 8 4 4 4 2 2 2) + +(MaximumNumberOfIterations 2000 2000 2000 ) +(MaximumNumberOfSamplingAttempts 3) + +(NumberOfSpatialSamples 1024 1024 4096 ) + +(NewSamplesEveryIteration "true") +(ImageSampler "Random" ) + +(BSplineInterpolationOrder 1) + +(FinalBSplineInterpolationOrder 3) + +(DefaultPixelValue 0) + +(WriteResultImage "false") + +// The pixel type and format of the resulting deformed moving image +(ResultImagePixelType "float") +(ResultImageFormat "mnc") +""" + # normalize xfms + minc.register_elastix(mri1,mri2,output_xfm=minc.tmp('transform.xfm'),source_mask=mask1,target_mask=mask2,parameters=param_mi) + 
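+        # every direct variant follows the same recipe: estimate the
+        # transform, regularize it onto a common grid with xfm_normalize
+        # (step mm spacing), then resample and compute the same similarity
+        # measures so that all methods are directly comparable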
+        minc.xfm_normalize(minc.tmp('transform.xfm'),mri1,output_base+'_map.xfm',step=step)
+
+        # resample mris
+        minc.resample_smooth(mri2,minc.tmp('mri2.mnc'),transform=output_base+'_map.xfm',invert_transform=True)
+        # resample segs
+        minc.resample_labels(seg2,minc.tmp('seg2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa)
+        minc.resample_labels(mask2,minc.tmp('mask2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa)
+        # calculate intensity-based similarity measures (cc, nmi, ncc, msq)
+        cc  = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='cc')
+        nmi = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='nmi')
+        ncc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='ncc')
+        msq = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='msq')
+        # calculate label overlap
+        gtc = minc.label_similarity(seg1,minc.tmp('seg2.mnc'),method='gtc')
+
+        # write out result
+        with open(output_base+'_similarity.txt','w') as f:
+            f.write("{},{},{},{},{},{},{}\n".format(i,j,cc,ncc,nmi,msq,gtc))
+
+
+if __name__ == '__main__':
+    model='model_nl'
+    output='pairwise'
+    input_prefix='minc_prep_bbox/'
+    step_size=2
+
+    model_results={}
+
+    with open(model+os.sep+'results.json','r') as f:
+        model_results=json.load(f)
+
+    if not os.path.exists(output):
+        os.makedirs(output)
+    # generate mri, mask and seg names from the model scan list
+    # TODO: replace with CSV file input
+    # (rsplit, not rstrip, cuts the '.mnc' suffix: rstrip removes a set of
+    # characters, not a substring)
+    mri= [input_prefix+k['name'] for k in model_results['scan']]
+    mask=[input_prefix+k['name'].rsplit('.mnc',1)[0]+'_mask.mnc' for k in model_results['scan']]
+    seg= [input_prefix+k['name'].rsplit('.mnc',1)[0]+'_glm.mnc' for k in model_results['scan']]
+
+    print(repr(mri))
+    print(repr(mask))
+    print(repr(seg))
+    rr=[]
+
+    # generate uniform file names!
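+    # output naming scheme used by the loop below, one prefix per method:
+    #   A = concatenated model-space xfms, B = minctracc,
+    #   C = ANTS/CC, D = ANTS/MI, E = elastix/CC, F = elastix/MI;
+    # each writes <X>_<i>_<k>_map.xfm and <X>_<i>_<k>_similarity.txt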
+ for (i,j) in enumerate(model_results['xfm']): + for (k,t) in enumerate(model_results['xfm']): + if i!=k: + if not os.path.exists(output+os.sep+'A_{:02d}_{:02d}_map.xfm'.format(i,k) ) : + rr.append( futures.submit( generate_xfm_model,i,k, + j['xfm'],t['xfm'], + mri[i],mri[k], + mask[i],mask[k], + seg[i],seg[k], + output+os.sep+'A_{:02d}_{:02d}'.format(i,k) ) ) + + if not os.path.exists(output+os.sep+'B_{:02d}_{:02d}_map.xfm'.format(i,k) ) : + rr.append( futures.submit( generate_xfm_direct_minctracc,i,k, + mri[i],mri[k], + mask[i],mask[k], + seg[i],seg[k], + output+os.sep+'B_{:02d}_{:02d}'.format(i,k), + step=2) ) + + if not os.path.exists(output+os.sep+'C_{:02d}_{:02d}_map.xfm'.format(i,k) ) : + rr.append( futures.submit( generate_xfm_direct_ANTS_CC,i,k, + mri[i],mri[k], + mask[i],mask[k], + seg[i],seg[k], + output+os.sep+'C_{:02d}_{:02d}'.format(i,k) ) ) + + if not os.path.exists( output+os.sep+'D_{:02d}_{:02d}_map.xfm'.format(i,k) ) : + rr.append( futures.submit( generate_xfm_direct_ANTS_MI,i,k, + mri[i],mri[k], + mask[i],mask[k], + seg[i],seg[k], + output+os.sep+'D_{:02d}_{:02d}'.format(i,k) ) ) + + if not os.path.exists( output+os.sep+'E_{:02d}_{:02d}_map.xfm'.format(i,k) ) : + rr.append( futures.submit( generate_xfm_direct_elastix_cc,i,k, + mri[i],mri[k], + mask[i],mask[k], + seg[i],seg[k], + output+os.sep+'E_{:02d}_{:02d}'.format(i,k) ) ) + + if not os.path.exists( output+os.sep+'F_{:02d}_{:02d}_map.xfm'.format(i,k) ) : + rr.append( futures.submit( generate_xfm_direct_elastix_mi,i,k, + mri[i],mri[k], + mask[i],mask[k], + seg[i],seg[k], + output+os.sep+'F_{:02d}_{:02d}'.format(i,k) ) ) + + futures.wait(rr, return_when=futures.ALL_COMPLETED) +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/dd_registration.py b/ipl/dd_registration.py new file mode 100644 index 0000000..d8d2126 --- /dev/null +++ b/ipl/dd_registration.py @@ -0,0 +1,272 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date 29/06/2015 +# +# registration tools + + +from __future__ import print_function + +import os +import sys +import shutil +import tempfile +import subprocess +import re +import fcntl +import traceback +import collections +import math + +# local stuff +import minc_tools + + +# hack to make it work on Python 3 +try: + unicode = unicode +except NameError: + # 'unicode' is undefined, must be Python 3 + str = str + unicode = str + bytes = bytes + basestring = (str,bytes) +else: + # 'unicode' exists, must be Python 2 + str = str + unicode = unicode + bytes = str + basestring = basestring + +def non_linear_register_ldd( + source, target, + output_velocity, + output_xfm=None, + source_mask=None, + target_mask=None, + init_xfm= None, + init_velocity=None, + level=2, + start=32, + parameters=None, + work_dir=None, + downsample=None + ): + """Use log-diffeomorphic demons to run registration""" + + with minc_tools.mincTools() as minc: + if not minc.checkfiles(inputs=[source,target], + outputs=[output_velocity]): + return + if parameters is None: + parameters={'conf':{}, + 'smooth_update':2, + 'smooth_field':2, + 'update_rule':1, + 'grad_type':0, + 'max_step':2.0, + 'hist_match':True, + 'LCC': False } + + LCC=parameters.get('LCC',False) + + source_lr=source + target_lr=target + source_mask_lr=source_mask + target_mask_lr=target_mask + + if downsample is not None: + s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + source_lr=minc.tmp(s_base+'_'+str(downsample)+'.mnc') + target_lr=minc.tmp(t_base+'_'+str(downsample)+'.mnc') + + minc.resample_smooth(source,source_lr,unistep=downsample) + minc.resample_smooth(target,target_lr,unistep=downsample) + + if target_mask is not None: + target_mask_lr=minc.tmp(s_base+'_mask_'+str(downsample)+'.mnc') + minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte') + if target_mask is not None: + target_mask_lr=minc.tmp(s_base+'_mask_'+str(downsample)+'.mnc') + minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte') + + + prog='' + + for i in range(int(math.log(start)/math.log(2)),-1,-1): + res=2**i + if res>=level: + prog+=str(parameters['conf'].get(res,20)) + else: + prog+='0' + if i>0: + prog+='x' + + inputs=[source,target] + cmd=None + + if LCC: + cmd=['rpiLCClogDemons', + '-f',source_lr,'-m', target_lr, + '--output-transform', output_velocity, + '-S',str(parameters.get('tradeoff',0.15)), + '-u',str(parameters.get('smooth_update',2)), + '-d',str(parameters.get('smooth_field',2)), + '-C',str(parameters.get('smooth_similarity',3)), + '-b',str(parameters.get('bending_weight',1)), + '-x',str(parameters.get('harmonic_weight',0)), + '-r',str(parameters.get('update_rule',2)), + '-g',str(parameters.get('grad_type',0)), + '-l',str(parameters.get('max_step',2.0)), + '-a',prog ] + + if parameters.get('hist_match',True): + cmd.append('--use-histogram-matching') + + # generate programm + if source_mask_lr is not None: + cmd.extend(['--mask-image', source_mask_lr]) + inputs.append(source_mask_lr) + + if init_velocity is not None: + cmd.extend(['--initial-transform',init_velocity]) + inputs.append(init_velocity) + else: + cmd=['LogDomainDemonsRegistration', + '-f',source_lr,'-m', target_lr, + '--outputVel-field', output_velocity, + '-g',str(parameters.get('smooth_update',2)), + '-s',str(parameters.get('smooth_field',2)), + '-a',str(parameters.get('update_rule',1)), + '-t',str(parameters.get('grad_type',0)), + 
'-l',str(parameters.get('max_step',2.0)), + '-i',prog ] + + if parameters.get('hist_match',True): + cmd.append('--use-histogram-matching') + + # generate programm + if source_mask_lr is not None: + cmd.extend(['--fixed-mask', source_mask_lr]) + inputs.append(source_mask_lr) + + if target_mask_lr is not None: + cmd.extend(['--moving-mask', target_mask_lr]) + inputs.append(target_mask_lr) + + if init_velocity is not None: + cmd.extend(['--input-field',init_velocity]) + inputs.append(init_velocity) + + if init_xfm is not None: + cmd.extend(['--input-transform',init_xfm]) + inputs.append(init_xfm) + + if output_xfm is not None: + cmd.extend(['--outputDef-field',output_xfm]) + outputs.append(output_xfm) + + outputs=[output_velocity] + + minc.command(cmd, inputs=inputs, outputs=outputs) + # todo add dependency for masks + +def non_linear_register_dd( + source, + target, + output_xfm, + source_mask=None, + target_mask=None, + init_xfm=None, + level=4, + start=32, + parameters=None, + work_dir=None, + downsample=None + ): + """perform incremental non-linear registration with diffeomorphic demons""" + + with minc_tools.mincTools() as minc: + if not minc.checkfiles(inputs=[source,target], + outputs=[output_xfm]): + return + + if parameters is None: + parameters={'conf':{}, + 'smooth_update':2, + 'smooth_field':2, + 'update_rule':0, + 'grad_type':0, + 'max_step':2.0, + 'hist_match':True } + + + source_lr=source + target_lr=target + source_mask_lr=source_mask + target_mask_lr=target_mask + + if downsample is not None: + s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + source_lr=minc.tmp(s_base+'_'+str(downsample)+'.mnc') + target_lr=minc.tmp(t_base+'_'+str(downsample)+'.mnc') + + minc.resample_smooth(source,source_lr,unistep=downsample) + minc.resample_smooth(target,target_lr,unistep=downsample) + + if target_mask is not None: + target_mask_lr=minc.tmp(s_base+'_mask_'+str(downsample)+'.mnc') + minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte') + if target_mask is not None: + target_mask_lr=minc.tmp(s_base+'_mask_'+str(downsample)+'.mnc') + minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte') + + prog='' + + for i in range(int(math.log(start)/math.log(2)),-1,-1): + res=2**i + if res>=level: + prog+=str(parameters['conf'].get(res,20)) + else: + prog+='0' + if i>0: + prog+='x' + + inputs=[source_lr,target_lr] + cmd=['DemonsRegistration', + '-f',source_lr,'-m', target_lr, + '--outputDef-field', output_xfm, + '-g',str(parameters.get('smooth_update',2)), + '-s',str(parameters.get('smooth_field',2)), + '-a',str(parameters.get('update_rule',0)), + '-t',str(parameters.get('grad_type',0)), + '-l',str(parameters.get('max_step',2.0)), + '-i',prog ] + + if parameters.get('hist_match',True): + cmd.append('--use-histogram-matching') + # generate programm + + if source_mask_lr is not None: + cmd.extend(['--fixed-mask', source_mask_lr]) + inputs.append(source_mask_lr) + + if target_mask_lr is not None: + cmd.extend(['--moving-mask', target_mask_lr]) + inputs.append(target_mask_lr) + + if init_xfm is not None: + cmd.extend(['--input-transform',init_xfm]) + inputs.append(init_xfm) + + outputs=[output_xfm] + + minc.command(cmd, inputs=inputs, outputs=outputs) + # todo add dependency for masks + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80 diff --git a/ipl/elastix_registration.py b/ipl/elastix_registration.py 
new file mode 100755 index 0000000..cf1547f --- /dev/null +++ b/ipl/elastix_registration.py @@ -0,0 +1,729 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date 29/06/2015 +# +# registration tools + + +from __future__ import print_function + +import os +import sys +import shutil +import tempfile +import subprocess +import re +import fcntl +import traceback +import collections +import math + +# command-line interface +import argparse + +# local stuff +import minc_tools + + +# hack to make it work on Python 3 +try: + unicode = unicode +except NameError: + # 'unicode' is undefined, must be Python 3 + str = str + unicode = str + bytes = bytes + basestring = (str,bytes) +else: + # 'unicode' exists, must be Python 2 + str = str + unicode = unicode + bytes = str + basestring = basestring + +def parse_tags(tag): + tags=[] + #p = re.compile(r'\S') + volumes=1 + with open(tag,'r') as f: + started=False + for line in f: + line=line.rstrip('\r\n') + + if not started: + m = re.match(".*Volumes = (\S)",line) + + if re.match(".*Points =",line): + started=True + continue + elif m is not None : + volumes=int(m.group(1)) + else: + if re.match('.*;',line) is not None: # this is the last line + line=line.replace(';','') + # last line? + c=line.split(' ') + if len(c[0])==0: + c.pop(0) + + #print(','.join(c)) + #shift @c unless $c[0]; #protection against empty first parameter + #push(@tags, [$c[0], $c[1], $c[2], $c[3], $c[4], $c[5]] ); + tags.append([float(i) for i in c]) + + return (volumes,tags) + +def tag2elx(tags,out1,out2): + (vols,tags)=parse_tags(tags) + + with open(out1,'w') as f: + f.write("point\n{}\n".format(len(tags))) + for i in tags: + f.write("{} {} {}\n".format(i[0],i[1],i[2])) + + if vols>1: + with open(out2,'w') as f: + f.write("point\n{}\n".format(len(tags))) + for i in tags: + f.write("{} {} {}\n".format(i[3],i[4],i[5])) + + return vols + + +def nl_xfm_to_elastix(xfm, elastix_par): + """Convert MINC style xfm into elastix style registration parameters + Assuming that xfm file is strictly non-linear, with a single non-linear deformation field + """ + # TODO: make a proper parsing of XFM file + with minc_tools.mincTools() as minc: + grid=xfm.rsplit('.xfm',1)[0]+'_grid_0.mnc' + if not os.path.exists(grid): + print("nl_xfm_to_elastix error!") + raise minc_tools.mincError("Unfortunately currently only a very primitive way of dealing with Minc XFM files is implemented\n{}".format(traceback.format_exc())) + + with open(elastix_par,'w') as f: + f.write("(Transform \"DeformationFieldTransform\")\n") + f.write("(DeformationFieldInterpolationOrder 0)\n") + f.write("(DeformationFieldFileName \"{}\")\n".format(grid)) + return elastix_par + + +def lin_xfm_to_elastix(xfm,elastix_par): + """Convert MINC style xfm into elastix style registration parameters + Assuming that xfm fiel is strictly linear + """ + with minc_tools.mincTools() as minc: + minc.command(['itk_convert_xfm',xfm,minc.tmp('input.txt')], + inputs=xfm,outputs=[minc.tmp('input.txt')]) + # parsing text transformation + param=None + fix_param=None + + with open(minc.tmp('input.txt'),'r') as f: + for ln in f: + if re.match('^Parameters: ', ln): + param=ln.split(' ') + if re.match('^FixedParameters: ', ln): + fix_param=ln.split(' ') + param.pop(0) + fix_param.pop(0) + with open(minc.tmp('elastix_par'),'w') as f: + f.write('''(Transform "AffineTransform") +(NumberOfParameters 12) +(TransformParameters {}) +(InitialTransformParametersFileName "NoInitialTransform") +(HowToCombineTransforms "Compose") + +// 
EulerTransform specific +(CenterOfRotationPoint {}) +'''.format(' '.join(param),' '.join(fix_param))) + + +def nl_elastix_to_xfm(elastix_par, xfm, downsample_grid=None, nl=True ): + """Convert elastix transformation file into minc XFM file""" + with minc_tools.mincTools() as minc: + threads=os.environ.get('ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS',1) + cmd=['transformix', '-tp', elastix_par, '-out', minc.tempdir,'-xfm', xfm, '-q', '-threads', str(threads)] + + if nl: + cmd.extend(['-def', 'all']) + if downsample_grid is not None: + cmd.extend(['-sub',str(downsample_grid)]) + + minc.command(cmd, inputs=[elastix_par], outputs=[xfm]); + return xfm + + +def register_elastix( + source, target, + output_par = None, + output_xfm = None, + source_mask= None, + target_mask= None, + init_xfm = None, + init_par = None, + parameters = None, + work_dir = None, + downsample = None, + downsample_grid=None, + nl = True, + output_log = None, + tags = None, + verbose = 0): + """Run elastix with given parameters + Arguments: + source -- source image (fixed image in Elastix notation) + target -- target, or reference image (moving image in Elastix notation) + + Keyword arguments: + output_par -- output transformation in elastix format + output_xfm -- output transformation in MINC XFM format + source_mask -- source mask + target_mask -- target mask + init_xfm -- initial transform in XFM format + init_par -- initial transform in Elastix format + parameters -- parameters for transformation + if it is a string starting with @ it's a text file name that contains + parameters in elastix format + if it any other string - it should be treated as transformation parameters in elastix format + if it is a dictionary: + for non-linear mode (nl==True): + "optimizer" , "AdaptiveStochasticGradientDescent" (default for nonlinear) + "CMAEvolutionStrategy" (default for linear) + "ConjugateGradient" + "ConjugateGradientFRPR" + "FiniteDifferenceGradientDescent" + "QuasiNewtonLBFGS" + "RegularStepGradientDescent" + "RSGDEachParameterApart" + + "transform", "BSplineTransform" (default for nonlinear mode) + "SimilarityTransform" (default for linear) + "AffineTransform" + "AffineDTITransform" + "EulerTransform" + "MultiBSplineTransformWithNormal" + "TranslationTransform" + + "metric" , "AdvancedNormalizedCorrelation" (default) + "AdvancedMattesMutualInformation" + "NormalizedMutualInformation" + "AdvancedKappaStatistic" + "KNNGraphAlphaMutualInformation" + + "resolutions", 3 - number of resolution steps + "pyramid","8 8 8 4 4 4 2 2 2" - downsampling schedule + "iterations",4000 - number of iterations + "samples",4096 - number of samples + "sampler", "Random" (default) + "Full" + "RandomCoordinate" + "Grid" TODO: add SampleGridSpacing + "RandomSparseMask" + + "grid_spacing",10 - grid spacing in mm + "max_step","1.0" - maximum step (mm) + + for linear mode (nl==False): + "optimizer","CMAEvolutionStrategy" - optimizer + "transform","SimilarityTransform" - transform + "metric","AdvancedNormalizedCorrelation" - cost function + "resolutions", 3 - number of resolutions + "pyramid","8 8 8 4 4 4 2 2 2" - resampling schedule + "iterations",4000 - number of iterations + "samples",4096 - number of samples + "sampler","Random" - sampler + "max_step","1.0" - max step + "automatic_transform_init",True - perform automatic transform initialization + "automatic_transform_init_method", - type of automatic transform initalization method, + "CenterOfGravity" (default) + "GeometricalCenter" - center of the image based + work_dir -- Work directory + downsample 
-- Downsample input images + downsample_grid -- Downsample output nl-deformation + nl -- flag to show that non-linear version is running + output_log -- output log + """ + with minc_tools.mincTools(verbose=2) as minc: + s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + + source_lr=source + target_lr=target + + source_mask_lr=source_mask + target_mask_lr=target_mask + use_mask=True + + if (init_par is not None) and (init_xfm is not None): + print("register_elastix: init_xfm={} init_par={}".format(repr(init_xfm),repr(init_par))) + raise minc_tools.mincError("Specify either init_xfm or init_par") + + outputs=[] + if output_par is not None: outputs.append(output_par) + if output_xfm is not None: outputs.append(output_xfm) + + if len(outputs)>0 and (not minc.checkfiles( inputs=[source,target], + outputs=outputs )): + return + + threads=os.environ.get('ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS',1) + + if parameters is None: + parameters={} + #print("Running elastix with parameters:{}".format(repr(parameters))) + # figure out what to do here: + with minc_tools.cache_files(work_dir=work_dir,context='elastix') as tmp: + + if init_xfm is not None: + if nl: + init_par=nl_xfm_to_elastix(init_xfm, tmp.cache('init.txt')) + else: + init_par=lin_xfm_to_elastix(init_xfm, tmp.cache('init.txt')) + + # a fitting we shall go... + if downsample is not None: + source_lr=tmp.cache(s_base+'_'+str(downsample)+'.mnc') + target_lr=tmp.cache(t_base+'_'+str(downsample)+'.mnc') + + minc.resample_smooth(source,source_lr,unistep=downsample) + minc.resample_smooth(target,target_lr,unistep=downsample) + + if source_mask is not None: + source_mask_lr=tmp.cache(s_base+'_mask_'+str(downsample)+'.mnc') + minc.resample_labels(source_mask,source_mask_lr,unistep=downsample,datatype='byte') + + if target_mask is not None: + target_mask_lr=tmp.cache(s_base+'_mask_'+str(downsample)+'.mnc') + minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte') + + par_file=tmp.cache('parameters.txt') + measure_mode=False + # paramters could be stored in a file + if isinstance(parameters, dict): + use_mask=parameters.get('use_mask',True) + measure_mode=parameters.get('measure',False) + def_iterations=4000 + + if measure_mode: + def_iterations=1 + parameters['iterations']=1 + + with open(par_file,'w') as p: + if nl: + p.write(''' +(FixedInternalImagePixelType "float") +(MovingInternalImagePixelType "float") +(FixedImageDimension 3) +(MovingImageDimension 3) +(UseDirectionCosines "true") + +(Registration "MultiResolutionRegistration") +(Interpolator "BSplineInterpolator" ) +(ResampleInterpolator "FinalBSplineInterpolator" ) +(Resampler "DefaultResampler" ) +(ShowExactMetricValue {exact_metric}) + +(FixedImagePyramid "FixedSmoothingImagePyramid") +(MovingImagePyramid "MovingSmoothingImagePyramid") + +(Optimizer "{optimizer}") +(Transform "{transform}") +(Metric "{metric}") +(MaximumStepLength {max_step}) + +(FinalGridSpacingInPhysicalUnits {grid_spacing}) + +(HowToCombineTransforms "Compose") + +(ErodeMask "false") + +(NumberOfResolutions {resolutions}) + +(ImagePyramidSchedule {pyramid} ) + +(MaximumNumberOfIterations {iterations} ) +(MaximumNumberOfSamplingAttempts 10) + +(NumberOfSpatialSamples {samples} ) + +(NewSamplesEveryIteration "{new_samples}") +(ImageSampler "{sampler}" ) + +(BSplineInterpolationOrder 1) + +(FinalBSplineInterpolationOrder 1) + +(DefaultPixelValue 0) + +(WriteResultImage "false") + +// The pixel type and 
format of the resulting deformed moving image +(ResultImagePixelType "float") +(ResultImageFormat "mnc") +'''.format( optimizer= parameters.get('optimizer','AdaptiveStochasticGradientDescent'), + transform= parameters.get('transform','BSplineTransform'), + metric= parameters.get('metric','AdvancedNormalizedCorrelation'), + resolutions=parameters.get('resolutions',3), + pyramid= parameters.get('pyramid','8 8 8 4 4 4 2 2 2'), + iterations=parameters.get('iterations',def_iterations), + samples= parameters.get('samples',4096), + sampler= parameters.get('sampler',"Random"), + grid_spacing=parameters.get('grid_spacing',10), + max_step =parameters.get('max_step',"1.0"), + exact_metric=str(parameters.get("exact_metric",False)).lower(), + new_samples=str(parameters.get("new_samples",True)).lower(), + )) + else: + p.write(''' +(FixedInternalImagePixelType "float") +(MovingInternalImagePixelType "float") +(FixedImageDimension 3) +(MovingImageDimension 3) +(UseDirectionCosines "true") + +(AutomaticTransformInitialization "{automatic_transform_init}") +(AutomaticTransformInitializationMethod "{automatic_transform_init_method}") +(AutomaticScalesEstimation "true") +(AutomaticParameterEstimation "true") +(MaximumStepLength {max_step}) + +(Registration "MultiResolutionRegistration") +(Interpolator "BSplineInterpolator" ) +(ResampleInterpolator "FinalBSplineInterpolator" ) +(Resampler "DefaultResampler" ) +(ShowExactMetricValue {exact_metric}) + +(FixedImagePyramid "FixedSmoothingImagePyramid") +(MovingImagePyramid "MovingSmoothingImagePyramid") + +(Optimizer "{optimizer}") +(Transform "{transform}") +(Metric "{metric}") + +(HowToCombineTransforms "Compose") + +(ErodeMask "false") + +(NumberOfResolutions {resolutions}) + +(ImagePyramidSchedule {pyramid} ) + +(MaximumNumberOfIterations {iterations} ) +(RequiredRatioOfValidSamples 0.01) +(MaximumNumberOfSamplingAttempts 10) + +(NumberOfSpatialSamples {samples} ) + +(NewSamplesEveryIteration "{new_samples}") +(ImageSampler "{sampler}" ) + +(BSplineInterpolationOrder 1) + +(FinalBSplineInterpolationOrder 1) + +(DefaultPixelValue 0) + +(WriteResultImage "false") + +// The pixel type and format of the resulting deformed moving image +(ResultImagePixelType "float") +(ResultImageFormat "mnc") + '''.format( + optimizer=parameters.get('optimizer','CMAEvolutionStrategy'), + transform=parameters.get('transform','SimilarityTransform'), + metric=parameters.get('metric','AdvancedNormalizedCorrelation'), + resolutions=parameters.get('resolutions', 3 ), + pyramid=parameters.get('pyramid','8 8 8 4 4 4 2 2 2'), + iterations=parameters.get('iterations',def_iterations), + samples=parameters.get('samples',4096), + sampler=parameters.get('sampler',"Random"), + max_step=parameters.get('max_step',"1.0"), + automatic_transform_init=str(parameters.get("automatic_transform_init",True)).lower(), # to convert True to true + automatic_transform_init_method=parameters.get("automatic_transform_init_method","CenterOfGravity"), + exact_metric=str(parameters.get("exact_metric",False)).lower(), + new_samples=str(parameters.get("new_samples",True)).lower(), + )) + # + if 'grid_spacing' in parameters: p.write("(SampleGridSpacing {})\n".format(parameters['grid_spacing'])) + #if 'exact_metric' in parameters: p.write("(ShowExactMetricValue {})\n".format(parameters['exact_metric'])) + if 'exact_metric_spacing' in parameters: p.write("(ExactMetricSampleGridSpacing {})\n".format(parameters['exact_metric_spacing'])) + else: + if parameters[0]=="@": + par_file=parameters.split("@",1)[1] + 
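+                    # non-dict parameters are treated as raw elastix parameter
+                    # text, or as '@/path/to/file' to reference an existing
+                    # parameter file (the leading '@' is stripped above)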
#print("Using:{}".format(par_file)) + else: + with open(par_file,'w') as p: + p.write(parameters) + + cmd=['elastix', + '-f', source_lr , '-m', target_lr, + '-out', tmp.tempdir+os.sep , '-p', par_file, + '-threads', str(threads)] # , '-q' + + if measure_mode: + cmd.append('-M') + + if verbose<1: + cmd.append('-q') + + inputs=[source_lr , target_lr] + + if init_par is not None: + cmd.extend(['-t0',init_par]) + inputs.append(init_par) + + if source_mask is not None and use_mask: + cmd.extend( ['-fMask',source_mask_lr] ) + inputs.append(source_mask_lr) + + if target_mask is not None and use_mask: + cmd.extend( ['-mMask',target_mask_lr] ) + inputs.append(target_mask_lr) + + if tags is not None: + vols=tag2elx(tags,tmp.cache(s_base+'_tags.txt'),tmp.cache(t_base+'_tags.txt')) + inputs.append(tmp.cache(s_base+'_tags.txt') ) + cmd.extend(['-fp',tmp.cache(s_base+'_tags.txt')] ) + shutil.copyfile(tmp.cache(s_base+'_tags.txt'),"source.tag") + + if vols>1: + inputs.append(tmp.cache(t_base+'_tags.txt') ) + cmd.extend(['-mp',tmp.cache(t_base+'_tags.txt')] ) + shutil.copyfile(tmp.cache(t_base+'_tags.txt'),"target.tag") + + outputs=[ tmp.tempdir+os.sep+'TransformParameters.0.txt' ] + + outcome=None + + if measure_mode: + # going to read the output of iterations + out_=minc.execute_w_output(cmd).split("\n") + for l,j in enumerate(out_): + if re.match("^1\:ItNr\s2\:Metric\s.*",j): + outcome=float(out_[l+1].split("\t")[1]) + #print(out_[l]) + #print(out_[l+1]) + break + else: + # + print("Elastix output:\n{}".format("\n".join(out_))) + raise minc_tools.mincError("Elastix didn't report measure") + else: + minc.command(cmd, inputs=inputs, outputs=outputs, verbose=verbose) + + if output_par is not None: + shutil.copyfile( tmp.tempdir+os.sep+'TransformParameters.0.txt' , output_par ) + + if output_xfm is not None: + nl_elastix_to_xfm( tmp.tempdir+os.sep+'TransformParameters.0.txt', + output_xfm, + downsample_grid=downsample_grid, + nl=nl) + + if output_log is not None: + shutil.copyfile(tmp.tempdir+os.sep+'elastix.log',output_log) + + return outcome + + +def parse_options(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Run elastix registration") + + parser.add_argument("--verbose", + action="store_true", + default=False, + help="Be verbose", + dest="verbose") + + parser.add_argument("source", + help="Source file") + + parser.add_argument("target", + help="Target file") + + parser.add_argument("--output_par", + help="Output transformation file, elastix format", + default=None) + + parser.add_argument("--output_xfm", + help="Output transformation file, MINC xfm format", + default=None) + + parser.add_argument("--source_mask", + default= None, + help="Source mask") + parser.add_argument("--target_mask", + default= None, + help="Target mask") + parser.add_argument("--init_xfm", + default = None, + help="Initial transformation, minc format") + parser.add_argument("--init_par", + default = None, + help="Initial transformation elastix format") + + parser.add_argument("--optimizer", + default="AdaptiveStochasticGradientDescent", + help="Elastix optimizer", + choices=["AdaptiveStochasticGradientDescent", + "CMAEvolutionStrategy" , + "ConjugateGradient", + "ConjugateGradientFRPR", + "FiniteDifferenceGradientDescent", + "QuasiNewtonLBFGS", + "RegularStepGradientDescent", + "RSGDEachParameterApart"] + ) + + parser.add_argument("--transform", + default="BSplineTransform", + help="Elastix transform", + choices=[ "BSplineTransform", + "SimilarityTransform", + 
"AffineTransform", + "AffineDTITransform", + "EulerTransform", + "MultiBSplineTransformWithNormal", + "TranslationTransform"] + ) + + parser.add_argument("--metric", + default="AdvancedNormalizedCorrelation", + help="Elastix metric", + choices=[ "AdvancedNormalizedCorrelation", + "AdvancedMattesMutualInformation", + "NormalizedMutualInformation", + "AdvancedKappaStatistic", + "KNNGraphAlphaMutualInformation", + "AdvancedMeanSquares"]) + + parser.add_argument("--resolutions", + default=3, + type=int, + help="Number of resolutions") + + parser.add_argument("--pyramid", + default="8 8 8 4 4 4 2 2 2", + help="Downsampling program") + + parser.add_argument("--iterations", + default=4000, + help="Number of iterations per level") + + parser.add_argument("--samples", + default=4096, + help="Number of samples") + + parser.add_argument("--sampler", + default="Random", + help="Elastix sampler") + + parser.add_argument("--grid_spacing", + default=10, + type=float, + help="Final node-distance for B-Splines") + + parser.add_argument("--max_step", + default="1.0", + help="Elastix maximum optimizer step") + + parser.add_argument("--work_dir", + default = None, + help="Work directory") + + parser.add_argument("--downsample", + default = None, + help="Downsample to given voxel size ", + type=float) + + parser.add_argument("--downsample_grid", + default=None, + help="Downsample output grid by factor", + type=int) + + parser.add_argument("--tags", + default=None, + help="tags") + + parser.add_argument("--nl", + dest="nl", + action="store_true", + help="Use nonlinear mode", + default=False) + + parser.add_argument("--lin", + dest="nl", + action="store_false", + help="Use linear mode", + default=False) + + parser.add_argument("--output_log", + default = None, + help="Output log file") + + parser.add_argument("-M","--measure", + default = False, + action = "store_true", + help = "Measure mode", + dest="measure") + + parser.add_argument("--close", + dest="close", + action="store_true", + help="Do not initialize transform", + default=False) + + options = parser.parse_args() + return options + + +if __name__ == "__main__": + options = parse_options() + + if options.source is None or options.target is None: + print("Error in arguments, run with --help") + print(repr(options)) + else: + if not options.nl and options.transform=="BSplineTransform": + options.transform="SimilarityTransform" + + parameters= { + "optimizer": options.optimizer, + "transform": options.transform, + "metric": options.metric, + "resolutions": options.resolutions, + "pyramid": options.pyramid, + "iterations": options.iterations, + "samples": options.samples, + "sampler": options.sampler, + "grid_spacing":options.grid_spacing, + "max_step": options.max_step, + "measure": options.measure, + "automatic_transform_init": not options.close + } + + out=register_elastix( + options.source, options.target, + output_par = options.output_par, + output_xfm = options.output_xfm, + source_mask= options.source_mask, + target_mask= options.target_mask, + init_xfm = options.init_xfm, + init_par = options.init_par, + parameters = parameters, + work_dir = options.work_dir, + downsample = options.downsample, + downsample_grid=options.downsample_grid, + nl = options.nl, + output_log = options.output_log, + tags = options.tags, + verbose = 2) + if options.measure: + print(out) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/grading/__init__.py b/ipl/grading/__init__.py new file mode 
100644 index 0000000..edb624a --- /dev/null +++ b/ipl/grading/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# +# image grading functions + +# internal funcions +from .structures import MriDataset +from .structures import MriTransform +from .labels import split_labels_seg +from .labels import merge_labels_seg +from .resample import resample_file +from .resample import resample_split_segmentations +from .resample import warp_rename_seg +from .resample import warp_sample +from .resample import concat_resample +from .registration import linear_registration +from .registration import non_linear_registration +from .model import create_local_model +from .model import create_local_model_flip +from .filter import apply_filter +from .filter import make_border_mask +from .filter import generate_flip_sample +from .library import save_library_info +from .library import load_library_info +from .train import generate_library +from .fuse import fusion_grading +from .cross_validation import cv_fusion_grading +from .cross_validation import run_grading_experiment +from .analysis import calc_similarity_stats + +__all__= ['generate_library', + 'load_library_info', + 'cv_fusion_grading', + 'fusion_grading' ] + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/grading/analysis.py b/ipl/grading/analysis.py new file mode 100644 index 0000000..22686fa --- /dev/null +++ b/ipl/grading/analysis.py @@ -0,0 +1,165 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + + +def calc_similarity_stats(input_ground_truth, + input_segmentation, + output_stats=None, + relabel=None ): + ''' + Calculate similarity stats + ''' + stats={} + + stats[ 'sample' ] = input_segmentation + stats[ 'ground_truth' ] = input_ground_truth + + with mincTools() as m: + sim = m.execute_w_output( + ['volume_gtc_similarity', input_ground_truth, input_segmentation,'--csv'] + ).rstrip("\n").split(',') + + stats['gkappa'] = float(sim[0]) + stats['gtc'] = float(sim[1]) + stats['akappa'] = float(sim[2]) + + sim = m.execute_w_output( + [ 'volume_similarity', input_ground_truth, input_segmentation,'--csv'] + ).split("\n") + + ka={} + se={} + sp={} + js={} + + for i in sim: + q=i.split(',') + if len(q)==5: + l=int(q[0]) + + if relabel is not None: + l=relabel[l] + + ka[l] = float( q[1] ) + se[l] = float( q[2] ) + sp[l] = float( q[3] ) + js[l] = float( q[4] ) + + stats['ka']=ka + stats['se']=se + stats['sp']=sp + stats['js']=js + + if output_stats is not None: + with open(output_stats,'w') as f: + f.write("{},{},{},{}\n".format(stats['sample'],stats['gkappa'],stats['gtc'],stats['akappa'])) + + return stats + +def create_grading_map( + output_grading, + output_map, + lin_xfm=None, + nl_xfm=None, + template=None ): + try: + with mincTools( verbose=2 ) as m: + xfm=None + + if lin_xfm is not None and nl_xfm is not None: + xfm=m.tmp('concat.xfm') + m.xfmconcat([lin_xfm,nl_xfm],xfm) + elif lin_xfm is not None: + xfm=lin_xfm + else: + xfm=nl_xfm + + m.resample_smooth(output_grading,output_map, + transform=xfm, + like=template, + order=2, + datatype='short') + + except mincError as e: + print("Exception in split_labels:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in split_labels:{}".format(sys.exc_info()[0])) + traceback.print_exc( 
+        raise
+
+
+def create_error_map(input_ground_truth,
+                     input_segmentation,
+                     output_maps,
+                     lin_xfm=None,
+                     nl_xfm=None,
+                     template=None,
+                     label_list=[] ):
+    try:
+        with mincTools( verbose=2 ) as m:
+            # go over labels and calculate errors per label
+            for (i,l) in enumerate(label_list):
+                # extract label error
+                out=m.tmp(str(l)+'.mnc')
+                xfm=None
+
+                m.calc([input_segmentation, input_ground_truth],
+                       "abs(A[0]-{})<0.5&&abs(A[1]-{})>0.5 || abs(A[0]-{})>0.5&&abs(A[1]-{})<0.5 ? 1:0".format(l,l,l,l),
+                       out, datatype='-byte')
+
+                if lin_xfm is not None and nl_xfm is not None:
+                    xfm=m.tmp(str(l)+'.xfm')
+                    m.xfmconcat([lin_xfm,nl_xfm],xfm)
+                elif lin_xfm is not None:
+                    xfm=lin_xfm
+                else:
+                    xfm=nl_xfm
+
+                m.resample_smooth(out,output_maps[i],
+                                  transform=xfm,
+                                  like=template,
+                                  order=1,
+                                  datatype='byte')
+
+    except mincError as e:
+        print("Exception in create_error_map:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in create_error_map:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+def average_error_maps(maps, out_avg):
+    try:
+        with mincTools( verbose=2 ) as m:
+            print("average_error_maps {} {}".format(repr(maps),repr(out_avg)))
+            m.average(maps, out_avg, datatype='-short')
+    except mincError as e:
+        print("Exception in average_error_maps:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in average_error_maps:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/cross_validation.py b/ipl/grading/cross_validation.py
new file mode 100644
index 0000000..aad8387
--- /dev/null
+++ b/ipl/grading/cross_validation.py
@@ -0,0 +1,387 @@
+import shutil
+import os
+import sys
+import csv
+import copy
+import json
+import random
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+# scoop parallel execution
+from scoop import futures, shared
+
+from .fuse import *
+from .structures import *
+from .resample import *
+from .filter import *
+from .analysis import *
+
+def run_grading_experiment( input_scan,
+                            input_seg,
+                            grading_library,
+                            output_experiment,
+                            grading_parameters={},
+                            debug=False,
+                            mask=None,
+                            work_dir=None,
+                            fuse_variant='fuse',
+                            add=[],
+                            cleanup=False,
+                            group=None,
+                            grading=None
+                            ):
+    """run a grading experiment: perform grading and compare with the ground truth
+
+    Arguments:
+    input_scan -- input scan object MriDataset
+    input_seg -- input segmentation file name (ground truth)
+    grading_library -- grading library object
+    output_experiment -- prefix for output
+
+    Keyword arguments:
+    grading_parameters -- parameters for the grading algorithm
+    debug -- debug flag, (default False)
+    mask -- mask file name to restrict segmentation, (default None)
+    work_dir -- work directory, (default None - use output_experiment)
+    fuse_variant -- name of fusion parameters, (default 'fuse' )
+    add -- additional modalities [T2w,PDw etc]
+    cleanup -- flag to clean most of the temporary files
+    group -- group (class) id of the subject
+    grading -- grading score of the subject
+    """
+    try:
+        relabel=grading_library.get("label_map",None)
+
+        if relabel is not None and isinstance(relabel, list) :
+            _r={i[0]:i[1] for i in relabel}
+            relabel=_r
+
+        if debug:
+            if not os.path.exists(os.path.dirname(output_experiment)):
+                os.makedirs(os.path.dirname(output_experiment))
+            with open(output_experiment+'_par.json','w') as f:
+                json.dump(grading_parameters,f,indent=1)
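+
+        # run the library-based fusion grading; fusion_grading() returns
+        # (segmentation, grading map, volume measurements, info dict)
+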
(output_seg, output_grad, output_volumes, output_info) = fusion_grading( + input_scan, + grading_library, + output_experiment, + input_mask=mask, + parameters=grading_parameters, + debug=debug, + work_dir=work_dir, + fuse_variant=fuse_variant, + add=add, + cleanup=cleanup) + + stats = calc_similarity_stats( input_seg, + output_seg, + output_stats = output_experiment+'_stats.csv', + relabel = relabel) + + stats['group']=group + stats['grading']=grading + stats['result']=output_volumes + + name=os.path.basename(input_scan).rsplit('.mnc',1)[0] + grading_map=work_dir+os.sep+fuse_variant+'_'+name+'_grading_nl.mnc' + + lin_xfm=None + nl_xfm=None + + if output_info['bbox_initial_xfm'] is not None: + lin_xfm=output_info['bbox_initial_xfm'].xfm + + if output_info['nonlinear_xfm'] is not None: + nl_xfm=output_info['nonlinear_xfm'].xfm + + create_grading_map(output_grad, grading_map, + lin_xfm=lin_xfm, + nl_xfm=nl_xfm, + template=grading_library.get('local_model',None)) + + output_info['stats']=stats + output_info['output']=output_seg + output_info['ground_truth']=input_seg + output_info['grading_map']=grading_map + output_info['group']=group + output_info['grading']=grading + output_info['volumes']=output_volumes + + with open(output_experiment+'_out.json','w') as f: + json.dump(output_info,f,indent=1, cls=GMRIEncoder) + + with open(output_experiment+'_stats.json','w') as f: + json.dump(stats,f,indent=1, cls=GMRIEncoder) + + return (stats, output_info) + + except mincError as e: + print("Exception in run_grading_experiment:{}".format( str(e)) ) + traceback.print_exc( file=sys.stderr ) + raise + + except : + print("Exception in run_grading_experiment:{}".format( sys.exc_info()[0]) ) + traceback.print_exc( file=sys.stderr ) + raise + + +def loo_cv_fusion_grading(validation_library, + grading_library, + output, + grading_parameters, + debug=False, + fuse_variant='fuse', + cv_variant='cv', + cleanup=False, + cv_iter=None): + '''Run leave-one-out cross-validation experiment''' + # for each N subjects run segmentation and compare + # Right now run LOOCV + if not os.path.exists(output): + try: + os.makedirs(output) + except: + pass # assume directory was created by competing process + + results=[] + results_json=[] + + modalities=grading_library.get('modalities',1)-1 + + print("cv_iter={}".format(repr(cv_iter))) + + for (i,j) in enumerate(validation_library): + n = os.path.basename(j[0]).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + output_experiment = output+os.sep+n+'_'+cv_variant + + validation_sample = j[0] + validation_segment = j[1] + + validation_group = int( j[-2] ) + validation_grading = float(j[-1] ) + + add=j[2:2+modalities] + + experiment_grading_library=copy.deepcopy(grading_library) + + # remove sample + experiment_grading_library['library']=[ _i for _i in grading_library['library'] if _i[2].find(n)<0 ] + + if (cv_iter is None) or (i == cv_iter): + results.append( futures.submit( + run_grading_experiment, + validation_sample, validation_segment, + experiment_grading_library, + output_experiment, + grading_parameters=grading_parameters, + debug=debug, + work_dir=output+os.sep+'work_'+n+'_'+fuse_variant, + fuse_variant=fuse_variant, + add=add, + cleanup=cleanup, + group=validation_group, + grading=validation_grading + )) + else: + results_json.append( (output_experiment+'_stats.json', + output_experiment+'_out.json') ) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + stat_results=[] + output_results=[] + + if cv_iter is None: + stat_results = [ _i.result()[0] for _i in results ] + 
    output_results= [ _i.result()[1] for _i in results ]
+
+    elif cv_iter==-1:
+        # load results from the previously saved json files
+        for _i in results_json:
+            with open(_i[0],'r') as _f:
+                stat_results.append(json.load(_f))
+            with open(_i[1],'r') as _f:
+                output_results.append(json.load(_f))
+
+    return (stat_results, output_results)
+
+def full_cv_fusion_grading(validation_library,
+                           grading_library,
+                           output,
+                           grading_parameters,
+                           cv_iterations,
+                           cv_exclude,
+                           debug=False,
+                           fuse_variant='fuse',
+                           cv_variant='cv',
+                           cleanup=False,
+                           cv_iter=None):
+    if cv_iter is not None:
+        raise NotImplementedError("full CV with a single cv_iter is not implemented")
+
+    validation_library_idx=range(len(validation_library))
+    # randomly exclude samples, repeat
+    results=[]
+    if not os.path.exists(output):
+        try:
+            os.makedirs(output)
+        except:
+            pass # assume directory was created by competing process
+
+    modalities=grading_library.get('modalities',1)-1
+
+    for i in range( cv_iterations ):
+        # the exclusion list is saved to a file, so that a restarted run reuses it
+        rem_list=[]
+        ran_file=output+os.sep+ ('random_{}_{}.json'.format(cv_variant,i))
+
+        if not os.path.exists( ran_file ):
+            rem_list=random.sample( validation_library_idx, cv_exclude )
+
+            with open( ran_file , 'w') as f:
+                json.dump(rem_list,f)
+        else:
+            with open( ran_file ,'r') as f:
+                rem_list=json.load(f)
+
+        # list of subjects
+        rem_items=[ validation_library[j] for j in rem_list ]
+
+        rem_n=[os.path.basename(j[0]).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] for j in rem_items]
+        rem_lib=[]
+
+        for j in rem_n:
+            rem_lib.extend( [ k for (k,t) in enumerate( grading_library['library'] ) if t[2].find(j)>=0 ] )
+
+        if debug: print(repr(rem_lib))
+        rem_lib=set(rem_lib)
+        # prepare exclusion list
+        experiment_grading_library=copy.deepcopy(grading_library)
+
+        experiment_grading_library['library']=\
+            [ k for j,k in enumerate( grading_library['library'] ) if j not in rem_lib ]
+
+        for j,k in enumerate(rem_items):
+            output_experiment=output+os.sep+('{}_{}_{}'.format(i,rem_n[j],cv_variant))
+            work_dir=output+os.sep+('work_{}_{}_{}'.format(i,rem_n[j],fuse_variant))
+
+            results.append( futures.submit(
+                run_grading_experiment, k[0], k[1],
+                experiment_grading_library,
+                output_experiment,
+                grading_parameters=grading_parameters,
+                debug=debug,
+                work_dir=work_dir,
+                fuse_variant=fuse_variant,
+                add=k[2:2+modalities],
+                cleanup=cleanup,
+                group=int(k[-2]),
+                grading=float(k[-1])
+                ))
+
+    futures.wait(results, return_when=futures.ALL_COMPLETED)
+    stat_results = [ i.result()[0] for i in results ]
+    output_results = [ i.result()[1] for i in results ]
+
+    return ( stat_results, output_results )
+
+
+def cv_fusion_grading( cv_parameters,
+                       grading_library,
+                       output,
+                       grading_parameters,
+                       debug=False,
+                       cleanup=False,
+                       cv_iter=None):
+    '''Run a cross-validation experiment:
+    exclude N subjects per fold, run grading on them and compare with the ground truth.
+    Currently runs either LOOCV or repeated random sub-sampling.
+    '''
+
+    # TODO: implement more realistic, random schemes
+    validation_library=cv_parameters['validation_library']
+
+    # maximum number of iterations
+    cv_iterations=cv_parameters.get('iterations',-1)
+
+    # number of samples to exclude
+    cv_exclude=cv_parameters.get('cv',1)
+
+    # use to distinguish different versions of label fusion
+    fuse_variant=cv_parameters.get('fuse_variant','fuse')
+
+    # use to distinguish different versions of cross-validation
+    cv_variant=cv_parameters.get('cv_variant','cv')
+
+    cv_output=output+os.sep+cv_variant+'_stats.json'
+    res_output=output+os.sep+cv_variant+'_res.json'
+
+    if not isinstance(validation_library, list):
+        with open(validation_library,'r') as f:
+            validation_library=list(csv.reader(f))
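+
+    # each row of the validation library is expected to be:
+    #   scan.mnc,seg.mnc[,additional_modality.mnc...],group,grading
+    # with group an integer class id and grading a float score
+    # (this follows the indexing used in loo_cv_fusion_grading above)
+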
    print("Validation library:",validation_library)
+    stat_results=None
+    output_results=None
+
+    if cv_iter is not None:
+        cv_iter=int(cv_iter)
+
+    if cv_iterations==-1 and cv_exclude==1: # simple LOO cross-validation
+        (stat_results, output_results) = loo_cv_fusion_grading(validation_library,
+                                                               grading_library,
+                                                               output, grading_parameters,
+                                                               debug=debug,
+                                                               cleanup=cleanup,
+                                                               fuse_variant=fuse_variant,
+                                                               cv_variant=cv_variant,
+                                                               cv_iter=cv_iter)
+    else: # arbitrary number of iterations
+        (stat_results, output_results) = full_cv_fusion_grading(validation_library,
+                                                                grading_library,
+                                                                output, grading_parameters,
+                                                                cv_iterations, cv_exclude,
+                                                                debug=debug,
+                                                                cleanup=cleanup,
+                                                                fuse_variant=fuse_variant,
+                                                                cv_variant=cv_variant,
+                                                                cv_iter=cv_iter)
+
+    if cv_iter is None or cv_iter==-1:
+        # build glim-image and RMINC tables
+        results=[]
+
+        # for glim_image
+        with open(output+os.sep+cv_variant+'_grading.glim','w') as f:
+            for k in output_results:
+                group=k['group']
+                grading=k['grading']
+                grading_map=k['grading_map']
+                f.write("{} {} {}\n".format(grading_map,1.0,grading))
+
+        # for RMINC
+        with open(output+os.sep+cv_variant+'_grading.csv','w') as f:
+            f.write("grading_map,group,grading\n")
+            for k in output_results:
+                group=k['group']
+                grading=k['grading']
+                grading_map=k['grading_map']
+                f.write("{},{},{}\n".format(grading_map,group,grading))
+
+        # TODO: run glim-image or RMINC here
+
+        with open(cv_output,'w') as f:
+            json.dump(stat_results, f, indent=1 )
+
+        with open(res_output,'w') as f:
+            json.dump(output_results, f, indent=1, cls=GMRIEncoder)
+
+        return stat_results
+    else:
+        # we assume that results will be available later
+        return None
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/filter.py b/ipl/grading/filter.py
new file mode 100644
index 0000000..34d59c1
--- /dev/null
+++ b/ipl/grading/filter.py
@@ -0,0 +1,248 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+import ipl.minc_hl as hl
+
+
+def filter_sample(input,output,filters,model=None):
+    apply_filter(input.scan,output.scan,filters,model=model.scan,input_mask=input.mask,model_mask=model.mask)
+    # TODO: parallelize?
+    for (i,j) in enumerate( input.add ):
+        apply_filter(input.add[i],output.add[i],filters,model=model.add[i],input_mask=input.mask,model_mask=model.mask)
+
+
+def apply_filter(input, output, filters, model=None, input_mask=None, model_mask=None,input_labels=None,model_labels=None):
+    output_scan=input
+    try:
+        if filters is not None :
+            with mincTools() as m:
+                if filters.get('denoise',False):
+                    # TODO: choose between ANLM and NLM here?
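+                    # (as written, denoising always uses m.anlm -- adaptive non-local means)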
+                    m.anlm(output_scan,m.tmp('denoised.mnc'),
+                           beta      =filters.get('beta',0.5),
+                           patch     =filters.get('patch',1),
+                           search    =filters.get('search',1),
+                           regularize=filters.get('regularize',None))
+
+                    output_scan =m.tmp('denoised.mnc')
+
+                if filters.get('normalize',False) and model is not None:
+                    if filters.get('nuyl',False):
+                        m.nuyl_normalize(output_scan,model,m.tmp('normalized.mnc'),source_mask=input_mask,target_mask=model_mask)
+                    else:
+                        m.volume_pol(output_scan,model, m.tmp('normalized.mnc'),source_mask=input_mask,target_mask=model_mask)
+
+                    output_scan = m.tmp('normalized.mnc')
+
+                # TODO: implement more filters
+                patch_norm = filters.get('patch_norm',None)
+
+                if patch_norm is not None:
+                    print("Running patch normalization")
+                    db  = patch_norm.get('db',None)
+                    idx = patch_norm.get('idx',None)
+                    thr = patch_norm.get('threshold',None)
+                    spl = patch_norm.get('spline',None)
+                    med = patch_norm.get('median',None)
+                    it  = patch_norm.get('iterations',None)
+                    if db is not None and idx is not None:
+                        # have all the pieces
+                        m.patch_norm(output_scan, m.tmp('patch_norm.mnc'),
+                                     index=idx, db=db, threshold=thr, spline=spl,
+                                     median=med, field = m.tmp('patch_norm_field.mnc'),
+                                     iterations=it)
+                        output_scan = m.tmp('patch_norm.mnc')
+
+                label_norm = filters.get('label_norm',None)
+
+                if label_norm is not None and input_labels is not None and model_labels is not None:
+                    print("Running label norm:{}".format(repr(label_norm)))
+                    norm_order=label_norm.get('order',3)
+                    norm_median=label_norm.get('median',True)
+                    hl.label_normalize(output_scan,input_labels,model,model_labels,out=m.tmp('label_norm.mnc'),order=norm_order,median=norm_median)
+                    output_scan = m.tmp('label_norm.mnc')
+
+                shutil.copyfile(output_scan,output)
+        else:
+            shutil.copyfile(input,output)
+    except mincError as e:
+        print("Exception in apply_filter:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in apply_filter:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+def make_border_mask( input, output, width=1,labels=1):
+    '''Extract a border mask along the label edges'''
+    try:
+        if not os.path.exists(output):
+            with mincTools() as m:
+                if labels==1:
+                    m.binary_morphology(input,"D[{}]".format((width+1)//2),m.tmp('d.mnc'))
+                    m.binary_morphology(input,"E[{}]".format(width//2),m.tmp('e.mnc'))
+                    m.calc([m.tmp('d.mnc'),m.tmp('e.mnc')],'A[0]>0.5&&A[1]<0.5?1:0',output)
+                else: # have to split up labels and then create a mask of all borders
+                    split_labels(input,labels, m.tmp('split'))
+                    borders=[]
+                    for i in range(1,labels):
+                        l='{}_{:02d}.mnc' .format(m.tmp('split'),i)
+                        d='{}_{:02d}_d.mnc'.format(m.tmp('split'),i)
+                        e='{}_{:02d}_e.mnc'.format(m.tmp('split'),i)
+                        b='{}_{:02d}_b.mnc'.format(m.tmp('split'),i)
+                        m.binary_morphology(l,"D[{}]".format((width+1)//2),d)
+                        m.binary_morphology(l,"E[{}]".format(width//2),e)
+                        m.calc([d,e],'A[0]>0.5&&A[1]<0.5?1:0',b)
+                        borders.append(b)
+                    m.math(borders,'max',m.tmp('max'),datatype='-float')
+                    m.reshape(m.tmp('max'),output,datatype='byte',
+                              image_range=[0,1],valid_range=[0,1])
+
+    except mincError as e:
+        print("Exception in make_border_mask:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in make_border_mask:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+def split_labels(input, n_labels,output_prefix,
+                 antialias=False, blur=None,
+                 expit=None, normalize=False ):
+    try:
+        with mincTools() as m:
+            inputs=[ input ]
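+            # itk_split_labels writes one file per label, following the %02d pattern below
+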
outputs=['{}_{:02d}.mnc'.format(output_prefix,i) for i in range(n_labels) ] + + cmd=['itk_split_labels',input,'{}_%02d.mnc'.format(output_prefix), + '--missing',str(n_labels)] + if antialias: + cmd.append('--antialias') + if normalize: + cmd.append('--normalize') + if blur is not None: + cmd.extend(['--blur',str(blur)]) + if expit is not None: + cmd.extend(['--expit',str(expit)]) + m.command(cmd, inputs=inputs, outputs=outputs) + #return outputs + except mincError as e: + print("Exception in split_labels:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in split_labels:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def generate_flip_sample(input, labels_datatype='byte'): + '''generate flipped version of sample''' + try: + with mincTools() as m: + m.flip_volume_x(input.scan,input.scan_f) + + for (i,j) in enumerate(input.add): + m.flip_volume_x(input.add[i],input.add_f[i]) + + if input.mask is not None: + m.flip_volume_x(input.mask, input.mask_f, labels=True) + + #for i in input.add: + # m.flip_volume_x(i, input.seg_f, labels=True,datatype=labels_datatype) + except mincError as e: + print("Exception in generate_flip_sample:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in generate_flip_sample:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +def create_unflip_remap(remap,remap_flip): + if remap is not None and remap_flip is not None: + # convert both into dict + _remap={ int( i[0] ):int(i[1]) for i in remap } + _remap_flip={ int(i[0]):int(i[1]) for i in remap_flip } + _rr={} + for i,j in _remap.items(): + if i in _remap_flip: + _rr[j]=_remap_flip[i] + return _rr + else: + return None + +def log_transform_sample(input, output, threshold=1.0): + try: + with mincTools() as m: + m.calc([input.scan],'A[0]>{}?log(A[0]):0.0'.format(threshold), + output.scan) + except mincError as e: + print("Exception in log_transform_sample:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in log_transform_sample:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def create_patch_norm_db( input_samples, + patch_norm_db, + patch_norm_idx, + pct=0.1, + patch=2, + sub=1): + try: + with mincTools() as m: + patch_lib=os.path.dirname(input_samples[0].scan)+os.sep+'patch_lib.lst' + inputs=[] + outputs=[patch_norm_db] + + with open(patch_lib,'w') as f: + for i in input_samples: + f.write( os.path.basename( i.scan ) ) + f.write("\n") + inputs.append(i.scan) + + cmd=['create_feature_database', + patch_lib, patch_norm_db, + '--patch', + '--patch-radius', str(patch), + '--subsample', str(sub), + '--random', str(pct), + '--log', + '--threshold', str(1.0), + ] + + m.command(cmd, inputs=inputs, outputs=outputs) + + cmd=['refine_feature_database', + patch_norm_db, patch_norm_idx + ] + m.command(cmd, inputs=[patch_norm_db], outputs=[patch_norm_idx]) + + except mincError as e: + print("Exception in create_patch_norm_db:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in create_patch_norm_db:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/grading/fuse.py b/ipl/grading/fuse.py new file mode 100644 index 0000000..0c58a4a --- /dev/null +++ b/ipl/grading/fuse.py @@ -0,0 +1,830 @@ +# 
-*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + +import shutil +import os +import sys +import csv +import copy +import re +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +# scoop parallel execution +from scoop import futures, shared + +from .filter import * +from .structures import * +from .registration import * +from .resample import * +from .preselect import * +from .qc import * +from .fuse_grading import * + +import traceback + +def seg_to_volumes(seg, output_json, label_map=None,grad=None,median=False): + with mincTools( verbose=2 ) as m: + _out=m.label_stats(seg,label_defs=label_map,volume=grad,median=median) + # convert to a dictionary + # label_id, volume, mx, my, mz,[mean/median] + out={i[0]: { 'volume':i[1], 'x':i[2], 'y':i[3], 'z': i[4], 'grad':i[5] } for i in _out } + + with open(output_json,'w') as f: + json.dump(out,f,indent=1) + return out + +def fusion_grading( input_scan, + library_description, + output_segment, + input_mask=None, + parameters={}, + exclude=[], + work_dir=None, + debug=False, + ec_variant=None, + fuse_variant=None, + regularize_variant=None, + add=[], + cleanup=False, + cleanup_xfm=False, + exclude_re=None): + """Apply fusion segmentation""" + + if debug: + print( "Segmentation parameters:") + print( repr(parameters) ) + + out_variant='' + if fuse_variant is not None: + out_variant+=fuse_variant + + if regularize_variant is not None: + out_variant+='_'+regularize_variant + + if ec_variant is not None: + out_variant+='_'+ec_variant + + if work_dir is None: + work_dir=output_segment+os.sep+'work_segment' + + if not os.path.exists(work_dir): + os.makedirs(work_dir) + + work_lib_dir= work_dir+os.sep+'library' + work_lib_dir_f=work_dir+os.sep+'library_f' + + if not os.path.exists(work_lib_dir): + os.makedirs(work_lib_dir) + + if not os.path.exists(work_lib_dir_f): + os.makedirs(work_lib_dir_f) + + library_nl_samples_avail=library_description['nl_samples_avail'] + library_modalities=library_description.get('modalities',1)-1 + + # perform symmetric segmentation + segment_symmetric= parameters.get('segment_symmetric', False ) + + # read filter paramters + pre_filters= parameters.get('pre_filters', None ) + post_filters= parameters.get('post_filters', parameters.get( 'filters', None )) + + + # perform local linear registration + do_initial_register = parameters.get( 'initial_register', + parameters.get( 'linear_register', {})) + + if do_initial_register is not None and isinstance(do_initial_register,dict): + initial_register = do_initial_register + do_initial_register = True + else: + initial_register={} + + inital_reg_type = parameters.get( 'initial_register_type', + parameters.get( 'linear_register_type', + initial_register.get('type','-lsq12'))) + + inital_reg_ants = parameters.get( 'initial_register_ants', + parameters.get( 'linear_register_ants', False)) + + inital_reg_options = parameters.get( 'initial_register_options', + initial_register.get('options',None) ) + + inital_reg_downsample = parameters.get( 'initial_register_downsample', + initial_register.get('downsample',None)) + + inital_reg_use_mask = parameters.get( 'initial_register_use_mask', + initial_register.get('use_mask',False)) + + initial_reg_objective = initial_register.get('objective','-xcorr') + + # perform local linear registration + do_initial_local_register = parameters.get( 'initial_local_register', + parameters.get( 'local_linear_register', {}) ) + if do_initial_local_register is not None and isinstance(do_initial_local_register,dict): + 
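# a dict value means "enabled, with these options": keep the dict and reduce the flag itself to a boolean
+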
initial_local_register=do_initial_local_register + do_initial_local_register=True + else: + initial_local_register={} + + local_reg_type = parameters.get( 'local_register_type', + initial_local_register.get('type','-lsq12')) + + local_reg_ants = parameters.get( 'local_register_ants', False) + + local_reg_opts = parameters.get( 'local_register_options', + initial_local_register.get('options',None)) + + local_reg_bbox = parameters.get( 'local_register_bbox', + initial_local_register.get('bbox',False )) + + local_reg_downsample = parameters.get( 'local_register_downsample', + initial_local_register.get('downsample',None)) + + local_reg_use_mask = parameters.get( 'local_register_use_mask', + initial_local_register.get('use_mask',True)) + + local_reg_objective = initial_local_register.get('objective','-xcorr') + # if non-linear registraiton should be performed for library creation + do_nonlinear_register=parameters.get('non_linear_register', False ) + + # if non-linear registraiton should be performed with ANTS + do_nonlinear_register_ants=parameters.get('non_linear_register_ants',False ) + nonlinear_register_type = parameters.get( 'non_linear_register_type',None) + if nonlinear_register_type is None: + if do_nonlinear_register_ants: + nonlinear_register_type='ants' + + # if non-linear registraiton should be performed pairwise + do_pairwise =parameters.get('non_linear_pairwise', False ) + + # if pairwise registration should be performed using ANTS + do_pairwise_ants =parameters.get('non_linear_pairwise_ants', True ) + pairwise_register_type = parameters.get( 'non_linear_pairwise_type',None) + if pairwise_register_type is None: + if do_pairwise_ants: + pairwise_register_type='ants' + + # should we use ANTs + library_preselect= parameters.get('library_preselect', 10) + library_preselect_step= parameters.get('library_preselect_step', None) + library_preselect_method= parameters.get('library_preselect_method', 'MI') + + + nlreg_level = parameters.get('non_linear_register_level', 2) + nlreg_start = parameters.get('non_linear_register_start', 16) + nlreg_options = parameters.get('non_linear_register_options', None) + nlreg_downsample = parameters.get('non_linear_register_downsample', None) + + + pairwise_level = parameters.get('pairwise_level', 2) + pairwise_start = parameters.get('pairwise_start', 16) + pairwise_options = parameters.get('pairwise_options', None) + + fuse_options = parameters.get('fuse_options', None) + + resample_order = parameters.get('resample_order', 2) + label_resample_order= parameters.get( 'label_resample_order',resample_order) + + resample_baa = parameters.get('resample_baa', True) + + use_median = parameters.get('use_median', False) + # QC image paramters + qc_options = parameters.get('qc_options', None) + + # special case for training error correction, assume input scan is already pre-processed + run_in_bbox = parameters.get('run_in_bbox', False) + + classes_number = library_description['classes_number'] + groups = library_description['groups'] + seg_datatype = 'byte' + + output_info = {} + + sample= MriDataset(scan=input_scan, seg=None, + mask=input_mask, protect=True, + add=add) + # get parameters + model = MriDataset(scan=library_description['model'], + mask=library_description['model_mask'], + add= library_description.get('model_add',[]) ) + + local_model = MriDataset(scan=library_description['local_model'], + mask=library_description['local_model_mask'], + scan_f=library_description.get('local_model_flip',None), + 
mask_f=library_description.get('local_model_mask_flip',None), + seg= library_description.get('local_model_seg',None), + seg_f= library_description.get('local_model_seg_flip',None), + add= library_description.get('local_model_add',[]), + add_f= library_description.get('local_model_add_flip',[]), + ) + + library = library_description['library'] + + sample_modalities=len(add) + + print("\n\n") + print("Sample modalities:{}".format(sample_modalities)) + print("\n\n") + # apply the same steps as used in library creation to perform segmentation: + + # global + initial_xfm=None + nonlinear_xfm=None + bbox_sample=None + nl_sample=None + bbox_linear_xfm=None + + sample_filtered=MriDataset(prefix=work_dir, name='flt_'+sample.name, add_n=sample_modalities ) + + # QC file + # TODO: allow for alternative location, extension + sample_qc=work_dir+os.sep+'qc_'+sample.name+'_'+out_variant+'.jpg' + + if run_in_bbox: + segment_symmetric=False + do_initial_register=False + do_initial_local_register=False + # assume filter already applied! + pre_filters=None + post_filters=None + + if segment_symmetric: + # need to flip the inputs + flipdir=work_dir+os.sep+'flip' + if not os.path.exists(flipdir): + os.makedirs(flipdir) + + sample.scan_f=flipdir+os.sep+os.path.basename(sample.scan) + sample.add_f=['' for (i,j) in enumerate(sample.add)] + + for (i,j) in enumerate(sample.add): + sample.add_f[i]=flipdir+os.sep+os.path.basename(sample.add[i]) + + if sample.mask is not None: + sample.mask_f=flipdir+os.sep+'mask_'+os.path.basename(sample.scan) + generate_flip_sample( sample ) + + if pre_filters is not None: + apply_filter( sample.scan, + sample_filtered.scan, + pre_filters, + model=model.scan, + model_mask=model.mask) + + if sample.mask is None: + sample_filtered.mask=None + # hack + sample_filtered.add=sample.add + sample=sample_filtered + else: + sample_filtered=None + + output_info['sample_filtered']=sample_filtered + + if do_initial_register: + initial_xfm=MriTransform(prefix=work_dir, name='init_'+sample.name ) + + if inital_reg_type=='elx' or inital_reg_type=='elastix' : + elastix_registration( sample, + model, initial_xfm, + symmetric=segment_symmetric, + parameters=inital_reg_options, + nl=False, + downsample=inital_reg_downsample + ) + elif inital_reg_type=='ants' or inital_reg_ants: + linear_registration( sample, + model, initial_xfm, + symmetric=segment_symmetric, + reg_type=inital_reg_type, + linreg=inital_reg_options, + ants=True, + downsample=inital_reg_downsample + ) + else: + linear_registration( sample, + model, initial_xfm, + symmetric=segment_symmetric, + reg_type=inital_reg_type, + linreg=inital_reg_options, + downsample=inital_reg_downsample, + objective=initial_reg_objective + ) + + output_info['initial_xfm']=initial_xfm + + + # local + bbox_sample = MriDataset(prefix=work_dir, name='bbox_init_'+sample.name, + add_n=sample_modalities ) + + + if do_initial_local_register: + bbox_linear_xfm=MriTransform(prefix=work_dir, name='bbox_init_'+sample.name ) + + if local_reg_type=='elx' or local_reg_type=='elastix' : + elastix_registration( sample, + local_model, + bbox_linear_xfm, + symmetric=segment_symmetric, + init_xfm=initial_xfm, + resample_order=resample_order, + parameters=local_reg_opts, + bbox=local_reg_bbox, + downsample=local_reg_downsample + ) + elif local_reg_type=='ants' or local_reg_ants: + linear_registration( sample, + local_model, + bbox_linear_xfm, + init_xfm=initial_xfm, + symmetric=segment_symmetric, + reg_type=local_reg_type, + linreg=local_reg_opts, + 
resample_order=resample_order, + ants=True, + close=True, + bbox=local_reg_bbox, + downsample=local_reg_downsample + ) + else: + linear_registration( sample, + local_model, + bbox_linear_xfm, + init_xfm=initial_xfm, + symmetric=segment_symmetric, + reg_type=local_reg_type, + linreg=local_reg_opts, + resample_order=resample_order, + close=True, + bbox=local_reg_bbox, + downsample=local_reg_downsample, + objective=local_reg_objective + ) + + else: + bbox_linear_xfm=initial_xfm + + output_info['bbox_initial_xfm']=bbox_linear_xfm + bbox_sample.mask=None + bbox_sample.seg=None + bbox_sample.seg_f=None + + warp_sample(sample, local_model, bbox_sample, + transform=bbox_linear_xfm, + symmetric=segment_symmetric, + symmetric_flip=segment_symmetric,# need to flip symmetric dataset + resample_order=resample_order, + filters=post_filters, + ) + + output_info['bbox_sample']=bbox_sample + + # TODO: run local intensity normalization + + # 3. run non-linear registration if needed + if do_nonlinear_register: + nl_sample=MriDataset(prefix=work_dir, name='nl_'+sample.name, add_n=sample_modalities ) + nonlinear_xfm=MriTransform(prefix=work_dir, name='nl_'+sample.name ) + + + if nonlinear_register_type=='elx' or nonlinear_register_type=='elastix' : + elastix_registration( bbox_sample, local_model, + nonlinear_xfm, + symmetric=segment_symmetric, + level=nlreg_level, + start_level=nlreg_start, + parameters=nlreg_options, + nl=True, + downsample=nlreg_downsample ) + elif nonlinear_register_type=='ants' or do_nonlinear_register_ants: + non_linear_registration( bbox_sample, local_model, + nonlinear_xfm, + symmetric=segment_symmetric, + level=nlreg_level, + start_level=nlreg_start, + parameters=nlreg_options, + ants=True, + downsample=nlreg_downsample ) + else: + non_linear_registration( bbox_sample, local_model, + nonlinear_xfm, + symmetric=segment_symmetric, + level=nlreg_level, + start_level=nlreg_start, + parameters=nlreg_options, + ants=False, + downsample=nlreg_downsample ) + + print("\n\n\nWarping the sample!:{}\n\n\n".format(bbox_sample)) + nl_sample.seg=None + nl_sample.seg_f=None + nl_sample.mask=None + + warp_sample(bbox_sample, local_model, nl_sample, + transform=nonlinear_xfm, + symmetric=segment_symmetric, + resample_order=resample_order) + + output_info['nl_sample']=nl_sample + else: + nl_sample=bbox_sample + + output_info['nonlinear_xfm']=nonlinear_xfm + + if exclude_re is not None: + _exclude_re=re.compile(exclude_re) + selected_library=[i for i in library if not _exclude_re.match(i[2]) and i[2] not in exclude] + else: + selected_library=[i for i in library if i[2] not in exclude] + + selected_library_f=[] + + if segment_symmetric: # fill up with all entries + selected_library_f=selected_library + + # library pre-selection if needed + # we need balanced number of samples for each group + if library_preselect>0 and library_preselect < len(selected_library): + loaded=False + loaded_f=False + + if os.path.exists(work_lib_dir+os.sep+'sel_library.json'): + with open(work_lib_dir+os.sep+'sel_library.json','r') as f: + selected_library=json.load(f) + loaded=True + + if segment_symmetric and os.path.exists(work_lib_dir_f+os.sep+'sel_library.json'): + with open(work_lib_dir_f+os.sep+'sel_library.json','r') as f: + selected_library_f=json.load(f) + loaded_f=True + + if do_nonlinear_register: + if not loaded: + selected_library=preselect(nl_sample, + selected_library, + method=library_preselect_method, + number=library_preselect, + use_nl=library_nl_samples_avail, + step=library_preselect_step, + 
lib_add_n=library_modalities, + groups=groups) + if segment_symmetric: + if not loaded_f: + selected_library_f=preselect(nl_sample, + selected_library, + method=library_preselect_method, + number=library_preselect, + use_nl=library_nl_samples_avail, + flip=True, + step=library_preselect_step, + lib_add_n=library_modalities, + groups=groups) + else: + if not loaded: + selected_library=preselect(bbox_sample, + selected_library, + method=library_preselect_method, + number=library_preselect, + use_nl=False, + step=library_preselect_step, + lib_add_n=library_modalities, + groups=groups) + if segment_symmetric: + if not loaded_f: + selected_library_f=preselect(bbox_sample, selected_library, + method=library_preselect_method, + number=library_preselect, + use_nl=False,flip=True, + step=library_preselect_step, + lib_add_n=library_modalities, + groups=groups) + + if not loaded: + with open(work_lib_dir+os.sep+'sel_library.json','w') as f: + json.dump(selected_library,f) + + if not loaded_f: + if segment_symmetric: + with open(work_lib_dir_f+os.sep+'sel_library.json','w') as f: + json.dump(selected_library_f,f) + + output_info['selected_library']=selected_library + if segment_symmetric: + output_info['selected_library_f']=selected_library_f + + selected_library_scan=[] + selected_library_xfm=[] + selected_library_warped2=[] + selected_library_xfm2=[] + + selected_library_scan_f=[] + selected_library_xfm_f=[] + selected_library_warped_f=[] + selected_library_warped2_f=[] + selected_library_xfm2_f=[] + + for (i,j) in enumerate(selected_library): + d=MriDataset(scan=j[2],seg=j[3], add=j[4:4+library_modalities],group=int(j[0]), grading=float(j[1]) ) + + selected_library_scan.append(d) + + selected_library_warped2.append( MriDataset(name=d.name, prefix=work_lib_dir, add_n=sample_modalities,group=int(j[0]), grading=float(j[1]) )) + selected_library_xfm2.append( MriTransform(name=d.name,prefix=work_lib_dir )) + + if library_nl_samples_avail: + selected_library_xfm.append( MriTransform(xfm=j[4+library_modalities], xfm_inv=j[5+library_modalities] ) ) + + output_info['selected_library_warped2']=selected_library_warped2 + output_info['selected_library_xfm2']=selected_library_xfm2 + if library_nl_samples_avail: + output_info['selected_library_xfm']=selected_library_xfm + + if segment_symmetric: + for (i,j) in enumerate(selected_library_f): + d=MriDataset(scan=j[2],seg=j[3], add=j[4:4+library_modalities], group=int(j[0]), grading=float(j[1]) ) + selected_library_scan_f.append(d) + selected_library_warped2_f.append(MriDataset(name=d.name, prefix=work_lib_dir_f, add_n=sample_modalities )) + selected_library_xfm2_f.append(MriTransform( name=d.name, prefix=work_lib_dir_f )) + + if library_nl_samples_avail: + selected_library_xfm_f.append( MriTransform(xfm=j[4+library_modalities], xfm_inv=j[5+library_modalities] )) + + output_info['selected_library_warped2_f']=selected_library_warped2_f + output_info['selected_library_xfm2_f']=selected_library_xfm2_f + if library_nl_samples_avail: + output_info['selected_library_xfm_f']=selected_library_xfm_f + + # nonlinear registration to template or individual + + if do_pairwise: # Right now ignore precomputed transformations + results=[] + if debug: + print("Performing pairwise registration") + + for (i,j) in enumerate(selected_library): + # TODO: make clever usage of precomputed transform if available + if pairwise_register_type=='elx' or pairwise_register_type=='elastix' : + results.append( futures.submit( + elastix_registration, + bbox_sample, + selected_library_scan[i], + 
selected_library_xfm2[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + nl=True, + output_inv_target=selected_library_warped2[i], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + elif pairwise_register_type=='ants' or do_pairwise_ants: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan[i], + selected_library_xfm2[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=True, + output_inv_target=selected_library_warped2[i], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + else: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan[i], + selected_library_xfm2[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=False, + output_inv_target=selected_library_warped2[i], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + + if segment_symmetric: + for (i,j) in enumerate(selected_library_f): + # TODO: make clever usage of precomputed transform if available + if pairwise_register_type=='elx' or pairwise_register_type=='elastix' : + results.append( futures.submit( + elastix_registration, + bbox_sample, + selected_library_scan_f[i], + selected_library_xfm2_f[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + nl=True, + output_inv_target=selected_library_warped2_f[i], + warp_seg=True, + flip=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + elif pairwise_register_type=='ants' or do_pairwise_ants: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan_f[i], + selected_library_xfm2_f[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=True, + output_inv_target=selected_library_warped2_f[i], + warp_seg=True, + flip=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + else: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan_f[i], + selected_library_xfm2_f[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=False, + output_inv_target=selected_library_warped2_f[i], + warp_seg=True, + flip=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + # TODO: do we really need to wait for result here? + futures.wait(results, return_when=futures.ALL_COMPLETED) + else: + + results=[] + + for (i, j) in enumerate(selected_library): + + lib_xfm=None + if library_nl_samples_avail: + lib_xfm=selected_library_xfm[i] + + results.append( futures.submit( + concat_resample, + selected_library_scan[i], + lib_xfm , + nonlinear_xfm, + selected_library_warped2[i], + resample_order=resample_order, + label_resample_order=label_resample_order, + resample_baa=resample_baa + ) ) + + if segment_symmetric: + for (i, j) in enumerate(selected_library_f): + lib_xfm=None + if library_nl_samples_avail: + lib_xfm=selected_library_xfm_f[i] + + results.append( futures.submit( + concat_resample, + selected_library_scan_f[i], + lib_xfm, + nonlinear_xfm, + selected_library_warped2_f[i], + resample_order=resample_order, + label_resample_order=label_resample_order, + resample_baa=resample_baa, + flip=True + ) ) + # TODO: do we really need to wait for result here? 
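+        # (the answer seems to be yes: the fusion step below reads the warped library
+        #  samples and segmentations from disk, so they must all exist first)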
+ futures.wait(results, return_when=futures.ALL_COMPLETED) + + results=[] + + sample_seg=MriDataset(name='bbox_seg_' + sample.name+out_variant, prefix=work_dir ) + sample_grad=MriDataset(name='bbox_grad_' + sample.name+out_variant, prefix=work_dir ) + + results.append( futures.submit( + fuse_grading, + bbox_sample, + sample_seg, + selected_library_warped2, + flip=False, + classes_number=classes_number, + fuse_options=fuse_options, + model=local_model, + debug=debug, + fuse_variant=fuse_variant, + groups=groups + )) + + if segment_symmetric: + results.append( futures.submit( + fuse_grading, + bbox_sample, + sample_seg, + selected_library_warped2_f, + flip=True, + classes_number=classes_number, + fuse_options=fuse_options, + model=local_model, + debug=debug, + fuse_variant=fuse_variant, + groups=groups + )) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + output_info['fuse']=results[0].result() + if segment_symmetric: + output_info['fuse_f']=results[1].result() + + if qc_options: + # generate QC images + output_info['qc'] = generate_qc_image(sample_seg, + bbox_sample, + sample_qc, + options=qc_options, + model=local_model, + symmetric=segment_symmetric, + labels=library_description['classes_number']) + # cleanup if need + if cleanup: + shutil.rmtree(work_lib_dir) + shutil.rmtree(work_lib_dir_f) + if nl_sample is not None: + nl_sample.cleanup() + + if cleanup_xfm: + if nonlinear_xfm is not None: + nonlinear_xfm.cleanup() + + if not run_in_bbox: + # TODO: apply error correction here + # rename labels to final results + sample_seg_native=MriDataset(name='seg_' + sample.name+out_variant, prefix=work_dir ) + + warp_rename_seg( sample_seg, sample, sample_seg_native, + transform=bbox_linear_xfm, invert_transform=True, + lut=library_description['map'] , + symmetric=segment_symmetric, + symmetric_flip=segment_symmetric, + use_flipped=segment_symmetric, # needed to flip .seg_f back to right orientation + flip_lut=library_description['flip_map'], + resample_baa=resample_baa, + resample_order=label_resample_order, + datatype=seg_datatype ) + + warp_sample(sample_seg, sample, sample_seg_native, + transform=bbox_linear_xfm, invert_transform=True, + symmetric=segment_symmetric, + symmetric_flip=segment_symmetric,# need to flip symmetric dataset + resample_order=resample_order) + + output_info['sample_seg_native']=sample_seg_native + + if segment_symmetric: + # TODO: join left and right if needed + #raise "Not implemented yet!" + # join sample_seg_native.seg and sample_seg_native.seg_f into a single file + join_left_right(sample_seg_native, output_segment+'_seg.mnc',output_segment+'_grad.mnc', datatype=seg_datatype) + else: + shutil.copyfile(sample_seg_native.seg, output_segment+'_seg.mnc') + shutil.copyfile(sample_seg_native.scan, output_segment+'_grad.mnc') + + output_info['output_segment']=output_segment+'_seg.mnc' + output_info['output_grading']=output_segment+'_grad.mnc' + + volumes=seg_to_volumes( output_segment+'_seg.mnc', + output_segment+'_vol.json', + label_map=library_description.get('label_map',None), + grad=output_segment+'_grad.mnc', + median=use_median) + + output_info['output_volumes']=volumes + output_info['output_volumes_json']=output_segment+'_vol.json' + + # TODO: cleanup more here (?) + + return (output_segment+'_seg.mnc', output_segment+'_grad.mnc', volumes, output_info) + else: # special case, needed to train error correction TODO: remove? 
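+        # in-bbox mode: report volumes straight from the fused result, which is
+        # still in the library's bounding-box space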
+ volumes=seg_to_volumes(sample_seg.seg, + output_segment+'_vol.json', + grad=sample_seg.scan, + median=use_median) + return (sample_seg.seg, sample_seg.scan, volumes, output_info) + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/grading/fuse_grading.py b/ipl/grading/fuse_grading.py new file mode 100644 index 0000000..62bb10e --- /dev/null +++ b/ipl/grading/fuse_grading.py @@ -0,0 +1,208 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + +import shutil +import os +import sys +import csv +import copy +import re +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +import ipl.minc_hl as hl + +# scoop parallel execution +from scoop import futures, shared + +from .filter import * +from .structures import * +from .registration import * +from .resample import * +from .preselect import * +from .qc import * + +import traceback + +def fuse_grading( sample, output, library, + fuse_options={}, + flip=False, + classes_number=2, + model=None, + debug=False, + fuse_variant='', + work_dir=None, + groups=None): + try: + final_out_seg=output.seg + final_out_grad=output.scan + + scan=sample.scan + add_scan=sample.add + output_info={} + + if flip: + scan=sample.scan_f + add_scan=sample.add_f + final_out_seg=output.seg_f + final_out_grad=output.scan_f + + if not os.path.exists( final_out_grad ): + with mincTools( verbose=2 ) as m: + patch=0 + search=0 + threshold=0 + iterations=0 + gco_optimize=False + nnls=False + gco_diagonal=False + label_norm=None + select_top=None + if fuse_options is not None: + + patch= fuse_options.get('patch', 0) + search= fuse_options.get('search', 0) + threshold= fuse_options.get('threshold', 0.0) + iterations= fuse_options.get('iter', 3) + weights= fuse_options.get('weights', None) + nnls = fuse_options.get('nnls', False) + label_norm = fuse_options.get('label_norm', None) + select_top = fuse_options.get('top', None) + beta = fuse_options.get('beta', None) + + if work_dir is None: + work_dir=os.path.dirname(output.seg) + + dataset_name=sample.name + + if flip: + dataset_name+='_f' + + output_info['work_dir']=work_dir + output_info['dataset_name']=work_dir + + + ##out_seg_fuse = work_dir+os.sep+dataset_name+'_'+fuse_variant+'.mnc' + out_dist = work_dir+os.sep+dataset_name+'_'+fuse_variant+'_dist.mnc' + out_grading = final_out_grad + + output_info['out_seg']=final_out_seg + output_info['out_grading']=out_grading + output_info['out_dist']=out_dist + + if label_norm is not None: + print("Using label_norm:{}".format(repr(label_norm))) + # need to create rough labeling and average + segs=['multiple_volume_similarity'] + segs.extend([ i.seg for i in library ]) + segs.extend(['--majority', m.tmp('maj_seg.mnc'), '--bg'] ) + m.execute(segs) + + scans=[ i.scan for i in library ] + m.median(scans,m.tmp('median.mnc')) + + norm_order=label_norm.get('order',3) + norm_median=label_norm.get('median',True) + + n_scan=work_dir+os.sep+dataset_name+'_'+fuse_variant+'_norm.mnc' + + if flip: + n_scan=work_dir+os.sep+dataset_name+'_'+fuse_variant+'_f_norm.mnc' + + hl.label_normalize(scan,m.tmp('maj_seg.mnc'),m.tmp('median.mnc'),m.tmp('maj_seg.mnc'),out=n_scan,order=norm_order,median=norm_median) + scan=n_scan + + if patch==0 and search==0: # perform simple majority voting + # create majority voted model segmentation, for ANIMAL segmentation if needed + # TODO: figure out what it means for grading + segs=['multiple_volume_similarity'] + segs.extend([ i.seg for i in library ]) + 
                    segs.extend(['--majority', final_out_seg, '--bg'] )
+                    m.execute(segs)
+                else:
+                    # create text file for the training library
+                    train_lib=os.path.dirname(library[0].seg)+os.sep+sample.name+'.lst'
+
+                    if flip:
+                        train_lib=os.path.dirname(library[0].seg)+os.sep+sample.name+'_f.lst'
+
+                    output_info['train_lib']=train_lib
+
+                    with open(train_lib,'w') as f:
+                        for i in library:
+                            ss=[ os.path.basename(i.scan) ]
+                            ss.extend([os.path.basename(j) for j in i.add])
+                            ss.append(os.path.basename(i.seg))
+                            ss.append(str(i.grading))
+                            ss.append(str(i.group))
+                            f.write(",".join(ss))
+                            f.write("\n")
+
+                    outputs=[]
+
+                    if len(add_scan)>0:
+                        segs=['itk_patch_morphology_mc',
+                              scan,
+                              '--train',    train_lib,
+                              '--search',   str(search),
+                              '--patch',    str(patch),
+                              '--discrete', str(classes_number),
+                              '--adist',    out_dist,
+                              '--grading',  out_grading]
+
+                        if weights is not None:
+                            segs.extend(['--weights',weights])
+
+                        segs.extend(add_scan)
+                        segs.extend(['--output', final_out_seg])
+                    else:
+                        segs=['itk_patch_morphology', scan,
+                              '--train',     train_lib,
+                              '--search',    str(search),
+                              '--patch',     str(patch),
+                              '--discrete',  str(classes_number),
+                              '--iter',      str(iterations),
+                              '--adist',     out_dist,
+                              '--threshold', str(threshold),
+                              '--grading',   out_grading,
+                              '--verbose' ]
+                        segs.append(final_out_seg)
+
+                    if beta is not None:
+                        segs.extend(['--beta',str(beta)])
+                    if sample.mask is not None:
+                        segs.extend(['--mask', sample.mask])
+                    if select_top is not None:
+                        segs.extend(['--top',str(select_top)])
+                    if groups is not None:
+                        segs.extend(['--groups',str(groups)])
+
+                    outputs=[ final_out_seg, out_grading, out_dist ]
+
+                    m.command(segs, inputs=[sample.scan], outputs=outputs)
+                    print(' '.join(segs))
+        return output_info
+    except mincError as e:
+        print("Exception in fuse_grading:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in fuse_grading:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+def join_left_right(sample,output_seg,output_grad=None,datatype=None):
+    with mincTools() as m:
+        cmd=['itk_merge_discrete_labels',sample.seg,sample.seg_f,output_seg]
+        if datatype is not None:
+            cmd.append('--'+datatype)
+        m.command(cmd,inputs=[sample.seg,sample.seg_f],outputs=[output_seg])
+        if output_grad is not None:
+            # TODO: figure out how to merge gradings
+            print("Can't merge gradings yet!")
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/labels.py b/ipl/grading/labels.py
new file mode 100644
index 0000000..c6ed099
--- /dev/null
+++ b/ipl/grading/labels.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+def split_labels_seg(sample):
+    '''split up one multi-label segmentation into a set of files'''
+    try:
+        with mincTools() as m:
+            if sample.seg is not None:
+                base=sample.seg.rsplit('.mnc',1)[0]+'_%03d.mnc'
+                sample.seg_split=m.split_labels(sample.seg,base)
+            if sample.seg_f is not None:
+                base=sample.seg_f.rsplit('.mnc',1)[0]+'_%03d.mnc'
+                sample.seg_f_split=m.split_labels(sample.seg_f,base)
+    except mincError as e:
+        print("Exception in split_labels_seg:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in split_labels_seg:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def merge_labels_seg(sample):
+    '''merge multiple segmentations into a single file'''
+    try:
+        with mincTools() as m:
+            if any(sample.seg_split):
+                if sample.seg is None:
+                    sample.seg=sample.seg_split[0].rsplit('_000.mnc',1)[0]+'.mnc'
+                m.merge_labels(sample.seg_split,sample.seg)
+            if any(sample.seg_f_split):
+                if sample.seg_f is None:
+                    sample.seg_f=sample.seg_f_split[0].rsplit('_000.mnc',1)[0]+'.mnc'
+                m.merge_labels(sample.seg_f_split,sample.seg_f)
+    except mincError as e:
+        print("Exception in merge_labels_seg:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in merge_labels_seg:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/library.py b/ipl/grading/library.py
new file mode 100644
index 0000000..5b7cea6
--- /dev/null
+++ b/ipl/grading/library.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import copy
+import json
+import os
+import sys
+import traceback
+
+def save_library_info(library_description, output,name='library.json'):
+    """Save library information into a directory, using a predefined file structure
+    Arguments:
+    library_description -- dictionary with the library description
+    output -- output directory
+
+    Keyword arguments:
+    name -- optional name of the .json file, relative to the output directory, default 'library.json'
+    """
+    try:
+        tmp_library_description=copy.deepcopy(library_description)
+        tmp_library_description.pop('prefix',None)
+
+        for i in ['local_model','local_model_mask', 'local_model_flip',
+                  'local_model_mask_flip','local_model_seg']:
+            if tmp_library_description[i] is not None:
+                tmp_library_description[i]=os.path.relpath(tmp_library_description[i],output)
+
+        for (j, i) in enumerate(tmp_library_description['local_model_add']):
+            tmp_library_description['local_model_add'][j]=os.path.relpath(i, output)
+
+        for (j, i) in enumerate(tmp_library_description['local_model_add_flip']):
+            tmp_library_description['local_model_add_flip'][j]=os.path.relpath(i, output)
+
+        for i in ['model','model_mask']:
+            # if it starts with the same prefix, remove it
+            if os.path.dirname(tmp_library_description[i])==output \
+               or tmp_library_description[i][0]!=os.sep:
+                tmp_library_description[i]=os.path.relpath(tmp_library_description[i],output)
+
+        for (j, i) in enumerate(tmp_library_description['model_add']):
+            if os.path.dirname(i)==output:
+                tmp_library_description['model_add'][j]=os.path.relpath(i, output)
+
+        for (j, i) in enumerate(tmp_library_description['library']):
+            for (k,t) in enumerate(i):
+                if k>1: # skip group and grading
+                    tmp_library_description['library'][j][k]=os.path.relpath(t, output)
+
+        with open(output+os.sep+name,'w') as f:
+            json.dump(tmp_library_description,f,indent=1)
+    except :
+        print("Error saving library information into:{} {}".format(output,sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stderr)
+        raise
+
+def load_library_info(prefix, name='library.json'):
+    """Load library information from a directory, using a predefined file structure
+    Arguments:
+    prefix -- directory path
+
+    Keyword arguments:
+    name -- optional name of the .json file, relative to the input directory, default 'library.json'
+    """
+    try:
+        library_description={}
+        with open(prefix+os.sep+name,'r') as f:
+            library_description=json.load(f)
+
+        library_description['prefix']=prefix
+
+        for i in ['local_model','local_model_mask', 'local_model_flip',
+                  'local_model_mask_flip','local_model_seg']:
+            if library_description[i] is not None: library_description[i]=prefix+os.sep+library_description[i]
+
+        try:
+            for (j, i) in enumerate(library_description['local_model_add']):
+                library_description['local_model_add'][j]=prefix+os.sep+i
+
+            for (j, i) in enumerate(library_description['local_model_add_flip']):
+                library_description['local_model_add_flip'][j]=prefix+os.sep+i
+        except KeyError:
+            pass
+
+        for (j, i) in enumerate(library_description['library']):
+            for (k,t) in enumerate(i):
+                if k>1: # skip group and grading
+                    library_description['library'][j][k]=prefix+os.sep+t
+
+        for i in ['model','model_mask']:
+            # if it starts with '/' assume it's an absolute path
+            if library_description[i] is not None and library_description[i][0]!=os.sep:
+                library_description[i]=prefix+os.sep+library_description[i]
+
+        try:
+            for (j, i) in enumerate(library_description['model_add']):
+                if library_description['model_add'][j][0]!='/':
+                    library_description['model_add'][j]=prefix+os.sep+i
+        except KeyError:
+            pass
+
+        return library_description
+    except :
+        print("Error loading library information from:{} {}".format(prefix,sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stderr)
+        raise
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/model.py b/ipl/grading/model.py
new file mode 100644
index 0000000..c326a5c
--- /dev/null
+++ b/ipl/grading/model.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+
+def create_fake_mask(in_scan, out_mask, op=None ):
+    try:
+        with mincTools() as m :
+            if op is None :
+                m.calc([in_scan], 'A[0]>0.5?1:0', out_mask, labels=True)
+            else :
+                m.binary_morphology(in_scan, op, out_mask, binarize_threshold=0.5)
+    except mincError as e:
+        print("Exception in create_fake_mask:{}".format(repr(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in create_fake_mask:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+def create_local_model(tmp_lin_samples, model, local_model,
+                       extend_boundary=4,
+                       op=None,
+                       symmetric=False ):
+    '''create an average segmentation and use it to create a local model'''
+    try:
+        with mincTools() as m:
+            segs=['multiple_volume_similarity']
+            segs.extend([ i.seg for i in tmp_lin_samples ])
+
+            if symmetric: segs.extend([ i.seg_f for i in tmp_lin_samples ])
+
+            segs.extend(['--majority', m.tmp('majority.mnc')] )
+            m.execute(segs)
+            maj=m.tmp('majority.mnc')
+
+            if op is not None:
+                m.binary_morphology(maj, op, m.tmp('majority_op.mnc'),binarize_threshold=0.5)
+                maj=m.tmp('majority_op.mnc')
+
+            # TODO: replace mincreshape/mincbbox with something more sensible
+            out=m.execute_w_output(['mincbbox', '-threshold', '0.5', '-mincreshape', maj ]).rstrip("\n").split(' ')
+
+            s=[ int(i) for i in out[1].split(',') ]
+            c=[ int(i) for i in out[3].split(',') ]
+
+            start=[s[0]-extend_boundary,   s[1]-extend_boundary,   s[2]-extend_boundary  ]
+            ext=  [c[0]+extend_boundary*2, c[1]+extend_boundary*2, c[2]+extend_boundary*2]
+
+            # reshape the mask
+            m.execute(['mincreshape',
+                       '-start','{},{},{}'.format(start[0], start[1], start[2]),
+                       '-count','{},{},{}'.format(ext[0],   ext[1],   ext[2]  ),
+                       maj , local_model.mask , '-byte' ] )
+
+            m.resample_smooth(model.scan, local_model.scan, like=local_model.mask, order=0)
+            m.resample_labels(m.tmp('majority.mnc'),local_model.seg, like=local_model.mask, order=0)
+
+            for (i,j) in enumerate(model.add):
+                m.resample_smooth(model.add[i], local_model.add[i], like=local_model.mask, order=0)
+
+    except mincError as e:
+        print("Exception in create_local_model:{}".format(repr(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in create_local_model:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def create_local_model_flip(local_model, model, remap={},
+                            extend_boundary=4, op=None ):
+    try:
+        with mincTools() as m:
+            m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0])
+            m.resample_labels(local_model.seg, m.tmp('flip_seg.mnc'),
+                              transform=m.tmp('flip_x.xfm'),
+                              order=0, remap=remap, like=model.scan)
+
+            seg=m.tmp('flip_seg.mnc')
+
+            if op is not None:
+                m.binary_morphology(seg, op, m.tmp('flip_seg_op.mnc'),binarize_threshold=0.5)
+                seg=m.tmp('flip_seg_op.mnc')
+
+            # TODO: replace mincreshape/mincbbox with something more sensible
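+            # mincbbox prints "start" and "count" fields for the thresholded region;
+            # they are parsed below and padded by extend_boundary voxels on each side
+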
out=m.execute_w_output(['mincbbox', '-threshold', '0.5', '-mincreshape', seg ]).rstrip("\n").split(' ') + + s=[ int(i) for i in out[1].split(',') ] + c=[ int(i) for i in out[3].split(',') ] + + start=[s[0]-extend_boundary, s[1]-extend_boundary ,s[2]-extend_boundary ] + ext= [c[0]+extend_boundary*2, c[1]+extend_boundary*2 ,c[2]+extend_boundary*2] + # reshape the mask + m.execute(['mincreshape', + '-start','{},{},{}'.format(start[0], start[1], start[2]), + '-count','{},{},{}'.format(ext[0], ext[1], ext[2] ), + seg, + local_model.mask_f, + '-byte' ] ) + + m.resample_smooth(local_model.scan, local_model.scan_f, + like=local_model.mask_f, order=0, transform=m.tmp('flip_x.xfm')) + + for (i,j) in enumerate(model.add_f): + m.resample_smooth(model.add[i], local_model.add_f[i], + like=local_model.mask_f, order=0, transform=m.tmp('flip_x.xfm')) + + except mincError as e: + print("Exception in create_local_model_flip:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in create_local_model_flip:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/grading/preselect.py b/ipl/grading/preselect.py new file mode 100644 index 0000000..9b0c79b --- /dev/null +++ b/ipl/grading/preselect.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + +import shutil +import os +import sys +import csv +import copy +import re +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +# scoop parallel execution +from scoop import futures, shared + +from .filter import * +from .structures import * +from .registration import * +from .resample import * + +import traceback + + +def preselect(sample, + library, + method='MI', + number=10, + mask=None, + use_nl=False, + flip=False, + step=None, + lib_add_n=0, + groups=None): + '''calculate requested similarity function and return top number of elements from the library''' + results=[] + column=2 # skip over grading and group + + # TODO: use multiple modalities for preselection? 
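+    # A hedged note (editor): each library row is laid out by generate_library()
+    # in train.py as
+    #   [group, grading, scan, seg, add_1..add_n, nl_xfm, nl_xfm_inv, nl_scan, nl_seg]
+    # so column 2 is the linearly resampled scan and, when use_nl is set,
+    # column 6+lib_add_n is the non-linearly warped scan used for matching.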
+    if use_nl:
+        column=6+lib_add_n
+
+    for (i,j) in enumerate(library):
+        results.append( futures.submit(
+            calculate_similarity, sample, MriDataset(scan=j[column]), method=method, mask=mask, flip=flip, step=step
+            ) )
+    futures.wait(results, return_when=futures.ALL_COMPLETED)
+
+    val=[ (j.result(), int(library[i][0]), library[i] ) for (i,j) in enumerate(results)]
+
+    if groups is None:
+        val_sorted=sorted(val, key=lambda s: s[0] )
+        return [ i[2] for i in val_sorted[ 0:number] ]
+    else:
+        s_number=number//groups
+        res=[]
+
+        for i in range(groups):
+            val_sorted=sorted( [v for v in val if v[1]==i] , key=lambda s: s[0] )
+            res.extend( val_sorted[0:s_number] )
+
+        return [ i[2] for i in res ]
+
+
+def calculate_similarity(sample1, sample2,
+                         mask=None, method='MI',
+                         flip=False, step=None):
+    try:
+        with mincTools() as m:
+            scan=sample1.scan
+
+            if flip:
+                scan=sample1.scan_f
+
+            # figure out the step size, minctracc works extremely slowly when the step size is smaller than the file step size
+            info_sample1=m.mincinfo( sample1.scan )
+
+            cmds=[ 'minctracc', scan, sample2.scan, '-identity' ]
+
+            if method=='MI':
+                cmds.extend( ['-nmi', '-blur_pdf', '9'] )
+            else:
+                cmds.append( '-xcorr' )
+
+            if step is None:
+                step= max( abs( info_sample1['xspace'].step ) ,
+                           abs( info_sample1['yspace'].step ) ,
+                           abs( info_sample1['zspace'].step ) )
+
+            cmds.extend([
+                '-step', str(step), str(step), str(step),
+                '-simplex', '1',
+                '-tol', '0.01',
+                '-lsq6',
+                '-est_center',
+                '-clob',
+                m.tmp('similarity.xfm')
+                ])
+
+            if mask is not None:
+                cmds.extend( ['-source_mask', mask])
+
+            output=re.search( r'^Final objective function value = (\S+)' , m.execute_w_output(cmds, verbose=0), flags=re.MULTILINE).group(1)
+
+            return float(output)
+
+    except mincError as e:
+        print("Exception in calculate_similarity:{}".format( str(e)) )
+        traceback.print_exc( file=sys.stdout )
+        raise
+
+    except :
+        print("Exception in calculate_similarity:{}".format( sys.exc_info()[0]) )
+        traceback.print_exc( file=sys.stdout )
+        raise
diff --git a/ipl/grading/qc.py b/ipl/grading/qc.py
new file mode 100644
index 0000000..cfd9adc
--- /dev/null
+++ b/ipl/grading/qc.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import copy
+import re
+import json
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+import traceback
+
+
+def make_contours(input, output, width=1):
+    """Convert a multi-label image into another multi-label image keeping only the label borders
+    Arguments:
+    input -- input minc file
+    output -- output file
+
+    Keyword arguments:
+    width -- width of the border to leave behind, default 1 (voxels)
+    """
+    with mincTools() as m:
+        m.command(['c3d', input,'-split',
+                   '-foreach',
+                   '-dup', '-erode', '1' ,'{}x{}x{}'.format(width,width,width), '-scale', '-1',
+                   '-add',
+                   '-endfor',
+                   '-merge',
+                   '-type', 'short','-o',output],
+                  inputs=[input],outputs=[output],
+                  verbose=True)
+
+def generate_qc_image(sample_seg,
+                      sample,
+                      sample_qc,
+                      options={},
+                      model=None,
+                      symmetric=False,
+                      labels=2,
+                      title=None):
+    """Generate QC image for multilabel segmentation
+    Arguments:
+    sample_seg -- input segmentation
+    sample -- input file
+    sample_qc -- output QC file
+
+    Keyword arguments:
+    options -- options as dictionary with following keys:
+        lut_file -- LUT file for minclookup, default None
+        spectral_mask -- boolean , if spectral mask should be used, default False
+        dicrete_mask -- boolean , if discrete mask should be used, default False
+        image_range -- list of two real values
+        clamp -- boolean, if range clamp should be used
+        big -- boolean, generate a large QC image
+        contours -- boolean, show label contours instead of solid labels
+        contour_width -- integer, width of the label contours (voxels), default 1
+        crop -- real, remove this many voxels from the edges before rendering
+    model -- reference model, default None
+    symmetric -- boolean, if symmetric QC is needed
+    labels -- integer, number of labels present, default 2
+    title -- QC image title
+    """
+    try:
+
+        #TODO: implement advanced features
+        qc_lut=options.get('lut_file',None)
+        spectral_mask=options.get('spectral_mask',False)
+        dicrete_mask=options.get('dicrete_mask',False)
+        image_range=options.get('image_range',None)
+        clamp=options.get('clamp',False)
+        big=options.get('big',False)
+        contours=options.get('contours',False)
+        contour_width=options.get('contour_width',1)
+        crop=options.get('crop',None)
+
+        if qc_lut is not None:
+            spectral_mask=False
+            dicrete_mask=True
+
+        with mincTools() as m:
+            seg=sample_seg.seg
+            seg_f=sample_seg.seg_f
+            scan=sample.scan
+            scan_f=sample.scan_f
+
+            if crop is not None:
+                # remove voxels from the edge
+                m.autocrop(scan,m.tmp('scan.mnc'),isoexpand=-crop)
+                scan=m.tmp('scan.mnc')
+                m.resample_labels(seg,m.tmp('seg.mnc'),like=scan)
+                seg=m.tmp('seg.mnc')
+
+                if symmetric:
+                    m.autocrop(scan_f,m.tmp('scan_f.mnc'),isoexpand=-crop)
+                    scan_f=m.tmp('scan_f.mnc')
+                    m.resample_labels(seg_f,m.tmp('seg_f.mnc'),like=scan)
+                    seg_f=m.tmp('seg_f.mnc')
+
+            if contours:
+                make_contours(seg,m.tmp('seg_contours.mnc'),width=contour_width)
+                seg=m.tmp('seg_contours.mnc')
+                if symmetric:
+                    make_contours(seg_f,m.tmp('seg_f_contours.mnc'),width=contour_width)
+                    seg_f=m.tmp('seg_f_contours.mnc')
+
+            if symmetric:
+
+                m.qc( scan,
+                      m.tmp('qc.png'),
+                      mask=seg,
+                      mask_range=[0,labels-1],
+                      big=True,
+                      clamp=clamp,
+                      image_range=image_range,
+                      spectral_mask=spectral_mask,
+                      dicrete_mask=dicrete_mask,
+                      mask_lut=qc_lut)
+
+                m.qc( scan_f,
+                      m.tmp('qc_f.png'),
+                      mask=seg_f,
+                      mask_range=[0,labels-1],
+                      image_range=image_range,
+                      big=True,
+                      clamp=clamp,
+                      spectral_mask=spectral_mask,
+                      dicrete_mask=dicrete_mask,
+                      title=title,
+                      mask_lut=qc_lut)
+
+                m.command(['montage','-tile','2x1','-geometry','+1+1',
+                           m.tmp('qc.png'),m.tmp('qc_f.png'),sample_qc],
+                          inputs=[m.tmp('qc.png'),m.tmp('qc_f.png')],
+                          outputs=[sample_qc])
+            else:
+                m.qc( scan,
+                      sample_qc,
+                      mask=seg,
+                      mask_range=[0,labels-1],
+                      image_range=image_range,
+                      big=True,
+                      mask_lut=qc_lut,
+                      spectral_mask=spectral_mask,
+                      dicrete_mask=dicrete_mask,
+                      clamp=clamp,
+                      title=title)
+
+        return [sample_qc]
+    except mincError as e:
+        print("Exception in generate_qc_image:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in generate_qc_image:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/registration.py b/ipl/grading/registration.py
new file mode 100644
index 0000000..2ca1467
--- /dev/null
+++ b/ipl/grading/registration.py
@@ -0,0 +1,638 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+import ipl.registration
+import ipl.ants_registration
+import ipl.elastix_registration
+
+def linear_registration(
+    sample,
+    model,
+    output_xfm,
+    output_sample=None,
+    output_invert_xfm=None,
+    init_xfm=None,
+    symmetric=False,
+    ants=False,
+    reg_type='-lsq12',
+    objective='-xcorr',
+    linreg=None,
+    work_dir=None,
+    close=False,
+    warp_seg=False,
+    resample_order=2,
+    resample_aa=None,
+    resample_baa=False,
+    downsample=None,
+    bbox=False
+    ):
+    """perform linear registration to the model, and calculate inverse"""
+    try:
+
+        _init_xfm=None
+        _init_xfm_f=None
+
+        if init_xfm is not None:
+            _init_xfm=init_xfm.xfm
+            if symmetric:
+                _init_xfm_f=init_xfm.xfm_f
+
+        with mincTools() as m:
+
+            if not m.checkfiles(inputs=[sample.scan], outputs=[output_xfm.xfm]): return
+
+            #if _init_xfm is None:
+            #    _init_xfm=_init_xfm_f=m.tmp('identity.xfm')
+            #    m.param2xfm(m.tmp('identity.xfm'))
+
+            scan=sample.scan
+            scan_f=sample.scan_f
+            mask=sample.mask
+            mask_f=sample.mask_f
+
+            _output_xfm=output_xfm.xfm
+            _output_xfm_f=output_xfm.xfm_f
+
+            if bbox:
+                scan=m.tmp('scan.mnc')
+                m.resample_smooth(sample.scan, scan, like=model.scan, transform=_init_xfm)
+                if sample.mask is not None:
+                    mask=m.tmp('mask.mnc')
+                    m.resample_labels(sample.mask, mask, like=model.scan, transform=_init_xfm)
+
+                _init_xfm=None
+                close=True
+                _output_xfm=m.tmp('output.xfm')
+                if symmetric:
+                    scan_f=m.tmp('scan_f.mnc')
+                    m.resample_smooth(sample.scan_f, scan_f, like=model.scan, transform=_init_xfm_f)
+                    if sample.mask_f is not None:
+                        mask_f=m.tmp('mask_f.mnc')
+                        m.resample_labels(sample.mask_f, mask_f, like=model.scan, transform=_init_xfm_f)
+                    _init_xfm_f=None
+                    _output_xfm_f=m.tmp('output_f.xfm')
+
+            if symmetric:
+                if ants:
+                    ipl.ants_registration.linear_register_ants2(
+                        scan,
+                        model.scan,
+                        _output_xfm,
+                        source_mask=mask,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm,
+                        parameters=linreg,
+                        close=close,
+                        downsample=downsample,
+                        )
+                    ipl.ants_registration.linear_register_ants2(
+                        scan_f,
+                        model.scan,
+                        _output_xfm_f,
+                        source_mask=mask_f,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm_f,
+                        parameters=linreg,
+                        close=close,
+                        downsample=downsample,
+                        )
+                else:
+                    ipl.registration.linear_register(
+                        scan,
+                        model.scan,
+                        _output_xfm,
+                        source_mask=mask,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm,
+                        objective=objective,
+                        parameters=reg_type,
+                        conf=linreg,
+                        close=close,
+                        downsample=downsample,
+                        )
+
+                    ipl.registration.linear_register(
+                        scan_f,
+                        model.scan,
+                        _output_xfm_f,
+                        source_mask=mask_f, +
target_mask=model.mask, + init_xfm=_init_xfm_f, + objective=objective, + parameters=reg_type, + conf=linreg, + close=close, + downsample=downsample, + ) + else: + if ants: + ipl.ants_registration.linear_register_ants2( + scan, + model.scan, + _output_xfm, + source_mask=mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=linreg, + close=close, + downsample=downsample + ) + else: + ipl.registration.linear_register( + scan, + model.scan, + _output_xfm, + source_mask=mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=reg_type, + conf=linreg, + close=close, + downsample=downsample, + ) + if bbox : + if init_xfm is not None: + m.xfmconcat([init_xfm.xfm,_output_xfm],output_xfm.xfm) + if symmetric: + m.xfmconcat([init_xfm.xfm_f,_output_xfm_f],output_xfm.xfm_f) + else: + shutil.copyfile(_output_xfm,output_xfm.xfm) + if symmetric: + shutil.copyfile(_output_xfm_f,output_xfm.xfm_f) + + if output_invert_xfm is not None: + m.xfminvert(output_xfm.xfm, output_invert_xfm.xfm) + if symmetric: + m.xfminvert(output_xfm.xfm_f, output_invert_xfm.xfm_f) + + if output_sample is not None: + m.resample_smooth(sample.scan, output_sample.scan, + transform=output_xfm.xfm, + like=model.scan, + order=resample_order) + if warp_seg: + m.resample_labels(sample.seg, output_sample.seg, + transform=output_xfm.xfm, + aa=resample_aa, + order=resample_order, + like=model.scan, + baa=resample_baa) + + if symmetric: + m.resample_smooth(sample.scan_f, output_sample.scan_f, + transform=output_xfm.xfm_f, + like=model.scan, + order=resample_order) + if warp_seg: + m.resample_labels(sample.seg_f, output_sample.seg_f, + transform=output_xfm.xfm_f, + aa=resample_aa, + order=resample_order, + like=model.scan, + baa=resample_baa) + + return True + except mincError as e: + print("Exception in linear_registration:{} {}".format(sample.name,str(e))) + traceback.print_exc(file=sys.stderr) + raise + except : + print("Exception in linear_registration:{} {}".format(sample.name,sys.exc_info()[0])) + traceback.print_exc(file=sys.stderr) + raise + + +def elastix_registration( + sample, + model, + output_xfm, + output_sample=None, + output_invert_xfm=None, + init_xfm=None, + symmetric=False, + work_dir=None, + warp_seg=False, + resample_order=2, + resample_aa=None, + resample_baa=False, + downsample=None, + downsample_grid=None, + parameters=None, + bbox=False, + nl=False + ): + """perform elastix registration to the model, and calculate inverse""" + try: + + with mincTools() as m: + + if not m.checkfiles(inputs=[sample.scan], outputs=[output_xfm.xfm]): return + + _init_xfm=None + _init_xfm_f=None + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + mask=sample.mask + mask_f=sample.mask_f + model_mask=model.mask + + if mask is None: + model_mask=None + + scan=sample.scan + scan_f=sample.scan_f + + _output_xfm=output_xfm.xfm + _output_xfm_f=output_xfm.xfm_f + + if bbox: + scan=m.tmp('scan.mnc') + m.resample_smooth(sample.scan, scan, like=model.scan, transform=_init_xfm) + if sample.mask is not None: + mask=m.tmp('mask.mnc') + m.resample_labels(sample.mask, mask, like=model.scan, transform=_init_xfm) + _init_xfm=None + close=True + _output_xfm=m.tmp('output.xfm') + + if symmetric: + scan_f=m.tmp('scan_f.mnc') + m.resample_smooth(sample.scan_f, scan_f, like=model.scan, transform=_init_xfm_f) + if sample.mask_f is not None: + mask_f=m.tmp('mask_f.mnc') + m.resample_labels(sample.mask_f, mask_f, like=model.scan, transform=_init_xfm_f) + _init_xfm_f=None + 
_output_xfm_f=m.tmp('output_f.xfm') + + #TODO: update elastix registration to downsample xfm? + if symmetric: + ipl.elastix_registration.register_elastix( + scan, + model.scan, + output_xfm=_output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + downsample=downsample, + downsample_grid=downsample_grid, + parameters=parameters, + nl=nl + ) + ipl.elastix_registration.register_elastix( + scan_f, + model.scan, + output_xfm=_output_xfm_f, + source_mask=mask_f, + target_mask=model_mask, + init_xfm=_init_xfm_f, + downsample=downsample, + downsample_grid=downsample_grid, + parameters=parameters, + nl=nl + ) + else: + ipl.elastix_registration.register_elastix( + scan, + model.scan, + output_xfm=_output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + downsample=downsample, + downsample_grid=downsample_grid, + parameters=parameters, + nl=nl + ) + + if bbox : + if init_xfm is not None: + m.xfmconcat([init_xfm.xfm,_output_xfm],output_xfm.xfm) + if symmetric: + m.xfmconcat([init_xfm.xfm_f,_output_xfm_f],output_xfm.xfm_f) + else: + shutil.copyfile(_output_xfm,output_xfm.xfm) + if symmetric: + shutil.copyfile(_output_xfm_f,output_xfm.xfm_f) + + + if output_invert_xfm is not None: + m.xfminvert(output_xfm.xfm, output_invert_xfm.xfm) + if symmetric: + m.xfminvert(output_xfm.xfm_f, output_invert_xfm.xfm_f) + + if output_sample is not None: + m.resample_smooth(sample.scan, output_sample.scan, + transform=output_xfm.xfm, + like=model.scan, order=resample_order) + if warp_seg: + m.resample_labels(sample.seg, output_sample.seg, + transform=output_xfm.xfm, + aa=resample_aa, order=resample_order, + like=model.scan, baa=resample_baa) + + if symmetric: + m.resample_smooth(sample.scan_f, output_sample.scan_f, + transform=output_xfm.xfm_f, + like=model.scan, order=resample_order) + if warp_seg: + m.resample_labels(sample.seg_f, output_sample.seg_f, + transform=output_xfm.xfm_f, + aa=resample_aa, order=resample_order, + like=model.scan, baa=resample_baa) + + return True + except mincError as e: + print("Exception in elastix_registration:{} {}".format(sample.name,str(e))) + traceback.print_exc(file=sys.stderr) + raise + except : + print("Exception in elastix_registration:{} {}".format(sample.name,sys.exc_info()[0])) + traceback.print_exc(file=sys.stderr) + raise + + + +def non_linear_registration( + sample, + model, + output, + output_sample=None, + output_invert=True, + init_xfm=None, + level=2, + start_level=8, + symmetric=False, + parameters=None, + work_dir=None, + ants=False, + warp_seg=False, + resample_order=2, + resample_aa=None, + resample_baa=False, + output_inv_target=None, + flip=False, + downsample=None, + ): + """perform linear registration to the model, and calculate inverse""" + + try: + _init_xfm=None + _init_xfm_f=None + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + with mincTools() as m: + + if not m.checkfiles(inputs=[sample.scan], outputs=[output.xfm]): return + + if symmetric: + # TODO: split up into two jobs? 
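+                # A hedged note (editor): both the direct and the flipped scans are
+                # registered to the model below; each resulting deformation is then
+                # resampled onto a uniform grid with xfm_normalize(step=level).
+                # ANTs writes a '<prefix>_inverse.xfm' next to the forward transform,
+                # while for the minctracc path the inverse is produced with invert=True.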
+ if not os.path.exists( output.xfm ) or \ + not os.path.exists( output.xfm_f ) : + if ants: + ipl.ants_registration.non_linear_register_ants2( + sample.scan, + model.scan, + m.tmp('forward')+'.xfm', + target_mask=model.mask, + parameters=parameters, + downsample=downsample, + level=level, + start=start_level, + #work_dir=work_dir + ) + ipl.ants_registration.non_linear_register_ants2( + sample.scan_f, + model.scan, + m.tmp('forward_f')+'.xfm', + target_mask=model.mask, + parameters=parameters, + downsample=downsample, + level=level, + start=start_level, + #work_dir=work_dir + ) + else: + ipl.registration.non_linear_register_full( + sample.scan, + model.scan, + m.tmp('forward')+'.xfm', + #source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start_level, + downsample=downsample, + #work_dir=work_dir + ) + ipl.registration.non_linear_register_full( + sample.scan_f, + model.scan, + m.tmp('forward_f')+'.xfm', + #source_mask=sample.mask_f, + target_mask=model.mask, + init_xfm=_init_xfm_f, + parameters=parameters, + level=level, + start=start_level, + downsample=downsample, + #work_dir=work_dir + ) + m.xfm_normalize(m.tmp('forward')+'.xfm',model.scan,output.xfm,step=level) + #TODO: regularize here + m.xfm_normalize(m.tmp('forward_f')+'.xfm',model.scan,output.xfm_f,step=level) + + if output_invert: + if ants: + m.xfm_normalize(m.tmp('forward')+'_inverse.xfm', model.scan, output.xfm_inv, step=level ) + m.xfm_normalize(m.tmp('forward_f')+'_inverse.xfm',model.scan, output.xfm_f_inv, step=level ) + else: + m.xfm_normalize(m.tmp('forward')+'.xfm', model.scan, output.xfm_inv, step=level, invert=True) + m.xfm_normalize(m.tmp('forward_f')+'.xfm',model.scan, output.xfm_f_inv, step=level, invert=True) + else: + if not os.path.exists( output.xfm ) : + if flip: + if ants: + ipl.ants_registration.non_linear_register_ants2( + sample.scan_f, + model.scan, + m.tmp('forward')+'.xfm', + target_mask=model.mask, + parameters=parameters, + downsample=downsample, + level=level, + start=start_level, + #work_dir=work_dir + ) + else: + ipl.registration.non_linear_register_full( + sample.scan_f, + model.scan, + m.tmp('forward')+'.xfm', + #source_mask=sample.mask_f, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start_level, + downsample=downsample, + #work_dir=work_dir + ) + else: + if ants: + ipl.ants_registration.non_linear_register_ants2( + sample.scan, + model.scan, + m.tmp('forward')+'.xfm', + target_mask=model.mask, + parameters=parameters, + downsample=downsample, + level=level, + start=start_level, + #work_dir=work_dir + ) + else: + ipl.registration.non_linear_register_full( + sample.scan, + model.scan, + m.tmp('forward')+'.xfm', + #source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start_level, + downsample=downsample, + #work_dir=work_dir + ) + m.xfm_normalize(m.tmp('forward')+'.xfm', model.scan, output.xfm, step=level) + + if output_invert: + if ants: # ANTS produces forward and invrese + m.xfm_normalize(m.tmp('forward')+'_inverse.xfm', model.scan, output.xfm_inv, step=level ) + else: + m.xfm_normalize(m.tmp('forward')+'.xfm', model.scan, output.xfm_inv, step=level, invert=True) + + if output_sample is not None: + m.resample_smooth(sample.scan, output_sample.scan, + transform=output.xfm_inv, + like=model.scan, + order=resample_order, + invert_transform=True) + + for (i,j) in enumerate(sample.add): + 
m.resample_smooth(sample.add[i], output_sample.add[i], + transform=output.xfm_inv, + like=model.scan, + order=resample_order, + invert_transform=True) + if warp_seg: + m.resample_labels(sample.seg, output_sample.seg, + transform=output.xfm_inv, + aa=resample_aa, + order=resample_order, + like=model.scan, + invert_transform=True, + baa=resample_baa) + + if symmetric: + m.resample_smooth(sample.scan_f, output_sample.scan_f, + transform=output.xfm_f_inv, + like=model.scan, + invert_transform=True, + order=resample_order) + + for (i,j) in enumerate(sample.add_f): + m.resample_smooth(sample.add_f[i], output_sample.add_f[i], + transform=output.xfm_f_inv, + like=model.scan, + order=resample_order, + invert_transform=True) + + if warp_seg: + m.resample_labels(sample.seg_f, output_sample.seg_f, + transform=output.xfm_f_inv, + aa=resample_aa, + order=resample_order, + like=model.scan, + invert_transform=True, + baa=resample_baa ) + + if output_inv_target is not None: + m.resample_smooth(model.scan, output_inv_target.scan, + transform=output.xfm, + like=sample.scan, + order=resample_order, + invert_transform=True) + + for (i,j) in enumerate(output_inv_target.add): + m.resample_smooth(model.add[i], output_inv_target.add[i], + transform=output.xfm_inv, + like=model.scan, + order=resample_order, + invert_transform=True) + + if warp_seg: + m.resample_labels(model.seg, output_inv_target.seg, + transform=output.xfm, + aa=resample_aa, + order=resample_order, + like=sample.scan, + invert_transform=True, + baa=resample_baa) + + if symmetric: + m.resample_smooth(model.scan, output_inv_target.scan_f, + transform=output.xfm_f, + like=sample.scan, + invert_transform=True, + order=resample_order) + + for (i,j) in enumerate(output_inv_target.add): + m.resample_smooth(model.add_f[i], output_inv_target.add_f[i], + transform=output.xfm_f, + like=sample.scan, + invert_transform=True, + order=resample_order) + + if warp_seg: + m.resample_labels(model.seg, output_inv_target.seg_f, + transform=output.xfm_f, + aa=resample_aa, + order=resample_order, + like=sample.scan, + invert_transform=True, + baa=resample_baa ) + + except mincError as e: + print("Exception in non_linear_registration:{} {}".format(sample.name,repr(e))) + traceback.print_exc(file=sys.stderr) + raise + except : + print("Exception in non_linear_registration:{} {}".format(sample.name,sys.exc_info()[0])) + traceback.print_exc(file=sys.stderr) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/grading/resample.py b/ipl/grading/resample.py new file mode 100644 index 0000000..74db66b --- /dev/null +++ b/ipl/grading/resample.py @@ -0,0 +1,296 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+from .filter import *
+
+
+# scoop parallel execution
+from scoop import futures, shared
+
+
+def resample_file(input,output,xfm=None,like=None,order=4,invert_transform=False):
+    '''resample input file using provided transformation'''
+    try:
+        with mincTools() as m:
+            m.resample_smooth(input,output,transform=xfm,like=like,order=order,invert_transform=invert_transform)
+    except mincError as e:
+        print("Exception in resample_file:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in resample_file:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+def resample_split_segmentations(input, output,xfm=None, like=None, order=4, invert_transform=False, symmetric=False):
+    '''resample individual segmentations, using parallel execution'''
+    results=[]
+    base=input.seg.rsplit('.mnc',1)[0]
+    for (i,j) in input.seg_split.items():
+        if i not in output.seg_split:
+            output.seg_split[i]='{}_{:03d}.mnc'.format(base,i)
+
+        results.append(futures.submit(
+            resample_file,j,output.seg_split[i],xfm=xfm,like=like,order=order,invert_transform=invert_transform
+            ))
+    if symmetric:
+        base=input.seg_f.rsplit('.mnc',1)[0]
+        for (i,j) in input.seg_f_split.items():
+            if i not in output.seg_f_split:
+                output.seg_f_split[i]='{}_{:03d}.mnc'.format(base,i)
+
+            results.append(futures.submit(
+                resample_file,j,output.seg_f_split[i],xfm=xfm,like=like,order=order,invert_transform=invert_transform
+                ))
+    futures.wait(results, return_when=futures.ALL_COMPLETED)
+
+
+def warp_rename_seg( sample, model, output,
+                     transform=None,
+                     symmetric=False,
+                     symmetric_flip=False,
+                     lut=None,
+                     flip_lut=None,
+                     resample_order=2,
+                     resample_aa=None,
+                     resample_baa=False,
+                     invert_transform=False,
+                     use_flipped=False,
+                     datatype=None):
+    try:
+        with mincTools() as m:
+            xfm=None
+            xfm_f=None
+            if transform is not None:
+                xfm=transform.xfm
+
+                if symmetric:
+                    xfm_f=transform.xfm_f
+
+            m.resample_labels(sample.seg, output.seg,
+                              transform=xfm,
+                              aa=resample_aa,
+                              order=resample_order,
+                              remap=lut,
+                              like=model.scan,
+                              invert_transform=invert_transform,
+                              datatype=datatype,
+                              baa=resample_baa)
+
+            if symmetric:
+
+                seg_f=sample.seg
+
+                if use_flipped:
+                    seg_f=sample.seg_f
+
+                if symmetric_flip:
+                    m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0])
+                    xfm_f=m.tmp('flip_x.xfm')
+
+                    if transform is not None:
+                        m.xfmconcat( [m.tmp('flip_x.xfm'), transform.xfm_f ], m.tmp('transform_flip.xfm') )
+                        xfm_f=m.tmp('transform_flip.xfm')
+
+                m.resample_labels(seg_f, output.seg_f,
+                                  transform=xfm_f,
+                                  aa=resample_aa,
+                                  order=resample_order,
+                                  remap=flip_lut,
+                                  like=model.scan,
+                                  invert_transform=invert_transform,
+                                  datatype=datatype,
+                                  baa=resample_baa)
+
+            output.mask=None
+            output.mask_f=None
+
+    except mincError as e:
+        print("Exception in warp_rename_seg:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+
+    except :
+        print("Exception in warp_rename_seg:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+def warp_sample( sample,
+                 model,
+                 output,
+                 transform=None,
+                 symmetric=False,
+                 symmetric_flip=False,
+                 resample_order=None,
+                 use_flipped=False,
+                 invert_transform=False,
+                 filters=None):
+    # TODO: add filters here
+    try:
+        with mincTools() as m:
+            xfm=None
+            xfm_f=None
+            seg_output=output.seg
+            seg_output_f=output.seg_f
+
+            #if seg_output is None:
+            #    seg_output=model.seg
+
+            #if seg_output_f is None:
+            #    seg_output_f=model.seg
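+            # A hedged note (editor): warp_sample() resamples the native scan into
+            # the (local) model space and, when 'filters' is given, rewrites it with
+            # apply_filter(), using the already warped labels (seg_output) to drive
+            # label-wise intensity normalization against the model (see HACK below).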
+            if transform is not None:
+                xfm=transform.xfm
+                if symmetric:
+                    xfm_f=transform.xfm_f
+
+            output_scan=output.scan
+
+            if filters is not None:
+                output_scan=m.tmp('sample.mnc')
+
+            m.resample_smooth(sample.scan, output_scan, transform=xfm, like=model.scan, order=resample_order, invert_transform=invert_transform)
+
+            if filters is not None:
+                # TODO: maybe move it to a separate stage?
+                # HACK: assuming that segmentation was already warped!
+                apply_filter(output_scan, output.scan, filters, model=model.scan, input_mask=output.mask, input_labels=seg_output, model_labels=model.seg)
+
+            for (i,j) in enumerate( sample.add ):
+                output_scan = output.add[i]
+                if filters is not None:
+                    output_scan=m.tmp('sample_{}.mnc'.format(i))
+
+                m.resample_smooth(sample.add[i], output_scan, transform=xfm, like=model.scan, order=resample_order,invert_transform=invert_transform)
+
+                if filters is not None:
+                    # TODO: maybe move it to a separate stage?
+                    # TODO: apply segmentations for seg-based filtering
+                    apply_filter(output_scan, output.add[i], filters, model=model.scan, input_mask=output.mask, input_labels=seg_output, model_labels=model.seg)
+
+            if symmetric:
+                scan_f=sample.scan
+                if use_flipped:
+                    scan_f=sample.scan_f
+
+                if symmetric_flip:
+                    m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0])
+                    xfm_f=m.tmp('flip_x.xfm')
+
+                    if transform is not None:
+                        m.xfmconcat( [m.tmp('flip_x.xfm'), transform.xfm_f ], m.tmp('transform_flip.xfm') )
+                        xfm_f=m.tmp('transform_flip.xfm')
+
+                output_scan_f=output.scan_f
+                if filters is not None:
+                    output_scan_f=m.tmp('sample_f.mnc')
+
+                m.resample_smooth(scan_f, output_scan_f, transform=xfm_f, like=model.scan, order=resample_order,invert_transform=invert_transform)
+
+                if filters is not None:
+                    # TODO: maybe move it to a separate stage?
+                    apply_filter(output_scan_f, output.scan_f, filters, model=model.scan, input_mask=output.mask_f, input_labels=seg_output_f, model_labels=model.seg)
+
+                for (i,j) in enumerate( sample.add_f ):
+                    output_scan_f = output.add_f[i]
+                    if filters is not None:
+                        output_scan_f=m.tmp('sample_f_{}.mnc'.format(i))
+
+                    m.resample_smooth( sample.add_f[i], output_scan_f, transform=xfm_f, like=model.scan, order=resample_order,invert_transform=invert_transform)
+
+                    if filters is not None:
+                        # TODO: maybe move it to a separate stage?
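+                        # (editor) This mirrors the primary-modality path above:
+                        # resample the flipped additional modality, then normalize it
+                        # against the model using the flipped warped labels.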
+                        apply_filter( output_scan_f, output.add_f[i], filters, model=model.scan, input_mask=output.mask_f, input_labels=seg_output_f, model_labels=model.seg)
+
+            output.mask=None
+            output.mask_f=None
+
+    except mincError as e:
+        print("Exception in warp_sample:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in warp_sample:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+def concat_resample(lib_scan,
+                    xfm_lib,
+                    xfm_sample,
+                    output,
+                    model=None,
+                    resample_aa=None,
+                    resample_order=2,
+                    label_resample_order=2,
+                    resample_baa=False,
+                    flip=False ):
+    '''Concatenate inv(xfm2) and inv(xfm1) and resample scan'''
+    try:
+
+        if not os.path.exists(output.seg) or \
+           not os.path.exists(output.scan) :
+            with mincTools() as m:
+                _model=None
+
+                if model is not None:
+                    _model=model.scan
+
+                full_xfm=None
+
+                if xfm_lib is not None and xfm_sample is not None:
+                    if flip:
+                        m.xfmconcat([ xfm_sample.xfm_f, xfm_lib.xfm_inv ], m.tmp('Full.xfm') )
+                    else:
+                        m.xfmconcat([ xfm_sample.xfm, xfm_lib.xfm_inv ], m.tmp('Full.xfm') )
+                    full_xfm=m.tmp('Full.xfm')
+                elif xfm_lib is not None:
+                    full_xfm=xfm_lib.xfm_inv
+                elif xfm_sample is not None:
+                    if flip:
+                        full_xfm=xfm_sample.xfm_f
+                    else:
+                        full_xfm=xfm_sample.xfm
+
+                m.resample_labels(lib_scan.seg, output.seg,
+                                  transform=full_xfm,
+                                  aa=resample_aa,
+                                  order=label_resample_order,
+                                  like=_model,
+                                  invert_transform=True,
+                                  baa=resample_baa )
+
+                m.resample_smooth(lib_scan.scan, output.scan,
+                                  transform=full_xfm,
+                                  order=resample_order,
+                                  like=_model,
+                                  invert_transform=True)
+
+                for (i,j) in enumerate(lib_scan.add):
+                    m.resample_smooth(lib_scan.add[i], output.add[i],
+                                      transform=full_xfm,
+                                      order=resample_order,
+                                      like=_model,
+                                      invert_transform=True)
+    except mincError as e:
+        print("Exception in concat_resample:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in concat_resample:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/structures.py b/ipl/grading/structures.py
new file mode 100644
index 0000000..7f7dad9
--- /dev/null
+++ b/ipl/grading/structures.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+# data structures used in segmentation package
+
+import shutil
+import os
+import sys
+import tempfile
+import traceback
+import json
+
+
+class MriDataset(object):
+    ''' Scan sample with segmentation and mask'''
+    def __init__(self, prefix=None, name=None, scan=None, mask=None, seg=None,
+                 scan_f=None, mask_f=None, seg_f=None, protect=False,
+                 add=[], add_n=None,
+                 add_f=[], group=None,grading=None ):
+        self.prefix=prefix
+        self.name=name
+        self.scan=scan
+        self.mask=mask
+        self.seg=seg
+        self.protect=protect
+        self.seg_split={}
+        self.group=group
+        self.grading=grading
+
+        self.scan_f = scan_f
+        self.mask_f = mask_f
+        self.seg_f  = seg_f
+        self.seg_f_split={}
+        self.add    = add
+        self.add_f  = add_f
+
+        if self.name is None :
+            if scan is not None:
+                self.name=os.path.basename(scan).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+                if self.prefix is None:
+                    self.prefix=os.path.dirname(self.scan)
+            else:
+                if self.prefix is None:
+                    raise ValueError("trying to create dataset without name and prefix")
+                (_h, _name) = tempfile.mkstemp(suffix='.mnc', dir=prefix)
+                os.close(_h)
+                self.name=os.path.relpath(_name,prefix)
+                os.unlink(_name)
+
+        if scan is None:
+            if self.prefix is not None:
+                self.scan=self.prefix+os.sep+self.name+'.mnc'
+                self.mask=self.prefix+os.sep+self.name+'_mask.mnc'
+                self.seg=self.prefix+os.sep+self.name+'_seg.mnc'
+                self.scan_f=self.prefix+os.sep+self.name+'_f.mnc'
+                self.mask_f=self.prefix+os.sep+self.name+'_f_mask.mnc'
+                self.seg_f=self.prefix+os.sep+self.name+'_f_seg.mnc'
+
+                if add_n is not None:
+                    self.add=[self.prefix+os.sep+self.name+'_{}.mnc'.format(i) for i in range(add_n)]
+                    self.add_f=[self.prefix+os.sep+self.name+'_{}_f.mnc'.format(i) for i in range(add_n)]
+                else:
+                    self.add=[]
+                    self.add_f=[]
+        #------
+
+    def __repr__(self):
+        return "MriDataset(\n prefix=\"{}\",\n name=\"{}\",\n scan=\"{}\",\n scan_f=\"{}\",\n mask=\"{}\",\n mask_f=\"{}\",\n seg=\"{}\",\n seg_f=\"{}\",\n protect={},\n add={},\n add_f={},\n group={},\n grading={})".\
+               format(self.prefix,self.name,self.scan,self.scan_f,self.mask,self.mask_f,self.seg,self.seg_f,repr(self.protect),repr(self.add),repr(self.add_f),self.group,self.grading)
+
+    def cleanup(self):
+        if not self.protect:
+            for i in (self.scan, self.mask, self.seg, self.scan_f, self.mask_f, self.seg_f ):
+                if i is not None and os.path.exists(i):
+                    os.unlink(i)
+
+            for (i,j) in self.seg_split.items():
+                if os.path.exists(j):
+                    os.unlink(j)
+
+            for (i,j) in self.seg_f_split.items():
+                if os.path.exists(j):
+                    os.unlink(j)
+
+            for (i,j) in enumerate(self.add):
+                if os.path.exists(j):
+                    os.unlink(j)
+    # ------------
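+
+# A hedged usage sketch (editor): given only prefix and name, MriDataset derives
+# its file layout from the constructor logic above, e.g.
+#   ds = MriDataset(prefix='/tmp/lib', name='subj01')
+#   ds.scan   -> '/tmp/lib/subj01.mnc'
+#   ds.mask   -> '/tmp/lib/subj01_mask.mnc'
+#   ds.seg    -> '/tmp/lib/subj01_seg.mnc'
+#   ds.scan_f -> '/tmp/lib/subj01_f.mnc'   # flipped counterpart
+# ('/tmp/lib' and 'subj01' are made-up illustration values.)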
+
+
+class MriTransform(object):
+    '''Transformation'''
+    def __init__(self, prefix=None, name=None, xfm=None, protect=False, xfm_f=None, xfm_inv=None, xfm_f_inv=None, nl=False ):
+        self.prefix=prefix
+        self.name=name
+
+        self.xfm=xfm
+        self.grid=None
+
+        self.xfm_f=xfm_f
+        self.grid_f=None
+
+        self.xfm_inv=xfm_inv
+        self.grid_inv=None
+
+        self.xfm_f_inv=xfm_f_inv
+        self.grid_f_inv=None
+
+        self.protect=protect
+        self.nl=nl
+
+        if name is None and xfm is None:
+            raise ValueError("Undefined name and xfm")
+
+        if name is None and xfm is not None:
+            self.name=os.path.basename(xfm).rsplit('.xfm',1)[0]
+
+            if self.prefix is None:
+                self.prefix=os.path.dirname(self.xfm)
+
+        if xfm is None:
+            if self.prefix is not None:
+                self.xfm=       self.prefix+os.sep+self.name+'.xfm'
+                self.grid=      self.prefix+os.sep+self.name+'_grid_0.mnc'
+
+                self.xfm_f=     self.prefix+os.sep+self.name+'_f.xfm'
+                self.grid_f=    self.prefix+os.sep+self.name+'_f_grid_0.mnc'
+
+                self.xfm_inv=   self.prefix+os.sep+self.name+'_invert.xfm'
+                self.grid_inv=  self.prefix+os.sep+self.name+'_invert_grid_0.mnc'
+
+                self.xfm_f_inv= self.prefix+os.sep+self.name+'_f_invert.xfm'
+                self.grid_f_inv=self.prefix+os.sep+self.name+'_f_invert_grid_0.mnc'
+
+    def __repr__(self):
+        return 'MriTransform(prefix="{}",name="{}")'.\
+               format(self.prefix, self.name )
+
+    def cleanup(self):
+        if not self.protect:
+            for i in (self.xfm, self.grid, self.xfm_f, self.grid_f, self.xfm_inv, self.grid_inv, self.xfm_f_inv, self.grid_f_inv ):
+                if i is not None and os.path.exists(i):
+                    os.unlink(i)
+
+class GMRIEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, MriTransform):
+            return {'name':obj.name,
+                    'xfm' :obj.xfm,
+                    'xfm_f':obj.xfm_f,
+                    'xfm_inv' :obj.xfm_inv,
+                    'xfm_f_inv':obj.xfm_f_inv,
+                    'prefix':obj.prefix
+                   }
+        elif isinstance(obj, MriDataset):
+            return {'name':obj.name,
+                    'scan':obj.scan,
+                    'mask':obj.mask,
+                    'scan_f':obj.scan_f,
+                    'mask_f':obj.mask_f,
+                    'prefix':obj.prefix,
+                    'add':obj.add,
+                    'add_f':obj.add_f,
+                    'group':obj.group,
+                    'grading':obj.grading,
+                   }
+        # Let the base class default method raise the TypeError
+        return json.JSONEncoder.default(self, obj)
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/train.py b/ipl/grading/train.py
new file mode 100644
index 0000000..1bdcc29
--- /dev/null
+++ b/ipl/grading/train.py
@@ -0,0 +1,653 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+from __future__ import print_function
+
+import shutil
+import os
+import sys
+import csv
+import copy
+import traceback
+
+# MINC stuff
+# from ipl.minc_tools import mincTools,mincError
+
+# scoop parallel execution
+from scoop import futures, shared
+
+from .filter import *
+from .structures import *
+from .registration import *
+from .resample import *
+from .model import *
+from .library import *
+
+
+def inv_dict(d):
+    return { v:k for (k,v) in d.items() }
+
+def generate_library(parameters, output, debug=False,cleanup=False):
+    '''Actual generation of the segmentation library'''
+    try:
+        if debug: print(repr(parameters))
+
+        # read parameters
+        reference_model           = parameters[ 'reference_model']
+        reference_mask            = parameters.get( 'reference_mask', None)
+        reference_model_add       = parameters.get( 'reference_model_add', [])
+
+        reference_local_model     = parameters.get( 'reference_local_model', None)
+        reference_local_mask      = parameters.get( 'reference_local_mask', None)
+
+        reference_local_model_flip= parameters.get( 'reference_local_model_flip', None)
+        reference_local_mask_flip = parameters.get( 'reference_local_mask_flip', None)
+
+        library                   = parameters[ 'library' ]
+
+        work_dir                  = parameters.get( 'workdir',output+os.sep+'work')
+
+        train_groups              = parameters[ 'groups']
+
+        # should we build symmetric model
+        build_symmetric           = parameters.get( 'build_symmetric',False)
+
+        # should we build symmetric flipped model
+        build_symmetric_flip      = parameters.get( 'build_symmetric_flip',False)
+
+        # lookup table for renaming labels for more compact representation
+        build_remap               = parameters.get( 'build_remap',{})
+
+        # lookup table for renaming labels for more compact representation,
+        # when building symmetrized library
+        build_flip_remap          = parameters.get( 'build_flip_remap',{})
+
+        # lookup table for renaming labels for more compact representation,
+        # when building symmetrized library
+        build_unflip_remap        = parameters.get( 'build_unflip_remap',{})
+
+        if not build_unflip_remap and build_flip_remap and build_remap:
+            build_unflip_remap = create_unflip_remap(build_remap,build_flip_remap)
+
+        # label map
+        label_map                 = parameters.get( 'label_map',None)
+
+        # perform filtering as final stage of the library creation
+        pre_filters               = parameters.get( 'pre_filters', None )
+        post_filters              = parameters.get( 'post_filters', parameters.get( 'filters', None ))
+
+        resample_order            = parameters.get( 'resample_order',2)
+        label_resample_order      = parameters.get( 'label_resample_order',resample_order)
+
+        # use boundary anti-aliasing filter when resampling labels
+        resample_baa              = parameters.get( 'resample_baa',True)
+
+        # perform label warping to create final library
+        do_warp_labels            = parameters.get( 'warp_labels',False)
+
+        # extend bounding box to reduce boundary effects
+        extend_boundary           = parameters.get( 'extend_boundary',4)
+
+        # extend the mask
+        #dilate_mask               = parameters.get( 'dilate_mask',3)
+        op_mask                   = parameters.get( 'op_mask','E[2] D[4]')
+
+        # if linear registration should be performed
+        do_initial_register       = parameters.get( 'initial_register',
+                                    parameters.get( 'linear_register', {}))
+
+        if do_initial_register is not None and isinstance(do_initial_register,dict):
+            initial_register=do_initial_register
+            do_initial_register=True
+        else:
+            initial_register={}
+
+
+        inital_reg_type           = parameters.get( 'initial_register_type',
+                                    parameters.get( 'linear_register_type',
+                                    initial_register.get('type','-lsq12')))
+
+        inital_reg_ants           = parameters.get( 'initial_register_ants',
+                                    parameters.get( 'linear_register_ants', False))
+
+        inital_reg_options        = parameters.get( 'initial_register_options',
+                                    initial_register.get('options',None) )
+
+        inital_reg_downsample     = parameters.get( 'initial_register_downsample',
+                                    initial_register.get('downsample',None))
+
+        inital_reg_use_mask       = parameters.get( 'initial_register_use_mask',
+                                    initial_register.get('use_mask',False))
+
+        initial_reg_objective     = initial_register.get('objective','-xcorr')
+
+        # perform local linear registration
+        do_initial_local_register = parameters.get( 'initial_local_register',
+                                    parameters.get( 'local_linear_register', {}) )
+        if do_initial_local_register is not None and isinstance(do_initial_local_register,dict):
+            initial_local_register=do_initial_local_register
+            do_initial_local_register=True
+        else:
+            initial_local_register={}
+
+        local_reg_type            = parameters.get( 'local_register_type',
+                                    initial_local_register.get('type','-lsq12'))
+
+        local_reg_ants            = parameters.get( 'local_register_ants', False)
+
+        local_reg_opts            = parameters.get( 'local_register_options',
+                                    initial_local_register.get('options',None))
+
+        local_reg_bbox            = parameters.get( 'local_register_bbox',
+                                    initial_local_register.get('bbox',False ))
+
+        local_reg_downsample      = parameters.get( 'local_register_downsample',
+                                    initial_local_register.get('downsample',None))
+
+        local_reg_use_mask        = parameters.get( 'local_register_use_mask',
+                                    initial_local_register.get('use_mask',True))
+
+        local_reg_objective       = initial_local_register.get('objective','-xcorr')
+
+        # if non-linear registration should be performed for library creation
+        do_nonlinear_register     = parameters.get( 'non_linear_register',False)
+
+        # if non-linear registration should be performed with ANTS
+        do_nonlinear_register_ants= parameters.get( 'non_linear_register_ants',False)
+        nlreg_level               = parameters.get('non_linear_register_level', 2)
+        nlreg_start               = parameters.get('non_linear_register_start', 16)
+        nlreg_options             = parameters.get('non_linear_register_options', None)
+        nlreg_downsample          = parameters.get('non_linear_register_downsample', None)
+
+        nonlinear_register_type   = parameters.get( 'non_linear_register_type',None)
+        if nonlinear_register_type is None:
+            if do_nonlinear_register_ants:
+                nonlinear_register_type='ants'
+
+
+
+        modalities                = parameters.get( 'modalities',1 ) - 1
+
+        create_patch_norm_lib     = parameters.get( 'create_patch_norm_lib',False)
+        patch_norm_lib_pct        = parameters.get( 'patch_norm_lib_pct', 0.1 )
+        patch_norm_lib_sub        = parameters.get( 'patch_norm_lib_sub', 1 )
+        patch_norm_lib_patch      = parameters.get( 'patch_norm_lib_patch', 2 ) # 5x5x5 patches
+
+        # prepare directories
+        if not os.path.exists(output):
+            os.makedirs(output)
+
+        if not os.path.exists(work_dir):
+            os.makedirs(work_dir)
+
+        # 0. go over input samples, prepare variables
+        input_samples=[]
+        filtered_samples=[]
+        lin_xfm=[]
+        lin_samples=[]
+        tmp_lin_samples=[]
+        bbox_lin_xfm=[]
+        #nl_xfm=[]
+        #bbox_samples=[]
+
+        final_samples=[]
+        warped_samples=[]
+        final_transforms=[]
+        tmp_log_samples=[]
+
+        patch_norm_db  = output + os.sep + 'patch_norm.db'
+        patch_norm_idx = output + os.sep + 'patch_norm.idx'
+
+        # identity xfm
+        identity_xfm=MriTransform(prefix=work_dir, name='identity' )
+        with mincTools() as m:
+            m.param2xfm(identity_xfm.xfm)
+            m.param2xfm(identity_xfm.xfm_f)
+
+        # check if library is a list, if it is not, assume it's a reference to a csv file
+        if not isinstance(library, list):
+            with open(library,'r') as f:
+                library=list(csv.reader(f))
+
+        # setup files
+        model = MriDataset(scan=reference_model, mask=reference_mask, add=reference_model_add)
+
+        for (j,i) in enumerate(library):
+            scan=i[0]
+            seg=i[1]
+            add=i[2:modalities+2] # additional modalities
+            group=None
+            grading=None
+
+            mask = work_dir + os.sep + 'fake_mask_' + os.path.basename(scan)
+            create_fake_mask(seg, mask)
+
+            if len(i)>modalities+2: # assume that the extra columns are group and grading
+                group=  int(i[modalities+2])
+                grading=float(i[modalities+3])
+
+            sample= MriDataset(scan=scan, seg=seg, mask=mask,protect=True, add=add, group=group, grading=grading)
+            input_samples.append( sample )
+            filtered_samples.append( MriDataset( prefix=work_dir, name='flt_'+sample.name, add_n=modalities, group=group, grading=grading ) )
+
+            lin_xfm.append( MriTransform(prefix=work_dir, name='lin_'+sample.name ) )
+            bbox_lin_xfm.append( MriTransform(prefix=work_dir, name='lin_bbox_'+sample.name ) )
+            lin_samples.append( MriDataset( prefix=work_dir, name='lin_'+sample.name, add_n=modalities, group=group, grading=grading ) )
+            tmp_lin_samples.append( MriDataset( prefix=work_dir, name='tmp_lin_'+ sample.name, add_n=modalities, group=group, grading=grading ) )
+            tmp_log_samples.append( MriDataset( prefix=work_dir, name='tmp_log_'+ sample.name, group=group, grading=grading ) )
+            final_samples.append( MriDataset( prefix=output, name=sample.name, add_n=modalities, group=group, grading=grading ) )
+            warped_samples.append( MriDataset( prefix=output, name='nl_'+sample.name, add_n=modalities, group=group, grading=grading ) )
+            final_transforms.append( MriTransform(prefix=output, name='nl_'+sample.name ) )
+
+        # temp array
+        results=[]
+
+        if pre_filters is not None:
+            # apply pre-filtering before other stages
+            filter_all=[]
+
+            for (j,i) in enumerate(input_samples):
+                # a HACK?
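+                # A hedged note (editor): pre-filtering rewrites only the intensity
+                # images; the original segmentation, mask, group and grading are
+                # carried over to the filtered sample by hand below.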
+ filtered_samples[j].seg = input_samples[j].seg + filtered_samples[j].group = input_samples[j].group + filtered_samples[j].grading = input_samples[j].grading + filtered_samples[j].mask = input_samples[j].mask + + filter_all.append( futures.submit( + filter_sample, input_samples[j], filtered_samples[j], pre_filters, model=model + )) + + futures.wait(filter_all, return_when=futures.ALL_COMPLETED) + else: + filtered_samples=input_samples + + if build_symmetric: + # need to flip the inputs + flipdir=work_dir+os.sep+'flip' + if not os.path.exists(flipdir): + os.makedirs(flipdir) + flip_all=[] + + labels_datatype='short'# TODO: determine optimal here + #if largest_label>255:labels_datatype='short' + + for (j,i) in enumerate(filtered_samples): + i.scan_f=flipdir+os.sep+os.path.basename(i.scan) + i.add_f=[] + for (k,j) in enumerate(i.add): + i.add_f.append(flipdir+os.sep+os.path.basename(i.add[k])) + + if i.mask is not None: + i.mask_f=flipdir+os.sep+'mask_'+os.path.basename(i.scan) + + flip_all.append( futures.submit( generate_flip_sample, i, labels_datatype=labels_datatype ) ) + + futures.wait(flip_all, return_when=futures.ALL_COMPLETED) + + # 1. run global linear registration if nedded + if do_initial_register: + for (j,i) in enumerate(filtered_samples): + if inital_reg_type=='elx' or inital_reg_type=='elastix' : + results.append( futures.submit( + elastix_registration, i, model, lin_xfm[j], + symmetric=build_symmetric, + parameters=inital_reg_options, + ) ) + elif inital_reg_type=='ants' or inital_reg_ants: + results.append( futures.submit( + linear_registration, i, model, lin_xfm[j], + symmetric=build_symmetric, + linreg=inital_reg_options, + ants=True + ) ) + else: + results.append( futures.submit( + linear_registration, i, model, lin_xfm[j], + symmetric=build_symmetric, + reg_type=inital_reg_type, + linreg=inital_reg_options, + objective=initial_reg_objective + ) ) + # TODO: do we really need to wait for result here? + futures.wait( results, return_when=futures.ALL_COMPLETED ) + # TODO: determine if we need to resample input files here + lin_samples=input_samples + else: + lin_samples=input_samples + + # 2. for each part run linear registration, apply flip and do symmetric too + # 3. 
perform local linear registrtion and local intensity normalization if needed + # create a local reference model + local_model=None + local_model_ovl=None + local_model_avg=None + local_model_sd=None + + if reference_local_model is None : + local_model =MriDataset( prefix=output, name='local_model', add_n=modalities ) + local_model_ovl=MriDataset( prefix=output, name='local_model_ovl' ) + local_model_avg=MriDataset( prefix=output, name='local_model_avg', add_n=modalities ) + local_model_sd =MriDataset( prefix=output, name='local_model_sd', add_n=modalities ) + + if not os.path.exists( local_model.scan ): + for (j,i) in enumerate( filtered_samples ): + xfm=None + if do_initial_register: + xfm=lin_xfm[j] + + results.append( futures.submit( + warp_rename_seg, i, model, tmp_lin_samples[j], + transform=xfm, + symmetric=build_symmetric, + symmetric_flip=build_symmetric, + lut=build_remap, + flip_lut=build_flip_remap, + resample_order=0, + resample_baa=False # This is quick and dirty part + ) ) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + create_local_model(tmp_lin_samples, model, local_model, extend_boundary=extend_boundary, op=op_mask) + + if not os.path.exists(local_model.scan_f) and build_symmetric and build_symmetric_flip: + create_local_model_flip(local_model, model, remap=build_unflip_remap, op=op_mask) + else: + local_model=MriDataset(scan=reference_local_model, mask=reference_local_mask) + + local_model.scan_f=reference_local_model_flip + local_model.mask_f=reference_local_mask_flip + + if do_initial_local_register: + for (j,i) in enumerate(lin_samples): + init_xfm=None + if do_initial_register: + init_xfm=lin_xfm[j] + + if local_reg_type=='elx' or local_reg_type=='elastix' : + results.append( futures.submit( + elastix_registration, i, local_model, bbox_lin_xfm[j], + init_xfm=init_xfm, + symmetric=build_symmetric, + parameters=local_reg_opts, + bbox=local_reg_bbox + ) ) + elif local_reg_type=='ants' or local_reg_ants: + results.append( futures.submit( + linear_registration, i, local_model, bbox_lin_xfm[j], + init_xfm=init_xfm, + symmetric=build_symmetric, + reg_type=local_reg_type, + linreg=local_reg_opts, + close=True, + ants=True, + bbox=local_reg_bbox + ) ) + else: + if not do_initial_register: + init_xfm=identity_xfm # to avoid strange initialization errors + + results.append( futures.submit( + linear_registration, i, local_model, bbox_lin_xfm[j], + init_xfm=init_xfm, + symmetric=build_symmetric, + reg_type=local_reg_type, + linreg=local_reg_opts, + close=True, + bbox=local_reg_bbox, + objective=local_reg_objective + ) ) + + # TODO: do we really need to wait for result here? 
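+            # A hedged note (editor): this is the scoop fan-out/barrier pattern used
+            # throughout this module; a minimal sketch of the same idiom:
+            #   futs = [futures.submit(fn, s) for s in samples]
+            #   futures.wait(futs, return_when=futures.ALL_COMPLETED)
+            # The barrier is needed because the bounding-box resampling below
+            # consumes the just-computed transforms.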
+ futures.wait(results, return_when=futures.ALL_COMPLETED ) + else: + bbox_lin_xfm=lin_xfm + + + # create bbox samples + results=[] + for (j, i) in enumerate(input_samples): + xfm=None + + if i.mask is None: + final_samples[j].mask=None + + if i.mask_f is None: + final_samples[j].mask_f=None + + if do_initial_local_register or do_initial_register: + xfm=bbox_lin_xfm[j] + # + results.append( futures.submit( + warp_rename_seg, i, local_model, final_samples[j], + transform=xfm, + symmetric=build_symmetric, + symmetric_flip=build_symmetric, + lut=build_remap, + flip_lut=build_flip_remap, + resample_order=label_resample_order, + resample_baa=resample_baa + ) ) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + results=[] + for (j, i) in enumerate(input_samples): + xfm=None + if do_initial_local_register or do_initial_register: + xfm=bbox_lin_xfm[j] + + results.append( futures.submit( + warp_sample, i, local_model, final_samples[j], + transform=xfm, + symmetric=build_symmetric, + symmetric_flip=build_symmetric, + resample_order=resample_order, + filters=post_filters, + ) ) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + if create_patch_norm_lib: + #for (j, i) in enumerate(final_samples): + # results.append( futures.submit( + # log_transform_sample, i , tmp_log_samples[j] ) ) + # + # futures.wait(results, return_when=futures.ALL_COMPLETED) + + create_patch_norm_db( final_samples, patch_norm_db, + patch_norm_idx, + pct=patch_norm_lib_pct, + sub=patch_norm_lib_sub, + patch=patch_norm_lib_patch) + results=[] + if do_nonlinear_register: + for (j, i) in enumerate(final_samples): + # TODO: decide what to do with mask + i.mask=None + + if nonlinear_register_type=='elx' or nonlinear_register_type=='elastix' : + results.append( futures.submit( + elastix_registration, + i, + local_model, + final_transforms[j], + symmetric=build_symmetric, + level=nlreg_level, + parameters=nlreg_options, + output_sample=warped_samples[j], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa, + nl=True, + downsample=nlreg_downsample + ) ) + elif nonlinear_register_type=='ants' or do_nonlinear_register_ants: + results.append( futures.submit( + non_linear_registration, + i, + local_model, + final_transforms[j], + symmetric=build_symmetric, + level=nlreg_level, + parameters=nlreg_options, + output_sample=warped_samples[j], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa, + ants=True, + downsample=nlreg_downsample + ) ) + else: + results.append( futures.submit( + non_linear_registration, + i, + local_model, + final_transforms[j], + symmetric=build_symmetric, + level=nlreg_level, + parameters=nlreg_options, + output_sample=warped_samples[j], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa, + ants=False, + downsample=nlreg_downsample + ) ) + + final_samples[j].mask=None + # TODO: do we really need to wait for result here? 
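+        # A hedged note (editor): the barrier below is required because the model
+        # averaging that follows reads the warped samples produced by these jobs.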
+ futures.wait(results, return_when=futures.ALL_COMPLETED) + + with mincTools() as m: + # a hack, to replace a rough model with a new one + if os.path.exists(local_model.seg): + os.unlink(local_model.seg) + + # create majority voted model segmentation, for ANIMAL segmentation if needed + segs=['multiple_volume_similarity'] + + segs.extend([ i.seg for i in warped_samples ]) + if build_symmetric: segs.extend([ i.seg_f for i in warped_samples ]) + + segs.extend(['--majority', local_model.seg, '--bg', '--overlap', local_model_ovl.scan ] ) + m.command(segs,inputs=[],outputs=[local_model.seg,local_model_ovl.scan]) + + avg=['mincaverage','-float'] + avg.extend([ i.scan for i in warped_samples ]) + if build_symmetric: avg.extend([ i.scan_f for i in warped_samples ]) + avg.extend([local_model_avg.scan, '-sdfile', local_model_sd.scan ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.scan,local_model_sd.scan]) + + for i in range(modalities): + avg=['mincaverage','-float'] + avg.extend([ j.add[i] for j in warped_samples ]) + if build_symmetric: avg.extend([ j.add_f[i] for j in warped_samples ]) + + avg.extend([local_model_avg.add[i], '-sdfile', local_model_sd.add[i] ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.add[i],local_model_sd.add[i]]) + else: + with mincTools() as m: + # a hack, to replace a rough model with a new one + if os.path.exists(local_model.seg): + os.unlink(local_model.seg) + + # create majority voted model segmentation, for ANIMAL segmentation if needed + segs=['multiple_volume_similarity'] + segs.extend([ i.seg for i in final_samples ]) + + if build_symmetric: segs.extend([ i.seg_f for i in final_samples ]) + + segs.extend(['--majority', local_model.seg, '--bg','--overlap', local_model_ovl.scan] ) + m.command(segs,inputs=[],outputs=[local_model.seg,local_model_ovl.scan]) + + avg=['mincaverage','-float'] + avg.extend([ i.scan for i in final_samples ]) + if build_symmetric: avg.extend([ i.scan_f for i in final_samples ]) + avg.extend([local_model_avg.scan, '-sdfile', local_model_sd.scan ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.scan,local_model_sd.scan]) + + for i in range(modalities): + avg=['mincaverage','-float'] + avg.extend([ j.add[i] for j in final_samples ]) + if build_symmetric: avg.extend([ j.add_f[i] for j in final_samples ]) + avg.extend([local_model_avg.add[i], '-sdfile', local_model_sd.add[i] ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.add[i],local_model_sd.add[i]]) + + # number of classes including bg + classes_number=2 + # 6. 
create training library description + with mincTools() as m: + classes_number=int(m.execute_w_output(['mincstats', '-q', '-max',local_model.seg ]).rstrip("\n"))+1 + + library_description={} + # library models + library_description['model'] = model.scan + library_description['model_mask'] = model.mask + library_description['model_add'] = model.add + + library_description['local_model'] = local_model.scan + library_description['local_model_add']= local_model.add + library_description['local_model_mask']=local_model.mask + library_description['local_model_seg']= local_model.seg + + # library parameters + library_description['map']=inv_dict(dict(build_remap)) + library_description['classes_number']= classes_number + library_description['nl_samples_avail']=do_nonlinear_register + library_description['modalities']=modalities+1 + library_description['groups']=train_groups + library_description['label_map'] = label_map + + if build_symmetric and build_symmetric_flip: + library_description['local_model_flip'] =local_model.scan_f + library_description['local_model_add_flip'] =local_model.add_f + library_description['local_model_mask_flip']=local_model.mask_f + library_description['local_model_seg_flip'] =local_model.seg_f + library_description['flip_map']=inv_dict(dict(build_flip_remap)) + else: + library_description['local_model_flip']=None + library_description['local_model_add_flip']=[] + library_description['local_model_mask_flip']=None + library_description['flip_map']={} + + library_description['library']=[] + + for (j, i) in enumerate(final_samples): + ss=[i.group,i.grading] + ss.extend([i.scan, i.seg ]) + ss.extend(i.add) + + if do_nonlinear_register: + ss.extend( [ final_transforms[j].xfm, final_transforms[j].xfm_inv, warped_samples[j].scan, warped_samples[j].seg ]) + + library_description['library'].append(ss) + + if build_symmetric: + ss=[i.group,i.grading] + ss.extend([i.scan_f, i.seg_f ]) + ss.extend(i.add_f) + + if do_nonlinear_register: + ss.extend( [ final_transforms[j].xfm_f, final_transforms[j].xfm_f_inv, warped_samples[j].scan_f, warped_samples[j].seg_f ]) + + library_description['library'].append(ss) + + save_library_info( library_description, output) + # cleanup + if cleanup: + shutil.rmtree(work_dir) + + except mincError as e: + print("Exception in generate_library:{}".format(str(e)),file=sys.stderr) + traceback.print_exc(file=sys.stderr) + raise + except : + print("Exception in generate_library:{}".format(sys.exc_info()[0]),file=sys.stderr) + traceback.print_exc(file=sys.stderr) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/lp/__init__.py b/ipl/lp/__init__.py new file mode 100644 index 0000000..ef7a225 --- /dev/null +++ b/ipl/lp/__init__.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# +# @author Vladimir S. FONOV +# @date 14/08/2015 +# +# Longitudinal pipeline preprocessing + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/lp/aqc.py b/ipl/lp/aqc.py new file mode 100644 index 0000000..7f5fe83 --- /dev/null +++ b/ipl/lp/aqc.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date 14/08/2015 +# +# Longitudinal pipeline resampling + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +#from ipl.minc_qc import qc,qc_field_contour + + + +def make_aqc_nu(t1w_field,aqc_nu,options={}): + pass + +def make_aqc_stx(t1w_tal,model_outline,aqc_tal,options={}): + with mincTools() as m: + m.aqc(t1w_tal.scan, aqc_tal.fname, + slices=options.get("slices",3)) + +def make_aqc_add(t1w_tal,add_tal,aqc,options={}): + pass + +def make_aqc_mask(t1w_tal,aqc_mask,options={}): + pass + +def make_aqc_cls(t1w_tal,tal_cls,aqc_cls,options={}): + pass + +def make_aqc_lobes( t1w_tal, tal_lob,aqc_lob,options={}): + pass \ No newline at end of file diff --git a/ipl/lp/iter_pipeline.py b/ipl/lp/iter_pipeline.py new file mode 100644 index 0000000..c34d164 --- /dev/null +++ b/ipl/lp/iter_pipeline.py @@ -0,0 +1,368 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date 14/08/2015 +# +# Longitudinal pipeline preprocessing + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +# local stuff +from .structures import * +from .preprocess import * +from .utils import * +from .registration import * +from .resample import * +from .segment import * +from .qc import * + +def iter_step( t1w_scan, + iteration, + output_dir, + prev_iter={}, + options =None, + t2w_scan=None, + pdw_scan=None, + work_dir=None, + subject_id=None, + timepoint_id=None, + corr_t1w=None, + corr_t2w=None, + corr_pdw=None ): + """ + drop-in replacement for the standard pipeline + + Argumets: t1w_scan -- `MriScan` for T1w scan + iteration -- iteration number + output_dir -- string pointing to output directory + + Kyword arguments: + prev_iter -- information from previous iteration + options -- pipeline optins (dict) + t2w_scan -- T2w scan + pdw_scan -- PDw scan + work_dir -- string pointing to work directory , default None - use output_dir + subject_id -- ID of subject + timepoint_id -- ID of timepoint + """ + try: + print("running iter_step options={}".format(repr(options))) + + if options is None: + # TODO: load defaults from a settings file? 
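+            # A possible implementation of the TODO above (sketch only;
+            # "lp_defaults.json" is an assumed file name, not part of this patch):
+            #
+            #   import json
+            #   defaults=os.path.join(os.path.dirname(__file__),'lp_defaults.json')
+            #   if os.path.exists(defaults):
+            #       with open(defaults) as f:
+            #           options=json.load(f)
+            #
+            # The hard-coded dictionaries below then remain the fallback.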
+            if iteration > 0:
+                options= {
+                    'model': 'mni_icbm152_t1_tal_nlin_sym_09c',
+                    'model_dir': '/opt/minc/share/icbm152_model_09c',
+                    't1w_nuc': {},
+                    't2w_nuc': {},
+                    'pdw_nuc': {},
+                    't1w_stx': {
+                        'type':'ants',
+                        'resample':False,
+                        #'options': {
+                        #    'levels': 3,
+                        #    'conf':  {'3':1000,'2':1000,'1':1000},
+                        #    'blur':  {'3':8, '2':4, '1': 2 },
+                        #    'shrink':{'3':8, '2':4, '1': 2 },
+                        #    'convergence':'1.e-8,20',
+                        #    'cost_function':'MI',
+                        #    'cost_function_par':'1,32,random,0.3',
+                        #    'transformation':'similarity[ 0.3 ]',
+                        #}
+                    },
+                    'stx': {
+                        'noscale':False,
+                    },
+                    'beast': { 'beastlib': '/opt/minc/share/beast-library-1.1' },
+                    'tissue_classify': {},
+                    'lobe_segment': {},
+                    'nl': True,
+                    'lobes': True,
+                    'cls': True,
+                    'qc': True,
+                    'denoise': {},
+                }
+            else:
+                options= {
+                    'model': 'mni_icbm152_t1_tal_nlin_sym_09c',
+                    'model_dir': '/opt/minc/share/icbm152_model_09c',
+                    't1w_nuc': {},
+                    't2w_nuc': {},
+                    'pdw_nuc': {},
+                    't1w_stx': {
+                        'type':'ants',
+                        'resample':False,
+                        #'options': {
+                        #    'levels': 2,
+                        #    'conf':  {'2':1000,'1':1000},
+                        #    'blur':  {'2':4, '1': 2 },
+                        #    'shrink':{'2':4, '1': 2 },
+                        #    'convergence':'1.e-8,20',
+                        #    'cost_function':'MI',
+                        #    'cost_function_par':'1,32,random,0.3',
+                        #    'transformation':'similarity[ 0.3 ]',
+                        #}
+                    },
+                    'stx': {
+                        'noscale':False,
+                    },
+                    'beast': { 'beastlib': '/opt/minc/share/beast-library-1.1' },
+                    'tissue_classify': {},
+                    'lobe_segment': {},
+                    'nl': True,
+                    'lobes': True,
+                    'cls': True,
+                    'qc': True,
+                    'denoise': {},
+                }
+
+        dataset_id=subject_id
+
+        if dataset_id is None:
+            dataset_id=t1w_scan.name
+
+        if timepoint_id is not None:
+            dataset_id=dataset_id+'_'+timepoint_id
+
+        # generate model reference
+        model_dir =options['model_dir']
+        model_name=options['model']
+
+        model_t1w=MriScan(scan=model_dir+os.sep+options['model']+'.mnc',
+                          mask=model_dir+os.sep+options['model']+'_mask.mnc')
+
+        model_outline=MriScan(scan=model_dir+os.sep+options['model']+'_outline.mnc',
+                              mask=None)
+
+        lobe_atlas_dir =options.get('lobe_atlas_dir',None)
+        lobe_atlas_defs=options.get('lobe_atlas_defs',None)
+
+        if lobe_atlas_dir is None:
+            lobe_atlas_dir=model_dir + os.sep + model_name + '_atlas' + os.sep
+
+        if lobe_atlas_defs is None:
+            lobe_atlas_defs=model_dir + os.sep + model_name + '_atlas' + os.sep + 'lobe_defs.csv'
+            if not os.path.exists(lobe_atlas_defs):
+                lobe_atlas_defs=None
+
+        if work_dir is None:
+            work_dir=output_dir+os.sep+'work_'+dataset_id
+
+        run_qc   =options.get('qc',True)
+        run_nl   =options.get('nl',True)
+        run_cls  =options.get('cls',True)
+        run_lobes=options.get('lobes',True)
+        denoise_parameters=options.get('denoise',None)
+        create_unscaled=options.get('stx',{}).get('noscale',False)
+
+        clp_dir=work_dir+os.sep+'clp'
+        tal_dir=work_dir+os.sep+'tal'
+        nl_dir =work_dir+os.sep+'nl'
+        cls_dir=work_dir+os.sep+'tal_cls'
+        qc_dir =work_dir+os.sep+'qc'
+        lob_dif=work_dir+os.sep+'lob'
+        vol_dir=work_dir+os.sep+'vol'
+
+        # create all
+        create_dirs([clp_dir,tal_dir,nl_dir,cls_dir,qc_dir,lob_dif,vol_dir])
+
+        # files produced by pipeline
+        # native space
+        t1w_den  =MriScan(prefix=clp_dir, name='den_'+dataset_id,   modality='t1w', mask=None, iter=iteration)
+        t1w_field=MriScan(prefix=clp_dir, name='fld_'+dataset_id,   modality='t1w', mask=None, iter=iteration)
+        t1w_nuc  =MriScan(prefix=clp_dir, name='n4_'+dataset_id,    modality='t1w', mask=None, iter=iteration)
+        t1w_clp  =MriScan(prefix=clp_dir, name='clamp_'+dataset_id, modality='t1w', mask=None, iter=iteration)
+        # warp cls and mask back into native space
+        native_t1w_cls=MriScan(prefix=clp_dir, name='cls_'+dataset_id, modality='t1w', iter=iteration)
+        # stereotaxic space
+        t1w_tal_xfm=MriTransform(prefix=tal_dir,name='tal_xfm_'+dataset_id, iter=iteration)
+        t1w_tal_noscale_xfm=MriTransform(prefix=tal_dir,name='tal_noscale_xfm_'+dataset_id, iter=iteration)
+        unscale_xfm=MriTransform(prefix=tal_dir,name='unscale_xfm_'+dataset_id, iter=iteration)
+
+        t1w_tal=MriScan(prefix=tal_dir, name='tal_'+dataset_id, modality='t1w', iter=iteration)
+        prev_t1w_xfm=None
+        t1w_tal_noscale=MriScan(prefix=tal_dir, name='tal_noscale_'+dataset_id, modality='t1w', iter=iteration)
+
+        # tissue classification results
+        tal_cls=MriScan(prefix=cls_dir, name='cls_'+dataset_id, iter=iteration)
+        # lobe segmentation results
+        tal_lob=MriScan(prefix=lob_dif, name='lob_'+dataset_id, iter=iteration)
+
+        # nl space
+        nl_xfm=MriTransform(prefix=nl_dir, name='nl_'+dataset_id, iter=iteration)
+
+        # QC files
+        qc_tal =MriQCImage(prefix=qc_dir,name='tal_t1w_'+dataset_id,  iter=iteration)
+        qc_mask=MriQCImage(prefix=qc_dir,name='tal_mask_'+dataset_id, iter=iteration)
+        qc_cls =MriQCImage(prefix=qc_dir,name='tal_cls_'+dataset_id,  iter=iteration)
+        qc_lob =MriQCImage(prefix=qc_dir,name='tal_lob_'+dataset_id,  iter=iteration)
+        qc_nu  =MriQCImage(prefix=qc_dir,name='nu_'+dataset_id,       iter=iteration)
+
+        # AUX files
+        lob_volumes     =MriAux(prefix=vol_dir,name='vol_'+dataset_id, iter=iteration)
+        lob_volumes_json=MriAux(prefix=vol_dir,name='vol_'+dataset_id, suffix='.json', iter=iteration)
+        summary_file    =MriAux(prefix=work_dir,name='summary_'+dataset_id, suffix='.json', iter=iteration)
+
+        print("Iteration step dataset:{} iteration:{}".format(dataset_id,iteration))
+
+        # actual processing steps
+        # 1. preprocessing
+        if prev_iter is not None:
+            t1w_scan.mask=prev_iter['native_t1w_cls'].mask
+            t1w_den.mask =prev_iter['native_t1w_cls'].mask
+            t1w_nuc.mask =prev_iter['native_t1w_cls'].mask
+            t1w_clp.mask =prev_iter['native_t1w_cls'].mask
+            prev_t1w_xfm =prev_iter['t1w_tal_xfm']
+            print("Previous iteration:")
+            print(repr(prev_iter))
+
+        iter_summary={
+            'iter':        iteration,
+            'input_t1w':   t1w_scan,
+            'output_dir':  output_dir,
+            'dataset_id':  dataset_id,
+            "t1w_field":   t1w_field,
+            "t1w_nuc":     t1w_nuc,
+            "t1w_clp":     t1w_clp,
+            "t1w_tal_xfm": t1w_tal_xfm,
+            "t1w_tal_noscale_xfm": t1w_tal_noscale_xfm,
+            "t1w_tal":     t1w_tal,
+            "t1w_tal_noscale": t1w_tal_noscale,
+
+            "corr_t1w":    corr_t1w,
+            "corr_t2w":    corr_t2w,
+            "corr_pdw":    corr_pdw,
+        }
+
+
+        if denoise_parameters is not None:
+            # reuse old denoising
+            if prev_iter is not None:
+                t1w_den=prev_iter.get('t1w_den',None)
+                t1w_den.mask=prev_iter['native_t1w_cls'].mask
+            else:
+                denoise(t1w_scan, t1w_den, parameters=denoise_parameters)
+
+            iter_summary["t1w_den"]=t1w_den
+
+            # non-uniformity correction
+            estimate_nu(t1w_den, t1w_field,
+                        parameters=options.get('t1w_nuc',{}))
+            if run_qc:
+                draw_qc_nu(t1w_field,qc_nu)
+                iter_summary["qc_nu"]=qc_nu
+
+            # apply field
+            apply_nu(t1w_den, t1w_field, t1w_nuc,
+                     parameters=options.get('t1w_nuc',{}))
+        else:
+            # non-uniformity correction
+            estimate_nu(t1w_scan, t1w_field,
+                        parameters=options.get('t1w_nuc',{}))
+
+            if run_qc:
+                draw_qc_nu(t1w_field,qc_nu)
+                iter_summary["qc_nu"]=qc_nu
+
+            # apply field
+            apply_nu(t1w_scan, t1w_field, t1w_nuc,
+                     parameters=options.get('t1w_nuc',{}))
+
+        # normalize intensity
+        normalize_intensity(t1w_nuc, t1w_clp,
+                            parameters=options.get('t1w_clp',{}),
+                            model=model_t1w)
+        # TODO: coregister other modalities here?
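+        # One way the TODO above could be implemented (sketch; assumes
+        # intermodality_co_registration from pipeline.py is importable here and
+        # that t2w_clp / t2w_co_xfm objects are created like their T1w
+        # counterparts):
+        #
+        #   if t2w_scan is not None:
+        #       intermodality_co_registration(t2w_clp, t1w_clp, t2w_co_xfm,
+        #                                     parameters=options.get('t2w_stx',{}),
+        #                                     corr_xfm=corr_t2w,
+        #                                     corr_ref=corr_t1w)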
+
+        # register to STX space
+        lin_registration(t1w_clp, model_t1w, t1w_tal_xfm,
+                         parameters=options.get('t1w_stx',{}),
+                         init_xfm=prev_t1w_xfm,
+                         corr_xfm=corr_t1w)
+
+        warp_scan(t1w_clp, model_t1w, t1w_tal, transform=t1w_tal_xfm, corr_xfm=corr_t1w)
+
+        if run_qc:
+            draw_qc_stx(t1w_tal,model_outline,qc_tal)
+            iter_summary["qc_tal"]=qc_tal
+
+        # run beast to create brain mask
+        extract_brain_beast(t1w_tal,parameters=options.get('beast'),model=model_t1w)
+        if run_qc:
+            draw_qc_mask(t1w_tal,qc_mask)
+            iter_summary["qc_mask"]=qc_mask
+
+        # create unscaled version
+        if create_unscaled:
+            xfm_remove_scale(t1w_tal_xfm, t1w_tal_noscale_xfm, unscale=unscale_xfm)
+            iter_summary["t1w_tal_noscale_xfm"]=t1w_tal_noscale_xfm
+            # warp scan to create unscaled version
+            warp_scan(t1w_clp, model_t1w, t1w_tal_noscale, transform=t1w_tal_noscale_xfm, corr_xfm=corr_t1w)
+            # warping mask from tal space to unscaled tal space
+            warp_mask(t1w_tal, model_t1w, t1w_tal_noscale, transform=unscale_xfm)
+            iter_summary["t1w_tal_noscale"]=t1w_tal_noscale
+
+        # perform non-linear registration
+        if run_nl:
+            nl_registration(t1w_tal, model_t1w, nl_xfm,
+                            parameters=options.get('nl_reg',{}))
+            iter_summary["nl_xfm"]=nl_xfm
+
+        # run tissue classification
+        if run_nl and run_cls:
+            classify_tissue(t1w_tal, tal_cls, model_name=model_name,
+                            model_dir=model_dir, xfm=nl_xfm,
+                            parameters=options.get('tissue_classify',{}))
+            iter_summary["tal_cls"]=tal_cls
+            if run_qc:
+                draw_qc_cls(t1w_tal,tal_cls,qc_cls)
+                iter_summary["qc_cls"]=qc_cls
+
+            warp_cls_back(t1w_tal, tal_cls, t1w_tal_xfm, t1w_nuc, native_t1w_cls, corr_xfm=corr_t1w)
+            iter_summary["native_t1w_cls"]=native_t1w_cls
+
+        # run lobe segmentation
+        if run_nl and run_cls and run_lobes:
+            segment_lobes( tal_cls, nl_xfm, tal_lob,
+                           model=model_t1w,
+                           lobe_atlas_dir=lobe_atlas_dir,
+                           parameters=options.get('lobe_segment',{}))
+
+            iter_summary["tal_lob"]=tal_lob
+            if run_qc:
+                draw_qc_lobes( t1w_tal, tal_lob, qc_lob)
+                iter_summary["qc_lob"]=qc_lob
+
+        # calculate volumes
+        extract_volumes(tal_lob, tal_cls, t1w_tal_xfm, lob_volumes,
+                        subject_id=subject_id, timepoint_id=timepoint_id, lobedefs=lobe_atlas_defs)
+
+        extract_volumes(tal_lob, tal_cls, t1w_tal_xfm, lob_volumes_json,
+                        produce_json=True, subject_id=subject_id, timepoint_id=timepoint_id, lobedefs=lobe_atlas_defs)
+
+        iter_summary["lob_volumes"]     =lob_volumes
+        iter_summary["lob_volumes_json"]=lob_volumes_json
+
+        save_summary(iter_summary,summary_file.fname)
+        return iter_summary
+
+    except mincError as e:
+        print("Exception in iter_step:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except:
+        print("Exception in iter_step:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout )
+        raise
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/lp/pipeline.py b/ipl/lp/pipeline.py
new file mode 100644
index 0000000..bb36d81
--- /dev/null
+++ b/ipl/lp/pipeline.py
@@ -0,0 +1,638 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date 14/08/2015
+#
+# Longitudinal pipeline preprocessing
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError,temp_files
+
+# local stuff
+from .structures import *
+from .preprocess import *
+from .utils import *
+from .registration import *
+from .resample import *
+from .segment import *
+from .qc import *
+from .aqc import *
+
+
+def standard_pipeline(info,
+                      output_dir,
+                      options=None,
+                      work_dir=None):
+    """
+    drop-in replacement for the standard pipeline
+
+    Arguments: info       `dict` describing the dataset: 'subject', 'visit',
+                          't1w' (`MriScan`), optional 'add', 'init_t1w_lin_xfm',
+                          'corr_t1w', 'corr_add', 'model', 'model_dir'
+               output_dir string pointing to output directory
+
+    Keyword arguments:
+    work_dir string pointing to work directory, default None - use output_dir
+    """
+    try:
+        with temp_files() as tmp:
+            if options is None:
+                # TODO: load defaults from a settings file?
+                options= {
+                    'model': 'mni_icbm152_t1_tal_nlin_sym_09c',
+                    'model_dir': '/opt/minc/share/icbm152_model_09c',
+
+                    't1w_nuc': {},
+                    'add_nuc': {},
+
+                    't1w_clp': {},
+                    'add_clp': {},
+
+                    't1w_stx': {
+                        'type':'ants',
+                        'resample':False,
+                        #'options': {
+                        #    'levels': 2,
+                        #    'conf':  {'2':1000,'1':1000},
+                        #    'blur':  {'2':4, '1': 2 },
+                        #    'shrink':{'2':4, '1': 2 },
+                        #    'convergence':'1.e-8,20',
+                        #    'cost_function':'MI',
+                        #    'cost_function_par':'1,32,random,0.3',
+                        #    'transformation':'similarity[ 0.3 ]',
+                        #}
+                    },
+
+                    'stx': {
+                        'noscale':False,
+                        'nuc': None,
+                    },
+
+                    'beast': { 'beastlib': '/opt/minc/share/beast-library-1.1' },
+                    'brain_nl_seg': None,
+                    'tissue_classify': {},
+                    'lobe_segment': {},
+
+                    'nl': True,
+                    'lobes': True,
+                    'cls': True,
+
+                    'qc': {
+                        'nu': False,
+                        't1w_stx': True,
+                        'add_stx': True,
+                        'cls': True,
+                        'lob': True
+                    },
+
+                    'aqc': {
+                        'nu': False,
+                        't1w_stx': False,
+                        'add_stx': False,
+                        'cls': False,
+                        'lob': False,
+                        'slices': 3
+                    },
+
+                    'denoise': {},
+                }
+
+            # setup parameters
+            subject_id   = info['subject']
+            timepoint_id = info.get('visit', None)
+            t1w_scan     = info['t1w']
+            add_scans    = info.get('add', None)
+            init_t1w_lin_xfm = info.get('init_t1w_lin_xfm', None)
+
+            corr_t1w = info.get('corr_t1w', None)
+            corr_add = info.get('corr_add', None)
+
+            dataset_id=subject_id
+
+            if dataset_id is None:
+                dataset_id=t1w_scan.name
+
+            if timepoint_id is not None:
+                dataset_id=dataset_id+'_'+timepoint_id
+
+            model_name=None
+            model_dir=None
+
+            #print(json.dumps(options,indent=2))
+
+            # generate model reference
+            if info.get('model_dir',None) is not None:
+                model_dir =info['model_dir']
+                model_name=info['model']
+            else:
+                model_dir =options['model_dir']
+                model_name=options['model']
+
+            model_t1w=MriScan(scan=model_dir+os.sep+options['model']+'.mnc',
+                              mask=model_dir+os.sep+options['model']+'_mask.mnc')
+
+            model_outline=MriScan(scan=model_dir+os.sep+options['model']+'_outline.mnc',
+                                  mask=None)
+
+            lobe_atlas_dir =options.get('lobe_atlas_dir',None)
+            lobe_atlas_defs=options.get('lobe_atlas_defs',None)
+
+            if lobe_atlas_dir is None:
+                lobe_atlas_dir=model_dir + os.sep + model_name + '_atlas' + os.sep
+
+            if lobe_atlas_defs is None:
+                lobe_atlas_defs=model_dir + os.sep + model_name + '_atlas' + os.sep + 'lobe_defs.csv'
+                if not os.path.exists(lobe_atlas_defs):
+                    lobe_atlas_defs=None
+
+            if work_dir is None:
+                work_dir=output_dir+os.sep+'work_'+dataset_id
+
+            run_qc    = options.get('qc',{})
+            run_aqc   = options.get('aqc',{})
+            run_nl    = options.get('nl',True)
+            run_cls   = options.get('cls',True)
+            run_lobes = options.get('lobes',True)
+
+            if isinstance(run_qc, bool):  # fix for old version of options
+                run_qc={}
+            if isinstance(run_aqc, bool): # fix for old version of options
+                run_aqc={}
+
+            denoise_parameters = options.get('denoise',None)
+            nuc_parameters     = options.get('t1w_nuc',{})
+            clp_parameters     = options.get('t1w_clp',{})
+            stx_parameters     = options.get('t1w_stx',{})
+
+            create_unscaled = stx_parameters.get('noscale',False)
+            stx_nuc         = stx_parameters.get('nuc',None)
+            stx_disable     = stx_parameters.get('disable',False)
+
+            clp_dir = work_dir+os.sep+'clp'
+            tal_dir = work_dir+os.sep+'tal'
+            nl_dir  = work_dir+os.sep+'nl'
+            cls_dir = work_dir+os.sep+'tal_cls'
+            qc_dir  = work_dir+os.sep+'qc'
+            aqc_dir = work_dir+os.sep+'aqc'
+            lob_dif = work_dir+os.sep+'lob'
+            vol_dir = work_dir+os.sep+'vol'
+
+            # create all
+            create_dirs([clp_dir,tal_dir,nl_dir,cls_dir,qc_dir,aqc_dir,lob_dif,vol_dir])
+
+            # files produced by pipeline
+            # native space
+            t1w_den  =MriScan(prefix=clp_dir, name='den_'+dataset_id,   modality='t1w', mask=None)
+            t1w_field=MriScan(prefix=clp_dir, name='fld_'+dataset_id,   modality='t1w', mask=None)
+            t1w_nuc  =MriScan(prefix=clp_dir, name='n4_'+dataset_id,    modality='t1w', mask=None)
+            t1w_clp  =MriScan(prefix=clp_dir, name='clamp_'+dataset_id, modality='t1w', mask=None)
+
+            # stereotaxic space
+            t1w_tal_xfm=MriTransform(prefix=tal_dir,name='tal_xfm_'+dataset_id)
+            t1w_tal_noscale_xfm=MriTransform(prefix=tal_dir,name='tal_noscale_xfm_'+dataset_id)
+            unscale_xfm=MriTransform(prefix=tal_dir,name='unscale_xfm_'+dataset_id)
+
+            t1w_tal    =MriScan(prefix=tal_dir, name='tal_'+dataset_id,     modality='t1w')
+            t1w_tal_fld=MriScan(prefix=tal_dir, name='tal_fld_'+dataset_id, modality='t1w')
+
+            t1w_tal_noscale=MriScan(prefix=tal_dir, name='tal_noscale_'+dataset_id, modality='t1w')
+
+            t1w_tal_par=MriAux(prefix=tal_dir,name='tal_par_t1w_'+dataset_id)
+            t1w_tal_log=MriAux(prefix=tal_dir,name='tal_log_t1w_'+dataset_id)
+
+            # tissue classification results
+            tal_cls=MriScan(prefix=cls_dir, name='cls_'+dataset_id)
+            native_t1w_cls=MriScan(prefix=clp_dir, name='cls_'+dataset_id, modality='t1w')
+            # lobe segmentation results
+            tal_lob=MriScan(prefix=lob_dif, name='lob_'+dataset_id)
+
+            # nl space
+            nl_xfm=MriTransform(prefix=nl_dir, name='nl_'+dataset_id)
+
+            # QC files
+            qc_tal =MriQCImage(prefix=qc_dir,name='tal_t1w_'+dataset_id)
+            qc_mask=MriQCImage(prefix=qc_dir,name='tal_mask_'+dataset_id)
+            qc_cls =MriQCImage(prefix=qc_dir,name='tal_cls_'+dataset_id)
+            qc_lob =MriQCImage(prefix=qc_dir,name='tal_lob_'+dataset_id)
+            qc_nu  =MriQCImage(prefix=qc_dir,name='nu_'+dataset_id)
+
+            # QC files
+            aqc_tal =MriQCImage(prefix=aqc_dir,name='tal_t1w_'+dataset_id, suffix='')
+            aqc_mask=MriQCImage(prefix=aqc_dir,name='tal_mask_'+dataset_id,suffix='')
+            aqc_cls =MriQCImage(prefix=aqc_dir,name='tal_cls_'+dataset_id, suffix='')
+            aqc_lob =MriQCImage(prefix=aqc_dir,name='tal_lob_'+dataset_id, suffix='')
+            aqc_nu  =MriQCImage(prefix=aqc_dir,name='nu_'+dataset_id,      suffix='')
+
+            # AUX files
+            lob_volumes     =MriAux(prefix=vol_dir,name='vol_'+dataset_id)
+            lob_volumes_json=MriAux(prefix=vol_dir,name='vol_'+dataset_id,suffix='.json')
+            summary_file    =MriAux(prefix=work_dir,name='summary_'+dataset_id,suffix='.json')
+
+
+            iter_summary={
+                'subject':    subject_id,
+                'timepoint':  timepoint_id,
+                'dataset_id': dataset_id,
+
+                'input_t1w':  t1w_scan,
+                'input_add':  add_scans,
+
+                'output_dir': output_dir,
+
+                "t1w_field":  t1w_field,
+                "t1w_nuc":    t1w_nuc,
+                "t1w_clp":    t1w_clp,
+
+                "t1w_tal_xfm": t1w_tal_xfm,
+                "t1w_tal":     t1w_tal,
+                "t1w_tal_noscale": t1w_tal_noscale,
+
+                "corr_t1w":   corr_t1w,
+                "corr_add":   corr_add
+            }
+
+            # actual processing steps
+            # 1. preprocessing
+            if denoise_parameters is not None:
+                denoise(t1w_scan, t1w_den, parameters=denoise_parameters)
+                t1w_den.mask=t1w_scan.mask
+            else:
+                t1w_den=t1w_scan
+
+            iter_summary["t1w_den"]=t1w_den
+
+            if nuc_parameters is not None:
+                # non-uniformity correction
+                print("Running N4")
+
+                estimate_nu(t1w_den, t1w_field,
+                            parameters=nuc_parameters,
+                            model=model_t1w)
+                if run_qc is not None and run_qc.get('nu',False):
+                    draw_qc_nu(t1w_field,qc_nu,options=run_qc)
+                    iter_summary["qc_nu"]=qc_nu
+                if run_aqc is not None and run_aqc.get('nu',False):
+                    make_aqc_nu(t1w_field,aqc_nu,options=run_aqc)
+                    iter_summary["aqc_nu"]=aqc_nu
+
+                # apply field
+                apply_nu(t1w_den, t1w_field, t1w_nuc,
+                         parameters=nuc_parameters)
+                t1w_nuc.mask=t1w_den.mask
+            else:
+                t1w_nuc=t1w_den
+                t1w_field=None
+
+            iter_summary["t1w_field"] = t1w_field
+            iter_summary["t1w_nuc"]   = t1w_nuc
+
+            ################
+            # normalize intensity
+
+            if clp_parameters is not None:
+                normalize_intensity(t1w_nuc, t1w_clp,
+                                    parameters=options.get('t1w_clp',{}),
+                                    model=model_t1w)
+                t1w_clp.mask=t1w_nuc.mask
+            else:
+                t1w_clp=t1w_nuc
+
+            iter_summary["t1w_clp"] = t1w_clp
+
+            ####
+            if add_scans is not None:
+                iter_summary["add_den"]   = []
+                iter_summary["add_field"] = []
+                iter_summary["add_nuc"]   = []
+                iter_summary["add_clp"]   = []
+                iter_summary["add_xfm"]   = []
+
+                prev_co_xfm=None
+
+                for i,c in enumerate(add_scans):
+                    # get add options
+                    # TODO: do it per modality
+                    add_options = options.get('add',options)
+
+                    add_denoise_parameters = add_options.get('denoise',denoise_parameters)
+                    add_nuc_parameters     = add_options.get('nuc',    nuc_parameters)
+                    add_clp_parameters     = add_options.get('clp',    clp_parameters)
+                    add_stx_parameters     = add_options.get('stx',    stx_parameters)
+                    add_model_dir          = add_options.get('model_dir',model_dir)
+                    add_model_name         = add_options.get('model',  model_name)
+
+                    add_denoise_parameters = add_options.get('{}_denoise'.format(c.modality),add_denoise_parameters)
+                    add_nuc_parameters     = add_options.get('{}_nuc'.format(c.modality),    add_nuc_parameters)
+                    add_clp_parameters     = add_options.get('{}_clp'.format(c.modality),    add_clp_parameters)
+                    add_stx_parameters     = add_options.get('{}_stx'.format(c.modality),    add_stx_parameters)
+                    add_model_dir          = add_options.get('{}_model_dir'.format(c.modality),add_model_dir)
+                    add_model_name         = add_options.get('{}_model'.format(c.modality),  add_model_name)
+
+                    add_model = MriScan(scan=add_model_dir+os.sep+add_model_name+'.mnc',
+                                        mask=model_t1w.mask)
+
+                    den  =MriScan(prefix=clp_dir, name='den_'+dataset_id,   modality=c.modality, mask=None)
+                    field=MriScan(prefix=clp_dir, name='fld_'+dataset_id,   modality=c.modality, mask=None)
+                    nuc  =MriScan(prefix=clp_dir, name='n4_'+dataset_id,    modality=c.modality, mask=None)
+                    clp  =MriScan(prefix=clp_dir, name='clamp_'+dataset_id, modality=c.modality, mask=None)
+
+                    add_qc_nu =MriQCImage(prefix=qc_dir,  name='nu_'+c.modality+'_'+dataset_id)
+                    add_aqc_nu=MriQCImage(prefix=aqc_dir, name='nu_'+c.modality+'_'+dataset_id)
+                    co_xfm    =MriTransform(prefix=clp_dir, name='xfm_'+c.modality+'_'+dataset_id)
+
+                    co_par=MriAux(prefix=clp_dir, name='xfm_par_'+c.modality+'_'+dataset_id)
+                    co_log=MriAux(prefix=clp_dir, name='xfm_log_'+c.modality+'_'+dataset_id)
+
+                    corr_xfm=None
+                    if corr_add is not None:
+                        corr_xfm=corr_add[i]
+
+                    # denoising
+                    if add_denoise_parameters is not None:
+                        denoise(c, den, parameters=add_denoise_parameters)
+                        iter_summary["add_den"].append(den)
+                        den.mask=c.mask # maybe transfer mask from t1w?
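+                        # Sketch of "transfer mask from t1w" (hypothetical; co_xfm
+                        # is only computed further down, so this would have to run
+                        # after the co-registration step):
+                        #
+                        #   with mincTools() as m_:
+                        #       m_.resample_labels(t1w_clp.mask, den.mask,
+                        #                          transform=co_xfm.xfm,
+                        #                          invert_transform=True,
+                        #                          like=den.scan)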
+                    else:
+                        den=c
+
+                    # non-uniformity correction
+                    if add_nuc_parameters is not None:
+                        estimate_nu(den, field, parameters=add_nuc_parameters, model=add_model)
+                        if run_qc is not None and run_qc.get('nu',False):
+                            draw_qc_nu(field,add_qc_nu,options=run_qc)
+                            iter_summary["qc_nu_"+c.modality]=add_qc_nu
+                        if run_aqc is not None and run_aqc.get('nu',False):
+                            make_aqc_nu(field,add_aqc_nu,options=run_aqc)
+                            iter_summary["aqc_nu_"+c.modality]=add_aqc_nu
+                        # apply field
+                        apply_nu(den, field, nuc, parameters=add_nuc_parameters)
+                        nuc.mask=den.mask
+                    else:
+                        nuc=den
+
+                    #
+                    iter_summary["add_field"].append(field)
+                    iter_summary["add_nuc"].append(nuc)
+
+                    if add_clp_parameters is not None:
+                        normalize_intensity(nuc, clp,
+                                            parameters=add_clp_parameters,
+                                            model=add_model)
+                        clp.mask=nuc.mask
+                    else:
+                        clp=nuc
+
+                    iter_summary["add_clp"].append(clp)
+
+                    # co-registering to T1w
+                    if add_stx_parameters.get('independent',False) or (prev_co_xfm is None):
+                        # run co-registration unless another one can be used
+                        intermodality_co_registration(clp, t1w_clp, co_xfm,
+                                                      parameters=add_stx_parameters,
+                                                      corr_xfm=corr_xfm,
+                                                      corr_ref=corr_t1w,
+                                                      par=co_par,
+                                                      log=co_log)
+                        prev_co_xfm=co_xfm
+                    else:
+                        co_xfm=prev_co_xfm
+
+                    iter_summary["add_xfm"].append(co_xfm)
+
+            if not stx_disable:
+                # register to STX space
+                lin_registration(t1w_clp, model_t1w, t1w_tal_xfm,
+                                 parameters=stx_parameters,
+                                 corr_xfm=corr_t1w,
+                                 par=t1w_tal_par,
+                                 log=t1w_tal_log,
+                                 init_xfm=init_t1w_lin_xfm)
+
+                if stx_nuc is not None:
+                    tmp_t1w   =MriScan(prefix=tmp.tempdir, name='tal_'+dataset_id,    modality='t1w')
+                    tmp_t1w_n4=MriScan(prefix=tmp.tempdir, name='tal_n4_'+dataset_id, modality='t1w')
+
+                    warp_scan(t1w_clp, model_t1w, tmp_t1w,
+                              transform=t1w_tal_xfm,
+                              corr_xfm=corr_t1w,
+                              parameters=stx_parameters)
+                    tmp_t1w.mask=None
+                    tmp_t1w_n4.mask=None
+
+                    estimate_nu(tmp_t1w, t1w_tal_fld,
+                                parameters=stx_nuc)
+
+                    apply_nu(tmp_t1w, t1w_tal_fld, tmp_t1w_n4,
+                             parameters=stx_nuc)
+
+                    # TODO: maybe apply region-based intensity normalization here?
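+                    # One reading of the TODO above: restrict the polynomial fit
+                    # to the model mask so that background voxels are ignored
+                    # (a sketch only, not the pipeline's current behaviour):
+                    #
+                    #   with mincTools() as m_:
+                    #       m_.volume_pol(tmp_t1w_n4.scan, model_t1w.scan, t1w_tal.scan,
+                    #                     order=1,
+                    #                     source_mask=model_t1w.mask,
+                    #                     target_mask=model_t1w.mask)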
+                    normalize_intensity(tmp_t1w_n4, t1w_tal,
+                                        parameters=options.get('t1w_clp',{}),
+                                        model=model_t1w)
+
+                    iter_summary['t1w_tal_fld']=t1w_tal_fld
+
+                else:
+                    warp_scan(t1w_clp, model_t1w, t1w_tal,
+                              transform=t1w_tal_xfm,
+                              corr_xfm=corr_t1w,
+                              parameters=options.get('t1w_stx',{}))
+
+
+            if add_scans is not None:
+                iter_summary["add_stx_xfm"] = []
+                iter_summary["add_tal_fld"] = []
+                iter_summary["add_tal"]     = []
+
+                for i,c in enumerate(add_scans):
+                    add_stx_parameters = add_options.get('stx',stx_parameters)
+                    add_clp_parameters = add_options.get('clp',clp_parameters)
+                    add_model_dir      = add_options.get('model_dir',model_dir)
+                    add_model_name     = add_options.get('model',model_name)
+
+                    add_stx_parameters = add_options.get('{}_stx'.format(c.modality),add_stx_parameters)
+                    add_clp_parameters = add_options.get('{}_clp'.format(c.modality),add_clp_parameters)
+                    add_model_dir      = add_options.get('{}_model_dir'.format(c.modality),add_model_dir)
+                    add_model_name     = add_options.get('{}_model'.format(c.modality),add_model_name)
+
+                    add_model=MriScan(scan=add_model_dir+os.sep+add_model_name+'.mnc',
+                                      mask=model_t1w.mask)
+
+                    add_stx_nuc = add_stx_parameters.get('nuc',None)
+
+
+                    stx_xfm=MriTransform(prefix=tal_dir, name='xfm_'+c.modality+'_'+dataset_id)
+
+                    clp=iter_summary["add_clp"][i]
+                    xfm=iter_summary["add_xfm"][i]
+                    tal_fld=MriScan(prefix=tal_dir, name='tal_fld_'+dataset_id, modality=c.modality)
+                    tal    =MriScan(prefix=tal_dir, name='tal_'+dataset_id,     modality=c.modality)
+
+                    xfm_concat( [xfm,t1w_tal_xfm], stx_xfm )
+                    iter_summary["add_stx_xfm"].append(stx_xfm)
+
+                    corr_xfm=None
+                    if corr_add is not None:
+                        corr_xfm=corr_add[i]
+
+                    if add_stx_nuc is not None:
+                        tmp_   =MriScan(prefix=tmp.tempdir, name='tal_'+dataset_id,    modality=c.modality)
+                        tmp_n4 =MriScan(prefix=tmp.tempdir, name='tal_n4_'+dataset_id, modality=c.modality)
+
+                        warp_scan(clp, model_t1w, tmp_,
+                                  transform=stx_xfm,
+                                  corr_xfm=corr_xfm,
+                                  parameters=add_stx_parameters)
+
+                        tmp_.mask=None
+                        tmp_n4.mask=None
+
+                        estimate_nu(tmp_, tal_fld,
+                                    parameters=add_stx_nuc)
+
+                        apply_nu(tmp_, tal_fld, tmp_n4, parameters=add_stx_nuc)
+
+                        # TODO: maybe apply region-based intensity normalization here?
+                        normalize_intensity(tmp_n4, tal,
+                                            parameters=add_clp_parameters,
+                                            model=add_model)
+
+                        iter_summary["add_tal_fld"].append(tal_fld)
+
+                    else:
+                        warp_scan(clp, model_t1w, tal,
+                                  transform=stx_xfm,
+                                  corr_xfm=corr_xfm,
+                                  parameters=add_stx_parameters)
+
+                    iter_summary["add_tal"].append(tal)
+
+            if run_qc is not None and run_qc.get('t1w_stx',True):
+                draw_qc_stx(t1w_tal,model_outline,qc_tal,options=run_qc)
+                iter_summary["qc_tal"]=qc_tal
+
+            if add_scans is not None:
+                iter_summary["qc_add"]=[]
+                for i,c in enumerate(add_scans):
+                    qc=MriQCImage(prefix=qc_dir,name='tal_'+c.modality+'_'+dataset_id)
+                    if run_qc is not None and run_qc.get('add_stx',True):
+                        draw_qc_add(t1w_tal,iter_summary["add_tal"][i],qc,options=run_qc)
+                        iter_summary["qc_add"].append(qc)
+
+            if run_aqc is not None and run_aqc.get('t1w_stx',True):
+                make_aqc_stx(t1w_tal,model_outline,aqc_tal,options=run_aqc)
+                iter_summary["aqc_tal"]=aqc_tal
+
+            if add_scans is not None:
+                iter_summary["aqc_add"]=[]
+                for i,c in enumerate(add_scans):
+                    aqc=MriQCImage(prefix=aqc_dir,name='tal_'+c.modality+'_'+dataset_id)
+                    if run_aqc is not None and run_aqc.get('add_stx',True):
+                        make_aqc_add(t1w_tal,iter_summary["add_tal"][i],aqc,options=run_aqc)
+                        iter_summary["aqc_add"].append(aqc)
+
+            # run beast to create brain mask
+            beast_parameters=options.get('beast',None)
+            if beast_parameters is not None:
+                extract_brain_beast(t1w_tal,parameters=beast_parameters,model=model_t1w)
+                if run_qc is not None and run_qc.get('beast',True):
+                    draw_qc_mask(t1w_tal,qc_mask,options=run_qc)
+                    iter_summary["qc_mask"]=qc_mask
+                if run_aqc is not None and run_aqc.get('beast',True):
+                    make_aqc_mask(t1w_tal,aqc_mask,options=run_aqc)
+                    iter_summary["aqc_mask"]=aqc_mask
+
+            else:
+                #extract_brain_nlreg(t1w_tal,parameters=options.get('brain_nl_seg',{}),model=model_t1w)
+                # if we have an initial mask, keep using that!
+                if t1w_clp.mask is not None:
+                    warp_mask(t1w_clp, model_t1w, t1w_tal,
+                              transform=t1w_tal_xfm,
+                              corr_xfm=corr_t1w,
+                              parameters=options.get('t1w_stx',{}))
+                else:
+                    t1w_tal.mask=None
+                pass
+
+
+            # create unscaled version
+            if create_unscaled:
+                xfm_remove_scale(t1w_tal_xfm, t1w_tal_noscale_xfm, unscale=unscale_xfm)
+                iter_summary["t1w_tal_noscale_xfm"]=t1w_tal_noscale_xfm
+                # warp scan to create unscaled version
+                warp_scan(t1w_clp, model_t1w, t1w_tal_noscale, transform=t1w_tal_noscale_xfm, corr_xfm=corr_t1w)
+                # warping mask from tal space to unscaled tal space
+                warp_mask(t1w_tal, model_t1w, t1w_tal_noscale, transform=unscale_xfm)
+                iter_summary["t1w_tal_noscale"]=t1w_tal_noscale
+
+            # perform non-linear registration
+            if run_nl:
+                nl_registration(t1w_tal, model_t1w, nl_xfm,
+                                parameters=options.get('nl_reg',{}))
+                iter_summary["nl_xfm"]=nl_xfm
+
+            # run tissue classification
+            if run_nl and run_cls:
+                classify_tissue(t1w_tal, tal_cls, model_name=model_name,
+                                model_dir=model_dir, xfm=nl_xfm,
+                                parameters=options.get('tissue_classify',{}))
+
+                warp_cls_back(t1w_tal, tal_cls, t1w_tal_xfm, t1w_nuc, native_t1w_cls, corr_xfm=corr_t1w)
+                warp_mask_back(t1w_tal, t1w_tal_xfm, t1w_nuc, native_t1w_cls, corr_xfm=corr_t1w)
+                iter_summary["native_t1w_cls"]=native_t1w_cls
+                iter_summary["tal_cls"]=tal_cls
+                if run_qc is not None and run_qc.get('cls',True):
+                    draw_qc_cls(t1w_tal,tal_cls,qc_cls,options=run_qc)
+                if run_aqc is not None and run_aqc.get('cls',True):
+                    make_aqc_cls(t1w_tal,tal_cls,aqc_cls,options=run_aqc)
+            else:
+                # just warp the mask back
+                if beast_parameters is not None:
+                    warp_mask_back(t1w_tal, t1w_tal_xfm, t1w_nuc, native_t1w_cls, corr_xfm=corr_t1w)
+                native_t1w_cls.scan=None
+                iter_summary["tal_cls"]=tal_cls
+
+
+            # run lobe segmentation
+            if run_nl and run_cls and run_lobes:
+                segment_lobes( tal_cls, nl_xfm, tal_lob,
+                               model=model_t1w,
+                               lobe_atlas_dir=lobe_atlas_dir,
+                               parameters=options.get('lobe_segment',{}))
+                iter_summary["tal_lob"]=tal_lob
+
+                if run_qc is not None and run_qc.get('lob',True):
+                    draw_qc_lobes( t1w_tal, tal_lob, qc_lob, options=run_qc)
+                    iter_summary["qc_lob"]=qc_lob
+                if run_aqc is not None and run_aqc.get('lob',True):
+                    make_aqc_lobes( t1w_tal, tal_lob, aqc_lob, options=run_aqc)
+                    iter_summary["aqc_lob"]=aqc_lob
+
+            # calculate volumes
+            extract_volumes(tal_lob, tal_cls, t1w_tal_xfm, lob_volumes,
+                            subject_id=subject_id, timepoint_id=timepoint_id, lobedefs=lobe_atlas_defs)
+
+            extract_volumes(tal_lob, tal_cls, t1w_tal_xfm, lob_volumes_json,
+                            produce_json=True, subject_id=subject_id, timepoint_id=timepoint_id, lobedefs=lobe_atlas_defs)
+
+            iter_summary["lob_volumes"]     =lob_volumes
+            iter_summary["lob_volumes_json"]=lob_volumes_json
+
+            save_summary(iter_summary,summary_file.fname)
+            return iter_summary
+
+    except mincError as e:
+        print("Exception in standard_pipeline:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except:
+        print("Exception in standard_pipeline:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout )
+        raise
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/lp/preprocess.py b/ipl/lp/preprocess.py
new file mode 100644
index 0000000..b31edb6
--- /dev/null
+++ b/ipl/lp/preprocess.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date 14/08/2015
+#
+# Longitudinal pipeline preprocessing
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+import ipl.minc_hl as hl
+
+def fix_spacing(scan):
+    """make sure all spacings in the 3D volume are regular
+
+    Arguments: `scan` scan to be fixed
+    """
+    with mincTools() as minc:
+        for s in ['xspace', 'yspace', 'zspace']:
+            spacing = minc.query_attribute( scan, s + ':spacing' )
+
+            if spacing.count( 'irregular' ):
+                minc.set_attribute( scan, s + ':spacing', 'regular__' )
+    return scan
+
+def denoise(in_scan, out_scan, parameters={}):
+    """Apply patch-based denoising
+
+    Arguments: in_scan    `MriScan` input
+               out_scan   `MriScan` output
+               parameters `dict` of parameters
+    """
+    use_anlm    =parameters.get('anlm',   False )
+    denoise_beta=parameters.get('beta',   0.7 )
+    patch       =parameters.get('patch',  2 )
+    search      =parameters.get('search', 2 )
+    regularize  =parameters.get('regularize', None )
+    with mincTools() as minc:
+        if use_anlm:
+            minc.anlm( in_scan.scan, out_scan.scan, beta=denoise_beta, patch=patch, search=search )
+        else:
+            minc.nlm( in_scan.scan, out_scan.scan, beta=denoise_beta, patch=patch, search=search )
+        # TODO: maybe use anlm sometimes?
+
+
+def estimate_nu(in_scan, out_field, parameters={}, model=None):
+    """Estimate the non-uniformity correction field
+
+    Arguments: in_scan    `MriScan` input
+               out_field  `MriScan` output
+               parameters `dict` of parameters
+    """
+    with mincTools() as minc:
+        #
+        #print("Running N4, parameters={}".format(repr(parameters)))
+        #traceback.print_stack()
+        weight_mask=None
+        init_xfm=None # TODO: maybe add as a parameter, in case manual registration was done?
+        if in_scan.mask is not None and os.path.exists(in_scan.mask):
+            weight_mask=in_scan.mask
+        else:
+            # TODO: maybe use some kind of threshold here instead of the built-in one?
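+            # Sketch of the TODO above: a weight mask could be derived with the
+            # same bimodal (Otsu-like) threshold that the stx branch below uses
+            # (an assumption, not what the code currently does):
+            #
+            #   minc.binary_morphology(in_scan.scan, '', minc.tmp('weight.mnc'),
+            #                          binarize_bimodal=True)
+            #   weight_mask = minc.tmp('weight.mnc')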
+            pass
+
+        if not minc.checkfiles(inputs=[in_scan.scan], outputs=[out_field.scan]):
+            return
+
+        if parameters.get('disable',False):
+            minc.calc([in_scan.scan],'1.0',out_field.scan,datatype='-float')
+        else:
+            if parameters.get('use_stx_mask',False) and model is not None:
+                # method from Gabriel
+                minc.winsorize_intensity(in_scan.scan,minc.tmp('trunc_t1.mnc'))
+                minc.binary_morphology(minc.tmp('trunc_t1.mnc'),'',minc.tmp('otsu_t1.mnc'),binarize_bimodal=True)
+                minc.defrag(minc.tmp('otsu_t1.mnc'),minc.tmp('otsu_defrag_t1.mnc'))
+                minc.autocrop(minc.tmp('otsu_defrag_t1.mnc'),minc.tmp('otsu_defrag_expanded_t1.mnc'),isoexpand='50mm')
+                minc.binary_morphology(minc.tmp('otsu_defrag_expanded_t1.mnc'),'D[25] E[25]',minc.tmp('otsu_expanded_closed_t1.mnc'))
+                minc.resample_labels(minc.tmp('otsu_expanded_closed_t1.mnc'),minc.tmp('otsu_closed_t1.mnc'),like=minc.tmp('trunc_t1.mnc'))
+
+                minc.calc([minc.tmp('trunc_t1.mnc'),minc.tmp('otsu_closed_t1.mnc')], 'A[0]*A[1]', minc.tmp('trunc_masked_t1.mnc'))
+                minc.calc([in_scan.scan,minc.tmp('otsu_closed_t1.mnc')], 'A[0]*A[1]', minc.tmp('masked_t1.mnc'))
+
+                minc.linear_register( minc.tmp('trunc_masked_t1.mnc'), model.scan, minc.tmp('stx.xfm'),
+                                      init_xfm=init_xfm, objective='-nmi', conf='bestlinreg_new')
+
+                minc.resample_labels( model.mask, minc.tmp('brainmask_t1.mnc'),
+                                      transform=minc.tmp('stx.xfm'), invert_transform=True,
+                                      like=minc.tmp('otsu_defrag_t1.mnc') )
+
+                minc.calc([minc.tmp('otsu_defrag_t1.mnc'),minc.tmp('brainmask_t1.mnc')],'A[0]*A[1]',minc.tmp('weightmask_t1.mnc'))
+
+                minc.n4(minc.tmp('masked_t1.mnc'),
+                        output_field=out_field.scan,
+                        shrink=parameters.get('shrink',4),
+                        iter=parameters.get('iter','200x200x200x200'),
+                        weight_mask=minc.tmp('weightmask_t1.mnc'),
+                        mask=minc.tmp('otsu_closed_t1.mnc'),
+                        distance=parameters.get('distance',200),
+                        datatype=parameters.get('datatype',None)
+                        )
+            else:
+                minc.n4(in_scan.scan,
+                        output_field=out_field.scan,
+                        weight_mask=weight_mask,
+                        shrink=parameters.get('shrink',4),
+                        datatype=parameters.get('datatype',None),
+                        iter=parameters.get('iter','200x200x200'),
+                        distance=parameters.get('distance',200))
+
+def apply_nu(in_scan, field, out_scan, parameters={}):
+    """Apply non-uniformity correction
+    """
+    with mincTools() as minc:
+        if not minc.checkfiles(inputs=[field.scan],outputs=[out_scan.scan]):
+            return
+        minc.resample_smooth(field.scan,minc.tmp('fld.mnc'),like=in_scan.scan,order=1)
+        minc.calc([in_scan.scan,minc.tmp('fld.mnc')],
+                  'A[0]/A[1]', out_scan.scan)
+
+
+def normalize_intensity(in_scan, out_scan,
+                        parameters={},
+                        model=None):
+    """Perform global intensity scale normalization
+    """
+    # TODO: make output exp file
+    with mincTools() as minc:
+
+        if not minc.checkfiles(inputs=[in_scan.scan],outputs=[out_scan.scan]):
+            return
+
+        order = parameters.get('order',1)
+        _model=None
+
+        #
+        if model is None:
+            _model = parameters.get('model',None)
+        else:
+            _model = model.scan
+
+        if _model is None:
+            raise mincError('Need model')
+
+        scan_mask  = None
+        model_mask = None
+
+        if in_scan.mask is not None and model is not None:
+            scan_mask  = in_scan.mask
+            model_mask = model.mask
+
+        if parameters.get('disable',False):
+            # just bypass
+            shutil.copyfile(in_scan.scan,out_scan.scan)
+        elif parameters.get('nuyl',False):
+            minc.nuyl_normalize(in_scan.scan,_model,out_scan.scan,
+                                source_mask=scan_mask,
+                                target_mask=model_mask)
+        elif parameters.get('nuyl2',False):
+            hl.nuyl_normalize2( in_scan.scan,_model,out_scan.scan,
+                                #source_mask=input_mask,target_mask=model_mask,
+                                fwhm=parameters.get('nuyl2_fwhm',2.0),
+                                iterations=parameters.get('nuyl2_iter',4),
+                                )
+        else:
+            minc.volume_pol(in_scan.scan, _model, out_scan.scan,
+                            order=order,
+                            source_mask=scan_mask,
+                            target_mask=model_mask)
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/lp/qc.py b/ipl/lp/qc.py
new file mode 100644
index 0000000..4af72ac
--- /dev/null
+++ b/ipl/lp/qc.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date 14/08/2015
+#
+# Longitudinal pipeline resampling
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+from ipl.minc_qc import qc,qc_field_contour
+
+
+def draw_qc_stx(in_scan,in_outline,out_qc,options={}):
+    if options.get('big'):
+        with mincTools() as m:
+            m.qc(in_scan.scan,out_qc.fname,
+                 big=True,mask=in_outline.scan,
+                 mask_range=[0.0,1.0])
+    else:
+        qc(in_scan.scan,out_qc.fname,
+           mask=in_outline.scan,
+           mask_range=[0.0,1.0],
+           mask_bg=0.5, use_max=True)
+
+
+def draw_qc_mask(in_scan,out_qc,options={}):
+    if options.get('big'):
+        with mincTools() as m:
+            m.qc(in_scan.scan,out_qc.fname,
+                 big=True,mask=in_scan.mask,
+                 mask_range=[0.0,1.0])
+    else:
+        qc(in_scan.scan,out_qc.fname,
+           mask=in_scan.mask,
+           mask_range=[0.0,1.0],
+           mask_bg=0.5, use_max=True)
+
+def draw_qc_cls(in_scan,in_cls,out_qc,options={}):
+    if options.get('big'):
+        with mincTools() as m:
+            m.qc(in_scan.scan,out_qc.fname,
+                 big=True,mask=in_cls.scan,
+                 mask_range=[0.0,3.5],
+                 spectral_mask=True)
+    else:
+        qc(in_scan.scan,out_qc.fname,
+           mask=in_cls.scan,
+           mask_range=[0.0,3.5],
+           mask_cmap='spectral',
+           mask_bg=0.5, use_max=True)
+
+
+def draw_qc_lobes(in_scan,in_lobes,out_qc,options={}):
+    if options.get('big'):
+        with mincTools() as m:
+            m.qc(in_scan.scan,out_qc.fname,
+                 big=True,mask=in_lobes.scan,
+                 spectral_mask=True)
+    else:
+        qc(in_scan.scan,out_qc.fname,
+           mask=in_lobes.scan,
+           mask_cmap='spectral',
+           mask_bg=0.5, use_max=True)
+
+
+def draw_qc_add(in_scan1,in_scan2,out_qc,options={}):
+    if options.get('big'):
+        with mincTools() as m:
+            m.qc(in_scan1.scan,out_qc.fname,
+                 big=True,red=True,
+                 mask=in_scan2.scan,
+                 green_mask=True)
+    else:
+        qc(in_scan1.scan,out_qc.fname,
+           mask=in_scan2.scan,
+           image_cmap='red',
+           mask_cmap='green',
+           mask_bg=0.5, use_max=True)
+
+def draw_qc_nu(in_field,out_qc,options={}):
+    qc_field_contour(in_field.scan,out_qc.fname,
+                     image_cmap='jet')
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80
diff --git a/ipl/lp/registration.py b/ipl/lp/registration.py
new file mode 100644
index 0000000..3b191e7
--- /dev/null
+++ b/ipl/lp/registration.py
@@ -0,0 +1,333 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date 14/08/2015
+#
+# Longitudinal pipeline registration
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+import ipl.registration
+import ipl.ants_registration
+import ipl.elastix_registration
+
+
+def lin_registration(scan, model, out_xfm, init_xfm=None, parameters={}, corr_xfm=None, par=None, log=None):
+    """Perform linear registration
+
+    """
+    with mincTools() as m:
+
+        if not m.checkfiles(inputs=[scan.scan,model.scan],outputs=[out_xfm.xfm]):
+            return
+
+        use_inverse    = parameters.get('inverse',   False)
+        lin_mode       = parameters.get('type',      'ants')
+        options        = parameters.get('options',   None)
+        downsample     = parameters.get('downsample',None)
+        close          = parameters.get('close',     False)
+        resample       = parameters.get('resample',  False)
+        objective      = parameters.get('objective', '-xcorr')
+        use_model_mask = parameters.get('use_model_mask',False)
+
+        print("Running lin_registration with parameters:{}".format(repr(parameters)))
+
+        _init_xfm=None
+        _in_scan = scan.scan
+        _in_mask = scan.mask
+
+        _in_model      = model.scan
+        _in_model_mask = model.mask
+        _out_xfm       = out_xfm.xfm
+
+        if init_xfm is not None:
+            _init_xfm=init_xfm.xfm
+
+        if corr_xfm is not None:
+            # apply distortion correction before linear registration,
+            # but don't include it in the linear XFM
+            _in_scan=m.tmp('corr_scan.mnc')
+            m.resample_smooth(scan.scan,_in_scan, transform=corr_xfm.xfm)
+            if scan.mask is not None:
+                _in_mask=m.tmp('corr_scan_mask.mnc')
+                m.resample_labels(scan.mask,_in_mask, transform=corr_xfm.xfm, like=_in_scan)
+
+        if init_xfm is not None and resample:
+            #_init_xfm=init_xfm.xfm
+            _init_xfm=None
+            _out_xfm=m.tmp('out.xfm')
+            m.resample_smooth(_in_scan,m.tmp('scan_scan.mnc'), transform=init_xfm.xfm, like=model.scan)
+            _in_scan=m.tmp('scan_scan.mnc')
+            if scan.mask is not None:
+                m.resample_labels(scan.mask, m.tmp('scan_mask.mnc'), transform=init_xfm.xfm, like=model.scan)
+                _in_mask=m.tmp('scan_mask.mnc')
+
+        print("lin_registration: mode={} init_xfm={} scan_mask={} use_inverse={}".format(lin_mode,_init_xfm,scan.mask,use_inverse))
+
+        _model_mask=None
+
+        # use the model mask even if the scan mask is unspecified!
+        # to run experiments mostly
+        if use_model_mask or _in_mask is not None:
+            _model_mask=model.mask
+
+        _save_out_xfm=_out_xfm
+        if use_inverse:
+            _save_out_xfm=_out_xfm
+            _out_xfm=m.tmp('inverted_out.xfm')
+
+            save_in_scan=_in_scan
+            save_in_mask=_in_mask
+
+            _in_scan=_in_model
+            _in_mask=_in_model_mask
+
+            _in_model=save_in_scan
+            _in_model_mask=save_in_mask
+
+
+        if lin_mode=='ants':
+            ipl.ants_registration.linear_register_ants2(
+                _in_scan,
+                _in_model,
+                _out_xfm,
+                source_mask=_in_mask,
+                target_mask=_model_mask,
+                init_xfm=_init_xfm,
+                parameters=options,
+                close=close,
+                downsample=downsample,
+                )
+        elif lin_mode=='elx':
+            output_par=None
+            output_log=None
+
+            if par is not None:
+                output_par=par.fname
+
+            if log is not None:
+                output_log=log.fname
+
+            ipl.elastix_registration.register_elastix(
+                _in_scan,
+                _in_model,
+                output_xfm=_out_xfm,
+                source_mask=_in_mask,
+                target_mask=_model_mask,
+                init_xfm=_init_xfm,
+                downsample=downsample,
+                parameters=options,
+                nl=False,
+                output_log=output_log,
+                output_par=output_par
+                )
+        elif lin_mode=='mritotal':
+            # going to use mritotal directly
+            #m.command()
+            model_name=os.path.basename(model.scan).rsplit('.mnc',1)[0]
+            model_dir=os.path.dirname(model.scan)
+            # TODO: add more options?
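+            # Usage sketch for this branch (not part of the patch): extra
+            # mritotal command-line flags can be supplied via the 'options'
+            # list in parameters, e.g.
+            #
+            #   lin_registration(scan, model, out_xfm,
+            #                    parameters={'type': 'mritotal',
+            #                                'options': []})  # flags go here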
+ cmd=['mritotal','-model',model_name,'-modeldir',model_dir, _in_scan, _out_xfm] + if options is not None: + cmd.extend(options) + + m.command(cmd, + inputs=[_in_scan], + outputs=[_out_xfm]) + else: + ipl.registration.linear_register( + _in_scan, + _in_model, + _out_xfm, + source_mask=_in_mask, + target_mask=_model_mask, + init_xfm=_init_xfm, + objective=objective, + downsample=downsample, + conf=options, + parameters=lin_mode + ) + + if use_inverse: # need to invert transform + m.xfminvert(_out_xfm,_save_out_xfm) + _out_xfm=_save_out_xfm + + if init_xfm is not None and resample: + m.xfmconcat([init_xfm.xfm,_out_xfm],out_xfm.xfm) + + +def intermodality_co_registration(scan, ref, out_xfm, + init_xfm=None, + parameters={}, + corr_xfm=None, + corr_ref=None, + par=None, log=None): + with mincTools() as m: + + if not m.checkfiles(inputs=[scan.scan,ref.scan],outputs=[out_xfm.xfm]): + return + + lin_mode= parameters.get('type', 'ants') + options= parameters.get('options', None) + downsample=parameters.get('downsample', None) + close= parameters.get('close', True) + resample= parameters.get('resample', False) + objective =parameters.get('objective', '-nmi') + nl =parameters.get('nl', False) + + print("Running intermodality_co_registration with parameters:{}".format(repr(parameters))) + + _init_xfm=None + _in_scan=scan.scan + _in_mask=scan.mask + _out_xfm=out_xfm.xfm + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + + if corr_xfm is not None: + # apply distortion correction before linear registration, + # but then don't include it it into linear XFM + _in_scan=m.tmp('corr_scan.mnc') + m.resample_smooth(scan.scan,_in_scan, transform=corr_xfm.xfm) + if scan.mask is not None: + _in_mask=m.tmp('corr_scan_mask.mnc') + m.resample_labels(scan.mask,_in_mask, transform=corr_xfm.xfm, like=_in_scan) + + if init_xfm is not None and resample: + #_init_xfm=init_xfm.xfm + _init_xfm=None + _out_xfm=m.tmp('out.xfm') + m.resample_smooth(_in_scan,m.tmp('scan_scan.mnc'), transform=init_xfm.xfm, like=model.scan) + _in_scan=m.tmp('scan_scan.mnc') + if scan.mask is not None: + m.resample_labels(scan.mask,_in_mask, transform=init_xfm.xfm, like=model.scan) + _in_mask=m.tmp('scan_mask.mnc') + + print("intermodality_co_registration: mode={} init_xfm={} scan_mask={}".format(lin_mode,_init_xfm,scan.mask)) + + if lin_mode=='ants': + ipl.ants_registration.linear_register_ants2( + _in_scan, + ref.scan, + _out_xfm, + source_mask=_in_mask, + target_mask=ref.mask, + init_xfm=_init_xfm, + parameters=options, + close=close, + downsample=downsample, + ) + elif lin_mode=='elx': + output_par=None + output_log=None + + if par is not None: + output_par=par.fname + + if log is not None: + output_log=log.fname + + ipl.elastix_registration.register_elastix( + _in_scan, + ref.scan, + output_xfm=_out_xfm, + source_mask=_in_mask, + target_mask=ref.mask, + init_xfm=_init_xfm, + downsample=downsample, + parameters=options, + nl=nl, + output_log=output_log, + output_par=output_par + ) + else: + ipl.registration.linear_register( + _in_scan, + ref.scan, + _out_xfm, + source_mask=_in_mask, + target_mask=ref.mask, + init_xfm=_init_xfm, + objective=objective, + downsample=downsample, + conf=options, + parameters=lin_mode, + close=close + ) + + if init_xfm is not None and resample: + m.xfmconcat([init_xfm.xfm,_out_xfm],out_xfm.xfm) + + +def nl_registration(scan, model, out_xfm, init_xfm=None, parameters={}): + """Perform non-linear registration + + """ + nl_mode=parameters.get('type','ants') + options=parameters.get('options',None) + 
downsample=parameters.get('downsample',None) + level=parameters.get('level',2) + start=parameters.get('start_level',32) + + with mincTools() as m: + + if not m.checkfiles(inputs=[scan.scan,model.scan],outputs=[out_xfm.xfm]): + return + + _init_xfm=None + if init_xfm is not None: + _init_xfm=init_xfm.xfm + + if nl_mode=='ants': + ipl.ants_registration.non_linear_register_ants2( + scan.scan, + model.scan, + out_xfm.xfm, + source_mask=scan.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=options, + downsample=downsample, + level=level, + start=start, + ) + elif nl_mode=='elx': + ipl.elastix_registration.register_elastix( + scan.scan, + model.scan, + output_xfm=out_xfm.xfm, + source_mask=scan.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + downsample=downsample, + parameters=options, + nl=True + ) + else: + objective='-xcorr' + if options is not None: + objective=options.get('objective') + + ipl.registration.non_linear_register_full( + scan.scan, + model.scan, + out_xfm.xfm, + source_mask=scan.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + objective=objective, + downsample=downsample, + parameters=options, + level=level, + start=start, + ) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/lp/resample.py b/ipl/lp/resample.py new file mode 100644 index 0000000..28ed8c6 --- /dev/null +++ b/ipl/lp/resample.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date 14/08/2015 +# +# Longitudinal pipeline resampling + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +def warp_scan(sample, reference, output_scan, transform=None, parameters={},corr_xfm=None): + with mincTools() as m: + xfm=None + xfms=[] + + if corr_xfm is not None: + xfms.append(corr_xfm.xfm) + if transform is not None: + xfms.append(transform.xfm) + + if len(xfms)==0: + pass + if len(xfms)==1: + xfm=transform.xfm + else: + m.xfmconcat(xfms,m.tmp('concatenated.xfm')) + xfm=m.tmp('concatenated.xfm') + + resample_order=parameters.get('resample_order',4) + + m.resample_smooth(sample.scan, output_scan.scan, + transform=xfm, like=reference.scan, + order=resample_order) + + +def warp_mask(sample, reference, output_scan, transform=None, parameters={},corr_xfm=None): + with mincTools() as m: + xfm=None + xfms=[] + + if corr_xfm is not None: + xfms.append(corr_xfm.xfm) + if transform is not None: + xfms.append(transform.xfm) + + if len(xfms)==0: + pass + if len(xfms)==1: + xfm=transform.xfm + else: + m.xfmconcat(xfms,m.tmp('concatenated.xfm')) + xfm=m.tmp('concatenated.xfm') + + resample_order=parameters.get('resample_order',4) + m.resample_labels(sample.mask, output_scan.mask, transform=xfm, like=reference.scan, order=resample_order) + + +def warp_cls_back(t1w_tal, tal_cls, t1w_tal_xfm,reference, native_t1w_cls, parameters={},corr_xfm=None): + with mincTools() as m: + resample_order=parameters.get('resample_order',0) + resample_baa =parameters.get('resample_baa',False) + + xfm=t1w_tal_xfm.xfm + if corr_xfm is not None: + m.xfmconcat([corr_xfm.xfm,t1w_tal_xfm.xfm],m.tmp('concatenated.xfm')) + xfm=m.tmp('concatenated.xfm') + + + m.resample_labels(tal_cls.scan, native_t1w_cls.scan, + transform=xfm, + like=reference.scan, + order=resample_order, + baa=resample_baa, + invert_transform=True) + +def warp_mask_back(t1w_tal, t1w_tal_xfm, reference, native_t1w_cls, parameters={},corr_xfm=None): + with mincTools() as m: + 
resample_order=parameters.get('resample_order',0) + resample_baa =parameters.get('resample_baa',False) + + xfm=t1w_tal_xfm.xfm + if corr_xfm is not None: + m.xfmconcat([corr_xfm.xfm,t1w_tal_xfm.xfm],m.tmp('concatenated.xfm')) + xfm=m.tmp('concatenated.xfm') + + m.resample_labels(t1w_tal.mask, native_t1w_cls.mask, + transform=xfm, + like=reference.scan, + order=resample_order, + baa=resample_baa, + invert_transform=True) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/lp/segment.py b/ipl/lp/segment.py new file mode 100644 index 0000000..b4fdb48 --- /dev/null +++ b/ipl/lp/segment.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date 14/08/2015 +# +# Longitudinal pipeline resampling + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +from .registration import nl_registration + +def extract_brain_beast(scan, parameters={},model=None): + """extract brain using BEaST """ + with mincTools() as m: + # TODO: come up with better default? + beast_lib=parameters.get('beastlib','/opt/minc/share/beast-library-1.1') + beast_res=parameters.get('resolution',2) + beast_mask=beast_lib+os.sep+'union_mask.mnc' + + if m.checkfiles(inputs=[scan.scan], outputs=[scan.mask]): + tmp_in=m.tmp('like_beast.mnc') + m.resample_smooth(scan.scan,tmp_in,like=beast_mask) + # run additional intensity normalizaton + if parameters.get('normalize',True) and model is not None: + m.volume_pol(tmp_in,model.scan,m.tmp('like_beast_norm.mnc')) + tmp_in=m.tmp('like_beast_norm.mnc') + + # run beast + beast_v10_template = beast_lib + os.sep \ + + 'intersection_mask.mnc' + beast_v10_margin = beast_lib + os.sep + 'margin_mask.mnc' + + beast_v10_intersect = beast_lib + os.sep \ + + 'intersection_mask.mnc' + + # perform segmentation + m.run_mincbeast(tmp_in,m.tmp('beast_mask.mnc'), + beast_lib=beast_lib, beast_res=beast_res) + + m.resample_labels(m.tmp('beast_mask.mnc'),scan.mask,like=scan.scan) + +def extract_brain_nlreg(scan, parameters={},model=None): + """extract brain using non-linear registration to the template""" + with mincTools() as m: + if m.checkfiles(inputs=[scan.scan], outputs=[scan.mask]): + tmp_xfm=MriTransform(prefix=m.tempdir, name='nl_'+scan.name) + nl_registration(scan, model, tmp_xfm, parameters=parameters) + # warp template atlas to subject's scan + m.resample_labels(model.mask,scan.mask, transform=tmp_xfm.xfm, invert_transform=True) + + +def classify_tissue(scan, cls, + model_name=None, + model_dir=None, + parameters={}, + xfm=None ): + """Tissue classification + """ + with mincTools() as m: + m.classify_clean([scan.scan], cls.scan, + mask=scan.mask, model_dir=model_dir, + model_name=model_name,xfm=xfm.xfm) + + +def segment_lobes(tal_cls,nl_xfm, tal_lob, model=None, lobe_atlas_dir=None, + parameters={}): + """Lobe segmentation + """ + with mincTools() as m: + m.lobe_segment(tal_cls.scan,tal_lob.scan, + nl_xfm=nl_xfm.xfm,template=model.scan, + atlas_dir=lobe_atlas_dir) + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/lp/structures.py b/ipl/lp/structures.py new file mode 100644 index 0000000..f35f423 --- /dev/null +++ b/ipl/lp/structures.py @@ -0,0 +1,265 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date +# + +# Longitudinal pipeline data structures + +import shutil +import os +import sys +import traceback +import json + + +class MriScan(object): + """Represents a 3D volume as an object on disk + (optionally) a mask + """ + def __init__(self, + prefix = None, name = None, modality = None, + iter = None, scan = None, mask = '', + protect = False ): + self.prefix=prefix + self.name=name + self.iter=iter + self.protect=protect + self.modality=modality + + if scan is None : + if self.iter is None: + if self.modality is not None: self.scan=self.prefix+os.sep+self.name+'_'+self.modality+'.mnc' + else: self.scan=self.prefix+os.sep+self.name+'.mnc' + else: + if self.modality is not None: self.scan=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_'+self.modality+'.mnc' + else: self.scan=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_'+'.mnc' + else: + self.scan=scan + + if mask=='': + if self.iter is None: + self.mask=self.prefix+os.sep+self.name+'_mask.mnc' + else: + self.mask=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_mask.mnc' + else: + self.mask=mask + + if self.name is None: + self.name=os.path.basename(self.scan) + + if self.prefix is None: + self.prefix=os.path.dirname(self.scan) + + def __repr__(self): + return 'MriScan(prefix="{}", name="{}", modality="{}", iter="{}",scan="{}",mask="{}",protect={})'.\ + format(self.prefix,self.name,self.modality,repr(self.iter),self.scan,self.mask,repr(self.protect)) + + def cleanup(self,verbose=False): + if not self.protect: + for i in (self.scan, self.mask ): + if i is not None and os.path.exists(i): + if verbose: + print("Removing:{}".format(i)) + os.unlink(i) + + +class MriTransform(object): + """Represents transformation + """ + def __init__(self, prefix, name, iter=None, nl=False, xfm=None, grid=None): + self.prefix=prefix + self.name=name + self.iter=iter + self.nl=nl + self.xfm=xfm + self.grid=grid + + if self.xfm is None: + if self.iter is None: + self.xfm= self.prefix+os.sep+self.name+'.xfm' + else: + self.xfm= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'.xfm' + + if self.grid is None and xfm is None and nl: + if self.iter is None: + self.grid= self.prefix+os.sep+self.name+'_grid_0.mnc' + else: + self.grid= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_grid_0.mnc' + + + def __repr__(self): + return 'MriTransform(prefix="{}",name="{}",iter="{}",nl={})'.\ + format(self.prefix,self.name,repr(self.iter),self.nl) + + def cleanup(self, verbose=False): + for i in (self.xfm, self.grid ): + if i is not None and os.path.exists(i): + if verbose: + print("Removing:{}".format(i)) + os.unlink(i) + + +class MriQCImage(object): + """Represents QC image (.jpg) + """ + def __init__(self, prefix, name, iter=None, fname=None, suffix='.jpg'): + self.prefix=prefix + self.name=name + self.iter=iter + self.fname=fname + self.suffix=suffix + + if self.fname is None: + if self.iter is None: + self.fname=self.prefix+os.sep+self.name+self.suffix + else: + self.fname=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+self.suffix + + def __repr__(self): + return 'MriQCImage(prefix="{}",name="{}",iter="{}",fname={})'.\ + format(self.prefix,self.name,repr(self.iter),self.fname) + + def cleanup(self, verbose=False): + #TODO: implement? 
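+        # a possible implementation (sketch, not enabled): remove the QC image
+        #   if self.fname is not None and os.path.exists(self.fname):
+        #       os.unlink(self.fname)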
+        pass
+
+
+class MriAux(object):
+    """Represents an auxiliary file (text)
+    """
+    def __init__(self, prefix, name, iter=None, fname=None, suffix='.txt'):
+        self.prefix=prefix
+        self.name=name
+        self.iter=iter
+        self.fname=fname
+        self.suffix=suffix
+
+        if self.fname is None:
+            if self.iter is None:
+                self.fname=self.prefix+os.sep+self.name+self.suffix
+            else:
+                self.fname=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+self.suffix
+
+    def __repr__(self):
+        return 'MriAux(prefix="{}",name="{}",iter="{}",fname={})'.\
+               format(self.prefix,self.name,repr(self.iter),self.fname)
+
+    def cleanup(self, verbose=False):
+        #TODO: implement?
+        pass
+
+
+class PipelineEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, MriTransform):
+            return {'name':obj.name,
+                    'iter':obj.iter,
+                    'xfm':obj.xfm,
+                    'grid':obj.grid,
+                    'nl': obj.nl,
+                    'type':'transform',
+                   }
+
+        if isinstance(obj, MriScan):
+            return {'name':obj.name,
+                    'modality': obj.modality,
+                    'iter':obj.iter,
+                    'scan':obj.scan,
+                    'mask':obj.mask,
+                    'type':'scan',
+                   }
+        if isinstance(obj, MriQCImage):
+            return {'name':obj.name,
+                    'iter':obj.iter,
+                    'fname':obj.fname,
+                    'type':'qc_image',
+                   }
+
+        if isinstance(obj, MriAux):
+            return {'name':obj.name,
+                    'iter':obj.iter,
+                    'fname':obj.fname,
+                    'type':'aux'
+                   }
+        # Let the base class default method raise the TypeError
+        return json.JSONEncoder.default(self, obj)
+
+
+def save_summary(summary,out_file):
+    with open(out_file,'w') as f:
+        json.dump(summary, f, indent=1, cls=PipelineEncoder, sort_keys=True)
+
+def save_pipeline_output(summary,out_file):
+    save_summary(summary,out_file)
+
+def convert_summary(in_dict):
+    ret={}
+    # iterate over all entries, assuming they contain only recognized types
+    # (.items() works on both Python 2 and 3, unlike .iteritems())
+    for i,j in in_dict.items():
+        if isinstance(j, dict):
+            if j.get('type',None)=='aux':
+                ret[i]=MriAux(
+                    os.path.dirname(j.get('fname','.')),
+                    name=j.get('name',None),
+                    iter=j.get('iter',None),
+                    fname=j.get('fname',None))
+
+            elif j.get('type',None)=='qc_image':
+                ret[i]=MriQCImage(
+                    os.path.dirname(j.get('fname','.')),
+                    j.get('name',''),
+                    iter=j.get('iter',None),
+                    fname=j.get('fname',None),
+                    suffix='.'+j.get('fname','.jpg').rsplit('.',1)[-1],
+                    )
+
+            elif j.get('type',None)=='scan':
+                # scan entries carry their path in 'scan', not 'fname'
+                ret[i]=MriScan(
+                    prefix=os.path.dirname(j.get('scan','.')),
+                    name=j.get('name',''),
+                    iter=j.get('iter',None),
+                    scan=j.get('scan',None),
+                    mask=j.get('mask',''),
+                    modality=j.get('modality','')
+                    )
+
+            elif j.get('type',None)=='transform':
+                # transform entries carry their path in 'xfm', not 'fname'
+                ret[i]=MriTransform(
+                    os.path.dirname(j.get('xfm','.')),
+                    j.get('name',''),
+                    iter=j.get('iter',None),
+                    xfm=j.get('xfm',None),
+                    grid=j.get('grid',None),
+                    nl=j.get('nl',False)
+                    )
+
+            else: # just copy it!
+                ret[i]=j
+
+        else:
+            ret[i]=j
+    return ret
+
+def load_summary(in_file):
+    tmp=None
+    with open(in_file,'r') as f:
+        tmp=json.load(f)
+    ret=convert_summary(tmp)
+    return ret
+
+def load_pipeline_output(in_file):
+    tmp=None
+    with open(in_file,'r') as f:
+        tmp=json.load(f)
+    ret=[]
+    for i in tmp:
+        o=convert_summary(i)
+        o['output']=convert_summary(o['output'])
+        ret.append(o)
+    return ret
+
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/lp/utils.py b/ipl/lp/utils.py
new file mode 100644
index 0000000..24d03d6
--- /dev/null
+++ b/ipl/lp/utils.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date 14/08/2015
+#
+# Longitudinal pipeline preprocessing
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+import json
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+
+def create_dirs(dirs):
+    for i in dirs:
+        if not os.path.exists(i):
+            os.makedirs(i)
+
+
+def xfm_remove_scale(in_xfm,out_xfm,unscale=None):
+    """remove scaling factors from a linear XFM
+
+    """
+    _unscale=None
+    if unscale is not None:
+        _unscale=unscale.xfm
+
+    with mincTools() as minc:
+        minc.xfm_noscale(in_xfm.xfm,out_xfm.xfm,unscale=_unscale)
+
+
+def xfm_concat(in_xfms,out_xfm):
+    """Concatenate multiple transforms
+
+    """
+    with mincTools() as minc:
+        minc.xfmconcat([ i.xfm for i in in_xfms],out_xfm.xfm)
+
+
+
+def extract_volumes(in_lob, in_cls, tal_xfm, out,
+                    produce_json=False,
+                    subject_id=None,
+                    timepoint_id=None,
+                    lobedefs=None):
+    """Convert lobe segmentation to volumetric measurements
+
+    """
+    with mincTools() as minc:
+        vol_lobes= minc.label_stats( in_lob.scan, label_defs=lobedefs )
+        vol_cls  = minc.label_stats( in_cls.scan )
+        params=minc.xfm2param(tal_xfm.xfm)
+        # total volume scaling factor of the linear stereotaxic transform
+        vol_scale=params['scale'][0]*params['scale'][1]*params['scale'][2]
+
+        volumes={ k[0]:k[1]*vol_scale for k in vol_lobes }
+        _vol_cls = { k[0]: k[1]*vol_scale for k in vol_cls }
+        # TODO: figure out what to do when keys are missing, i.e. something is definitely wrong
+        volumes['CSF']=_vol_cls.get(1,0.0)
+        volumes['GM']=_vol_cls.get(2,0.0)
+        volumes['WM']=_vol_cls.get(3,0.0)
+
+        volumes['ICC']=volumes['CSF']+volumes['GM']+volumes['WM']
+
+        if subject_id is not None:
+            volumes['id']=subject_id
+
+        if timepoint_id is not None:
+            volumes['timepoint']=timepoint_id
+
+        volumes['scale']=vol_scale
+
+        # save either as text file or json
+        with open(out.fname,'w') as f:
+            if produce_json:
+                json.dump(volumes,f,indent=1)
+            else:
+                for i,j in volumes.items():
+                    f.write("{} {}\n".format(i,j))
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/minc_hl.py b/ipl/minc_hl.py
new file mode 100755
index 0000000..3733ff0
--- /dev/null
+++ b/ipl/minc_hl.py
@@ -0,0 +1,267 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S.
FONOV +# @date 4/01/2016 +# +# high level tools + +from __future__ import print_function + +import os +import sys +import shutil +import tempfile +import traceback + +#from sigtools.modifiers import kwoargs,autokwoargs + +# local stuff +from ipl.minc_tools import mincTools,mincError +#from ipl.optfunc import optfunc +#from clize import run + +# numpy & scipy +#from scipy import stats +import numpy as np +from sklearn import linear_model + +try: + # needed to read and write XFM files + import pyezminc +except: + pass + +try: + # needed for matrix log and exp + import scipy.linalg +except: + pass + +def label_normalize(sample, sample_labels, ref, ref_labels, out=None,sample_mask=None, ref_mask=None,median=False,order=3,debug=False): + '''Use label-based intensity normalization''' + with mincTools() as minc: + if not mincTools.checkfiles(outputs=[out]): return + + ref_stats = {i[0]:i[5] for i in minc.label_stats(ref_labels, volume=ref, mask=ref_mask,median=median)} + sample_stats= {i[0]:i[5] for i in minc.label_stats(sample_labels,volume=sample,mask=sample_mask,median=median)} + x=[] + y=[] + + for i in ref_stats: + # use 0-intercept + if i in sample_stats: + #x.append( [1.0, sample_stats[i], sample_stats[i]*sample_stats[i] ] ) + x.append( sample_stats[i] ) + y.append( ref_stats[i] ) + #print('{} -> {}'.format(sample_stats[i],ref_stats[i])) + # FIX origin? (HACK) + x.append(0.0) + y.append(0.0) + # run linear regression + clf = linear_model.LinearRegression() + __x=np.array(x) + + _x=np.column_stack( ( np.power(__x,i) for i in range(1,order+1) ) ) + _y=np.array( y ) + #print(_x) + #print(_y) + clf.fit(_x, _y) + + if debug: + import matplotlib.pyplot as plt + print('Coefficients: \n', clf.coef_) + #print('[0.0 100.0] -> {}'.format(clf.predict([[1.0,0.0,0.0], [1.0,100.0,100.0*100.0]] ))) + + plt.scatter(_x[:,0], _y, color='black') + #plt.plot(_x[:,0], clf.predict(_x), color='blue', linewidth=3) + prx=np.linspace(0,100,20) + prxp=np.column_stack( ( np.power(prx,i) for i in range(1,order+1) ) ) + plt.plot( prx , clf.predict( prxp ), color='red', linewidth=3) + + plt.xticks(np.arange(0,100,5)) + plt.yticks(np.arange(0,100,5)) + + plt.show() + # create command-line for minccalc + cmd='' + for i in range(order): + if i==0: + cmd+='A[0]*{}'.format(clf.coef_[i]) + else: + cmd+='+'+'*'.join(['A[0]']*(i+1))+'*{}'.format(clf.coef_[i]) + if out is not None: + minc.calc([sample],cmd,out) + return cmd + +def nuyl_normalize2( + source,target, + output, + source_mask=None, + target_mask=None, + linear=False, + iterations=4, + filter_gradients=True, + fwhm=2.0, + verbose=0, + remove_bg=False, + ): + """normalize intensities, using areas with uniform intensity """ + with mincTools(verbose=verbose) as minc: + if not mincTools.checkfiles(outputs=[output]): return + # create gradient maps + + if filter_gradients: + minc.blur(source,minc.tmp('source_grad.mnc'),fwhm,gmag=True,output_float=True) + minc.blur(target,minc.tmp('target_grad.mnc'),fwhm,gmag=True,output_float=True) + # create masks of areas with low gradient + minc.binary_morphology(minc.tmp('source_grad.mnc'),'D[1] I[0]',minc.tmp('source_grad_mask.mnc'),binarize_bimodal=True) + source_mask=minc.tmp('source_grad_mask.mnc') + + minc.binary_morphology(minc.tmp('target_grad.mnc'),'D[1] I[0]',minc.tmp('target_grad_mask.mnc'),binarize_bimodal=True) + target_mask=minc.tmp('target_grad_mask.mnc') + + if remove_bg: + minc.binary_morphology(source,'D[8]',minc.tmp('source_mask.mnc'),binarize_bimodal=True) + 
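+            # remove_bg: build a rough foreground mask for each image by
+            # bimodal thresholding of the dilated volume, then intersect it
+            # with the corresponding low-gradient mask below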
minc.binary_morphology(target,'D[8]',minc.tmp('target_mask.mnc'),binarize_bimodal=True) + minc.calc([source_mask,minc.tmp('source_mask.mnc')],'A[0]>0.5&&A[1]>0.5?1:0',minc.tmp('source_grad_mask2.mnc')) + minc.calc([target_mask,minc.tmp('target_mask.mnc')],'A[0]>0.5&&A[1]>0.5?1:0',minc.tmp('target_grad_mask2.mnc')) + source_mask=minc.tmp('source_grad_mask2.mnc') + target_mask=minc.tmp('target_grad_mask2.mnc') + + if source_mask is not None: + minc.resample_labels(source_mask,minc.tmp('source_mask.mnc'),like=minc.tmp('source_grad_mask.mnc')) + minc.calc([minc.tmp('source_grad_mask.mnc'),minc.tmp('source_mask.mnc')],'A[0]>0.5&&A[1]>0.5?1:0',minc.tmp('source_mask2.mnc')) + source_mask=minc.tmp('source_mask2.mnc') + + if target_mask is not None: + minc.resample_labels(target_mask,minc.tmp('target_mask.mnc'),like=minc.tmp('target_grad_mask.mnc')) + minc.calc([minc.tmp('target_grad_mask.mnc'),minc.tmp('target_mask.mnc')],'A[0]>0.5&&A[1]>0.5?1:0',minc.tmp('target_mask2.mnc')) + target_mask=minc.tmp('target_mask2.mnc') + + # now run iterative normalization + for i in range(iterations): + if (i+1)==iterations: out=output + else: out=minc.tmp('{}.mnc'.format(i)) + + minc.nuyl_normalize(source,target,out,source_mask=source_mask,target_mask=target_mask,linear=linear) + source=out + + # done here? + +def patch_normalize(sample, sample_labels, ref, ref_labels, out=None,sample_mask=None, ref_mask=None,median=False,order=3,debug=False): + '''Use label-based intensity normalization''' + with mincTools() as minc: + if not mincTools.checkfiles(outputs=[out]): return + + ref_stats = {i[0]:i[5] for i in minc.label_stats(ref_labels, volume=ref, mask=ref_mask,median=median)} + sample_stats= {i[0]:i[5] for i in minc.label_stats(sample_labels,volume=sample,mask=sample_mask,median=median)} + x=[] + y=[] + + for i in ref_stats: + # use 0-intercept + if i in sample_stats: + #x.append( [1.0, sample_stats[i], sample_stats[i]*sample_stats[i] ] ) + x.append( sample_stats[i] ) + y.append( ref_stats[i] ) + #print('{} -> {}'.format(sample_stats[i],ref_stats[i])) + # FIX origin? 
(HACK) + x.append(0.0) + y.append(0.0) + # run linear regression + clf = linear_model.LinearRegression() + __x=np.array(x) + + _x=np.column_stack( ( np.power(__x,i) for i in range(1,order+1) ) ) + _y=np.array( y ) + #print(_x) + #print(_y) + clf.fit(_x, _y) + + if debug: + import matplotlib.pyplot as plt + print('Coefficients: \n', clf.coef_) + #print('[0.0 100.0] -> {}'.format(clf.predict([[1.0,0.0,0.0], [1.0,100.0,100.0*100.0]] ))) + + plt.scatter(_x[:,0], _y, color='black') + #plt.plot(_x[:,0], clf.predict(_x), color='blue', linewidth=3) + prx=np.linspace(0,100,20) + prxp=np.column_stack( ( np.power(prx,i) for i in range(1,order+1) ) ) + plt.plot( prx , clf.predict( prxp ), color='red', linewidth=3) + + plt.xticks(np.arange(0,100,5)) + plt.yticks(np.arange(0,100,5)) + + plt.show() + # create command-line for minccalc + cmd='' + for i in range(order): + if i==0: + cmd+='A[0]*{}'.format(clf.coef_[i]) + else: + cmd+='+'+'*'.join(['A[0]']*(i+1))+'*{}'.format(clf.coef_[i]) + if out is not None: + minc.calc([sample],cmd,out) + return cmd + + +def xfmavg(inputs,output): + # TODO: handle inversion flag correctly + all_linear=True + all_nonlinear=True + input_xfms=[] + if not mincTools.checkfiles(inputs=inputs, + outputs=[output ]): + return + for j in inputs: + x=pyezminc.read_transform(j) + if x[0][0] and len(x)==1 and (not x[0][1]): + # this is a linear matrix + input_xfms.append(x[0]) + else: + all_linear&=False + # strip identity matrixes + nl=[] + _identity=np.asmatrix(np.identity(4)) + _eps=1e-6 + for i in x: + if i[0]: + if scipy.linalg.norm(_identity-i[2])>_eps: # this is non-identity matrix + all_nonlinear&=False + else: + nl.append(i) + if len(nl)!=1: + all_nonlinear&=False + else: + input_xfms.append(nl[0]) + if all_linear: + acc=np.asmatrix(np.zeros([4,4],dtype=np.complex)) + for i in input_xfms: + acc+=scipy.linalg.logm(i[2]) + acc/=len(input_xfms) + out_xfm=[(True,False,scipy.linalg.expm(acc).real)] + pyezminc.write_transform(output,out_xfm) + elif all_nonlinear: + input_grids=[] + for i in input_xfms: + input_grids.append(i[2]) + output_grid=output.rsplit('.xfm',1)[0]+'_grid_0.mnc' + with mincTools(verbose=2) as m: + m.average(input_grids,output_grid) + out_xfm=[(False,False,output_grid)] + print("xfmavg output:{}".format(repr(out_xfm))) + pyezminc.write_transform(output,out_xfm) + else: + raise Exception("Mixed XFM files provided as input") + + + +if __name__ == '__main__': + #optfunc.run([nuyl_normalize2,label_normalize]) + # TODO: re-implement using optparse + pass + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on + diff --git a/ipl/minc_qc.py b/ipl/minc_qc.py new file mode 100755 index 0000000..401921f --- /dev/null +++ b/ipl/minc_qc.py @@ -0,0 +1,449 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date 11/21/2011 +# +# Tools for creating QC images + +from __future__ import print_function + +import numpy as np +import numpy.ma as ma + +import scipy +import matplotlib +matplotlib.use('AGG') + +import matplotlib.pyplot as plt +import matplotlib.gridspec as gridspec +#from minc2.simple import minc2_file +from minc2_simple import minc2_file + +import matplotlib.cm as cmx +import matplotlib.colors as colors +import argparse + + +try: + unicode = unicode +except NameError: + # 'unicode' is undefined, must be Python 3 + str = str + unicode = str + bytes = bytes + basestring = (str,bytes) +else: + # 'unicode' exists, must be Python 2 + str = str + unicode = unicode + bytes = str + basestring = basestring + + +def alpha_blend(si, so, ialpha, oalpha): + """Perform alpha-blending + """ + si_rgb = si[..., :3] + si_alpha = si[..., 3]*ialpha + + so_rgb = so[..., :3] + so_alpha = so[..., 3]*oalpha + + out_alpha = si_alpha + so_alpha * (1. - si_alpha) + + out_rgb = (si_rgb * si_alpha[..., None] + + so_rgb * so_alpha[..., None] * (1. - si_alpha[..., None])) / out_alpha[..., None] + + out = np.zeros_like(si) + out[..., :3] = out_rgb + out[..., 3] = out_alpha + + return out + + +def max_blend(si,so): + """Perform max-blending + """ + return np.maximum(si,so) + +def over_blend(si,so, ialpha, oalpha): + """Perform max-blending + """ + si_rgb = si[..., :3] + si_alpha = si[..., 3]*ialpha + + so_rgb = so[..., :3] + so_alpha = so[..., 3]*oalpha + + out_alpha = np.maximum(si_alpha , so_alpha ) + + out_rgb = si_rgb * (si_alpha[..., None]-so_alpha[..., None]) + so_rgb * so_alpha[..., None] + + out = np.zeros_like(si) + out[..., :3] = out_rgb + out[..., 3] = out_alpha + + return out + + +def qc( + input, + output, + image_range=None, + mask=None, + mask_range=None, + title=None, + image_cmap='gray', + mask_cmap='red', + samples=5, + mask_bg=None, + use_max=False, + use_over=False, + show_image_bar=False, # TODO:implement this? 
+    show_overlay_bar=False,
+    dpi=100,
+    ialpha=0.8,
+    oalpha=0.2,
+    format=None
+    ):
+    """QC image generation, drop-in replacement for minc_qc.pl
+    Arguments:
+        input  -- input minc file
+        output -- output QC graphics file
+
+    Keyword arguments:
+        image_range -- (optional) intensity range for image
+        mask        -- (optional) input mask file
+        mask_range  -- (optional) mask file range
+        title       -- (optional) QC title
+        image_cmap  -- (optional) color map name for image,
+                       possibilities: red, green, blue and anything from matplotlib
+        mask_cmap   -- (optional) color map for mask, default red
+        samples     -- number of slices to show, default 5
+        mask_bg     -- (optional) level for mask to treat as background
+        use_max     -- (optional) use 'max' colour mixing
+        use_over    -- (optional) use 'over' colour mixing
+        show_image_bar   -- show color bar for intensity range, default False
+        show_overlay_bar -- show color bar for mask intensity range, default False
+        dpi    -- graphics file DPI, default 100
+        ialpha -- alpha channel for colour mixing of main image
+        oalpha -- alpha channel for colour mixing of mask image
+    """
+
+    #_img=minc.Image(input)
+    #_idata=_img.data
+    _img=minc2_file(input)
+    _img.setup_standard_order()
+    _idata=_img.load_complete_volume(minc2_file.MINC2_FLOAT)
+    _idims=_img.representation_dims()
+
+    data_shape=_idata.shape
+    spacing=[_idims[0].step,_idims[1].step,_idims[2].step]
+
+    _ovl=None
+    _odata=None
+    omin=0
+    omax=1
+
+    if mask is not None:
+        # load the overlay from the mask file (not the main image)
+        _ovl=minc2_file(mask)
+        _ovl.setup_standard_order()
+        _ovl_data=_ovl.load_complete_volume(minc2_file.MINC2_FLOAT)
+        if _ovl_data.shape != data_shape:
+            raise ValueError("Overlay shape does not match image! Ovl={} Image={}".format(repr(_ovl_data.shape),repr(data_shape)))
+        if mask_range is None:
+            omin=np.nanmin(_ovl_data)
+            omax=np.nanmax(_ovl_data)
+        else:
+            omin=mask_range[0]
+            omax=mask_range[1]
+        _odata=_ovl_data
+
+        if mask_bg is not None:
+            _odata=ma.masked_less(_odata, mask_bg)
+
+    slices=[]
+
+    # setup ranges
+    vmin=vmax=0.0
+    if image_range is not None:
+        vmin=image_range[0]
+        vmax=image_range[1]
+    else:
+        vmin=np.nanmin(_idata)
+        vmax=np.nanmax(_idata)
+
+    cm = plt.get_cmap(image_cmap)
+    cmo= plt.get_cmap(mask_cmap)
+    cmo.set_bad('k',alpha=0.0)
+
+    cNorm = colors.Normalize(vmin=vmin, vmax=vmax)
+    oNorm = colors.Normalize(vmin=omin, vmax=omax)
+
+    scalarMap  = cmx.ScalarMappable(norm=cNorm, cmap=cm)
+    oscalarMap = cmx.ScalarMappable(norm=oNorm, cmap=cmo)
+    aspects = []
+
+    # axial slices
+    for j in range(0,samples):
+        # integer division keeps i a valid index under Python 3 as well
+        i=(data_shape[0]//samples)*j+(data_shape[0]%samples)//2
+        si=scalarMap.to_rgba(_idata[i , : ,:])
+
+        if _ovl is not None:
+            so=oscalarMap.to_rgba(_odata[i , : ,:])
+            if use_max:    si=max_blend(si,so)
+            elif use_over: si=over_blend(si,so, ialpha, oalpha)
+            else:          si=alpha_blend(si, so, ialpha, oalpha)
+        slices.append( si )
+        aspects.append( spacing[0]/spacing[1] )
+    # coronal slices
+    for j in range(0,samples):
+        i=(data_shape[1]//samples)*j+(data_shape[1]%samples)//2
+        si=scalarMap.to_rgba(_idata[: , i ,:])
+
+        if _ovl is not None:
+            so=oscalarMap.to_rgba(_odata[: , i ,:])
+            if use_max:    si=max_blend(si,so)
+            elif use_over: si=over_blend(si,so, ialpha, oalpha)
+            else:          si=alpha_blend(si, so, ialpha, oalpha)
+        slices.append( si )
+        aspects.append( spacing[2]/spacing[0] )
+
+    # sagittal slices
+    for j in range(0,samples):
+        i=(data_shape[2]//samples)*j+(data_shape[2]%samples)//2
+        si=scalarMap.to_rgba(_idata[: , : , i])
+        if _ovl is not None:
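+            # mix the overlay into the slice with the selected blending mode
+            # (max / over / alpha), as for the axial and coronal slices above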
+            so=oscalarMap.to_rgba(_odata[: , : , i])
+            if use_max:    si=max_blend(si,so)
+            elif use_over: si=over_blend(si,so, ialpha, oalpha)
+            else:          si=alpha_blend(si, so, ialpha, oalpha)
+        slices.append( si )
+        aspects.append( spacing[2]/spacing[1] )
+
+    w, h = plt.figaspect(3.0/samples)
+    fig = plt.figure(figsize=(w,h))
+
+    #outer_grid = gridspec.GridSpec((len(slices)+1)/2, 2, wspace=0.0, hspace=0.0)
+    ax=None
+    imgplot=None
+    for i,j in enumerate(slices):
+        # integer division keeps the grid coordinates integral under Python 3
+        ax = plt.subplot2grid( (3, samples), (i//samples, i%samples) )
+        imgplot = ax.imshow(j,origin='lower',cmap=cm, aspect=aspects[i])
+        ax.set_xticks([])
+        ax.set_yticks([])
+        ax.title.set_visible(False)
+    # show for the last plot
+    if show_image_bar:
+        cbar = fig.colorbar(imgplot)
+
+
+    if title is not None:
+        plt.suptitle(title,fontsize=20)
+        plt.subplots_adjust(wspace = 0.0 ,hspace=0.0)
+    else:
+        plt.subplots_adjust(top=1.0,bottom=0.0,left=0.0,right=1.0,wspace = 0.0 ,hspace=0.0)
+
+    #fig.tight_layout()
+    #plt.show()
+    plt.savefig(output, bbox_inches='tight', dpi=dpi, format=format)
+    plt.close()
+    plt.close('all')
+
+def qc_field_contour(
+    input,
+    output,
+    image_range=None,
+    title=None,
+    image_cmap='gray',
+    samples=5,
+    show_image_bar=False, # TODO:implement this?
+    dpi=100,
+    format=None
+    ):
+    """show field contours
+    """
+
+    _img=minc2_file(input)
+    _img.setup_standard_order()
+    _idata=_img.load_complete_volume(minc2_file.MINC2_FLOAT)
+    _idims=_img.representation_dims()
+
+    data_shape=_idata.shape
+    spacing=[_idims[0].step,_idims[1].step,_idims[2].step]
+
+    slices=[]
+
+    # setup ranges
+    vmin=vmax=0.0
+    if image_range is not None:
+        vmin=image_range[0]
+        vmax=image_range[1]
+    else:
+        vmin=np.nanmin(_idata)
+        vmax=np.nanmax(_idata)
+
+    cm = plt.get_cmap(image_cmap)
+
+    cNorm = colors.Normalize(vmin=vmin, vmax=vmax)
+
+    scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
+
+    for j in range(0,samples):
+        i=(data_shape[0]//samples)*j+(data_shape[0]%samples)//2
+        si=_idata[i , : ,:]
+        slices.append( si )
+
+    for j in range(0,samples):
+        i=(data_shape[1]//samples)*j+(data_shape[1]%samples)//2
+        si=_idata[: , i ,:]
+        slices.append( si )
+
+    for j in range(0,samples):
+        i=(data_shape[2]//samples)*j+(data_shape[2]%samples)//2
+        si=_idata[: , : , i]
+        slices.append( si )
+
+    w, h = plt.figaspect(3.0/samples)
+    fig = plt.figure(figsize=(w,h))
+
+    #outer_grid = gridspec.GridSpec((len(slices)+1)/2, 2, wspace=0.0, hspace=0.0)
+    ax=None
+    imgplot=None
+    for i,j in enumerate(slices):
+        ax = plt.subplot2grid( (3, samples), (i//samples, i%samples) )
+        imgplot = ax.contour(j,origin='lower', cmap=cm, norm=cNorm, levels=np.linspace(vmin,vmax,20))
+        #plt.clabel(imgplot, inline=1, fontsize=8)
+        ax.set_xticks([])
+        ax.set_yticks([])
+        ax.title.set_visible(False)
+    # show for the last plot
+    if show_image_bar:
+        cbar = fig.colorbar(imgplot)
+
+
+    if title is not None:
+        plt.suptitle(title,fontsize=20)
+        plt.subplots_adjust(wspace = 0.0 ,hspace=0.0)
+    else:
+        plt.subplots_adjust(top=1.0,bottom=0.0,left=0.0,right=1.0,wspace = 0.0 ,hspace=0.0)
+
+    plt.savefig(output, bbox_inches='tight', dpi=dpi, format=format)
+    plt.close('all')
+
+
+# register custom maps
+plt.register_cmap(cmap=colors.LinearSegmentedColormap('red',
+                  {'red':   ((0.0, 0.0, 0.0),
+                             (1.0, 1.0, 1.0)),
+
+                   'green': ((0.0, 0.0, 0.0),
+                             (1.0, 0.0, 0.0)),
+
+                   'blue':  ((0.0, 0.0, 0.0),
+                             (1.0, 0.0, 0.0)),
+
+                   'alpha': ((0.0, 0.0, 1.0),
+                             (1.0, 1.0, 1.0))
+                  }))
+
+plt.register_cmap(cmap=colors.LinearSegmentedColormap('green',
+                  {'green': ((0.0, 0.0, 0.0),
+                             (1.0, 1.0, 1.0)),
+
+                   'red':   ((0.0, 0.0, 0.0),
+                             (1.0, 0.0, 0.0)),
+
+
'blue': ((0.0, 0.0, 0.0), + (1.0, 0.0, 0.0)), + + 'alpha': ((0.0, 0.0, 1.0), + (1.0, 1.0, 1.0)) + })) + +plt.register_cmap(cmap=colors.LinearSegmentedColormap('blue', + {'blue': ((0.0, 0.0, 0.0), + (1.0, 1.0, 1.0)), + + 'red': ((0.0, 0.0, 0.0), + (1.0, 0.0, 0.0)), + + 'green': ((0.0, 0.0, 0.0), + (1.0, 0.0, 0.0)), + + 'alpha': ((0.0, 0.0, 1.0), + (1.0, 1.0, 1.0)) + })) + +def parse_options(): + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description='Make QC image') + + parser.add_argument("--debug", + action="store_true", + dest="debug", + default=False, + help="Print debugging information" ) + + parser.add_argument("--contour", + action="store_true", + dest="contour", + default=False, + help="Make contour plot" ) + + parser.add_argument("--bar", + action="store_true", + dest="bar", + default=False, + help="Show colour-bar" ) + + parser.add_argument("--cmap", + dest="cmap", + default=None, + help="Colour map" ) + + parser.add_argument("--mask", + dest="mask", + default=None, + help="Add mask" ) + + parser.add_argument("--over", + dest="use_over", + action="store_true", + default=False, + help="Overplot" ) + + parser.add_argument("--max", + dest="use_max", + action="store_true", + default=False, + help="Use max mixing" ) + + parser.add_argument("input", + help="Input minc file") + + parser.add_argument("output", + help="Output QC file") + + options = parser.parse_args() + + if options.debug: + print(repr(options)) + + return options + +if __name__ == '__main__': + options = parse_options() + if options.input is not None and options.output is not None: + if options.contour: + qc_field_contour(options.input,options.output,show_image_bar=options.bar,image_cmap=options.cmap) + else: + qc(options.input,options.output,mask=options.mask,use_max=options.use_max,use_over=options.use_over,mask_bg=0.5) + else: + print("Refusing to run without input data, run --help") + exit(1) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80 diff --git a/ipl/minc_tools.py b/ipl/minc_tools.py new file mode 100755 index 0000000..dda18c1 --- /dev/null +++ b/ipl/minc_tools.py @@ -0,0 +1,2112 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV
+# @date 11/21/2011
+#
+# Generic minc tools
+
+from __future__ import print_function
+
+import os
+import sys
+import shutil
+import tempfile
+import subprocess
+import re
+import fcntl
+import traceback
+import collections
+import math
+
+import inspect
+
+# local stuff (absolute imports work under both Python 2 and 3)
+from ipl import registration
+from ipl import ants_registration
+from ipl import dd_registration
+from ipl import elastix_registration
+
+# hack to make it work on Python 3
+try:
+    unicode = unicode
+except NameError:
+    # 'unicode' is undefined, must be Python 3
+    str = str
+    unicode = str
+    bytes = bytes
+    basestring = (str,bytes)
+else:
+    # 'unicode' exists, must be Python 2
+    str = str
+    unicode = unicode
+    bytes = str
+    basestring = basestring
+
+def get_git_hash():
+    _script_dir=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
+    _hash_code=''
+    outvalue=1 # assume failure until the git call succeeds
+    try:
+        p=subprocess.Popen(['git', '-C', _script_dir, 'rev-parse', '--short', '--verify', 'HEAD'],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        (output,outerr)=p.communicate()
+        _hash_code=output.decode()
+        outvalue=p.wait()
+    except OSError:
+        _hash_code='Unknown'
+    if not outvalue == 0:
+        _hash_code='Unknown'
+    return _hash_code.rstrip("\n")
+
+class mincError(Exception):
+    """MINC tools general error"""
+    def __init__(self, value='ERROR'):
+        self.value = value
+        self.stack = traceback.extract_stack()
+
+    def __repr__(self):
+        return "mincError:{}\nAT:{}".format(self.value, self.stack)
+
+    def __str__(self):
+        return self.__repr__()
+
+
+class temp_files(object):
+    """Class to keep track of temp files"""
+
+    def __init__(self,tempdir=None,prefix=None):
+
+        self.tempdir = tempdir
+        self.clean_tempdir = False
+        self.tempfiles = {}
+        if not self.tempdir:
+            if prefix is None:
+                prefix='iplMincTools'
+            self.tempdir = tempfile.mkdtemp(prefix=prefix,dir=os.environ.get('TMPDIR',None))
+            self.clean_tempdir = True
+
+        if not os.path.exists(self.tempdir):
+            os.makedirs(self.tempdir)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(
+        self,
+        type,
+        value,
+        traceback,
+        ):
+        self.do_cleanup()
+
+    def __del__(self):
+        self.do_cleanup()
+
+    def do_cleanup(self):
+        """remove temporary directory if present"""
+        if self.clean_tempdir and self.tempdir is not None:
+            shutil.rmtree(self.tempdir)
+            self.clean_tempdir=False
+
+    def temp_file(self, suffix='', prefix=''):
+        """create temporary file"""
+
+        (h, name) = tempfile.mkstemp(suffix=suffix, prefix=prefix,dir=self.tempdir)
+        os.close(h)
+        os.unlink(name)
+        return name
+
+    def tmp(self, name):
+        """return path of a temp file named name"""
+        try:
+            return self.tempfiles[name]
+        except KeyError:
+            self.tempfiles[name] = self.temp_file(suffix=name)
+            return self.tempfiles[name]
+
+    def temp_dir(self, suffix='', prefix=''):
+        """ Create temporary directory for processing"""
+
+        name = tempfile.mkdtemp(suffix=suffix, prefix=prefix,
+                                dir=self.tempdir)
+        return name
+
+    @property
+    def dir(self):
+        return self.tempdir
+
+class cache_files(temp_files):
+    """Class to keep track of work files"""
+    def __init__(self,work_dir=None,context='',tempdir=None):
+        self._locks={}
+        super(cache_files,self).__init__(tempdir=tempdir)
+        self.work_dir=work_dir
+        self.context=context # TODO: something more clever here?
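+        # cache layout (illustrative): <work_dir>/<context>/<name><suffix>,
+        # with a per-file '<name><suffix>.lock' guarding concurrent writers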
+ self.cache_dir=None + if self.work_dir is not None: + self.cache_dir=self.work_dir+os.sep+context+os.sep + if not os.path.exists(self.cache_dir): + os.makedirs(self.cache_dir) + + + def cache(self,name,suffix=''): + """Allocate a name in cache, if cache was setup + also lock the file , so that another process have to wait before using the same file name + + Important: call unlock() on result + """ + #TODO: something more clever here? + fname='' + if self.work_dir is not None: + fname=self.cache_dir+os.sep+name+suffix + lock_name=fname+'.lock' + f=self._locks[lock_name]=open(lock_name, 'a') + fcntl.lockf(f.fileno(), fcntl.LOCK_EX ) + else: + fname=self.tmp(name+suffix) + + return fname + + + def unlock(self,fname): + #TODO: something more clever here? + lock_name=fname+'.lock' + try: + f=self._locks[lock_name] + + if f is not None: + fcntl.lockf(f.fileno(), fcntl.LOCK_UN) + f.close() + + del self._locks[lock_name] + +# try: +# os.unlink(lock_name) +# except OSError: + #probably somebody else is blocking +# pass + + except KeyError: + pass + + + #def __del__(self): + #self.do_cleanup() + # pass + + def __enter__(self): + return self + + def __exit__( + self, + type, + value, + traceback, + ): + self.do_cleanup() + + + def do_cleanup(self): + """unlocking lock files """ + for f in self._locks.keys(): + if self._locks[f] is not None: + fcntl.flock(self._locks[f].fileno(), fcntl.LOCK_UN) + self._locks[f].close() +# try: +# os.unlink(f) +# except OSError: +# #probably somebody else is blocking +# pass + self._locks={} + super(cache_files,self).do_cleanup() + +class mincTools(temp_files): + """minc toolkit interface , mostly basic tools """ + + def __init__(self, tempdir=None, resample=None, verbose=2, prefix=None): + super(mincTools,self).__init__(tempdir=tempdir,prefix=prefix) + # TODO: add some options? + self.resample = resample + self.verbose = verbose + + def __enter__(self): + return super(mincTools,self).__enter__() + + def __exit__( + self, + type, + value, + traceback, + ): + return super(mincTools,self).__exit__(type,value,traceback) + + @staticmethod + def checkfiles( + inputs=None, + outputs=None, + timecheck=False, + verbose=1, + ): + """ Check newer input file """ + + itime = -1 # numer of seconds since epoch + inputs_exist = True + + if inputs is not None: + if isinstance(inputs, basestring): # check if input is only string and not list + if not os.path.exists(inputs): + inputs_exist = False + raise mincError(' ** Error: Input does not exists! :: {}'.format(str(inputs))) + else: + itime = os.path.getmtime(inputs) + else: + for i in inputs: + if not os.path.exists(i): + inputs_exist = False + print(' ** Error: One input does not exists! :: {}'.format(i), file=sys.stderr) + raise mincError(' ** Error: One input does not exists! :: {}'.format(i)) + else: + timer = os.path.getmtime(i) + if timer < itime or itime < 0: + itime = timer + + # Check if outputs exist AND is newer than inputs + + outExists = False + otime = -1 + exists=[] + if outputs is not None: + if isinstance(outputs, basestring): + outExists = os.path.exists(outputs) + if outExists: + otime = os.path.getmtime(outputs) + exists.append(outputs) + else: + for o in outputs: + outExists = os.path.exists(o) + if outExists: + exists.append(outputs) + timer = os.path.getmtime(o) + if timer > otime: + otime = timer + if not outExists: + break + + if outExists: + if timecheck and itime > 0 and otime > 0 and otime < itime: + if verbose>1: + print(' -- Warning: Output exists but older than input! 
Redoing command',file=sys.stderr)
+                    print(' otime ' + str(otime) + ' < itime ' \
+                          + str(itime),file=sys.stderr)
+                return True
+            else:
+                if verbose>1:
+                    print(' -- Skipping: Output Exists:{}'.format(repr(exists)),file=sys.stderr)
+                return False
+        return True
+
+    @staticmethod
+    def execute(cmds, verbose=1):
+        """
+        Execute a command line waiting for the end of it
+        Arguments:
+            cmds: list containing the command line
+
+        Keyword arguments:
+            verbose: if 0, no messages are printed
+
+        return : False if error, otherwise the execution output
+        """
+        output_stderr=""
+        output=""
+        outvalue=0
+        if verbose>0:
+            print(repr(cmds))
+        try:
+
+            if verbose<2:
+                with open(os.devnull, "w") as fnull:
+                    p=subprocess.Popen(cmds, stdout=fnull, stderr=subprocess.PIPE)
+            else:
+                p=subprocess.Popen(cmds, stderr=subprocess.PIPE)
+
+            (output,output_stderr)=p.communicate()
+            outvalue=p.wait()
+
+        except OSError:
+            print("ERROR: command {} Error:{}!\nMessage: {}\n{}".format(str(cmds),str(outvalue),output_stderr,traceback.format_exc()), file=sys.stderr)
+            raise mincError("ERROR: command {} Error:{}!\nMessage: {}\n{}".format(str(cmds),str(outvalue),output_stderr,traceback.format_exc()))
+        if not outvalue == 0:
+            print("ERROR: command {} failed {}!\nMessage: {}\n{}".format(str(cmds),str(outvalue),output_stderr,traceback.format_exc()), file=sys.stderr)
+            raise mincError("ERROR: command {} failed {}!\nMessage: {}\n{}".format(str(cmds),str(outvalue),output_stderr,traceback.format_exc()))
+        return outvalue
+
+    @staticmethod
+    def execute_w_output(cmds, verbose=0):
+        """
+        Execute a command line waiting for the end of it
+
+        cmds: list containing the command line
+        verbose: if 0, no messages are printed
+
+        return : False if error, otherwise the execution output
+        """
+        output=''
+        outvalue=0
+
+        if verbose>0:
+            print(repr(cmds))
+        try:
+            p=subprocess.Popen(cmds,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            (output,outerr)=p.communicate()
+            if verbose>0:
+                print(output.decode())
+            outvalue=p.wait()
+        except OSError as e:
+            print("ERROR: command {} Error:{}!\n{}".format(repr(cmds),str(e),traceback.format_exc()),file=sys.stderr)
+            raise mincError("ERROR: command {} Error:{}!\n{}".format(repr(cmds),str(e),traceback.format_exc()))
+        if not outvalue == 0:
+            print("Command: {} generated output:{} {}\nError:{}".format(' '.join(cmds),outvalue,output,outerr),file=sys.stderr)
+            raise mincError("ERROR: command {} failed {}!\nError:{}\n{}".format(repr(cmds),str(outvalue),outerr,traceback.format_exc()))
+        return output.decode()
+
+    @staticmethod
+    def command(
+        cmds,
+        inputs=None,
+        outputs=None,
+        timecheck=False,
+        verbose=1,
+        ):
+        """
+        Execute a command line waiting for the end of it, testing inputs and outputs
+
+        cmds: list containing the command line
+        inputs: list of files to check if they exist before executing command
+        outputs: list of files that should exist when finishing
+        verbose: if 0, no messages are printed
+        timecheck: The command won't be executed if the output exists and is newer than the input file.
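+        Example (illustrative file names):
+            m.command(['mincresample','in.mnc','out.mnc','-like','ref.mnc'],
+                      inputs=['in.mnc','ref.mnc'], outputs=['out.mnc'])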
+ + return : False if error, otherwise the execution output + """ + + if verbose>0: + print(repr(cmds)) + + if not mincTools.checkfiles(inputs=inputs, outputs=outputs, + verbose=verbose, + timecheck=timecheck): + return 0 + outvalue=0 + output_stderr="" + output="" + use_shell=not isinstance(cmds, list) + try: + if verbose<2: + with open(os.devnull, "w") as fnull: + p=subprocess.Popen(cmds, stdout=fnull, stderr=subprocess.PIPE,shell=use_shell) + else: + p=subprocess.Popen(cmds, stderr=subprocess.PIPE,shell=use_shell) + + (output,output_stderr)=p.communicate() + outvalue=p.wait() + + except OSError: + print("ERROR: command {} Error:{}!\nMessage: {}\n{}".format(str(cmds),str(outvalue),output_stderr,traceback.format_exc()), file=sys.stderr) + raise mincError("ERROR: command {} Error:{}!\nMessage: {}\n{}".format(str(cmds),str(outvalue),output_stderr,traceback.format_exc())) + if not outvalue == 0: + print("ERROR: command {} failed {}!\nMessage: {}\n{}".format(str(cmds),str(outvalue),output_stderr,traceback.format_exc()), file=sys.stderr) + raise mincError("ERROR: command {} failed {}!\nMessage: {}\n{}".format(str(cmds),str(outvalue),output_stderr,traceback.format_exc())) + + outExists = False + if outputs is None: + outExists = True + elif isinstance(outputs, basestring): + outExists = os.path.exists(outputs) + else: + for o in outputs: + outExists = os.path.exists(o) + if not outExists: + break + + if not outExists: + raise mincError('ERROR: Command didn not produce output: {}!'.format(str(cmds))) + + return outvalue + + @staticmethod + def qsub( + comm, + queue='all.q', + name=None, + logfile=None, + depends=None, + ): + """ + Send the job into the sge queue + TODO: improve dependencies and so on + """ + + if not name: + name = comm[0] + try: + qsub_comm = [ + 'qsub','-cwd', + '-N', name, + '-j', 'y', + '-V', '-q', + queue, + ] + path = '' + if logfile: + path = os.path.abspath(logfile) + qsub_comm.extend(['-o', path]) + if depends: + qsub_comm.extend(['-hold_jid', depends]) + + print(' - Name ' + name) + print(' - Queue ' + queue) + print(' - Cmd ' + ' '.join(comm)) + print(' - logfile ' + path) + + #qsub_comm.append(tmpscript) + + cmds="#!/bin/bash\nhostname\n" + cmds+=' '.join(comm)+"\n" + + p=subprocess.Popen(qsub_comm, + stdin=subprocess.PIPE, + stderr=subprocess.STDOUT) + + p.communicate(cmds) + # TODO: check error code? + finally: + pass + + @staticmethod + def qsub_pe( + comm, + pe='all.pe', + slots=1, + name=None, + logfile=None, + depends=None, + ): + """ + Send the job into the sge queue + TODO: improve dependencies and so on + """ + + if not name: + name = comm[0] + try: + qsub_comm = [ + 'qsub','-cwd', + '-N', name, + '-j', 'y', + '-V', '-pe', + pe,str(slots) + ] + path = '' + if logfile: + path = os.path.abspath(logfile) + qsub_comm.extend(['-o', path]) + if depends: + qsub_comm.extend(['-hold_jid', depends]) + + print(' - Name ' + name) + print(' - PE ' + pe) + print(' - Slots ' + str(slots)) + print(' - Cmd ' + ' '.join(comm)) + print(' - logfile ' + path) + + cmds="#!/bin/bash\nhostname\n" + cmds+=' '.join(comm)+"\n" + + p=subprocess.Popen(qsub_comm, + stdin=subprocess.PIPE, + stderr=subprocess.STDOUT) + + p.communicate(cmds) + # TODO: check error code? 
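+            # a possible follow-up (sketch): fail loudly on submission errors
+            #   if p.wait() != 0:
+            #       raise mincError('qsub submission failed')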
+        finally:
+            pass
+
+    @staticmethod
+    def query_dimorder(input):
+        '''read the image dimension order of a minc file'''
+
+        i = subprocess.Popen(['mincinfo', '-vardims', 'image', input],
+                             stdout=subprocess.PIPE).communicate()
+        return i[0].decode().rstrip('\n').split(' ')
+
+    @staticmethod
+    def query_attribute(input, attribute):
+        '''read a value of an attribute inside minc file'''
+
+        i = subprocess.Popen(['mincinfo', '-attvalue', attribute,
+                             input],
+                             stdout=subprocess.PIPE).communicate()
+        return i[0].decode().rstrip('\n').rstrip(' ')
+
+    @staticmethod
+    def set_attribute(input, attribute, value):
+        '''set a value of an attribute inside minc file
+        if value=None - delete the attribute
+        '''
+        if value is None:
+            mincTools.execute(['minc_modify_header', input, '-delete', attribute])
+        elif isinstance(value, basestring):
+            mincTools.execute(['minc_modify_header', input, '-sinsert', attribute + '=' + value])
+        else:
+            # assume that it's a number
+            mincTools.execute(['minc_modify_header', input, '-dinsert', attribute + '=' + str(value)])
+
+    @staticmethod
+    def mincinfo(input):
+        """read basic information about a minc file
+        Arguments:
+            input -- input minc file
+        Returns dict with entries per dimension
+        """
+        # TODO: make this robust to errors!
+        _image_dims = subprocess.Popen(['mincinfo', '-vardims', 'image', input],
+                                       stdout=subprocess.PIPE).communicate()[0].decode().rstrip('\n').rstrip(' ').split(' ')
+
+        _req=['mincinfo']
+        for i in _image_dims:
+            _req.extend(['-dimlength',i,
+                         '-attvalue', '{}:start'.format(i),
+                         '-attvalue', '{}:step'.format(i),
+                         '-attvalue', '{}:direction_cosines'.format(i)])
+        _req.append(input)
+        _info= subprocess.Popen(_req,
+                                stdout=subprocess.PIPE).communicate()[0].decode().rstrip('\n').rstrip(' ').split("\n")
+
+        diminfo=collections.namedtuple('dimension',['length','start','step','direction_cosines'])
+
+        _result={}
+        for i,j in enumerate(_image_dims):
+            _result[j]=diminfo(length=int(_info[i*4]),
+                               start=float(_info[i*4+1]),
+                               step=float(_info[i*4+2]),
+                               direction_cosines=[float(k) for k in _info[i*4+3].rstrip(' ').split(' ') ])
+
+        return _result
+
+    def ants_linear_register(
+        self,
+        source,
+        target,
+        output_xfm,
+        **kwargs
+        ):
+        """perform linear registration with ANTs, obsolete"""
+        return ants_registration.ants_linear_register(source,target,output_xfm,**kwargs)
+
+
+    def linear_register(
+        self,
+        source,
+        target,
+        output_xfm,
+        **kwargs
+        ):
+        """perform linear registration"""
+
+        return registration.linear_register(source,target,output_xfm,**kwargs)
+
+    def linear_register_to_self(
+        self,
+        source,
+        target,
+        output_xfm,
+        **kwargs
+        ):
+        """perform linear registration to self"""
+
+        return registration.linear_register_to_self(source,target,output_xfm,**kwargs)
+
+    def nl_xfm_to_elastix(self , xfm, elastix_par):
+        """Convert MINC style xfm into elastix style registration parameters"""
+        return elastix_registration.nl_xfm_to_elastix(xfm,elastix_par)
+
+    def nl_elastix_to_xfm(self , elastix_par, xfm, **kwargs ):
+        """Convert elastix style parameter file into a nonlinear xfm file"""
+        return elastix_registration.nl_elastix_to_xfm(elastix_par,xfm,**kwargs)
+
+    def register_elastix( self, source, target, **kwargs ):
+        """Perform registration with elastix"""
+        return elastix_registration.register_elastix(source,target,**kwargs)
+
+    def non_linear_register_ants(
+        self, source, target, output_xfm, **kwargs
+        ):
+        """perform non-linear registration using ANTs,
+        WARNING: will also create an inverted xfm named output_invert.xfm
+        """
+        return
ants_registration.non_linear_register_ants(source, target, output_xfm, **kwargs) + + def non_linear_register_ldd( + self, + source, target, + output_velocity, + **kwargs ): + """Use log-diffeomorphic demons to run registration""" + return dd_registration.non_linear_register_ldd(source,target,output_velocity,**kwargs) + + def non_linear_register_full( + self, + source, target, output_xfm, + **kwargs + ): + """perform non-linear registration""" + return registration.non_linear_register_full(source,target,output_xfm,**kwargs) + + def non_linear_register_increment( + self, source, target, output_xfm,** kwargs + ): + """perform incremental non-linear registration""" + return registration.non_linear_register_increment(source, target, output_xfm,** kwargs) + + def resample_smooth( + self, + input, + output, + transform=None, + like=None, + order=4, + uniformize=None, + unistep=None, + invert_transform=False, + resample=None, + datatype=None, + ): + """resample an image, interpreting voxels as intnsities + + Arguments: + input -- input minc file + output -- output minc file + transform -- (optional) transformation file + like -- (optional) reference file for sampling + order -- interpolation order for B-Splines , default 4 + uniformize -- (optional) uniformize volume to have identity direction + cosines and uniform sampling + unistep -- (optional) resample volume to have uniform steps + invert_transform -- invert input transform, default False + resample -- (optional) resample type, variants: + 'sinc', 'linear', 'cubic','nearest' - mincresample + otherwise use itk_resample + datatype -- output minc file data type, variants + 'byte','short','long','float','double' + """ + if os.path.exists(output): + return + + if not resample: + resample = self.resample + if resample == 'sinc': + cmd = ['mincresample', input, output, '-sinc', '-q'] + if transform: + cmd.extend(['-transform', transform]) + if like: + cmd.extend(['-like', like]) + else: + cmd.append('-use_input_sampling') + if invert_transform: + cmd.append('-invert_transform') + if uniformize: + raise mincError('Not implemented!') + if datatype: + cmd.append('-' + datatype) + self.command(cmd, inputs=[input], outputs=[output]) + elif resample == 'linear': + cmd = ['mincresample', input, output, '-trilinear', '-q'] + if transform: + cmd.extend(['-transform', transform]) + if like: + cmd.extend(['-like', like]) + else: + cmd.append('-use_input_sampling') + if invert_transform: + cmd.append('-invert_transform') + if uniformize: + raise mincError('Not implemented!') + if datatype: + cmd.append('-' + datatype) + self.command(cmd, inputs=[input], outputs=[output]) + elif resample == 'cubic': + cmd = ['mincresample', input, output, '-tricubic', '-q'] + if transform: + cmd.extend(['-transform', transform]) + if like: + cmd.extend(['-like', like]) + else: + cmd.append('-use_input_sampling') + if invert_transform: + cmd.append('-invert_transform') + if uniformize: + raise mincError('Not implemented!') + if datatype: + cmd.append('-' + datatype) + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + elif resample == 'nearest': + cmd = ['mincresample', input, output, '-nearest', '-q'] + if transform: + cmd.extend(['-transform', transform]) + if like: + cmd.extend(['-like', like]) + else: + cmd.append('-use_input_sampling') + if invert_transform: + cmd.append('-invert_transform') + if uniformize: + raise mincError('Not implemented!') + if datatype: + cmd.append('-' + datatype) + self.command(cmd, inputs=[input], outputs=[output], 
verbose=self.verbose) + else: + cmd = ['itk_resample', input, output, '--order', str(order)] + if transform: + cmd.extend(['--transform', transform]) + if like: + cmd.extend(['--like', like]) + if invert_transform: + cmd.append('--invert_transform') + if uniformize: + cmd.extend(['--uniformize', str(uniformize)]) + if unistep: + cmd.extend(['--unistep', str(unistep)]) + if datatype: + cmd.append('--' + datatype) + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + def resample_labels( + self, + input, + output, + transform=None, + like=None, + invert_transform=False, + order=None, + datatype=None, + remap=None, + aa=None, + baa=False, + uniformize=None, + unistep=None, + ): + """resample an image with discrete labels""" + if datatype is None: + datatype='byte' + + cmd = ['itk_resample', input, output, '--labels'] + + + if remap is not None: + if isinstance(remap, list): + remap=dict(remap) + + if isinstance(remap, dict): + if any(remap): + _remap="" + for (i,j) in remap.items(): _remap+='{} {};'.format(i,j) + cmd.extend(['--lut-string', _remap ]) + else: + cmd.extend(['--lut-string', str(remap) ]) + if transform is not None: + cmd.extend(['--transform', transform]) + if like is not None: + cmd.extend(['--like', like]) + if invert_transform: + cmd.append('--invert_transform') + if order is not None: + cmd.extend(['--order',str(order)]) + if datatype is not None: + cmd.append('--' + datatype) + if aa is not None: + cmd.extend(['--aa',str(aa)]) + if baa : + cmd.append('--baa') + if uniformize: + cmd.extend(['--uniformize', str(uniformize)]) + if unistep: + cmd.extend(['--unistep', str(unistep)]) + + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + + def resample_smooth_logspace( + self, + input, + output, + velocity=None, + like=None, + order=4, + invert_transform=False, + datatype=None, + ): + """resample an image """ + if os.path.exists(output): + return + + cmd = ['log_resample', input, output, '--order', str(order)] + if velocity: + cmd.extend(['--log_transform', velocity]) + if like: + cmd.extend(['--like', like]) + if invert_transform: + cmd.append('--invert_transform') + if datatype: + cmd.append('--' + datatype) + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + def resample_labels_logspace( + self, + input, + output, + velocity=None, + like=None, + invert_transform=False, + order=None, + datatype=None, + ): + """resample an image with discrete labels""" + if datatype is None: + datatype='byte' + + cmd = ['log_resample', input, output, '--labels'] + + + if velocity is not None: + cmd.extend(['--log_transform', velocity]) + if like is not None: + cmd.extend(['--like', like]) + if invert_transform: + cmd.append('--invert_transform') + if order is not None: + cmd.extend(['--order',str(order)]) + if datatype is not None: + cmd.append('--' + datatype) + + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + + def xfminvert(self, input, output): + """invert transformation""" + + self.command(['xfminvert', input, output], inputs=[input], + outputs=[output],verbose=self.verbose) + + def xfmavg( + self, + inputs, + output, + nl=False, + ): + """average transformations""" + + cmd = ['xfmavg'] + cmd.extend(inputs) + cmd.append(output) + if nl: + cmd.append('-ignore_linear') + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + + def xfmconcat(self, inputs, output): + """concatenate transformations""" + + cmd = ['xfmconcat'] + cmd.extend(inputs) + 
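+        # note: xfmconcat applies the transforms in the order given, i.e. the
+        # first input file is applied first and the last one last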
cmd.append(output) + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + + def xfm_v0_scaling(self, inputs, output): + """concatenate transformations""" + + cmd = ['xfm_v0_scaling.pl'] + cmd.extend(inputs) + cmd.append(output) + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + + def average( + self, + inputs, + output, + sdfile=None, + datatype=None, + ): + """average images""" + + cmd = ['mincaverage', '-q', '-clob'] + cmd.extend(inputs) + cmd.append(output) + + if sdfile: + cmd.extend(['-sdfile', sdfile]) + if datatype: + cmd.append(datatype) + cmd.extend(['-max_buffer_size_in_kb', '1000000', '-copy_header']) + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + def median( + self, + inputs, + output, + madfile=None, + datatype=None, + ): + """average images""" + + cmd = ['minc_median', '--clob'] + cmd.extend(inputs) + cmd.append(output) + + if madfile: + cmd.extend(['--mad', madfile]) + if datatype: + cmd.append(datatype) + + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + + def calc( + self, + inputs, + expression, + output, + datatype=None, + labels=False + ): + """apply mathematical expression to image(s)""" + + cmd = ['minccalc', '-copy_header','-q', '-clob', '-express', expression] + + if datatype: + cmd.append(datatype) + if labels: + cmd.append('-labels') + + cmd.extend(inputs) + cmd.append(output) + + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + def math( + self, + inputs, + operation, + output, + datatype=None, + ): + """apply mathematical operation to image(s)""" + + cmd = ['mincmath', '-q', '-clob', '-copy_header', '-'+operation] + + if datatype: + cmd.append(datatype) + + cmd.extend(inputs) + cmd.append(output) + + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + + def stats(self, input, + stats, mask=None, + mask_binvalue=1, + val_floor=None, + val_ceil=None, + val_range=None, + single_value=True): + args=['mincstats',input,'-q'] + + if isinstance(stats, list): + args.extend(stats) + else: + args.append(stats) + + if mask is not None: + args.extend(['-mask',mask,'-mask_binvalue',str(mask_binvalue)]) + if val_floor is not None: + args.extend(['-floor',str(val_floor)]) + if val_ceil is not None: + args.extend(['-ceil',str(val_ceil)]) + if val_range is not None: + args.extend(['-range',str(val_range[0]),str(val_range[1])]) + + r=self.execute_w_output(args,verbose=self.verbose) + if single_value : + return float(r) + else: + return [float(i) for i in r.split(' ')] + + def similarity(self, reference, sample, ref_mask=None, sample_mask=None,method="msq"): + """Calculate image similarity metric""" + args=['itk_similarity',reference,sample,'--'+method] + + if ref_mask is not None: + args.extend(['--src_mask',ref_mask]) + if sample_mask is not None: + args.extend(['--target_mask',sample_mask]) + + r=self.execute_w_output(args,verbose=self.verbose) + return float(r) + + + def label_similarity(self, reference, sample, method="gkappa"): + """Calculate image similarity metric""" + args=['volume_gtc_similarity',reference, sample,'--'+method] + r=self.execute_w_output(args,verbose=self.verbose) + return float(r) + + def noise_estimate(self, input): + '''Estimate file noise (absolute)''' + args=['noise_estimate',input] + r=self.execute_w_output(args,verbose=self.verbose) + return float(r) + + def snr_estimate(self, input): + '''Estimate file SNR''' + args=['noise_estimate',input,'--snr'] + 
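+        # noise_estimate prints a single number on stdout; with --snr it
+        # reports the signal-to-noise ratio rather than the absolute noise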
r=self.execute_w_output(args,verbose=self.verbose) + return float(r) + + def log_average(self, inputs, output): + """perform log-average (geometric average)""" + tmp = ['log(A[%d])' % i for i in xrange(len(inputs))] + self.calc(inputs, 'exp((%s)/%d)' % ('+'.join(tmp), + len(inputs)), output, datatype='-float') + + + def param2xfm(self, output, scales=None, translation=None, rotations=None, shears=None): + cmd = ['param2xfm','-clobber',output] + + if translation is not None: + cmd.extend(['-translation',str(translation[0]),str(translation[1]),str(translation[2])]) + if rotations is not None: + cmd.extend(['-rotations',str(rotations[0]),str(rotations[1]),str(rotations[2])]) + if scales is not None: + cmd.extend(['-scales',str(scales[0]),str(scales[1]),str(scales[2])]) + if shears is not None: + cmd.extend(['-shears',str(shears[0]),str(shears[1]),str(shears[2])]) + + self.command(cmd, inputs=[], outputs=[output], verbose=self.verbose) + + + + def flip_volume_x(self,input,output, labels=False, datatype=None): + '''flip along x axis''' + if not os.path.exists(self.tmp('flip_x.xfm')): + self.param2xfm(self.tmp('flip_x.xfm'), + scales=[-1.0,1.0,1.0]) + if labels: + self.resample_labels(input,output,order=0,transform=self.tmp('flip_x.xfm'),datatype=datatype) + else: + self.resample_smooth(input,output,order=0,transform=self.tmp('flip_x.xfm'),datatype=datatype) + + + def volume_pol( + self, + source,target, + output, + source_mask=None, + target_mask=None, + order=1, + expfile=None, + datatype=None, + ): + """normalize intensities""" + + if (expfile is None or os.path.exists(expfile) ) and os.path.exists(output): + return + + rm_expfile = False + if not expfile: + expfile = self.temp_file(suffix='.exp') + rm_expfile = True + try: + cmd = ['volume_pol', + source, target, + '--order', str(order), + '--expfile', expfile, + '--noclamp','--clob', ] + if source_mask: + cmd.extend(['--source_mask', source_mask]) + if target_mask: + cmd.extend(['--target_mask', target_mask]) + self.command(cmd, inputs=[source, target], + outputs=[expfile], verbose=self.verbose) + exp = open(expfile).read().rstrip() + cmd = ['minccalc', '-q' ,'-expression', exp, source, output] + if datatype: + cmd.append(datatype) + self.command(cmd, inputs=[source, target], outputs=[output], verbose=self.verbose) + finally: + if rm_expfile and os.path.exists(expfile): + os.unlink(expfile) + + def nuyl_normalize( + self, + source,target, + output, + source_mask=None, + target_mask=None, + linear=False, + steps=10 + ): + """normalize intensities + Arguments: + source - input image + target - reference image + output - output image + + Optional Arguments: + souce_mask - input image mask (used for calculating intensity mapping) + target_mask - reference image mask + linear - use linear intensity model (False) + steps - number of steps in linear-piece-wise approximatation (10) + + """ + cmd = ['minc_nuyl', source, target,'--clob', output,'--steps',str(steps) ] + if source_mask: + cmd.extend(['--source-mask', source_mask]) + if target_mask: + cmd.extend(['--target-mask', target_mask]) + if linear: + cmd.append('--linear') + + self.command(cmd, inputs=[source, target], + outputs=[output], verbose=self.verbose) + + + def nu_correct( + self, + input, + output_imp=None, + output_field=None, + output_image=None, + mask=None, + mri3t=False, + normalize=False, + distance=None, + downsample_field=None, + datatype=None + ): + """apply N3""" + + if (output_image is None or os.path.exists(output_image)) and \ + (output_imp is None or 
os.path.exists(output_imp)) and \ + (output_field is None or os.path.exists(output_field)): + return + + output_imp_ = output_imp + + if output_imp_ is None: + output_imp_ = self.temp_file(suffix='.imp') + + if output_field is not None: + output_field_tmp=self.temp_file(suffix='.mnc') + + output_image_ = output_image + + if not output_image_: + output_image_ = self.temp_file(suffix='nuc.mnc') + + cmd = [ + 'nu_estimate', + '-stop', '0.00001', + '-fwhm', '0.1', + '-iterations','1000', + input, output_imp_, + ] + + if normalize: + cmd.append('-normalize_field') + + if mask is not None: + cmd.extend(['-mask', mask]) + + if distance is not None: + cmd.extend(['-distance', str(distance)]) + elif mri3t: + cmd.extend(['-distance', '50']) + + try: + self.command(cmd, inputs=[input], outputs=[output_imp_], + verbose=self.verbose) + + cmd=['nu_evaluate', + input, '-mapping', + output_imp_, + output_image_] + + if mask is not None: + cmd.extend(['-mask', mask]) + + if output_field is not None: + cmd.extend(['-field', output_field_tmp] ) + + self.command(cmd,inputs=[input], outputs = [output_image_], + verbose=self.verbose) + + if output_field is not None: + self.resample_smooth(output_field_tmp, output_field, datatype=datatype,unistep=downsample_field) + + finally: + if output_imp is None : + os.unlink(output_imp_) + if output_image is None : + os.unlink(output_image_) + + def n4(self, input, + output_corr=None, output_field=None, + mask=None, distance=200, + shrink=None, weight_mask=None, + datatype=None, iter=None, + sharpening=None, threshold=None, + downsample_field=None + ): + + outputs=[] + if output_corr is not None: + outputs.append(output_corr) + + if output_field is not None: + outputs.append(output_field) + + if not self.checkfiles(inputs=[input],outputs=outputs, + verbose=self.verbose): + return + + _out=self.temp_file(suffix='.mnc') + _out_fld=self.temp_file(suffix='.mnc') + + cmd=[ 'N4BiasFieldCorrection', '-d', '3', + '-i', input,'--rescale-intensities', '1', + '--bspline-fitting', str(distance), + '--output','[{},{}]'.format(_out,_out_fld) ] + + if mask is not None: + cmd.extend(['--mask-image',mask]) + if weight_mask is not None: + cmd.extend(['--weight-image',weight_mask]) + if shrink is not None: + cmd.extend(['--shrink-factor',str(shrink)]) + if iter is not None: + if threshold is None: threshold=0.0 + cmd.extend(['--convergence','[{}]'.format( ','.join([str(iter),str(threshold)]) )]) + if sharpening is not None: + cmd.extend(['--histogram-sharpening','[{}]'.format(str(sharpening))]) + self.command(cmd,inputs=[input],outputs=[_out,_out_fld], + verbose=self.verbose) + + if output_corr is not None: + if datatype is not None: + self.reshape(_out, output_corr, datatype=datatype) + os.unlink(_out) + else: + shutil.move(_out, output_corr) + + if output_field is not None: + if downsample_field is not None: + self.resample_smooth(_out_fld, output_field, datatype=datatype,unistep=downsample_field) + else: + if datatype is not None: + self.reshape(_out_fld, output_field, datatype=datatype) + os.unlink(_out_fld) + else: + shutil.move(_out_fld, output_field) + + def difference_n4( + self, + input, + model, + output, + mask=None, + distance=None, + iter=None + ): + + diff = self.temp_file(suffix='.mnc') + _output = self.temp_file(suffix='_out.mnc') + try: + if mask: + self.calc([input, model, mask], + 'A[2]>0.5?A[0]-A[1]+100:0', diff) + else: + self.calc([input, model], + 'A[0]-A[1]+100', diff) + + self.n4(diff, mask=mask, output_field=_output, + distance=distance, + iter=iter) + # fix
, because N4 doesn't preserve dimension order + self.resample_smooth(_output, output, like=diff) + + finally: + os.unlink(diff) + os.unlink(_output) + + + def apply_fld(self,input,fld,output): + '''Apply inhomogeneity correction field''' + _res_fld=self.temp_file(suffix='.mnc') + if not self.checkfiles(inputs=[input],outputs=[output], + verbose=self.verbose): + return + try: + self.resample_smooth(fld, _res_fld, like=input,order=1) + self.calc([input, _res_fld], + 'A[1]>0.0?A[0]/A[1]:A[0]', output) + finally: + os.unlink(_res_fld) + + + + def apply_n3_vol_pol( + self, + input, + model, + output, + source_mask=None, + target_mask=None, + bias=None, + ): + + intermediate = input + try: + if bias: + intermediate = self.temp_file(suffix='.mnc') + self.calc([input, bias], + 'A[1]>0.5&&A[1]<1.5?A[0]/A[1]:A[0]', + intermediate, datatype='-float') + self.volume_pol( + intermediate, + model, + output, + source_mask=source_mask, + target_mask=target_mask, + datatype='-short', + ) + finally: + if bias: + os.unlink(intermediate) + + def difference_n3( + self, + input, + model, + output, + mask=None, + mri3t=False, + distance=None, + normalize=True, + ): + + diff = self.temp_file(suffix='.mnc') + + try: + if mask: + self.calc([input, model, mask], + 'A[2]>0.5?A[0]-A[1]+100:0', diff) + else: + self.calc([input, model], + 'A[0]-A[1]+100', diff) + + self.nu_correct(diff, mask=mask, output_field=output, + mri3t=mri3t, distance=distance, + normalize=normalize) + finally: + os.unlink(diff) + + + def xfm_normalize( + self, input, + like, output, + step=None, + exact=False, + invert=False, + ): + + # TODO: convert xfm_normalize.pl to python + cmd = ['xfm_normalize.pl', input, '--like', like, output] + if step: + cmd.extend(['--step', str(step)]) + if exact: + cmd.extend(['--exact']) + if invert: + cmd.extend(['--invert']) + + self.command(cmd, inputs=[input, like], outputs=[output], verbose=self.verbose) + + + def xfm_noscale(self, input, output, unscale=None): + """remove scaling from linear part of XFM""" + + scale = self.temp_file(suffix='scale.xfm') + _unscale=unscale + if unscale is None: + _unscale = self.temp_file(suffix='unscale.xfm') + try: + (out, err) = subprocess.Popen(['xfm2param', input], + stdout=subprocess.PIPE).communicate() + scale_ = filter(lambda x: re.match('^\-scale', x), + out.decode().split('\n')) + if len(scale_) != 1: + raise mincError("Can't extract scale from " + input) + scale__ = re.split('\s+', scale_[0]) + cmd = ['param2xfm'] + cmd.extend(scale__) + cmd.extend([scale]) + self.command(cmd, verbose=self.verbose) + self.xfminvert(scale, _unscale) + self.xfmconcat([input, _unscale], output) + finally: + if os.path.exists(scale): + os.unlink(scale) + if unscale!=_unscale and os.path.exists(_unscale): + os.unlink(_unscale) + + + def blur( + self, + input, + output, + fwhm, + gmag=False, + dx=False, + dy=False, + dz=False, + output_float=False, + ): + """Apply Gaussian blurring to the input image""" + + cmd = ['fast_blur', input, output, '--fwhm', str(fwhm)] + if gmag: + cmd.append('--gmag') + if dx: + cmd.append('--dx') + if dy: + cmd.append('--dy') + if dz: + cmd.append('--dz') + if output_float: + cmd.append('--float') + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + def blur_orig( + self, + input, + output, + fwhm, + gmag=False, + dx=False, + dy=False, + dz=False, + output_float=False, + ): + """Apply Gaussian blurring to the input image""" + with temp_files() as tmp: + p=tmp.tmp('blur_orig') + cmd = ['mincblur', input, p, '-fwhm',
str(fwhm),'-no_apodize'] + if gmag: + cmd.append('-gradient') + if output_float: + cmd.append('-float') + self.command(cmd, inputs=[input], outputs=[p+'_blur.mnc'], verbose=2) + + if gmag: + shutil.move(p+'_dxyz.mnc',output) + else: + shutil.move(p+'_blur.mnc',output) + + + def blur_vectors( + self, + input, + output, + fwhm, + gmag=False, + output_float=False, + dim=3 + ): + """Apply Gaussian blurring to the input vector field""" + + if not self.checkfiles(inputs=[input], outputs=[output], + verbose=self.verbose): + return + + with temp_files() as tmp: + b=[] + dimorder=self.query_dimorder(input) + for i in range(dim): + self.reshape(input,tmp.tmp(str(i)+'.mnc'),dimrange='vector_dimension={}'.format(i)) + self.blur(tmp.tmp(str(i)+'.mnc'),tmp.tmp('blur_'+str(i)+'.mnc'),fwhm=fwhm,output_float=output_float,gmag=gmag) + b.append(tmp.tmp('blur_'+str(i)+'.mnc')) + # assemble + cmd=['mincconcat','-concat_dimension','vector_dimension','-quiet'] + cmd.extend(b) + cmd.append(tmp.tmp('output.mnc')) + self.command(cmd,inputs=b,outputs=[],verbose=self.verbose) + self.command(['mincreshape','-dimorder',','.join(dimorder),tmp.tmp('output.mnc'),output,'-quiet'], + inputs=[],outputs=[output],verbose=self.verbose) + # done + + + def nlm(self, + input,output, + beta=0.7, + patch=3, + search=1, + sigma=None, + ): + + if sigma is None: + sigma=self.noise_estimate(input) + + cmd=['itk_minc_nonlocal_filter', + input, output, + #'--beta', str(beta), + '--patch',str(patch), + '--search',str(search) + ] + + cmd.extend(['--sigma',str(sigma*beta)]) + + self.command(cmd, + inputs=[input], outputs=[output], verbose=self.verbose) + + + def anlm(self, + input, output, + beta=0.7, + patch=None, + search=None, + regularize=None, + ): + cmd=['itk_minc_nonlocal_filter', '--clobber', '--anlm', + input, output,'--beta', str(beta),] + + if patch is not None: cmd.extend(['--patch', str(patch)] ) + if search is not None: cmd.extend(['--search', str(search)] ) + if regularize is not None: cmd.extend(['--regularize',str(regularize)]) + + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + + def qc( + self, + input, + output, + image_range=None, + mask=None, + mask_range=None, + title=None, + labels=False, + labels_mask=False, + spectral_mask=False, + big=False, + clamp=False, + bbox=False, + dicrete=False, + dicrete_mask=False, + red=False, + green_mask=False, + cyanred=False, + cyanred_mask=False, + mask_lut=None + ): + + cmd = ['minc_qc.pl', input, output, '--verbose'] + + if image_range is not None: + cmd.extend(['--image-range', str(image_range[0]), + str(image_range[1])]) + if mask is not None: + cmd.extend(['--mask', mask]) + if mask_range is not None: + cmd.extend(['--mask-range', str(mask_range[0]), + str(mask_range[1])]) + if title is not None: + cmd.extend(['--title', title]) + if labels: + cmd.append('--labels') + if labels_mask: + cmd.append('--labels-mask') + if spectral_mask: + cmd.append('--spectral-mask') + if big: + cmd.append('--big') + if clamp: + cmd.append('--clamp') + if bbox: + cmd.append('--bbox') + if dicrete: + cmd.append('--discrete') + if dicrete_mask: + cmd.append('--discrete-mask') + if red: + cmd.append('--red') + if green_mask: + cmd.append('--green-mask') + if cyanred: + cmd.append('--cyanred') + if cyanred_mask: + cmd.append('--cyanred-mask') + if mask_lut is not None: + cmd.extend(['--mask-lut',mask_lut]) + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + +
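+    # A minimal usage sketch of the wrappers above (illustrative only;
+    # the file names are hypothetical and not part of this module):
+    #
+    #   with mincTools(verbose=2) as m:
+    #       m.blur('scan.mnc', 'scan_blur.mnc', fwhm=4.0)     # fast_blur
+    #       snr = m.snr_estimate('scan.mnc')                  # noise_estimate --snr
+    #       m.qc('scan_blur.mnc', 'scan_blur.jpg',
+    #            title='SNR %.1f' % snr, big=True)            # minc_qc.pl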
def aqc( + self, + input, + output_prefix, + slices=3 + ): + + cmd = ['minc_aqc.pl', input, output_prefix, '--slices', str(slices) ] + + self.command(cmd, inputs=[input], outputs=[output_prefix+'_0.jpg'], verbose=self.verbose) + + + def grid_determinant( + self, + input, + output, + datatype=None + ): + cmd=['grid_proc','--det',input,output] + if datatype is not None: + cmd.append('--'+datatype) + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + def grid_2_log( + self, + input, + output, + datatype=None, + exp=False, + factor=None, + ): + cmd=['grid_2_log',input,output] + if datatype is not None: + cmd.append('--'+datatype) + if exp: + cmd.append('--exp') + if factor is not None: + cmd.extend(['--factor',str(factor)]) + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + def grid_magnitude( + self, + input, + output, + datatype=None + ): + cmd=['grid_proc','--mag',input,output] + if datatype is not None: + cmd.append('--'+datatype) + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + def reshape( + self, + input, + output, + normalize=False, + datatype=None, + image_range=None, + valid_range=None, + dimorder=None, + signed=False, + unsigned=False, + dimrange=None + ): + """reshape minc files, #TODO add more options to fully support mincreshape""" + if signed and unsigned: + raise mincError('Attempt to reshape file to have both signed and unsigned datatype') + cmd = ['mincreshape', input, output, '-q'] + if image_range: + cmd.extend(['-image_range', str(image_range[0]), + str(image_range[1])]) + if valid_range: + cmd.extend(['-valid_range', str(valid_range[0]), + str(valid_range[1])]) + if dimorder: + cmd.extend(['-dimorder', ','.join(dimorder)]) + if datatype: + cmd.append('-' + datatype) + if normalize: + cmd.append('-normalize') + if signed: + cmd.append('-signed') + if unsigned: + cmd.append('-unsigned') + if dimrange is not None: + if type(dimrange) is list: + [ cmd.extend(['-dimrange',i]) for i in dimrange ] + else: + cmd.extend(['-dimrange',dimrange]) + + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + def split_labels(self, input, output_prefix, normalize=True, lut=None, aa=True, expit=4.0): + '''split a multilabel file into a set of files, possibly with an anti-aliasing filter applied''' + output_file_pattern=output_prefix+'_%03d.mnc' + cmd=['itk_split_labels',input,output_file_pattern] + if aa : cmd.append('--aa') + if expit > 0: cmd.extend(['--expit',str(expit)]) + if normalize: cmd.append('--normalize') + if lut is not None: + if isinstance(lut, list): + lut=dict(lut) + + if isinstance(lut, dict): + if any(lut): + _lut="" + for (i,j) in lut.items(): _lut+='{} {};'.format(i,j) + cmd.extend(['--lut-string', _lut ]) + else: + cmd.extend(['--lut-string', str(lut) ]) + # TODO: figure out how to effectively predict output file names + # and report it to the calling program + out_=self.execute_w_output(cmd).split("\n") + return dict( [int(i[0]),i[1]] for i in [ j.split(',') for j in out_ if len(j)>0 ] ) + + def merge_labels(self,input,output): + '''merge labels using voting''' + _csv=self.temp_file(suffix='merge.csv') + try: + data_type='--byte' + inputs=[_csv] + with open(_csv,'w') as f: + for (i,j) in input.items(): + f.write("{},{}\n".format(i,j)) + inputs.append(j) + if int(i)>255: data_type='--short' + + cmd=['itk_merge_labels', '--csv', _csv, output, '--clob', data_type] + self.command(cmd,inputs=inputs,outputs=[output]) + finally: +
os.unlink(_csv) + + def label_stats(self, input, + bg=False, + label_defs=None, + volume=None, + median=False, + mask=None): + ''' calculate label statistics : label_id, volume, mx, my, mz,[mean/median] ''' + _label_file=label_defs + cmd=['itk_label_stats',input] + if bg: cmd.append('--bg') + if label_defs is not None: + if isinstance(label_defs, list) : + _label_file=self.temp_file(suffix='.csv') + with open(_label_file,'w') as f: + for i in label_defs: + f.write("{},{}\n".format(i[0],i[1])) + elif isinstance(label_defs, dict) : + _label_file=self.temp_file(suffix='.csv') + with open(_label_file,'w') as f: + for i, j in label_defs.iteritems(): + f.write("{},{}\n".format(i,j)) + + cmd.extend(['--labels',_label_file]) + if volume is not None: + cmd.extend(['--volume',volume]) + if median: + cmd.append('--median') + + if mask is not None: + cmd.extend(['--mask',mask]) + + _out=self.execute_w_output(cmd).split("\n") + _out.pop(0) # remove header + if _label_file != label_defs: + os.unlink(_label_file) + out=[] + + if label_defs is not None: + out=[ [ ( float(j) if k>0 else j ) for k,j in enumerate(i.split(',')) ] for i in _out if len(i)>0 ] + else: + out=[ [ ( float(j) if k>0 else int(j) ) for k,j in enumerate(i.split(',')) ] for i in _out if len(i)>0 ] + return out + + def skullregistration( + self, + source, + target, + source_mask, + target_mask, + output_xfm, + init_xfm=None, + stxtemplate_xfm=None, + ): + """perform linear registration based on the skull segmentation""" + + temp_dir = self.temp_dir(prefix='skullregistration') + os.sep + fit = '-xcorr' + try: + if init_xfm: + resampled_source = temp_dir + 'resampled_source.mnc' + resampled_source_mask = temp_dir \ + + 'resampled_source_mask.mnc' + self.resample_smooth(source, resampled_source, + like=target, transform=init_xfm) + self.resample_labels(source_mask, + resampled_source_mask, like=target, + transform=init_xfm) + source = resampled_source + source_mask = resampled_source_mask + if stxtemplate_xfm: + resampled_target = temp_dir + 'resampled_target.mnc' + resampled_target_mask = temp_dir \ + + 'resampled_target_mask.mnc' + self.resample_smooth(target, resampled_target, + transform=stxtemplate_xfm) + self.resample_labels(target_mask, + resampled_target_mask, + transform=stxtemplate_xfm) + target = resampled_target + target_mask = resampled_target_mask + + self.command(['itk_morph', '--exp', 'D[3]', source_mask, + temp_dir + 'dilated_source_mask.mnc'], verbose=self.verbose) + self.calc([temp_dir + 'dilated_source_mask.mnc', source], + 'A[0]<=0.1 && A[0]>=-0.1 ? A[1]:0', temp_dir + + 'non_brain_source.mnc' ) + self.command(['mincreshape', '-dimrange', 'zspace=48,103', + temp_dir + 'non_brain_source.mnc', temp_dir + + 'non_brain_source_crop.mnc'], verbose=self.verbose ) + self.command(['itk_morph', '--exp', 'D[3]', target_mask, + temp_dir + 'dilated_target_mask.mnc'], verbose=self.verbose) + self.calc([temp_dir + 'dilated_target_mask.mnc', target], + 'A[0]<=0.1 && A[0]>=-0.1 ? 
A[1]:0', temp_dir + + 'non_brain_target.mnc') + self.command(['mincreshape', '-dimrange', 'zspace=48,103', + temp_dir + 'non_brain_target.mnc', temp_dir + + 'non_brain_target_crop.mnc'], verbose=self.verbose ) + self.command([ + 'bestlinreg_s2', + '-clobber', '-lsq12', source, target, + temp_dir + '1.xfm', + ], verbose=self.verbose ) + self.command([ + 'minctracc', + '-quiet','-clobber', + fit, + '-step', '2', '2', '2', + '-simplex','1', + '-lsq12', + '-model_mask', target_mask, + source, + target, + temp_dir + '2.xfm', + '-transformation', temp_dir + '1.xfm', + ], verbose=self.verbose) + + self.command([ + 'minctracc', + '-quiet','-clobber', + fit, + '-step', '2', '2','2', + '-simplex', '1', + '-lsq12','-transformation', temp_dir + '2.xfm', + temp_dir + 'non_brain_source_crop.mnc', + temp_dir + 'non_brain_target_crop.mnc', + temp_dir + '3.xfm', + ], verbose=self.verbose) + + self.command([ + 'minctracc', + '-quiet', '-clobber', + fit, + '-step', '2', '2', '2', + '-transformation', + temp_dir + '3.xfm', + '-simplex', '1', + '-lsq12', + '-w_scales', '0', '0', '0', + '-w_shear', '0', '0', '0', + '-model_mask', target_mask, + source, + target, + temp_dir + '4.xfm', + ], verbose=self.verbose) + + if init_xfm: + self.command(['xfmconcat', init_xfm, temp_dir + '4.xfm' + , output_xfm, '-clobber'], verbose=self.verbose) + else: + shutil.move(temp_dir + '4.xfm', output_xfm) + finally: + shutil.rmtree(temp_dir) + + def binary_morphology(self, source, expression, target , binarize_bimodal=False, binarize_threshold=None): + cmd=['itk_morph',source,target] + if expression is not None and expression!='': + cmd.extend(['--exp',expression]) + if binarize_bimodal: + cmd.append('--bimodal') + elif binarize_threshold is not None : + cmd.extend(['--threshold',str(binarize_threshold) ]) + self.command(cmd,inputs=[source],outputs=[target], verbose=2) + + def grayscale_morphology(self, source, expression, target ): + cmd=['itk_g_morph',source,'--exp',expression,target] + self.command(cmd,inputs=[source],outputs=[target], verbose=self.verbose) + + + def patch_norm(self, input, output, + index=None, db=None, threshold=0.0, + spline=None, median=None, field=None, + subsample=2, iterations=None ): + + cmd=['flann_patch_normalize.pl',input,output] + if index is None or db is None: + raise mincError("patch normalize need index and db") + cmd.extend(['--db',db,'--index',index]) + + if median is not None: + cmd.extend(['--median',str(median)]) + + if spline is not None: + cmd.extend(['--spline',str(spline)]) + + if iterations is not None: + cmd.extend(['--iter',str(iterations)]) + + cmd.extend(['--subsample',str(subsample)]) + + if field is not None: + cmd.extend(['--field',field]) + + self.command(cmd,inputs=[input],outputs=[output], verbose=self.verbose) + + def autocrop(self,input,output, + isoexpand=None,isoextend=None): + # TODO: repimplement in python + cmd=['autocrop',input,output] + if isoexpand: cmd.extend(['-isoexpand',str(isoexpand)]) + if isoextend: cmd.extend(['-isoextend',str(isoextend)]) + self.command(cmd,inputs=[input],outputs=[output], verbose=self.verbose) + + + def run_mincbeast(self, input_scan, output_mask, + beast_lib=None, beast_conf=None, beast_res=2): + if beast_lib is None: + raise mincError('mincbeast needs location of library') + if beast_conf is None: + beast_conf=beast_lib+os.sep+'default.{}mm.conf'.format(beast_res) + + + cmd = [ + 'mincbeast', + beast_lib, + input_scan, + output_mask, + '-median', + '-fill', + '-conf', + beast_conf, + '-same_resolution'] + + 
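+        # With the defaults above, beast_conf resolves to
+        # <beast_lib>/default.2mm.conf for beast_res=2; a typical call
+        # (illustrative, with hypothetical paths) would be:
+        #   m.run_mincbeast('t1_stx.mnc', 'brain_mask.mnc',
+        #                   beast_lib='/opt/beast-library-1.1')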
self.command(cmd,inputs=[input_scan],outputs=[output_mask], verbose=2) + + + + def classify_clean( + self, input_scans, output_cls, + mask=None, xfm=None, model_dir=None, model_name=None + ): + """ + run classify_clean + """ + # TODO reimplement in python? + + cmd = ['classify_clean', '-clean_tags'] + + cmd.extend(input_scans) + + if mask is not None: cmd.extend(['-mask',mask,'-mask_tag','-mask_classified']) + if xfm is not None: cmd.extend(['-tag_transform',xfm]) + + if model_dir is not None and model_name is not None: + cmd.extend([ + '-tagdir', model_dir, + '-tagfile', "{}_ntags_1000_prob_90_nobg.tag".format(model_name), + '-bgtagfile', "{}_ntags_1000_bg.tag".format(model_name) + ]) + cmd.append(output_cls) + self.command(cmd,inputs=input_scans,outputs=[output_cls], verbose=self.verbose) + + def lobe_segment(self,in_cls,out_lobes, + nl_xfm=None,lin_xfm=None, + atlas_dir=None,template=None): + """ + Run lobe_segment script + """ + # TODO convert to python + identity=self.tmp('identity.xfm') + self.param2xfm(identity) + + if nl_xfm is None: + nl_xfm=identity + if lin_xfm is None: + lin_xfm=identity + + # TODO: setup sensible defaults here? + if atlas_dir is None or template is None: + raise mincError('lobe_segment needs atlas_dir and template') + + cmd = [ + 'lobe_segment', + nl_xfm, + lin_xfm, + in_cls, + out_lobes, + '-modeldir', atlas_dir, + '-template', template, + ] + + self.command(cmd, inputs=[in_cls],outputs=[out_lobes], verbose=self.verbose) + + def xfm2param(self, input): + """extract transformation parameters""" + + out=self.execute_w_output(['xfm2param', input]) + + params_=[ [ float(k) if s>0 else k for s,k in enumerate(re.split('\s+', l))] for l in out.decode().split('\n') if re.match('^\-', l) ] + + return { k[0][1:] :[k[1],k[2],k[3]] for k in params_ } + + + def defrag(self,input,output,stencil=6,max_connect=None,label=1): + cmd = [ + 'mincdefrag', + input,output, str(label),str(stencil) + ] + if max_connect is not None: + cmd.append(str(max_connect)) + self.command(cmd, inputs=[input],outputs=[output], verbose=self.verbose) + + def winsorize_intensity(self,input,output,pct1=1,pct2=95): + # obtain percentile + _threshold_1=self.stats(input,['-pctT',str(pct1)]) + _threshold_2=self.stats(input,['-pctT',str(pct2)]) + self.calc([input],"clamp(A[0],{},{})".format(_threshold_1,_threshold_2),output) + + +if __name__ == '__main__': + pass + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80 diff --git a/ipl/model/__init__.py b/ipl/model/__init__.py new file mode 100644 index 0000000..7076517 --- /dev/null +++ b/ipl/model/__init__.py @@ -0,0 +1,5 @@ +# model generations + + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model/filter.py b/ipl/model/filter.py new file mode 100644 index 0000000..d920b91 --- /dev/null +++ b/ipl/model/filter.py @@ -0,0 +1,505 @@ +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + + +def generate_flip_sample(input): + '''generate flipped version of sample''' + with mincTools() as m: + m.flip_volume_x(input.scan,input.scan_f) + + if input.mask is not None: + m.flip_volume_x(input.mask,input.mask_f,labels=True) + + return True + + +def normalize_sample(input, + output, + model, + bias_field=None, + ): + """Normalize sample intensity""" + + with mincTools() as m: + m.apply_n3_vol_pol( + input.scan, + model.scan, + output.scan, + 
source_mask=input.mask, + target_mask=model.mask, + bias=bias_field, + ) + output.mask=input.mask + return output + + +def average_samples( + samples, + output, + output_sd=None, + symmetric=False, + symmetrize=False, + median=False + ): + """average individual samples""" + try: + with mincTools() as m: + avg = [] + + out_scan=output.scan + out_mask=output.mask + + if symmetrize: + out_scan=m.tmp('avg.mnc') + out_mask=m.tmp('avg_mask.mnc') + + for s in samples: + avg.append(s.scan) + + if symmetric: + for s in samples: + avg.append(s.scan_f) + out_sd=None + + if output_sd: + out_sd=output_sd.scan + + if median: + m.median(avg, out_scan,madfile=out_sd) + else: + m.average(avg, out_scan,sdfile=out_sd) + + if symmetrize: + # TODO: replace flipping of averages with averaging of flipped + # some day + m.flip_volume_x(out_scan,m.tmp('flip.mnc')) + m.average([out_scan,m.tmp('flip.mnc')],output.scan) + + # average masks + if output.mask is not None: + avg = [] + for s in samples: + avg.append(s.mask) + + if symmetric: + for s in samples: + avg.append(s.mask_f) + + if not os.path.exists(output.mask): + + if symmetrize: + m.average(avg,m.tmp('avg_mask.mnc'),datatype='-float') + m.flip_volume_x(m.tmp('avg_mask.mnc'),m.tmp('flip_avg_mask.mnc')) + m.average([m.tmp('avg_mask.mnc'),m.tmp('flip_avg_mask.mnc')],m.tmp('sym_avg_mask.mnc'),datatype='-float') + + m.calc([m.tmp('sym_avg_mask.mnc')],'A[0]>=0.5?1:0',m.tmp('sym_avg_mask_.mnc'),datatype='-byte') + m.reshape(m.tmp('sym_avg_mask_.mnc'),output.mask,image_range=[0,1],valid_range=[0,1]) + else: + m.average(avg,m.tmp('avg_mask.mnc'),datatype='-float') + m.calc([m.tmp('avg_mask.mnc')],'A[0]>=0.5?1:0',m.tmp('avg_mask_.mnc'),datatype='-byte') + m.reshape(m.tmp('avg_mask_.mnc'),output.mask,image_range=[0,1],valid_range=[0,1]) + + + + + return True + except mincError as e: + print "Exception in average_samples:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_samples:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def average_stats( + avg, + sd, + ): + """calculate median sd within mask""" + try: + st=0 + with mincTools(verbose=2) as m: + if avg.mask is not None: + st=float(m.stats(sd.scan,'-median',mask=avg.mask)) + else: + st=float(m.stats(sd.scan,'-median')) + return st + except mincError as e: + print "mincError in average_stats:{}".format(repr(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_stats:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +def calculate_diff_bias_field(sample, model, output, symmetric=False, distance=100, n4=False ): + try: + with mincTools() as m: + if n4: + if model.mask is not None: + m.difference_n4(sample.scan, model.scan, output.scan, mask=model.mask, distance=distance) + else: + m.difference_n4(sample.scan, model.scan, output.scan, distance=distance ) + if symmetric: + if model.mask is not None: + m.difference_n4(sample.scan_f, model.scan, output.scan_f, mask=model.mask, distance=distance) + else: + m.difference_n4(sample.scan_f, model.scan, output.scan_f, distance=distance ) + else: + if model.mask is not None: + m.difference_n3(sample.scan, model.scan, output.scan, mask=model.mask, distance=distance, normalize=True) + else: + m.difference_n3(sample.scan, model.scan, output.scan, distance=distance, normalize=True ) + if symmetric: + if model.mask is not None: + m.difference_n3(sample.scan_f, model.scan, output.scan_f, mask=model.mask, distance=distance, 
normalize=True) + else: + m.difference_n3(sample.scan_f, model.scan, output.scan_f, distance=distance, normalize=True ) + return True + except mincError as e: + print "mincError in average_stats:{}".format(repr(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_stats:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +def average_bias_fields(samples, output, symmetric=False ): + try: + with mincTools() as m: + + avg = [] + + for s in samples: + avg.append(s.scan) + + if symmetric: + for s in samples: + avg.append(s.scan_f) + + m.log_average(avg, output.scan) + return True + except mincError as e: + print "mincError in average_stats:{}".format(repr(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_stats:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +def resample_and_correct_bias( + sample, + transform, + avg_bias, + output, + previous=None, + symmetric=False, + ): + # resample bias field and apply previous estimate + try: + with mincTools() as m: + + m.calc([sample.scan, avg_bias.scan], + 'A[1]>0.1?A[0]/A[1]:1.0', m.tmp('corr_bias.mnc')) + + m.resample_smooth(m.tmp('corr_bias.mnc'), + m.tmp('corr_bias2.mnc'), + like=sample.scan, + transform=transform.xfm, + invert_transform=True) + if previous: + m.calc([previous.scan, m.tmp('corr_bias2.mnc') ], 'A[0]*A[1]', + output.scan, datatype='-float') + else: + shutil.copy(m.tmp('corr_bias2.mnc'), output.scan) + + if symmetric: + m.calc([sample.scan_f, avg_bias.scan], + 'A[1]>0.1?A[0]/A[1]:1.0', m.tmp('corr_bias_f.mnc')) + + m.resample_smooth(m.tmp('corr_bias_f.mnc'), + m.tmp('corr_bias2_f.mnc'), + like=sample.scan, + transform=transform.xfm, + invert_transform=True) + if previous: + m.calc([previous.scan_f, m.tmp('corr_bias2_f.mnc')], + 'A[0]*A[1]', + output.scan_f, datatype='-float') + else: + shutil.copy(m.tmp('corr_bias2_f.mnc'), output.scan) + + return True + except mincError as e: + print "Exception in resample_and_correct_bias:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in resample_and_correct_bias:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def apply_linear_model( + lin_model, + parameters, + output_volume + ): + """build a volume, for a given regression model and parameters""" + try: + with mincTools() as m: + + if lin_model.N!=len(parameters): + raise mincError("Expected: {} parameters, got {}".format(lin_model.N,len(parameters))) + # create minccalc expression + _exp=[] + for i in range(0,lin_model.N): + _exp.append('A[{}]*{}'.format(i,parameters[i])) + exp='+'.join(_exp) + m.calc(lin_model.volume,exp,output_volume) + return True + except mincError as e: + print( "Exception in apply_linear_model:{}".format(str(e)) ) + traceback.print_exc(file=sys.stdout) + raise + except : + print( "Exception in apply_linear_model:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + + +def build_approximation(int_model, + geo_model , + parameters_int, + parameters_def, + output_scan, + output_transform, + noresample=False): + try: + with mincTools() as m: + + intensity=m.tmp('int_model.mnc') + if noresample: + intensity=output_scan.scan + #geometry=m.tmp('geometry_model.mnc') + + # TODO: paralelelize? 
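+            # apply_linear_model() evaluates the voxel-wise regression
+            # sum_i A[i]*parameters[i] with minccalc; e.g. for N=2 and
+            # parameters_int=[1.0, age] (illustrative values) the intensity
+            # approximation is computed as 'A[0]*1.0+A[1]*age'.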
+ if int_model.N>0: + apply_linear_model(int_model,parameters_int,intensity) + else: # not modelling intensity + intensity=int_model.volume[0] + + # if we have geometry information + if geo_model is not None and geo_model.N>0 : + apply_linear_model(geo_model, parameters_def, output_transform.grid ) + # create appropriate .xfm file + with open(output_transform.xfm,'w') as f: + f.write( +""" +MNI Transform File +Transform_Type = Linear; +Linear_Transform = + 1 0 0 0 + 0 1 0 0 + 0 0 1 0; +Transform_Type = Grid_Transform; +Displacement_Volume = {}; +""".format(os.path.basename(output_transform.grid)) + ) + + if not noresample: + m.resample_smooth(intensity, output_scan.scan, + transform=output_transform.xfm, + like=int_model.volume[0]) + + if int_model.mask is not None: + if noresample: + shutil.copyfile(int_model.mask, + output_scan.mask) + else: + m.resample_labels(int_model.mask, + output_scan.mask, + transform=output_transform.xfm, + like=int_model.volume[0]) + else: + output_scan.mask=None + else: # not modelling shape! + shutil.copyfile(intensity,output_scan.scan) + if int_model.mask is not None: + shutil.copyfile(int_model.mask, + output_scan.mask) + else: + output_scan.mask=None + output_transform=None + return (output_scan, output_transform) + except mincError as e: + print( "Exception in build_approximation:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print( "Exception in build_approximation:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +def voxel_regression(int_design_matrix, + def_design_matrix, + int_estimate, + def_estimate, + next_int_model, + next_def_model, + int_residual, + def_residual, + blur_int_model=None, + blur_def_model=None, + qc=False): + """Perform voxel-wise regression using given design matrix""" + try: + with mincTools() as m: + #print(repr(next_int_model)) + + # a small hack - assume that input directories are the same + _prefix=def_estimate[0].prefix + _design_vel=_prefix+os.sep+'regression_vel.csv' + _design_int=_prefix+os.sep+'regression_int.csv' + + #nomask=False + #for i in for i in int_estimate: + # if i.mask is None: + # nomask=True + _masks=[i.mask for i in int_estimate] + _inputs=[] + _outputs=[] + _outputs.extend(next_int_model.volume) + _outputs.extend(next_def_model.volume) + + with open(_design_vel,'w') as f: + for (i, l ) in enumerate(def_design_matrix): + f.write(os.path.basename(def_estimate[i].grid)) + f.write(',') + f.write(','.join([str(qq) for qq in l])) + f.write("\n") + _inputs.append(def_estimate[i].grid) + + with open(_design_int,'w') as f: + for (i, l ) in enumerate(int_design_matrix): + f.write(os.path.basename(int_estimate[i].scan)) + f.write(',') + f.write(','.join([str(qq) for qq in l])) + f.write("\n") + _inputs.append(int_estimate[i].scan) + + if not m.checkfiles(inputs=_inputs, outputs=_outputs): + return + + int_model=next_int_model + def_model=next_def_model + + if blur_int_model is not None: + int_model=MriDatasetRegress(prefix=m.tempdir, name='model_int',N=next_int_model.N,nomask=(next_int_model.mask is None)) + + if blur_def_model is not None: + def_model=MriDatasetRegress(prefix=m.tempdir,name='model_def', N=next_def_model.N, nomask=(next_def_model.mask is None)) + + + # regress deformations + m.command(['volumes_lm',_design_vel, def_model.volume[0].rsplit('_0.mnc',1)[0]], + inputs=[_design_vel], + outputs=def_model.volume, + verbose=2) + + + # regress intensity + m.command(['volumes_lm',_design_int, int_model.volume[0].rsplit('_0.mnc',1)[0]], + 
inputs=[_design_int], + outputs=int_model.volume, + verbose=2) + + if blur_def_model is not None: + # blur estimates + for (i,j) in enumerate(def_model.volume): + m.blur_vectors(def_model.volume[i],next_def_model.volume[i],blur_def_model) + # a hack preserve unfiltered RMS volume + shutil.copyfile(def_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + next_def_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + + if blur_int_model is not None: + for (i,j) in enumerate(int_model.volume): + m.blur(int_model.volume[i],next_int_model.volume[i],blur_int_model) + # a hack preserve unfiltered RMS volume + shutil.copyfile(int_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + next_int_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + + # average masks + if next_int_model.mask is not None: + m.average(_masks,m.tmp('avg_mask.mnc'),datatype='-float') + m.calc([m.tmp('avg_mask.mnc')],'A[0]>0.5?1:0',m.tmp('avg_mask_.mnc'),datatype='-byte') + m.reshape(m.tmp('avg_mask_.mnc'),next_int_model.mask,image_range=[0,1],valid_range=[0,1]) + + if qc: + m.qc(next_int_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + next_int_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.jpg' ) + + m.grid_magnitude(next_def_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + m.tmp('def_RMS_mag.mnc')) + m.qc(m.tmp('def_RMS_mag.mnc'), + next_def_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.jpg') + + #cleanup + #os.unlink(_design_vel) + #os.unlink(_design_int) + + + except mincError as e: + print( "Exception in voxel_regression:{}".format(str(e)) ) + traceback.print_exc(file=sys.stdout) + raise + except : + print( "Exception in voxel_regression:{}".format(sys.exc_info()[0]) ) + traceback.print_exc(file=sys.stdout) + raise + + +def average_stats_regression( + current_int_model, current_def_model, + int_residual, def_residual, + ): + """calculate median sd within mask for intensity and velocity""" + try: + sd_int=0.0 + sd_def=0.0 + with mincTools(verbose=2) as m: + m.grid_magnitude(def_residual.scan, m.tmp('mag.mnc')) + if current_int_model.mask is not None: + sd_int=float(m.stats(int_residual.scan,'-median',mask=current_int_model.mask)) + m.resample_smooth(m.tmp('mag.mnc'),m.tmp('mag_.mnc'),like=current_int_model.mask) + sd_def=float(m.stats(m.tmp('mag_.mnc'),'-median',mask=current_int_model.mask)) + else: + sd_int=float(m.stats(int_residual.scan,'-median')) + sd_def=float(m.stats(m.tmp('mag.mnc'),'-median')) + + return (sd_int,sd_def) + except mincError as e: + print "mincError in average_stats_regression:{}".format(repr(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_stats_regression:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model/generate_linear.py b/ipl/model/generate_linear.py new file mode 100644 index 0000000..01e4046 --- /dev/null +++ b/ipl/model/generate_linear.py @@ -0,0 +1,321 @@ +import shutil +import os +import sys +import csv +import traceback +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +from ipl.model.structures import MriDataset, MriTransform, MRIEncoder +from ipl.model.filter import generate_flip_sample, normalize_sample +from ipl.model.filter import average_samples,average_stats +from ipl.model.filter import calculate_diff_bias_field,average_bias_fields +from ipl.model.filter import resample_and_correct_bias +from ipl.model.registration import linear_register_step, 
non_linear_register_step +from ipl.model.registration import average_transforms +from ipl.model.resample import concat_resample, concat_resample_nl + +from scoop import futures, shared + + + +def generate_linear_average( + samples, + initial_model=None, + output_model=None, + output_model_sd=None, + prefix='.', + options={} + ): + """ perform iterative model creation""" + + # use first sample as initial model + if not initial_model: + initial_model = samples[0] + + # current estimate of template + current_model = initial_model + current_model_sd = None + transforms=[] + corr=[] + + bias_fields=[] + corr_transforms=[] + corr_samples=[] + sd=[] + + iterations=options.get('iterations',4) + cleanup=options.get('cleanup',False) + symmetric=options.get('symmetric',False) + reg_type=options.get('reg_type','-lsq12') + objective=options.get('objective','-xcorr') + linreg=options.get('linreg',None) + refine=options.get('refine',False) + biascorr=options.get('biascorr',False) + biasdist=options.get('biasdist',100)# default for 1.5T + qc=options.get('qc',False) + downsample=options.get('downsample',None) + use_n4=options.get('N4',False) + use_median=options.get('median',False) + + models=[] + models_sd=[] + models_bias=[] + + if symmetric: + flipdir=prefix+os.sep+'flip' + if not os.path.exists(flipdir): + os.makedirs(flipdir) + + flip_all=[] + # generate flipped versions of all scans + for (i, s) in enumerate(samples): + _s_name=os.path.basename(s.scan).rsplit('.gz',1)[0] + s.scan_f=prefix+os.sep+'flip'+os.sep+_s_name + + if s.mask is not None: + s.mask_f=prefix+os.sep+'flip'+os.sep+'mask_'+_s_name + + flip_all.append( futures.submit( generate_flip_sample,s ) ) + + futures.wait(flip_all, return_when=futures.ALL_COMPLETED) + + # go through all the iterations + for it in xrange(1,iterations+1): + + # this will be a model for next iteration actually + + # 1 register all subjects to current template + next_model =MriDataset(prefix=prefix, iter=it, name='avg') + next_model_sd =MriDataset(prefix=prefix, iter=it, name='sd') + next_model_bias=MriDataset(prefix=prefix, iter=it, name='bias') + + transforms=[] + + it_prefix=prefix+os.sep+str(it) + if not os.path.exists(it_prefix): + os.makedirs(it_prefix) + + inv_transforms=[] + fwd_transforms=[] + for (i, s) in enumerate(samples): + sample_xfm = MriTransform(name=s.name, prefix=it_prefix,iter=it,linear=True) + sample_inv_xfm = MriTransform(name=s.name+'_inv', prefix=it_prefix,iter=it,linear=True) + + prev_transform = None + prev_bias_field = None + + if it > 1 and refine: + prev_transform = corr_transforms[i] + + if it > 1 and biascorr: + prev_bias_field = bias_fields[i] + + + transforms.append( + futures.submit( + linear_register_step, + s, + current_model, + sample_xfm, + output_invert=sample_inv_xfm, + init_xfm=prev_transform, + symmetric=symmetric, + reg_type=reg_type, + objective=objective, + linreg=linreg, + work_dir=prefix, + bias=prev_bias_field, + downsample=downsample) + ) + inv_transforms.append(sample_inv_xfm) + fwd_transforms.append(sample_xfm) + + + # wait for jobs to finish + futures.wait(transforms, return_when=futures.ALL_COMPLETED) + + # remove information from previous iteration + if cleanup and it>1 : + for s in corr_samples: + s.cleanup(verbose=True) + for x in corr_transforms: + x.cleanup(verbose=True) + + # here all the transforms should exist + avg_inv_transform=MriTransform(name='avg_inv', prefix=it_prefix,iter=it,linear=True) + + # 2 average all transformations + result=futures.submit( + average_transforms, inv_transforms, 
avg_inv_transform, nl=False, symmetric=symmetric + # TODO: maybe make median transforms? + ) + futures.wait([result], return_when=futures.ALL_COMPLETED) + + corr=[] + corr_transforms=[] + corr_samples=[] + # 3 concatenate correction and resample + + for (i, s) in enumerate(samples): + prev_bias_field = None + if it > 1 and biascorr: + prev_bias_field = bias_fields[i] + + c=MriDataset( prefix=it_prefix,iter=it,name=s.name) + x=MriTransform(name=s.name+'_corr',prefix=it_prefix,iter=it,linear=True) + + corr.append(futures.submit( + concat_resample, s, fwd_transforms[i], avg_inv_transform, + c, x, current_model, symmetric=symmetric, qc=qc, bias=prev_bias_field + )) + corr_transforms.append(x) + corr_samples.append(c) + futures.wait(corr, return_when=futures.ALL_COMPLETED) + + # cleanup transforms + if cleanup : + for x in inv_transforms: + x.cleanup() + for x in fwd_transforms: + x.cleanup() + avg_inv_transform.cleanup() + + # 4 average resampled samples to create new estimate + result=futures.submit( + average_samples, corr_samples, next_model, next_model_sd, symmetric=symmetric, symmetrize=symmetric,median=use_median + ) + + if cleanup : + # remove previous template estimate + models.append(next_model) + models_sd.append(next_model_sd) + + futures.wait([result], return_when=futures.ALL_COMPLETED) + + if biascorr: + biascorr_results=[] + new_bias_fields=[] + + for (i, s) in enumerate(samples): + prev_bias_field = None + if it > 1: + prev_bias_field = bias_fields[i] + c=corr_samples[i] + x=corr_transforms[i] + b=MriDataset(prefix=it_prefix,iter=it,name='bias_'+s.name) + biascorr_results.append( futures.submit( + calculate_diff_bias_field, + c, next_model, b, symmetric=symmetric, distance=biasdist, + n4=use_n4 + ) ) + new_bias_fields.append(b) + + futures.wait(biascorr_results, return_when=futures.ALL_COMPLETED) + + result=futures.submit( + average_bias_fields, new_bias_fields, next_model_bias, symmetric=symmetric + ) + futures.wait([result], return_when=futures.ALL_COMPLETED) + biascorr_results=[] + new_corr_bias_fields=[] + for (i, s) in enumerate(samples): + prev_bias_field = None + if it > 1: + prev_bias_field = bias_fields[i] + c=corr_samples[i] + x=corr_transforms[i] + b=new_bias_fields[i] + out=MriDataset(prefix=it_prefix,iter=it,name='c_bias_'+s.name) + biascorr_results.append( futures.submit( + resample_and_correct_bias, b, x , next_model_bias, out, previous=prev_bias_field, symmetric=symmetric + ) ) + new_corr_bias_fields.append( out ) + futures.wait(biascorr_results, return_when=futures.ALL_COMPLETED) + + # swap bias fields + if biascorr: bias_fields=new_bias_fields + + current_model=next_model + current_model_sd=next_model_sd + sd.append( futures.submit(average_stats, next_model, next_model_sd ) ) + + # copy output to the destination + futures.wait(sd, return_when=futures.ALL_COMPLETED) + + with open(prefix+os.sep+'stats.txt','w') as f: + for s in sd: + f.write("{}\n".format(s.result())) + + if cleanup: + # keep the final model + models.pop() + models_sd.pop() + + # delete unneeded models + for m in models: + m.cleanup() + for m in models_sd: + m.cleanup() + + results={ + 'model': current_model, + 'model_sd': current_model_sd, + 'xfm': corr_transforms, + 'biascorr': bias_fields, + 'scan': corr_samples, + 'symmetric': symmetric + } + + with open(prefix+os.sep+'results.json','w') as f: + json.dump(results,f,indent=1,cls=MRIEncoder) + + return results + + + +def generate_linear_model(samples,model=None,mask=None,work_prefix=None,options={}): + internal_sample=[] + + try: + for i in 
samples: + s=MriDataset(scan=i[0],mask=i[1]) + internal_sample.append(s) + + internal_model=None + if model is not None: + internal_model=MriDataset(scan=model,mask=mask) + + if work_prefix is not None and not os.path.exists(work_prefix): + os.makedirs(work_prefix) + + return generate_linear_average(internal_sample,internal_model,prefix=work_prefix,options=options) + except mincError as e: + print "Exception in generate_linear_model:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in generate_linear_model:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def generate_linear_model_csv(input_csv,model=None,mask=None,work_prefix=None,options={}): + internal_sample=[] + + with open(input_csv, 'r') as csvfile: + reader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONE) + for row in reader: + internal_sample.append(MriDataset(scan=row[0],mask=row[1])) + + internal_model=None + if model is not None: + internal_model=MriDataset(scan=model,mask=mask) + + if work_prefix is not None and not os.path.exists(work_prefix): + os.makedirs(work_prefix) + + return generate_linear_average(internal_sample,internal_model,prefix=work_prefix,options=options) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model/generate_nonlinear.py b/ipl/model/generate_nonlinear.py new file mode 100644 index 0000000..56f7563 --- /dev/null +++ b/ipl/model/generate_nonlinear.py @@ -0,0 +1,342 @@ +import shutil +import os +import sys +import csv +import traceback +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +from ipl.model.structures import MriDataset, MriTransform,MRIEncoder +from ipl.model.filter import generate_flip_sample, normalize_sample +from ipl.model.filter import average_samples,average_stats +from ipl.model.filter import calculate_diff_bias_field,average_bias_fields +from ipl.model.filter import resample_and_correct_bias + +from ipl.model.registration import linear_register_step +from ipl.model.registration import non_linear_register_step +from ipl.model.registration import dd_register_step +from ipl.model.registration import ants_register_step +from ipl.model.registration import elastix_register_step +from ipl.model.registration import average_transforms +from ipl.model.resample import concat_resample +from ipl.model.resample import concat_resample_nl + +from scoop import futures, shared + + +def generate_nonlinear_average( + samples, + initial_model =None, + output_model =None, + output_model_sd=None, + prefix='.', + options={}, + skip=0, + stop_early=100000 + ): + """ perform iterative model creation""" + + # use first sample as initial model + if not initial_model: + initial_model = samples[0] + + # current estimate of template + current_model = initial_model + current_model_sd = None + + transforms=[] + corr=[] + + bias_fields=[] + corr_transforms=[] + sd=[] + corr_samples=[] + + protocol=options.get('protocol', [{'iter':4,'level':32}, + {'iter':4,'level':32}] ) + + cleanup= options.get('cleanup',False) + symmetric= options.get('symmetric',False) + parameters= options.get('parameters',None) + refine= options.get('refine',True) + qc= options.get('qc',False) + downsample_= options.get('downsample',None) + use_dd= options.get('use_dd',False) + use_ants= options.get('use_ants',False) + use_elastix= options.get('use_elastix',False) + start_level= options.get('start_level',None) + use_median= options.get('median',False) + + 
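+    # An illustrative `options` dictionary matching the keys read above:
+    #   options={'protocol': [{'iter': 4, 'level': 16},
+    #                         {'iter': 4, 'level': 8}],
+    #            'cleanup': True, 'symmetric': False, 'refine': True,
+    #            'qc': True, 'downsample': None, 'use_ants': True,
+    #            'median': False}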
models=[] + models_sd=[] + + if symmetric: + flipdir=prefix+os.sep+'flip' + if not os.path.exists(flipdir): + os.makedirs(flipdir) + + flip_all=[] + # generate flipped versions of all scans + for (i, s) in enumerate(samples): + _s_name=os.path.basename(s.scan).rsplit('.gz',1)[0] + s.scan_f=prefix+os.sep+'flip'+os.sep+_s_name + + if s.mask is not None: + s.mask_f=prefix+os.sep+'flip'+os.sep+'mask_'+_s_name + + flip_all.append( futures.submit( generate_flip_sample,s ) ) + + futures.wait(flip_all, return_when=futures.ALL_COMPLETED) + # go through all the iterations + it=0 + for (i,p) in enumerate(protocol): + downsample=p.get('downsample',downsample_) + for j in xrange(1,p['iter']+1): + it+=1 + if it>stop_early: + break + # this will be a model for next iteration actually + + # 1 register all subjects to current template + next_model=MriDataset(prefix=prefix,iter=it,name='avg') + next_model_sd=MriDataset(prefix=prefix,iter=it,name='sd') + transforms=[] + + it_prefix=prefix+os.sep+str(it) + if not os.path.exists(it_prefix): + os.makedirs(it_prefix) + + inv_transforms=[] + fwd_transforms=[] + + start=None + if it==1: + start=start_level + + for (i, s) in enumerate(samples): + sample_xfm=MriTransform(name=s.name,prefix=it_prefix,iter=it) + sample_inv_xfm=MriTransform(name=s.name+'_inv',prefix=it_prefix,iter=it) + + prev_transform = None + prev_bias_field = None + + if it > 1: + if refine: + prev_transform = corr_transforms[i] + else: + start=start_level # TWEAK? + + if it>skip and itskip and it1 : + # remove information from previous iteration + for s in corr_samples: + s.cleanup(verbose=True) + for x in corr_transforms: + x.cleanup(verbose=True) + + # here all the transforms should exist + avg_inv_transform=MriTransform(name='avg_inv', prefix=it_prefix, iter=it) + + # 2 average all transformations + if it>skip and itskip and itskip and itskip and it1: + # remove previous template estimate + models.append(next_model) + models_sd.append(next_model_sd) + + current_model=next_model + current_model_sd=next_model_sd + + if it>skip and it_eps: # this is non-identity matrix + all_nonlinear&=False + else: + # TODO: if grid have to be inverted! + (grid_file,grid_invert)=x.get_grid_transform(1) + input_grids.append(grid_file) + elif x.get_n_type(1)==minc2_xfm.MINC2_XFM_GRID_TRANSFORM: + # TODO: if grid have to be inverted! 
+ (grid_file,grid_invert)=x.get_grid_transform(0) + input_grids.append(grid_file) + + if all_linear: + acc=np.asmatrix(np.zeros([4,4],dtype=np.complex)) + for i in input_xfms: + print(i) + acc+=scipy.linalg.logm(i) + + acc/=len(input_xfms) + acc=np.asarray(scipy.linalg.expm(acc).real,'float64','C') + + x=minc2_xfm() + x.append_linear_transform(acc) + x.save(output) + + elif all_nonlinear: + + output_grid=output.rsplit('.xfm',1)[0]+'_grid_0.mnc' + + with mincTools(verbose=2) as m: + m.average(input_grids,output_grid) + + x=minc2_xfm() + x.append_grid_transform(output_grid, False) + x.save(output) + else: + raise Exception("Mixed XFM files provided as input") + +def linear_register_step( + sample, + model, + output, + output_invert=None, + init_xfm=None, + symmetric=False, + reg_type='-lsq12', + objective='-xcorr', + linreg=None, + work_dir=None, + bias=None, + downsample=None, + avg_symmetric=True + ): + """perform linear registration to the model, and calculate inverse""" + + try: + _init_xfm=None + _init_xfm_f=None + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + with mincTools() as m: + scan=sample.scan + + if bias is not None: + m.calc([sample.scan,bias.scan],'A[0]*A[1]',m.tmp('corr.mnc')) + scan=m.tmp('corr.mnc') + + if symmetric: + scan_f=sample.scan_f + + if bias is not None: + m.calc([sample.scan_f,bias.scan_f],'A[0]*A[1]',m.tmp('corr_f.mnc')) + scan_f=m.tmp('corr_f.mnc') + + _out_xfm=output.xfm + _out_xfm_f=output.xfm_f + + if avg_symmetric: + _out_xfm=m.tmp('straight.xfm') + _out_xfm_f=m.tmp('flipped.xfm') + + ipl.registration.linear_register( + scan, + model.scan, + _out_xfm, + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + objective=objective, + parameters=reg_type, + conf=linreg, + downsample=downsample, + #work_dir=work_dir + ) + ipl.registration.linear_register( + scan_f, + model.scan, + _out_xfm_f, + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm_f, + objective=objective, + parameters=reg_type, + conf=linreg, + downsample=downsample, + #work_dir=work_dir + ) + + if avg_symmetric: + m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0,1.0,1.0] ) + m.xfmconcat([m.tmp('flip_x.xfm'), _out_xfm_f , m.tmp('flip_x.xfm')], m.tmp('double_flipped.xfm')) + + xfmavg([_out_xfm,m.tmp('double_flipped.xfm')],output.xfm) + m.xfmconcat([m.tmp('flip_x.xfm'), output.xfm , m.tmp('flip_x.xfm')], output.xfm_f ) + + else: + ipl.registration.linear_register( + scan, + model.scan, + output.xfm, + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + objective=objective, + parameters=reg_type, + conf=linreg, + downsample=downsample + #work_dir=work_dir + ) + if output_invert is not None: + m.xfminvert(output.xfm, output_invert.xfm) + + if symmetric: + m.xfminvert(output.xfm_f, output_invert.xfm_f) + + return True + except mincError as e: + print "Exception in linear_register_step:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in linear_register_step:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def non_linear_register_step( + sample, + model, + output, + output_invert=None, + init_xfm=None, + level=32, + start=None, + symmetric=False, + parameters=None, + work_dir=None, + downsample=None, + avg_symmetric=True + ): + """perform linear registration to the model, and calculate inverse""" + + try: + _init_xfm=None + _init_xfm_f=None + + if start is None: + start=level + + if init_xfm is not None: + 
+            _init_xfm=init_xfm.xfm
+            if symmetric:
+                _init_xfm_f=init_xfm.xfm_f
+
+        with mincTools() as m:
+
+            if symmetric:
+
+                if m.checkfiles(inputs=[sample.scan,model.scan,sample.scan_f],
+                                outputs=[output.xfm,output.xfm_f]):
+
+                    ipl.registration.non_linear_register_full(
+                        sample.scan,
+                        model.scan,
+                        m.tmp('forward.xfm'),
+                        source_mask=sample.mask,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm,
+                        parameters=parameters,
+                        level=level,
+                        start=start,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+
+                    ipl.registration.non_linear_register_full(
+                        sample.scan_f,
+                        model.scan,
+                        m.tmp('forward_f.xfm'),
+                        source_mask=sample.mask_f,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm_f,
+                        parameters=parameters,
+                        level=level,
+                        start=start,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+
+                    if avg_symmetric:
+                        m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0,1.0,1.0] )
+                        m.xfmconcat([m.tmp('flip_x.xfm'), m.tmp('forward_f.xfm') , m.tmp('flip_x.xfm')], m.tmp('forward_f_f.xfm'))
+
+                        m.xfm_normalize(m.tmp('forward.xfm'),model.scan,m.tmp('forward_n.xfm'),step=level)
+                        m.xfm_normalize(m.tmp('forward_f_f.xfm'),model.scan,m.tmp('forward_f_f_n.xfm'),step=level)
+
+                        xfmavg([m.tmp('forward_n.xfm'),m.tmp('forward_f_f_n.xfm')],output.xfm)
+                        m.xfmconcat([m.tmp('flip_x.xfm'), output.xfm , m.tmp('flip_x.xfm')], m.tmp('output_f.xfm' ))
+                        m.xfm_normalize(m.tmp('output_f.xfm'),model.scan,output.xfm_f,step=level)
+
+                    else:
+                        m.xfm_normalize(m.tmp('forward.xfm'),model.scan,output.xfm,step=level)
+                        m.xfm_normalize(m.tmp('forward_f.xfm'),model.scan,output.xfm_f,step=level)
+
+            else:
+                if m.checkfiles(inputs=[sample.scan,model.scan],
+                                outputs=[output.xfm]):
+
+                    ipl.registration.non_linear_register_full(
+                        sample.scan,
+                        model.scan,
+                        m.tmp('forward.xfm'),
+                        source_mask=sample.mask,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm,
+                        parameters=parameters,
+                        level=level,
+                        start=start,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+                    m.xfm_normalize(m.tmp('forward.xfm'),model.scan,output.xfm,step=level)
+
+            if output_invert is not None and m.checkfiles(inputs=[], outputs=[output_invert.xfm]):
+                m.xfm_normalize(m.tmp('forward.xfm'),model.scan,output_invert.xfm,step=level,invert=True)
+                if symmetric:
+                    m.xfm_normalize(m.tmp('forward_f.xfm'),model.scan,output_invert.xfm_f,step=level,invert=True)
+
+        return True
+    except mincError as e:
+        print "Exception in non_linear_register_step:{}".format(str(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in non_linear_register_step:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def dd_register_step(
+    sample,
+    model,
+    output,
+    output_invert=None,
+    init_xfm=None,
+    level=32,
+    start=None,
+    symmetric=False,
+    parameters=None,
+    work_dir=None,
+    downsample=None,
+    avg_symmetric=True
+    ):
+    """perform non-linear (diffeomorphic demons) registration to the model, and calculate inverse"""
+
+    try:
+        _init_xfm=None
+        _init_xfm_f=None
+
+        if start is None:
+            start=level
+
+        if init_xfm is not None:
+            _init_xfm=init_xfm.xfm
+            if symmetric:
+                _init_xfm_f=init_xfm.xfm_f
+
+        with mincTools() as m:
+
+            if symmetric:
+
+                if m.checkfiles(inputs=[sample.scan,model.scan,sample.scan_f],
+                                outputs=[output.xfm,output.xfm_f]):
+
+                    ipl.dd_registration.non_linear_register_dd(
+                        sample.scan,
+                        model.scan,
+                        m.tmp('forward.xfm'),
+                        source_mask=sample.mask,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm,
+                        parameters=parameters,
+                        level=level,
+                        start=start,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+
+                    ipl.dd_registration.non_linear_register_dd(
+                        sample.scan_f,
+                        model.scan,
+                        m.tmp('forward_f.xfm'),
+                        source_mask=sample.mask_f,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm_f,
+                        parameters=parameters,
+                        level=level,
+                        start=start,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+
+                    if avg_symmetric:
+                        m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0,1.0,1.0] )
+                        m.xfmconcat([m.tmp('flip_x.xfm'), m.tmp('forward_f.xfm') , m.tmp('flip_x.xfm')], m.tmp('forward_f_f.xfm'))
+
+                        m.xfm_normalize(m.tmp('forward.xfm'),model.scan,m.tmp('forward_n.xfm'),step=level)
+                        m.xfm_normalize(m.tmp('forward_f_f.xfm'),model.scan,m.tmp('forward_f_f_n.xfm'),step=level)
+
+                        xfmavg([m.tmp('forward_n.xfm'),m.tmp('forward_f_f_n.xfm')],output.xfm)
+                        m.xfmconcat([m.tmp('flip_x.xfm'), output.xfm , m.tmp('flip_x.xfm')], m.tmp('output_f.xfm' ))
+                        m.xfm_normalize(m.tmp('output_f.xfm'),model.scan,output.xfm_f,step=level)
+
+                    else:
+                        m.xfm_normalize(m.tmp('forward.xfm'),model.scan,output.xfm,step=level)
+                        m.xfm_normalize(m.tmp('forward_f.xfm'),model.scan,output.xfm_f,step=level)
+
+            else:
+                if m.checkfiles(inputs=[sample.scan,model.scan],
+                                outputs=[output.xfm]):
+
+                    ipl.dd_registration.non_linear_register_dd(
+                        sample.scan,
+                        model.scan,
+                        m.tmp('forward.xfm'),
+                        source_mask=sample.mask,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm,
+                        parameters=parameters,
+                        level=level,
+                        start=start,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+                    m.xfm_normalize(m.tmp('forward.xfm'),model.scan,output.xfm,step=level)
+
+            if output_invert is not None and m.checkfiles(inputs=[], outputs=[output_invert.xfm]):
+                m.xfm_normalize(m.tmp('forward.xfm'),model.scan,output_invert.xfm,step=level,invert=True)
+                if symmetric:
+                    m.xfm_normalize(m.tmp('forward_f.xfm'),model.scan,output_invert.xfm_f,step=level,invert=True)
+
+        return True
+    except mincError as e:
+        print "Exception in dd_register_step:{}".format(str(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in dd_register_step:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+def ants_register_step(
+    sample,
+    model,
+    output,
+    output_invert=None,
+    init_xfm=None,
+    level=32,
+    start=None,
+    symmetric=False,
+    parameters=None,
+    work_dir=None,
+    downsample=None,
+    avg_symmetric=True
+    ):
+    """perform non-linear (ANTs) registration to the model, and calculate inverse"""
+
+    try:
+        _init_xfm=None
+        _init_xfm_f=None
+
+        if start is None:
+            start=level
+
+        if init_xfm is not None:
+            _init_xfm=init_xfm.xfm
+            if symmetric:
+                _init_xfm_f=init_xfm.xfm_f
+
+        with mincTools() as m:
+            out=m.tmp('forward')
+            out_f=m.tmp('forward_f')
+            if symmetric:
+
+                if m.checkfiles(inputs=[sample.scan,model.scan,sample.scan_f],
+                                outputs=[output.xfm, output.xfm_f]):
+
+                    ipl.ants_registration.non_linear_register_ants2(
+                        sample.scan,
+                        model.scan,
+                        out+'.xfm',
+                        source_mask=sample.mask,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm,
+                        parameters=parameters,
+                        level=level,
+                        start=start,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+
+                    ipl.ants_registration.non_linear_register_ants2(
+                        sample.scan_f,
+                        model.scan,
+                        out_f+'.xfm',
+                        source_mask=sample.mask_f,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm_f,
+                        parameters=parameters,
+                        level=level,
+                        start=start,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+
+                    if avg_symmetric:
+                        m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0,1.0,1.0] )
+                        m.xfmconcat([m.tmp('flip_x.xfm'), out_f+'.xfm', m.tmp('flip_x.xfm')], m.tmp('forward_f_f.xfm'))
+
+                        m.xfm_normalize(out+'.xfm', model.scan, m.tmp('forward_n.xfm'),step=level)
+                        m.xfm_normalize(m.tmp('forward_f_f.xfm'),model.scan,m.tmp('forward_f_f_n.xfm'),step=level)
+
+                        xfmavg([m.tmp('forward_n.xfm'),m.tmp('forward_f_f_n.xfm')],output.xfm)
+                        m.xfmconcat([m.tmp('flip_x.xfm'), output.xfm , m.tmp('flip_x.xfm')], m.tmp('output_f.xfm' ))
+                        m.xfm_normalize(m.tmp('output_f.xfm'),model.scan,output.xfm_f,step=level)
+
+                    else:
+                        m.xfm_normalize(out+'.xfm',model.scan,output.xfm,step=level)
+                        m.xfm_normalize(out_f+'.xfm',model.scan,output.xfm_f,step=level)
+
+            else:
+                if m.checkfiles(inputs=[sample.scan,model.scan],
+                                outputs=[output.xfm]):
+
+                    ipl.ants_registration.non_linear_register_ants2(
+                        sample.scan,
+                        model.scan,
+                        out+'.xfm',
+                        source_mask=sample.mask,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm,
+                        parameters=parameters,
+                        level=level,
+                        start=start,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+                    m.xfm_normalize(out+'.xfm',model.scan,output.xfm,step=level)
+
+            if output_invert is not None and m.checkfiles(inputs=[], outputs=[output_invert.xfm]):
+                m.xfm_normalize(out+'_inverse.xfm',model.scan,output_invert.xfm,step=level)
+                if symmetric:
+                    m.xfm_normalize(out_f+'_inverse.xfm',model.scan,output_invert.xfm_f,step=level)
+
+        return True
+    except mincError as e:
+        print "Exception in ants_register_step:{}".format(str(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in ants_register_step:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+def elastix_register_step(
+    sample,
+    model,
+    output,
+    output_invert=None,
+    init_xfm=None,
+    level=32,
+    start=None,
+    symmetric=False,
+    parameters=None,
+    work_dir=None,
+    downsample=None,
+    avg_symmetric=True
+    ):
+    """perform non-linear (elastix) registration to the model, and calculate inverse"""
+
+    try:
+        _init_xfm=None
+        _init_xfm_f=None
+
+        if start is None:
+            start=level
+
+        if init_xfm is not None:
+            _init_xfm=init_xfm.xfm
+            if symmetric:
+                _init_xfm_f=init_xfm.xfm_f
+
+        # setup parameters appropriate for given level
+        elx_parameters=parameters.get(str(level),{})
+        downsample_grid=elx_parameters.get('downsample_grid',level/2.0)
+
+        with mincTools() as m:
+            out=m.tmp('forward')
+            out_f=m.tmp('forward_f')
+            if symmetric:
+
+                if m.checkfiles(inputs=[sample.scan,model.scan,sample.scan_f],
+                                outputs=[output.xfm, output.xfm_f]):
+
+                    ipl.elastix_registration.register_elastix(
+                        sample.scan,
+                        model.scan,
+                        output_xfm=out+'.xfm',
+                        source_mask=sample.mask,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm,
+                        parameters=elx_parameters,
+                        downsample_grid=downsample_grid,
+                        downsample=downsample,
+                        nl=True
+                        )
+
+                    ipl.elastix_registration.register_elastix(
+                        sample.scan_f,
+                        model.scan,
+                        output_xfm=out_f+'.xfm',
+                        source_mask=sample.mask_f,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm_f,
+                        parameters=elx_parameters,
+                        downsample_grid=downsample_grid,
+                        downsample=downsample,
+                        nl=True
+                        )
+
+                    if avg_symmetric:
+                        m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0,1.0,1.0] )
+                        m.xfmconcat([m.tmp('flip_x.xfm'), out_f+'.xfm', m.tmp('flip_x.xfm')], m.tmp('forward_f_f.xfm'))
+
+                        m.xfm_normalize(out+'.xfm', model.scan, m.tmp('forward_n.xfm'),step=level)
+                        m.xfm_normalize(m.tmp('forward_f_f.xfm'),model.scan,m.tmp('forward_f_f_n.xfm'),step=level)
+
+                        xfmavg([m.tmp('forward_n.xfm'),m.tmp('forward_f_f_n.xfm')],output.xfm)
+                        m.xfmconcat([m.tmp('flip_x.xfm'), output.xfm , m.tmp('flip_x.xfm')], m.tmp('output_f.xfm' ))
+                        m.xfm_normalize(m.tmp('output_f.xfm'),model.scan,output.xfm_f,step=level)
+
+                    else:
+                        m.xfm_normalize(out+'.xfm', model.scan,output.xfm, step=level)
+                        m.xfm_normalize(out_f+'.xfm',model.scan,output.xfm_f,step=level)
+
+            else:
+                if m.checkfiles(inputs=[sample.scan,model.scan],
+                                outputs=[output.xfm]):
+
+                    ipl.elastix_registration.register_elastix(
+                        sample.scan,
+                        model.scan,
+                        output_xfm=out+'.xfm',
+                        source_mask=sample.mask,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm,
+                        parameters=elx_parameters,
+                        downsample_grid=downsample_grid,
+                        downsample=downsample,
+                        nl=True
+                        #work_dir=work_dir
+                        )
+                    m.xfm_normalize(out+'.xfm',model.scan,output.xfm,step=level)
+
+            if output_invert is not None and m.checkfiles(inputs=[], outputs=[output_invert.xfm]):
+                m.xfm_normalize(out+'.xfm',model.scan,output_invert.xfm,step=level,invert=True)
+                if symmetric:
+                    m.xfm_normalize(out_f+'.xfm',model.scan,output_invert.xfm_f,step=level,invert=True)
+
+        return True
+    except mincError as e:
+        print "Exception in elastix_register_step:{}".format(str(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in elastix_register_step:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+def average_transforms(
+    samples,
+    output,
+    nl=False,
+    symmetric=False,
+    invert=False
+    ):
+    """average given transformations"""
+    try:
+        with mincTools() as m:
+            avg = []
+            out_xfm=output.xfm
+
+            for i in samples:
+                avg.append(i.xfm)
+
+            if symmetric:
+                for i in samples:
+                    avg.append(i.xfm_f)
+            if invert:
+                out_xfm=m.tmp("average.xfm")
+            xfmavg(avg, out_xfm)
+
+            if invert:
+                m.xfminvert(out_xfm, output.xfm)
+            return True
+    except mincError as e:
+        print "Exception in average_transforms:{}".format(str(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in average_transforms:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+def non_linear_register_step_regress_std(
+    sample,
+    model_int,
+    model_def,
+    output_int,
+    output_def,
+    level=32,
+    start_level=None,
+    parameters=None,
+    work_dir=None,
+    downsample=None,
+    debug=False,
+    previous_def=None,
+    datatype='short',
+    nl_mode='animal'
+    ):
+    """perform non-linear registration to the model, and calculate new estimate"""
+    try:
+
+        with mincTools() as m:
+            if m.checkfiles(inputs=[sample.scan],
+                            outputs=[output_def.xfm]):
+
+                int_approximate = None
+                def_approximate = None
+                def_update      = None
+
+                if debug:
+                    int_approximate = MriDataset( prefix=output_def.prefix,
+                                                  name=output_def.name +'_int_approx',
+                                                  iter=output_def.iter )
+
+                    def_approximate = MriTransform( prefix=output_def.prefix,
+                                                    name=output_def.name +'_approx',
+                                                    iter=output_def.iter )
+
+                    def_update = MriTransform( prefix=output_def.prefix,
+                                               name=output_def.name +'_update',
+                                               iter=output_def.iter )
+                else:
+                    int_approximate = MriDataset( prefix=m.tempdir,
+                                                  name=output_def.name +'_int_approx')
+
+                    def_approximate = MriTransform( prefix=m.tempdir,
+                                                    name=output_def.name +'_approx' )
+
+                    def_update = MriTransform( prefix=m.tempdir,
+                                               name=output_def.name +'_update')
+
+                # A hack! assume that if initial model is MriDataset it means zero regression coeff
+                if isinstance(model_int, MriDataset):
+                    int_approximate=model_int
+                    def_approximate=None
+                else:
+                    (int_approximate, def_approximate) = \
+                        build_approximation(model_int,
+                                            model_def,
+                                            sample.par_int,
+                                            sample.par_def,
+                                            int_approximate,
+                                            def_approximate,
+                                            noresample=False)
+                    if model_def is None:
+                        def_approximate=None
+
+                if start_level is None:
+                    start_level=level
+
+                init_xfm=None
+
+                # we are refining previous estimate
+                if previous_def is not None:
+                    ## have to adjust it based on the current estimate
+                    if def_approximate is not None:
+                        m.xfminvert(def_approximate.xfm, m.tmp('approx_inv.xfm'))
+                        m.xfmconcat([previous_def.xfm, m.tmp('approx_inv.xfm')], m.tmp('init_def.xfm'))
+                        m.xfm_normalize(m.tmp('init_def.xfm'),int_approximate.scan,m.tmp('init.xfm'),step=level)
+                        init_xfm=m.tmp('init.xfm')
+                    else:
+                        init_xfm=previous_def.xfm
+
+                print("level={} start={}".format(level,start_level))
+                print("parameters={}".format(repr(parameters)))
+
+                if nl_mode=='animal':
+                    ipl.registration.non_linear_register_full(
+                        int_approximate.scan,
+                        sample.scan,
+                        def_update.xfm,
+                        source_mask=int_approximate.mask,
+                        target_mask=sample.mask,
+                        init_xfm=init_xfm,
+                        parameters=parameters,
+                        start=start_level,
+                        level=level,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+                elif nl_mode=='ants':
+                    ipl.ants_registration.non_linear_register_ants2(
+                        int_approximate.scan,
+                        sample.scan,
+                        def_update.xfm,
+                        source_mask=int_approximate.mask,
+                        target_mask=sample.mask,
+                        init_xfm=init_xfm,
+                        parameters=parameters,
+                        start=start_level,
+                        level=level,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+                elif nl_mode=='dd':
+                    ipl.dd_registration.non_linear_register_dd(
+                        int_approximate.scan,
+                        sample.scan,
+                        def_update.xfm,
+                        source_mask=int_approximate.mask,
+                        target_mask=sample.mask,
+                        init_xfm=init_xfm,
+                        parameters=parameters,
+                        start=start_level,
+                        level=level,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+                else: #elastix
+                    ipl.elastix_registration.register_elastix(
+                        int_approximate.scan,
+                        sample.scan,
+                        def_update.xfm,
+                        source_mask=int_approximate.mask,
+                        target_mask=sample.mask,
+                        init_xfm=init_xfm,
+                        parameters=parameters,
+                        downsample_grid=level,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+
+
+                # update estimate
+                if def_approximate is not None:
+                    m.xfmconcat([def_approximate.xfm,def_update.xfm],m.tmp('output_def.xfm'))
+                    m.xfm_normalize(m.tmp('output_def.xfm'),int_approximate.scan,output_def.xfm, step=level)
+                else:
+                    m.xfm_normalize(def_update.xfm,int_approximate.scan,output_def.xfm, step=level)
+
+                if output_int is not None:
+                    # resample intensity
+                    m.resample_smooth(sample.scan, output_int.scan,
+                                      transform=output_def.xfm,
+                                      invert_transform=True,
+                                      datatype='-'+datatype
+                                      )
+                    if sample.mask is not None:
+                        m.resample_labels(sample.mask, output_int.mask,
+                                          transform=output_def.xfm,
+                                          invert_transform=True)
+                # done
+
+    except mincError as e:
+        print "Exception in non_linear_register_step_regress_std:{}".format(str(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in non_linear_register_step_regress_std:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/model/regress.py b/ipl/model/regress.py
new file mode 100644
index 0000000..d97b486
--- /dev/null
+++ b/ipl/model/regress.py
@@ -0,0 +1,450 @@
+import shutil
+import os
+import sys
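+# (Editor's note - a sketch of the model this file implements, inferred from
+#  the code below and from filter.py; none of these comments are part of the
+#  original API.)  At every voxel, regress() fits linear models of intensity
+# and of deformation against each subject's covariates:
+#     scan_i(x) ~ sum_k int_coeff_k(x) * par_int_i[k]
+#     def_i(x)  ~ sum_k def_coeff_k(x) * par_def_i[k]
+# The coefficient volumes are wrapped in MriDatasetRegress objects, and
+# build_approximation() evaluates the fitted model at a given parameter
+# vector to synthesize a subject-specific template.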
+import csv
+import traceback
+import json
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+from .structures import MriDataset, MriTransform, MRIEncoder, MriDatasetRegress
+from .filter import generate_flip_sample, normalize_sample
+from .filter import average_samples,average_stats
+from .filter import calculate_diff_bias_field,average_bias_fields
+from .filter import resample_and_correct_bias
+from .filter import build_approximation
+from .filter import average_stats_regression
+from .filter import voxel_regression
+
+from .registration import non_linear_register_step
+from .registration import dd_register_step
+from .registration import ants_register_step
+from .registration import average_transforms
+from .registration import non_linear_register_step_regress_std
+from .resample import concat_resample_nl
+
+from scoop import futures, shared
+
+def regress(
+    samples,
+    initial_model=None,
+    initial_int_model=None,
+    initial_def_model=None,
+    output_int_model=None,
+    output_def_model=None,
+    output_residuals_int=None,
+    output_residuals_def=None,
+    prefix='.',
+    options={}
+    ):
+    """ perform iterative model creation"""
+    try:
+
+        # make sure all input scans have parameters
+        N_int=None
+        N_def=None
+
+        int_design_matrix=[]
+        def_design_matrix=[]
+        nomask=False
+
+        for s in samples:
+
+            if N_int is None:
+                N_int=len(s.par_int)
+            elif N_int!=len(s.par_int):
+                raise mincError("Sample {} has an inconsistent number of int parameters: {}, expected {}".format(repr(s),len(s.par_int),N_int))
+
+            if N_def is None:
+                N_def=len(s.par_def)
+            elif N_def!=len(s.par_def):
+                raise mincError("Sample {} has an inconsistent number of def parameters: {}, expected {}".format(repr(s),len(s.par_def),N_def))
+
+            int_design_matrix.append(s.par_int)
+            def_design_matrix.append(s.par_def)
+
+            if s.mask is None:
+                nomask=True
+
+        #print("Intensity design matrix=\n{}".format(repr(int_design_matrix)))
+        #print("Velocity design matrix=\n{}".format(repr(def_design_matrix)))
+
+        ref_model=None
+        # current estimate of template
+        if initial_model is not None:
+            current_int_model = initial_model
+            current_def_model = None
+            ref_model=initial_model.scan
+        else:
+            current_int_model = initial_int_model
+            current_def_model = initial_def_model
+            ref_model=initial_int_model.volume[0]
+        transforms=[]
+
+        full_transforms=[]
+
+        protocol=options.get(
+            'protocol', [{'iter':4,'level':32, 'blur_int': None, 'blur_def': None },
+                         {'iter':4,'level':16, 'blur_int': None, 'blur_def': None }]
+            )
+
+        cleanup=              options.get('cleanup',False)
+        cleanup_intermediate= options.get('cleanup_intermediate',False)
+
+        parameters=  options.get('parameters',None)
+        refine=      options.get('refine',False)
+        qc=          options.get('qc',False)
+        downsample = options.get('downsample',None)
+        start_level= options.get('start_level',None)
+        debug      = options.get('debug',False)
+        debias     = options.get('debias',True)
+        nl_mode    = options.get('nl_mode','animal')
+
+        if parameters is None:
+            pass
+            #TODO: make sensible parameters?
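+        # (Editor's note) illustrative layout of the 'options' dict, built
+        # from the keys read above; all values shown are hypothetical:
+        #
+        # options={
+        #   'protocol':   [{'iter':4,'level':32,'blur_int':None,'blur_def':None},
+        #                  {'iter':4,'level':16,'blur_int':None,'blur_def':None}],
+        #   'cleanup':    False,
+        #   'refine':     True,
+        #   'qc':         False,
+        #   'downsample': None,
+        #   'start_level':None,
+        #   'debias':     True,
+        #   'nl_mode':    'animal'   # or 'ants', 'dd', 'elastix'
+        # }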
+
+        int_models=[]
+        def_models=[]
+        int_residuals=[]
+        def_residuals=[]
+
+        int_residual=None
+        def_residual=None
+
+        prev_def_estimate=None
+        # go through all the iterations
+        it=0
+        residuals=[]
+
+        for (i,p) in enumerate(protocol):
+            blur_int_model=p.get('blur_int',None)
+            blur_def_model=p.get('blur_def',None)
+            for j in range(1,p['iter']+1):
+                it+=1
+                _start_level=None
+                if it==1:
+                    _start_level=start_level
+                # this will be a model for next iteration actually
+
+                it_prefix=prefix+os.sep+str(it)
+                if not os.path.exists(it_prefix):
+                    os.makedirs(it_prefix)
+
+                next_int_model=MriDatasetRegress(prefix=prefix, name='model_int', iter=it, N=N_int, nomask=nomask)
+                next_def_model=MriDatasetRegress(prefix=prefix, name='model_def', iter=it, N=N_def, nomask=True)
+                print("next_int_model={}".format( next_int_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') )
+
+                int_residual=MriDataset(prefix=prefix, scan=next_int_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc')
+                    #name=next_int_model.name, iter=it )
+
+                def_residual=MriDataset(prefix=prefix, scan=next_def_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc')
+                    #name=next_def_model.name, iter=it )
+
+                # skip over existing models here!
+
+                if not next_int_model.exists() or \
+                   not next_def_model.exists() or \
+                   not int_residual.exists() or \
+                   not def_residual.exists():
+
+                    int_estimate=[]
+                    def_estimate=[]
+                    r=[]
+
+
+                    # 1. for each sample generate current approximation
+                    # 2. perform non-linear registration between each sample and sample-specific approximation
+                    # 3. update transformation
+                    # 1+2+3 - all together
+                    for (i, s) in enumerate(samples):
+                        sample_def= MriTransform(name=s.name,prefix=it_prefix,iter=it)
+                        sample_int= MriDataset(name=s.name, prefix=it_prefix,iter=it)
+
+                        previous_def=None
+
+                        if refine and it>1:
+                            previous_def=prev_def_estimate[i]
+
+                        r.append(
+                            futures.submit(
+                                non_linear_register_step_regress_std,
+                                s,
+                                current_int_model,
+                                current_def_model,
+                                None,
+                                sample_def,
+                                parameters=parameters,
+                                level=p['level'],
+                                start_level=_start_level,
+                                work_dir=prefix,
+                                downsample=downsample,
+                                debug=debug,
+                                previous_def=previous_def,
+                                nl_mode=nl_mode
+                                )
+                            )
+                        def_estimate.append(sample_def)
+                        #int_estimate.append(sample_int)
+
+                    # wait for jobs to finish
+                    futures.wait(r, return_when=futures.ALL_COMPLETED)
+                    avg_inv_transform=None
+
+                    if debias:
+                        # here all the transforms should exist
+                        avg_inv_transform=MriTransform(name='avg_inv',prefix=it_prefix,iter=it)
+                        # 2. average all transformations
+                        average_transforms(def_estimate, avg_inv_transform, symmetric=False, invert=True,nl=True)
+
+                    corr=[]
+                    corr_transforms=[]
+                    corr_samples=[]
+
+                    # 3. concatenate correction and resample
+                    for (i, s) in enumerate(samples):
+                        c=MriDataset(prefix=it_prefix,iter=it,name=s.name)
+                        x=MriTransform(name=s.name+'_corr',prefix=it_prefix,iter=it)
+
+                        corr.append(futures.submit(concat_resample_nl,
+                            s, def_estimate[i], avg_inv_transform,
+                            c, x,
+                            current_int_model,
+                            p['level'],
+                            symmetric=False,
+                            qc=qc,
+                            invert_transform=True ))
+
+                        corr_transforms.append(x)
+                        corr_samples.append(c)
+
+                    futures.wait(corr, return_when=futures.ALL_COMPLETED)
+
+                    # 4. perform regression and create new estimate
+                    # 5. calculate residuals (?)
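+                    # (Editor's note, illustrative) voxel_regression() gets one
+                    # design-matrix row per subject; e.g. for covariates
+                    # [intercept, age] the rows could look like
+                    #   int_design_matrix = [[1.0, 23.5], [1.0, 31.2], ...]
+                    # and it estimates the coefficient volumes of
+                    # next_int_model / next_def_model, writing RMS residual
+                    # volumes alongside them.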
+                    # 4+5
+                    result=futures.submit(voxel_regression,
+                        int_design_matrix, def_design_matrix,
+                        corr_samples, corr_transforms,
+                        next_int_model, next_def_model,
+                        int_residual, def_residual,
+                        blur_int_model=blur_int_model,
+                        blur_def_model=blur_def_model,
+                        qc=qc
+                        )
+
+                    futures.wait([result], return_when=futures.ALL_COMPLETED)
+
+                    # 6. cleanup
+                    if cleanup :
+                        print("Cleaning up iteration: {}".format(it))
+                        for i in def_estimate:
+                            i.cleanup()
+                        for i in corr_samples:
+                            i.cleanup()
+                        if prev_def_estimate is not None:
+                            for i in prev_def_estimate:
+                                i.cleanup()
+                        avg_inv_transform.cleanup()
+                else:
+                    # files were there, reuse them
+                    print("Iteration {} already performed, skipping".format(it))
+                    corr_transforms=[]
+                    # this is a hack right now
+                    for (i, s) in enumerate(samples):
+                        x=MriTransform(name=s.name+'_corr',prefix=it_prefix,iter=it)
+                        corr_transforms.append(x)
+
+                int_models.append(current_int_model)
+                def_models.append(current_def_model)
+                int_residuals.append(int_residual)
+                def_residuals.append(def_residual)
+
+                current_int_model=next_int_model
+                current_def_model=next_def_model
+
+
+                result=futures.submit(average_stats_regression,
+                                      current_int_model, current_def_model,
+                                      int_residual, def_residual )
+                residuals.append(result)
+
+                regression_results={
+                        'int_model': current_int_model,
+                        'def_model': current_def_model,
+                        'int_residuals': int_residual.scan,
+                        'def_residuals': def_residual.scan,
+                        }
+                with open(prefix+os.sep+'results_{:03d}.json'.format(it),'w') as f:
+                    json.dump(regression_results,f,indent=1, cls=MRIEncoder)
+
+                # save for next iteration
+                # TODO: regularize?
+                prev_def_estimate=corr_transforms # have to use adjusted def estimate
+
+        # copy output to the destination
+        futures.wait(residuals, return_when=futures.ALL_COMPLETED)
+        with open(prefix+os.sep+'stats.txt','w') as f:
+            for s in residuals:
+                f.write("{}\n".format(s.result()))
+
+
+        with open(prefix+os.sep+'results_final.json','w') as f:
+            json.dump(regression_results, f, indent=1, cls=MRIEncoder)
+
+
+        if cleanup_intermediate:
+            for i in range(len(int_models)-1):
+                int_models[i].cleanup()
+                def_models[i].cleanup()
+                int_residuals[i].cleanup()
+                def_residuals[i].cleanup()
+            # delete unneeded models
+            #shutil.rmtree(prefix+os.sep+'reg')
+
+        return regression_results
+    except mincError as e:
+        print "Exception in regress:{}".format(str(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in regress:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def regress_csv(input_csv,
+                int_par_count=None,
+                model=None,
+                mask=None,
+                work_prefix=None,
+                options={},
+                regress_model=None):
+    """convenience function to run model generation using CSV input file and a fixed init"""
+    internal_sample=[]
+
+    with open(input_csv, 'r') as csvfile:
+        reader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
+        for row in reader:
+
+            par=[ float(i) for i in row[2:] ]
+            par_def=par
+            par_int=par
+
+            if int_par_count is not None:
+                par_int=par[:int_par_count]
+                par_def=par[int_par_count:]
+            _mask=row[1]
+            if _mask=='':
+                _mask=None
+            internal_sample.append( MriDataset(scan=row[0], mask=_mask, par_int=par_int, par_def=par_def) )
+
+    internal_model=None
+    initial_int_model=None
+    initial_def_model=None
+
+    if regress_model is None:
+        if model is not None:
+            internal_model=MriDataset(scan=model, mask=mask)
+    else:
+        # assume that regress_model is an array
+        initial_int_model=MriDatasetRegress(prefix=work_prefix, name='initial_model_int', N=len(regress_model))
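+        # (Editor's note) 'regress_model' is assumed here to be a list of
+        # pre-computed coefficient volume filenames, e.g. (hypothetical)
+        #   ['initial_model_int_0.mnc', 'initial_model_int_1.mnc']
+        # which get wrapped into an MriDatasetRegress and protected from
+        # cleanup below.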
+        initial_int_model.volume=regress_model
+        initial_int_model.mask=mask
+
+        initial_int_model.protect=True
+        initial_def_model=None
+
+
+    if work_prefix is not None and not os.path.exists(work_prefix):
+        os.makedirs(work_prefix)
+
+    return regress( internal_sample,
+                    initial_model=internal_model,
+                    prefix=work_prefix,
+                    options=options,
+                    initial_int_model=initial_int_model,
+                    initial_def_model=initial_def_model)
+
+
+def regress_simple(input_samples,
+                   int_design_matrix,
+                   geo_design_matrix,
+                   model=None,
+                   mask=None,
+                   work_prefix=None,
+                   options={},
+                   regress_model=None):
+    """convenience function to run model generation using a list of samples and a fixed init"""
+    internal_sample=[]
+
+    for (i,j) in enumerate(input_samples):
+        internal_sample.append( MriDataset(scan=j[0], mask=j[1],
+                                           par_int=int_design_matrix[i],
+                                           par_def=geo_design_matrix[i])
+                              )
+
+    internal_model=None
+    initial_int_model=None
+    initial_def_model=None
+
+    if regress_model is None:
+        if model is not None:
+            internal_model=MriDataset(scan=model, mask=mask)
+    else:
+        # assume that regress_model is an array
+        initial_int_model=MriDatasetRegress(prefix=work_prefix, name='initial_model_int', N=len(regress_model))
+        initial_int_model.volume=regress_model
+        initial_int_model.mask=mask
+
+        initial_int_model.protect=True
+        initial_def_model=None
+
+    if work_prefix is not None and not os.path.exists(work_prefix):
+        os.makedirs(work_prefix)
+
+    return regress( internal_sample,
+                    initial_model=internal_model,
+                    prefix=work_prefix,
+                    options=options,
+                    initial_int_model=initial_int_model,
+                    initial_def_model=initial_def_model)
+
+
+
+def build_estimate(description_json, parameters, output_prefix, int_par_count=None):
+    desc=None
+    with open(description_json, 'r') as f:
+        desc=json.load(f)
+
+    int_parameters=parameters
+    def_parameters=parameters
+
+    if int_par_count is not None:
+        int_parameters=parameters[:int_par_count]
+        def_parameters=parameters[int_par_count:]
+
+    if len(def_parameters)!=len(desc["def_model"]["volume"]) or \
+       len(int_parameters)!=len(desc["int_model"]["volume"]):
+
+        print(desc["int_model"]["volume"])
+        print("int_parameters={}".format(repr(int_parameters)))
+
+        print(desc["def_model"]["volume"])
+        print("def_parameters={}".format(repr(def_parameters)))
+
+        raise mincError("{} inconsistent number of parameters, expected {}".
+ format(repr(int_parameters), + len(desc["def_model"]["volume"]))) + + deformation=MriDatasetRegress(from_dict=desc["def_model"]) + intensity=MriDatasetRegress(from_dict=desc["int_model"]) + + output_scan=MriDataset(prefix=os.path.dirname(output_prefix),name=os.path.basename(output_prefix)) + output_transform=MriTransform(prefix=os.path.dirname(output_prefix),name=os.path.basename(output_prefix)) + + build_approximation(intensity, deformation, + int_parameters, def_parameters, + output_scan, output_transform) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model/resample.py b/ipl/model/resample.py new file mode 100644 index 0000000..8916229 --- /dev/null +++ b/ipl/model/resample.py @@ -0,0 +1,161 @@ +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +from .filter import * +from .structures import * + +# scoop parallel execution +from scoop import futures, shared + + +def concat_resample( + input_mri, + input_transform, + corr_transform, + output_mri, + output_transform, + model, + symmetric=False, + qc=False, + bias=None + ): + """apply correction transformation and resample input""" + try: + with mincTools() as m: + + if not ( os.path.exists(output_mri.scan) and os.path.exists(output_transform.xfm) ): + scan=input_mri.scan + + if bias is not None: + m.calc([input_mri.scan,bias.scan],'A[0]*A[1]',m.tmp('corr.mnc')) + scan=m.tmp('corr.mnc') + + m.xfmconcat([input_transform.xfm, corr_transform.xfm], output_transform.xfm) + m.resample_smooth(scan, output_mri.scan, transform=output_transform.xfm,like=model.scan) + + if input_mri.mask is not None and output_mri.mask is not None: + m.resample_labels(input_mri.mask, + output_mri.mask, + transform=output_transform.xfm, + like=model.scan) + if qc: + m.qc(output_mri.scan,output_mri.scan+'.jpg',mask=output_mri.mask) + else: + if qc: + m.qc(output_mri.scan,output_mri.scan+'.jpg') + + + if symmetric: + scan_f=input_mri.scan_f + + if bias is not None: + m.calc([input_mri.scan_f,bias.scan_f],'A[0]*A[1]',m.tmp('corr_f.mnc')) + scan_f=m.tmp('corr_f.mnc') + + m.xfmconcat([input_transform.xfm_f, corr_transform.xfm], output_transform.xfm_f) + m.resample_smooth(scan_f, output_mri.scan_f, transform=output_transform.xfm_f,like=model.scan) + + if input_mri.mask is not None and output_mri.mask is not None: + m.resample_labels(input_mri.mask_f, + output_mri.mask_f, + transform=output_transform.xfm_f, + like=model.scan) + if qc: + m.qc(output_mri.scan_f,output_mri.scan_f+'.jpg',mask=output_mri.mask_f) + else: + if qc: + m.qc(output_mri.scan_f,output_mri.scan_f+'.jpg') + except mincError as e: + print "Exception in concat_resample:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in concat_resample:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def concat_resample_nl( + input_mri, + input_transform, + corr_transform, + output_mri, + output_transform, + model, + level, + symmetric=False, + qc=False, + invert_transform=False + ): + """apply correction transformation and resample input""" + try: + with mincTools() as m: + tfm=input_transform.xfm + if corr_transform is not None: + m.xfmconcat([input_transform.xfm, corr_transform.xfm], m.tmp('transform.xfm')) + tfm=m.tmp('transform.xfm') + ref=None + if isinstance(model, MriDatasetRegress): ref=model.volume[0] + else: ref=model.scan + + m.xfm_normalize( tfm, ref, output_transform.xfm, 
+ step=level) + + m.resample_smooth(input_mri.scan, output_mri.scan, + transform=output_transform.xfm, + like=ref, + invert_transform=invert_transform) + + if input_mri.mask and output_mri.mask: + m.resample_labels(input_mri.mask, + output_mri.mask, + transform=output_transform.xfm, + like=ref, + invert_transform=invert_transform) + if qc: + m.qc(output_mri.scan,output_mri.scan+'.jpg', + mask=output_mri.mask) + else: + if qc: + m.qc(output_mri.scan,output_mri.scan+'.jpg') + + if symmetric: + tfm_f=input_transform.xfm_f + if corr_transform is not None: + m.xfmconcat( [input_transform.xfm_f, corr_transform.xfm], m.tmp('transform_f.xfm') ) + tfm_f=m.tmp('transform_f.xfm') + m.xfm_normalize( tfm_f, ref, output_transform.xfm_f, step=level ) + m.resample_smooth(input_mri.scan_f, output_mri.scan_f, transform=output_transform.xfm_f, + like=ref, + invert_transform=invert_transform ) + + if input_mri.mask and output_mri.mask: + m.resample_labels(input_mri.mask_f, + output_mri.mask_f, + transform=output_transform.xfm_f, + like=ref, + invert_transform=invert_transform) + + if qc: + m.qc(output_mri.scan_f, output_mri.scan_f+'.jpg', + mask=output_mri.mask_f) + else: + if qc: + m.qc(output_mri.scan_f, output_mri.scan_f+'.jpg') + + + return True + except mincError as e: + print "Exception in concat_resample_nl:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in concat_resample_nl:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model/structures.py b/ipl/model/structures.py new file mode 100644 index 0000000..5aef213 --- /dev/null +++ b/ipl/model/structures.py @@ -0,0 +1,180 @@ +# data structures used in model generation package + +import shutil +import os +import sys +import traceback +import json + +class MriDataset(object): + def __init__(self, prefix=None, name=None, iter=None, scan=None, mask=None, protect=False, par_int=[],par_def=[]): + self.prefix=prefix + self.name=name + self.iter=iter + self.protect=protect + self.scan_f=None + self.mask_f=None + self.par_int=par_int + self.par_def=par_def + + if scan is None: + if self.iter is None: + self.scan=self.prefix+os.sep+self.name+'.mnc' + self.mask=self.prefix+os.sep+self.name+'_mask.mnc' + self.scan_f=self.prefix+os.sep+self.name+'_f.mnc' + self.mask_f=self.prefix+os.sep+self.name+'_f_mask.mnc' + else: + self.scan=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'.mnc' + self.mask=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_mask.mnc' + self.scan_f=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f.mnc' + self.mask_f=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f_mask.mnc' + else: + self.scan=scan + self.mask=mask + + if self.name is None: + self.name=os.path.basename(self.scan) + + if self.prefix is None: + self.prefix=os.path.dirname(self.scan) + + def __repr__(self): + return 'MriDataset(prefix="{}",name="{}",iter="{}",scan="{}",mask="{}",protect={},par_int={},par_def={})'.\ + format(self.prefix,self.name,repr(self.iter),self.scan,self.mask,repr(self.protect),repr(self.par_int),repr(self.par_def)) + + def exists(self): + _ex=True + for i in (self.scan, self.mask, self.scan_f, self.mask_f ): + if i is not None : + _ex&=os.path.exists(i) + return _ex + + def cleanup(self,verbose=False): + if not self.protect: + for i in (self.scan, self.mask, self.scan_f, self.mask_f ): + if i is not None and os.path.exists(i): + if 
verbose: + print("Removing:{}".format(i)) + os.unlink(i) + +class MriTransform(object): + def __init__(self,prefix,name,iter=None,linear=False): + self.prefix=prefix + self.name=name + self.iter=iter + self.xfm_f=None + self.grid_f=None + self.linear=linear + + if self.iter is None: + self.xfm= self.prefix+os.sep+self.name+'.xfm' + self.xfm_f= self.prefix+os.sep+self.name+'_f.xfm' + self.grid= self.prefix+os.sep+self.name+'_grid_0.mnc' + self.grid_f= self.prefix+os.sep+self.name+'_f_grid_0.mnc' + else: + self.xfm= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'.xfm' + self.xfm_f= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f.xfm' + self.grid= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_grid_0.mnc' + self.grid_f= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f_grid_0.mnc' + # just null grids if it is linear + if self.linear: + self.grid=None + self.grid_f=None + + def __repr__(self): + return 'MriTransform(prefix="{}",name="{}",iter="{}")'.\ + format(self.prefix,self.name,repr(self.iter)) + + def cleanup(self,verbose=False): + for i in (self.xfm, self.grid, self.xfm_f, self.grid_f): + if i is not None and os.path.exists(i): + if verbose: + print("Removing:{}".format(i)) + os.unlink(i) + + +class MriDatasetRegress(object): + def __init__(self, prefix=None, name=None, iter=None, N=1, protect=False, from_dict=None, nomask=False): + if from_dict is None: + self.prefix=prefix + self.name=name + self.iter=iter + self.protect=protect + self.N=N + self.volume=[] + + if self.iter is None: + for n in range(0,N): + self.volume.append(self.prefix+os.sep+self.name+'_{}.mnc'.format(n)) + self.mask=self.prefix+os.sep+self.name+'_mask.mnc' + else: + for n in range(0,N): + self.volume.append(self.prefix+os.sep+self.name+'.{:03d}_{}'.format(iter,n)+'.mnc') + self.mask=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_mask.mnc' + if nomask: + self.mask=None + else: # simple hack for now + self.volume=from_dict["volume"] + self.iter=from_dict["iter"] + self.name=from_dict["name"] + self.mask=from_dict["mask"] + self.N=len(self.volume) + + def __repr__(self): + return 'MriDatasetRegress(prefix="{}",name="{}",volume={},mask={},iter="{}",protect={})'.\ + format(self.prefix, self.name, repr(self.volume), self.mask, repr(self.iter), repr(self.protect)) + + def cleanup(self): + if not self.protect: + for i in self.volume: + if i is not None and os.path.exists(i): + os.unlink(i) + for i in [self.mask]: + if i is not None and os.path.exists(i): + os.unlink(i) + + def exists(self): + """ + Check that all files are present + """ + _ex=True + for i in self.volume: + if i is not None : + _ex&=os.path.exists(i) + + for i in [self.mask]: + if i is not None : + _ex&=os.path.exists(i) + + return _ex + + +class MRIEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, MriTransform): + return {'name':obj.name, + 'iter':obj.iter, + 'xfm':obj.xfm, 'grid':obj.grid, + 'xfm_f':obj.xfm_f,'grid_f':obj.grid_f, + 'linear':obj.linear + } + if isinstance(obj, MriDataset): + return {'name':obj.name, + 'iter':obj.iter, + 'scan':obj.scan, + 'mask':obj.mask, + 'scan_f':obj.scan_f, + 'mask_f':obj.mask_f, + 'par_def':obj.par_def, + 'par_int':obj.par_int + } + elif isinstance(obj, MriDatasetRegress): + return {'name': obj.name, + 'iter': obj.iter, + 'volume':obj.volume, + 'mask': obj.mask, + } + # Let the base class default method raise the TypeError + return json.JSONEncoder.default(self, obj) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 
80;show-tabs on diff --git a/ipl/model_ldd/__init__.py b/ipl/model_ldd/__init__.py new file mode 100644 index 0000000..7076517 --- /dev/null +++ b/ipl/model_ldd/__init__.py @@ -0,0 +1,5 @@ +# model generations + + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model_ldd/filter_ldd.py b/ipl/model_ldd/filter_ldd.py new file mode 100644 index 0000000..21e494d --- /dev/null +++ b/ipl/model_ldd/filter_ldd.py @@ -0,0 +1,453 @@ +import shutil +import os +import sys +import csv +import traceback +import copy + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +from .structures_ldd import MriDataset, LDDMriTransform, LDDMRIEncoder,MriDatasetRegress + +def generate_flip_sample(input): + '''generate flipped version of sample''' + with mincTools() as m: + m.flip_volume_x(input.scan,input.scan_f) + + if input.mask is not None: + m.flip_volume_x(input.mask,input.mask_f,labels=True) + + print "Flipped!" + return True + + +def normalize_sample( + input, + output, + model, + bias_field=None, + ): + """Normalize sample intensity""" + + with mincTools() as m: + m.apply_n3_vol_pol( + input.scan, + model.scan, + output.scan, + source_mask=input.mask, + target_mask=model.mask, + bias=bias_field, + ) + output.mask=input.mask + return output + + + +def average_samples( + samples, + output, + output_sd=None, + symmetric=False, + ): + """average individual samples""" + try: + with mincTools() as m: + avg = [] + + for s in samples: + avg.append(s.scan) + + if symmetric: + for s in samples: + avg.append(s.scan_f) + + if output_sd: + m.average(avg, output.scan, sdfile=output_sd.scan) + else: + m.average(avg, output.scan) + + # average masks + if output.mask is not None: + avg = [] + for s in samples: + avg.append(s.mask) + + if symmetric: + for s in samples: + avg.append(s.mask_f) + + if not os.path.exists(output.mask): + m.average(avg,m.tmp('avg_mask.mnc'),datatype='-float') + m.calc([m.tmp('avg_mask.mnc')],'A[0]>0.5?1:0',m.tmp('avg_mask_.mnc'),datatype='-byte') + m.reshape(m.tmp('avg_mask_.mnc'),output.mask,image_range=[0,1],valid_range=[0,1]) + + + return True + except mincError as e: + print "Exception in average_samples:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_samples:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def average_stats( + avg, + sd, + ): + """calculate median sd within mask""" + try: + st=0 + with mincTools(verbose=2) as m: + if avg.mask is not None: + st=float(m.stats(sd.scan,'-median',mask=avg.mask)) + else: + st=float(m.stats(sd.scan,'-median')) + return st + except mincError as e: + print "mincError in average_stats:{}".format(repr(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_stats:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +def average_stats_regression( + current_intensity_model, current_velocity_model, + intensity_residual, velocity_residual, + ): + """calculate median sd within mask for intensity and velocity""" + try: + sd_int=0.0 + sd_vel=0.0 + with mincTools(verbose=2) as m: + m.grid_magnitude(velocity_residual.scan,m.tmp('mag.mnc')) + if current_intensity_model.mask is not None: + sd_int=float(m.stats(intensity_residual.scan,'-median',mask=current_intensity_model.mask)) + m.resample_smooth(m.tmp('mag.mnc'),m.tmp('mag_.mnc'),like=current_intensity_model.mask) + 
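+            # (Editor's note) the deformation residual is summarized as the
+            # median magnitude of the residual grid, resampled onto the
+            # intensity-model mask grid just above before taking statistics.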
+                sd_vel=float(m.stats(m.tmp('mag_.mnc'),'-median',mask=current_intensity_model.mask))
+            else:
+                sd_int=float(m.stats(intensity_residual.scan,'-median'))
+                sd_vel=float(m.stats(m.tmp('mag.mnc'),'-median'))
+
+        return (sd_int,sd_vel)
+    except mincError as e:
+        print "mincError in average_stats_regression:{}".format(repr(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in average_stats_regression:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+
+def calculate_diff_bias_field(sample, model, output, symmetric=False, distance=100 ):
+    try:
+        with mincTools() as m:
+            if model.mask is not None:
+                m.difference_n3(sample.scan, model.scan, output.scan, mask=model.mask, distance=distance, normalize=True)
+            else:
+                m.difference_n3(sample.scan, model.scan, output.scan, distance=distance, normalize=True )
+            if symmetric:
+                if model.mask is not None:
+                    m.difference_n3(sample.scan_f, model.scan, output.scan_f, mask=model.mask_f, distance=distance, normalize=True)
+                else:
+                    m.difference_n3(sample.scan_f, model.scan, output.scan_f, distance=distance, normalize=True )
+        return True
+    except mincError as e:
+        print "mincError in calculate_diff_bias_field:{}".format(repr(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in calculate_diff_bias_field:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+def average_bias_fields(samples, output, symmetric=False ):
+    try:
+        with mincTools() as m:
+
+            avg = []
+
+            for s in samples:
+                avg.append(s.scan)
+
+            if symmetric:
+                for s in samples:
+                    avg.append(s.scan_f)
+
+            m.log_average(avg, output.scan)
+        return True
+    except mincError as e:
+        print "mincError in average_bias_fields:{}".format(repr(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in average_bias_fields:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+def resample_and_correct_bias_ldd(
+        sample,
+        transform,
+        avg_bias,
+        output,
+        previous=None,
+        symmetric=False,
+        ):
+    # resample bias field and apply previous estimate
+    try:
+        with mincTools() as m:
+
+            m.calc([sample.scan, avg_bias.scan],
+                   'A[1]>0.1?A[0]/A[1]:1.0', m.tmp('corr_bias.mnc'))
+
+            m.resample_smooth_logspace(m.tmp('corr_bias.mnc'),
+                                       m.tmp('corr_bias2.mnc'),
+                                       like=sample.scan,
+                                       transform=transform.vel,
+                                       invert_transform=True)
+            if previous:
+                m.calc([previous.scan, m.tmp('corr_bias2.mnc') ], 'A[0]*A[1]',
+                       output.scan, datatype='-float')
+            else:
+                shutil.copy(m.tmp('corr_bias2.mnc'), output.scan)
+
+            if symmetric:
+                m.calc([sample.scan_f, avg_bias.scan],
+                       'A[1]>0.1?A[0]/A[1]:1.0', m.tmp('corr_bias_f.mnc'))
+
+                m.resample_smooth_logspace(m.tmp('corr_bias_f.mnc'),
+                                           m.tmp('corr_bias2_f.mnc'),
+                                           like=sample.scan,
+                                           transform=transform.vel_f,
+                                           invert_transform=True)
+                if previous:
+                    m.calc([previous.scan_f, m.tmp('corr_bias2_f.mnc')],
+                           'A[0]*A[1]',
+                           output.scan_f, datatype='-float')
+                else:
+                    shutil.copy(m.tmp('corr_bias2_f.mnc'), output.scan_f)
+
+        return True
+    except mincError as e:
+        print "Exception in resample_and_correct_bias_ldd:{}".format(str(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in resample_and_correct_bias_ldd:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def apply_linear_model(
+    lin_model,
+    parameters,
+    output_volume
+    ):
+    """build a volume, for a given regression model and parameters"""
+    try:
+        with mincTools() as m:
+
+            if lin_model.N!=len(parameters):
+                raise
mincError("Expected: {} parameters, got {}".format(lin_model.N,len(parameters))) + + # create minccalc expression + _exp=[] + for i in range(0,lin_model.N): + _exp.append('A[{}]*{}'.format(i,parameters[i])) + exp='+'.join(_exp) + m.calc(lin_model.volume,exp,output_volume) + + return True + except mincError as e: + print( "Exception in apply_linear_model:{}".format(str(e)) ) + traceback.print_exc(file=sys.stdout) + raise + except : + print( "Exception in apply_linear_model:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +def build_approximation(intensity_model, geo_model , + parameters_intensity, parameters_velocity, + output_scan, output_transform, + noresample=False, + remove0=False): + try: + with mincTools() as m: + + intensity=m.tmp('intensity_model.mnc') + if noresample: + intensity=output_scan.scan + #geometry=m.tmp('geometry_model.mnc') + + # TODO: paralelelize? + if intensity_model.N>0: + apply_linear_model(intensity_model,parameters_intensity,intensity) + else: # not modelling intensity + intensity=intensity_model.volume[0] + + # if we have geometry information + if geo_model is not None and geo_model.N>0 : + _parameters_velocity=copy.deepcopy(parameters_velocity) + if remove0:_parameters_velocity[0]=0 + apply_linear_model(geo_model, _parameters_velocity, output_transform.vel) + + if not noresample: + m.resample_smooth_logspace(intensity, output_scan.scan, + velocity=output_transform.vel, + like=intensity_model.volume[0]) + + if intensity_model.mask is not None: + if noresample: + shutil.copyfile(intensity_model.mask, + output_scan.mask) + else: + m.resample_labels_logspace(intensity_model.mask, + output_scan.mask, + velocity=output_transform.vel, + like=intensity_model.volume[0]) + else: + output_scan.mask=None + else: # not modelling shape! 
+ shutil.copyfile(intensity,output_scan.scan) + if intensity_model.mask is not None: + shutil.copyfile(intensity_model.mask, + output_scan.mask) + else: + output_scan.mask=None + output_transform=None + + return (output_scan, output_transform) + except mincError as e: + print( "Exception in build_approximation:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print( "Exception in build_approximation:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +def voxel_regression(intensity_design_matrix, + velocity_design_matrix, + intensity_estimate, velocity_estimate, + next_intensity_model, next_velocity_model, + intensity_residual, velocity_residual, + blur_int_model=None, blur_vel_model=None, + qc=False): + """Perform voxel-wise regression using given design matrix""" + try: + with mincTools() as m: + #print(repr(next_intensity_model)) + + # a small hack - assume that input directories are the same + _prefix=velocity_estimate[0].prefix + _design_vel=_prefix+os.sep+'regression_vel.csv' + _design_int=_prefix+os.sep+'regression_int.csv' + + #nomask=False + #for i in for i in intensity_estimate: + # if i.mask is None: + # nomask=True + _masks=[i.mask for i in intensity_estimate] + _inputs=[] + _outputs=[] + _outputs.extend(next_intensity_model.volume) + _outputs.extend(next_velocity_model.volume) + + with open(_design_vel,'w') as f: + for (i, l ) in enumerate(velocity_design_matrix): + f.write(os.path.basename(velocity_estimate[i].vel)) + f.write(',') + f.write(','.join([str(qq) for qq in l])) + f.write("\n") + _inputs.append(velocity_estimate[i].vel) + + with open(_design_int,'w') as f: + for (i, l ) in enumerate(intensity_design_matrix): + f.write(os.path.basename(intensity_estimate[i].scan)) + f.write(',') + f.write(','.join([str(qq) for qq in l])) + f.write("\n") + _inputs.append(intensity_estimate[i].scan) + + if not m.checkfiles(inputs=_inputs, outputs=_outputs): + return + + intensity_model=next_intensity_model + velocity_model=next_velocity_model + + if blur_int_model is not None: + intensity_model=MriDatasetRegress(prefix=m.tempdir, name='model_intensity',N=next_intensity_model.N,nomask=(next_intensity_model.mask is None)) + + if blur_vel_model is not None: + velocity_model=MriDatasetRegress(prefix=m.tempdir,name='model_velocity', N=next_velocity_model.N, nomask=(next_velocity_model.mask is None)) + + + # regress velocity + m.command(['volumes_lm',_design_vel, velocity_model.volume[0].rsplit('_0.mnc',1)[0]], + inputs=[_design_vel], + outputs=velocity_model.volume, + verbose=2) + + # regress intensity + m.command(['volumes_lm',_design_int, intensity_model.volume[0].rsplit('_0.mnc',1)[0]], + inputs=[_design_int], + outputs=intensity_model.volume, + verbose=2) + + if blur_vel_model is not None: + # blur estimates + for (i,j) in enumerate(velocity_model.volume): + m.blur_vectors(velocity_model.volume[i],next_velocity_model.volume[i],blur_vel_model) + # a hack preserve unfiltered RMS volume + shutil.copyfile(velocity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + next_velocity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + + if blur_int_model is not None: + for (i,j) in enumerate(intensity_model.volume): + m.blur(intensity_model.volume[i],next_intensity_model.volume[i],blur_int_model) + # a hack preserve unfiltered RMS volume + shutil.copyfile(intensity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + next_intensity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + + # average masks + if next_intensity_model.mask is 
not None: + m.average(_masks,m.tmp('avg_mask.mnc'),datatype='-float') + m.calc([m.tmp('avg_mask.mnc')],'A[0]>0.5?1:0',m.tmp('avg_mask_.mnc'),datatype='-byte') + m.reshape(m.tmp('avg_mask_.mnc'),next_intensity_model.mask,image_range=[0,1],valid_range=[0,1]) + + if qc: + m.qc(next_intensity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + next_intensity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.jpg' ) + + m.grid_magnitude(next_velocity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + m.tmp('velocity_RMS_mag.mnc')) + + m.qc(m.tmp('velocity_RMS_mag.mnc'), + next_velocity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.jpg') + + #cleanup + #os.unlink(_design_vel) + #os.unlink(_design_int) + + + except mincError as e: + print( "Exception in voxel_regression:{}".format(str(e)) ) + traceback.print_exc(file=sys.stdout) + raise + except : + print( "Exception in voxel_regression:{}".format(sys.exc_info()[0]) ) + traceback.print_exc(file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model_ldd/generate_nonlinear_ldd.py b/ipl/model_ldd/generate_nonlinear_ldd.py new file mode 100644 index 0000000..0633ea0 --- /dev/null +++ b/ipl/model_ldd/generate_nonlinear_ldd.py @@ -0,0 +1,260 @@ +import shutil +import os +import sys +import csv +import traceback +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +from .structures_ldd import MriDataset, LDDMriTransform, LDDMRIEncoder +from .filter_ldd import generate_flip_sample, normalize_sample +from .filter_ldd import average_samples,average_stats +from .filter_ldd import calculate_diff_bias_field +from .filter_ldd import average_bias_fields +from .filter_ldd import resample_and_correct_bias_ldd +from .registration_ldd import non_linear_register_step_ldd +from .registration_ldd import average_transforms_ldd +from .resample_ldd import concat_resample_ldd + +from scoop import futures, shared + +def generate_ldd_average( + samples, + initial_model=None, + output_model=None, + output_model_sd=None, + prefix='.', + options={} + ): + """ perform iterative model creation""" + try: + #print(repr(options)) + # use first sample as initial model + if not initial_model: + initial_model = samples[0] + + # current estimate of template + current_model = initial_model + current_model_sd = None + + transforms=[] + corr=[] + + bias_fields=[] + corr_transforms=[] + sd=[] + corr_samples=[] + + protocol=options.get('protocol', [ + {'iter':4,'level':32}, + {'iter':4,'level':16}] + ) + + cleanup=options.get('cleanup',False) + symmetric=options.get('symmetric',False) + parameters=options.get('parameters',None) + refine=options.get('refine',True) + qc=options.get('qc',False) + downsample=options.get('downsample',None) + + models=[] + models_sd=[] + + if symmetric: + flipdir=prefix+os.sep+'flip' + if not os.path.exists(flipdir): + os.makedirs(flipdir) + + flip_all=[] + # generate flipped versions of all scans + for (i, s) in enumerate(samples): + s.scan_f=prefix+os.sep+'flip'+os.sep+os.path.basename(s.scan) + + if s.mask is not None: + s.mask_f=prefix+os.sep+'flip'+os.sep+'mask_'+os.path.basename(s.scan) + + flip_all.append( futures.submit( generate_flip_sample,s ) ) + + futures.wait(flip_all, return_when=futures.ALL_COMPLETED) + # go through all the iterations + it=0 + for (i,p) in enumerate(protocol): + for j in xrange(1,p['iter']+1): + it+=1 + # this will be a model for next iteration actually + + # 1 register all subjects to current template + 
next_model=MriDataset(prefix=prefix,iter=it,name='avg') + next_model_sd=MriDataset(prefix=prefix,iter=it,name='sd') + transforms=[] + + it_prefix=prefix+os.sep+str(it) + if not os.path.exists(it_prefix): + os.makedirs(it_prefix) + + inv_transforms=[] + fwd_transforms=[] + + for (i, s) in enumerate(samples): + sample_xfm=LDDMriTransform(name=s.name,prefix=it_prefix,iter=it) + + prev_transform = None + prev_bias_field = None + + if it > 1 and refine: + prev_transform = corr_transforms[i] + + transforms.append( + futures.submit( + non_linear_register_step_ldd, + s, + current_model, + sample_xfm, + init_vel=prev_transform, + symmetric=symmetric, + parameters=parameters, + level=p['level'], + work_dir=prefix, + downsample=downsample) + ) + fwd_transforms.append(sample_xfm) + + # wait for jobs to finish + futures.wait(transforms, return_when=futures.ALL_COMPLETED) + + if cleanup and it>1 : + # remove information from previous iteration + for s in corr_samples: + s.cleanup() + for x in corr_transforms: + x.cleanup() + + # here all the transforms should exist + avg_inv_transform=LDDMriTransform(name='avg_inv',prefix=it_prefix,iter=it) + + # 2 average all transformations + average_transforms_ldd(fwd_transforms, avg_inv_transform, symmetric=symmetric, invert=True) + + corr=[] + corr_transforms=[] + corr_samples=[] + + # 3 concatenate correction and resample + for (i, s) in enumerate(samples): + c=MriDataset(prefix=it_prefix,iter=it,name=s.name) + x=LDDMriTransform(name=s.name+'_corr',prefix=it_prefix,iter=it) + + corr.append(futures.submit(concat_resample_ldd, s, + fwd_transforms[i], avg_inv_transform, c, x, current_model.scan, + symmetric=symmetric, qc=qc )) + + corr_transforms.append(x) + corr_samples.append(c) + + futures.wait(corr, return_when=futures.ALL_COMPLETED) + + # 4 average resampled samples to create new estimate + + result=futures.submit(average_samples, corr_samples, next_model, next_model_sd, symmetric=symmetric) + futures.wait([result], return_when=futures.ALL_COMPLETED) + + + if cleanup: + for s in fwd_transforms: + s.cleanup() + + if cleanup and it>1 : + # remove previous template estimate + models.append(next_model) + models_sd.append(next_model_sd) + + current_model=next_model + current_model_sd=next_model_sd + + result=futures.submit(average_stats, next_model, next_model_sd) + sd.append(result) + + # copy output to the destination + futures.wait(sd, return_when=futures.ALL_COMPLETED) + with open(prefix+os.sep+'stats.txt','w') as f: + for s in sd: + f.write("{}\n".format(s.result())) + + results={ + 'model': current_model, + 'model_sd': current_model_sd, + 'vel': corr_transforms, + 'biascorr': None, + 'scan': corr_samples, + 'symmetric': symmetric, + } + + with open(prefix+os.sep+'results.json','w') as f: + json.dump(results,f,indent=1,cls=LDDMRIEncoder) + + if cleanup: + # delete unneeded models + for m in models: + m.cleanup() + for m in models_sd: + m.cleanup() + + return results + except mincError as e: + print "Exception in generate_ldd_average:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in generate_ldd_average:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def generate_ldd_model_csv(input_csv,model=None,mask=None,work_prefix=None,options={}): + internal_sample=[] + + with open(input_csv, 'r') as csvfile: + reader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONE) + for row in reader: + internal_sample.append(MriDataset(scan=row[0],mask=row[1])) + + internal_model=None + if model 
+    is not None:
+        internal_model=MriDataset(scan=model,mask=mask)
+
+    if work_prefix is not None and not os.path.exists(work_prefix):
+        os.makedirs(work_prefix)
+
+    return generate_ldd_average(internal_sample,internal_model,
+                                prefix=work_prefix,options=options)
+
+
+def generate_ldd_model(samples,model=None,mask=None,work_prefix=None,options={}):
+    internal_sample=[]
+    try:
+        #print(repr(options))
+        for i in samples:
+            s=MriDataset(scan=i[0],mask=i[1])
+            internal_sample.append(s)
+
+        internal_model=None
+        if model is not None:
+            internal_model=MriDataset(scan=model,mask=mask)
+
+        if work_prefix is not None and not os.path.exists(work_prefix):
+            os.makedirs(work_prefix)
+
+        return generate_ldd_average(internal_sample,internal_model,
+                                    prefix=work_prefix,options=options)
+
+    except mincError as e:
+        print "Exception in generate_ldd_model:{}".format(str(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in generate_ldd_model:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/model_ldd/registration_ldd.py b/ipl/model_ldd/registration_ldd.py
new file mode 100644
index 0000000..090710e
--- /dev/null
+++ b/ipl/model_ldd/registration_ldd.py
@@ -0,0 +1,311 @@
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+import ipl.dd_registration
+
+# internal stuff
+from .filter_ldd import build_approximation
+from .structures_ldd import *
+
+def non_linear_register_step_ldd(
+    sample,
+    model,
+    output,
+    init_vel=None,
+    level=32,
+    start=None,
+    symmetric=False,
+    parameters=None,
+    work_dir=None,
+    downsample=None,
+    ):
+    """perform non-linear (LDD) registration to the model, and calculate inverse"""
+
+    try:
+        _init_vel=None
+        _init_vel_f=None
+
+        if start is None:
+            start=level
+
+        if init_vel is not None:
+            _init_vel=init_vel.vel
+            if symmetric:
+                _init_vel_f=init_vel.vel_f
+
+        with mincTools() as m:
+
+            if symmetric:
+
+                if m.checkfiles(inputs=[sample.scan,model.scan,sample.scan_f],
+                                outputs=[output.vel,output.vel_f]):
+
+                    ipl.dd_registration.non_linear_register_ldd(
+                        sample.scan,
+                        model.scan,
+                        output.vel,
+                        source_mask=sample.mask,
+                        target_mask=model.mask,
+                        init_velocity=_init_vel,
+                        parameters=parameters,
+                        start=start,
+                        level=level,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+                    ipl.dd_registration.non_linear_register_ldd(
+                        sample.scan_f,
+                        model.scan,
+                        output.vel_f,
+                        source_mask=sample.mask_f,
+                        target_mask=model.mask,
+                        init_velocity=_init_vel_f,
+                        parameters=parameters,
+                        start=start,
+                        level=level,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+
+
+            else:
+                if m.checkfiles(inputs=[sample.scan,model.scan],
+                                outputs=[output.vel]):
+
+                    ipl.dd_registration.non_linear_register_ldd(
+                        sample.scan,
+                        model.scan,
+                        output.vel,
+                        source_mask=sample.mask,
+                        target_mask=model.mask,
+                        init_velocity=_init_vel,
+                        parameters=parameters,
+                        start=start,
+                        level=level,
+                        downsample=downsample,
+                        #work_dir=work_dir
+                        )
+    except mincError as e:
+        print "Exception in non_linear_register_step_ldd:{}".format(str(e))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print "Exception in non_linear_register_step_ldd:{}".format(sys.exc_info()[0])
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def average_transforms_ldd(
+    samples,
+    output,
+    symmetric=False,
+    invert=False
+    ):
+    """average given transformations"""
+    try:
+        with mincTools() as m:
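+            # (Editor's note) stationary velocity fields live in the log-space
+            # (Lie algebra) of diffeomorphisms, so the group average is a plain
+            # voxel-wise mean and inversion is negation - hence the
+            # m.calc(...,'-A[0]',...) below instead of an explicit xfminvert.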
+            avg = []
+            if not os.path.exists(output.vel):
+                out=output.vel
+
+                if invert:
+                    out=m.tmp('avg.mnc')
+
+                for i in samples:
+                    avg.append(i.vel)
+
+                if symmetric:
+                    for i in samples:
+                        avg.append(i.vel_f)
+                m.average(avg, out)
+
+                if invert:
+                    m.calc([out],'-A[0]',output.vel)
+
+    except mincError as e:
+        print("Exception in average_transforms_ldd:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in average_transforms_ldd:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def non_linear_register_step_regress_ldd(
+    sample,
+    model_intensity,
+    model_velocity,
+    output_intensity,
+    output_velocity,
+    level=32,
+    start_level=None,
+    parameters=None,
+    work_dir=None,
+    downsample=None,
+    debug=False,
+    previous_velocity=None,
+    datatype='short',
+    incremental=True,
+    remove0=False,
+    sym=False
+    ):
+    """perform non-linear registration to the model, and calculate new estimate"""
+    try:
+
+        with mincTools() as m:
+            #print repr(sample)
+
+            if m.checkfiles(inputs=[sample.scan],
+                            outputs=[output_velocity.vel]):
+
+                #velocity_approximate = LDDMriTransform(prefix=m.tempdir,name=sample.name+'_velocity')
+                #intensity_approximate = MriDataset(prefix=m.tempdir,name=sample.name+'_intensity')
+                intensity_approximate = None
+                velocity_approximate = None
+                velocity_update = None
+
+                if debug:
+                    intensity_approximate = MriDataset( prefix=output_velocity.prefix,
+                                                        name=output_velocity.name +'_int_approx',
+                                                        iter=output_velocity.iter )
+
+                    velocity_approximate = LDDMriTransform( prefix=output_velocity.prefix,
+                                                            name=output_velocity.name +'_approx',
+                                                            iter=output_velocity.iter )
+
+                    velocity_update = LDDMriTransform( prefix=output_velocity.prefix,
+                                                       name=output_velocity.name +'_update',
+                                                       iter=output_velocity.iter )
+                else:
+                    intensity_approximate = MriDataset( prefix=m.tempdir,
+                                                        name=output_velocity.name +'_int_approx')
+
+                    velocity_approximate = LDDMriTransform( prefix=m.tempdir,
+                                                            name=output_velocity.name +'_approx' )
+
+                    velocity_update = LDDMriTransform( prefix=m.tempdir,
+                                                       name=output_velocity.name +'_update')
+
+                # A hack!
assume that if initial model is MriDataset it means zero regression coeff + if isinstance(model_intensity, MriDataset): + intensity_approximate=model_intensity + velocity_approximate=None + + else: + (intensity_approximate, velocity_approximate) = \ + build_approximation(model_intensity, + model_velocity, + sample.par_int, + sample.par_vel, + intensity_approximate, + velocity_approximate, + noresample=(not incremental), + remove0=remove0) + if model_velocity is None: + velocity_approximate=None + + if start_level is None: + start_level=level + + # we are refining previous estimate + init_velocity=None + #if velocity_approximate is not None: + #init_velocity=velocity_approximate.vel + if incremental: + if previous_velocity is not None: + ## have to adjust it based on the current estimate + if velocity_approximate is not None: + init_velocity=m.tmp('init_velocity.mnc') + m.calc( [previous_velocity.vel, velocity_approximate.vel ], + 'A[0]-A[1]', init_velocity) + + else: + init_velocity=previous_velocity.vel + else: + if previous_velocity is not None: + init_velocity=previous_velocity.vel + elif velocity_approximate is not None: + init_velocity=velocity_approximate.vel + if sym: + print("Using symmetrization!") + # TODO: parallelalize this + update1=m.tmp('update1.mnc') + m.non_linear_register_ldd( + intensity_approximate.scan, + sample.scan, + update1, + source_mask=intensity_approximate.mask, + target_mask=sample.mask, + init_velocity=init_velocity, + parameters=parameters, + start=start_level, + level=level, + downsample=downsample, + #work_dir=work_dir + ) + update2=m.tmp('update2.mnc') + m.non_linear_register_ldd( + sample.scan, + intensity_approximate.scan, + update2, + source_mask=sample.mask, + target_mask=intensity_approximate.mask, + init_velocity=init_velocity, + parameters=parameters, + start=start_level, + level=level, + downsample=downsample, + #work_dir=work_dir + ) + m.calc([update1,update2],'(A[0]-A[1])/2.0',velocity_update.vel) + else: + m.non_linear_register_ldd( + intensity_approximate.scan, + sample.scan, + velocity_update.vel, + source_mask=intensity_approximate.mask, + target_mask=sample.mask, + init_velocity=init_velocity, + parameters=parameters, + start=start_level, + level=level, + downsample=downsample, + #work_dir=work_dir + ) + + # update estimate, possibility to use link function? 
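+                # for stationary velocity fields, voxel-wise addition is a
+                # first-order approximation of composing the two transforms:
+                # log(exp(u) o exp(v)) ~ u + v for small updates, so the
+                # 'A[0]+A[1]' calc below stands in for a true composition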
+                if incremental and velocity_approximate is not None:
+                    m.calc( [velocity_approximate.vel, velocity_update.vel ], 'A[0]+A[1]', output_velocity.vel,
+                            datatype='-'+datatype)
+                else:
+                    m.calc( [velocity_update.vel ], 'A[0]', output_velocity.vel,
+                            datatype='-'+datatype)
+
+                if output_intensity is not None:
+                    # resample intensity
+                    m.resample_smooth_logspace(sample.scan, output_intensity.scan,
+                                               velocity=output_velocity.vel,
+                                               invert_transform=True,
+                                               datatype='-'+datatype
+                                               )
+
+                    if sample.mask is not None:
+                        m.resample_labels_logspace(sample.mask, output_intensity.mask,
+                                                   velocity=output_velocity.vel,
+                                                   invert_transform=True)
+                # done
+
+    except mincError as e:
+        print("Exception in non_linear_register_step_regress_ldd:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in non_linear_register_step_regress_ldd:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/model_ldd/regress_ldd.py b/ipl/model_ldd/regress_ldd.py
new file mode 100644
index 0000000..612015d
--- /dev/null
+++ b/ipl/model_ldd/regress_ldd.py
@@ -0,0 +1,457 @@
+import shutil
+import os
+import sys
+import csv
+import traceback
+import json
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+from .structures_ldd import MriDataset, LDDMriTransform, LDDMRIEncoder,MriDatasetRegress
+from .filter_ldd import generate_flip_sample, normalize_sample
+from .filter_ldd import average_samples,average_stats_regression
+from .filter_ldd import calculate_diff_bias_field
+from .filter_ldd import average_bias_fields
+from .filter_ldd import resample_and_correct_bias_ldd
+from .filter_ldd import build_approximation
+from .filter_ldd import voxel_regression
+from .registration_ldd import non_linear_register_step_ldd
+from .registration_ldd import average_transforms_ldd
+from .registration_ldd import non_linear_register_step_regress_ldd
+from .resample_ldd import concat_resample_ldd
+
+from scoop import futures, shared
+
+
+def regress_ldd(
+    samples,
+    initial_model=None,
+    initial_intensity_model=None,
+    initial_velocity_model=None,
+    output_intensity_model=None,
+    output_velocity_model=None,
+    output_residuals_int=None,
+    output_residuals_vel=None,
+    prefix='.',
+    options={}
+    ):
+    """ perform iterative model creation"""
+    try:
+
+
+        # make sure all input scans have parameters
+        N_int=None
+        N_vel=None
+
+        intensity_design_matrix=[]
+        velocity_design_matrix=[]
+        nomask=False
+
+        for s in samples:
+
+            if N_int is None:
+                N_int=len(s.par_int)
+            elif N_int!=len(s.par_int):
+                raise mincError("Sample {} has inconsistent number of intensity parameters: {} expected {}".format(repr(s),len(s.par_int),N_int))
+
+            if N_vel is None:
+                N_vel=len(s.par_vel)
+            elif N_vel!=len(s.par_vel):
+                raise mincError("Sample {} has inconsistent number of velocity parameters: {} expected {}".format(repr(s),len(s.par_vel),N_vel))
+
+            intensity_design_matrix.append(s.par_int)
+            velocity_design_matrix.append(s.par_vel)
+
+            if s.mask is None:
+                nomask=True
+
+        #print("Intensity design matrix=\n{}".format(repr(intensity_design_matrix)))
+        #print("Velocity design matrix=\n{}".format(repr(velocity_design_matrix)))
+
+        ref_model=None
+        # current estimate of template
+        if initial_model is not None:
+            current_intensity_model = initial_model
+            current_velocity_model  = None
+            ref_model=initial_model.scan
+        else:
+            current_intensity_model = initial_intensity_model
+            current_velocity_model =
initial_velocity_model + ref_model=initial_intensity_model.volume[0] + + transforms=[] + + full_transforms=[] + + protocol=options.get( + 'protocol', [{'iter':4,'level':32, 'blur_int': None, 'blur_vel': None }, + {'iter':4,'level':16, 'blur_int': None, 'blur_vel': None }] + ) + + cleanup= options.get('cleanup',False) + cleanup_intermediate= options.get('cleanup_intermediate',False) + + parameters= options.get('parameters',None) + refine= options.get('refine',False) + qc= options.get('qc',False) + downsample =options.get('downsample',None) + start_level=options.get('start_level',None) + debug =options.get('debug',False) + debias =options.get('debias',True) + incremental=options.get('incremental',True) + remove0 =options.get('remove0',False) + sym =options.get('sym',False) + + if parameters is None: + parameters={ + 'conf':{}, + 'smooth_update':2, + 'smooth_field':2, + 'update_rule':1, + 'grad_type': 0, + 'max_step': 2.0, # This paramter is probably domain specific + 'hist_match':True # this turns out to be very important! + } + + intensity_models=[] + velocity_models=[] + intensity_residuals=[] + velocity_residuals=[] + + intensity_residual=None + velocity_residual=None + + prev_velocity_estimate=None + # go through all the iterations + it=0 + residuals=[] + + for (i,p) in enumerate(protocol): + blur_int_model=p.get('blur_int',None) + blur_vel_model=p.get('blur_vel',None) + for j in range(1,p['iter']+1): + it+=1 + _start_level=None + if it==1: + _start_level=start_level + # this will be a model for next iteration actually + + it_prefix=prefix+os.sep+str(it) + if not os.path.exists(it_prefix): + os.makedirs(it_prefix) + + next_intensity_model=MriDatasetRegress(prefix=prefix, name='model_intensity',iter=it, N=N_int,nomask=nomask) + next_velocity_model=MriDatasetRegress(prefix=prefix, name='model_velocity', iter=it, N=N_vel, nomask=True) + + + intensity_residual=MriDataset(prefix=prefix, scan= next_intensity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + #name=next_intensity_model.name, iter=it ) + + velocity_residual =MriDataset(prefix=prefix, scan= next_velocity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + #name=next_velocity_model.name, iter=it ) + + # skip over existing models here! + + if not next_intensity_model.exists() or \ + not next_velocity_model.exists() or \ + not intensity_residual.exists() or \ + not velocity_residual.exists(): + + intensity_estimate=[] + velocity_estimate=[] + r=[] + + + # 1 for each sample generate current approximation + # 2. perform non-linear registration between each sample and sample-specific approximation + # 3. 
update transformation + # 1+2+3 - all together + for (i, s) in enumerate(samples): + sample_velocity= LDDMriTransform(name=s.name,prefix=it_prefix,iter=it) + sample_intensity= MriDataset(name=s.name,prefix=it_prefix,iter=it) + + previous_velocity=None + + if refine and it>1 and (not remove0): + previous_velocity=prev_velocity_estimate[i] + + r.append( + futures.submit( + non_linear_register_step_regress_ldd, + s, + current_intensity_model, + current_velocity_model, + None, + sample_velocity, + parameters=parameters, + level=p['level'], + start_level=_start_level, + work_dir=prefix, + downsample=downsample, + debug=debug, + previous_velocity=previous_velocity, + incremental=incremental, + remove0=remove0, + sym=sym + ) + ) + velocity_estimate.append(sample_velocity) + #intensity_estimate.append(sample_intensity) + + # wait for jobs to finish + futures.wait(r, return_when=futures.ALL_COMPLETED) + avg_inv_transform=None + + if debias: + # here all the transforms should exist + avg_inv_transform=LDDMriTransform(name='avg_inv',prefix=it_prefix,iter=it) + # 2 average all transformations + average_transforms_ldd(velocity_estimate, avg_inv_transform, symmetric=False, invert=True) + + corr=[] + corr_transforms=[] + corr_samples=[] + + # 3 concatenate correction and resample + for (i, s) in enumerate(samples): + c=MriDataset(prefix=it_prefix,iter=it,name=s.name) + x=LDDMriTransform(name=s.name+'_corr',prefix=it_prefix,iter=it) + + corr.append(futures.submit(concat_resample_ldd, + s, velocity_estimate[i], avg_inv_transform, + c, x, + model=ref_model, + symmetric=False, + qc=qc, + invert_transform=True )) + corr_transforms.append(x) + corr_samples.append(c) + + futures.wait(corr, return_when=futures.ALL_COMPLETED) + + # 4. perform regression and create new estimate + # 5. calculate residulas (?) + # 4+5 + result=futures.submit(voxel_regression, + intensity_design_matrix, velocity_design_matrix, + corr_samples, corr_transforms, + next_intensity_model, next_velocity_model, + intensity_residual, velocity_residual, + blur_int_model=blur_int_model, + blur_vel_model=blur_vel_model, + qc=qc + ) + futures.wait([result], return_when=futures.ALL_COMPLETED) + + # 6. 
cleanup
+                if cleanup :
+                    print("Cleaning up iteration: {}".format(it))
+                    for i in velocity_estimate:
+                        i.cleanup()
+                    for i in corr_samples:
+                        i.cleanup()
+                    if prev_velocity_estimate is not None:
+                        for i in prev_velocity_estimate:
+                            i.cleanup()
+                    if debias:
+                        avg_inv_transform.cleanup()
+                else:
+                    # files were there, reuse them
+                    print("Iteration {} already performed, skipping".format(it))
+                    corr_transforms=[]
+                    # this is a hack right now
+                    for (i, s) in enumerate(samples):
+                        x=LDDMriTransform(name=s.name+'_corr',prefix=it_prefix,iter=it)
+                        corr_transforms.append(x)
+
+                intensity_models.append(current_intensity_model)
+                velocity_models.append(current_velocity_model)
+                intensity_residuals.append(intensity_residual)
+                velocity_residuals.append(velocity_residual)
+
+                current_intensity_model=next_intensity_model
+                current_velocity_model=next_velocity_model
+
+
+                result=futures.submit(average_stats_regression,
+                                      current_intensity_model, current_velocity_model,
+                                      intensity_residual, velocity_residual )
+                residuals.append(result)
+
+                regression_results={
+                    'intensity_model': current_intensity_model,
+                    'velocity_model':  current_velocity_model,
+                    'intensity_residuals': intensity_residual.scan,
+                    'velocity_residuals':  velocity_residual.scan,
+                    }
+                with open(prefix+os.sep+'results_{:03d}.json'.format(it),'w') as f:
+                    json.dump(regression_results,f,indent=1, cls=LDDMRIEncoder)
+
+                # save for next iteration
+                # TODO: regularize?
+                prev_velocity_estimate=corr_transforms # have to use adjusted velocity estimate
+
+        # copy output to the destination
+        futures.wait(residuals, return_when=futures.ALL_COMPLETED)
+        with open(prefix+os.sep+'stats.txt','w') as f:
+            for s in residuals:
+                f.write("{}\n".format(s.result()))
+
+
+        with open(prefix+os.sep+'results_final.json','w') as f:
+            json.dump(regression_results, f, indent=1, cls=LDDMRIEncoder)
+
+
+        if cleanup_intermediate:
+            for i in range(len(intensity_models)-1):
+                intensity_models[i].cleanup()
+                velocity_models[i].cleanup()
+                intensity_residuals[i].cleanup()
+                velocity_residuals[i].cleanup()
+            # delete unneeded models
+            #shutil.rmtree(prefix+os.sep+'reg')
+
+        return regression_results
+    except mincError as e:
+        print("Exception in regress_ldd:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in regress_ldd:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def regress_ldd_csv(input_csv,
+                    int_par_count=None,
+                    model=None,
+                    mask=None,
+                    work_prefix=None, options={},
+                    regress_model=None):
+    """convenience function to run model generation using CSV input file and a fixed init"""
+    internal_sample=[]
+
+    with open(input_csv, 'r') as csvfile:
+        reader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
+        for row in reader:
+
+            par=[ float(i) for i in row[2:] ]
+            par_vel=par
+            par_int=par
+
+            if int_par_count is not None:
+                par_int=par[:int_par_count]
+                par_vel=par[int_par_count:]
+            _mask=row[1]
+            if _mask=='':
+                _mask=None
+            internal_sample.append( MriDataset(scan=row[0], mask=_mask, par_int=par_int, par_vel=par_vel) )
+
+    internal_model=None
+    initial_intensity_model=None
+    initial_velocity_model=None
+
+    if regress_model is None:
+        if model is not None:
+            internal_model=MriDataset(scan=model, mask=mask)
+    else:
+        # assume that regress_model is an array
+        initial_intensity_model=MriDatasetRegress(prefix=work_prefix, name='initial_model_intensity', N=len(regress_model))
+        initial_intensity_model.volume=regress_model
+        initial_intensity_model.mask=mask
+
+        initial_intensity_model.protect=True
+        initial_velocity_model=None
+
+
+    if work_prefix is not None and not os.path.exists(work_prefix):
+        os.makedirs(work_prefix)
+
+    return regress_ldd( internal_sample,
+                        initial_model=internal_model,
+                        prefix=work_prefix,
+                        options=options,
+                        initial_intensity_model=initial_intensity_model,
+                        initial_velocity_model=initial_velocity_model)
+
+
+def regress_ldd_simple(input_samples,
+                       int_design_matrix,
+                       geo_design_matrix,
+                       model=None,
+                       mask=None,
+                       work_prefix=None, options={},
+                       regress_model=None):
+    """convenience function to run model generation using a list of input samples, design matrices and a fixed init"""
+    internal_sample=[]
+
+    for (i,j) in enumerate(input_samples):
+
+        internal_sample.append( MriDataset(scan=j[0], mask=j[1],
+                                           par_int=int_design_matrix[i],
+                                           par_vel=geo_design_matrix[i])
+                                )
+
+    internal_model=None
+    initial_intensity_model=None
+    initial_velocity_model=None
+
+    if regress_model is None:
+        if model is not None:
+            internal_model=MriDataset(scan=model, mask=mask)
+    else:
+        # assume that regress_model is an array
+        initial_intensity_model=MriDatasetRegress(prefix=work_prefix, name='initial_model_intensity', N=len(regress_model))
+        initial_intensity_model.volume=regress_model
+        initial_intensity_model.mask=mask
+
+        initial_intensity_model.protect=True
+        initial_velocity_model=None
+
+    if work_prefix is not None and not os.path.exists(work_prefix):
+        os.makedirs(work_prefix)
+
+    return regress_ldd( internal_sample,
+                        initial_model=internal_model,
+                        prefix=work_prefix,
+                        options=options,
+                        initial_intensity_model=initial_intensity_model,
+                        initial_velocity_model=initial_velocity_model)
+
+
+
+def build_estimate(description_json, parameters, output_prefix, int_par_count=None):
+    desc=None
+    with open(description_json, 'r') as f:
+        desc=json.load(f)
+    intensity_parameters=parameters
+    velocity_parameters=parameters
+
+    if int_par_count is not None:
+        intensity_parameters=parameters[:int_par_count]
+        velocity_parameters=parameters[int_par_count:]
+
+    if len(velocity_parameters)!=len(desc["velocity_model"]["volume"]) or \
+       len(intensity_parameters)!=len(desc["intensity_model"]["volume"]):
+
+        print(desc["intensity_model"]["volume"])
+        print("intensity_parameters={}".format(repr(intensity_parameters)))
+
+        print(desc["velocity_model"]["volume"])
+        print("velocity_parameters={}".format(repr(velocity_parameters)))
+
+        raise mincError("{} inconsistent number of parameters, expected {}".
+ format(repr(intensity_parameters), + len(desc["velocity_model"]["volume"]))) + + velocity=MriDatasetRegress(from_dict=desc["velocity_model"]) + intensity=MriDatasetRegress(from_dict=desc["intensity_model"]) + + output_scan=MriDataset(prefix=os.path.dirname(output_prefix),name=os.path.basename(output_prefix)) + output_transform=LDDMriTransform(prefix=os.path.dirname(output_prefix),name=os.path.basename(output_prefix)) + + build_approximation(intensity, velocity, + intensity_parameters, velocity_parameters, + output_scan, output_transform) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model_ldd/resample_ldd.py b/ipl/model_ldd/resample_ldd.py new file mode 100644 index 0000000..33b697d --- /dev/null +++ b/ipl/model_ldd/resample_ldd.py @@ -0,0 +1,118 @@ +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +from .filter_ldd import * + + +# scoop parallel execution +from scoop import futures, shared + + +def concat_resample_ldd( + input_mri, + input_transform, + corr_transform, + output_mri, + output_transform, + model, + symmetric=False, + qc=False, + bias=None, + invert_transform=False, + datatype='short' + ): + """apply correction transformation and resample input""" + try: + with mincTools() as m: + + if not ( os.path.exists(output_mri.scan) and os.path.exists(output_transform.vel) ): + scan=input_mri.scan + + if bias is not None: + m.calc([input_mri.scan,bias.scan],'A[0]*A[1]',m.tmp('corr.mnc')) + scan=m.tmp('corr.mnc') + + if corr_transform is not None: + m.calc([input_transform.vel, corr_transform.vel],'A[0]+A[1]', output_transform.vel, datatype='-'+datatype) + else: + # TODO: copy? + m.calc([input_transform.vel ],'A[0]', output_transform.vel, datatype='-'+datatype) + + m.resample_smooth_logspace(scan, output_mri.scan, + velocity=output_transform.vel, + like=model, + invert_transform=invert_transform, + datatype=datatype) + + if input_mri.mask is not None and output_mri.mask is not None: + m.resample_labels_logspace(input_mri.mask, + output_mri.mask, + velocity=output_transform.vel, + like=model, + invert_transform=invert_transform) + if qc: + m.qc(output_mri.scan, output_mri.scan+'.jpg', + mask=output_mri.mask) + else: + if qc: + m.qc(output_mri.scan, output_mri.scan+'.jpg') + + if qc: + m.grid_magnitude(output_transform.vel, + m.tmp('velocity_mag.mnc')) + + m.qc(m.tmp('velocity_mag.mnc'), output_mri.scan+'_vel.jpg') + + if symmetric: + scan_f=input_mri.scan_f + + if bias is not None: + m.calc([input_mri.scan_f,bias.scan_f],'A[0]*A[1]', + m.tmp('corr_f.mnc'),datatype='-'+datatype) + scan_f=m.tmp('corr_f.mnc') + + if corr_transform is not None: + m.calc([input_transform.vel_f, corr_transform.vel],'A[0]+A[1]', output_transform.vel_f, datatype='-'+datatype) + else: + m.calc([input_transform.vel_f],'A[0]', output_transform.vel_f, datatype='-'+datatype) + + m.resample_smooth_logspace(scan_f, output_mri.scan_f, + velocity=output_transform.vel_f, + like=model, + invert_transform=invert_transform, + datatype=datatype) + + if input_mri.mask is not None and output_mri.mask is not None: + m.resample_labels_logspace(input_mri.mask_f, + output_mri.mask_f, + velocity=output_transform.vel_f, + like=model, + invert_transform=invert_transform) + if qc: + m.qc(output_mri.scan_f,output_mri.scan_f+'.jpg', + mask=output_mri.mask_f) + else: + if qc: + m.qc(output_mri.scan_f,output_mri.scan_f+'.jpg') + + if qc: + 
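+                # QC for the deformation itself: grid_magnitude converts the
+                # flipped velocity field into a per-voxel magnitude volume,
+                # which is then rendered to a .jpg preview next to the scan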
m.grid_magnitude(output_transform.vel_f, + m.tmp('velocity_mag_f.mnc')) + + m.qc(m.tmp('velocity_mag_f.mnc'), output_mri.scan_f+'_vel.jpg' ) + + except mincError as e: + print "Exception in concat_resample_ldd:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in concat_resample_ldd:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model_ldd/structures_ldd.py b/ipl/model_ldd/structures_ldd.py new file mode 100644 index 0000000..0317453 --- /dev/null +++ b/ipl/model_ldd/structures_ldd.py @@ -0,0 +1,181 @@ +# data structures used in model generation package + +import shutil +import os +import sys +import traceback +import json + +class MriDataset(object): + """ + Hold MRI sample together with regression parameters + """ + def __init__(self, prefix=None, name=None, iter=None, scan=None, mask=None, protect=False, par_int=[],par_vel=[]): + self.prefix=prefix + self.name=name + self.iter=iter + self.protect=protect + self.scan_f=None + self.mask_f=None + self.par_int=par_int + self.par_vel=par_vel + + if scan is None: + if self.iter is None: + self.scan=self.prefix+os.sep+self.name+'.mnc' + self.mask=self.prefix+os.sep+self.name+'_mask.mnc' + self.scan_f=self.prefix+os.sep+self.name+'_f.mnc' + self.mask_f=self.prefix+os.sep+self.name+'_f_mask.mnc' + else: + self.scan=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'.mnc' + self.mask=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_mask.mnc' + self.scan_f=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f.mnc' + self.mask_f=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f_mask.mnc' + else: + self.scan=scan + self.mask=mask + + if self.name is None: + self.name=os.path.basename(self.scan) + + if self.prefix is None: + self.prefix=os.path.dirname(self.scan) + + def __repr__(self): + return 'MriDataset(prefix="{}",name="{}",iter="{}",scan="{}",mask="{}",protect={},par_int={},par_val={})'.\ + format(self.prefix,self.name,repr(self.iter),self.scan,self.mask,repr(self.protect),repr(self.par_int),repr(self.par_vel)) + + def cleanup(self): + """ + Remove files, use if they are not needed anymore + """ + if not self.protect: + for i in (self.scan, self.mask, self.scan_f, self.mask_f ): + if i is not None and os.path.exists(i): + os.unlink(i) + + def exists(self): + _ex=True + for i in (self.scan, self.mask, self.scan_f, self.mask_f ): + if i is not None : + _ex&=os.path.exists(i) + return _ex + + +class MriDatasetRegress(object): + def __init__(self, prefix=None, name=None, iter=None, N=1, protect=False, from_dict=None, nomask=False): + if from_dict is None: + self.prefix=prefix + self.name=name + self.iter=iter + self.protect=protect + self.N=N + self.volume=[] + + if self.iter is None: + for n in range(0,N): + self.volume.append(self.prefix+os.sep+self.name+'_{}.mnc'.format(n)) + self.mask=self.prefix+os.sep+self.name+'_mask.mnc' + else: + for n in range(0,N): + self.volume.append(self.prefix+os.sep+self.name+'.{:03d}_{}'.format(iter,n)+'.mnc') + self.mask=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_mask.mnc' + if nomask: + self.mask=None + else: # simple hack for now + self.volume=from_dict["volume"] + self.iter=from_dict["iter"] + self.name=from_dict["name"] + self.mask=from_dict["mask"] + self.N=len(self.volume) + + def __repr__(self): + return 
'MriDatasetRegress(prefix="{}",name="{}",volume={},mask={},iter="{}",protect={})'.\ + format(self.prefix, self.name, repr(self.volume), self.mask, repr(self.iter), repr(self.protect)) + + def cleanup(self): + if not self.protect: + for i in self.volume: + if i is not None and os.path.exists(i): + os.unlink(i) + for i in [self.mask]: + if i is not None and os.path.exists(i): + os.unlink(i) + + def exists(self): + """ + Check that all files are present + """ + _ex=True + for i in self.volume: + if i is not None : + _ex&=os.path.exists(i) + + for i in [self.mask]: + if i is not None : + _ex&=os.path.exists(i) + + return _ex + +class LDDMriTransform(object): + """ + Store log-diffemorphic transforation + """ + def __init__(self,prefix,name,iter=None): + self.prefix=prefix + self.name=name + self.iter=iter + self.vel_f=None + + if self.iter is None: + self.vel= self.prefix+os.sep+self.name+'_vel.mnc' + self.vel_f= self.prefix+os.sep+self.name+'_f_vel_0.mnc' + else: + self.vel= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_vel.mnc' + self.vel_f= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f_vel.mnc' + + def __repr__(self): + return 'LDDMriTransform(prefix="{}",name="{}",iter="{}")'.\ + format(self.prefix,self.name,repr(self.iter)) + + def cleanup(self): + for i in (self.vel, self.vel_f): + if i is not None and os.path.exists(i): + os.unlink(i) + + def exists(self): + _ex=True + for i in (self.vel, self.vel_f): + if i is not None : + _ex&=os.path.exists(i) + return _ex + + +class LDDMRIEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, LDDMriTransform): + return {'name':obj.name, + 'iter':obj.iter, + 'vel' :obj.vel, + 'vel_f':obj.vel_f + } + elif isinstance(obj, MriDatasetRegress): + return {'name':obj.name, + 'iter':obj.iter, + 'volume':obj.volume, + 'mask':obj.mask + } + elif isinstance(obj, MriDataset): + return {'name':obj.name, + 'iter':obj.iter, + 'scan':obj.scan, + 'mask':obj.mask, + 'scan_f':obj.scan_f, + 'mask_f':obj.mask_f, + 'par_int':obj.par_int, + 'par_vel':obj.par_vel + } + # Let the base class default method raise the TypeError + return json.JSONEncoder.default(self, obj) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/qc/metric.py b/ipl/qc/metric.py new file mode 100755 index 0000000..942d703 --- /dev/null +++ b/ipl/qc/metric.py @@ -0,0 +1,153 @@ +#! /usr/bin/env python +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date +# + +from __future__ import print_function + +import argparse +import shutil +import os +import sys +import csv +import copy +import json + +#import minc + +import ipl.elastix_registration +import ipl.minc_tools as minc_tools + +import numpy as np + +def parse_options(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Run registration metric") + + parser.add_argument("--verbose", + action="store_true", + default=False, + help="Be verbose", + dest="verbose") + + parser.add_argument("source", + help="Source file") + + parser.add_argument("target", + help="Target file") + + parser.add_argument("output", + help="Save output in a file") + + parser.add_argument("--exact", + action="store_true", + default=False, + help="Use exact metric", + dest="exact") + + parser.add_argument("--xfm", + help="Apply transform to source before running metric", + default=None) + + parser.add_argument("--random", + help="Apply random transform to source before running metric", + default=False, + action="store_true" + ) + + options = parser.parse_args() + return options + + +def extract_part(inp,outp,info, x=None, y=None, z=None,parts=None): + # + with minc_tools.mincTools() as minc: + ranges=[ + 'zspace={},{}'.format( info['zspace'].length/parts*z , info['zspace'].length/parts ), + 'yspace={},{}'.format( info['yspace'].length/parts*y , info['yspace'].length/parts ), + 'xspace={},{}'.format( info['xspace'].length/parts*x , info['xspace'].length/parts ) + ] + minc.reshape(inp, outp, dimrange=ranges ) + +if __name__ == "__main__": + options = parse_options() + metric = 'NormalizedMutualInformation' + sampler = 'Grid' + if options.source is None or options.target is None or options.output is None: + print("Error in arguments, run with --help") + print(repr(options)) + else: + #src = minc.Image(options.source, dtype=np.float32).data + #trg = minc.Image(options.target, dtype=np.float32).data + measures=[] + with minc_tools.mincTools() as minc: + # + _source=options.source + if options.xfm is not None: + _source=minc.tmp("source.mnc") + minc.resample_smooth(options.source,_source,transform=options.xfm,like=options.target) + + + measures={ + 'source':options.source, + 'target':options.target, + + } + + + if options.random: + xfm=minc.tmp('random.xfm') + rx=np.random.random_sample()*20.0-10.0 + ry=np.random.random_sample()*20.0-10.0 + rz=np.random.random_sample()*20.0-10.0 + tx=np.random.random_sample()*20.0-10.0 + ty=np.random.random_sample()*20.0-10.0 + tz=np.random.random_sample()*20.0-10.0 + minc.param2xfm(xfm,translation=[tx,ty,tz],rotations=[rx,ry,rz]) + measures['rot']=[rx,ry,rz] + measures['tran']=[tx,ty,tz] + _source=minc.tmp("source.mnc") + minc.resample_smooth(options.source,_source,transform=xfm,like=options.target) + + src_info=minc.mincinfo(_source) + trg_info=minc.mincinfo(options.target) + # + parts=3 + os.environ['MINC_COMPRESS']='0' + + parameters={'metric':metric, + 'resolutions':1, + 'pyramid': '1 1 1', + 'measure': True, + 'sampler': sampler, + 'grid_spacing': '3 3 3', + 'exact_metric': options.exact, + 'iterations': 1, + 'new_samples': False, + 'optimizer': "AdaptiveStochasticGradientDescent", + } + # + measures['sim']={'whole':ipl.elastix_registration.register_elastix(_source, options.target, parameters=parameters, nl=False)} + + for z in range(parts): + for y in range(parts): + for x in range(parts): + # + # extract part + src=minc.tmp("src_{}_{}_{}.mnc".format(x,y,z)) + trg=minc.tmp("trg_{}_{}_{}.mnc".format(x,y,z)) + 
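+                    # the metric is evaluated once per sub-block: extract_part()
+                    # cuts the same {x,y,z} region out of source and target and
+                    # elastix scores it, giving a local similarity map keyed
+                    # "x_y_z" in addition to the whole-volume measure above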
#print(1) + extract_part(_source,src,src_info,x=x,y=y,z=z,parts=parts) + #print(2) + extract_part(options.target,trg,trg_info,x=x,y=y,z=z,parts=parts) + # run elastix measurement + k="{}_{}_{}".format(x,y,z) + measures['sim'][k]=ipl.elastix_registration.register_elastix(src, trg, parameters=parameters, nl=False) + # TODO: parallelize? + with open(options.output,'w') as f: + json.dump(measures,f,indent=2) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/registration.py b/ipl/registration.py new file mode 100644 index 0000000..e265d5f --- /dev/null +++ b/ipl/registration.py @@ -0,0 +1,853 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date 29/06/2015 +# +# registration tools + + +from __future__ import print_function + +import os +import sys +import shutil +import tempfile +import subprocess +import re +import fcntl +import traceback +import collections +import math +import argparse +# local stuff +import minc_tools + + +# hack to make it work on Python 3 +try: + unicode = unicode +except NameError: + # 'unicode' is undefined, must be Python 3 + str = str + unicode = str + bytes = bytes + basestring = (str,bytes) +else: + # 'unicode' exists, must be Python 2 + str = str + unicode = unicode + bytes = str + basestring = basestring + + + +linear_registration_config={ + 'bestlinreg': [ + { "blur" : "blur", + "trans" : ['-est_translations'], + "blur_fwhm" : 16, + "steps" : [8, 8, 8], + "tolerance" : 0.01, + "simplex" : 32 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4, 4, 4], + "tolerance" : 0.004, + "simplex" : 16 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4, 4, 4], + "tolerance" : 0.004, + "simplex" : 8 }, + + { "blur" : "dxyz", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4, 4, 4], + "tolerance" : 0.004, + "simplex" : 4 }, + + { "blur" : "dxyz", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4, 4, 4], + "tolerance" : 0.004, + "simplex" : 2 } + ], + + 'bestlinreg_s': [ + { "blur" : "blur", + "trans" : ['-est_translations'], + "blur_fwhm" : 16, + "steps" : [8,8,8], + "tolerance" : 0.01, + "simplex" : 32 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 16 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4,4,4], + "tolerance" : 0.0001, + "simplex" : 16 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4,4,4], + "tolerance" : 0.0001, + "simplex" : 8 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 2, + "steps" : [2,2,2], + "tolerance" : 0.0005, + "simplex" : 4 } + ], + + 'bestlinreg_s2': [ + { "blur" : "blur", + "trans" : ['-est_translations'], + "blur_fwhm" : 16, + "steps" : [8,8,8], + "tolerance" : 0.01, + "simplex" : 32 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 16 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 8 }, + + { "blur" : "dxyz", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 4 }, + + { "blur" : "dxyz", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 2 } + ], + + 'experiment_1': [ + { "blur" : "blur", + "trans" : ['-est_translations'], + "blur_fwhm" : 8, + "steps" : [8,8,8], + "tolerance" : 0.01, + "simplex" : 32 }, + + { 
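+      # each entry is one fitting stage: 'blur'/'blur_fwhm' select the
+      # pre-smoothed input ('dxyz' = gradient magnitude), 'steps' the
+      # minctracc sampling grid, 'simplex' the optimizer radius; each
+      # stage starts from the transform estimated by the previous one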
"blur" : "blur", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 16 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 8 }, + + { "blur" : "dxyz", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 4 }, + + { "blur" : "dxyz", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 2 } + ], + + 'bestlinreg_new': [ # re-imelementation from Claude's bestlinreg ~ 2016-12-01 + { 'blur' : "blur", # -lsq7 scaling only + 'parameters' : "-lsq6", + 'trans' : ['-est_translations'], + 'blur_fwhm' : 8, + 'steps' : [4, 4, 4], + 'tolerance' : 0.0001, + 'simplex' : 16 }, + + { 'blur' : "blur", # -lsqXX full options + 'parameters' : "-lsq7", + 'trans' : None, + 'blur_fwhm' : 8, + 'steps' : [4, 4, 4], + 'tolerance' : 0.0001, + 'simplex' : 16 }, + + { 'blur' : "blur", + 'trans' : None, + 'blur_fwhm' : 4, + 'steps' : [4, 4, 4], + 'tolerance' : 0.0001, + 'simplex' : 8 }, + + { 'blur' : "blur", + 'trans' : None, + 'blur_fwhm' : 2, + 'steps' : [2, 2, 2], + 'tolerance' : 0.0005, + 'simplex' : 4 } + ] + + + } + + +def linear_register( + source, + target, + output_xfm, + parameters=None, + source_mask=None, + target_mask=None, + init_xfm=None, + objective=None, + conf=None, + debug=False, + close=False, + norot=False, + noshear=False, + noshift=False, + noscale=False, + work_dir=None, + start=None, + downsample=None, + verbose=0 + ): + """Perform linear registration, replacement for bestlinreg.pl script + + Args: + source - name of source minc file + target - name of target minc file + output_xfm - name of output transformation file + parameters - registration parameters (optional), can be + '-lsq6', '-lsq9', '-lsq12' + source_mask - name of source mask file (optional) + target_mask - name of target mask file (optional) + init_xfm - name of initial transformation file (optional) + objective - name of objective function (optional), could be + '-xcorr' (default), '-nmi','-mi' + conf - configuration for iterative algorithm (optional) + array of dict, or a string describing a flawor + bestlinreg (default) + bestlinreg_s + bestlinreg_s2 + bestlinreg_new - Claude's latest and greatest + debug - debug flag (optional) , default False + close - closeness flag (optional) , default False + norot - disable rotation flag (optional) , default False + noshear - disable shear flag (optional) , default False + noshift - disable shift flag (optional) , default False + noscale - disable scale flag (optional) , default False + work_dir - working directory (optional) , default create one in temp + start - initial blurring level, default 16mm from configuration + downsample - downsample initial files to this step size, default None + verbose - verbosity level + Returns: + resulting XFM file + + Raises: + mincError when tool fails + """ + print("linear_register source_mask:{} target_mask:{}".format(source_mask,target_mask)) + + with minc_tools.mincTools(verbose=verbose) as minc: + if not minc.checkfiles(inputs=[source,target], outputs=[output_xfm]): + return + + # python version + if conf is None: + conf = linear_registration_config['bestlinreg'] + elif not isinstance(conf, list): # assume that it is a string + if conf in linear_registration_config: + conf = linear_registration_config[conf] + + if parameters is None: + parameters='-lsq9' + + if objective is None: + objective='-xcorr' + + if not isinstance(conf, list): # assume that it is a 
string
+            # assume it's external program's name
+            # else run internally
+            with minc_tools.mincTools() as m:
+                cmd=[conf,source,target,output_xfm]
+                if source_mask is not None:
+                    cmd.extend(['-source_mask',source_mask])
+                if target_mask is not None:
+                    cmd.extend(['-target_mask',target_mask])
+                if parameters is not None:
+                    cmd.append(parameters)
+                if objective is not None:
+                    cmd.append(objective)
+                if init_xfm is not None:
+                    cmd.extend(['-init_xfm',init_xfm])
+                m.command(cmd, inputs=[source,target], outputs=[output_xfm],verbose=2)
+            return output_xfm
+        else:
+
+            prev_xfm = None
+
+            s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+            t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+
+            source_lr=source
+            target_lr=target
+
+            source_mask_lr=source_mask
+            target_mask_lr=target_mask
+            # figure out what to do here:
+            with minc_tools.cache_files(work_dir=work_dir,context='reg') as tmp:
+                if downsample is not None:
+                    source_lr=tmp.cache(s_base+'_'+str(downsample)+'.mnc')
+                    target_lr=tmp.cache(t_base+'_'+str(downsample)+'.mnc')
+
+                    minc.resample_smooth(source,source_lr, unistep=downsample)
+                    minc.resample_smooth(target,target_lr, unistep=downsample)
+
+                    if source_mask is not None:
+                        source_mask_lr=tmp.cache(s_base+'_mask_'+str(downsample)+'.mnc')
+                        minc.resample_labels(source_mask,source_mask_lr,unistep=downsample,datatype='byte')
+                    if target_mask is not None:
+                        target_mask_lr=tmp.cache(t_base+'_mask_'+str(downsample)+'.mnc')
+                        minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte')
+
+
+                # a fitting we shall go...
+                for (i,c) in enumerate(conf):
+                    _parameters=parameters
+
+                    if 'parameters' in c and parameters!='-lsq6': # emulate Claude's approach
+                        _parameters=c.get('parameters')#'-lsq7'
+
+                    # set up intermediate files
+                    if start is not None and start>c['blur_fwhm']:
+                        continue
+                    elif close and c['blur_fwhm']>8:
+                        continue
+
+                    tmp_xfm = tmp.tmp(s_base+'_'+t_base+'_'+str(i)+'.xfm')
+
+                    tmp_source = source_lr
+                    tmp_target = target_lr
+
+                    if c['blur_fwhm']>0:
+                        tmp_source = tmp.cache(s_base+'_'+c['blur']+'_'+str(c['blur_fwhm'])+'.mnc')
+                        if not os.path.exists(tmp_source):
+                            minc.blur(source_lr,tmp_source,gmag=(c['blur']=='dxyz'), fwhm=c['blur_fwhm'])
+
+                        tmp_target = tmp.cache(t_base+'_'+c['blur']+'_'+str(c['blur_fwhm'])+'.mnc')
+                        if not os.path.exists(tmp_target):
+                            minc.blur(target_lr,tmp_target,gmag=(c['blur']=='dxyz'), fwhm=c['blur_fwhm'])
+
+                    # set up registration
+                    args =[ 'minctracc',
+                            tmp_source, tmp_target,'-clobber',
+                            _parameters ,
+                            objective ,
+                            '-simplex', c['simplex'],
+                            '-tol',     c['tolerance'] ]
+
+                    args.append('-step')
+                    args.extend(c['steps'])
+
+                    # Current transformation at this step
+                    if prev_xfm is not None:
+                        args.extend(['-transformation', prev_xfm])
+                    elif init_xfm is not None:
+                        args.extend(['-transformation',init_xfm,'-est_center'])
+                    elif close:
+                        args.append('-identity')
+                    else:
+                        # Initial transformation will be computed from the Principal axis
+                        # transformation (PAT).
+ if c['trans']=='-est_translations': + args.extend(c['trans']) + else : + # will use manual transformation based on shif of CoM, should be identical to '-est_translations' , but it's not + com_src=minc.stats(source,['-com','-world_only'],single_value=False) + com_trg=minc.stats(target,['-com','-world_only'],single_value=False) + diff=[com_trg[k]-com_src[k] for k in range(3)] + xfm=tmp.cache(s_base+'_init.xfm') + minc.param2xfm(xfm,translation=diff) + args.extend(['-transformation',xfm]) + + # masks (even if the blurred image is masked, it's still preferable + # to use the mask in minctracc) + if source_mask is not None: + args.extend(['-source_mask',source_mask_lr]) + if target_mask is not None: + args.extend(['-model_mask',target_mask_lr]) + + if noshear: + args.extend( ['-w_shear',0,0,0] ) + if noscale: + args.extend( ['-w_scales',0,0,0] ) + if noshift: + args.extend( ['-w_translations',0,0,0] ) + if norot: + args.extend( ['-w_rotations',0,0,0] ) + + # add files and run registration + args.append(tmp_xfm) + minc.command([str(ii) for ii in args],inputs=[tmp_source,tmp_target],outputs=[tmp_xfm]) + + prev_xfm = tmp_xfm + + shutil.copyfile(prev_xfm,output_xfm) + return output_xfm + +def linear_register_to_self( + source, + target, + output_xfm, + parameters=None, + mask=None, + target_talxfm=None, + init_xfm=None, + model=None, + modeldir=None, + close=False, + nocrop=False, + noautothreshold=False + ): + """perform linear registration, wrapper around mritoself + + """ + + # TODO convert mritoself to python (?) + with minc_tools.mincTools() as minc: + cmd = ['mritoself', source, target, output_xfm] + if parameters is not None: + cmd.append(parameters) + if mask is not None: + cmd.extend(['-mask', mask]) + if target_talxfm is not None: + cmd.extend(['-target_talxfm', target_talxfm]) + if init_xfm is not None: + cmd.extend(['-transform', init_xfm]) + if model is not None: + cmd.extend(['-model', model]) + if modeldir is not None: + cmd.extend(['-modeldir', modeldir]) + if close: + cmd.append('-close') + if nocrop: + cmd.append('-nocrop') + if noautothreshold: + cmd.append('-noautothreshold') + cmd.append('-nothreshold') + minc.command(cmd, inputs=[source, target], outputs=[output_xfm]) + + + +def non_linear_register_full( + source, target, output_xfm, + source_mask=None, + target_mask=None, + init_xfm= None, + level=4, + start=32, + parameters=None, + work_dir=None, + downsample=None + ): + """perform non-linear registration, multiple levels + Args: + source - name of source minc file + target - name of target minc file + output_xfm - name of output transformation file + source_mask - name of source mask file (optional) + target_mask - name of target mask file (optional) + init_xfm - name of initial transformation file (optional) + parameters - configuration for iterative algorithm dict (optional) + work_dir - working directory (optional) , default create one in temp + start - initial step size, default 32mm + level - final step size, default 4mm + downsample - downsample initial files to this step size, default None + + Returns: + resulting XFM file + + Raises: + mincError when tool fails + """ + with minc_tools.mincTools() as minc: + + if not minc.checkfiles(inputs=[source,target], + outputs=[output_xfm]): + return + + if parameters is None: + #print("Using default parameters") + parameters = { + 'cost': 'corrcoeff', + 'weight': 1, + 'stiffness': 1, + 'similarity': 0.3, + 'sub_lattice': 6, + + 'conf': [ + {'step' : 32.0, + 'blur_fwhm' : 16.0, + 'iterations' : 20, + 'blur' : 'blur', + }, + 
+                {'step'       : 16.0,
+                 'blur_fwhm'  : 8.0,
+                 'iterations' : 20,
+                 'blur'       : 'blur',
+                },
+                {'step'       : 12.0,
+                 'blur_fwhm'  : 6.0,
+                 'iterations' : 20,
+                 'blur'       : 'blur',
+                },
+                {'step'       : 8.0,
+                 'blur_fwhm'  : 4.0,
+                 'iterations' : 20,
+                 'blur'       : 'blur',
+                },
+                {'step'       : 6.0,
+                 'blur_fwhm'  : 3.0,
+                 'iterations' : 20,
+                 'blur'       : 'blur',
+                },
+                {'step'       : 4.0,
+                 'blur_fwhm'  : 2.0,
+                 'iterations' : 10,
+                 'blur'       : 'blur',
+                },
+                {'step'       : 2.0,
+                 'blur_fwhm'  : 1.0,
+                 'iterations' : 10,
+                 'blur'       : 'blur',
+                },
+                {'step'       : 1.0,
+                 'blur_fwhm'  : 1.0,
+                 'iterations' : 10,
+                 'blur'       : 'blur',
+                },
+                {'step'       : 1.0,
+                 'blur_fwhm'  : 0.5,
+                 'iterations' : 10,
+                 'blur'       : 'blur',
+                },
+                {'step'       : 0.5,
+                 'blur_fwhm'  : 0.25,
+                 'iterations' : 10,
+                 'blur'       : 'blur',
+                },
+                ]
+            }
+
+        prev_xfm  = None
+        prev_grid = None
+
+        s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+        t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+
+        source_lr=source
+        target_lr=target
+
+        source_mask_lr=source_mask
+        target_mask_lr=target_mask
+
+        # figure out what to do here:
+        with minc_tools.cache_files(work_dir=work_dir,context='reg') as tmp:
+            # a fitting we shall go...
+            if downsample is not None:
+                source_lr=tmp.cache(s_base+'_'+str(downsample)+'.mnc')
+                target_lr=tmp.cache(t_base+'_'+str(downsample)+'.mnc')
+
+                minc.resample_smooth(source,source_lr,unistep=downsample)
+                minc.resample_smooth(target,target_lr,unistep=downsample)
+
+                if source_mask is not None:
+                    source_mask_lr=tmp.cache(s_base+'_mask_'+str(downsample)+'.mnc')
+                    minc.resample_labels(source_mask,source_mask_lr,unistep=downsample,datatype='byte')
+                if target_mask is not None:
+                    target_mask_lr=tmp.cache(t_base+'_mask_'+str(downsample)+'.mnc')
+                    minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte')
+
+            for (i,c) in enumerate(parameters['conf']):
+
+                if c['step']>start:
+                    continue
+                elif c['step']<level:
+                    break
+
+                # set up intermediate files
+                tmp_xfm  = tmp.tmp(s_base+'_'+t_base+'_'+str(i)+'.xfm')
+                tmp_grid = tmp.tmp(s_base+'_'+t_base+'_'+str(i)+'_grid_0.mnc')
+
+                tmp_source = source_lr
+                tmp_target = target_lr
+
+                if c['blur_fwhm']>0:
+                    tmp_source = tmp.cache(s_base+'_'+c['blur']+'_'+str(c['blur_fwhm'])+'.mnc')
+
+                    if not os.path.exists(tmp_source):
+                        minc.blur(source_lr,tmp_source,gmag=(c['blur']=='dxyz'),fwhm=c['blur_fwhm'])
+                        tmp.unlock(tmp_source)
+
+                    tmp_target = tmp.cache(t_base+'_'+c['blur']+'_'+str(c['blur_fwhm'])+'.mnc')
+                    if not os.path.exists(tmp_target):
+                        minc.blur(target_lr,tmp_target,gmag=(c['blur']=='dxyz'),fwhm=c['blur_fwhm'])
+                        tmp.unlock(tmp_target)
+
+                # set up registration
+                args =['minctracc', tmp_source,tmp_target,'-clobber',
+                       '-nonlinear',  parameters['cost'],
+                       '-weight',     parameters['weight'],
+                       '-stiffness',  parameters['stiffness'],
+                       '-similarity', parameters['similarity'],
+                       '-sub_lattice',parameters['sub_lattice'],
+                      ]
+
+                args.extend(['-iterations',   c['iterations'] ] )
+                args.extend(['-lattice_diam', c['step']*3.0, c['step']*3.0, c['step']*3.0 ] )
+                args.extend(['-step',         c['step'],     c['step'],     c['step'] ] )
+
+                if c['step']<4:
+                    args.append('-no_super')
+
+                # Current transformation at this step
+                if prev_xfm is not None:
+                    args.extend(['-transformation', prev_xfm])
+                elif init_xfm is not None:
+                    args.extend(['-transformation', init_xfm])
+                else:
+                    args.append('-identity')
+
+                # masks (even if the blurred image is masked, it's still preferable
+                # to use the mask in minctracc)
+                if source_mask is not None:
+                    args.extend(['-source_mask',source_mask_lr])
+                if target_mask is not None:
+                    args.extend(['-model_mask',target_mask_lr])
+
+                # add files and run registration
+                args.append(tmp_xfm)
+
+                minc.command([str(ii) for ii in args],
+                             inputs=[tmp_source,tmp_target],
+                             outputs=[tmp_xfm] )
+
+                prev_xfm  = tmp_xfm
+                prev_grid
= tmp_grid + + # done + if prev_xfm is None: + raise minc_tools.mincError("No iterations were performed!") + + # STOP-gap measure to save space for now + # TODO: fix minctracc? + # TODO: fix mincreshape too! + minc.calc([prev_grid],'A[0]',tmp.tmp('final_grid_0.mnc'),datatype='-float') + shutil.move(tmp.tmp('final_grid_0.mnc'),prev_grid) + + minc.param2xfm(tmp.tmp('identity.xfm')) + minc.xfmconcat([tmp.tmp('identity.xfm'),prev_xfm],output_xfm) + return output_xfm + +def non_linear_register_increment( + source, + target, + output_xfm, + source_mask=None, + target_mask=None, + init_xfm=None, + level=4, + parameters=None, + work_dir=None, + downsample=None + ): + """perform non-linear registration, increment right now there are no + difference with non_linear_register_full , + with start and level set to same value + Args: + source - name of source minc file + target - name of target minc file + output_xfm - name of output transformation file + source_mask - name of source mask file (optional) + target_mask - name of target mask file (optional) + init_xfm - name of initial transformation file (optional) + parameters - configuration for iterative algorithm dict (optional) + work_dir - working directory (optional) , default create one in temp + level - final step size, default 4mm + downsample - downsample initial files to this step size, default None + + Returns: + resulting XFM file + + Raises: + mincError when tool fails + """ + + return non_linear_register_full(source,target,output_xfm, + source_mask=source_mask, + target_mask=target_mask, + init_xfm=init_xfm, + level=level, + start=level, + parameters=parameters, + work_dir=work_dir, + downsample=downsample) + + + +def parse_options(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Run minctracc-based registration") + + parser.add_argument("--verbose", + action="store_true", + default=False, + help="Be verbose", + dest="verbose") + + parser.add_argument("source", + help="Source file") + + parser.add_argument("target", + help="Target file") + + parser.add_argument("output_xfm", + help="Output transformation file, xfm format") + + parser.add_argument("--source_mask", + default= None, + help="Source mask") + + parser.add_argument("--target_mask", + default= None, + help="Target mask") + + parser.add_argument("--init_xfm", + default = None, + help="Initial transformation, minc format") + + parser.add_argument("--work_dir", + default = None, + help="Work directory") + + parser.add_argument("--downsample", + default = None, + help="Downsample to given voxel size ", + type=float) + + parser.add_argument("--start", + default = None, + help="Start level of registration 32 for nonlinear, 16 for linear", + type=float) + + parser.add_argument("--level", + default = 4.0, + help="Final level of registration (nl)", + type=float) + + parser.add_argument("--nl", + action="store_true", + dest='nl', + help="Use nonlinear mode", + default=False) + + parser.add_argument("--lin", + help="Linear mode, default lsq6", + default='lsq6') + + parser.add_argument("--objective", + default="xcorr", + help="Registration objective function (linear)") + + parser.add_argument("--conf", + default="bestlinreg_s2", + help="Linear registrtion configuration") + + + options = parser.parse_args() + return options + + +if __name__ == "__main__": + options = parse_options() + + if options.source is None or options.target is None: + print("Error in arguments, run with --help") + print(repr(options)) + else: + + if 
options.nl : + if options.start is None: + options.start=32.0 + + non_linear_register_full( + options.source, options.target, options.output_xfm, + source_mask= options.source_mask, + target_mask= options.target_mask, + init_xfm = options.init_xfm, + start = options.start, + level = options.level, + work_dir = options.work_dir, + downsample = options.downsample) + else: + if options.start is None: + options.start=16.0 + _verbose=0 + if options.verbose: _verbose=2 + + linear_register( + options.source, options.target, options.output_xfm, + source_mask= options.source_mask, + target_mask= options.target_mask, + init_xfm = options.init_xfm, + #start = options.start, + work_dir = options.work_dir, + downsample = options.downsample, + objective = '-'+options.objective, + conf = options.conf, + parameters = '-'+options.lin, + verbose = _verbose + ) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80 diff --git a/ipl/segment/__init__.py b/ipl/segment/__init__.py new file mode 100644 index 0000000..00817a2 --- /dev/null +++ b/ipl/segment/__init__.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# +# image segmentation functions + +# internal funcions +from .error_correction import errorCorrectionTrain +from .cross_validation import errorCorrectionApply +from .structures import MriDataset +from .structures import MriTransform +from .labels import split_labels_seg +from .labels import merge_labels_seg +from .resample import resample_file +from .resample import resample_split_segmentations +from .resample import warp_rename_seg +from .resample import warp_sample +from .resample import concat_resample +from .registration import linear_registration +from .registration import non_linear_registration +from .model import create_local_model +from .model import create_local_model_flip +from .filter import apply_filter +from .filter import make_border_mask +from .filter import generate_flip_sample +from .library import save_library_info +from .library import load_library_info +from .train import generate_library +from .fuse import fusion_segment +from .train_ec import train_ec_loo +from .cross_validation import loo_cv_fusion_segment +from .cross_validation import full_cv_fusion_segment +from .cross_validation import cv_fusion_segment +from .cross_validation import run_segmentation_experiment +from .analysis import calc_similarity_stats + +__all__= ['generate_library', + 'load_library_info', + 'cv_fusion_segment', + 'fusion_segment', + 'train_ec_loo' ] + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/analysis.py b/ipl/segment/analysis.py new file mode 100644 index 0000000..478ce55 --- /dev/null +++ b/ipl/segment/analysis.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date +# + + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + + +def calc_similarity_stats( input_ground_truth, + input_segmentation, + output_stats=None, + relabel=None, + use_labels=None): + ''' + Calculate similarity stats + ''' + stats={} + + stats[ 'sample' ] = input_segmentation + stats[ 'ground_truth' ] = input_ground_truth + + cmd=['volume_gtc_similarity', input_ground_truth, input_segmentation,'--csv'] + + if use_labels: + cmd.extend(['--include', ','.join([str(i) for i in use_labels])]) + + with mincTools() as m: + sim = m.execute_w_output( cmd ).rstrip("\n").split(',') + + stats['gkappa'] = float(sim[0]) + stats['gtc'] = float(sim[1]) + stats['akappa'] = float(sim[2]) + + + sim = m.execute_w_output( + [ 'volume_similarity', input_ground_truth, input_segmentation,'--csv'] + ).split("\n") + + ka={} + se={} + sp={} + js={} + + for i in sim: + q=i.split(',') + if len(q)==5: + l=int(q[0]) + + if relabel is not None: + l=relabel[str(l)] + + ka[l] = float( q[1] ) + se[l] = float( q[2] ) + sp[l] = float( q[3] ) + js[l] = float( q[4] ) + + stats['ka']=ka + stats['se']=se + stats['sp']=sp + stats['js']=js + + if output_stats is not None: + with open(output_stats,'w') as f: + f.write("{},{},{},{}\n".format(stats['sample'],stats['gkappa'],stats['gtc'],stats['akappa'])) + + return stats + +def create_error_map(input_ground_truth, + input_segmentation, + output_maps, + lin_xfm=None, + nl_xfm=None, + template=None, + label_list=[] ): + try: + with mincTools( verbose=2 ) as m: + # go over labels and calculate errors per label + # + for (i,l) in enumerate(label_list): + # extract label error + out=m.tmp(str(l)+'.mnc') + xfm=None + + m.calc([input_segmentation, input_ground_truth], + "abs(A[0]-{})<0.5&&abs(A[1]-{})>0.5 || abs(A[0]-{})>0.5&&abs(A[1]-{})<0.5 ? 
1:0".format(l,l,l,l), + out, datatype='-byte') + + if lin_xfm is not None and nl_xfm is not None: + xfm=m.tmp(str(l)+'.xfm') + m.xfmconcat([lin_xfm,nl_xfm],xfm) + elif lin_xfm is not None: + xfm=lin_xfm + else: + xfm=nl_xfm + + m.resample_smooth(out,output_maps[i], + transform=xfm, + like=template, + order=1, + datatype='byte') + + except mincError as e: + print("Exception in split_labels:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in split_labels:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def average_error_maps(maps, out_avg): + try: + with mincTools( verbose=2 ) as m: + print("average_error_maps {} {}".format(repr(maps),repr(out_avg))) + m.average(maps, out_avg, datatype='-short') + except mincError as e: + print("Exception in split_labels:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in split_labels:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +def max_error_maps(maps, out_max): + try: + with mincTools( verbose=2 ) as m: + print("average_error_maps {} {}".format(repr(maps),repr(out_max))) + m.math(maps, 'max', out_max, datatype='-short') + except mincError as e: + print("Exception in split_labels:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in split_labels:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/cerebellum_qc_v5.lut b/ipl/segment/cerebellum_qc_v5.lut new file mode 100644 index 0000000..4d946f6 --- /dev/null +++ b/ipl/segment/cerebellum_qc_v5.lut @@ -0,0 +1,31 @@ +0 0.0 0.0 0.0 +1 1.0 0.678431372549 0.243137254902 +2 0.0 0.0 0.0 +3 0.847058823529 0.749019607843 0.847058823529 +4 0.858823529412 0.439215686275 0.576470588235 +5 1.0 0.0 1.0 +6 0.0 0.172549019608 0.592156862745 +7 0.0 0.980392156863 0.603921568627 +8 0.878431372549 0.752941176471 0.0 +9 0.803921568627 0.360784313725 0.360784313725 +10 0.372549019608 0.619607843137 0.627450980392 +11 1.0 0.270588235294 0.0 +12 0.690196078431 0.76862745098 0.870588235294 +13 1.0 0.0 0.0 +14 0.0 0.0 1.0 +15 0.0 1.0 1.0 +16 1.0 0.937254901961 0.835294117647 +17 0.803921568627 0.521568627451 0.247058823529 +18 0.0 0.545098039216 0.545098039216 +19 1.0 0.894117647059 0.882352941176 +20 0.866666666667 0.627450980392 0.866666666667 +21 1.0 0.250980392157 0.250980392157 +22 0.294117647059 0.0 0.509803921569 +23 0.235294117647 0.701960784314 0.443137254902 +24 0.933333333333 0.909803921569 0.666666666667 +25 0.898039215686 0.992156862745 0.0 +26 0.172549019608 1.0 0.211764705882 +27 1.0 0.545098039216 1.0 +28 1.0 0.0 0.427450980392 +29 0.823529411765 0.807843137255 0.0 +30 0.988235294118 0.976470588235 0.96862745098 diff --git a/ipl/segment/cross_validation.py b/ipl/segment/cross_validation.py new file mode 100644 index 0000000..d63e993 --- /dev/null +++ b/ipl/segment/cross_validation.py @@ -0,0 +1,504 @@ +import shutil +import os +import sys +import csv +import copy +import json +import random + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +# scoop parallel execution +from scoop import futures, shared + +from .fuse import * +from .structures import * +from .resample import * +from .train_ec import * +from .filter import * +from .analysis import * + +def run_segmentation_experiment( input_scan, + input_seg, + 
segmentation_library, + output_experiment, + segmentation_parameters={}, + debug=False, + mask=None, + work_dir=None, + ec_parameters=None, + ec_variant='ec', + fuse_variant='fuse', + regularize_variant='gc', + add=[], + cleanup=False, + presegment=None, + train_list=None): + """run a segmentation experiment: perform segmentation and compare with ground truth + + Arguments: + input_scan -- input scan object MriDataset + input_seg -- input segmentation file name (ground truth) + segmentation_library -- segmntation library object + output_experiment -- prefix for output + + Keyword arguments: + segmentation_parameters -- paramteres for segmentation algorithm, + debug -- debug flag, (default False) + mask -- mask file name to restrict segmentation , (default None) + work_dir -- work directory, (default None - use output_experiment) + ec_parameters -- error correction paramters, (default None) + ec_variant -- name of error correction parameters setting , (default 'ec') + fuse_variant -- name of fusion parameters, (default 'fuse' ) + regularize_variant -- name of regularization parameters, (default 'gc') + add -- additional modalities [T2w,PDw etc] + cleanup -- flag to clean most of the temporary files + presegment -- use pre-segmented result (when comparing with external tool) + """ + try: + relabel=segmentation_library.get("label_map",None) + + if relabel is not None and isinstance(relabel, list) : + _r={i[0]:i[1] for i in relabel} + relabel=_r + + if ec_parameters is not None: + _ec_parameters=copy.deepcopy(ec_parameters) + # let's train error correction! + + if work_dir is not None: + fuse_output=work_dir+os.sep+fuse_variant+'_'+regularize_variant + else: + fuse_output=output_experiment+os.sep+fuse_variant+'_'+regularize_variant + + _ec_parameters['work_dir']=fuse_output + _ec_parameters['output']=ec_output=fuse_output+os.sep+ec_variant+'.pickle' + _ec_parameters['variant']=ec_variant + + train_ec_loo( segmentation_library, + segmentation_parameters=copy.deepcopy(segmentation_parameters), + ec_parameters=_ec_parameters, + debug=debug, + fuse_variant=fuse_variant, + regularize_variant=regularize_variant, + cleanup=cleanup, + ext=(presegment is not None), + train_list=train_list ) + + segmentation_parameters['ec_options']=copy.deepcopy(ec_parameters) + segmentation_parameters['ec_options']['training']=ec_output + + if debug: + if not os.path.exists(os.path.dirname(output_experiment)): + os.makedirs(os.path.dirname(output_experiment)) + with open(output_experiment+'_par.json','w') as f: + json.dump(segmentation_parameters,f,indent=1) + + (output_file, output_info) = fusion_segment( + input_scan, + segmentation_library, + output_experiment, + input_mask=mask, + parameters=segmentation_parameters, + debug=debug, + work_dir=work_dir, + ec_variant=ec_variant, + fuse_variant=fuse_variant, + regularize_variant=regularize_variant, + add=add, + cleanup=cleanup, + presegment=presegment) + + stats = calc_similarity_stats( input_seg, output_file, + output_stats = output_experiment+'_stats.csv', + use_labels = output_info['used_labels'], + relabel = relabel ) + + remap = segmentation_library.get('map',{}) + labels_used=[] + error_maps=[] + + if any(remap): + for (i,j) in remap.items(): + labels_used.append( int(j) ) + else: + # assume binary mode + labels_used=[1] + + for i in labels_used: + error_maps.append( work_dir+os.sep+fuse_variant+'_'+regularize_variant+'_error_{:03d}.mnc'.format(i) ) + + lin_xfm=None + nl_xfm=None + if output_info['bbox_initial_xfm'] is not None: + 
lin_xfm=output_info['bbox_initial_xfm'].xfm + + if output_info['nonlinear_xfm'] is not None: + nl_xfm=output_info['nonlinear_xfm'].xfm + + create_error_map( input_seg, output_file, error_maps, + lin_xfm=lin_xfm, + nl_xfm=nl_xfm, + template=segmentation_library.get('local_model',None), + label_list=labels_used ) + + output_info['stats'] = stats + output_info['output'] = output_file + output_info['ground_truth']= input_seg + output_info['error_maps'] = error_maps + + if presegment is not None: + output_info['presegment']=presegment + + with open(output_experiment+'_out.json','w') as f: + json.dump(output_info,f,indent=1, cls=MRIEncoder) + + with open(output_experiment+'_stats.json','w') as f: + json.dump(stats,f,indent=1, cls=MRIEncoder) + + return (stats, output_info) + + except mincError as e: + print("Exception in run_segmentation_experiment:{}".format( str(e)) ) + traceback.print_exc( file=sys.stdout ) + raise + + except : + print("Exception in run_segmentation_experiment:{}".format( sys.exc_info()[0]) ) + traceback.print_exc( file=sys.stdout ) + raise + + +def loo_cv_fusion_segment(validation_library, + segmentation_library, + output, + segmentation_parameters, + ec_parameters=None, + debug=False, + ec_variant='ec', + fuse_variant='fuse', + cv_variant='cv', + regularize_variant='gc', + cleanup=False, + ext=False, + cv_iter=None): + '''Run leave-one-out cross-validation experiment''' + # for each N subjects run segmentation and compare + # Right now run LOOCV + if not os.path.exists(output): + try: + os.makedirs(output) + except: + pass # assume directory was created by competing process + + results=[] + results_json=[] + + modalities=segmentation_library.get('modalities',1)-1 + print("cv_iter={}".format(repr(cv_iter))) + + for (i,j) in enumerate(validation_library): + + n = os.path.basename(j[0]).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + output_experiment = output+os.sep+n+'_'+cv_variant + + validation_sample = j[0] + validation_segment = j[1] + + presegment=None + add=[] + + if ext: + presegment=j[2] + add=j[3:3+modalities] + else: + add=j[2:2+modalities] + + # remove training sample (?) 
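+        # e.g. with a hypothetical validation_library of [A, B, C] and i==1,
+        # the two lines below build _validation_library == [A, C], so the
+        # sample being segmented never contributes to its own training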
+        _validation_library=validation_library[0:i]
+        _validation_library.extend(validation_library[i+1:len(validation_library)])
+
+        experiment_segmentation_library=copy.deepcopy(segmentation_library)
+
+        # remove sample
+        experiment_segmentation_library['library']=[ _i for _i in segmentation_library['library'] if _i[0].find(n)<0 ]
+
+        if (cv_iter is None) or (i == cv_iter):
+            results.append( futures.submit(
+                run_segmentation_experiment,
+                validation_sample, validation_segment,
+                experiment_segmentation_library,
+                output_experiment,
+                segmentation_parameters=segmentation_parameters,
+                debug=debug,
+                work_dir=output+os.sep+'work_'+n+'_'+fuse_variant,
+                ec_parameters=ec_parameters,
+                ec_variant=ec_variant,
+                fuse_variant=fuse_variant,
+                regularize_variant=regularize_variant,
+                add=add,
+                cleanup=cleanup,
+                presegment=presegment,
+                train_list=_validation_library
+                ))
+        else:
+            results_json.append( (output_experiment+'_stats.json',
+                                  output_experiment+'_out.json') )
+
+    print("Waiting for {} jobs".format(len(results)))
+    futures.wait(results, return_when=futures.ALL_COMPLETED)
+
+    stat_results=[]
+    output_results=[]
+
+    if cv_iter is None:
+        stat_results  = [ _i.result()[0] for _i in results ]
+        output_results= [ _i.result()[1] for _i in results ]
+    elif cv_iter==-1:
+        # load results from the json files written by previous runs
+        for _i in results_json:
+            if os.path.exists(_i[0]) and os.path.exists(_i[1]): # VF: a hack
+                with open(_i[0],'r') as _f:
+                    stat_results.append(json.load(_f))
+                with open(_i[1],'r') as _f:
+                    output_results.append(json.load(_f))
+            else:
+                if not os.path.exists(_i[0]):
+                    print("Warning: missing file:{}".format(_i[0]))
+                if not os.path.exists(_i[1]):
+                    print("Warning: missing file:{}".format(_i[1]))
+
+    return (stat_results, output_results)
+
+
+def full_cv_fusion_segment(validation_library,
+                           segmentation_library,
+                           output,
+                           segmentation_parameters,
+                           cv_iterations,
+                           cv_exclude,
+                           ec_parameters=None,
+                           debug=False,
+                           ec_variant='ec',
+                           fuse_variant='fuse',
+                           cv_variant='cv',
+                           regularize_variant='gc',
+                           cleanup=False,
+                           ext=False,
+                           cv_iter=None):
+    if cv_iter is not None:
+        raise NotImplementedError("cv_iter is not supported in full CV mode")
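+    # sketch of the scheme implemented below: repeat cv_iterations times, each
+    # time drawing cv_exclude subjects at random (e.g. iterations=10, cv=2
+    # gives 10 rounds with 2 held-out segmentations each); each draw is cached
+    # in random_<cv_variant>_<iteration>.json so that reruns reuse the same split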
+ + validation_library_idx=range(len(validation_library)) + # randomly exlcude samples, repeat + results=[] + if not os.path.exists(output): + try: + os.makedirs(output) + except: + pass # assume directory was created by competing process + + modalities=segmentation_library.get('modalities',1)-1 + + for i in range( cv_iterations ): + #TODO: save this list in a file + rem_list=[] + ran_file=output+os.sep+ ('random_{}_{}.json'.format(cv_variant,i)) + + if not os.path.exists( ran_file ): + rem_list=random.sample( validation_library_idx, cv_exclude ) + + with open( ran_file , 'w') as f: + json.dump(rem_list,f) + else: + with open( ran_file ,'r') as f: + rem_list=json.load(f) + + # list of subjects + rem_items=[ validation_library[j] for j in rem_list ] + + rem_n=[os.path.basename(j[0]).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] for j in rem_items] + rem_lib=[] + val_lib=[] + + for j in rem_n: + rem_lib.extend( [ k for (k,t) in enumerate( segmentation_library['library'] ) if t[0].find(j)>=0 ] ) + val_lib.extend( [ k for (k,t) in enumerate( validation_library ) if t[0].find(j)>=0 ] ) + + + if debug: print(repr(rem_lib)) + rem_lib=set(rem_lib) + val_lib=set(val_lib) + + #prepare exclusion list + experiment_segmentation_library=copy.deepcopy(segmentation_library) + + experiment_segmentation_library['library']=\ + [ k for j,k in enumerate( segmentation_library['library'] ) if j not in rem_lib ] + + _validation_library=\ + [ k for j,k in enumerate( validation_library ) if j not in val_lib ] + + for j,k in enumerate(rem_items): + + output_experiment=output+os.sep+('{}_{}_{}'.format(i,rem_n[j],cv_variant)) + work_dir=output+os.sep+('work_{}_{}_{}'.format(i,rem_n[j],fuse_variant)) + + validation_sample=k[0] + validation_segment=k[1] + + presegment=None + shift=2 + + if ext: + presegment=k[2] + shift=3 + + results.append( futures.submit( + run_segmentation_experiment, validation_sample, validation_segment, + experiment_segmentation_library, + output_experiment, + segmentation_parameters=segmentation_parameters, + debug=debug, + work_dir=work_dir, + ec_parameters=ec_parameters, + ec_variant=ec_variant, + fuse_variant=fuse_variant, + regularize_variant=regularize_variant, + add=k[shift:shift+modalities], + cleanup=cleanup, + presegment=presegment, + train_list=_validation_library + )) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + stat_results = [ i.result()[0] for i in results ] + output_results = [ i.result()[1] for i in results ] + + return ( stat_results, output_results ) + + +def cv_fusion_segment( cv_parameters, + segmentation_library, + output, + segmentation_parameters, + ec_parameters=None, + debug=False, + cleanup=False, + ext=False, + extlib=None, + cv_iter=None ): + '''Run cross-validation experiment + for each N subjects run segmentation and compare + Right now run LOOCV or random CV + ''' + + # TODO: implement more realistic, random schemes + validation_library=cv_parameters['validation_library'] + + # maximum number of iterations + cv_iterations=cv_parameters.get('iterations',-1) + + # number of samples to exclude + cv_exclude=cv_parameters.get('cv',1) + + # use to distinguish different versions of error correction + ec_variant=cv_parameters.get('ec_variant','ec') + + # use to distinguish different versions of label fusion + fuse_variant=cv_parameters.get('fuse_variant','fuse') + + # use to distinguish different versions of cross-validation + cv_variant=cv_parameters.get('cv_variant','cv') + + # different version of label regularization + 
regularize_variant=cv_parameters.get('regularize_variant','gc')
+
+    cv_output=output+os.sep+cv_variant+'_stats.json'
+    res_output=output+os.sep+cv_variant+'_res.json'
+
+    if extlib is not None:
+        validation_library=extlib
+
+    # if a file name was passed instead of a list, load it as CSV
+    if not isinstance(validation_library, list):
+        with open(validation_library,'r') as f:
+            validation_library=list(csv.reader(f))
+
+    if cv_iter is not None:
+        cv_iter=int(cv_iter)
+
+    stat_results=None
+    output_results=None
+
+    if ext:
+        # TODO: move pre-processing here?
+        # pre-process presegmented scans here!
+        # we only need to re-create left-right flipped segmentation
+        pass
+
+    if cv_iterations==-1 and cv_exclude==1: # simple LOO cross-validation
+        (stat_results, output_results) = loo_cv_fusion_segment(validation_library,
+                                         segmentation_library,
+                                         output, segmentation_parameters,
+                                         ec_parameters=ec_parameters,
+                                         debug=debug,
+                                         cleanup=cleanup,
+                                         ec_variant=ec_variant,
+                                         fuse_variant=fuse_variant,
+                                         cv_variant=cv_variant,
+                                         regularize_variant=regularize_variant,
+                                         ext=ext,
+                                         cv_iter=cv_iter)
+    else: # arbitrary number of iterations
+        (stat_results, output_results) = full_cv_fusion_segment(validation_library,
+                                         segmentation_library,
+                                         output, segmentation_parameters,
+                                         cv_iterations, cv_exclude,
+                                         ec_parameters=ec_parameters,
+                                         debug=debug,
+                                         cleanup=cleanup,
+                                         ec_variant=ec_variant,
+                                         fuse_variant=fuse_variant,
+                                         cv_variant=cv_variant,
+                                         regularize_variant=regularize_variant,
+                                         ext=ext,
+                                         cv_iter=cv_iter)
+
+    # average error maps
+
+    if cv_iter is None or cv_iter==-1:
+        results=[]
+        output_results_all={'results':output_results}
+        output_results_all['cv_stats']=cv_output
+        output_results_all['error_maps']={}
+        all_error_maps=[]
+
+        for (i,j) in enumerate(output_results[0]['error_maps']):
+            out_avg=output+os.sep+cv_variant+'_error_{:03d}.mnc'.format(i)
+            output_results_all['error_maps'][i]=out_avg
+            all_error_maps.append(out_avg)
+            maps=[ k['error_maps'][i] for k in output_results ]
+            results.append(futures.submit(
+                average_error_maps,maps,out_avg))
+
+        futures.wait(results, return_when=futures.ALL_COMPLETED)
+
+        output_results_all['max_error']=output+os.sep+cv_variant+'_max_error.mnc'
+        max_error_maps(all_error_maps,output_results_all['max_error'])
+
+        with open(cv_output,'w') as f:
+            json.dump(stat_results, f, indent=1 )
+
+        with open(res_output,'w') as f:
+            json.dump(output_results_all, f, indent=1, cls=MRIEncoder)
+
+        return stat_results
+    else:
+        # we assume that results will be available later
+        return None
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/segment/error_correction.py b/ipl/segment/error_correction.py
new file mode 100755
index 0000000..6b87436
--- /dev/null
+++ b/ipl/segment/error_correction.py
@@ -0,0 +1,793 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
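+#
+# voxel-wise error correction for segmentation: trains a classifier to detect
+# disagreements between an automatic segmentation and the ground truth and,
+# in the multi-label case, a second classifier to assign the corrected label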
+# @date
+#
+
+# standard library
+import string
+import os
+import argparse
+import pickle
+
+try:
+    import cPickle
+except ImportError:
+    # python 3: fall back to the standard pickle module
+    import pickle as cPickle
+
+import sys
+import json
+import csv
+# minc
+import minc
+
+# numpy
+import numpy as np
+
+# scikit-learn
+from sklearn import svm
+from sklearn import neighbors
+from sklearn import ensemble
+from sklearn import tree
+#from sklearn import cross_validation
+from sklearn import preprocessing
+from sklearn import dummy
+
+from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import Normalizer
+
+# XGB package
+try:
+    import xgboost as xgb
+except ImportError:
+    pass
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+import traceback
+
+
+def prepare_features(images, coords, mask=None, use_coord=True, use_joint=True, patch_size=1, primary_features=1 ):
+    features=[]
+
+    # add features dependent on coordinates
+    image_no=len(images)
+    if primary_features > image_no or primary_features<0 :
+        primary_features=image_no
+    # coordinates are scaled to 0 at the center and 1.0 at the edge; could have used preprocessing
+    image_idx=0
+    if use_coord:
+        image_idx=3
+        if coords is None:
+            c=np.mgrid[ 0:images[0].shape[0] ,
+                        0:images[0].shape[1] ,
+                        0:images[0].shape[2] ]
+
+            features.append( ( c[2]-images[0].shape[0]/2.0)/ (images[0].shape[0]/2.0) )
+            features.append( ( c[1]-images[0].shape[1]/2.0)/ (images[0].shape[1]/2.0) )
+            features.append( ( c[0]-images[0].shape[2]/2.0)/ (images[0].shape[2]/2.0) )
+        else: # assume we have three sets of coords
+            features.append( coords[0] )
+            features.append( coords[1] )
+            features.append( coords[2] )
+
+    # add appearance and context images (patch around each voxel)
+    if patch_size>0:
+        for i in range(primary_features) :
+            for x in range(-patch_size,patch_size+1) :
+                for y in range(-patch_size,patch_size+1) :
+                    for z in range(-patch_size,patch_size+1) :
+                        features.append( np.roll( np.roll( np.roll( images[i], shift=x, axis=0 ), shift=y, axis=1), shift=z, axis=2 ) )
+
+        features.extend(images[primary_features:]) # add the rest of the images
+        app_features=primary_features*(patch_size*2+1)*(patch_size*2+1)*(patch_size*2+1)+(image_no-primary_features)
+        primary_features=primary_features*(patch_size*2+1)*(patch_size*2+1)*(patch_size*2+1)
+    else:
+        features.extend(images)
+        app_features=image_no
+
+    # add joint features
+    if use_joint and use_coord:
+        for i in range(primary_features):
+            # multiply appearance features by coordinate features
+            for j in range(3):
+                features.append( features[i+image_idx] * features[j] )
+
+    # extract only what's needed
+    if mask is not None:
+        return [ i[ mask>0 ] for i in features ]
+    else:
+        return [ i for i in features ]
+
+
+def convert_image_list(images):
+    '''
+    convert array of images into a single matrix
+    '''
+    s=[]
+    for (i,k) in enumerate(images):
+        s.append(np.column_stack( tuple( np.ravel( j ) for j in k ) ) )
+        print(s[-1].shape)
+
+    return np.vstack( tuple( i for i in s ) )
+
+
+def extract_part(img, partition, part, border):
+    '''
+    extract slice of the image for parallelized execution
+    '''
+    if partition is None or part is None :
+        return img
+    else:
+        strip=img.shape[2]//partition
+        beg=strip*part
+        end=strip*(part+1)
+
+        if part>0:
+            beg-=border
+        if part<(partition-1):
+            end+=border
+        else :
+            end=img.shape[2]
+        return img[:,:,beg:end]
+
+
+def pad_data(img, shape, partition, part, border):
+    if partition is None or part is None :
+        return img
+    else:
+        out=np.zeros(shape,dtype=img.dtype)
+        strip=shape[2]//partition
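+        # worked example (hypothetical numbers): shape[2]==100, partition==4,
+        # part==1 and border==2 give strip==25; below, beg=25-2=23 and
+        # end=50+2=52, so the slab and its overlap are written back at [:,:,23:52]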
+
+        beg=strip*part
+        end=strip*(part+1)
+
+        _beg=0
+        _end=img.shape[2]
+
+        if part>0:
+            beg-=border
+
+        if part<(partition-1):
+            end+=border
+        else :
+            end=shape[2]
+
+        out[:,:,beg:end]=img[:,:,_beg:_end]
+        return out
+
+
+def merge_segmentations(inputs, output, partition, parameters):
+    patch_size=parameters.get('patch_size',1)
+    border=patch_size*2
+    out=None
+    strip=None
+    for i in range(len(inputs)):
+        d=minc.Label( inputs[i] ).data
+
+        if out is None:
+            out=np.zeros(d.shape,dtype=np.int32)
+            strip=d.shape[2]//partition # integer division, used as a slice index
+
+        beg=strip*i
+        end=strip*(i+1)
+
+        if i==(partition-1):
+            end=d.shape[2]
+
+        out[:,:,beg:end]=d[:,:,beg:end]
+
+    out_i=minc.Label( data=out )
+    out_i.save( name=output, imitate=inputs[0])
+
+
+def errorCorrectionTrain(input_images,
+                         output,
+                         parameters=None,
+                         debug=False,
+                         partition=None,
+                         part=None,
+                         multilabel=1):
+    try:
+        use_coord  = parameters.get('use_coord',True)
+        use_joint  = parameters.get('use_joint',True)
+        patch_size = parameters.get('patch_size',1)
+
+        border=patch_size*2
+
+        if patch_size==0:
+            border=2
+
+        normalize_input=parameters.get('normalize_input',True)
+
+        method    = parameters.get('method','lSVC')
+        method2   = parameters.get('method2',method)
+        method_n  = parameters.get('method_n',15)
+        method2_n = parameters.get('method2_n',method_n)
+        method_random = parameters.get('method_random',None)
+        method_max_features=parameters.get('method_max_features','auto')
+        method_n_jobs=parameters.get('method_n_jobs',1)
+        primary_features=parameters.get('primary_features',1)
+
+        training_images = []
+        training_diff   = []
+        training_images_direct = []
+        training_direct = []
+
+        if debug:
+            print("errorCorrectionTrain use_coord={} use_joint={} patch_size={} normalize_input={} method={} output={} partition={} part={}".\
+                  format(repr(use_coord),repr(use_joint),repr(patch_size),repr(normalize_input),method,output,partition,part))
+
+        coords=None
+        total_mask_size=0
+        total_diff_mask_size=0
+
+        for (i,inp) in enumerate(input_images):
+            mask=None
+            diff=None
+            mask_diff=None
+
+            if inp[-2] is not None:
+                mask=extract_part(minc.Label( inp[-2] ).data, partition, part, border)
+
+            ground_data = minc.Label( inp[-1] ).data
+            auto_data   = minc.Label( inp[-3] ).data
+
+            ground_shape = ground_data.shape
+            ground = extract_part(ground_data, partition, part, border)
+            auto   = extract_part(auto_data,   partition, part, border)
+
+            shape = ground_shape
+            if coords is None and use_coord:
+                c = np.mgrid[ 0:shape[0], 0:shape[1], 0: shape[2] ]
+                coords = [ extract_part( (c[j]-shape[j]/2.0)/(shape[j]/2.0), partition, part, border ) for j in range(3) ]
+
+            features = [ extract_part( minc.Image(k, dtype=np.float32).data, partition, part, border ) for k in inp[0:-3] ]
+
+            mask_size = shape[0] * shape[1] * shape[2]
+
+            if debug:
+                print("Training data size:{}".format(len(features)))
+            if mask is not None:
+                mask_size = np.sum(mask)
+                print("Mask size:{}".format(mask_size))
+            else:
+                print("Mask absent")
+            total_mask_size += mask_size
+
+            if multilabel>1:
+                diff = (ground != auto)
+                total_diff_mask_size += np.sum(mask)
+
+                if mask is not None:
+                    mask_diff = diff & ( mask > 0 )
+                    print("Sample {} mask_diff={} diff={}".format(i,np.sum(mask_diff),np.sum(diff)))
+                    #print(mask_diff)
+                    training_diff.append( diff [ mask>0 ] )
+                    training_direct.append( ground[ mask_diff ] )
+                else:
+                    mask_diff = diff
+                    training_diff.append( diff )
+                    training_direct.append( ground[ diff ] )
+
+                training_images.append( prepare_features(
+                    features,
+                    coords,
+                    mask=mask,
+                    use_coord=use_coord,
+                    use_joint=use_joint,
+                    patch_size=patch_size,
+                    primary_features=primary_features ) )
+
+                training_images_direct.append( prepare_features(
+                    features,
+                    coords,
+                    mask=mask_diff,
+                    use_coord=use_coord,
+                    use_joint=use_joint,
+                    patch_size=patch_size,
+                    primary_features=primary_features ) )
+
+            else:
+                mask_diff=mask
+                if mask is not None:
+                    training_diff.append( ground[ mask>0 ] )
+                else:
+                    training_diff.append( ground )
+
+                training_images.append( prepare_features(
+                    features,
+                    coords,
+                    mask=mask,
+                    use_coord=use_coord,
+                    use_joint=use_joint,
+                    patch_size=patch_size,
+                    primary_features=primary_features ) )
+
+            if debug:
+                print("feature size:{}".format(len(training_images[-1])))
+
+            if i == 0 and parameters.get('dump',False):
+                print("Dumping feature images...")
+                for (j,k) in enumerate( training_images[-1] ):
+                    test=np.zeros_like( features[0] )
+                    test[ mask>0 ]=k
+                    out=minc.Image( data=test )
+                    out.save( name="dump_{}.mnc".format(j), imitate=inp[0] )
+
+        # calculate normalization coefficients
+
+        if debug: print("Done")
+
+        clf=None
+        clf2=None
+
+        if total_mask_size>0:
+            training_X = convert_image_list( training_images )
+            training_Y = np.ravel( np.concatenate( tuple(j for j in training_diff ) ) )
+
+            if debug: print("Fitting 1st...")
+
+            if method == "xgb":
+                clf = None
+            elif method == "SVM":
+                clf = svm.SVC()
+            elif method == "nuSVM":
+                clf = svm.NuSVC()
+            elif method == 'NC':
+                clf = neighbors.NearestCentroid()
+            elif method == 'NN':
+                clf = neighbors.KNeighborsClassifier(method_n)
+            elif method == 'RanForest':
+                clf = ensemble.RandomForestClassifier(n_estimators=method_n,
+                                                      n_jobs=method_n_jobs,
+                                                      max_features=method_max_features,
+                                                      random_state=method_random)
+            elif method == 'AdaBoost':
+                clf = ensemble.AdaBoostClassifier(n_estimators=method_n,random_state=method_random)
+            elif method == 'AdaBoostPP':
+                clf = Pipeline(steps=[('normalizer', Normalizer()),
+                                      ('AdaBoost', ensemble.AdaBoostClassifier(n_estimators=method_n,random_state=method_random))
+                                     ])
+            elif method == 'tree':
+                clf = tree.DecisionTreeClassifier(random_state=method_random)
+            elif method == 'ExtraTrees':
+                clf = ensemble.ExtraTreesClassifier(n_estimators=method_n,
+                                                    max_features=method_max_features,
+                                                    n_jobs=method_n_jobs,
+                                                    random_state=method_random)
+            elif method == 'Bagging':
+                clf = ensemble.BaggingClassifier(n_estimators=method_n,
+                                                 max_features=method_max_features,
+                                                 n_jobs=method_n_jobs,
+                                                 random_state=method_random)
+            elif method == 'dumb':
+                clf = dummy.DummyClassifier(strategy="constant",constant=0)
+            else:
+                clf = svm.LinearSVC()
+
+            #scores = cross_validation.cross_val_score(clf, training_X, training_Y)
+            #print scores
+            if method == "xgb":
+                xg_train = xgb.DMatrix( training_X, label=training_Y)
+                param = {}
+                num_round = 100
+                # use softmax multi-class classification
+                param['objective'] = 'multi:softmax'
+                # scale weight of positive examples
+                param['eta'] = 0.1
+                param['max_depth'] = 8
+                param['silent'] = 1
+                param['nthread'] = 4
+                param['num_class'] = 2
+                clf = xgb.train(param, xg_train, num_round)
+            elif method != 'dumb':
+                clf.fit( training_X, training_Y )
+
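+            # second stage (multi-label only): the classifier trained below sees
+            # only voxels where the automatic and manual labels disagree, and
+            # learns to predict the correct label rather than error presence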
+            if multilabel>1 and method != 'dumb':
+                if debug: print("Fitting direct...")
+
+                training_X = convert_image_list( training_images_direct )
+                training_Y = np.ravel( np.concatenate( tuple(j for j in training_direct ) ) )
+
+                if method2 == "xgb":
+                    clf2 = None
+                elif method2 == "SVM":
+                    clf2 = svm.SVC()
+                elif method2 == "nuSVM":
+                    clf2 = svm.NuSVC()
+                elif method2 == 'NC':
+                    clf2 = neighbors.NearestCentroid()
+                elif method2 == 'NN':
+                    clf2 = neighbors.KNeighborsClassifier(method_n)
+                elif method2 == 'RanForest':
+                    clf2 = ensemble.RandomForestClassifier(n_estimators=method_n,
+                                                           n_jobs=method_n_jobs,
+                                                           max_features=method_max_features,
+                                                           random_state=method_random)
+                elif method2 == 'AdaBoost':
+                    clf2 = ensemble.AdaBoostClassifier(n_estimators=method_n,random_state=method_random)
+                elif method2 == 'AdaBoostPP':
+                    clf2 = Pipeline(steps=[('normalizer', Normalizer()),
+                                           ('AdaBoost', ensemble.AdaBoostClassifier(n_estimators=method_n,random_state=method_random))
+                                          ])
+                elif method2 == 'tree':
+                    clf2 = tree.DecisionTreeClassifier(random_state=method_random)
+                elif method2 == 'ExtraTrees':
+                    clf2 = ensemble.ExtraTreesClassifier(n_estimators=method_n,
+                                                         max_features=method_max_features,
+                                                         n_jobs=method_n_jobs,
+                                                         random_state=method_random)
+                elif method2 == 'Bagging':
+                    clf2 = ensemble.BaggingClassifier(n_estimators=method_n,
+                                                      max_features=method_max_features,
+                                                      n_jobs=method_n_jobs,
+                                                      random_state=method_random)
+                elif method2 == 'dumb':
+                    clf2 = dummy.DummyClassifier(strategy="constant",constant=0)
+                else:
+                    clf2 = svm.LinearSVC()
+
+                if method2 == "xgb" :
+                    xg_train = xgb.DMatrix( training_X, label=training_Y)
+
+                    param = {}
+                    num_round = 100
+                    # use softmax multi-class classification
+                    param['objective'] = 'multi:softmax'
+                    # scale weight of positive examples
+                    param['eta'] = 0.1
+                    param['max_depth'] = 8
+                    param['silent'] = 1
+                    param['nthread'] = 4
+                    param['num_class'] = multilabel
+
+                    clf2 = xgb.train(param, xg_train, num_round)
+
+                elif method2 != 'dumb':
+                    clf2.fit( training_X, training_Y )
+
+            #print(clf.score(training_X,training_Y))
+
+            if debug:
+                print( clf )
+                print( clf2 )
+        else:
+            print("Warning : zero total mask size!, using null classifier")
+            clf = dummy.DummyClassifier(strategy="constant",constant=0)
+
+        if method == 'xgb' and method2 == 'xgb':
+            # save in xgboost's native format
+            clf.save_model(output)
+            clf2.save_model(output+'_2')
+        else:
+            with open(output,'wb') as f:
+                cPickle.dump( [clf, clf2] , f, -1)
+
+    except mincError as e:
+        print("Exception in errorCorrectionTrain:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in errorCorrectionTrain:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+def errorCorrectionApply(input_images,
+                         output,
+                         input_mask=None,
+                         parameters=None,
+                         debug=False,
+                         history=None,
+                         input_auto=None,
+                         partition=None,
+                         part=None,
+                         multilabel=1,
+                         debug_files=None ):
+    try:
+        use_coord=parameters.get('use_coord',True)
+        use_joint=parameters.get('use_joint',True)
+        patch_size=parameters.get('patch_size',1)
+        normalize_input=parameters.get('normalize_input',True)
+        primary_features=parameters.get('primary_features',1)
+
+        method  =parameters.get('method','lSVC')
+        method2 =parameters.get('method2',method)
+
+        training=parameters['training']
+
+        clf=None
+        clf2=None
+
+        border=patch_size*2
+
+        if patch_size==0:
+            border=2
+
+        if debug: print( "Running error-correction, input_image:{} training:{} partition:{} part:{} output:{} input_auto:{}".
+                         format(repr(input_images), training, partition,part,output,input_auto) )
+
+        if method == 'xgb' and method2 == 'xgb':
+            # need to convert from Unicode
+            _training=str(training)
+            clf = xgb.Booster(model_file=_training)
+            if multilabel>1:
+                clf2 = xgb.Booster(model_file=_training+'_2')
+        else:
+            with open(training, 'rb') as f:
+                c = cPickle.load(f)
+                clf  = c[0]
+                clf2 = c[1]
+
+        if debug:
+            print( clf )
+            print( clf2 )
+            print( "Loading input images..."
) + + input_data=[ minc.Image(k, dtype=np.float32).data for k in input_images ] + shape=input_data[0].shape + + #features = [ extract_part( minc.Image(k, dtype=np.float32).data, partition, part, border) for k in inp[0:-3] ] + #if normalize_input: + #features = [ extract_part( preprocessing.scale( k ), partition, part, border) for k in input_data ] + #else: + features = [ extract_part( k, partition, part, border) for k in input_data ] + + coords=None + + if use_coord: + c=np.mgrid[ 0:shape[0] , 0:shape[1] , 0: shape[2] ] + coords=[ extract_part( (c[j]-shape[j]/2.0)/(shape[j]/2.0), partition, part, border ) for j in range(3) ] + + if debug: + print("Features data size:{}".format(len(features))) + + mask=None + + mask_size=shape[0]*shape[1]*shape[2] + + if input_mask is not None: + mask=extract_part( minc.Label( input_mask ).data, partition, part, border ) + mask_size=np.sum( mask ) + + out_cls = None + out_corr = None + + test_x=convert_image_list ( [ prepare_features( + features, + coords, + mask=mask, + use_coord=use_coord, + use_joint=use_joint, + patch_size=patch_size, + primary_features=primary_features ) + ] ) + + if input_auto is not None: + out_corr = np.copy( extract_part( minc.Label( input_auto ).data, partition, part, border) ) # use input data + out_cls = np.copy( extract_part( minc.Label( input_auto ).data, partition, part, border) ) # use input data + else: + out_corr = np.zeros( shape, dtype=np.int32 ) + out_cls = np.zeros( shape, dtype=np.int32 ) + + if mask_size>0 and not isinstance(clf, dummy.DummyClassifier): + if debug: + print("Running classifier 1 ...") + + if method!='xgb': + pred = np.asarray( clf.predict( test_x ), dtype=np.int32 ) + else: + xg_predict = xgb.DMatrix(test_x) + pred = np.array( clf.predict( xg_predict ), dtype=np.int32 ) + + if debug_files is not None: + out_dbg = np.zeros( shape, dtype=np.int32 ) + if mask is not None: + out_dbg[ mask > 0 ] = pred + else: + out_dbg = pred + + out_dbg=minc.Label( data=pad_data(out_dbg, shape, partition, part, border) ) + out_dbg.save(name=debug_files[0], imitate=input_images[0], history=history) + + + if mask is not None: + out_corr[ mask > 0 ] = pred + else: + out_corr = pred + + if multilabel > 1 and clf2 is not None: + if mask is not None: + mask=np.logical_and(mask>0, out_corr>0) + else: + mask=(out_corr>0) + + if debug: + print("Running classifier 2 ...") + + test_x = convert_image_list ( [ prepare_features( + features, + coords, + mask=mask , + use_coord=use_coord, + use_joint=use_joint, + patch_size=patch_size, + primary_features=primary_features ) + ] ) + if method2!='xgb': + pred = np.asarray( clf2.predict( test_x ), dtype=np.int32 ) + else: + xg_predict = xgb.DMatrix(test_x) + pred = np.array( clf2.predict( xg_predict ), dtype=np.int32 ) + + out_cls[ mask > 0 ] = pred + + if debug_files is not None: + out_dbg = np.zeros( shape, dtype=np.int32 ) + if mask is not None: + out_dbg[ mask > 0 ] = pred + else: + out_dbg = pred + + out_dbg=minc.Label( data=pad_data(out_dbg, shape, partition, part, border) ) + out_dbg.save(name=debug_files[1], imitate=input_images[0], history=history) + + + else: + out_cls=out_corr + + else: + pass # nothing to do! 
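+        # when run with partition/part, only one slab was processed here;
+        # pad_data() below restores the full volume size, and the per-part
+        # outputs can then be combined with merge_segmentations(), e.g.
+        # (hypothetical file names):
+        #   merge_segmentations(['ec_part0.mnc','ec_part1.mnc'], 'ec.mnc', 2, parameters)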
+
+        if debug:
+            print("Saving output...")
+
+        out=minc.Label( data=pad_data(out_cls, shape, partition, part, border) )
+
+        out.save(name=output, imitate=input_images[0], history=history)
+    except mincError as e:
+        print("Exception in errorCorrectionApply:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in errorCorrectionApply:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+def parse_options():
+    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+                                     description='Perform error-correction learning and application')
+
+    parser.add_argument('--train',
+                        help="Training library in json format, list of lists: [,img2],,,")
+
+    parser.add_argument('--train_csv',
+                        help="Training library in CSV format, format [,img2],,,")
+
+    parser.add_argument('--input',
+                        help="Automatic seg to be corrected")
+
+    parser.add_argument('--output',
+                        help="Output image, required for application of method")
+
+    parser.add_argument('--param',
+                        help="Load error-correction parameters from file")
+
+    parser.add_argument('--mask',
+                        help="Region for correction, required for application of method" )
+
+    parser.add_argument('--method',
+                        choices=['SVM','lSVM','nuSVM','NN','RanForest','AdaBoost','tree'],
+                        default='lSVM',
+                        help='Classification algorithm')
+
+    parser.add_argument('-n',
+                        type=int,
+                        help="nearest neighbors",
+                        default=15)
+
+    parser.add_argument('--debug',
+                        action="store_true",
+                        dest="debug",
+                        default=False,
+                        help='Print debugging information' )
+
+    parser.add_argument('--dump',
+                        action="store_true",
+                        dest="dump",
+                        default=False,
+                        help='Dump first sample features (for debugging)' )
+
+    parser.add_argument('--coord',
+                        action="store_true",
+                        dest="coord",
+                        default=False,
+                        help='Use image coordinates as additional features' )
+
+    parser.add_argument('--joint',
+                        action="store_true",
+                        dest="joint",
+                        default=False,
+                        help='Produce joint features between appearance and coordinate' )
+
+    parser.add_argument('--random',
+                        type=int,
+                        dest="random",
+                        help='Provide random state if needed' )
+
+    parser.add_argument('--save',
+                        help='Save training results in a file')
+
+    parser.add_argument('--load',
+                        help='Load training results from a file')
+
+    parser.add_argument('image',
+                        help='Input images', nargs='*')
+
+    options = parser.parse_args()
+
+    return options
+
+
+if __name__ == "__main__":
+    history = minc.format_history(sys.argv)
+
+    options = parse_options()
+
+    parameters={}
+    if options.param is None:
+        parameters['method']=options.method
+        parameters['method_n']=options.n
+        parameters['method_random']=options.random
+
+        parameters['use_coord']=options.coord
+        parameters['use_joint']=options.joint
+
+    # load training images
+    if ( (options.train is not None or \
+          options.train_csv is not None) and \
+          options.save is not None) :
+
+        if options.debug: print("Loading training images...")
+
+        train=None
+
+        if options.train is not None:
+            with open(options.train,'r') as f:
+                train=json.load(f)
+        else:
+            with open(options.train_csv,'r') as f:
+                train=list(csv.reader(f))
+
+        errorCorrectionTrain(train,options.save,
+                             parameters=parameters,
+                             debug=options.debug)
+
+    elif options.input is not None and \
+         options.image  is not None and \
+         options.output is not None:
+
+        if options.load is not None:
+            parameters['training']=options.load
+
+        # the automatic segmentation is passed via input_auto; the positional
+        # arguments are the input image(s) and the output file name
+        errorCorrectionApply(
+            options.image, options.output,
+            input_mask=options.mask,
+            input_auto=options.input,
+            parameters=parameters,
+            debug=options.debug,
+            history=history)
+
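+    # example invocations (hypothetical paths):
+    #   training:    error_correction.py --coord --joint --train library.json --save model.pickle
+    #   application: error_correction.py --load model.pickle --input auto_seg.mnc \
+    #                    --mask roi.mnc --output corrected.mnc t1w.mnc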
+    else:
+        print("Error in arguments, run with --help")
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/segment/filter.py b/ipl/segment/filter.py
new file mode 100644
index 0000000..6393723
--- /dev/null
+++ b/ipl/segment/filter.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+import ipl.minc_hl as hl
+
+
+def filter_sample(input,output,filters,model=None):
+
+    apply_filter(input.scan, output.scan, filters,
+                 model=model.scan, input_mask=input.mask,
+                 model_mask=model.mask)
+    # TODO: parallelize?
+    for (i,j) in enumerate( input.add ):
+        apply_filter(input.add[i], output.add[i], filters,
+                     model=model.add[i], input_mask=input.mask,
+                     model_mask=model.mask)
+
+
+def apply_filter(input, output, filters, model=None, input_mask=None, model_mask=None, input_labels=None,model_labels=None):
+    output_scan=input
+    try:
+        if filters is not None :
+
+            with mincTools() as m:
+                if filters.get('denoise',False):
+                    # TODO: choose between ANLM and NLM here?
+                    m.anlm(output_scan,m.tmp('denoised.mnc'),
+                           beta      =filters.get('beta',0.5),
+                           patch     =filters.get('patch',1),
+                           search    =filters.get('search',1),
+                           regularize=filters.get('regularize',None))
+
+                    output_scan=m.tmp('denoised.mnc')
+
+                if filters.get('normalize',False) and model is not None:
+
+                    if filters.get('nuyl',False):
+                        m.nuyl_normalize(output_scan,model,m.tmp('normalized.mnc'),
+                                         source_mask=input_mask,target_mask=model_mask)
+                    elif filters.get('nuyl2',False):
+                        hl.nuyl_normalize2(output_scan,model,m.tmp('normalized.mnc'),
+                                           #source_mask=input_mask,target_mask=model_mask,
+                                           fwhm=filters.get('nuyl2_fwhm',2.0),
+                                           iterations=filters.get('nuyl2_iter',4))
+                    else:
+                        m.volume_pol(output_scan,model, m.tmp('normalized.mnc'),
+                                     source_mask=input_mask,target_mask=model_mask)
+                    output_scan = m.tmp('normalized.mnc')
+
+                # TODO: implement more filters
+                patch_norm = filters.get('patch_norm',None)
+
+                if patch_norm is not None:
+                    print("Running patch normalization")
+                    db  = patch_norm.get('db',None)
+                    idx = patch_norm.get('idx',None)
+                    thr = patch_norm.get('threshold',None)
+                    spl = patch_norm.get('spline',None)
+                    med = patch_norm.get('median',None)
+                    it  = patch_norm.get('iterations',None)
+                    if db is not None and idx is not None:
+                        # have all the pieces
+                        m.patch_norm(output_scan, m.tmp('patch_norm.mnc'),
+                                     index=idx, db=db, threshold=thr, spline=spl,
+                                     median=med, field = m.tmp('patch_norm_field.mnc'),
+                                     iterations=it)
+                        output_scan = m.tmp('patch_norm.mnc')
+
+                label_norm = filters.get('label_norm',None)
+
+                if label_norm is not None and input_labels is not None and model_labels is not None:
+                    print("Running label norm:{}".format(repr(label_norm)))
+                    norm_order=label_norm.get('order',3)
+                    norm_median=label_norm.get('median',True)
+                    hl.label_normalize(output_scan,input_labels,model,model_labels,out=m.tmp('label_norm.mnc'),order=norm_order,median=norm_median)
+                    output_scan = m.tmp('label_norm.mnc')
+
+                shutil.copyfile(output_scan,output)
+        else:
+            shutil.copyfile(input,output)
+    except mincError as e:
+        print("Exception in apply_filter:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in apply_filter:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+def make_border_mask( input, output, width=1,labels=1):
+    '''Extract a border along
the edge''' + try: + if not os.path.exists(output): + with mincTools() as m: + if labels==1: + m.binary_morphology(input,"D[{}]".format((width+1)//2),m.tmp('d.mnc')) + m.binary_morphology(input,"E[{}]".format(width//2),m.tmp('e.mnc')) + m.calc([m.tmp('d.mnc'),m.tmp('e.mnc')],'A[0]>0.5&&A[1]<0.5?1:0',output) + else: # have to split up labels and then create a mask of all borders + split_labels(input,labels, m.tmp('split')) + borders=[] + for i in range(1,labels): + l='{}_{:02d}.mnc' .format(m.tmp('split'),i) + d='{}_{:02d}_d.mnc'.format(m.tmp('split'),i) + e='{}_{:02d}_e.mnc'.format(m.tmp('split'),i) + b='{}_{:02d}_b.mnc'.format(m.tmp('split'),i) + m.binary_morphology(l,"D[{}]".format((width+1)//2),d) + m.binary_morphology(l,"E[{}]".format(width//2),e) + m.calc([d,e],'A[0]>0.5&&A[1]<0.5?1:0',b) + borders.append(b) + m.math(borders,'max',m.tmp('max'),datatype='-float') + m.reshape(m.tmp('max'),output,datatype='byte', + image_range=[0,1],valid_range=[0,1]) + + except mincError as e: + print("Exception in make_border_mask:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in make_border_mask:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def split_labels(input, n_labels,output_prefix, + antialias=False, blur=None, + expit=None, normalize=False ): + try: + with mincTools() as m: + inputs=[ input ] + outputs=['{}_{:02d}.mnc'.format(output_prefix,i) for i in range(n_labels) ] + + cmd=['itk_split_labels',input,'{}_%02d.mnc'.format(output_prefix), + '--missing',str(n_labels)] + if antialias: + cmd.append('--antialias') + if normalize: + cmd.append('--normalize') + if blur is not None: + cmd.extend(['--blur',str(blur)]) + if expit is not None: + cmd.extend(['--expit',str(expit)]) + m.command(cmd, inputs=inputs, outputs=outputs) + #return outputs + except mincError as e: + print("Exception in split_labels:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in split_labels:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def generate_flip_sample(input, labels_datatype='byte'): + '''generate flipped version of sample''' + try: + with mincTools() as m: + m.flip_volume_x(input.scan,input.scan_f) + + for (i,j) in enumerate(input.add): + m.flip_volume_x(input.add[i],input.add_f[i]) + + if input.mask is not None: + m.flip_volume_x(input.mask, input.mask_f, labels=True) + + #for i in input.add: + # m.flip_volume_x(i, input.seg_f, labels=True,datatype=labels_datatype) + except mincError as e: + print("Exception in generate_flip_sample:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in generate_flip_sample:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +def create_unflip_remap(remap,remap_flip): + if remap is not None and remap_flip is not None: + # convert both into dict + _remap= { int(i[0]):int(i[1]) for i in remap } + _remap_flip={ int(i[1]):int(i[0]) for i in remap_flip } + _rr={} + + for i,j in _remap.items(): + if j in _remap_flip: + _rr[j]=j + return _rr + else: + return None + +def log_transform_sample(input, output, threshold=1.0): + try: + with mincTools() as m: + m.calc([input.scan],'A[0]>{}?log(A[0]):0.0'.format(threshold), + output.scan) + except mincError as e: + print("Exception in log_transform_sample:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in 
log_transform_sample:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def create_patch_norm_db( input_samples, + patch_norm_db, + patch_norm_idx, + pct=0.1, + patch=2, + sub=1): + try: + with mincTools() as m: + patch_lib=os.path.dirname(input_samples[0].scan)+os.sep+'patch_lib.lst' + inputs=[] + outputs=[patch_norm_db] + + with open(patch_lib,'w') as f: + for i in input_samples: + f.write( os.path.basename( i.scan ) ) + f.write("\n") + inputs.append(i.scan) + + cmd=['create_feature_database', + patch_lib, patch_norm_db, + '--patch', + '--patch-radius', str(patch), + '--subsample', str(sub), + '--random', str(pct), + '--log', + '--threshold', str(1.0), + ] + + m.command(cmd, inputs=inputs, outputs=outputs) + + cmd=['refine_feature_database', + patch_norm_db, patch_norm_idx + ] + m.command(cmd, inputs=[patch_norm_db], outputs=[patch_norm_idx]) + + except mincError as e: + print("Exception in create_patch_norm_db:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in create_patch_norm_db:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/fuse.py b/ipl/segment/fuse.py new file mode 100644 index 0000000..a0262df --- /dev/null +++ b/ipl/segment/fuse.py @@ -0,0 +1,906 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + +import shutil +import os +import sys +import csv +import copy +import re +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +# scoop parallel execution +from scoop import futures, shared + +from .filter import * +from .structures import * +from .registration import * +from .resample import * +from .error_correction import * +from .preselect import * +from .qc import * +from .fuse_segmentations import * +from .library import * +import traceback + +def seg_to_volumes(seg, output_json, label_map=None): + with mincTools( verbose=2 ) as m: + out=m.label_stats(seg,label_defs=label_map) + with open(output_json,'w') as f: + json.dump(out,f,indent=1) + return out + +def invert_lut(inp): + if inp is None: + return None + return { str(j):str(i) for i,j in inp.iteritems()} + + +def fusion_segment( input_scan, + library_description, + output_segment, + input_mask = None, + parameters = {}, + exclude =[], + work_dir = None, + debug = False, + ec_variant = None, + fuse_variant = None, + regularize_variant = None, + add=[], + cleanup = False, + cleanup_xfm = False, + presegment = None, + preprocess_only = False): + """Apply fusion segmentation""" + try: + if debug: + print( "Segmentation parameters:") + print( repr(parameters) ) + print( "presegment={}".format(repr(presegment))) + + out_variant='' + if fuse_variant is not None: + out_variant+=fuse_variant + + if regularize_variant is not None: + out_variant+='_'+regularize_variant + + if ec_variant is not None: + out_variant+='_'+ec_variant + + if work_dir is None: + work_dir=output_segment+os.sep+'work_segment' + + if not os.path.exists(work_dir): + os.makedirs(work_dir) + + work_lib_dir= work_dir+os.sep+'library' + work_lib_dir_f=work_dir+os.sep+'library_f' + + if not os.path.exists(work_lib_dir): + os.makedirs(work_lib_dir) + + if not os.path.exists(work_lib_dir_f): + os.makedirs(work_lib_dir_f) + + library_nl_samples_avail=library_description['nl_samples_avail'] + library_modalities=library_description.get('modalities',1)-1 + + # perform symmetric 
segmentation + segment_symmetric= parameters.get('segment_symmetric', False ) + + # read filter paramters + pre_filters= parameters.get('pre_filters', None ) + post_filters= parameters.get('post_filters', parameters.get( 'filters', None )) + + # if linear registration should be performed + do_initial_register = parameters.get( 'initial_register', + parameters.get( 'linear_register', {})) + + if do_initial_register is not None and isinstance(do_initial_register,dict): + initial_register = do_initial_register + do_initial_register = True + else: + initial_register={} + + inital_reg_type = parameters.get( 'initial_register_type', + parameters.get( 'linear_register_type', + initial_register.get('type','-lsq12'))) + + inital_reg_ants = parameters.get( 'initial_register_ants', + parameters.get( 'linear_register_ants', False)) + + inital_reg_options = parameters.get( 'initial_register_options', + initial_register.get('options',None) ) + + inital_reg_downsample = parameters.get( 'initial_register_downsample', + initial_register.get('downsample',None)) + + inital_reg_use_mask = parameters.get( 'initial_register_use_mask', + initial_register.get('use_mask',False)) + + initial_reg_objective = initial_register.get('objective','-xcorr') + + # perform local linear registration + do_initial_local_register = parameters.get( 'initial_local_register', + parameters.get( 'local_linear_register', {}) ) + if do_initial_local_register is not None and isinstance(do_initial_local_register,dict): + initial_local_register=do_initial_local_register + do_initial_local_register=True + else: + initial_local_register={} + + local_reg_type = parameters.get( 'local_register_type', + initial_local_register.get('type','-lsq12')) + + local_reg_ants = parameters.get( 'local_register_ants', False) + + local_reg_opts = parameters.get( 'local_register_options', + initial_local_register.get('options',None)) + + local_reg_bbox = parameters.get( 'local_register_bbox', + initial_local_register.get('bbox',False )) + + local_reg_downsample = parameters.get( 'local_register_downsample', + initial_local_register.get('downsample',None)) + + local_reg_use_mask = parameters.get( 'local_register_use_mask', + initial_local_register.get('use_mask',True)) + + local_reg_objective = initial_local_register.get('objective','-xcorr') + + # if non-linear registraiton should be performed for library creation + do_nonlinear_register = parameters.get('non_linear_register', False ) + + # generate segmentation library (needed for label fusion, not needed for single atlas based or external tool) + generate_library = parameters.get('generate_library', True ) + + # if non-linear registraiton should be performed pairwise + do_pairwise =parameters.get('non_linear_pairwise', False ) + # if pairwise registration should be performed using ANTS + do_pairwise_ants = parameters.get('non_linear_pairwise_ants', True ) + pairwise_register_type = parameters.get( 'non_linear_pairwise_type',None) + if pairwise_register_type is None: + if do_pairwise_ants: + pairwise_register_type='ants' + + library_preselect= parameters.get('library_preselect', 10) + library_preselect_step= parameters.get('library_preselect_step', None) + library_preselect_method= parameters.get('library_preselect_method', 'MI') + + + # if non-linear registraiton should be performed with ANTS + do_nonlinear_register_ants=parameters.get('non_linear_register_ants',False ) + nlreg_level = parameters.get('non_linear_register_level', 2) + nlreg_start = parameters.get('non_linear_register_start', 16) + 
nlreg_options = parameters.get('non_linear_register_options', None) + nlreg_downsample = parameters.get('non_linear_register_downsample', None) + + nonlinear_register_type = parameters.get( 'non_linear_register_type',None) + if nonlinear_register_type is None: + if do_nonlinear_register_ants: + nonlinear_register_type='ants' + + pairwise_level = parameters.get('pairwise_level', 2) + pairwise_start = parameters.get('pairwise_start', 16) + pairwise_options = parameters.get('pairwise_options', None) + + fuse_options = parameters.get('fuse_options', None) + + resample_order = parameters.get('resample_order', 2) + resample_baa = parameters.get('resample_baa', True) + + # error correction parametrs + ec_options = parameters.get('ec_options', None) + + # QC image paramters + qc_options = parameters.get('qc_options', None) + + + # special case for training error correction, assume input scan is already pre-processed + run_in_bbox = parameters.get('run_in_bbox', False) + + classes_number = library_description['classes_number'] + seg_datatype = library_description['seg_datatype'] + gco_energy = library_description['gco_energy'] + + + output_info = {} + + input_sample = MriDataset(scan=input_scan, seg=presegment, + mask=input_mask, protect=True, + add=add) + + sample = input_sample + + # get parameters + model = MriDataset(scan=library_description['model'], + mask=library_description['model_mask'], + add= library_description.get('model_add',[]) ) + + local_model = MriDataset(scan=library_description['local_model'], + mask=library_description['local_model_mask'], + scan_f=library_description.get('local_model_flip',None), + mask_f=library_description.get('local_model_mask_flip',None), + seg= library_description.get('local_model_seg',None), + seg_f= library_description.get('local_model_seg_flip',None), + add= library_description.get('local_model_add',[]), + add_f= library_description.get('local_model_add_flip',[]), + ) + + library = library_description['library'] + + sample_modalities=len(add) + + print("\n\n") + print("Sample modalities:{}".format(sample_modalities)) + print("\n\n") + # apply the same steps as used in library creation to perform segmentation: + + # global + initial_xfm=None + nonlinear_xfm=None + bbox_sample=None + nl_sample=None + bbox_linear_xfm=None + flipdir=work_dir+os.sep+'flip' + + sample_filtered=MriDataset(prefix=work_dir, name='flt_'+sample.name, add_n=sample_modalities ) + + # QC file + # TODO: allow for alternative location, extension + #sample_qc=work_dir+os.sep+'qc_'+sample.name+'_'+out_variant+'.jpg' + sample_qc=output_segment+'_qc.jpg' + + + if run_in_bbox: + segment_symmetric=False # that would depend ? + do_initial_register=False + do_initial_local_register=False + # assume filter already applied! 
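+            # run_in_bbox is set when training error correction: the scan is
+            # already in the library bounding box with filters applied, so
+            # registration and filtering are skipped below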
+ pre_filters=None + post_filters=None + + if pre_filters is not None: + apply_filter( sample.scan, + sample_filtered.scan, + pre_filters, + model=model.scan, + model_mask=model.mask) + + #if sample.mask is None: + sample_filtered.mask=sample.mask + # hack + sample_filtered.add=sample.add + sample=sample_filtered + else: + sample_filtered=None + + output_info['sample_filtered']=sample_filtered + + if segment_symmetric: + # need to flip the inputs + if not os.path.exists(flipdir): + os.makedirs(flipdir) + + sample.scan_f=flipdir+os.sep+os.path.basename(sample.scan) + sample.add_f=['' for (i,j) in enumerate(sample.add)] + + for (i,j) in enumerate(sample.add): + sample.add_f[i]=flipdir+os.sep+os.path.basename(sample.add[i]) + + if sample.mask is not None: + sample.mask_f=flipdir+os.sep+'mask_'+os.path.basename(sample.scan) + else: + sample.mask_f=None + + generate_flip_sample( sample ) + + if presegment is None: + sample.seg=None + sample.seg_f=None + + + if do_initial_register is not None: + initial_xfm=MriTransform(prefix=work_dir, name='init_'+sample.name ) + + if inital_reg_type=='elx' or inital_reg_type=='elastix' : + elastix_registration( sample, + model, initial_xfm, + symmetric=segment_symmetric, + parameters=inital_reg_options, + nl=False, + use_mask=inital_reg_use_mask, + downsample=inital_reg_downsample + ) + elif inital_reg_type=='ants' or inital_reg_ants: + linear_registration( sample, + model, initial_xfm, + symmetric=segment_symmetric, + reg_type=inital_reg_type, + linreg=inital_reg_options, + ants=True, + use_mask=inital_reg_use_mask, + downsample=inital_reg_downsample + ) + else: + linear_registration( sample, + model, initial_xfm, + symmetric=segment_symmetric, + reg_type=inital_reg_type, + linreg=inital_reg_options, + downsample=inital_reg_downsample, + use_mask=inital_reg_use_mask, + objective=initial_reg_objective + ) + + output_info['initial_xfm']=initial_xfm + + + # local + bbox_sample = MriDataset(prefix=work_dir, name='bbox_init_'+sample.name, + add_n=sample_modalities ) + # a hack to have sample mask + bbox_sample_mask = MriDataset(prefix=work_dir, name='bbox_init_'+sample.name ) + + + if do_initial_local_register: + bbox_linear_xfm=MriTransform(prefix=work_dir, name='bbox_init_'+sample.name ) + + if local_reg_type=='elx' or local_reg_type=='elastix' : + elastix_registration( sample, + local_model, + bbox_linear_xfm, + symmetric=segment_symmetric, + init_xfm=initial_xfm, + resample_order=resample_order, + parameters=local_reg_opts, + bbox=local_reg_bbox, + use_mask=local_reg_use_mask, + downsample=local_reg_downsample + ) + elif local_reg_type=='ants' or local_reg_ants: + linear_registration( sample, + local_model, + bbox_linear_xfm, + init_xfm=initial_xfm, + symmetric=segment_symmetric, + reg_type=local_reg_type, + linreg=local_reg_opts, + resample_order=resample_order, + ants=True, + close=True, + bbox=local_reg_bbox, + use_mask=local_reg_use_mask, + downsample=local_reg_downsample + ) + else: + linear_registration( sample, + local_model, + bbox_linear_xfm, + init_xfm=initial_xfm, + symmetric=segment_symmetric, + reg_type=local_reg_type, + linreg=local_reg_opts, + resample_order=resample_order, + close=True, + bbox=local_reg_bbox, + use_mask=local_reg_use_mask, + objective=local_reg_objective, + downsample=local_reg_downsample ) + + else: + bbox_linear_xfm=initial_xfm + + output_info['bbox_initial_xfm']=bbox_linear_xfm + + bbox_sample.mask=None + bbox_sample.mask_f=None + + if sample.seg is None: + bbox_sample.seg=None + bbox_sample.seg_f=None + + 
warp_sample(sample, local_model, bbox_sample, + transform=bbox_linear_xfm, + symmetric=segment_symmetric, + symmetric_flip=segment_symmetric,# need to flip symmetric dataset + resample_order=resample_order, + filters=post_filters, + ) + + if sample.seg is not None: + _lut=None + _flip_lut=None + if not run_in_bbox: # assume that labels are already renamed + _lut=invert_lut(library_description.get("map",None)) + _flip_lut=invert_lut(library_description.get("flip_map",None)) + + warp_rename_seg( sample, local_model, bbox_sample, + transform=bbox_linear_xfm, + symmetric=segment_symmetric, + symmetric_flip=segment_symmetric, + lut = _lut, + flip_lut = _flip_lut, + resample_order=resample_order, + resample_baa=resample_baa) + + output_info['bbox_sample']=bbox_sample + + if preprocess_only: + if cleanup: + shutil.rmtree(work_lib_dir) + shutil.rmtree(work_lib_dir_f) + if os.path.exists(flipdir): + shutil.rmtree(flipdir) + if pre_filters is not None: + sample_filtered.cleanup() + return (None,output_info) + + # 3. run non-linear registration if needed + # TODO: skip if sample presegmented + if do_nonlinear_register: + nl_sample=MriDataset(prefix=work_dir, name='nl_'+sample.name, add_n=sample_modalities ) + nonlinear_xfm=MriTransform(prefix=work_dir, name='nl_'+sample.name ) + + + if nonlinear_register_type=='elx' or nonlinear_register_type=='elastix' : + elastix_registration( bbox_sample, local_model, + nonlinear_xfm, + symmetric=segment_symmetric, + level=nlreg_level, + start_level=nlreg_start, + parameters=nlreg_options, + nl=True, + downsample=nlreg_downsample ) + elif nonlinear_register_type=='ants' or do_nonlinear_register_ants: + non_linear_registration( bbox_sample, local_model, + nonlinear_xfm, + symmetric=segment_symmetric, + level=nlreg_level, + start_level=nlreg_start, + parameters=nlreg_options, + ants=True, + downsample=nlreg_downsample ) + else: + non_linear_registration( bbox_sample, local_model, + nonlinear_xfm, + symmetric=segment_symmetric, + level=nlreg_level, + start_level=nlreg_start, + parameters=nlreg_options, + ants=False, + downsample=nlreg_downsample ) + + print("\n\n\nWarping the sample!:{}\n\n\n".format(bbox_sample)) + nl_sample.seg=None + nl_sample.seg_f=None + nl_sample.mask=None + nl_sample.mask_f=None + + warp_sample(bbox_sample, local_model, nl_sample, + transform=nonlinear_xfm, + symmetric=segment_symmetric, + resample_order=resample_order, + filters=post_filters, + ) + + warp_model_mask(local_model,bbox_sample_mask, + transform=nonlinear_xfm, + symmetric=segment_symmetric, + resample_order=resample_order) + + bbox_sample.mask=bbox_sample_mask.mask + bbox_sample.mask_f=bbox_sample_mask.mask_f + + output_info['bbox_sample']=bbox_sample + output_info['nl_sample']=nl_sample + else: + nl_sample=bbox_sample + # use mask from the model directly? + bbox_sample.mask=local_model.mask + bbox_sample.mask_f=local_model.mask + + output_info['nonlinear_xfm']=nonlinear_xfm + + if generate_library: + # remove excluded samples TODO: use regular expressions for matching? 
+ selected_library=[i for i in library if i[0] not in exclude] + selected_library_f=[] + + if segment_symmetric: # fill up with all entries + selected_library_f=copy.deepcopy(selected_library) + + # library pre-selection if needed + # TODO: skip if sample presegmented + if library_preselect>0 and library_preselect < len(selected_library): + loaded=False + loaded_f=False + + if os.path.exists(work_lib_dir+os.sep+'sel_library.json'): + with open(work_lib_dir+os.sep+'sel_library.json','r') as f: + selected_library=json.load(f) + loaded=True + + if segment_symmetric and os.path.exists(work_lib_dir_f+os.sep+'sel_library.json'): + with open(work_lib_dir_f+os.sep+'sel_library.json','r') as f: + selected_library_f=json.load(f) + loaded_f=True + + if do_nonlinear_register: + if not loaded: + selected_library=preselect(nl_sample, + selected_library, + method=library_preselect_method, + number=library_preselect, + use_nl=library_nl_samples_avail, + step=library_preselect_step, + lib_add_n=library_modalities) + if segment_symmetric: + if not loaded_f: + selected_library_f=preselect(nl_sample, + selected_library_f, + method=library_preselect_method, + number=library_preselect, + use_nl=library_nl_samples_avail, + flip=True, + step=library_preselect_step, + lib_add_n=library_modalities) + else: + if not loaded: + selected_library=preselect(bbox_sample, + selected_library, + method=library_preselect_method, + number=library_preselect, + use_nl=False, + step=library_preselect_step, + lib_add_n=library_modalities) + if segment_symmetric: + if not loaded_f: + selected_library_f=preselect(bbox_sample, + selected_library_f, + method=library_preselect_method, + number=library_preselect, + use_nl=False,flip=True, + step=library_preselect_step, + lib_add_n=library_modalities) + + if not loaded: + with open(work_lib_dir+os.sep+'sel_library.json','w') as f: + json.dump(selected_library,f) + + if not loaded_f: + if segment_symmetric: + with open(work_lib_dir_f+os.sep+'sel_library.json','w') as f: + json.dump(selected_library_f,f) + + output_info['selected_library']=selected_library + if segment_symmetric: + output_info['selected_library_f']=selected_library_f + + selected_library_scan=[] + selected_library_xfm=[] + selected_library_warped2=[] + selected_library_xfm2=[] + + selected_library_scan_f=[] + selected_library_xfm_f=[] + selected_library_warped_f=[] + selected_library_warped2_f=[] + selected_library_xfm2_f=[] + + for (i,j) in enumerate(selected_library): + d=MriDataset(scan=j[0],seg=j[1], add=j[2:2+library_modalities] ) + + selected_library_scan.append(d) + + selected_library_warped2.append( MriDataset(name=d.name, prefix=work_lib_dir, add_n=sample_modalities )) + selected_library_xfm2.append( MriTransform(name=d.name,prefix=work_lib_dir )) + + if library_nl_samples_avail: + selected_library_xfm.append( MriTransform(xfm=j[2+library_modalities], xfm_inv=j[3+library_modalities] ) ) + + output_info['selected_library_warped2']=selected_library_warped2 + output_info['selected_library_xfm2']=selected_library_xfm2 + if library_nl_samples_avail: + output_info['selected_library_xfm']=selected_library_xfm + + if segment_symmetric: + for (i,j) in enumerate(selected_library_f): + d=MriDataset(scan=j[0],seg=j[1], add=j[2:2+library_modalities] ) + selected_library_scan_f.append(d) + selected_library_warped2_f.append(MriDataset(name=d.name, prefix=work_lib_dir_f, add_n=sample_modalities )) + selected_library_xfm2_f.append(MriTransform( name=d.name, prefix=work_lib_dir_f )) + + if library_nl_samples_avail: + 
selected_library_xfm_f.append( MriTransform(xfm=j[2+library_modalities], xfm_inv=j[3+library_modalities] )) + + output_info['selected_library_warped2_f']=selected_library_warped2_f + output_info['selected_library_xfm2_f']=selected_library_xfm2_f + if library_nl_samples_avail: + output_info['selected_library_xfm_f']=selected_library_xfm_f + + # nonlinear registration to template or individual + + if do_pairwise: # Right now ignore precomputed transformations + results=[] + if debug: + print("Performing pairwise registration") + + for (i,j) in enumerate(selected_library): + # TODO: make clever usage of precomputed transform if available + if pairwise_register_type=='elx' or pairwise_register_type=='elastix' : + results.append( futures.submit( + elastix_registration, + bbox_sample, + selected_library_scan[i], + selected_library_xfm2[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + nl=True, + output_inv_target=selected_library_warped2[i], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + elif pairwise_register_type=='ants' or do_pairwise_ants: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan[i], + selected_library_xfm2[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=True, + output_inv_target=selected_library_warped2[i], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + else: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan[i], + selected_library_xfm2[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=False, + output_inv_target=selected_library_warped2[i], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + + + if segment_symmetric: + for (i,j) in enumerate(selected_library_f): + # TODO: make clever usage of precomputed transform if available + + if pairwise_register_type=='elx' or pairwise_register_type=='elastix' : + results.append( futures.submit( + elastix_registration, + bbox_sample, + selected_library_scan_f[i], + selected_library_xfm2_f[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + nl=True, + output_inv_target=selected_library_warped2_f[i], + warp_seg=True, + flip=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + elif pairwise_register_type=='ants' or do_pairwise_ants: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan_f[i], + selected_library_xfm2_f[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=True, + output_inv_target=selected_library_warped2_f[i], + warp_seg=True, + flip=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + else: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan_f[i], + selected_library_xfm2_f[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=False, + output_inv_target=selected_library_warped2_f[i], + warp_seg=True, + flip=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + # TODO: do we really need to wait for result here? 
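The pairwise branch submits one registration job per library entry through scoop's futures and only then blocks on futures.wait: the fusion step that follows reads the warped library files, so every job must have finished before fusion starts. The same scatter-gather shape, shown with the standard library's concurrent.futures, whose submit/wait API scoop mirrors (work() is a stand-in for one registration job):

    from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED

    def work(i):
        return i * i  # stand-in for one non_linear_registration(...) call

    with ThreadPoolExecutor() as pool:
        results = [pool.submit(work, i) for i in range(4)]
        # block until all jobs are done, as futures.wait(..., ALL_COMPLETED) does
        wait(results, return_when=ALL_COMPLETED)
        print([r.result() for r in results])  # [0, 1, 4, 9]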
+ futures.wait(results, return_when=futures.ALL_COMPLETED) + else: + + results=[] + + for (i, j) in enumerate(selected_library): + + lib_xfm=None + if library_nl_samples_avail: + lib_xfm=selected_library_xfm[i] + + results.append( futures.submit( + concat_resample, + selected_library_scan[i], + lib_xfm , + nonlinear_xfm, + selected_library_warped2[i], + resample_order=resample_order, + resample_baa=resample_baa + ) ) + + if segment_symmetric: + for (i, j) in enumerate(selected_library_f): + lib_xfm=None + if library_nl_samples_avail: + lib_xfm=selected_library_xfm_f[i] + + results.append( futures.submit( + concat_resample, + selected_library_scan_f[i], + lib_xfm, + nonlinear_xfm, + selected_library_warped2_f[i], + resample_order=resample_order, + resample_baa=resample_baa, + flip=True + ) ) + # TODO: do we really need to wait for result here? + futures.wait(results, return_when=futures.ALL_COMPLETED) + else: # no library generated + selected_library=[] + selected_library_f=[] + selected_library_warped2=[] + selected_library_warped2_f=[] + + results=[] + + sample_seg=MriDataset(name='bbox_seg_' + sample.name+out_variant, prefix=work_dir ) + sample_seg.mask=None + sample_seg.mask_f=None + + results.append( futures.submit( + fuse_segmentations, + bbox_sample, + sample_seg, + selected_library_warped2, + flip=False, + classes_number=classes_number, + fuse_options=fuse_options, + gco_energy=gco_energy, + ec_options=ec_options, + model=local_model, + debug=debug, + ec_variant=ec_variant, + fuse_variant=fuse_variant, + regularize_variant=regularize_variant + )) + + if segment_symmetric: + results.append( futures.submit( + fuse_segmentations, + bbox_sample, + sample_seg, + selected_library_warped2_f, + flip=True, + classes_number=classes_number, + fuse_options=fuse_options, + gco_energy=gco_energy, + ec_options=ec_options, + model=local_model, + debug=debug, + ec_variant=ec_variant, + fuse_variant=fuse_variant, + regularize_variant=regularize_variant + )) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + output_info['fuse']=results[0].result() + if segment_symmetric: + output_info['fuse_f']=results[1].result() + + if qc_options: + # generate QC images + output_info['qc'] = generate_qc_image(sample_seg, + bbox_sample, + sample_qc, + options=qc_options, + model=local_model, + symmetric=segment_symmetric, + labels=library_description['classes_number']) + # cleanup if need + if cleanup: + shutil.rmtree(work_lib_dir) + shutil.rmtree(work_lib_dir_f) + if os.path.exists(flipdir): + shutil.rmtree(flipdir) + + if nl_sample is not None: + nl_sample.cleanup() + + if pre_filters is not None: + sample_filtered.cleanup() + + if cleanup_xfm: + # TODO: remove more xfms(?) 
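In its simplest configuration (patch=0 and search=0 in fuse_options), the fusion submitted above reduces to per-voxel majority voting over the warped library segmentations; the pipeline delegates this to the multiple_volume_similarity tool. A toy numpy version of that voting rule, for intuition only:

    import numpy as np

    # three candidate label maps for the same 2x2 image
    candidates = np.stack([
        np.array([[0, 1], [1, 2]]),
        np.array([[0, 1], [2, 2]]),
        np.array([[0, 0], [1, 2]]),
    ])

    labels = 3  # classes_number, background included
    votes = np.stack([(candidates == l).sum(axis=0) for l in range(labels)])
    fused = votes.argmax(axis=0)  # most frequent label per voxel
    print(fused)                  # [[0 1] [1 2]]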
+ if nonlinear_xfm is not None: + nonlinear_xfm.cleanup() + + if not run_in_bbox: + # TODO: apply error correction here + # rename labels to final results + sample_seg_native=MriDataset(name='seg_' + sample.name+out_variant, prefix=work_dir ) + + warp_rename_seg(sample_seg, input_sample, sample_seg_native, + transform=bbox_linear_xfm, invert_transform=True, + lut=library_description['map'] , + symmetric=segment_symmetric, + symmetric_flip=segment_symmetric, + use_flipped=segment_symmetric, # needed to flip .seg_f back to right orientation + flip_lut=library_description['flip_map'], + resample_baa=resample_baa, + resample_order=resample_order, + datatype=seg_datatype ) + + output_info['sample_seg_native']=sample_seg_native + output_info['used_labels']=make_segmented_label_list(library_description,symmetric=segment_symmetric) + + if segment_symmetric: + join_left_right(sample_seg_native, output_segment+'_seg.mnc', datatype=seg_datatype) + else: + shutil.copyfile(sample_seg_native.seg, output_segment+'_seg.mnc') + + output_info['output_segment']=output_segment+'_seg.mnc' + + output_info['output_volumes']=seg_to_volumes(output_segment+'_seg.mnc', + output_segment+'_vol.json', + label_map=library_description.get('label_map',None)) + + output_info['output_volumes_json']=output_segment+'_vol.json' + + # TODO: cleanup more here (?) + + return (output_segment+'_seg.mnc',output_info) + else: # special case, needed to train error correction + return (sample_seg.seg,output_info) + + except mincError as e: + print("Exception in fusion_segment:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in fusion_segment:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/fuse_segmentations.py b/ipl/segment/fuse_segmentations.py new file mode 100644 index 0000000..af8299c --- /dev/null +++ b/ipl/segment/fuse_segmentations.py @@ -0,0 +1,390 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import copy
+import re
+import json
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+import ipl.minc_hl as hl
+
+# scoop parallel execution
+from scoop import futures, shared
+
+from .filter import *
+from .structures import *
+from .registration import *
+from .resample import *
+from .error_correction import *
+from .preselect import *
+from .qc import *
+
+import traceback
+
+def fuse_segmentations( sample, output, library,
+                        fuse_options={},
+                        flip=False,
+                        classes_number=2,
+                        gco_energy=None,
+                        ec_options=None,
+                        model=None,
+                        debug=False,
+                        ec_variant='',
+                        fuse_variant='',
+                        regularize_variant='',
+                        work_dir=None ):
+    try:
+        final_out_seg=output.seg
+        scan=sample.scan
+        add_scan=sample.add
+        output_info={}
+        preseg=sample.seg
+
+        if flip:
+            scan=sample.scan_f
+            add_scan=sample.add_f
+            final_out_seg=output.seg_f
+            preseg=sample.seg_f
+
+        if not os.path.exists( final_out_seg ):
+            with mincTools( verbose=2 ) as m:
+                if work_dir is None:
+                    work_dir=os.path.dirname(output.seg)
+
+                dataset_name=sample.name
+
+                if flip:
+                    dataset_name+='_f'
+
+                out_seg_fuse  = work_dir+os.sep+dataset_name+'_'+fuse_variant+'.mnc'
+                out_prob_base = work_dir+os.sep+dataset_name+'_'+fuse_variant+'_prob'
+                out_dist      = work_dir+os.sep+dataset_name+'_'+fuse_variant+'_dist.mnc'
+                out_seg_reg   = work_dir+os.sep+dataset_name+'_'+fuse_variant+'_'+regularize_variant+'.mnc'
+
+                out_seg_ec    = final_out_seg
+
+                output_info['work_dir']=work_dir
+                output_info['dataset_name']=dataset_name
+
+                if ec_options is None: # skip error-correction part
+                    out_seg_reg=out_seg_ec
+                    print("ec_options={}".format(repr(ec_options)))
+
+                output_info['out_seg_reg']=out_seg_reg
+                output_info['out_seg_fuse']=out_seg_fuse
+                output_info['out_dist']=out_dist
+
+                probs=[ '{}_{:02d}.mnc'.format(out_prob_base, i) for i in range(classes_number) ]
+
+                output_info['probs']=probs
+
+                if preseg is None:
+                    patch=0
+                    search=0
+                    threshold=0
+                    iterations=0
+                    gco_optimize=False
+                    nnls=False
+                    gco_diagonal=False
+                    label_norm=None
+                    ext_tool=None
+
+                    if fuse_options is not None:
+                        # get parameters
+                        patch=      fuse_options.get('patch',      0)
+                        search=     fuse_options.get('search',     0)
+                        threshold=  fuse_options.get('threshold',  0.0)
+                        iterations= fuse_options.get('iter',       3)
+                        weights=    fuse_options.get('weights',    None)
+                        nnls=       fuse_options.get('nnls',       False)
+                        label_norm= fuse_options.get('label_norm', None)
+                        beta=       fuse_options.get('beta',       None)
+                        new_prog=   fuse_options.get('new',        True)
+                        ext_tool=   fuse_options.get('ext',        None)
+
+                        # graph-cut based segmentation
+                        gco_optimize=   fuse_options.get('gco',            False)
+                        gco_diagonal=   fuse_options.get('gco_diagonal',   False)
+                        gco_wlabel=     fuse_options.get('gco_wlabel',     1.0)
+                        gco_wdata=      fuse_options.get('gco_wdata',      1.0)
+                        gco_wintensity= fuse_options.get('gco_wintensity', 0.0)
+                        gco_epsilon=    fuse_options.get('gco_epsilon',    1e-4)
+
+                    if label_norm is not None:
+                        print("Using label_norm:{}".format(repr(label_norm)))
+                        # need to create a rough labeling and an average scan
+                        segs=['multiple_volume_similarity']
+                        segs.extend([ i.seg for i in library ])
+                        segs.extend(['--majority', m.tmp('maj_seg.mnc'), '--bg'] )
+                        m.execute(segs)
+
+                        scans=[ i.scan for i in library ]
+                        m.median(scans,m.tmp('median.mnc'))
+
+                        norm_order=label_norm.get('order',3)
+                        norm_median=label_norm.get('median',True)
+
+                        n_scan=work_dir+os.sep+dataset_name+'_'+fuse_variant+'_norm.mnc'
+
+                        if flip:
+                            n_scan=work_dir+os.sep+dataset_name+'_'+fuse_variant+'_f_norm.mnc'
+
+                        hl.label_normalize(scan,m.tmp('maj_seg.mnc'),m.tmp('median.mnc'),m.tmp('maj_seg.mnc'),out=n_scan,order=norm_order,median=norm_median)
+                        scan=n_scan
+
+                    if ext_tool is not None: # will run an external segmentation tool!
+                        # ext_tool is expected to be a string with format language specs
+                        segs=ext_tool.format(sample=sample.scan,
+                                             mask=sample.mask,
+                                             output=out_seg_fuse,
+                                             prob_base=out_prob_base,
+                                             model_mas=model.mask,
+                                             model_atlas=model.seg)
+                        outputs=[out_seg_fuse]
+                        m.command(segs, inputs=[sample.scan], outputs=outputs)
+
+                        pass # TODO: finish this
+                    elif patch==0 and search==0: # perform simple majority voting
+                        # create majority-voted model segmentation, for ANIMAL segmentation if needed
+                        segs=['multiple_volume_similarity']
+                        segs.extend([ i.seg for i in library ])
+                        segs.extend(['--majority', out_seg_fuse, '--bg'] )
+                        m.execute(segs)
+
+                        # TODO: output fake probs ?
+
+                        if gco_energy is not None and gco_optimize:
+                            # TODO: place this into parameters
+                            split_labels( out_seg_fuse,
+                                          classes_number,
+                                          out_prob_base,
+                                          antialias=True,
+                                          blur=1.0,
+                                          expit=1.0,
+                                          normalize=True )
+                    else: # run patch-based label fusion
+                        # create a text file for the training library
+                        train_lib=os.path.dirname(library[0].seg)+os.sep+sample.name+'.lst'
+
+                        if flip:
+                            train_lib=os.path.dirname(library[0].seg)+os.sep+sample.name+'_f.lst'
+
+                        output_info['train_lib']=train_lib
+
+                        with open(train_lib,'w') as f:
+                            for i in library:
+                                ss=[ os.path.basename(i.scan) ]
+                                ss.extend([os.path.basename(j) for j in i.add])
+                                ss.append(os.path.basename(i.seg))
+                                f.write(",".join(ss))
+                                f.write("\n")
+
+                        outputs=[]
+
+                        if len(add_scan)>0:
+                            segs=['itk_patch_morphology_mc',
+                                  scan,
+                                  '--train',    train_lib,
+                                  '--search',   str(search),
+                                  '--patch',    str(patch),
+                                  '--discrete', str(classes_number),
+                                  '--adist',    out_dist,
+                                  '--prob',     out_prob_base ]
+
+                            if weights is not None:
+                                segs.extend(['--weights',weights])
+
+                            segs.extend(add_scan)
+                            segs.extend(['--output', out_seg_fuse])
+                        else:
+                            if nnls:
+                                segs=['itk_patch_segmentation', scan,
+                                      '--train',     train_lib,
+                                      '--search',    str(search),
+                                      '--patch',     str(patch),
+                                      '--discrete',  str(classes_number),
+                                      '--iter',      str(iterations),
+                                      '--prob',      out_prob_base,
+                                      '--adist',     out_dist,
+                                      '--nnls',
+                                      '--threshold', str(threshold) ]
+                            else:
+                                if new_prog:
+                                    segs=['itk_patch_segmentation','--exp']
+                                else:
+                                    segs=['itk_patch_morphology']
+
+                                segs.extend([scan,
+                                      '--train',     train_lib,
+                                      '--search',    str(search),
+                                      '--patch',     str(patch),
+                                      '--discrete',  str(classes_number),
+                                      '--iter',      str(iterations),
+                                      '--prob',      out_prob_base,
+                                      '--adist',     out_dist,
+                                      '--threshold', str(threshold) ])
+                                if beta is not None:
+                                    segs.extend(['--beta',str(beta)])
+
+                            segs.append(out_seg_fuse)
+                        # plug in additional modalities
+
+                        outputs=[ out_seg_fuse ]
+                        outputs.extend(probs)
+
+                        if sample.mask is not None:
+                            segs.extend(['--mask', sample.mask])
+
+                        m.command(segs, inputs=[sample.scan], outputs=outputs)
+                        print(' '.join(segs))
+
+                    if gco_energy is not None and gco_optimize:
+                        gco=[ 'gco_classify', '--cooc', gco_energy ]
+
+                        gco.extend( probs )
+                        gco.extend([out_seg_reg,
+                                    '--iter',    '1000',
+                                    '--wlabel',  str(gco_wlabel),
+                                    '--wdata',   str(gco_wdata),
+                                    '--epsilon', str(gco_epsilon)])
+
+                        if gco_diagonal:
+                            gco.append('--diagonal')
+
+                        if gco_wintensity > 0.0:
+                            gco.extend( ['--intensity',  scan,
+                                         '--wintensity', str(gco_wintensity)] )
+
+                        if sample.mask is not None:
+                            gco.extend(['--mask', sample.mask])
+
+                        m.command(gco, inputs=probs, outputs=[ out_seg_reg ] )
+                    else:
+                        shutil.copyfile(out_seg_fuse, out_seg_reg)
+                else:
#shutil.copyfile(preseg, out_seg_reg) + + + if ec_options is None: + shutil.copyfile(preseg,final_out_seg) + out_seg_reg=final_out_seg + else: + out_seg_reg=preseg + + output_info['out_seg_reg']=out_seg_reg + output_info['out_seg_fuse']=out_seg_reg + output_info['out_dist']=None + output_info['prob']=None + #out_seg_reg = preseg + + if ec_options is not None: + # create ec mask + ec_border_mask = ec_options.get( 'border_mask' , True ) + ec_border_mask_width = ec_options.get( 'border_mask_width' , 3 ) + + ec_antialias_labels = ec_options.get( 'antialias_labels' , True ) + ec_blur_labels = ec_options.get( 'blur_labels', 1.0 ) + ec_expit_labels = ec_options.get( 'expit_labels', 1.0 ) + ec_normalize_labels = ec_options.get( 'normalize_labels', True ) + ec_use_raw = ec_options.get( 'use_raw', False ) + ec_split = ec_options.get( 'split', None ) + + train_mask = model.mask + ec_input_prefix = out_seg_reg.rsplit('.mnc',1)[0]+'_'+ec_variant + + if ec_border_mask : + train_mask = ec_input_prefix + '_train_mask.mnc' + make_border_mask( out_seg_reg, train_mask, + width=ec_border_mask_width, labels=classes_number ) + + ec_input=[ scan ] + ec_input.extend(sample.add) + + if classes_number>2 and (not ec_use_raw ): + split_labels( out_seg_reg, classes_number, ec_input_prefix, + antialias=ec_antialias_labels, + blur=ec_blur_labels, + expit=ec_expit_labels, + normalize=ec_normalize_labels ) + + ec_input.extend([ '{}_{:02d}.mnc'.format(ec_input_prefix,i) for i in range(classes_number) ]) # skip background feature ? + else: + ec_input.append( out_seg_reg )# the auto segmentation is + + output_info['out_seg_ec']=out_seg_ec + + if ec_split is None: + if ec_variant is not None: + out_seg_ec_errors1 = work_dir + os.sep + dataset_name + '_' + fuse_variant+'_'+regularize_variant+'_'+ec_variant+'_error1.mnc' + out_seg_ec_errors2 = work_dir + os.sep + dataset_name + '_' + fuse_variant+'_'+regularize_variant+'_'+ec_variant+'_error2.mnc' + + output_info['out_seg_ec_errors1']=out_seg_ec_errors1 + output_info['out_seg_ec_errors2']=out_seg_ec_errors2 + + errorCorrectionApply(ec_input, + out_seg_ec, + input_mask=train_mask, + parameters=ec_options, + input_auto=out_seg_reg, + debug=debug, + multilabel=classes_number, + debug_files=[out_seg_ec_errors1, out_seg_ec_errors2 ] ) + else: + results=[] + parts=[] + + for s in range(ec_split): + out='{}_part_{:d}.mnc'.format(ec_input_prefix,s) + train_part=ec_options['training'].rsplit('.pickle',1)[0] + '_' + str(s) + '.pickle' + ec_options_part=copy.deepcopy(ec_options) + ec_options_part['training']=train_part + + if ec_variant is not None: + out_seg_ec_errors1 = work_dir+os.sep+dataset_name+'_'+fuse_variant+'_'+regularize_variant+'_'+ec_variant+'_error1_'+str(s)+'.mnc' + out_seg_ec_errors2 = work_dir+os.sep+dataset_name+'_'+fuse_variant+'_'+regularize_variant+'_'+ec_variant+'_error2_'+str(s)+'.mnc' + + parts.append(out) + results.append( futures.submit( + errorCorrectionApply, + ec_input, out, + input_mask=train_mask, + parameters=ec_options_part, + input_auto=out_seg_reg, + debug=debug, + partition=ec_split, + part=s, + multilabel=classes_number, + debug_files=[out_seg_ec_errors1,out_seg_ec_errors2] )) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + merge_segmentations(parts, out_seg_ec, ec_split, ec_options) + + return output_info + + except mincError as e: + print("Exception in fuse_segmentations:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in fuse_segmentations:{}".format(sys.exc_info()[0])) + 
traceback.print_exc( file=sys.stdout)
+        raise
+
+def join_left_right(sample,output,datatype=None):
+    with mincTools() as m:
+        cmd=['itk_merge_discrete_labels',sample.seg,sample.seg_f,output]
+        if datatype is not None:
+            cmd.append('--'+datatype)
+        m.command(cmd,inputs=[sample.seg,sample.seg_f],outputs=[output])
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/segment/labels.py b/ipl/segment/labels.py
new file mode 100644
index 0000000..c6ed099
--- /dev/null
+++ b/ipl/segment/labels.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+def split_labels_seg(sample):
+    '''split up one multi-label segmentation into a set of files'''
+    try:
+        with mincTools() as m:
+            if sample.seg is not None:
+                base=sample.seg.rsplit('.mnc',1)[0]+'_%03d.mnc'
+                sample.seg_split=m.split_labels(sample.seg,base)
+            if sample.seg_f is not None:
+                base=sample.seg_f.rsplit('.mnc',1)[0]+'_%03d.mnc'
+                sample.seg_f_split=m.split_labels(sample.seg_f,base)
+    except mincError as e:
+        print("Exception in split_labels_seg:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in split_labels_seg:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def merge_labels_seg(sample):
+    '''merge multiple segmentations into a single file'''
+    try:
+        with mincTools() as m:
+            if any(sample.seg_split):
+                if sample.seg is None:
+                    sample.seg=sample.seg_split[0].rsplit('_000.mnc',1)[0]+'.mnc'
+                m.merge_labels(sample.seg_split,sample.seg)
+            if any(sample.seg_f_split):
+                if sample.seg_f is None:
+                    sample.seg_f=sample.seg_f_split[0].rsplit('_000.mnc',1)[0]+'.mnc'
+                m.merge_labels(sample.seg_f_split,sample.seg_f)
+    except mincError as e:
+        print("Exception in merge_labels_seg:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in merge_labels_seg:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/segment/library.py b/ipl/segment/library.py
new file mode 100644
index 0000000..4409790
--- /dev/null
+++ b/ipl/segment/library.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import copy
+import json
+import os
+import sys
+import traceback
+
+def save_library_info(library_description, output, name='library.json'):
+    """Save library information into a directory, using a predefined file structure
+    Arguments:
+    library_description -- dictionary with library description
+    output -- output directory
+
+    Keyword arguments:
+    name -- optional name of .json file, relative to the output directory, default 'library.json'
+    """
+    try:
+        tmp_library_description=copy.deepcopy(library_description)
+        tmp_library_description.pop('prefix',None)
+
+        for i in ['local_model','local_model_mask', 'local_model_flip',
+                  'local_model_mask_flip',
+                  'local_model_seg','local_model_sd','local_model_avg','local_model_ovl',
+                  'gco_energy']:
+            if tmp_library_description[i] is not None:
+                tmp_library_description[i]=os.path.relpath(tmp_library_description[i],output)
+
+        for (j, i) in enumerate(tmp_library_description['local_model_add']):
+            tmp_library_description['local_model_add'][j]=os.path.relpath(i, output)
+
+        for (j, i) in enumerate(tmp_library_description['local_model_add_flip']):
+            tmp_library_description['local_model_add_flip'][j]=os.path.relpath(i, output)
+
+        for i in ['model','model_mask']:
+            # if it starts with the same prefix, remove it
+            if os.path.dirname(tmp_library_description[i])==output \
+               or tmp_library_description[i][0]!=os.sep:
+                tmp_library_description[i]=os.path.relpath(tmp_library_description[i],output)
+
+        for (j, i) in enumerate(tmp_library_description['model_add']):
+            if os.path.dirname(i)==output:
+                tmp_library_description['model_add'][j]=os.path.relpath(i, output)
+
+        for (j, i) in enumerate(tmp_library_description['library']):
+            for (k,t) in enumerate(i):
+                tmp_library_description['library'][j][k]=os.path.relpath(t, output)
+
+        with open(output+os.sep+name,'w') as f:
+            json.dump(tmp_library_description,f,indent=1)
+    except :
+        print("Error saving library information into:{} {}".format(output,sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stderr)
+        raise
+
+def load_library_info(prefix, name='library.json'):
+    """Load library information from a directory, using a predefined file structure
+    Arguments:
+    prefix -- directory path
+
+    Keyword arguments:
+    name -- optional name of .json file, relative to the input directory, default 'library.json'
+    """
+    try:
+        library_description={}
+        with open(prefix+os.sep+name,'r') as f:
+            library_description=json.load(f)
+
+        library_description['prefix']=prefix
+
+        for i in ['local_model','local_model_mask', 'local_model_flip',
+                  'local_model_mask_flip','local_model_seg','gco_energy']:
+            if library_description[i] is not None: library_description[i]=prefix+os.sep+library_description[i]
+
+        try:
+            for (j, i) in enumerate(library_description['local_model_add']):
+                library_description['local_model_add'][j]=prefix+os.sep+i
+
+            for (j, i) in enumerate(library_description['local_model_add_flip']):
+                library_description['local_model_add_flip'][j]=prefix+os.sep+i
+        except KeyError:
+            pass
+
+        for (j, i) in enumerate(library_description['library']):
+            for (k,t) in enumerate(i):
+                library_description['library'][j][k]=prefix+os.sep+t
+
+        for i in ['model','model_mask']:
+            # if it starts with os.sep, assume it's an absolute path
+            if library_description[i] is not None and library_description[i][0]!=os.sep:
+                library_description[i]=prefix+os.sep+library_description[i]
+        try:
+            for (j, i) in enumerate(library_description['model_add']):
+                if library_description['model_add'][j][0]!=os.sep:
+                    library_description['model_add'][j]=prefix+os.sep+i
+        except KeyError:
+            pass
+
+        return library_description
+    except :
+        print("Error loading library information from:{} {}".format(prefix,sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stderr)
+        raise
+
+def make_segmented_label_list(library_description,symmetric=False):
+    """Make a list of labels that are included in the segmentation library,
+    taking into account flipped labels too if needed
+    """
+    used_labels=set()
+
+    if isinstance(library_description['map'], dict):
+        for i in library_description['map'].items():
+            used_labels.add(int(i[1]))
+        if symmetric:
+            for i in library_description['flip_map'].items():
+                used_labels.add(int(i[1]))
+    else:
+        for i in library_description['map']:
+            used_labels.add(int(i[1]))
+        if symmetric:
+            for i in library_description['flip_map']:
+                used_labels.add(int(i[1]))
+    return list(used_labels)
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/segment/model.py b/ipl/segment/model.py
new file mode 100644
index 0000000..096f0d7
--- /dev/null
+++ b/ipl/segment/model.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+
+def create_local_model(tmp_lin_samples, model, local_model,
+                       extend_boundary=4,
+                       op=None,
+                       symmetric=False ):
+    '''create an average segmentation and use it to create a local model'''
+    try:
+        with mincTools() as m:
+            segs=['multiple_volume_similarity']
+            segs.extend([ i.seg for i in tmp_lin_samples ])
+
+            if symmetric: segs.extend([ i.seg_f for i in tmp_lin_samples ])
+
+            segs.extend(['--majority', m.tmp('majority.mnc')] )
+            m.execute(segs)
+            maj=m.tmp('majority.mnc')
+
+            if op is not None:
+                m.binary_morphology(maj, op, m.tmp('majority_op.mnc'),binarize_threshold=0.5)
+                maj=m.tmp('majority_op.mnc')
+
+            # TODO: replace mincreshape/mincbbox with something more sensible
+            out=m.execute_w_output(['mincbbox', '-threshold', '0.5', '-mincreshape', maj ]).rstrip("\n").split(' ')
+
+            s=[ int(i) for i in out[1].split(',') ]
+            c=[ int(i) for i in out[3].split(',') ]
+
+            start=[s[0]-extend_boundary,   s[1]-extend_boundary,   s[2]-extend_boundary ]
+            ext=  [c[0]+extend_boundary*2, c[1]+extend_boundary*2, c[2]+extend_boundary*2]
+
+            # reshape the mask
+            m.execute(['mincreshape',
+                       '-start','{},{},{}'.format(start[0], start[1], start[2]),
+                       '-count','{},{},{}'.format(ext[0],   ext[1],   ext[2]  ),
+                       maj, local_model.mask, '-byte' ] )
+
+            m.resample_smooth(model.scan, local_model.scan, like=local_model.mask, order=0)
+            m.resample_labels(m.tmp('majority.mnc'),local_model.seg, like=local_model.mask, order=0)
+
+            for (i,j) in enumerate(model.add):
+                m.resample_smooth(model.add[i], local_model.add[i], like=local_model.mask, order=0)
+
+    except mincError as e:
+        print("Exception in create_local_model:{}".format(repr(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in create_local_model:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def create_local_model_flip(local_model, model, remap={},
+                            extend_boundary=4, op=None ):
+    try:
+        with mincTools() as m:
+            m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0])
+            m.resample_labels(local_model.seg, m.tmp('flip_seg.mnc'),
+                              transform=m.tmp('flip_x.xfm'),
+                              order=0, remap=remap, like=model.scan)
+
+            seg=m.tmp('flip_seg.mnc')
+
+            if op is not
None: + m.binary_morphology(seg, op, m.tmp('flip_seg_op.mnc'),binarize_threshold=0.5) + seg=m.tmp('flip_seg_op.mnc') + + # TODO: replace mincreshape/mincbbox with something more sensible + out=m.execute_w_output(['mincbbox', '-threshold', '0.5', '-mincreshape', seg ]).rstrip("\n").split(' ') + + s=[ int(i) for i in out[1].split(',') ] + c=[ int(i) for i in out[3].split(',') ] + + start=[s[0]-extend_boundary, s[1]-extend_boundary ,s[2]-extend_boundary ] + ext= [c[0]+extend_boundary*2, c[1]+extend_boundary*2 ,c[2]+extend_boundary*2] + # reshape the mask + m.execute(['mincreshape', + '-start','{},{},{}'.format(start[0], start[1], start[2]), + '-count','{},{},{}'.format(ext[0], ext[1], ext[2] ), + seg, + local_model.mask_f, + '-byte' ] ) + + m.resample_smooth(local_model.scan, local_model.scan_f, + like=local_model.mask_f, order=0, transform=m.tmp('flip_x.xfm')) + + for (i,j) in enumerate(model.add_f): + m.resample_smooth(model.add[i], local_model.add_f[i], + like=local_model.mask_f, order=0, transform=m.tmp('flip_x.xfm')) + + except mincError as e: + print("Exception in create_local_model_flip:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in create_local_model_flip:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/preselect.py b/ipl/segment/preselect.py new file mode 100644 index 0000000..f850e7d --- /dev/null +++ b/ipl/segment/preselect.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + +import shutil +import os +import sys +import csv +import copy +import re +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +# scoop parallel execution +from scoop import futures, shared + +from .filter import * +from .structures import * +from .registration import * +from .resample import * + +import traceback + + +def preselect(sample, + library, + method='MI', + number=10, + mask=None, + use_nl=False, + flip=False, + step=None, + lib_add_n=0): + '''calculate requested similarity function and return top number of elements from the library''' + results=[] + column=0 + + # TODO: use multiple modalities for preselection? 
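preselect() scores every library entry against the target with minctracc's final objective function value (lower is better, see calculate_similarity below) and keeps the N best. Stripped of the MINC calls, the selection is just an ascending sort; a self-contained sketch with a made-up score table (preselect_top_n is illustrative, not part of ipl):

    def preselect_top_n(library, score, n=10):
        # score: entry -> float, lower is better (minctracc objective convention)
        return sorted(library, key=score)[:n]

    lib = ['a', 'b', 'c', 'd']
    fake_scores = {'a': 0.3, 'b': 0.1, 'c': 0.4, 'd': 0.2}
    print(preselect_top_n(lib, fake_scores.get, n=2))  # ['b', 'd']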
+ if use_nl: + column=4+lib_add_n + + for (i,j) in enumerate(library): + results.append( futures.submit( + calculate_similarity, sample, MriDataset(scan=j[column]), method=method, mask=mask, flip=flip, step=step + ) ) + futures.wait(results, return_when=futures.ALL_COMPLETED) + + val=[ (j.result(), library[i] ) for (i,j) in enumerate(results)] + + val_sorted=sorted(val, key=lambda s: s[0] ) + + return [i[1] for i in val_sorted[ 0:number] ] + + +def calculate_similarity(sample1, sample2, + mask=None, method='MI', + flip=False, step=None): + try: + with mincTools() as m: + scan=sample1.scan + + if flip: + scan=sample1.scan_f + + # figure out step size, minctracc works extremely slow when step size is smaller then file step size + info_sample1=m.mincinfo( sample1.scan ) + + cmds=[ 'minctracc', scan, sample2.scan, '-identity' ] + + if method=='MI': + cmds.extend( ['-nmi', '-blur_pdf', '9'] ) + else: + cmds.append( '-xcorr' ) + + if step is None: + step= max( abs( info_sample1['xspace'].step ) , + abs( info_sample1['yspace'].step ) , + abs( info_sample1['zspace'].step ) ) + + cmds.extend([ + '-step', str(step), str(step), str(step), + '-simplex', '1', + '-tol', '0.01', + '-lsq6', + '-est_center', + '-clob', + m.tmp('similarity.xfm') + ]) + + if mask is not None: + cmds.extend( ['-source_mask', mask]) + + output=re.search( '^Final objective function value = (\S+)' , m.execute_w_output(cmds, verbose=0), flags=re.MULTILINE).group(1) + + return float(output) + + except mincError as e: + print("Exception in calculate_similarity:{}".format( str(e)) ) + traceback.print_exc( file=sys.stdout ) + raise + + except : + print("Exception in calculate_similarity:{}".format( sys.exc_info()[0]) ) + traceback.print_exc( file=sys.stdout ) + raise diff --git a/ipl/segment/qc.py b/ipl/segment/qc.py new file mode 100644 index 0000000..54428c8 --- /dev/null +++ b/ipl/segment/qc.py @@ -0,0 +1,275 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date +# + +import shutil +import os +import sys +import csv +import copy +import re +import json + + +import argparse + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +import traceback + +# local things +from ipl.segment.structures import * + + +def make_contours(input, output, width=1): + """Convert multi-label image into another multilabel images with borders only + Arguments: + input -- input minc file + output -- output file + + Keyword arguments: + width -- width of the border to leave behind, default 1 (voxels) + """ + with mincTools() as m: + m.command(['c3d', input,'-split', + '-foreach', + '-dup', '-erode', '1' ,'{}x{}x{}'.format(width,width,width), '-scale', '-1', + '-add', + '-endfor', + '-merge', + '-type', 'short','-o',output], + inputs=[input],outputs=[output], + verbose=True) + +def generate_qc_image(sample_seg, + sample, + sample_qc, + options={}, + model=None, + symmetric=False, + labels=2, + title=None): + """Gnerate QC image for multilabel segmentation + Arguments: + sample_seg -- input segmentation + sample -- input file + sample_qc -- output QC file + + Keyword arguments: + options -- options as dictionary with following keys: + lut_file -- LUT file for minclookup, default None + spectral_mask -- boolean , if spectral mask should be used, default False + dicrete_mask -- boolean , if discrete mask should be used, default False + image_range -- list of two real values + clamp -- boolean, if range clamp should be used + big + contours + contour_width + crop + model -- reference model, default None + symmetric -- boolean, if symmetric QC is needed + width -- width of the border to leave behind, default 1 (voxels) + labels -- integer, number of labels present, default 2 + title -- QC image title + """ + try: + + #TODO: implement advanced features + qc_lut=options.get('lut_file',None) + spectral_mask=options.get('spectral_mask',False) + dicrete_mask=options.get('dicrete_mask',False) + image_range=options.get('image_range',None) + clamp=options.get('clamp',False) + big=options.get('big',False) + contours=options.get('contours',False) + contour_width=options.get('contour_width',1) + crop=options.get('crop',None) + + if qc_lut is not None: + spectral_mask=False + dicrete_mask=True + + with mincTools() as m: + seg=sample_seg.seg + seg_f=sample_seg.seg_f + scan=sample.scan + scan_f=sample.scan_f + + if crop is not None: + # remove voxels from the edge + m.autocrop(scan,m.tmp('scan.mnc'),isoexpand=-crop) + scan=m.tmp('scan.mnc') + m.resample_labels(seg,m.tmp('seg.mnc'),like=scan) + seg=m.tmp('seg.mnc') + + if symmetric: + m.autocrop(scan_f,m.tmp('scan_f.mnc'),isoexpand=-crop) + scan_f=m.tmp('scan_f.mnc') + m.resample_labels(seg_f,m.tmp('seg_f.mnc'),like=scan) + seg_f=m.tmp('seg_f.mnc') + + if contours: + make_contours(seg,m.tmp('seg_contours.mnc'),width=contour_width) + seg=m.tmp('seg_contours.mnc') + if symmetric: + make_contours(seg_f,m.tmp('seg_f_contours.mnc'),width=contour_width) + seg_f=m.tmp('seg_f_contours.mnc') + + if symmetric: + + m.qc( scan, + m.tmp('qc.png'), + mask=seg, + mask_range=[0,labels-1], + big=False, + clamp=clamp, + image_range=image_range, + spectral_mask=spectral_mask, + dicrete_mask=dicrete_mask, + mask_lut=qc_lut) + + m.qc( scan_f, + m.tmp('qc_f.png'), + mask=seg_f, + mask_range=[0,labels-1], + image_range=image_range, + big=False, + clamp=clamp, + spectral_mask=spectral_mask, + dicrete_mask=dicrete_mask, + title=title, + mask_lut=qc_lut) + + m.command(['montage','-tile','2x1','-geometry','+1+1', + 
m.tmp('qc.png'),m.tmp('qc_f.png'),sample_qc], + inputs=[m.tmp('qc.png'),m.tmp('qc_f.png')], + outputs=[sample_qc]) + else: + m.qc( scan, + sample_qc, + mask=seg, + mask_range=[0,labels-1], + image_range=image_range, + big=True, + mask_lut=qc_lut, + spectral_mask=spectral_mask, + dicrete_mask=dicrete_mask, + clamp=clamp, + title=title) + + return [sample_qc] + except mincError as e: + print("Exception in generate_qc_image:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in generate_qc_image:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + + + +def parse_options(): + + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description='Run QC step manually') + + parser.add_argument('--scan', + help="Underlying scan") + + parser.add_argument('--scan_f', + help="flipped scan") + + parser.add_argument('--seg', + help="Segmentation") + + parser.add_argument('--seg_f', + help="flipped segmentation") + + parser.add_argument('--spectral_mask', + action="store_true", + default=False ) + + parser.add_argument('--discrete_mask', + action="store_true", + default=False ) + + parser.add_argument('--clamp', + action="store_true", + default=False ) + + parser.add_argument('--big', + action="store_true", + default=False ) + + parser.add_argument('--contours', + action="store_true", + default=False ) + + parser.add_argument('--contour_width', + default=1, + type=int, + help="contour_width") + + parser.add_argument('--image_range', + nargs=2, + help="Range") + + parser.add_argument('--lut_file', + help="LUT") + + parser.add_argument('--crop', + type=int, + default=None, + help="Crop files") + + parser.add_argument('--labels', + type=int, + default=2, + help="Number of labels") + + parser.add_argument('output') + + return parser.parse_args() + + +#crop=options.get('crop',None) + +if __name__ == '__main__': + options = parse_options() + + if options.output is None or options.scan is None: + print("Provide some input") + exit(1) + + segment_symmetric=False + if options.scan_f is not None: + segment_symmetric=True + + sample_scan=MriDataset(name='scan', scan=options.scan,scan_f=options.scan_f ) + sample_seg=MriDataset(name='seg', seg=options.seg,seg_f=options.seg_f ) + class_number=1 + + qc_options={ + 'lut_file':options.lut_file, + 'spectral_mask':options.spectral_mask, + 'dicrete_mask':options.discrete_mask, + 'image_range':options.image_range, + 'clamp':options.clamp, + 'big':options.big, + 'contours':options.contours, + 'contour_width':options.contour_width, + 'crop':options.crop + } + + generate_qc_image(sample_seg, + sample_scan, + options.output, + options=qc_options, + symmetric=segment_symmetric, + labels=options.labels) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/registration.py b/ipl/segment/registration.py new file mode 100644 index 0000000..840658b --- /dev/null +++ b/ipl/segment/registration.py @@ -0,0 +1,673 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date +# + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +import ipl.registration +import ipl.ants_registration +import ipl.elastix_registration + +def linear_registration( + sample, + model, + output_xfm, + output_sample=None, + output_invert_xfm=None, + init_xfm=None, + symmetric=False, + ants=False, + reg_type ='-lsq12', + objective='-xcorr', + linreg=None, + work_dir=None, + close=False, + warp_seg=False, + resample_order=2, + resample_aa=None, + resample_baa=False, + downsample=None, + bbox=False, + use_mask=True + ): + """perform linear registration to the model, and calculate inverse""" + try: + _init_xfm=None + _init_xfm_f=None + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + print("Registering options: {}".format(repr(linreg))) + print("Registering sample :{}".format(repr(sample))) + print("Registering model :{}".format(repr(model))) + + with mincTools() as m: + + #TODO: check more files? + if not m.checkfiles(inputs=[sample.scan], outputs=[output_xfm.xfm]): return + + #if _init_xfm is None: + # _init_xfm=_init_xfm_f=m.tmp('identity.xfm') + # m.param2xfm(m.tmp('identity.xfm')) + + scan=sample.scan + scan_f=sample.scan_f + + mask=sample.mask + mask_f=sample.mask_f + + model_mask=model.mask + model_mask_f=model.mask + + if mask is None: model_mask=None + if mask_f is None: model_mask_f=None + + if not use_mask: + mask=None + model_mask=None + mask_f=None + model_mask_f=None + + _output_xfm =output_xfm.xfm + _output_xfm_f=output_xfm.xfm_f + + if bbox: + print("Running in bbox!\n\n\n") + scan=m.tmp('scan.mnc') + m.resample_smooth(sample.scan, scan, like=model.scan, transform=_init_xfm) + if sample.mask is not None and (not use_mask): + mask=m.tmp('mask.mnc') + m.resample_labels(sample.mask, mask, like=model.scan, transform=_init_xfm) + _init_xfm=None + close=True + _output_xfm=m.tmp('output.xfm') + + if symmetric: + scan_f=m.tmp('scan_f.mnc') + m.resample_smooth(sample.scan_f, scan_f, like=model.scan, transform=_init_xfm_f) + if sample.mask_f is not None and (not use_mask): + mask_f=m.tmp('mask_f.mnc') + m.resample_labels(sample.mask_f, mask_f, like=model.scan, transform=_init_xfm_f) + _init_xfm_f=None + _output_xfm_f=m.tmp('output_f.xfm') + + if symmetric: + if ants: + ipl.ants_registration.linear_register_ants2( + scan, + model.scan, + _output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + parameters=linreg, + close=close, + downsample=downsample, + ) + ipl.ants_registration.linear_register_ants2( + scan_f, + model.scan, + _output_xfm_f, + source_mask=mask_f, + target_mask=model_mask_f, + init_xfm=_init_xfm_f, + parameters=linreg, + close=close, + downsample=downsample, + ) + else: + ipl.registration.linear_register( + scan, + model.scan, + _output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + objective=objective, + parameters=reg_type, + conf=linreg, + close=close, + downsample=downsample, + ) + + ipl.registration.linear_register( + scan_f, + model.scan, + _output_xfm_f, + source_mask=mask_f, + target_mask=model_mask_f, + init_xfm=_init_xfm_f, + objective=objective, + parameters=reg_type, + conf=linreg, + close=close, + downsample=downsample, + ) + else: + if ants: + ipl.ants_registration.linear_register_ants2( + scan, + model.scan, + _output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + parameters=linreg, + close=close, + downsample=downsample, + ) + 
else: + ipl.registration.linear_register( + scan, + model.scan, + _output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + parameters=reg_type, + conf=linreg, + close=close, + downsample=downsample, + ) + if bbox : + if init_xfm is not None: + m.xfmconcat([init_xfm.xfm,_output_xfm],output_xfm.xfm) + if symmetric: + m.xfmconcat([init_xfm.xfm_f,_output_xfm_f],output_xfm.xfm_f) + else: + shutil.copyfile(_output_xfm,output_xfm.xfm) + if symmetric: + shutil.copyfile(_output_xfm_f,output_xfm.xfm_f) + + if output_invert_xfm is not None: + m.xfminvert(output_xfm.xfm, output_invert_xfm.xfm) + if symmetric: + m.xfminvert(output_xfm.xfm_f, output_invert_xfm.xfm_f) + + if output_sample is not None: + m.resample_smooth(sample.scan, output_sample.scan, + transform=output_xfm.xfm, + like=model.scan, + order=resample_order) + if warp_seg: + m.resample_labels(sample.seg, output_sample.seg, + transform=output_xfm.xfm, + aa=resample_aa, + order=resample_order, + like=model.scan, + baa=resample_baa) + + if symmetric: + m.resample_smooth(sample.scan_f, output_sample.scan_f, + transform=output_xfm.xfm_f, + like=model.scan, + order=resample_order) + if warp_seg: + m.resample_labels(sample.seg_f, output_sample.seg_f, + transform=output_xfm.xfm_f, + aa=resample_aa, + order=resample_order, + like=model.scan, + baa=resample_baa) + + return True + except mincError as e: + print("Exception in linear_registration:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in linear_registration:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +def elastix_registration( + sample, + model, + output_xfm, + output_sample=None, + output_invert=True, + init_xfm=None, + symmetric=False, + work_dir=None, + warp_seg=False, + resample_order=2, + resample_aa=None, + resample_baa=False, + downsample=None, + parameters=None, + bbox=False, + nl=False, + level=2, + start_level=None, # not really used + use_mask=True + ): + """perform elastix registration to the model, and calculate inverse""" + try: + + with mincTools() as m: + + #TODO: check more files? + if not m.checkfiles(inputs=[sample.scan], outputs=[output_xfm.xfm]): return + + _init_xfm=None + _init_xfm_f=None + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + mask=sample.mask + mask_f=sample.mask_f + model_mask=model.mask + + if mask is None: + model_mask=None + + if not use_mask: + mask=None + model_mask=None + mask_f=None + model_mask_f=None + + scan=sample.scan + scan_f=sample.scan_f + + _output_xfm=output_xfm.xfm + _output_xfm_f=output_xfm.xfm_f + + if bbox: + scan=m.tmp('scan.mnc') + m.resample_smooth(sample.scan, scan, like=model.scan, transform=_init_xfm) + if sample.mask is not None and (not use_mask): + mask=m.tmp('mask.mnc') + m.resample_labels(sample.mask, mask, like=model.scan, transform=_init_xfm) + _init_xfm=None + close=True + _output_xfm=m.tmp('output.xfm') + + if symmetric: + scan_f=m.tmp('scan_f.mnc') + m.resample_smooth(sample.scan_f, scan_f, like=model.scan, transform=_init_xfm_f) + if sample.mask_f is not None and (not use_mask): + mask_f=m.tmp('mask_f.mnc') + m.resample_labels(sample.mask_f, mask_f, like=model.scan, transform=_init_xfm_f) + _init_xfm_f=None + _output_xfm_f=m.tmp('output_f.xfm') + + #TODO: update elastix registration to downsample xfm? 
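When bbox=True, the refinement is estimated on a sample already resampled through init_xfm, and the wrapper afterwards composes the two transforms with m.xfmconcat([init_xfm.xfm, _output_xfm], ...), initial transform first. Composition order matters; a toy 2D homogeneous-affine illustration of that ordering with numpy (not MINC code, just the algebra):

    import numpy as np

    init = np.array([[1., 0., 5.],
                     [0., 1., 0.],
                     [0., 0., 1.]])    # initial alignment: shift x by 5
    refine = np.array([[1., 0., 0.],
                       [0., 1., 2.],
                       [0., 0., 1.]])  # refinement found in bbox space: shift y by 2

    point = np.array([0., 0., 1.])
    composed = refine @ init           # apply init first, then the refinement
    print(composed @ point)            # [5. 2. 1.]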
+ if symmetric: + ipl.elastix_registration.register_elastix( + scan, + model.scan, + output_xfm=_output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + downsample=downsample, + downsample_grid=level, + parameters=parameters, + nl=nl + ) + ipl.elastix_registration.register_elastix( + scan_f, + model.scan, + output_xfm=_output_xfm_f, + source_mask=mask_f, + target_mask=model_mask, + init_xfm=_init_xfm_f, + downsample=downsample, + downsample_grid=level, + parameters=parameters, + nl=nl + ) + else: + ipl.elastix_registration.register_elastix( + scan, + model.scan, + output_xfm=_output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + downsample=downsample, + downsample_grid=level, + parameters=parameters, + nl=nl + ) + + if bbox : + if init_xfm is not None: + m.xfmconcat([init_xfm.xfm,_output_xfm],output_xfm.xfm) + if symmetric: + m.xfmconcat([init_xfm.xfm_f,_output_xfm_f],output_xfm.xfm_f) + else: + shutil.copyfile(_output_xfm,output_xfm.xfm) + if symmetric: + shutil.copyfile(_output_xfm_f,output_xfm.xfm_f) + + if output_invert: + if nl: + m.xfm_normalize(output_xfm.xfm, model.scan, output_xfm.xfm_inv, step=level, invert=True) + else: + m.xfminvert(output_xfm.xfm, output_xfm.xfm_inv) + + if symmetric: + if nl: + m.xfm_normalize(output_xfm.xfm_f, model.scan, output_xfm.xfm_f_inv, step=level, invert=True) + else: + m.xfminvert(output_xfm.xfm_f, output_xfm.xfm_f_inv) + + if output_sample is not None: + m.resample_smooth(sample.scan, output_sample.scan, + transform=output_xfm.xfm, + like=model.scan, order=resample_order) + if warp_seg: + m.resample_labels(sample.seg, output_sample.seg, + transform=output_xfm.xfm, + aa=resample_aa, order=resample_order, + like=model.scan, baa=resample_baa) + + if symmetric: + m.resample_smooth(sample.scan_f, output_sample.scan_f, + transform=output_xfm.xfm_f, + like=model.scan, order=resample_order) + if warp_seg: + m.resample_labels(sample.seg_f, output_sample.seg_f, + transform=output_xfm.xfm_f, + aa=resample_aa, order=resample_order, + like=model.scan, baa=resample_baa) + + return True + except mincError as e: + print("Exception in elastix_registration:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in elastix_registration:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + + +def non_linear_registration( + sample, + model, + output, + output_sample=None, + output_invert=True, + init_xfm=None, + level=2, + start_level=8, + symmetric=False, + parameters=None, + work_dir=None, + ants=False, + warp_seg=False, + resample_order=2, + resample_aa=None, + resample_baa=False, + output_inv_target=None, + flip=False, + downsample=None, + ): + """perform linear registration to the model, and calculate inverse""" + + try: + _init_xfm=None + _init_xfm_f=None + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + + with mincTools() as m: + + #TODO: check more files? + if not m.checkfiles(inputs=[sample.scan], outputs=[output.xfm]): return + + + if symmetric: + # TODO: split up into two jobs? 
+                if not os.path.exists( output.xfm ) or \
+                   not os.path.exists( output.xfm_f ) :
+                    if ants:
+                        ipl.ants_registration.non_linear_register_ants2(
+                            sample.scan,
+                            model.scan,
+                            m.tmp('forward')+'.xfm',
+                            target_mask=model.mask,
+                            parameters=parameters,
+                            downsample=downsample,
+                            level=level,
+                            start=start_level,
+                            #work_dir=work_dir
+                            )
+                        ipl.ants_registration.non_linear_register_ants2(
+                            sample.scan_f,
+                            model.scan,
+                            m.tmp('forward_f')+'.xfm',
+                            target_mask=model.mask,
+                            parameters=parameters,
+                            downsample=downsample,
+                            level=level,
+                            start=start_level,
+                            #work_dir=work_dir
+                            )
+                    else:
+                        ipl.registration.non_linear_register_full(
+                            sample.scan,
+                            model.scan,
+                            m.tmp('forward')+'.xfm',
+                            #source_mask=sample.mask,
+                            target_mask=model.mask,
+                            init_xfm=_init_xfm,
+                            parameters=parameters,
+                            level=level,
+                            start=start_level,
+                            downsample=downsample,
+                            #work_dir=work_dir
+                            )
+                        ipl.registration.non_linear_register_full(
+                            sample.scan_f,
+                            model.scan,
+                            m.tmp('forward_f')+'.xfm',
+                            #source_mask=sample.mask_f,
+                            target_mask=model.mask,
+                            init_xfm=_init_xfm_f,
+                            parameters=parameters,
+                            level=level,
+                            start=start_level,
+                            downsample=downsample,
+                            #work_dir=work_dir
+                            )
+                    m.xfm_normalize(m.tmp('forward')+'.xfm',model.scan,output.xfm,step=level)
+                    #TODO: regularize here
+                    m.xfm_normalize(m.tmp('forward_f')+'.xfm',model.scan,output.xfm_f,step=level)
+
+                    if output_invert:
+                        if ants:
+                            m.xfm_normalize(m.tmp('forward')+'_inverse.xfm', model.scan, output.xfm_inv, step=level )
+                            m.xfm_normalize(m.tmp('forward_f')+'_inverse.xfm',model.scan, output.xfm_f_inv, step=level )
+                        else:
+                            m.xfm_normalize(m.tmp('forward')+'.xfm', model.scan, output.xfm_inv, step=level, invert=True)
+                            m.xfm_normalize(m.tmp('forward_f')+'.xfm',model.scan, output.xfm_f_inv, step=level, invert=True)
+            else:
+                if not os.path.exists( output.xfm ) :
+                    if flip:
+                        if ants:
+                            ipl.ants_registration.non_linear_register_ants2(
+                                sample.scan_f,
+                                model.scan,
+                                m.tmp('forward')+'.xfm',
+                                target_mask=model.mask,
+                                parameters=parameters,
+                                downsample=downsample,
+                                level=level,
+                                start=start_level,
+                                #work_dir=work_dir
+                                )
+                        else:
+                            ipl.registration.non_linear_register_full(
+                                sample.scan_f,
+                                model.scan,
+                                m.tmp('forward')+'.xfm',
+                                #source_mask=sample.mask_f,
+                                target_mask=model.mask,
+                                init_xfm=_init_xfm,
+                                parameters=parameters,
+                                level=level,
+                                start=start_level,
+                                downsample=downsample,
+                                #work_dir=work_dir
+                                )
+                    else:
+                        if ants:
+                            ipl.ants_registration.non_linear_register_ants2(
+                                sample.scan,
+                                model.scan,
+                                m.tmp('forward')+'.xfm',
+                                target_mask=model.mask,
+                                parameters=parameters,
+                                downsample=downsample,
+                                level=level,
+                                start=start_level,
+                                #work_dir=work_dir
+                                )
+                        else:
+                            ipl.registration.non_linear_register_full(
+                                sample.scan,
+                                model.scan,
+                                m.tmp('forward')+'.xfm',
+                                #source_mask=sample.mask,
+                                target_mask=model.mask,
+                                init_xfm=_init_xfm,
+                                parameters=parameters,
+                                level=level,
+                                start=start_level,
+                                downsample=downsample,
+                                #work_dir=work_dir
+                                )
+                    m.xfm_normalize(m.tmp('forward')+'.xfm', model.scan, output.xfm, step=level)
+
+                    if output_invert:
+                        if ants: # ANTS produces forward and inverse
+                            m.xfm_normalize(m.tmp('forward')+'_inverse.xfm', model.scan, output.xfm_inv, step=level )
+                        else:
+                            m.xfm_normalize(m.tmp('forward')+'.xfm', model.scan, output.xfm_inv, step=level, invert=True)
+
+            if output_sample is not None:
+                m.resample_smooth(sample.scan, output_sample.scan,
+                                  transform=output.xfm_inv,
+                                  like=model.scan,
+                                  order=resample_order,
+                                  invert_transform=True)
+
+                for (i,j) in enumerate(sample.add):
+                    m.resample_smooth(sample.add[i], output_sample.add[i],
+                                      transform=output.xfm_inv,
+                                      like=model.scan,
+                                      order=resample_order,
+                                      invert_transform=True)
+                if warp_seg:
+                    m.resample_labels(sample.seg, output_sample.seg,
+                                      transform=output.xfm_inv,
+                                      aa=resample_aa,
+                                      order=resample_order,
+                                      like=model.scan,
+                                      invert_transform=True,
+                                      baa=resample_baa)
+
+                if symmetric:
+                    m.resample_smooth(sample.scan_f, output_sample.scan_f,
+                                      transform=output.xfm_f_inv,
+                                      like=model.scan,
+                                      invert_transform=True,
+                                      order=resample_order)
+
+                    for (i,j) in enumerate(sample.add_f):
+                        m.resample_smooth(sample.add_f[i], output_sample.add_f[i],
+                                          transform=output.xfm_f_inv,
+                                          like=model.scan,
+                                          order=resample_order,
+                                          invert_transform=True)
+
+                    if warp_seg:
+                        m.resample_labels(sample.seg_f, output_sample.seg_f,
+                                          transform=output.xfm_f_inv,
+                                          aa=resample_aa,
+                                          order=resample_order,
+                                          like=model.scan,
+                                          invert_transform=True,
+                                          baa=resample_baa )
+
+            if output_inv_target is not None:
+                m.resample_smooth(model.scan, output_inv_target.scan,
+                                  transform=output.xfm,
+                                  like=sample.scan,
+                                  order=resample_order,
+                                  invert_transform=True)
+
+                for (i,j) in enumerate(output_inv_target.add):
+                    m.resample_smooth(model.add[i], output_inv_target.add[i],
+                                      transform=output.xfm,
+                                      like=sample.scan,
+                                      order=resample_order,
+                                      invert_transform=True)
+
+                if warp_seg:
+                    m.resample_labels(model.seg, output_inv_target.seg,
+                                      transform=output.xfm,
+                                      aa=resample_aa,
+                                      order=resample_order,
+                                      like=sample.scan,
+                                      invert_transform=True,
+                                      baa=resample_baa)
+
+                if symmetric:
+                    m.resample_smooth(model.scan, output_inv_target.scan_f,
+                                      transform=output.xfm_f,
+                                      like=sample.scan,
+                                      invert_transform=True,
+                                      order=resample_order)
+
+                    for (i,j) in enumerate(output_inv_target.add):
+                        m.resample_smooth(model.add_f[i], output_inv_target.add_f[i],
+                                          transform=output.xfm_f,
+                                          like=sample.scan,
+                                          invert_transform=True,
+                                          order=resample_order)
+
+                    if warp_seg:
+                        m.resample_labels(model.seg, output_inv_target.seg_f,
+                                          transform=output.xfm_f,
+                                          aa=resample_aa,
+                                          order=resample_order,
+                                          like=sample.scan,
+                                          invert_transform=True,
+                                          baa=resample_baa )
+
+    except mincError as e:
+        print("Exception in non_linear_registration:{}".format(repr(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except:
+        print("Exception in non_linear_registration:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/segment/resample.py b/ipl/segment/resample.py
new file mode 100644
index 0000000..5c0ef73
--- /dev/null
+++ b/ipl/segment/resample.py
@@ -0,0 +1,367 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+from .filter import *
+
+
+# scoop parallel execution
+from scoop import futures, shared
+
+def create_fake_mask(in_seg, out_mask, op=None ):
+    try:
+        with mincTools() as m :
+            if op is None :
+                m.calc([in_seg], 'A[0]>0.5?1:0', out_mask, labels=True)
+            else :
+                m.binary_morphology(in_seg, op, out_mask, binarize_threshold=0.5)
+    except mincError as e:
+        print("Exception in create_fake_mask:{}".format(repr(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except:
+        print("Exception in create_fake_mask:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+
+def resample_file(input,output,xfm=None,like=None,order=4,invert_transform=False):
+    '''resample input file using provided transformation'''
+    try:
+        with mincTools() as m:
+            m.resample_smooth(input,output,xfm=xfm,like=like,order=order,invert_transform=invert_transform)
+    except mincError as e:
+        print("Exception in resample_file:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except:
+        print("Exception in resample_file:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+def resample_split_segmentations(input, output,xfm=None, like=None, order=4, invert_transform=False, symmetric=False):
+    '''resample individual segmentations, using parallel execution'''
+    results=[]
+    base=input.seg.rsplit('.mnc',1)[0]
+    for (i,j) in input.seg_split.items():
+        if i not in output.seg_split:
+            output.seg_split[i]='{}_{:03d}.mnc'.format(base,i)
+
+        results.append(futures.submit(
+            resample_file,j,output.seg_split[i],xfm=xfm,like=like,order=order,invert_transform=invert_transform
+            ))
+    if symmetric:
+        base=input.seg_f.rsplit('.mnc',1)[0]
+        for (i,j) in input.seg_f_split.items():
+            if i not in output.seg_f_split:
+                output.seg_f_split[i]='{}_{:03d}.mnc'.format(base,i)
+
+            results.append(futures.submit(
+                resample_file,j,output.seg_f_split[i],xfm=xfm,like=like,order=order,invert_transform=invert_transform
+                ))
+    futures.wait(results, return_when=futures.ALL_COMPLETED)
+
+
+def warp_rename_seg( sample, model, output,
+                    transform=None,
+                    symmetric=False,
+                    symmetric_flip=False,
+                    lut=None,
+                    flip_lut=None,
+                    resample_order=2,
+                    resample_aa=None,
+                    resample_baa=False,
+                    invert_transform=False,
+                    use_flipped=False,
+                    datatype=None,
+                    create_mask=False,
+                    op_mask=None):
+    #TODO: should I warp the mask too, if present?
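+    # Note: lut / flip_lut are remap tables passed straight to resample_labels()
+    # below; as an illustration, a hypothetical lut={2:1} would relabel class 2
+    # voxels as class 1 while the segmentation is warped into model space.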
+    try:
+        print("warp_rename_seg sample={} output={}".format(repr(sample),repr(output)))
+        with mincTools() as m:
+            xfm=None
+            if transform is not None:
+                xfm=transform.xfm
+
+                if symmetric:
+                    xfm_f=transform.xfm_f
+
+            m.resample_labels(sample.seg, output.seg,
+                              transform=xfm,
+                              aa=resample_aa,
+                              order=resample_order,
+                              remap=lut,
+                              like=model.scan,
+                              invert_transform=invert_transform,
+                              datatype=datatype,
+                              baa=resample_baa)
+
+            if create_mask:
+                create_fake_mask(output.seg, output.mask, op=op_mask)
+            elif sample.mask is not None:
+                m.resample_labels(sample.mask, output.mask,
+                                  transform=xfm,
+                                  order=resample_order,
+                                  like=model.scan,
+                                  invert_transform=invert_transform,
+                                  datatype=datatype )
+
+            if symmetric:
+
+                seg_f=sample.seg
+
+                if use_flipped:
+                    seg_f=sample.seg_f
+
+                if symmetric_flip:
+                    m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0])
+                    xfm_f=m.tmp('flip_x.xfm')
+
+                    if transform is not None:
+                        m.xfmconcat( [m.tmp('flip_x.xfm'), transform.xfm_f ], m.tmp('transform_flip.xfm') )
+                        xfm_f=m.tmp('transform_flip.xfm')
+
+                m.resample_labels(seg_f, output.seg_f,
+                                  transform=xfm_f,
+                                  aa=resample_aa,
+                                  order=resample_order,
+                                  remap=flip_lut,
+                                  like=model.scan,
+                                  invert_transform=invert_transform,
+                                  datatype=datatype,
+                                  baa=resample_baa)
+                if create_mask:
+                    create_fake_mask(output.seg_f, output.mask_f, op=op_mask)
+                elif sample.mask_f is not None:
+                    m.resample_labels(sample.mask_f, output.mask_f,
+                                      transform=xfm_f,
+                                      order=resample_order,
+                                      like=model.scan,
+                                      invert_transform=invert_transform,
+                                      datatype=datatype )
+    except mincError as e:
+        print("Exception in warp_rename_seg:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+
+    except:
+        print("Exception in warp_rename_seg:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+def warp_sample( sample,
+                 model,
+                 output,
+                 transform=None,
+                 symmetric=False,
+                 symmetric_flip=False,
+                 resample_order=None,
+                 use_flipped=False,
+                 filters=None):
+    # TODO: add filters here
+    try:
+        with mincTools() as m:
+            xfm=None
+            xfm_f=None
+            seg_output=output.seg
+            seg_output_f=output.seg_f
+
+            if transform is not None:
+                xfm=transform.xfm
+                if symmetric:
+                    xfm_f=transform.xfm_f
+
+            output_scan=output.scan
+
+            if filters is not None:
+                output_scan=m.tmp('sample.mnc')
+
+            m.resample_smooth(sample.scan, output_scan, transform=xfm, like=model.scan, order=resample_order)
+
+            if filters is not None:
+                # TODO: maybe move it to a separate stage?
+                # HACK: assuming that segmentation was already warped!
+                apply_filter(output_scan, output.scan, filters, model=model.scan, input_mask=output.mask, input_labels=seg_output, model_labels=model.seg)
+
+            for (i,j) in enumerate( sample.add ):
+                output_scan = output.add[i]
+                if filters is not None:
+                    output_scan=m.tmp('sample_{}.mnc'.format(i))
+
+                m.resample_smooth(sample.add[i], output_scan, transform=xfm, like=model.scan, order=resample_order)
+
+                if filters is not None:
+                    # TODO: maybe move it to a separate stage?
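+                    # (Pattern used throughout warp_sample: resample into m.tmp(...)
+                    #  first, then apply_filter() produces the final volume, e.g.
+                    #  intensity normalization against model.scan; the exact 'filters'
+                    #  keys are defined by the library configuration.)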
+                    # TODO: apply segmentations for seg-based filtering
+                    apply_filter(output_scan, output.add[i], filters, model=model.scan, input_mask=output.mask, input_labels=seg_output, model_labels=model.seg)
+
+            if symmetric:
+                scan_f=sample.scan
+
+                if use_flipped:
+                    scan_f=sample.scan #TODO: figure out what it is
+
+                if symmetric_flip:
+                    m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0])
+                    xfm_f=m.tmp('flip_x.xfm')
+
+                    if transform is not None:
+                        m.xfmconcat( [m.tmp('flip_x.xfm'), transform.xfm_f ], m.tmp('transform_flip.xfm') )
+                        xfm_f=m.tmp('transform_flip.xfm')
+
+                output_scan_f=output.scan_f
+
+                if filters is not None:
+                    output_scan_f=m.tmp('sample_f.mnc')
+
+                m.resample_smooth(scan_f, output_scan_f, transform=xfm_f, like=model.scan, order=resample_order)
+
+                if filters is not None:
+                    # TODO: maybe move it to a separate stage?
+                    apply_filter(output_scan_f, output.scan_f, filters, model=model.scan, input_mask=output.mask_f, input_labels=seg_output_f, model_labels=model.seg)
+
+                for (i,j) in enumerate( sample.add_f ):
+                    output_scan_f = output.add_f[i]
+                    if filters is not None:
+                        output_scan_f=m.tmp('sample_f_{}.mnc'.format(i))
+
+                    m.resample_smooth( sample.add_f[i], output_scan_f, transform=xfm_f, like=model.scan, order=resample_order)
+
+                    if filters is not None:
+                        apply_filter( output_scan_f, output.add_f[i], filters, model=model.scan, input_mask=output.mask_f, input_labels=seg_output_f, model_labels=model.seg)
+
+            output.mask=None
+            output.mask_f=None
+
+    except mincError as e:
+        print("Exception in warp_sample:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except:
+        print("Exception in warp_sample:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+def warp_model_mask( model,
+                     output,
+                     transform=None,
+                     symmetric=False,
+                     symmetric_flip=False,
+                     resample_order=None):
+    # TODO: add filters here
+    try:
+        with mincTools() as m:
+            xfm=None
+            xfm_f=None
+
+            if transform is not None:
+                xfm=transform.xfm
+                if symmetric:
+                    xfm_f=transform.xfm_f
+
+            m.resample_labels(model.mask, output.mask, transform=xfm, like=output.scan, invert_transform=True)
+
+            if symmetric:
+                if symmetric_flip:
+                    m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0])
+                    xfm_f=m.tmp('flip_x.xfm')
+
+                    if transform is not None:
+                        m.xfmconcat( [m.tmp('flip_x.xfm'), transform.xfm_f ], m.tmp('transform_flip.xfm') )
+                        xfm_f=m.tmp('transform_flip.xfm')
+
+                m.resample_labels(model.mask, output.mask_f, transform=xfm_f, like=output.scan_f, invert_transform=True)
+
+    except mincError as e:
+        print("Exception in warp_model_mask:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except:
+        print("Exception in warp_model_mask:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+
+def concat_resample(lib_scan,
+                    xfm_lib,
+                    xfm_sample,
+                    output,
+                    model=None,
+                    resample_aa=None,
+                    resample_order=2,
+                    resample_baa=False,
+                    flip=False ):
+    '''Concatenate inv(xfm2) and inv(xfm1) and resample scan'''
+    try:
+
+        if not os.path.exists(output.seg) or \
+           not os.path.exists(output.scan) :
+            with mincTools() as m:
+                _model=None
+
+                if model is not None:
+                    _model=model.scan
+
+                full_xfm=None
+
+                if xfm_lib is not None and xfm_sample is not None:
+                    if flip:
+                        m.xfmconcat([ xfm_sample.xfm_f, xfm_lib.xfm_inv ], m.tmp('Full.xfm') )
+                    else:
+                        m.xfmconcat([ xfm_sample.xfm, xfm_lib.xfm_inv ], m.tmp('Full.xfm') )
+                    full_xfm=m.tmp('Full.xfm')
+                elif xfm_lib is not None:
+                    full_xfm=xfm_lib.xfm_inv
+                elif xfm_sample is not None:
+                    if flip:
+                        full_xfm=xfm_sample.xfm_f
+                    else:
+                        full_xfm=xfm_sample.xfm
+
+                m.resample_labels(lib_scan.seg, output.seg,
+                                  transform=full_xfm,
+                                  aa=resample_aa,
+                                  order=resample_order,
+                                  like=_model,
+                                  invert_transform=True,
+                                  baa=resample_baa )
+
+                m.resample_smooth(lib_scan.scan, output.scan,
+                                  transform=full_xfm,
+                                  order=resample_order,
+                                  like=_model,
+                                  invert_transform=True)
+
+                for (i,j) in enumerate(lib_scan.add):
+                    m.resample_smooth(lib_scan.add[i], output.add[i],
+                                      transform=full_xfm,
+                                      order=resample_order,
+                                      like=_model,
+                                      invert_transform=True)
+    except mincError as e:
+        print("Exception in concat_resample:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except:
+        print("Exception in concat_resample:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/segment/structures.py b/ipl/segment/structures.py
new file mode 100644
index 0000000..160f982
--- /dev/null
+++ b/ipl/segment/structures.py
@@ -0,0 +1,169 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+# data structures used in segmentation package
+
+import shutil
+import os
+import sys
+import tempfile
+import traceback
+import json
+
+
+class MriDataset(object):
+    ''' Scan sample with segmentation and mask'''
+    def __init__(self, prefix=None, name=None, scan=None, mask=None, seg=None,
+                 scan_f=None, mask_f=None, seg_f=None, protect=False,
+                 add=[], add_n=None,
+                 add_f=[] ):
+        self.prefix=prefix
+        self.name=name
+        self.scan=scan
+        self.mask=mask
+        self.seg=seg
+        self.protect=protect
+        self.seg_split={}
+
+        self.scan_f = scan_f
+        self.mask_f = mask_f
+        self.seg_f  = seg_f
+        self.seg_f_split={}
+        self.add    = add
+        self.add_f  = add_f
+
+        if self.name is None :
+            if scan is not None:
+                self.name=os.path.basename(scan).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+                if self.prefix is None:
+                    self.prefix=os.path.dirname(self.scan)
+            else:
+                if self.prefix is None:
+                    raise ValueError("trying to create dataset without name and prefix")
+                (_h, _name) = tempfile.mkstemp(suffix='.mnc', dir=prefix)
+                os.close(_h)
+                self.name=os.path.relpath(_name,prefix)
+                os.unlink(_name)
+
+        if scan is None:
+            if self.prefix is not None:
+                self.scan=self.prefix+os.sep+self.name+'.mnc'
+                self.mask=self.prefix+os.sep+self.name+'_mask.mnc'
+                self.seg=self.prefix+os.sep+self.name+'_seg.mnc'
+                self.scan_f=self.prefix+os.sep+self.name+'_f.mnc'
+                self.mask_f=self.prefix+os.sep+self.name+'_f_mask.mnc'
+                self.seg_f=self.prefix+os.sep+self.name+'_f_seg.mnc'
+
+                if add_n is not None:
+                    self.add=[self.prefix+os.sep+self.name+'_{}.mnc'.format(i) for i in range(add_n)]
+                    self.add_f=[self.prefix+os.sep+self.name+'_{}_f.mnc'.format(i) for i in range(add_n)]
+                else:
+                    self.add=[]
+                    self.add_f=[]
+        #------
+
+    def __repr__(self):
+        return "MriDataset(\n prefix=\"{}\",\n name=\"{}\",\n scan=\"{}\",\n scan_f=\"{}\",\n mask=\"{}\",\n mask_f=\"{}\",\n seg=\"{}\",\n seg_f=\"{}\",\n protect={},\n add={},\n add_f={})".\
+               format(self.prefix,self.name,self.scan,self.scan_f,self.mask,self.mask_f,self.seg,self.seg_f,repr(self.protect),repr(self.add),repr(self.add_f))
+
+    def cleanup(self):
+        if not self.protect:
+            for i in (self.scan, self.mask, self.seg, self.scan_f, self.mask_f, self.seg_f ):
+                if i is not None and os.path.exists(i):
+                    os.unlink(i)
+
+            for (i,j) in self.seg_split.items():
+                if os.path.exists(j):
+                    os.unlink(j)
+
+            for (i,j) in self.seg_f_split.items():
+                if os.path.exists(j):
+                    os.unlink(j)
+
+            for (i,j) in enumerate(self.add):
+                if os.path.exists(j):
+                    os.unlink(j)
+    # ------------
+
+
+class MriTransform(object):
+    '''Transformation'''
+    def __init__(self, prefix=None, name=None, xfm=None, protect=False, xfm_f=None, xfm_inv=None, xfm_f_inv=None, nl=False ):
+        self.prefix=prefix
+        self.name=name
+
+        self.xfm=xfm
+        self.grid=None
+
+        self.xfm_f=xfm_f
+        self.grid_f=None
+
+        self.xfm_inv=xfm_inv
+        self.grid_inv=None
+
+        self.xfm_f_inv=xfm_f_inv
+        self.grid_f_inv=None
+
+        self.protect=protect
+        self.nl=nl
+
+        if name is None and xfm is None:
+            raise ValueError("Undefined name and xfm")
+
+        if name is None and xfm is not None:
+            self.name=os.path.basename(xfm).rsplit('.xfm',1)[0]
+
+            if self.prefix is None:
+                self.prefix=os.path.dirname(self.xfm)
+
+        if xfm is None:
+            if self.prefix is not None:
+                self.xfm=       self.prefix+os.sep+self.name+'.xfm'
+                self.grid=      self.prefix+os.sep+self.name+'_grid_0.mnc'
+
+                self.xfm_f=     self.prefix+os.sep+self.name+'_f.xfm'
+                self.grid_f=    self.prefix+os.sep+self.name+'_f_grid_0.mnc'
+
+                self.xfm_inv=   self.prefix+os.sep+self.name+'_invert.xfm'
+                self.grid_inv=  self.prefix+os.sep+self.name+'_invert_grid_0.mnc'
+
+                self.xfm_f_inv= self.prefix+os.sep+self.name+'_f_invert.xfm'
+                self.grid_f_inv=self.prefix+os.sep+self.name+'_f_invert_grid_0.mnc'
+
+    def __repr__(self):
+        return 'MriTransform(prefix="{}",name="{}")'.\
+               format(self.prefix, self.name )
+
+    def cleanup(self):
+        if not self.protect:
+            for i in (self.xfm, self.grid, self.xfm_f, self.grid_f, self.xfm_inv, self.grid_inv, self.xfm_f_inv, self.grid_f_inv ):
+                if i is not None and os.path.exists(i):
+                    os.unlink(i)
+
+class MRIEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, MriTransform):
+            return {'name':obj.name,
+                    'xfm' :obj.xfm,
+                    'xfm_f':obj.xfm_f,
+                    'xfm_inv' :obj.xfm_inv,
+                    'xfm_f_inv':obj.xfm_f_inv,
+                    'prefix':obj.prefix
+                    }
+        elif isinstance(obj, MriDataset):
+            return {'name':obj.name,
+                    'scan':obj.scan,
+                    'mask':obj.mask,
+                    'scan_f':obj.scan_f,
+                    'mask_f':obj.mask_f,
+                    'prefix':obj.prefix,
+                    'add':obj.add,
+                    'add_f':obj.add_f
+                    }
+        # Let the base class default method raise the TypeError
+        return json.JSONEncoder.default(self, obj)
+
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/segment/train.py b/ipl/segment/train.py
new file mode 100644
index 0000000..bebbf49
--- /dev/null
+++ b/ipl/segment/train.py
@@ -0,0 +1,672 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+from __future__ import print_function
+
+import shutil
+import os
+import sys
+import csv
+import copy
+
+# MINC stuff
+# from ipl.minc_tools import mincTools,mincError
+
+# scoop parallel execution
+from scoop import futures, shared
+
+from .filter import *
+from .structures import *
+from .registration import *
+from .resample import *
+from .error_correction import *
+from .model import *
+from .library import *
+
+
+def inv_dict(d):
+    return { v:k for (k,v) in d.items() }
+
+
+def generate_library(parameters, output, debug=False,cleanup=False):
+    '''Actual generation of the segmentation library'''
+    try:
+        if debug: print(repr(parameters))
+
+        # read parameters
+        reference_model           = parameters[ 'reference_model']
+        reference_mask            = parameters.get( 'reference_mask', None)
+        reference_model_add       = parameters.get( 'reference_model_add', [])
+
+        reference_local_model     = parameters.get( 'reference_local_model', None)
+        reference_local_mask      = parameters.get( 'reference_local_mask', None)
+
+        reference_local_model_flip= parameters.get( 'reference_local_model_flip', None)
+        reference_local_mask_flip = parameters.get( 'reference_local_mask_flip', None)
+
+        library                   = parameters[ 'library' ]
+
+        work_dir                  = parameters.get( 'workdir',output+os.sep+'work')
+
+        # should we build a symmetric model
+        build_symmetric           = parameters.get( 'build_symmetric',False)
+
+        # should we build a symmetric flipped model
+        build_symmetric_flip      = parameters.get( 'build_symmetric_flip',False)
+
+        # lookup table for renaming labels for more compact representation
+        build_remap               = parameters.get( 'build_remap',{})
+
+        # lookup table for renaming labels for more compact representation,
+        # when building symmetrized library
+        build_flip_remap          = parameters.get( 'build_flip_remap',{})
+
+        # lookup table for renaming labels for more compact representation,
+        # when building symmetrized library
+        build_unflip_remap        = parameters.get( 'build_unflip_remap',{})
+
+        if not build_unflip_remap and build_flip_remap and build_remap:
+            build_unflip_remap = create_unflip_remap(build_remap,build_flip_remap)
+
+        # label map
+        label_map                 = parameters.get( 'label_map',None)
+        classes_number            = parameters.get( 'classes',2)
+        # perform filtering as final stage of the library creation
+
+        pre_filters               = parameters.get( 'pre_filters', None )
+        post_filters              = parameters.get( 'post_filters', parameters.get( 'filters', None ))
+
+        # perform denoising as final stage of the library creation
+        resample_order            = parameters.get( 'resample_order',2)
+
+        # use boundary anti-aliasing filter when resampling labels
+        resample_baa              = parameters.get( 'resample_baa',True)
+
+        # extend bounding box to reduce boundary effects
+        extend_boundary           = parameters.get( 'extend_boundary',4)
+
+        # extend masks
+        #dilate_mask              = parameters.get( 'dilate_mask',3)
+        op_mask                   = parameters.get( 'op_mask','E[2] D[4]')
+
+        # if linear registration should be performed
+        do_initial_register       = parameters.get( 'initial_register',
+                                        parameters.get( 'linear_register', {}))
+
+        if do_initial_register is not None and isinstance(do_initial_register,dict):
+            initial_register      = do_initial_register
+            do_initial_register   = True
+        else:
+            initial_register      = {}
+
+
+        inital_reg_type           = parameters.get( 'initial_register_type',
+                                        parameters.get( 'linear_register_type',
+                                            initial_register.get('type','-lsq12')))
+
+        inital_reg_ants           = parameters.get( 'initial_register_ants',
+                                        parameters.get( 'linear_register_ants', False))
+
+        inital_reg_options        = parameters.get( 'initial_register_options',
+                                        initial_register.get('options',None) )
+
+        inital_reg_downsample     = parameters.get( 'initial_register_downsample',
+                                        initial_register.get('downsample',None))
+
+        inital_reg_use_mask       = parameters.get( 'initial_register_use_mask',
+                                        initial_register.get('use_mask',False))
+
+        initial_reg_objective     = initial_register.get('objective','-xcorr')
+
+        # perform local linear registration
+        do_initial_local_register = parameters.get( 'initial_local_register',
+                                        parameters.get( 'local_linear_register', {}) )
+        if do_initial_local_register is not None and isinstance(do_initial_local_register,dict):
+            initial_local_register=do_initial_local_register
+            do_initial_local_register=True
+        else:
+            initial_local_register={}
+
+        local_reg_type            = parameters.get( 'local_register_type',
+                                        initial_local_register.get('type','-lsq12'))
+
+        local_reg_ants            = parameters.get( 'local_register_ants', False)
+
+        local_reg_opts            = parameters.get( 'local_register_options',
+                                        initial_local_register.get('options',None))
+
+        local_reg_bbox            = parameters.get( 'local_register_bbox',
+                                        initial_local_register.get('bbox',False ))
+
+        local_reg_downsample      = parameters.get( 'local_register_downsample',
+                                        initial_local_register.get('downsample',None))
+
+        local_reg_use_mask        = parameters.get( 'local_register_use_mask',
+                                        initial_local_register.get('use_mask',True))
+
+        local_reg_objective       = initial_local_register.get('objective','-xcorr')
+
+        # if non-linear registration should be performed for library creation
+        do_nonlinear_register     = parameters.get( 'non_linear_register',False)
+
+        # if non-linear registration should be performed with ANTS
+        do_nonlinear_register_ants= parameters.get( 'non_linear_register_ants',False)
+        nonlinear_register_type   = parameters.get( 'non_linear_register_type',None)
+        if nonlinear_register_type is None:
+            if do_nonlinear_register_ants:
+                nonlinear_register_type='ants'
+
+        nlreg_level               = parameters.get('non_linear_register_level', 2)
+        nlreg_start               = parameters.get('non_linear_register_start', 16)
+        nlreg_options             = parameters.get('non_linear_register_options', None)
+        nlreg_downsample          = parameters.get('non_linear_register_downsample', None)
+
+        modalities                = parameters.get( 'modalities',1 ) - 1
+
+        create_patch_norm_lib     = parameters.get( 'create_patch_norm_lib',False)
+        patch_norm_lib_pct        = parameters.get( 'patch_norm_lib_pct', 0.1 )
+        patch_norm_lib_sub        = parameters.get( 'patch_norm_lib_sub', 1 )
+        patch_norm_lib_patch      = parameters.get( 'patch_norm_lib_patch', 2 ) # 5x5x5 patches
+
+        use_fake_masks            = parameters.get( 'fake_mask', False )
+
+        # prepare directories
+        if not os.path.exists(output):
+            os.makedirs(output)
+
+        if not os.path.exists(work_dir):
+            os.makedirs(work_dir)
+
+        # 0. go over input samples, prepare variables
+        input_samples=[]
+        filtered_samples=[]
+        lin_xfm=[]
+        lin_samples=[]
+        tmp_lin_samples=[]
+        bbox_lin_xfm=[]
+
+        final_samples=[]
+        warped_samples=[]
+        final_transforms=[]
+        tmp_log_samples=[]
+
+        patch_norm_db  = output + os.sep + 'patch_norm.db'
+        patch_norm_idx = output + os.sep + 'patch_norm.idx'
+
+        # identity xfm
+        identity_xfm = MriTransform(prefix=work_dir, name='identity' )
+        with mincTools() as m:
+            m.param2xfm(identity_xfm.xfm)
+            m.param2xfm(identity_xfm.xfm_f)
+
+        # check if library is a list; if it is not, assume it's a reference to a csv file
+        if not isinstance(library, list):
+            with open(library,'r') as f:
+                library=list(csv.reader(f))
+
+        # setup files
+        model = MriDataset(scan=reference_model, mask=reference_mask, add=reference_model_add)
+
+        for (j,i) in enumerate(library):
+            scan=i[0]
+            seg=i[1]
+            mask=None
+
+            add=i[2:modalities+2] # additional modalities
+
+            if len(i)>modalities+2 : # assume that the extra file is a subject-specific mask
+                mask = i[modalities+2]
+            elif use_fake_masks : # create mask from segmentation
+                mask = work_dir + os.sep + 'fake_mask_' + os.path.basename(scan)
+                create_fake_mask(seg, mask, op=op_mask)
+
+            sample = MriDataset(scan=scan, seg=seg, mask=mask, protect=True,add=add)
+            input_samples.append( sample )
+            filtered_samples.append( MriDataset( prefix=work_dir, name='flt_'+sample.name, add_n=modalities ) )
+
+            lin_xfm.append( MriTransform(prefix=work_dir, name='lin_'+sample.name ) )
+            bbox_lin_xfm.append( MriTransform(prefix=work_dir, name='lin_bbox_'+sample.name ) )
+            lin_samples.append( MriDataset( prefix=work_dir, name='lin_'+sample.name, add_n=modalities ) )
+            tmp_lin_samples.append( MriDataset( prefix=work_dir, name='tmp_lin_'+ sample.name, add_n=modalities ) )
+            tmp_log_samples.append( MriDataset( prefix=work_dir, name='tmp_log_'+ sample.name ) )
+            final_samples.append( MriDataset( prefix=output, name=sample.name, add_n=modalities ) )
+            warped_samples.append( MriDataset( prefix=output, name='nl_'+sample.name, add_n=modalities ) )
+            final_transforms.append( MriTransform(prefix=output, name='nl_'+sample.name ) )
+
+        # temp array
+        results=[]
+
+        if pre_filters is not None:
+            # apply pre-filtering before other stages
+            filter_all=[]
+
+            for (j,i) in enumerate(input_samples):
+                # a HACK?
+                filtered_samples[j].seg =input_samples[j].seg
+                filtered_samples[j].mask=input_samples[j].mask
+
+                filter_all.append( futures.submit(
+                    filter_sample, input_samples[j], filtered_samples[j], pre_filters, model=model
+                    ))
+
+            futures.wait(filter_all, return_when=futures.ALL_COMPLETED)
+        else:
+            filtered_samples=input_samples
+
+        if build_symmetric:
+            # need to flip the inputs
+            flipdir=work_dir+os.sep+'flip'
+            if not os.path.exists(flipdir):
+                os.makedirs(flipdir)
+            flip_all=[]
+
+            labels_datatype='short' # TODO: determine optimal here
+            #if largest_label>255: labels_datatype='short'
+
+            for (j,i) in enumerate(filtered_samples):
+                i.scan_f=flipdir+os.sep+os.path.basename(i.scan)
+                i.add_f=[]
+
+                for (k,j) in enumerate(i.add):
+                    i.add_f.append(flipdir+os.sep+os.path.basename(i.add[k]))
+
+                if i.mask is not None:
+                    i.mask_f=flipdir+os.sep+'mask_'+os.path.basename(i.scan)
+                else:
+                    i.mask_f=None
+
+                flip_all.append( futures.submit( generate_flip_sample, i, labels_datatype=labels_datatype ) )
+
+            futures.wait(flip_all, return_when=futures.ALL_COMPLETED)
+
+        # 1. run global linear registration if needed
+        if do_initial_register :
+            for (j,i) in enumerate(filtered_samples):
+                if inital_reg_type=='elx' or inital_reg_type=='elastix' :
+                    results.append( futures.submit(
+                        elastix_registration, i, model, lin_xfm[j],
+                        symmetric=build_symmetric,
+                        parameters=inital_reg_options,
+                        downsample=inital_reg_downsample,
+                        use_mask=inital_reg_use_mask
+                        ) )
+                elif inital_reg_type=='ants' or inital_reg_ants:
+                    results.append( futures.submit(
+                        linear_registration, i, model, lin_xfm[j],
+                        symmetric=build_symmetric,
+                        linreg=inital_reg_options,
+                        ants=True,
+                        downsample=inital_reg_downsample,
+                        use_mask=inital_reg_use_mask
+                        ) )
+                else:
+                    results.append( futures.submit(
+                        linear_registration, i, model, lin_xfm[j],
+                        symmetric=build_symmetric,
+                        reg_type=inital_reg_type,
+                        linreg=inital_reg_options,
+                        downsample=inital_reg_downsample,
+                        use_mask=inital_reg_use_mask,
+                        objective=initial_reg_objective
+                        ) )
+            # TODO: do we really need to wait for result here?
+            futures.wait( results, return_when=futures.ALL_COMPLETED )
+            # TODO: determine if we need to resample input files here
+            #lin_samples=input_samples
+
+        # 2. for each part run linear registration, apply flip and do symmetric too
+
+
+        # 3. perform local linear registration and local intensity normalization if needed
+        # create a local reference model
+        local_model=None
+        local_model_ovl=None
+        local_model_avg=None
+        local_model_sd=None
+
+        if reference_local_model is None :
+            local_model     = MriDataset( prefix=output, name='local_model',     add_n=modalities )
+            local_model_ovl = MriDataset( prefix=output, name='local_model_ovl' )
+            local_model_avg = MriDataset( prefix=output, name='local_model_avg', add_n=modalities )
+            local_model_sd  = MriDataset( prefix=output, name='local_model_sd',  add_n=modalities )
+
+            if not os.path.exists( local_model.scan ):
+                for (j,i) in enumerate( filtered_samples ):
+                    xfm=None
+                    if do_initial_register:
+                        xfm=lin_xfm[j]
+
+                    results.append( futures.submit(
+                        warp_rename_seg, i, model, tmp_lin_samples[j],
+                        transform=xfm,
+                        symmetric=build_symmetric,
+                        symmetric_flip=build_symmetric,
+                        lut=build_remap,
+                        flip_lut=build_flip_remap,
+                        resample_order=0,
+                        resample_baa=False,
+                        create_mask=use_fake_masks,
+                        op_mask=op_mask
+                        ) )
+
+                futures.wait(results, return_when=futures.ALL_COMPLETED)
+                create_local_model(tmp_lin_samples, model, local_model, extend_boundary=extend_boundary, op=op_mask)
+
+            if not os.path.exists(local_model.scan_f) and build_symmetric and build_symmetric_flip:
+                create_local_model_flip(local_model, model, remap=build_unflip_remap, op=op_mask)
+        else:
+            local_model = MriDataset(scan=reference_local_model, mask=reference_local_mask)
+
+            local_model.scan_f=reference_local_model_flip
+            local_model.mask_f=reference_local_mask_flip
+
+        if do_initial_local_register:
+            for (j,i) in enumerate(filtered_samples):
+                init_xfm=None
+                if do_initial_register:
+                    init_xfm=lin_xfm[j]
+
+                if local_reg_type=='elx' or local_reg_type=='elastix' :
+                    results.append( futures.submit(
+                        elastix_registration, i, local_model, bbox_lin_xfm[j],
+                        init_xfm=init_xfm,
+                        symmetric=build_symmetric,
+                        parameters=local_reg_opts,
+                        bbox=local_reg_bbox,
+                        downsample=local_reg_downsample,
+                        use_mask=local_reg_use_mask
+                        ) )
+                elif local_reg_type=='ants' or local_reg_ants:
+                    results.append( futures.submit(
+                        linear_registration, i, local_model, bbox_lin_xfm[j],
+                        init_xfm=init_xfm,
+                        symmetric=build_symmetric,
+                        reg_type=local_reg_type,
+                        linreg=local_reg_opts,
+                        close=True,
+                        ants=True,
+                        bbox=local_reg_bbox,
+                        downsample=local_reg_downsample,
+                        use_mask=local_reg_use_mask
+                        ) )
+                else:
+                    if not do_initial_register:
+                        init_xfm=identity_xfm # to avoid strange initialization errors
+
+                    results.append( futures.submit(
+                        linear_registration, i, local_model, bbox_lin_xfm[j],
+                        init_xfm=init_xfm,
+                        symmetric=build_symmetric,
+                        reg_type=local_reg_type,
+                        linreg=local_reg_opts,
+                        close=True,
+                        bbox=local_reg_bbox,
+                        downsample=local_reg_downsample,
+                        use_mask=local_reg_use_mask,
+                        objective=local_reg_objective
+                        ) )
+
+            # TODO: do we really need to wait for result here?
+            futures.wait(results, return_when=futures.ALL_COMPLETED )
+        else:
+            bbox_lin_xfm=lin_xfm
+
+
+        # create bbox samples
+        results=[]
+        for (j, i) in enumerate(filtered_samples):
+            xfm=None
+
+            if i.mask is None:
+                final_samples[j].mask=None
+                final_samples[j].mask_f=None
+
+            if do_initial_local_register or do_initial_register:
+                xfm=bbox_lin_xfm[j]
+            #
+            results.append( futures.submit(
+                warp_rename_seg, i, local_model, final_samples[j],
+                transform=xfm,
+                symmetric=build_symmetric,
+                symmetric_flip=build_symmetric,
+                lut=build_remap,
+                flip_lut=build_flip_remap,
+                resample_order=resample_order,
+                resample_baa=resample_baa,
+                create_mask=use_fake_masks,
+                op_mask=op_mask
+                ))
+
+        futures.wait(results, return_when=futures.ALL_COMPLETED)
+
+        results=[]
+        for (j, i) in enumerate(filtered_samples):
+            xfm=None
+            if do_initial_local_register or do_initial_register:
+                xfm=bbox_lin_xfm[j]
+
+            results.append( futures.submit(
+                warp_sample, i, local_model, final_samples[j],
+                transform=xfm,
+                symmetric=build_symmetric,
+                symmetric_flip=build_symmetric,
+                resample_order=resample_order,
+                filters=post_filters,
+                ))
+
+        futures.wait(results, return_when=futures.ALL_COMPLETED)
+
+        if create_patch_norm_lib:
+            create_patch_norm_db( final_samples, patch_norm_db,
+                                  patch_norm_idx,
+                                  pct=patch_norm_lib_pct,
+                                  sub=patch_norm_lib_sub,
+                                  patch=patch_norm_lib_patch)
+        results=[]
+        if do_nonlinear_register:
+            for (j, i) in enumerate(final_samples):
+                # TODO: decide what to do with mask
+                i.mask=None
+
+                if nonlinear_register_type=='elx' or nonlinear_register_type=='elastix' :
+                    results.append( futures.submit(
+                        elastix_registration,
+                        i,
+                        local_model,
+                        final_transforms[j],
+                        symmetric=build_symmetric,
+                        level=nlreg_level,
+                        parameters=nlreg_options,
+                        output_sample=warped_samples[j],
+                        warp_seg=True,
+                        resample_order=resample_order,
+                        resample_baa=resample_baa,
+                        nl=True,
+                        downsample=nlreg_downsample
+                        ) )
+                elif nonlinear_register_type=='ants' or do_nonlinear_register_ants:
+                    results.append( futures.submit(
+                        non_linear_registration,
+                        i,
+                        local_model,
+                        final_transforms[j],
+                        symmetric=build_symmetric,
+                        level=nlreg_level,
+                        parameters=nlreg_options,
+                        output_sample=warped_samples[j],
+                        warp_seg=True,
+                        resample_order=resample_order,
+                        resample_baa=resample_baa,
+                        ants=True,
+                        downsample=nlreg_downsample
+                        ) )
+                else:
+                    results.append( futures.submit(
+                        non_linear_registration,
+                        i,
+                        local_model,
+                        final_transforms[j],
+                        symmetric=build_symmetric,
+                        level=nlreg_level,
+                        parameters=nlreg_options,
+                        output_sample=warped_samples[j],
+                        warp_seg=True,
+                        resample_order=resample_order,
+                        resample_baa=resample_baa,
+                        ants=False,
+                        downsample=nlreg_downsample
+                        ) )
+                final_samples[j].mask=None
+            # TODO: do we really need to wait for result here?
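+            # (Waiting here is in fact required: the majority-voting and averaging
+            #  steps below read warped_samples, so every non-linear registration job
+            #  must have completed first.)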
+ futures.wait(results, return_when=futures.ALL_COMPLETED) + + with mincTools() as m: + # a hack, to replace a rough model with a new one + if os.path.exists(local_model.seg): + os.unlink(local_model.seg) + + # create majority voted model segmentation, for ANIMAL segmentation if needed + segs=['multiple_volume_similarity'] + + segs.extend([ i.seg for i in warped_samples ]) + if build_symmetric: segs.extend([ i.seg_f for i in warped_samples ]) + + segs.extend(['--majority', local_model.seg, '--bg', '--overlap', local_model_ovl.scan ] ) + m.command(segs,inputs=[],outputs=[local_model.seg,local_model_ovl.scan]) + + avg=['mincaverage','-float'] + avg.extend([ i.scan for i in warped_samples ]) + if build_symmetric: avg.extend([ i.scan_f for i in warped_samples ]) + avg.extend([local_model_avg.scan, '-sdfile', local_model_sd.scan ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.scan,local_model_sd.scan]) + + for i in range(modalities): + avg=['mincaverage','-float'] + avg.extend([ j.add[i] for j in warped_samples ]) + if build_symmetric: avg.extend([ j.add_f[i] for j in warped_samples ]) + + avg.extend([local_model_avg.add[i], '-sdfile', local_model_sd.add[i] ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.add[i],local_model_sd.add[i]]) + else: + with mincTools() as m: + # a hack, to replace a rough model with a new one + if os.path.exists(local_model.seg): + os.unlink(local_model.seg) + + # create majority voted model segmentation, for ANIMAL segmentation if needed + segs=['multiple_volume_similarity'] + segs.extend([ i.seg for i in final_samples ]) + + if build_symmetric: segs.extend([ i.seg_f for i in final_samples ]) + + segs.extend(['--majority', local_model.seg, '--bg','--overlap', local_model_ovl.scan] ) + m.command(segs,inputs=[],outputs=[local_model.seg,local_model_ovl.scan]) + + avg=['mincaverage','-float'] + avg.extend([ i.scan for i in final_samples ]) + if build_symmetric: avg.extend([ i.scan_f for i in final_samples ]) + avg.extend([local_model_avg.scan, '-sdfile', local_model_sd.scan ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.scan,local_model_sd.scan]) + + for i in range(modalities): + avg=['mincaverage','-float'] + avg.extend([ j.add[i] for j in final_samples ]) + if build_symmetric: avg.extend([ j.add_f[i] for j in final_samples ]) + avg.extend([local_model_avg.add[i], '-sdfile', local_model_sd.add[i] ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.add[i],local_model_sd.add[i]]) + + # number of classes including bg + #classes_number=2 + ## 6. 
create training library description + #with mincTools() as m: + #classes_number=int(m.execute_w_output(['mincstats', '-q', '-max',local_model.seg ]).rstrip("\n"))+1 + + library_description={} + # library models + library_description['model'] = model.scan + library_description['model_mask'] = model.mask + library_description['model_add'] = model.add + + library_description['local_model'] = local_model.scan + library_description['local_model_add'] = local_model.add + library_description['local_model_mask'] = local_model.mask + library_description['local_model_seg'] = local_model.seg + library_description['local_model_avg'] = local_model_avg.scan + library_description['local_model_ovl'] = local_model_ovl.scan + library_description['local_model_sd'] = local_model_sd.scan + + # library parameters + library_description['map']=inv_dict(dict(build_remap)) + library_description['classes_number']=classes_number + library_description['nl_samples_avail']=do_nonlinear_register + library_description['modalities']=modalities+1 + + largest_label=max(library_description['map'].values(), key=lambda p: int(p)) + library_description['seg_datatype']='short' + + if largest_label<=255:library_description['seg_datatype']='byte' + + library_description['gco_energy']=output+os.sep+'gco_energy.csv' + estimate_gco_energy(final_samples, library_description['gco_energy'], classes=classes_number) + library_description['label_map'] = label_map + + if build_symmetric and build_symmetric_flip: + library_description['local_model_flip'] =local_model.scan_f + library_description['local_model_add_flip'] =local_model.add_f + library_description['local_model_mask_flip']=local_model.mask_f + library_description['local_model_seg_flip'] =local_model.seg_f + library_description['flip_map']=inv_dict(dict(build_flip_remap)) + else: + library_description['local_model_flip']=None + library_description['local_model_add_flip']=[] + library_description['local_model_mask_flip']=None + library_description['flip_map']={} + + library_description['library']=[] + + for (j, i) in enumerate(final_samples): + ss=[i.scan, i.seg ] + ss.extend(i.add) + + if do_nonlinear_register: + ss.extend( [ final_transforms[j].xfm, final_transforms[j].xfm_inv, warped_samples[j].scan, warped_samples[j].seg ]) + + library_description['library'].append(ss) + + if build_symmetric: + ss=[i.scan_f, i.seg_f ] + ss.extend(i.add_f) + + if do_nonlinear_register: + ss.extend( [ final_transforms[j].xfm_f, final_transforms[j].xfm_f_inv, warped_samples[j].scan_f, warped_samples[j].seg_f ]) + + library_description['library'].append(ss) + + save_library_info( library_description, output) + # cleanup + if cleanup: + shutil.rmtree(work_dir) + + except mincError as e: + print("Exception in generate_library:{}".format(str(e)),file=sys.stderr) + traceback.print_exc(file=sys.stderr) + raise + except : + print("Exception in generate_library:{}".format(sys.exc_info()[0]),file=sys.stderr) + traceback.print_exc(file=sys.stderr) + raise + + +def estimate_gco_energy(samples,output,classes=2): + with mincTools() as m: + files=[f.seg for f in samples] + cmd=['label_interaction_estimate'] + cmd.extend(files) + cmd.append(output) + cmd.extend(['--classes', str(classes)]) + m.command(cmd,inputs=files,outputs=[output]) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/train_ec.py b/ipl/segment/train_ec.py new file mode 100644 index 0000000..153ef53 --- /dev/null +++ b/ipl/segment/train_ec.py @@ -0,0 +1,398 @@ +# 
-*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + +import shutil +import os +import sys +import csv +import copy +import random +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +# scoop parallel execution +from scoop import futures + +from .structures import * +from .fuse import * +from .train import * +from .filter import * +from .error_correction import * + + +def train_ec_loo( segmentation_library, + segmentation_parameters=None, + ec_parameters=None, + debug=False, + fuse_variant='fuse', + regularize_variant='gc', + ec_variant='ec', + cleanup=False, + ext=False, + train_list=None): + '''Train error correction using leave-one-out cross-validation''' + # for each N subjects run segmentation and compare + + try: + ec_variant = ec_parameters.get( 'variant' , ec_variant) + work_dir = ec_parameters.get( 'work_dir' , segmentation_library['prefix'] + os.sep + fuse_variant ) + ec_output = ec_parameters.get( 'output' , work_dir + os.sep + ec_variant + '.pickle' ) + + ec_border_mask = ec_parameters.get( 'border_mask' , True ) + ec_border_mask_width = ec_parameters.get( 'border_mask_width' , 3 ) + ec_antialias_labels = ec_parameters.get( 'antialias_labels' , True ) + ec_blur_labels = ec_parameters.get( 'blur_labels', 1.0 ) + ec_expit_labels = ec_parameters.get( 'expit_labels', 1.0 ) + ec_normalize_labels = ec_parameters.get( 'normalize_labels', True ) + ec_use_raw = ec_parameters.get( 'use_raw', False ) + ec_split = ec_parameters.get( 'split', None ) + + ec_train_rounds = ec_parameters.get( 'train_rounds', -1 ) + ec_train_cv = ec_parameters.get( 'train_cv', 1 ) + ec_sample_pick_strategy = ec_parameters.get( 'train_pick', 'random' ) + ec_max_samples = ec_parameters.get( 'max_samples', -1 ) + modalities = ec_parameters.get( 'train_modalities', segmentation_library.get('modalities',1) ) - 1 + + print("\n\n") + print("EC modalities:{}".format(modalities)) + print("train_list={}".format(repr(train_list))) + print("ext={}".format(repr(ext))) + print("\n\n") + + try: + if not os.path.exists(work_dir): + os.makedirs(work_dir) + except: + pass + + if (train_list is not None) and not isinstance(train_list, list): + print(repr(train_list)) + with open(train_list,'r') as f: + train_list=list(csv.reader(f)) + + if not os.path.exists(work_dir): + os.makedirs(work_dir) + + # setup parameters to stop early + local_model_mask=segmentation_library['local_model_mask'] + + # disable EC options if present + segmentation_parameters['ec_options']=None + + ec_train=[] + ec_train_file = work_dir+os.sep+'train_ec_'+ec_variant+'.json' + #ec_train_library = segmentation_library['library'] + ec_work_dirs=[] + + + if not os.path.exists( ec_train_file ): + results=[] + + _train_list=[] + # if we have pre-segmented scans, then we should pre-process training library again (!) 
+        # and train on pre-segmented scans
+        if ext and train_list :
+            results2=[]
+
+            for (i,j) in enumerate( train_list ):
+                n=os.path.basename( j[0] ).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+
+                output_pre_seg = work_dir+os.sep+'pre_'+n
+                ec_work_dir    = work_dir+os.sep+'work_pre_'+n
+
+
+                #TODO: find out how to select appropriate segmentation
+                train_sample  = j[0]
+                train_segment = j[1]
+                train_add=[]
+
+                train_presegment = j[2]
+                train_add        = j[ 3: 3 + modalities ]
+
+                experiment_segmentation_library = copy.deepcopy(segmentation_library)
+                print("Running pre-processing on {} - {}".format(train_sample,train_presegment))
+
+                results2.append( futures.submit(
+                    fusion_segment,
+                    train_sample,
+                    experiment_segmentation_library,
+                    work_dir+os.sep+n,
+                    parameters=segmentation_parameters,
+                    debug=True,
+                    work_dir=ec_work_dir,
+                    ec_variant='noec',
+                    fuse_variant=fuse_variant,
+                    regularize_variant=regularize_variant,
+                    add=train_add,
+                    cleanup=cleanup,
+                    presegment=train_presegment,
+                    preprocess_only=True
+                    ))
+            ###
+            print("waiting for {} jobs".format(len(results2)))
+            futures.wait(results2, return_when=futures.ALL_COMPLETED)
+            print("Finished!")
+            #train_list=range()
+
+            # now pre-fill training library with freshly pre-processed samples
+            for (_i,_j) in enumerate(results2):
+                print("{} - done ".format(_j.result()[1]['bbox_sample'].seg))
+                # raise("Not FINISHED!")
+                sample_id=os.path.basename(train_list[_i][0]).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+                # include into the training list
+                train_list_i=[ i for i,j in enumerate(segmentation_library['library']) if j[0].find(sample_id)>=0 ]
+                # the output should be either one or two samples, if symmetrized version is used
+
+                if len(train_list_i)==1:
+                    # we have a single match!
+                    match=segmentation_library['library'][train_list_i[0]]
+
+                    train=match[0:2]
+                    train.append(_j.result()[1]['bbox_sample'].seg)
+                    train.extend(match[2:len(match)])
+                    _train_list.append(train)
+                elif len(train_list_i)==2:
+                    # we have left and right samples
+                    # we assume that straight is first and flipped is second
+
+                    match=segmentation_library['library'][train_list_i[0]]
+
+                    train=match[0:2]
+                    train.append(_j.result()[1]['bbox_sample'].seg)
+                    train.extend(match[2:len(match)])
+                    _train_list.append(train)
+
+                    # flipped version
+                    match=segmentation_library['library'][train_list_i[1]]
+
+                    train=match[0:2]
+                    train.append(_j.result()[1]['bbox_sample'].seg_f)
+                    train.extend(match[2:len(match)])
+                    _train_list.append(train)
+                else:
+                    raise RuntimeError("Unexpected number of matches encountered!")
+ + else: + _train_list=segmentation_library['library'] + + + + segmentation_parameters['run_in_bbox']=True + if ec_train_cv == 1 : + print("_train_list={}".format(repr(_train_list))) + if ec_train_rounds > 0 and \ + ec_train_rounds < len( _train_list ): + + if ec_sample_pick_strategy=='random' and ec_max_samples>0: + ec_train_library=random.sample(_train_list,ec_max_samples) + else: + ec_train_library=_train_list[0:ec_max_samples] + else: + ec_train_library=_train_list + + for (_i, _j) in enumerate( ec_train_library ): + n=os.path.basename( _j[0] ).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + + output_loo_seg=work_dir+os.sep+n + ec_work_dir=work_dir+os.sep+'work_ec_'+n + + #TODO: find out how to select appropriate segmentation + train_sample=_j[0] + train_segment=_j[1] + train_add=[] + + train_presegment=None + + if ext: + train_presegment=_j[2] + train_add=_j[3:3+modalities] + else: + train_add=_j[2:2+modalities] + + experiment_segmentation_library = copy.deepcopy(segmentation_library) + # remove sample + experiment_segmentation_library['library'] = [ i for i in segmentation_library['library'] if i[0].find(n)<0 ] + + results.append( futures.submit( + fusion_segment, + train_sample, + experiment_segmentation_library, + work_dir+os.sep+n, + parameters=segmentation_parameters, + debug=debug, + work_dir=ec_work_dir, + ec_variant='noec', + fuse_variant=fuse_variant, + regularize_variant=regularize_variant, + add=train_add, + cleanup=cleanup, + presegment=train_presegment + )) + + ec_work_dirs.append(ec_work_dir) + else: + validation_library_idx=range(len(_train_list)) + ec_train_library=[] + for i in range( ec_train_rounds ): + ran_file = work_dir + os.sep + ('random_{}_{}.json'.format(ec_variant,i)) + if not os.path.exists( ran_file ): + rem_list=random.sample( validation_library_idx, ec_train_cv ) + with open( ran_file,'w') as f: + json.dump(rem_list,f) + else: + with open( ran_file,'r') as f: + rem_list=json.load(f) + + # ec_sample_pick_strategy=='random' + + # list of subjects + rem_items=[ _train_list[j] for j in rem_list ] + + rem_n=[os.path.basename(j[0]).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] for j in rem_items] + rem_lib=[] + + for j in rem_n: + rem_lib.extend( [ k for (k,t) in enumerate( _train_list ) if t[0].find(j)>=0 ] ) + + if debug: print(repr(rem_lib)) + rem_lib=set(rem_lib) + #prepare exclusion list + experiment_segmentation_library=copy.deepcopy(segmentation_library) + + experiment_segmentation_library[ 'library' ]=\ + [ k for j,k in enumerate( segmentation_library[ 'library' ] ) if j not in rem_lib ] + + for j,k in enumerate( rem_items ): + + output_experiment=work_dir+os.sep+'{}_{}_{}'.format(i,rem_n[j],'ec') + ec_work_dir=work_dir+os.sep+'work_{}_{}_{}'.format(i,rem_n[j],fuse_variant) + + # ??? 
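+                    # (_train_list row layout, as assembled above:
+                    #  [scan, seg, presegment if ext, additional modalities...];
+                    #  e.g. a hypothetical two-modality row without presegmentation:
+                    #  ['sub01_t1.mnc', 'sub01_seg.mnc', 'sub01_t2.mnc'])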
+ sample=[k[0],k[1]] + presegment=None + if ext: + presegment=k[2] + sample.extend(k[3:3+modalities]) + else: + sample.extend(k[2:2+modalities]) + + ec_train_library.append(sample) + + results.append( futures.submit( + fusion_segment, + k[0], + experiment_segmentation_library, + output_experiment, + parameters=segmentation_parameters, + debug=debug, + work_dir=ec_work_dir, + ec_variant='noec', + fuse_variant=fuse_variant, + regularize_variant=regularize_variant, + add=k[2:2+modalities], + cleanup=cleanup, + presegment=presegment + )) + ec_work_dirs.append(ec_work_dir) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + results2=[] + results3=[] + + for (i,j) in enumerate( ec_train_library ): + train_sample=j[0] + train_segment=j[1] + train_add=j[2:2+modalities] + train_mask=local_model_mask + auto_segment=results[i].result()[0] + + # TODO: use the subject-specific mask somehow? + if ec_border_mask: + train_mask=auto_segment.rsplit( '.mnc',1 )[0] + '_' + ec_variant+'_train_mask.mnc' + results2.append( + futures.submit( make_border_mask, + auto_segment, + train_mask, + width=ec_border_mask_width, + labels=experiment_segmentation_library[ 'classes_number' ] + ) ) + + # need to split up multilabel segmentation for training + if experiment_segmentation_library[ 'classes_number' ]>2 and ( not ec_use_raw ) : + print("Splitting into individual files: class_number={} use_raw={}".format(experiment_segmentation_library[ 'classes_number' ],ec_use_raw)) + labels_prefix=auto_segment.rsplit('.mnc', 1)[0] + + results3.append( futures.submit( split_labels, auto_segment, + experiment_segmentation_library['classes_number'], + labels_prefix, + antialias=ec_antialias_labels, + blur=ec_blur_labels, + expit=ec_expit_labels, + normalize=ec_normalize_labels ) ) + + ec_input=[ train_sample ] + ec_input.extend(train_add) + + ec_input.extend(['{}_{:02d}.mnc'.format(labels_prefix,i) for i in range(experiment_segmentation_library['classes_number']) ]) + ec_input.extend([ auto_segment, train_mask, train_segment ]) + ec_train.append( ec_input ) + + else : # binary label + ec_input=[ train_sample ] + ec_input.extend(train_add) + ec_input.extend([ auto_segment, auto_segment, train_mask, train_segment ]) + ec_train.append( ec_input ) + + if ec_border_mask: + futures.wait(results2, return_when=futures.ALL_COMPLETED) + + if experiment_segmentation_library['classes_number']>2 : + futures.wait(results3, return_when=futures.ALL_COMPLETED) + + # TODO run Error correction here + with open(ec_train_file ,'w') as f: + json.dump(ec_train, f ,indent=1) + else: + with open(ec_train_file,'r') as r: + ec_train=json.load(r) + + if ec_split is None : + if not os.path.exists( ec_output ) : + errorCorrectionTrain( ec_train, ec_output , + parameters=ec_parameters, debug=debug, + multilabel=segmentation_library[ 'classes_number' ] ) + else: + results=[] + for s in range(ec_split): + + out=ec_output.rsplit('.pickle',1)[0] + '_' + str(s) + '.pickle' + + if not os.path.exists(out): + results.append( futures.submit( + errorCorrectionTrain, ec_train, out , + parameters=ec_parameters, debug=debug, partition=ec_split, part=s, + multilabel=segmentation_library[ 'classes_number' ] ) ) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + # TODO: cleanup not-needed files here! 
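+        # (Beyond ec_work_dirs below, such a cleanup could also remove the per-round
+        #  random_*.json selections written above; a sketch, assuming glob were imported,
+        #  which this module does not currently do:
+        #    for f in glob.glob(work_dir+os.sep+'random_*.json'): os.unlink(f) )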
+        if cleanup:
+            for i in ec_work_dirs:
+                shutil.rmtree(i)
+    except mincError as e:
+        print("Exception in train_ec_loo:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except:
+        print("Exception in train_ec_loo:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/temp_files.py b/ipl/temp_files.py
new file mode 100644
index 0000000..e69de29
diff --git a/ipl/test.jpg b/ipl/test.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ab7134fa11c9300c8bafeb25d6bedac4b618f8d0
GIT binary patch
literal 120188
[base85-encoded binary payload of test.jpg omitted]
z-N#i^S5fYdLXDB6g;ET3h$XJWRBUzw63#O6m)93+JesS#Av&Ii#2wnHtOIOMV`|YgH2{E(9*iVo{QN zG2wVa`=LAqWl<9a0!7ePuEoolFMemQZHsZ=E>DE^v@03leAoQ~)45(OaNE98C&a)L zoC{MX?hcyLI^F9Znn`d-e||NY*+1%A*EvsW7hax9druTCSmOd>q9dS8dd$|Tkoykj z4Q9#jqt=nSgPS!CPs$rF*9^%yI?W2gl~U&CS9DGMxo=Ac+B|EmY$wT*@J-U9@1~q% zs474p4S+78m|NFB*+*$LMGmY=dc7UsyVGlg!oDn}gEF)>kyaLAbD&URFyOuZ&YsUJ?E2y;ER z9j@J9Z`b^^A^t^@B-`dB>fHXuM4c*nM7LjPcHpZC^*6%3SLY&=G+5-aT;;N+eVMj* zKT=eqB==j$5~|^M3ra5#5*H`_8yI@Y^(T>WzWcl1z$P20Tr*&R?FN#!&UVK9DL_Ft ztEa}#0|^O65ixPrtrMzXURrhJGG`znVwe8aQq41%e}&+~uT33veW+;FJMZdzP4FJ; zdj`h4!;U0^G&VHzirYEtRL!_+jm8Aaw+ioZCpS9ZC$T2=T+Slb8=FRcF~99l8~m&8 z)<-9U%azZB?(C+ebZucTgzK-Ze9OM)@^GQ%UiHhv;R^atZ%QAj^4;1IA48*@a_8i& z(0jTcwqU3LpXt&Car8xk8$7}C%5YQ3r0GxfVtkxNkE3f&lCrJVQ1=e#;^el8IbA(G zc=pw+k_*@ili#k+Z910r#+9+!*xebWJkTuZ!N5E^0zeYpF0A_a$ak42n@`RY`Bg?k zIvk>^QY~T};K!{BDS4ml=Olh*l*5 zr|Xj^g2Vbt6~vdQ>}}cS6P>Q6-nr1l*!TLa1aoh{wCV50`h>{1gkgnX!jL^8$j}=< zYC3LWtmU%ytb0qB^$FW~VGoSFNP-koN4v+Znhe8+adpTiRRuWfbC#Nh;Fwq%kYL!}*UCVgvS6jQ-08z}p0@6YdF z4et%C8RoOTDD9SR{U9}4QTo{9@dAEdd6T>_a=J2dR84+zf?Fozx5t$>7IiGAovLM6 zmyRkOt$)?m`w;CRpMonzKlD2ttzC|kZEoT|yUIyW2$Qt%vO&G@@}V5pgue@`7Zo7Z zT$xM93;nELH*xAl^a>Xi&?fLL2T@I@dcFOL5zM#!sg^b?pDo$@Q%$OaLR(2Kraah9 zjbZ8VHiL?@^x6|C~AOrQI3>QAS=E|pN^R298_65Hu#62tVIxp(%`18T1ciUwuO!w9|l zKxn=D_CG~XF`)PvFAre0rN!Yh{2Xc3JF#^O1{(F z^8ORezOIv;dgVqbJY8W)nb?70lE0_)oG@i#U22Tv1y-J{gt2ynL@GBX=(6KM+B?j` z32op=yHmknBg67#W7T*!H}<*pplY#Ny%gmw5i)%97g9AcK3MDl@q3S7GC4>BFY1BT zh8O6KzH~*Z0(&n1k9R0_0i_%GboH{ypVCbzeBPoDXdzfD-Nf8*j@wM7+F;=lELal#z@P1F! z91L}YB0gNGHIH;oeVjCIqIgL|3V|a@N)ii|nxmPsF7oYH7G63RRQllqjVG|V-n8Eq zcCn3k{!6g)lP}{3eog!sq;s7u+pZxP9@e>sVG?}aL2bsWHN}zPEb?i;m7aUcgxBK>y@l=EJWuR3c=-cuL;D8{e2VmJ-tqz73Qf;EYB%CIKOZT?&6=v5QDlGjx(JG&muJUx;MSD~wC~geM4JgnN-Yl5*U&BWh$$dmR z_u$-F@A-qagr%(D6ijPB9InW@e{=mpkygcu45cqXWXE|A5&So;?=}2b%d09M%3j?* z|AN%cy^z&o&_&^;=|Dr@_Y$GFNZ`KQMR-|XK3)6Wl? 
ze`52$*gSX^n#k#z8>~d?%SXyIKo<=YMMP@$Kl|xJfgC@VdJ3YCKgk({0*RgjVt(*v zTIzN^{HuIxbvvKo5{8#dQWt#+D&Puk6S3Pba-&%6d-6+YpuFzgXxn~i%#dOiN?0(l z`3h(+kz_IHSdgIRv&rJUd!+TsmgyVIW~Ir;6xcJ5!~^>}Us1|d{VA~c=CtOE*-ZgG z@xwIG|03-zqoRD7SmF{Lhr9@h3q`SKr8blaMLL{Z71nKS;kkp|WV5Dcr z0Rf-W|8?Eh^St-EpLeZyy&uj`v(|i=IgewnZTsyliacuVxcBqqtnpS zJJc;;euRbBa#r!nVQzP6piQvFP}J>y=#k)dKRYtD$0Ks>6ppMORr*-oPq~XtbRRS( z79l)aR^vn>zsS1-K`S@j_s`y@4pM^S(JrO@5&|2+jGZK`3=Tgjz7>vm8>T5M*n_xw zl_9kE8j72tlAa;Lj)MRrt`~`__ANS89i(A{bb=m@NWo}qtdUW<)$|$k#*VrW$yc>6 zvVE~VTQx);Qw0vqQnw|8V}ioMJc?tO8`-$hT^^reHW9XKYDjG{{TQ9( zE^djDZpr@f63!bT&kzZ#-$b70#l1nDkIw5Ud68L#{T%IC8h+E&KroyfI;|%bO4T|G zv_$tMool?Mv+_to75#rs`X&SqnA_!vGdrJx8@VD}I>5o%oyC5EJ|cW^#qj zxLKE%%TPMb6rm6$4w-y;S;rLqiVd4$lf>3PAiqDLTKxub`C~E`=|>Vwf29GhvFyld z*_&x;4`F391d4%JsrR0K#@}DNA}A8R7W^OGU1s+o|5KOe-wts(%^NE4MI?q-KB+w$ zFn0Jy+Hw1jAIxX!pO4s@+bE{gYnDD$Z;; zShq+Jd=^gleRdGSJ=@q}vgaF!na^b?yrG@W-UdJ7f% zwrG}z%ijAV73sLQ2H_01MGTn1br|yE$Xg9e^h@tD&?j*L8&mk6Lc>kLX=>{c$xHW; z0GcW3g&G;Bf@S_2j!98`Ue}`JdU8Yr$;1G$U~8aVUSBD6$I19C`<{WrB3H0(K%_U1 zg3Tx+9t%St-9fPGC)3jQCyp@V5w}lkK6C>DY_2U#9d>05D3gndjZXSLqmDO>sX@_o zT?y*<$NaT^_Gl-)H^(~Xd>5SnNt6=ZY5E?lVEo|Sq3iUjf(zALbP=qGg(6}l@br}K zROh26SGOzo4{!S4oq-GceX#kq#`*L1C4A#lKEWu9QBbpI;OQIfik~J9nO}D=Jpy!k zxs@M2pd~EBwho$pYok74VAFmnQPy?eyu!2yM`Z~A?o}$M#)-;eJTSfHtpnpeZh>0; ze|kIrZ#_swN-d1PkJ$wE4K+O&iSC9`5COt5Zw7$G@EEX#-{m`9{d4PE|Dz#zDx6mP zi{)Rsq$xAPV6nSLsGSc51H~VX@IY8SI>RyW0$Hd1cZ^^yWKv?o{{5`O0^r6ko+HO) z??_EtYezhJ_%7$19nNmqWp$eq4+}P(6*YXbV$qMvO~P(@F}bTuKV+|f)OTzMQNVO=&AXUp5Bu_LG2`scQF zU*CRGZb->w9WfXaTaNI@O7NPk5g{(36jXMhzjEXlP@}W3)LFsPZBFZzA<fYd| zg!Ivgd=nlJ`uVJ4Mf6)kCTV9&wmVs|7W%4B4BluEMv&9!%S`M;LXIx#L+!UsMraAO z1dz|Y3$C!}w6>&1-{a0*LBW#0y;H}OHG4(nLg>l<qrSY5IVYL;$zCW;-l=XE(ze1a$gx?PHBB@8^@9tdtuJ(`O&FOmOU#ugeT1f~{N;l)!t8I=17wZ!2TW?5GqsNtx?NRAhXhYC zrE*XP%%s6qWoU8DuaR2zwMFxO;pE1Z^dfo*$KG7gHo@fxcDusTj~vCG+04C+{LfI> zbbmk>gZ&WO6^?-?EKk`}RNOiYNX6}FvE$E7S085l0Uab)Ws<2o*9#T%a&ykpXLX4c zvU)A14M|qEr6+cMynE{aDVt(QX8W1t`tl>1zF7?3a=KVP%I@wY1*UbNdxd+XR>8Y6 zT~v`j?#*sqBaV?<5_1o0=S-wV+Td%z=e@wyR=K*(%E7#CnerJe{V{`iZOodLp9C?i zvng{sjYX-C4|-;|m!j7LqMIb%ko*A|!@hLzcu!H_&c=MjsVOUWM1_3RY-zRs*`rkx zDIsC$`-NblU0I{ZZ$PiPo5=ghO~$k+4$k|~n4@a@dDyW5G>A`MTFJzG!y~4^ma5rH zJbLF3C@O8>I!f4}eB9Q87Ey=@^8-m9;JrkB-QXv)e^TidoKnAPGVDe3(Rt0@=7-hO zDD#>VPx{CaE?TUF_TR4MIuHhSao76|T}-pu6B@nYI7D3+sj z@5p7{)SzR=+~NjGdC6a?0`nK6mKMp z;v-DwbF*KR1WzM?BIQd9V-2y!>E)~D_LT2ZjdrmZf!G>d~Zx5gS|a}BC(^D zrlF+1Mwx48NMe{)bn6YQE#{NaM=mn@2ae%sY@NPPO$sPENISVpZT?9!4`Rn(zi+7-I^o5ja zGA?F23V#sw4DHXM=KeO|S7kUT(19@HPG22Ps3E;Ja3?3HwA3R&2V|%zn#%{<;ICb2 z7Z@R%xi5x(88>iK>WiUb?q@vlO2t)k@7PLrZP$>++SF?aW?koQ@6^!Y%qEH-0|}Xo zQ+P9Y?`g{LSF0_z9C(|NI0q|K_*>HI8$cgD6N#f^pn?1G)1|cpFR6@RB{`lEal+N- z8^8xdYT7Y(iqcW`OYWh+u9Dt#3ddg^_dyz%V{5#}O^L6+AIvAJXll<7SqO+%d;1uxG^^3Jq>1 zRj3y=`JSF39`ySov?8^T@3T4JY7%2r9#4{gSQa@D$O?c-#?a`)jg==8$|0L-0kqS$ z4}a%W2A*cf1m=6ZG24VB`2`Ll&G68H3;wwTGTHBK40Eh2K~MonaF8TSf~J?;93~cQ zQR~3O_}%4f_iRq3mNzQ&RjP`ze2pkIVFt<I+tlUq z0A8tHzO@ ztQ;y2GZ{Hyub8ydQl2mfZs|k#N}sA3v^OA=`deibe2OUo1X-!Sx#hlFkIWmx81`r& zT-R<{h;=~@nW~G5%-2}|B>Ev|LX_(Jn8iBslp7Rpi@E%%cV5os z1ulRACRTGGP(LeJ2vBtHOnc8~8r#RRvQcBP;Bm1~eL-QbiU~Ry z1$!7R+w0$>OqpQkW%aKw+twkT*2HSo{n>?cTOuOsrO}#c&=MgX1!nh4AdMdFrM2|O z%F_^PM68kCNx)Y9KMi_+j7jBwtY>JKfD#-fjDO}?q6$nhucNX)Dx2QsmzHD{6*yj= z8uwJvX5;CMbIV<@`Lo|7Jp6@EYZ4)xgoh%Db(49#8Q{&U5T3DWgYn*d%zaF`^i?E! 
zvE;@O1&-tOcH)Kf>({F~d8)0vF#Pc-q(#5C2M>!aC%JQf)Q#WG^=PulFYsiFOU^5I zAmu1<{-HHQ>Hk{qF5PLJBjMn)|2>;6Rrxn9hZV4%CCTH0D}=I{2jebJ@Nq1qxkTjgsry*`(05Y|%OJZ@}>Tzi?(IFIki2$Gu{ zk5)68Ap*jbcivsPOI>fjm~ovLb}|qY(RQ@fJB3%?%Z93W%qHH;leSddJI9uuEB=^l zBhL1XKb(duzr-gnnEkMoK%B)=USd@h4%XZ~HZhBEJM--XkR_}>srOzglKEbR3^PSx z#2<^atBu>UzXkEOfL2?gjflq%8kLUT{^Yq;N|UU%jdRye?hipU(Q z5f+ZUUHcNSpv%kiiyG+m632aa3T$gQIN#{B8!E?|sWeSx&z|wvy5rqQ3U%U`-+JE9 zA9AfC)3VS-4WC5CzA}=;4tO`?U1>wMpPX^AM(l}?Vh^dd;a^K zchba41cC=w1*=)sILWNUM?XU^Go#8zw-N0_fN>*Dc=|e}A)ok1R!On~Js$(n0 ziT6t`idq}iIRSHdXS7@Q`A}!bf^c*p(daJCANo4 zFdt;p{5C`lrC{+TIg~9g2kaS!k6Kb=llwDjNL?}~5usub{n<(Vof>bhKMm3hh&A-UAgL*lYXq+$S!b+Ic%%hs*M;|kc}t!$x!SrRR@1}%WqU}XBYu$ z;;zbHW;A=+#=+F&YQEwo_7}I*?N57h5Ku#f{l#4~S)=3MCRX4f@zCJc5840K=578T zK~zimXtuR}pr+RS4>iL&`X5U}J`ZqwEW7PrMy3FsY_2Bo1{E-x4*e&drvS|)oOibc zL+jwp!fB6`wL3CX`Wx_+2*$}+As)-+PJ8Xo%NHTaLqb(g2zQnhT-pn>4ar4;OQr)d zpie;AU~UuWcV26Dn^nbA%_a9}&MATC4!w=}4dnq955a!H_(GBrNlyBN+5uI<&3{NVET8{8!Nn`-Iz@BrJOQWSHiOKfcW1v}Q zRIgOO@T4g-+k5YqkaA`}B*_GER&BvHQR&%Apa8qKoAx6Vf}o7&cEViPVdeOipBL9Z zw(JpepxlgJWQD7&GE+NfIuklgYB9i*j1ELYEQvVTgQ8A@FJPYmaW3S^4xj{e605eV zNKfe)0v>gP3_8mm|jvW+r6B5?tGk4StTc)1?@>C8WL4$54(&j_ zuJMtm=7)c2Yp@OoyT^xt$0L1yF;#7#6)X9gjXfQ(;P5*vm#jm^;{`FwDi5$u%wGB>xOaN2H)4q2-9lI34%)&0+>4`W~;`N{9l zKyjJ79Q{;>%w)5>Z>6w1fp7MiO^5Uz5uc4~<^5&sngaE|IQlaGyqI;=m*XPBWTkm~ zx}7rV$L#h7oa>K>Ih=re#hEyEdTX0~VkV7#5kOwcR-$LpoSYX(t_9huxjKE3jY6OiUeo|iT`R0 zX@PevrUNE{uG9bF2l9RWcdQyZo!F=b>3de-u6+9fCJvmA=~0_i4dGJ|gy5x3eJqHd z;Vyq6Jzk&>bJ% zr95)1U7O_lmWJ5%- ztN)>j38-}l0dZc}ONu$WpZIKm!lAAC$P!BGQyDjwUf+x**iSn$YVJM&QWf#9)X(e5 zeQCfu;8;>sy}ZdG`4iI>k2j1qBI(#pF4r$gycPlsA4uT(ARPSgp6QD6;0$=L#A3EU_=Px(CbT#Ijz;ulUYNrgcpkzeuOP z3ZKz`O7a9l1{f`XU!2oJVwo7{8t==RZZs?pOBu(jZcCCa=6mY<<;DW_o)tkwx0G$iA84x*;kG z#xG=f;?|^rc5CGou1kg}>L~ae51Z-{yWq>o(KZf31!qF0u#}`~3~PVRbPqqOYV1Xn zYb!+@-_IOSy?K@?gj5-Bf*58LEkGqNKG684_uaLhib0y9!`%C-Uh?{~rt(cg3$k{= zX@&fL0Eb#6#URg@x<`9*`jfc0Du`5hn)Uc`^dAtm1rPlBYjoQAJjXUx*9IApy0`NZH zyZ`^-ect}{E{0B9nS=j&7Xh5s*?$N>n@Q+pfXZ}Z1HeE?|JH=pq9A`nm>l)5w%F)D zpbbG3Qv-6C+c{$F#~o^pT%5sGVnnvx3(CDqJs&Lm*}e@DSJ0sF#B9)p^GfFDbI-?Y zJ3V+i&!(KMXln70zHwdTXzMPvWR*+mX|;Ow*jrkimlSz9TMX9b);TES?)!2qYPAm6 zvG4eJgFC_UO+kp+v14dZ@u+rM*ax02ID=h@+FWDdq5d7l^m#e#WOMRS#s-@>xLx-m z!83Ln=2vQfyB(HNucYDptBWSTKU<@WFYOawYncO`-BQUkB00;|9BMFMWG#uICZQWW zx!~F=dMbEZ*2SSv^>SwKyBdD_hl1aP7@n#e&nVZLO9fW&-Z`xeV4eynsEeH8DDliW|%uT-KWFmj~HuB0k<`Gt6;R}aPX)iZBOyQNaZ!?=_9O!g* zr9?Z^JF;D)FUs;(7gXT#-|*clXvQR-QfwLGGyB8o6_}Xj4ywEvnIzBE_3*s2)hd{j zejKvuT#^UJy`xFs@qiw-*IA>ep)&NJ(7q5t2N=_5(fjwKA`G9(a&3@Eb8|3yF(&v~nKI9d z_+tj=;tES#t(0D(_FsaqcbK-bYyL0P-<+6{ys2)+by7K!v-|q&G`Q-QOc{IODQTsR zUvMXPbu&78(<}~l8Gc9WI9@h|E!EPJda0GI7SoASSDNHz3LsA6iAOr_RyswUgc9!qI;g28!btI*UsAIqw9)3Q0wNBJY>CL#G zH!CL^Ou2aQQSA%*1A*y_aRL}$rUXUo$P@8L7;Vs2=`Tg`r<$<4MHLtBXA8mf*eO5g zecu^2E%AR~+)QRHJ*CZ}GVft^%Pi=T)*tC&+Rbd18f@H%hp_Um>*rxh>aSKec;x=T z!;EUcJn{*J76<1?+G_Z7fT+4T#R*%E2BZ`o*H*C3II+lx8!mJ_6%rQv; z#aU+KgtD0?FhIv7 zNx3cP1*Unx5KLiD_z&Pp0NljD$O(cp!6t|FS_6gaqaO_70P&=^ck1RL096Qx?eC^k z7riZu3YN9*n>qO&4d)I{)7+Lwd@n(BGAhBw@HHnV!s7PR4TP!_p#CrMR;4^|CYM~V z%CaHQXMBHtS(69vzoSRn!LvFT9m+zQbvl%_%LnE99lPxaEeRAR6=13?$R2Zw_8}QP zz57u$3UfBZs(UEluDU*ytYbwN%8VvnDi@TXk+z8zyV8F#N{jKssil@biHYj z1(ip@}F?Z^K z`Yx`QMe*6y&5O#4N$RXDO*E^H^DHUAT5;rQw@^1+vZ@s>bnGK?1|f*bfXM-xgx zWooIvFt{l&>V$U_>i&R&@UZnQfmhiQXxMxf(^Fu=Gr24st8`7t|B`W~(2KAbawj=q zd+-hYarllAIav74aB)UDD=vT+>kjqIiun;DS>f7lU$@nmK5gaR#Y2_X^`IvJPzw%> zG~Yl_@Sf(tsc0)YORB~g&&w2UPnn?}pDCJEufv?PN#7eE=F3-RdT42scwe*KtMATb-iisKoANaV zXY{Y@CGmC#J@a@_BypmZjj?(TmKkf?8Ju_@gcJbN%z^6Jkkfwt1Eay>VVR3I?>pPz 
z8OE+t2(jdlvIP{r|3)9ByUxJ$s6KP>TAJQP)Ap+Rf=~l94f@0D+8x5=#?OtPr=--1V@^Jdra(kFc4*7R0xJ?a?l`#ixwf*v0azn^1%V zL*@-Dvd>_TgrTySbZO_~HjbxmTwnf2qDcl)Ip+nC@yA2tmcA%W@{_wkP#`_vy~d(neZiw^Rmck>VE_Jxnp@20=%N|GCD zn)<&jsBU}lH$?+d#1hPB|8U3ueFv-ncL2bitV_D0@n$l<8wBK)GP4&puM9*hiqr5* zWykP%o{Q7qCFu}(%i+}n4*1jZF&ZHQv045M0qe1HGn3${{P9l9yHPRHy~Vbsypt!c zq{+ec#_wC+fDjehhxsoKz_v}_d4d&yB_IR!^LCr5 z-w39&&ep7$D^DF+KeCee^`oCKCG1{r?;u9q%)b6R$}Y#T*M^}Mb9P){L`gtGD4!;& zf2CGqt6P%;szZ&CNbG?D?p2Rmi>v<-o)HY4yWvJvnv=p=DJq-C6D7hVU7G4mZ)cA}F2!H4B=TSh?RQDS_De~Mgr zA3+j#V-X<@nCJiU)n6ksZ|Dt^&|XRXGTy)(FLDrQc)tgZu!|rL1c=*6lVr#S>JO-a zR&p4yo|3RF-jM-*x$=KBkF6w0Xdb^q>|(&B(Oa-?R|tH;KkQ#)M>wBHo-nMlXg%T^ zb?-OVof!rhz3Y>_rYy1p~XjS+BoEe(4fv4*U0_Jvy{H76t-cK%+MRK2SV`*Z8~ z0>vb=_;RNOot1>y+yJS7qSs0w)~E#HS$>b!`WvcLi8V< zVrMPlcsm~d?iSc?&hymV40`odu`X?+Le$$9fQVbSBo<%UWJOr&ND(5!LL-{8STnyl zL?#3mFt_KeKrx_#K~9g~9UZ-GWega;y1sVLuE{Mm-plr_^sD_)q*_dc{KjHJK_4Nx1+?HQ zBBkj4E$3}iMfoYXHEOCpGy5(~x)VDUHyV;U6SaF;=h(3^ULj`cc&a+Mfg%H^>!)A_ z43TnvRujQ(Z;{CR3RE8E%7Z(Nr#Y=d$Nj`1lqutN>NZR@9?8a$q`QbJS!{@tQ7HcC z-JZ(Ml8FJu8&8@=?VeSp6)QdFjKK+t)=cvs@X6xxp$|W@;DdburYoBVpy8COW*k4E?a#0*=>pTqBml{fQRZF;Ek^ zhy7jt6&X9KYQi8gh82Xzytd!_U^-YYt^ZCG74rc(HlxC7d}Z1wDYN|t^ekRBHPcgB zJswg1yGTa=L+DJ~LPa?`W`@@wI(w6|yH$OKJ$+J1HlqCS07r-Q>$k`xVSi!)O^s!D zuhlqPvdMb*QBL#-B4;ykj~!=GQc^+-Tmo{n9F}34SF$_v`D--w0-6qiXm`lof9Tj| z_kd8=ctQ(VheC)r%!U32L78`D z93i(uJkE77z;n94cK0}hx&#zl7lX%wkR_7lZg5*##EFx@x`}t2Y<=3U0v70@*-4kD zWhe9l-9?0vjy#30LBZ925S#;j9&@65j7;tgCVd|FoE(Ai zlH&U9GwtO3mDT4bk6oXujmmoDoE{!Oqg#e8TEwV3E-8g^j+?u(#bV_avdPOusZMF& zpG_HxM~H&s@_6TJHs^~HHwC%UL=>Ue{`)tkB#K7qQ21WC?In8)%_;R)yc*B8e+6MQ&dB}cMohJBE%#V zV$_H{-6ZTLXK$VLQF&;<8ae*RX8eWfdP?|^5Z=PqVAYL}^xaKrxL3S~rJ8&~_Y0pC ztP46LRpGMAa_?iOS5ZpgTm!smdp7Wys#76>B(Il&6p7>|EaF2P9k1YfFZXSkv0uD&a1uXl9IfQ z-@3{`K0kACZPV?Lvd>=MJbLun#)z>D#ux*U|x$kqMApwC(Kvgdr2i}^0 z$TTcOww%%|u_wQ)&Y|r&kdx;r9Ik;OK{Qseg|&3873!8&!F6-G`!Yo^@0Gc^NB;h< zW|P}lR^RDc(5-Ty?e@JdA@s}|-M5aelq9*&2)8f;0jhk^|BDs7dz-}U{|S|+)CSg4 z0S`nVA8CR`2UK?5_O!NfQv7i@;8wr&e00f~D;|3B?DC(Ck@W9mCHb}>nF+GN0*rO@ z|Iz89+}=W{yw@K5ER9{i<=h)qCygRAQ>XYceicnI3Ke}$h>JVpUYTYD#=5ccv0GoG z@Mr297`cwFZ#l8V)q8nhgN;b2zu1Sm69pu{euEi4TuyG%!mS`UasFb<4#D(aR<0>uEC-RvCa!+m4`@B*Abz9^Pxr)p%@%SRq zPA!X#u0-#kn(bS|<*&dmM1;mG@fRM>`~xEOo}#}^U3GC0*|*rZjJs+}C{=x!=RqHl zO7k+$!C8Ey1>0Od75!VOeZI<-p9^a26r@d>x?ozkV(5PIoaAWal$zNK3^vOYJpl6Y z4i}g8HEN=uVP+EVZmQkv7`6rP!~i{mBl!Kq-bo|rsBy|%kB7-W*w~qtE|zcO4rF*s zjh2HSqKhDYHZ^fmv1b8?;jN^QqK1#9IC{FRnOqTA7zt?+aL4PSEs_WJ!!MIGzs_F1 zvzaXvc*&*{3TLd@D>#yrWF+?VGHvev19~S%k6a5E`2!+e7Kv~XpP*105mU>w#nJG( z3ZH5s@}dJrpPP{wuKKGD>kGkD?I=HgU#?hY*LwQdoMKvR8O}vJ92yECHXY^B{_!lp zvtGE^&&`582zny1jK5WJ1R-`Gou}&XLJ=_2qK`Dm9TsZp>dW693cO|40nsBBFuF{8 zE_!4G@SFx&D3Tok)$0Udn!L+84DC9NXO0>8>>T-xVTD=C{w5bAThCNEHZbH)P;fJs6R5CF~BWk=7 z>5n7t*l)9gnW@T9e*L?}{%eED_e8$QE$NF>7{JO#-ss$W<=~lsyXjdN;}PEH=)1~V zKU;n5+Q=-@vCH;(s!i=41&4=~M3T)CnO{ zG2!stH*Yt;SpowM-$PG9e`MIhMkvwAu=|x#n7ilFGCv4oL~@jCpo`x>;`h^8=ALGK z7rDZKz(xJbx4vs4CZBsK#l$cRk}fGPd+amZnK%~sM8DZ04B8U7O;lTkqS6z@+#_W+ z0!idSRTJC6^@1b6I%c=xwizvSR6w8`=-f;Uy9p|#_p>m#NE0pAe5VCR&fX1?0D{y# z-Or0FjeqlIV_>y{C)xp6t$_CqNWUJ>I~~D*-3leZjH}Q8N&k=;(g0YmrNP~4!&}@u z@6FYi%4i_uW zH~f?~#dxGnd%4Pxk4ox{^Zby_y&mY(p^H44ylr^A2b&s?;F0&M#QX%L6h_`p8N2vv z>J9OJMy?+5#Y{Q+FENG>F~JRv7+o_3VT1ah@+o%#O-3g~64u&&7a59>iD!D-y0u=; z+48wLBmNU*R{@pFZy{bAWG+r+u+>DMrh=j4ug(Jb%Snz%SBpg zl8E@Rcpt$RnJtgr5paH|lLHC{gDE@O5T&zl9wY;5$~`Zjey$3AoSR#8^)S&>pfV3k zvbbwRN-t?nIM^r9W8S~r;|yo&xNGE$W_)pD@T^ITPGZPlr~m;vpPIf7eb5i-6E4{U z5~0aTCmV>Dgpj)Q08Od!7NLqP^)}XbCGKgZ4-VeGE7-8NVFalf!U<_hw|Tivd2=>j 
z8agt@V2|LB`A5Eg2B6GZ?wX8Jmf^yQ$hBzKU=Fx$EzIARvnfx!$!&%22Nvm_EoKCb z0FI{l)LLyZOo#^F7U3M3u5S{EH7YAOdY2wqxKDN6Iq{{LRERRl_4fn_$31{6h5@w} z|LfpWoB8G2uhwcyKQyyVT1$uQ{oqci%^A;!zoSVHyg)LDi@^m>J#%HGy?czo`6v9Kym^R-N678BBPdxV+i_rMyS+n!|cs^Ph;VbO05$EG&l zwb7G0gbu{;7_x!*cmWS6{^-$Iz@UExuX^PfAZ{M&jLn*Pu zt-dI@ouZi*E{P4VV85)ge3d}%H5sI9Ouq8-7EmDdHWo1>mHA^L?@IrV(+wy$je$fR z+Y@b%cS|VuUmfhnaoqpXj|1K6zpkNiCh!L9G*A^p{ku!ej5Nu=dTDov@Aa2g$^<$3 zyPjhJYm3&tIQ-KaTL|<&iwzfaz5P6z(POHIdVlhDY{&PnBfcU+LmjaHvF!tD?F-_;iRO zo0>tX)39aEGmuA}-fL%!p`9t(ZP_tnrm0U(6!x$u>ehPOHf;FE$>pFePbq{KQOb7CGoOT3QygXKSMIJ=+Rc{ z&FOHpqr>%>6P?@ALBV68(fU`;IUZAG&D+n3*j~O&Ih?}zLVN_#G1s0izxey#6KV`ds0Jx+wa(@ zD>>D^$)6I8B#U|TbqZ1g@#yF?A4an zY|h-mlY{c4g7Oh(ON{|bFR4?XW7jK9jFE7(!i1!}t6Fbta8ow8J3a3#-?=jGuv;q? zdK%>T_7>RX2AeDxZ)|_d&CMtn+Hqeu<}q*R^|f9nBy`Ou*y+h z@{9+=mg&YC&WJc96OZbm@YiF;yOGP$B+APYdF!SXEiUfqn!303V1H<&UUe$9t4Ev3 zF8b#A0<|g1=5ULe$(wA9r8#9;(0&?6H)WBXjaOYixiy=X; z%nkh_wkr4mw)Wmh6VZS% z6&5&oI(?rEE;E3?FZN_^yRl;d{BRAo;yUp+BRK8%3X^%l!h5H>9J`}3U$^Jha5ets ztuHjFR;96n<3V$5>krS_N_YKK8R`|wX|Y7AY0U$sX3I`1XlpmqJb8*TRyS4RARU$I zzdH@|S!*2LZ~y>FHy^6zme@n0h%bkpM&eptvkBLWrqprfSq4T^ZXIu)a@aA{XyAgG zf*{cr!S?N4-*3b(mI5*)HZnLPbGuKzV{Ca!f~!H})#WDaMViwt<}q8zHtwwi_qLTG zny24f)XN>Nt@dFDzo9n7d=3IIHpM)rQ#oQvJNop{7o?%VEc(#-y@keI3el zt~Q*(&y)|H5R?jc`fH@-6Ze94Zia@y&zz zK^MQVzp$c#i|y~%yO=b*7u#E|zKB9u0OHKG_BPl8u8!9)t{=*_SOOJ(lK-_2&hb;> zN}ml{K+(gvpNvfJzL>1!8q%a%GOiw+8IfTuT;>a^CwgfkCiQ&3@pY>oM+?5^0q1X0 zU;+5Yo|LlgopPP! zLdWS{udt&Nl@+yrDqENyAKpV@9&2?1p+;!YID3mdQLI*MlPVzV7#1Mil&U&Kdw^dn z^hCq6zl;Sn1%DNnMw;$Ja792Yr#*@X4lkfeiJ37-nTEW0V5yy|76=aqMy<7XA_=b* zB)GU|*-C^of5CyFEwHsvG8ibc>i%~lIG78l>lZ=bzYQ*sVX=}8)WY3} z845~(JQ3=tC&%ZWRPs`J8lI3EL?KVmn<93w@?#wV)@WP_CRwngD_M&=zn{X`RpzX1 z`1T10++NllpPl0PyoM4}!LxrGPO}#Ro+>e{>u#MDr8%xHdCabZOPFJR$M^_41f$#^ za3gM##FFnnSA{goh~xb%nLRY~PJ{7}@nKr+FbiNKu%S7p#&=Z8F~Qm;F#9f!=!V-D zqrv5(Y;=_xN*X_j#Z~ZD2!-T-_k-x%!sH$)Y>un&i5xndnTEpndHH#Tszv;D8%Jm) zr=(jXcK0hQAbpG|VAcKa`@=@$KTX>B(<>bcJ*xx^o29WKt) ziPlCK#V-RAY&7$xqoe1?SYs#YevXd~hB(ZZWfoIdEbzQChV9xONE}={o`llGV7G3R z=9GVD{vm)WgFUphm5@QDT+L1!eRL02L{_&^&vrcGbKTM=%s{Smq~xdT?i|2X;*zJ%G zSFG>>kb8;aoEf5KsailHMFG5s&sQHWSp9^QD|}U)0}{=4k^14L>*OH$VFOkPZgLN zs=J6>2WIe#7=My%)X&9oy&{Y&^L;&1qpY)dF41VLEEy!)(&rG4|2As&Om3N*OiBs<9B4g)znBxjCM2nHmvQPdFP+k3pk4uTaS3KJ+kG( z3J+ftvPIQ?AdL^Aab}`mND>lY!(KnoQJ4a)mt*Dl7Vvpr-_=1j-6%9S^B?Sgb%do2 z5@?28ipOm}f34lq7sZycoG0YO7oyycn&PXjMBAyUh@2;v9K+&2I>hHp1YG4RNV_xM4W&$F<}o*ufbqmSN3 zIDOk^*C#KK;GHH;Y@}5k&ceU4)0-0|a?{Fu-JSm}+tWJrX#hKdi?Q{a-&zC7P_-AF zuVZ#uJ|o*S^Cq%k$sjn(TsKUkX99!Wdi^$VF;{M6I#K|yP$$sqReD@ni)%t+p-01c z&*kw;h9k1|$lGlMR(IngJ@2bIEGVTFbBSp9QkNJ;$jD7aDnw}w>0-fkhYv?i=U>yv zm;t=RzfpCu53u|=Ox=);Zxl{KHVglI22ApoUK+_I1MFKE#cKX5f%>}%yBD}KJo;*H zsJJd7+<)+HGU78NgyBUun4y3K7@uZba7aMDR`E7J*xy^$(hb*%BLCplkkGy+)ZmC1 zv-u@7>l!}Ov8~c;bxJlQuG7!6aH7m3RA1l1QpggSuRVgGB$8%a8&Jo3jnruMDg5}M z>}#H_;PZDUDXFgr5ey91XOm9fA$Iw3Gh>LJJl+)JgU|IwV2oHv0(W1stbx0d%B`=rvSXH zHdB-brKRbbCt_l$j(A~o3sJzbld;Wv6tq%WR%(*Ay;j|-N-$E)P-#~q46KvSAaZq> zU3o_C+B$W)!`rDU-3szPlD0lQd~=WbZR0VC8!|J=c|$?83(eV_Ji-a5RUW@b4Op;F zbF8v=MQWMQeb-#jo~kB&5YM~R!FwZDp`O%Sj8Z$LgC=Ly(u35N`bmeM}F&iOC#V@{Wy@k&r`e<4)cBUZt?X|5b7A}@NCVz%Z?qPTsoS(#&kUO5Xu zT?(2ocnpE1PLC7v9<`$`I`JZIL_M}gDO6z5-*VdZD7hQ8#9nMg%LEeo6k-K3g|Am@ zTm$pXwAXac{}+4j84hRvtqqS7qZ1+8Fk*-ry~Z#~^e7>TB#0y=h)(ojbRoKsh9QVZ zln}l5-ie+V1{2XnAB@R+<-hmy{CBzEW8eFDpZ(=Mo)6=LnQP`MYyEy}oolW0O!tV% zyHmXUWN2{ZE?%ispbR(t*2u&nNanh=?kf243ePKnjPqFw=*2;I04qlt0xMZ`z|A_v z=M_B0IKyz6;^o_e@&mMb;=^GY&+UD5^p16Cmg>Y7pg=0KWLRw8^<~zG_EQkLFWEXl zhFTNQz^164d~t5*ld9MCL_@BGoI)+vf(Wb6DyXj$5KmcvVDbT>xr-@Bo9a!rA=i(m 
z(oyUQ9a@eQ53lc%rF<%`e1WC7q|wW{hvFvM;RnVo9o3jdDtR=|1o?l^Y#MajNscc} zzAPOV@;PgI-2zUB@9+E}{B3my(V=?V(qEv=GL$l|jqZ;AIonhZj&^5$aaUY2eq!c3_Rp~+9i>+)JFyjS;|KJzGds9rq)aGnpMHHN>P~l?y?Yo!c&g;m#6#d0 zzCH9qc<|dw16EO7(M}3tzm=j0d3G?Yy)%w|lA1k@YWqS^{rIx-1q13+*ELV$?HEoi zfjCBv%`NpC>EbeQk$~eeX>87rM{J&<&xoLyzymihUD(Vrsp4Ip4|PihMd(@rsw*jk zUG$Cea%li{D;ShK9T0r87mHRGfz^Jz3Fhb8y<`(z zae6}y?E>)~y=Oo_RAb7ePIhS=>@m!@b?gtb#JF`d2AYIP(rS1+y19At#ZDT|hrO%& zq<~R7bZ=e^moyvKpFT`HXVp`eKNxZ2t9q>8P_Re$?r)H)ST^-o&iHMad}CXs9%W8G z>`qY-U0XogcU@0!IqGm^;sxfCASGylyt7}mPE8#R+a$eZc=~-Qu<`*39j~uv|J>XE znW50B=;wLr-Q-WiZV<8IKzZBBB&MWt;R1n6A8C?DXPuH)^rpLaRbnf|Zj<4dBtQG> zKD1)0>r0c5YyPYx7dEqVeyY3ug!5-!jJd0 zHgvz<9z?|#%n&||Ep55zkf*=05qVySkRaMJgbrKJV@t%!@x3NYch0_=zv0z)WqRAQ zP#?h?E`EiLEjuH(- zXEh)FKIdwk4NLIoeMWp#zGQU8af$c4H;*Yy5{>C3*6t+1=(#~#qKt~Re6}#Q{t60> z`gd6SZk05yw6D=)75Y9KUr>`EA^qB@ zTPZtNz*A;%owh3*@gj^f%6*}`-Ywuuk$zJh+q{Yi2hRnozbZ`L_VB-51`kb;?N1Hu z#U`|nWgQgmcwz6C?nvy<&RuhntC@c8@+eMsl~Ky>CLsJvFANgq;F0l56V$(b-yg># zn#{1T9!j(F&DbM+b0KkWknHa3C<>tz>4XIjDa>-#iE^hY}j}48@DgKsAs^%PWb_Di~X9A z4=2e@|C-TDSc#`Tj;A>-hzuW%m0ztan3MC=Rru_}t&3u7XQ_)XJHF4U<0bAI?j(X) zwhwQ6T;Nqr2->+i-nGi3Wj^t~$}ewLJQwYyrhvg<-Pb3A-&X^!J~nRh&1Ii1=3!OX zwtzJp>v+r5B%Aa5FR!<|P)?*s<2cssc{5cOpi!rNdOMQ*OZ~V#LN?+~SMZQ@Br=T5 zv>l#hexHz2vW|KjelvY(by-LK?w9w{S<+!wKQ1Tv@u-Y_IW7uJTZk!cxZ&oOPq?vM zQ}eY^SAQv{in^6&CyWWE)IGRyPJ4+5(aEd$7OOK+k`p^+67zVEaZ*MWA_=k!Y2O7a zmCBE(iL3{>z5~pB<_f_d}=KC%@;%W#olw zDQsvZte5iIFc?<5`r_Ou;KPQsc!@ONEYh-5RFFlAnkyEA6bNr>l?1SB<4J}GNtCcs zxE{OnGU~^$XJcjk`}8zVsd-PvfP@UD_6!sSQGcgR@OyJusoBt$I%DH!^^v*6#eOTp z2oTwPdK+3Rp}P$VhBILnt{!zMCJ|CC%F>YamI_SyAP%O?gsGZgq2*(0zAYGEBXi|r z6o|kw*D;b91HHiF@A6~m@@Rw1qon?#xTMbS9CA`juQHI!X~r5)UttTnRN_S^t@&w~ zpT4SEQo=@D{gCNwLC0$AF&Ei#W19ZtqM88Fsd@yMBlM3XHNzjeUuNPd`F2ZPm+Yo6 zh(C^O+@TApXgX1HAXZz+eVpaEB`9MgwtF{GAmbE08Q)s(tg{K@>07B!2?+B?6w8NG zj9f?nb|tlnoaZ)8{>L{(uxq5Cv!8nXwY$+1*wEo+ktCSET@0K556>D{Xd+I}2iaTi z4vwQA!O*)J7wpL8$kuFAVnYH?Q0S*1Qas?03x6EaiL5pKLUmf)VwXB6*9UsYl35ir`@`b_TeDave?n%x2a$pO7gRI7V!Y|W+vWOUf=1N`E z{l>dQn*dLYv@W+EPe`6e#-|G*#$+5P=&_I-W8AqYMl!H+HjT^HhpLxG6cNrVcRiAV z5{sYBT689|+->7;zIddMemC%3Mkk9J--=N5!Cl`B(bh!%elo?jP2W7+Kio2*- zT{whtdJkIpjtN&%d)DSdbP>gu%y3UZR96$)3*Wcb%T*3@uKy5t-u}rNoH#(weC=~; zc<%#6Y0hqeNn8HtOM1h$aj8S9!k0B~`oeeBAxHT-qEcQ7?Y>rt_-swt6cd5)qjCTq zfd{{oeiV75mmu#6ypWKN>`Ai@8xDgED|GLKN0ns5Z(0+3qK zkwMGx*VI!> z!i6Qt$Pzf`&Uo9kLGP=^bt@^llBu4oVpc(Ku(Rb-L#y{KIbcd9ZL?TSLX3;OKR&hV zs>^bES&uw7Lg`w66u(sReDMAj?MCN9si>0t?)|nq;&*PlT2S&f7iewjaZXAw^$hm# zl+6c;+$nhy^jeJvd2^KcX&oZU+08lfQ@xd=17Cn$yP(v%^!&uT{9HJISOPEQ&dglhpZkL7bGn#hCk%5d z>_VO0-`_BFNyuEnI;B6Rc%}RV5xx8Ivbe@I$|N$%}~ADeiwl&m+!XB24Idz zXmmiaNY{A-j7PWf_T=m5LnZ?~YCn5gmjg&IUt4EnW$#SyborlK7erAFt9~Y?=Zb+GxFBU;>3x?!cxJZc#J1XC{|f9dlwh+Y7+4z9OfPW{ zlR0+(XX;DN2Og&%+^)C>fqD_~{3UM$)Kqp}y~tMhj5P%2;tf5Y#7^a6lU?gTB46nN3|B4lA)mFBY_)D}S6i;QMy+JWQc2Bwb}# z*ESBj3M;tDz9&wIUXsXP8bqAJZtldChv?m>evDtmf&D?^aTFjW5Jd%9d;wMFWBhP; zmP}pD{*3p#`X_0&duBr_*PgTrR20c3fCv1B)R&h&SqJ}sa!3{}k8Ru+Q*vdFxFMoK z)9%{S#m(o{>35^AALA3P{@HMRSw*F{^s~4vK&R)CrK!@cIcoZ2mj&J3=-(pYH4LL4%v01<&iP>(&k%@{RA ze?g{-Ogtb(XFjzyk?-r1h9-ZZG_84Q=Zv^4PZ<<|t0V+YBrCq+U3nSZVpc8;MqfVw zd?J{2L*fzl>yifpp6a)Db#y8BHe2M$Zd^7c3-by=8Pv-3!i)L8>KV}Utb-vhY^coZv+fEz}xZP^>C%E39J`h@-Z4SRTmqlD_)<=4LKE~M@bvhS?@jI z!0RcCw^JO*k^O^0tg->s`XA<{nplAngY!fw@%+MKd6f)Y8w>InC+Pq)lk0yNI&BCN za%TE~k3|zmpkvgc;7I5Rn)YW6>DHO5#oqZp;Y>GC6?YgxnFIiCBmU4s(La(#;TPfL zCLafB>tjD9r}%9e-?k9%!{w|*KO8RzVDsg9o|#DT1xF;Yl8qx}32B8$<%!uP;p>N~ znXe`>=VTa4K4~%gBsbPSNMQQ7uhc;99nV12#aEp$XkDjN9=}2S5bkiieA|;p&u!7! zbA~dGYsX!8fII_Q=QU158B}Jk*9}LNPTd?iP`={IzideNAcP5ZBkAh08JGPSiixD? 
zmwP>?0=khKx087x|q*oT;T6b+g;k))UYlEQoE5f#Joh`oP zm`<8Th`+N;-dvlW3UxqKNHRCnY)*G_cgl_LJC#14grEc55fSAxyJ-vzJ` z?~_<;(qO-x7w-q-*;k*M&ZodkO1W=oiIevvZB zGq`anMpa4|_JN5qbcj)8s;*$;?4k3xU#=C}GM(key(|4S%3Wj++yheFKlddxzTm6p zpM>i#+0eDKN4UZ72Hzk1rR=glD%)p^cEzYeE~NM$Y_v>iz5fXe<|M}K7lh&Nq+TB@ znf9-qD!gFCpQLlOm~g!*+Lo`qJ*LxtS3T!ud`E~u$Xx&tNWN~A7hs%;H3>yf@uAJ}xe|?~ zcYEHC-}JkqAKEe-!BI?rBHcVD9^lk&?Qk#IKPSP~B@3M_T$b|yrCsE}^Ot4t42Hpk zk_A~`_=AV?mDAS6uDGlmW8F2)wIf+3aZQUaf_lSKa(B{u`eZK&%6z&iFK=;l7ZXq- zOZb*IRx%U&`f#P3Z&D1%br_-aZ zbjuYFo(<_^cUkvGG^>ney8PR`h(9S#fFgFd6KaLgh5@nh;9n8-IU8Ca*spSc0ihin zsSqCk^qxPULTxw#jhvr=M$Rzc|I02W1aXs1s$J+y!V3)ueR3k5LoIfvbnUSDHHsi1 zBSciAI1GPNR~qx|Qrw{$+bF6nM9uvCC&jd^Y-Wpu27n2NHvSmQiK8?f+QRfE|56A6 zmqHjPLN!c%mMOwKIzdXGWg;HK+*C{;yuU%|2m|!>Z}JQ*hdjDQb$*v)+F^y9l(s~Y z{2i2Hvv*3?PxO|o?aP-u26wrD>|Ybq}6_pmRtl-ia5 zuJoxVcQjY5tjp6_H(+i8wP;E-CAUwQ#F^1vdv&O|Swu`Zo80}UqTU2Lv zDOlZS-9|@r-L1i(GR(dlCpwlBBr*Mt@Ii}Hom;mlg{QLP(y*V8jeh!LtFL=`V$BJ% z7{i_qqx+c>V}6EA3hpNvaZ(xr2E6B|N?J4Gp7r266D)l0EQwF!98S!MFW%?6RJ`cs zyM$ZUj`o^|yPO*SoNdG%rWY6ex<}lRlGc`z(&8qoUV%6(4SzRP=5ILau;GT^DD1)i zbmM(Tv0ULH?9Y)p(bH$S+r3w!?OZYw_vm{Um46R7@m}(?`pE~^)p>m{!l3yVHCG?I zd@%LRz)b*4Iv1;)$Kgs~^7h=xx7_#E%;2}-;L~oR$inov9r7oY|1dn*+VOdQioe~8 zpAc__OJ5W=D_}Cr;h~?R5a;qaU{9C`*R2>H7z)yW5rrwlqu|L&1<48D1IM*o)Txz+ zG$<0B>>O{?jm@JLbcv3L@@GWcM!Q;lA^{kU(D)NyVM!36i5R;7Z$PC;6lU($%|GTx zRMh66=m?eqq2|YdxxAYo3joyw7Vy4*(Ld*k;sk1vfGQpO1Ry9UWs4n0{4q`?DEVe{((A>>%zMiISP+S~`rhgja` zc&MLeK->tZ?+rEMXoBzF9Fg=)cwRa+#Zv=YFjYDhmhZ_K`d}JELfH(xgbgUoSA%(7kj_m$U;tuOxcSS_d?K&11GjS~X62a9x5ey5ItJ_{b)yPss>to`X*~w>he*?YUOvt&>&`y>5O&^}nvYkH zWAGX6r%!C^AbOCB8Srd@HW#W%2(XYqf;rJP%>P6T|6hdC{)MEtcZ;twai}&Ermilr z0qRwPpaVcbDCq^fWhDS?lp-ojUef@|{6?_1i+H{ysiOpD>FdO`B zC_ylh$m)h|CbCvz6}yJRY|0h#mzu$ZuXo1mN9Fq^ISIkvBy6MA$HiC9zW>Oj@4Gg+ zTRIS~;wi%b3U+{%m+@tRL5wOM1{*oeKC%mFn-!azOa%T!B3m3#mN5D5o7KGIP80(s zN&`|!kZQmf9l8*U3vY)>>_33cjFn0d5*YGla=)`n1E4w+mvQ1PIEsuo62jo5odg*K zKJN+K$8%FCJceKnEN%O!_BYL#5}W-8b=xEeloqx7BSg(Y%Ib^Gu2mSAF2k0%lvi0^ zhkz%SF=T8x^I{8DY|Q@iF~G2@B9X116vu82sLO{hnvg6wS{s6?{zyjVv@8HC6{m;pMeI=VZ0*H_cANiipIwh z>J2B6`W}T27FUb&$LI2YG=H>ieRJhW`sMHRJJo%g@ze0wlc69t?J5nyp;9NV^jq{t zv}BLeDN-j=KnIsYfI$dgg*(^!9cZlfIeV`V4yY?#4$Ytknog*zUf7J=A7;G+7(qA? z+rYXQ9}HMrt3@*=y%~RFI{}3^XNp5Z)WD>klUK0~u%%*q{hE==IY~~x;28-s9*XM~ zsYkv6aWW;BtI~q)0Cj^T2Z}xQ(mvSDE*i40>&6$S#B(2iMr+>07Ti0j|NFMf=RW02zCKMy%wds?!T!DxCq z-l!~o^(CX)hodpQ(D?t3&C3U6F&$&jM?Lv>u(u`3J_;3*H@*DZ*8a|+G_U#ZU-PWk zn&f|84@Up;ia}>Fe=UJ<3DwHbbdG3smWIN-AyE;ar`mq9r1LiHjse3l67pAQaknb! 
z0-x1S9njUt3KorZ%;jV$99x&wgOhFU4j2Ed>Jg~@wrOkDmy@-qF0Kdi3`kMu%^NxQ zi39-2n3{{bz<$zdgK)SOqj-KPh;kDPCW%;cm&%_h072VY^Zxm85)jD|rZ8wCTOf(X z3Gf460uJ~qMd;mW$DlS63;jN~sfkWLdlye2z1EovtdxhJFVl_r3^!q9vV4;C?Yc}S8)A{?8f2KTst3PpJ+yU+p zcUZ_j`taJEKwfeNn`zBp5aKUy?q4o{(j}?R*(Eq-iV~$W5!-A`y$;GM`wu?W>ekstNSp02Ab<%qnx0N7ho{-~V{6A-2$5pW> zhJX8Cy|35_j*PwTr0Q>+rGG-fnrAtHNE-UxA|>w zKj3_7!1A#`p033Ad@GjKJnr$$|Bk8sw;t>N#D{Isx(b>D48zx?4I*h!a!_6x& zQU;_$B|uz&_&;up?^V_M_%tmgRekAygRc5cZ_1*}yMKd_v?rE-+qs?eE&&S;n|Z>g zZ1d0mjUDQL@S*)*Tl?QDU-jXKH%ZO2QjcGYCMx?2U>KV!$W;Gu5g-912rPQ{?k@x| z5g2pZ;;?L(KI;-bOrP~5=UvNAF0&+!U(^CnTN_`CwgtSWEGDv&-&R+ zI3^OCP}}gy;M#C_3>XxF-?&`!T2v$bRDWyo*)oIzL>?`)u7&)F7#u7p4-X9U$nr0L*JIn}DVzg{TrU$Q9PgJb<#ro@MAr&P~%h zS&RWF^qPJvhmFQqh}6_#V*8^Sx9AfEuU;-Y8={eXFb7fb36RB8Q^kk&z^=bQ6Md=^ zd@c^j_&IxLr)_}8LZrDD-P%2ActN8C*oQal1*a8ge32SAn+lxbJ=*Nqbj{}3!d~nT zdg}f!0&y6x!~kP%S`OZMdY^3i^Z5G3UP?5XL;Lia$Fq%KKq=Bq&W&nW%k;w7>);_5t^$4cL$wfn7NQtiz*&_{!gdI2x2gN)cS8P>W=vF-n-V zWYms)I1@CL4d~-n)Ywb=$V1u%gIwxguS` zE;5oPM}d=bb{V`hYY8UB;&FG74+_KrGYK4v;N-r=&b5h>6cD5X;YEQYy#)!S;VSlvUF|Vwh>-`ddpCLsV_dz zcvcs+-=k~N79x*Yv_kUm1Veu!2*Q6Z_Ct01aQNa&rs#W@z@Hho#F>ab#Zl@Y+qeUv zbuHyq4zbOt-=J^mZ03toZB&;`+B2D@`OcBLpQm=0qj!aEqHVx;Fi6BmIWTMn22Dg4haJ|Vf8;W0NE-U;aF1EJdXF%15fx6&#@c!_0ZKZ<~ zZ_-V&;a&_h%DK6gV3RGuBtC0(4fT^hIhdEINPp2JDwuZ|yfVhWwm=BZnDT|TC9+pG z&z8C@eXY_bZ+N9BT<_^gC7arv{rMuAvEp6MiejAeQqlv^V$UN__0xtYp)*-Q(DsEW zVE(l2j1n6jFM26;j$o`Z&J?1?P1Q?qTqN2S9K!FCZhilSqHi#Fbd$(&p;UD5y3>28 zpZ11bY2G}FqWSu5h9NY{E*>{`)s8I6$}U)r8rQsNiG+g^3F>l!%hkj%KCEi3c_+nU zb9)vVAMz70%`exWL~R%LR&ioVHz%i1X@H^x^)Ed#uX$4{S)?+-gP(|I+82bS#`7{_ zRVVh>jNGCY7<SS0{^ zJvde$hbaR#?2x#KY7fHMhg3MbC=I6s{l@&uSFbDirNEwJGz}I zs(p(46Ih%fy&yH&Zj;r2Ur4vNe<;5oiK%k?on z%sfd6eweVZNIWwF0kAght3~^>3qf>$ven<99XV869^vk9(6t3kAm_b~AR67}%3o8G zwFhVLVfS;q4VR|(+-ZKX`DN2C;0uGB`k0jtv7*RlKkiKY+8Sg zkN0&SeVM-Ur9Gn-5hYT-J68aS>S7Y`A}ME6TP&j5gnZ(Ip?)aO3@K87GypguPqFKo zNJ)b3C2~ac59p(g5cca4I383K02bahqy#0zF;e4{KZF(4?0u;f2-DFxF{Kp44`WUt zk|7OnKcRRT#sxf=6H%T3e?1(6RP%I~&Tm5<-YYj=JzK;u8eip|H>C@jbOn0`jGWU3 z*k4={!0l&rNdZ4c6VFm23Ep_`I;7gjiSKAG^isMOAV}7&-0-SB<3JFl?l;+ABb5*d#=QPS|)c_ zINCN=0qBAfUElCHkY_9~2A|K<(QAVIl+e{C^*y)$4T2OU#vtgMAa@)TzD^V9bRy@h z$8(>&FpAOFX^6S&$)01T!PClLCtzpt&W(?ShVx96DR!f9@w2WgXoo7@gap=7X92AE zKNt)YU#NBw^-r^~WcoD2zqO~2%Mi&kx;KcYc`U%9aUW=(wfj%D_MZ+#%=!dfo9B&o z1Ps;b*WuH??5}pan)^gR&h>8`e$ULwpqgAD+A{?dL>OKb=`XRAM!>w`A=F*L6=bh~ z?@awKjNdWSQJ-I!4zj4xGi>5ky0+e7=48PF`qK5>Q|9j1EHVrm!}(5)9;N7R6^J0nvV}ZE`M{& zOJa4&uwA(k<~8*e1PlQGmlt(6{C3mhODmL{Edq>r2iQjN?IzoBDQ+Pugz)Qog!5m#+ zQ)S{$t~AWb2Ux;8vfeeI!ej9te&1;XVaF7WVA{8tv%%=(<`Q7~l{F@bYSc8UO_^L* z!FZ2p8M(SS(1sHx{<3J^O;C(DW z><1KgmJ^S)7CKO;oNykXDe88r&9kY$&I(dug|o^3t5>ZX;E%f^twV^Vzz<%(L2h5B z%&n@sKui(YU{ZLUJt@6i{UWug;U#AZ65^iKg}o{rUDt`lnw&kFwuA2Lz}dB&i9}@9 zPoH9FL9*$;{IIE$Ax#fOKk2n%A!O*C55#O1GF7Q;Z`Re@I$gTpvOy5jmz5vam%UPZ zRY?uF-Q3KaT!CG`%deb5d`mdu^SuBsIj@-%N4AVjx@rodj_(MQ>w$kZtJ=VEzhHV@ zjYwQSPRy5A7vdvl;_S95Rx;p{*K8K%aYrzs&jLWYf+1!g@WRLZTQFP;xRTrOAz3T$ z_m3Q=H+S1hxTjb_26-rEO7G6|UsE*rcIN*y-HfR%(R6F-SR@~hmE20Obu$k1iUrB8 z5c@Qfw+%S`Zwm=+LGaC3BZ1F$7pAtUeg--A=(M-uMt4%KG7W`X7=T50sq6rB6CYfJ zGTTe2o|;If$b6@P(;p0Zy>+U>P=v?X0#i|N??DiUJ*8_GqYT4ky%zKEmLs{}pgMNX zpuH*JLH2%-7?bIc=0V+m2`cjWfE>u!l^zFfMI`V{Sky_aReY!cU*pZGGa0Tm0h*(b zAq>@>eGm$MxMhs3iAs?j-~x z#^{3+0EkvUo_qOeh2}ZBY*c%u0Ii@2pAxgu8 zYwGmF15!+dg5|On&y48q?6Bd>#>DNje5^b96MaUMdiNo_CI-zI6!|X_ViNUrzlN9> zf4$n`TJRR-kRsIW>A{>4v@Hv0^FNGid{LY`ACY(zE8%UysiLDX17#RuQeurt8*|UAt zsK2AI2tD~CuJb0mmmHp;x)YspKZ=}^gW4-_E`k6PC&V4OMYh4?U~_B z@mpbRG?mCBBze7^IZ^pI+;+r#--s61ao!~V?#$ZT%KVo0S(=wtE)=kSP-+IFLTZ*$ 
zsW#qb##`0xl>AhC{FSlxWv^>7bfwYh1ROE?2HT z>U42xbZMOP1IzCV#afz`O>Pvc35p+1E!})!ydwZ@h`jNRnj=i+4dq^ve=Ak3mAYPWICz^9E>D8DPhxkg!X4R-#_d1m=_n)?B)=)ycGPr0{Lu;77 zs~1Fy)7CQV;`TrGv1=Wbn{aLp-?IxBLoxRZ?=S^V6s+8Oz#fOeq5HyDT`moQob5~D zH!{-Q3mkhX5+e42DJ( zaT~;eEWfN9uswDKaVldgRat9#f~I*Jg~=im(>Vh$#5{X>`6zVY$M1sanfCG>dBmi4 z&L?gy-bAr%KoYUOnPo21;>N^S*94nho`Ye<1}aLPwLRU-2k+DHpTh z6+_-z3%CAe7kxc4Wp;CBa5ew*b!JS9JLBEj zVS&ARrQ*-%bAF{__Cg)g*y<|KYnDOe$3#uj6gs;k|4#lQ4+}MVLSUhFyvp^)Z*r$8 z9dL`CqEC6hLDejwiGAhX&5;3ziNS|;K7-dpaRXm2>u_+M<|Y#W^;+AuEa9=oFU)oX z+kV|HubSTS=Hb|W{lG5D0?SjF?!z^i3<*ffoK66Xj#+)?6pXfgY>Ds(DyxsC5yJMjtO@bSJTc=E@|VuiY0XZ(G+gMhjB+oH@qiX8h^{y425!MGWcn z!XZ8g9lsfLicuyNq5& zt(saC<7G>EHbTmCYezvUBRWgBWIWZZ+o6#lxr+ESe?=DxPS=?=Jqqb=XT08-NT;>F z^Ru6N-D?JG%bQVJ6=X?F9484Apb1WBAP1OZ&7d!0G3J2d;KRjZGF1~};T-hDkBy>dVCR0P#(Wftl(kA7+yoxQOmD zUR@^19h(a|bPs@(eu9mg)4a173Fe;((bt2X>||F}nyUG~pmH_#iLX)WM%{1x{Bf+| zJbse@?=E9KwJ}cwEZh5_--h~=vNT`wf5RH$J&}Xhg6w6(t zSXEuB;n*epVm|d$|MXv9k3$|rL(VT`>9VC2_`v{*S+~fvkW-jUNO+1N#5NAl%~HK; zFIRFAUe0G-j={k*p5;C2h`J5RdcBUmK2`Pdz;lwP}6lF3xF0VOSY{0FUZrS2O)q917%Qo2|%UZr1+U|10ma94ql9*jxfwxXCI z?`(7+3r4!y_NcuKNi%+JCPM}vOERI$urE;}I_B$fhx90S{Q#6Jo`TrYG;|=(*21j- zR3WEYvy$6{YxUg%0Nz}Ws$-!Hk@KI|KmD#^dorcEBQ7%H;**-VIjDqMUX~*piMpPg zppv?uKNEVynzq%DSb)N!g4^NO z#vVH2GQ-}}CxL!-9D;&x8BdC=)aqRssVG^zanmbxg!U+rl0c>g_0#<2Vt+HL=fmYm z`8)4q_giL{Sm>@sX9^Cf}(}$PS$=;t=+BOhOLfz)vsn zo#PzBCLA<-EusjwuU6bwBcvAHbyBen-y?soY-*)REQj$eqVxWZ+0FNmL$M(3`C z9ryQz5`8#&N3P_aZnEFkj#o{>-=0hyIHmhMIW`c{j)T@Q`*0wl**1PKOl%S@@gEF* z;$Yo&qiGB&)aSU5gRgP*5F9Z^T$c6|tK*AV`hv;mVf9WjIDhz&NAlY(IRt~_$*p#) zwL?c1<9z!_kMOSfBqboHpf_`NWCHe)=k+?a^9USmEJ1PDYQ=E(Gt@TkF=MK|%(#Z)de-`$x;- z0)Y~wx;y&(z5ezicfUtN9p9%ptV@e?B3?#X(vCrRHB9^3<6C^XoQ9uVw4sMxyjn4J zH9E>oLecSUO4Z^A7QG+QnZLaDF12#|WaT7&)y-9mYETUH_?#^Ny-nE{f^P@e9q!#Q0vG}vcit0%($4|HNz1>}iTVMdj0T7E<#us-t4!F}TDtFts?+)o-XCzPI=2#0; zxpi5~+L*?eaj+plO2nXyBQ-n@lMy&3b(=2%7!#e!s(4XJy~m!=5;{(dwF|$55XL|Q z+6l4Tr1L)$wvXc-8*EAF2gPkk?OWUk1FFc&M1`kt3 zsJ>l|G&ymd#(o$L&j6^ULWm53JDLZ`2h~ScUE@mFT&O#yoExfWcgwP50NThb6S)#I zoog2*m!{HjH@@ZW=0@5!r_IE<_sNh8bDU~WLX&+&_flr4>bD~S=-5ptYumIu^FWhU zUn2MNF^9X(Xayz1**A=Bu}~C|*%@x*QSkJM$iwmk<)Uon$ZKHO@{P#*;d}w!@;|w@ z;zeRPD;x#(sz1^L6F+NH1^2Jov%b5+ugcvrdE3=_Qt7?!>o?Z^;?h~hUU5E0Y@LK$ z^;qchs>wsj=aOtJD?pg6Pk~D~)qq9@s1m2{xx7x-AL3-A728zlITzDC4vqK7;+qKb zOXzdrks4SDdwx?`s3c&gEyX`i(blUY!2H)em;4NB(}DV15&rGGemLdhMyISXBIk#- zCY7xDJ>l~BF@00_(;037ca#1Tu1z8$$QSka#fw{1LfqBD%DfFMGGNY+@9}5jeyx?} zqz`f}I&cLVKHFcYd+P4N;sn9`r&AZ4rjE?C#LAe5pSIqUo%iN$<%96PE|vavZ?7dF z*8koFd-^NsuXn!BUbJ)!ZJC$N48e06dwR4TAan`+*IwMI(hj>F%keGZy=?!5sRCnk znrAA((CABwHP=)-m-gh_QE278VY}RZ9|7^?r1&y?fq*wwZv2AoP%|@$cXoab@q&0c zmCc7`ej1R$eLDYD1RPS-YT#9o+zXkaDqE@Q?*cD7{b+NP@mW>r&*4L;g-}Uf1t25Q7pL-Qw zy9MY~l0V7h0~1X|)~`RtlXwosVtu-Zf~^X5zE zUzQ&jWU!W*_lbL3iHDoKUc5T{L?m5}_2n^#LpKNf%u^}6bLP=;Gp13#rj=TJ714;y( zh*S4u*5xLrK36t<68tg}?PDHd;??B;WS$;KP1E6a&x6KZ3XB zHURvLhg{)B4mbGsZWG>un5oR_0}q4>j!6~oWrldL$9g}Rlz5(QM?YgQ@0eQqum%u= z8J<(IAOjyIKE5$Ol|w+aWYOazaHlQ+=kG+OkM?6HPQQta#U^LhF6H4;V+Sq^I8GnG zT3n*|D$kK!QJkBeKQ7}0H&$nf3d@pCAGrTr!;$jg*B37;Ire*o0%H%j6Uhi;&w+OR-Il>mzUjalc?3YS*?uZyiNsf{m^LX0kcjD zf(NKfcpME<&?4<1PhE48+@!!3UGM(f_H|8SY<7~2vHq}umsIz|(9kgd-ykTdQS_r= zE(J)^1~p71y_^K~c56~m2~%iYlR&mCdsOF~{8b9Epv57R)e)w2GL;wnAM=Y4L{Z-# zZa#t~iBS(A!c-;d0{nxitqUhJkbb#f5X>zjbq3u5Ti)2P`y z*yG@5&bBOXKwSaJq##rpca!rLz>3)w0!fzT`8=$ow0Zw4z8_Ns@N%Os5J=1g%GDMA z)L70RJ{egJlJ{k_A0s*O3~8HR{pu|%8Q2;Ye(uOZlua2uMo!4)0Sv2r0jSqV;}_bD zc9^hNLIc-;-0Oj#5zb-x5zq;qXBYMEX8VDP1#U}XMkPXs-5QupLpgy4py5g~44nA`~e*JRIyT!@qqb!zzrA&A; 
z{1l6{z`C&!0b1zwA8J!s57L)7!asCY$EzRTVih(8kE;fWY3WpayF&X;l2)H>$Yf^Q zXG_#0f)=_hL8IcXA;7F8cgI)kKGAnam00byK#D@!{tAOs|EyI z^`y^t>GL$x2G`reAVpSK_ z+uL?_d5@%WH0)rksVmI6oKToF}PiU#R(^km&=z5%tY;OyzGyC;jCMZ&F3Vwm# z+K61={*s#ZfHA})frBwKfNRwR;D-ROD6{P!)?@0wi*maasI{k*ijaw+8z%P)nQ=~V5d98D3szdKU5J74LMv4~fQs^< zO7oeTwl(8;y{m4YGTh7RsN;j%hKgCY@|4^>^b{EtaupJ`=+z}1#sHxxyO4u@t5yHG zMDLf^pb+@TX@L?L$io4%Ud@CHm|9>rJFmMW*8e2Xi6*IYFKBI7K@yw+ff~OlHJWUI zx$Io%VsW^bI)9&lYIk@td4uPMTjgy5e}k6r_;(jRf?&)#8frQrw{wE5)t2 z7b%h;1zIG~;_fa*6Fj)Pr9gq;L3{F>ch1%G%sF@OMJE41=9|6uTI;hE^RR*I0Ew=P z3SLe7Hq^T~eY1LT2PD?Gd&*7xi8*oEI=ksi%jD72m6P(|b(H&q`=Ks6u6KN&mTl24 z$4;DjY?)pT3n=$3u4cs7VB5juZ`=1cVm_9)_5DWjmMK8 zbD7S22=DMaPT-hnSJ+G-20B|vyMMN|UoO=v&BCfj&Q*PuGgH zv$E!Rx8l6hf+QEWO-00ut-ThDIOJOB&ylmC_OCyrlPxMqm)Qc<)oE-X57n5X(Paff z=9SQiTZ_{rupC4*&&I{kJe&O1H^xZK-r*qSuDRb?=DkvL3vm6>vcMLH2;sWup|$p0 z=c>uyFUu3%K+P;-MY)EMK11pUDpX;nlGqbeq6aE5o;82zhz?lWHmX?o0%Sm@u1X8s}i zuuyvYsd87`Oc&n5TJu-|!7M((ed+DF9Q5SSa90lLuajTC?wZkMrXq4LBMs>`d|p}R zw8gXTxOtLZh_DDFt0>WSS3S_@t*CpHrPu_j--vWb-quA{0q^9p{ElHs&jlH_d}COQ z0e%s5OjR?>Ae}M0^8JK_Os(Ehj1wc=uVz7?m)6JK7z2nKt|K0ZU_8ziv9*IXV z^xw5J_Exa0gnmDWKYPvdH9iTvAgkyw+}FYp^`{Gb4tPU7f~gXgJ3+|Z8J6Dx(B=&1 z6X@7`D{8*-%SV3H!{MSttsAHAkx%-t_p%tdBhaoJ1A!V_bQ(h2IE^?3P$j&NZ*`WF z_T??ho?N-cP$lHu3Du+0!Zv>BTAs)Zx?alW52eL)eNngy7vidMRf4%^s25ijfi@c_ zz#jEbRlb)kq2Sv_WE*gAx`*Y}OGhfp2o#4SNaXN)piq{Ur_v3YanH zH?5grRdZ({FXbu~0D@92ok-0oBD!Fl6z@(Ref_(w!r((SHb=88FMAupbvB$i5AO?5 zGYzMFo7q;3J@0)JoVw`SIc(gXtq295V5KElFTcLoDhioxQ4#UwZP#^5$NW)SMR<)y zTYk$Tn?f;g{<1&@6s?&I{Ag&QA=`Q)fPW0vHT>1W#g=B4W)*2Q9lP!9`UkMMc&!y? zrBZ40V>d9Npzh571T$C6naq*#41v5;fVbb-NGyvB)Qk#4R!s!d!_s8kj~_4%W76e+ z#8%lR_}+KaF3}H64Aa5HC=lw_Nr|8wZ41!GkM_3(zA)taQE80^(M8`@Z;#&`nYvM0 z6OS)Tx>q3P{nio8f`rUnWb>;})sBT?HT)zL3|j&rV^1s-g7KXlUw51!qAP4w{*H*h z8>Nv^rMIaBEr7mLOz<*_p4fNIF%q4KM0%<49CEyH-ujld@O)hb$cEf;UJ1Z5`^9eh zsT$BE>s#fEE|1_$*S3(#;gs0%ko-|9r0YzdTl}alcKg2ip3mH z$TU;}?C1(zK4ehDV&E$3hZ&+G+gKkFw-vsRNDP^>Dg#fr+UY#m&T_fOz|Hm0?F6WA zfROP3jemg2d54&KfZ6k~h~-Ce-<0L0XXLL23#8={*g35vTK;d$um@&Ag-C;J#ywi8o`vXp^iR1 zP~6!-OG#SqM5>8!7Z>PT0{pFA>jQ+mP=LNA(H}!7%N*}{+)JUUfl@OsdF4U5E*s=U zRS?=51y`EIL@p26ChzbHhbGaGesS_cmC8p%6Rar@)uGLAWN`6X^5-X8a@K!8Hn*H# zhNGeeo$CWQ-kwmkcs`FW9-!g#1Nk1x`~Lj|EjVHA3da9t9Edwkb*3GYnrYG;nYE7O z`K1uQ&F><3QD*T14cW9zmf-1f8r#+Vr z64!?)on5po09oUmbZO>^1sf;`tr^-dzFfj3Bg9r_OnsY^N%zYi%$lc%PX{F^#~L$e)Xe*b#Js2%Ut9_+oxRGV`Lwd|_8;ya|aL&5`^K30FH>`dxvs)2#y5n?+Ofcf~pH9 zvF|==ff|eBi1EJFprT+~)+|ruAE-UxF-X6jJT>468fsrhEJ5}AuDW7YtvgX*%X^MznAXS z@pU-ZO#xuN+G@_EP>nL5{4;l1emOaWc$U7n1hK;mbF>|sro4t~ClvlmR_j!!%w*uU zA4c}z?vUihngX1Bm+un2v z4EykY2#(Sg6CQ#v-O06H-`X`k7!8I;3@)MPBu{!bW;;;7`=}UMrz&4y?XoW-?Fb2* zS=|w`{ah_J93F*M1Cv&nOR_E?jt|h zYQKK%?-YxgLBXi=evk}l|G+!JWo-C4#q860_oZbWpa5v*;%`ULORP7s@+=R>??^ol zk1;FaFHp5Jz$vBDp;<2CFmX;^@9+F*mNj;T=Nh@22;E!#ty@pc^{fBHzfdV0h&!J$ z$NWVFKmJEUi$&{=;gSQD$aSXZ!s$TQ`(Mem{kAUu z038V`-R?V~xC)3-c%}3^(U!g6G=Q(mU14b2v$YthEDQ^6=NDT09OyM)Z|eK}Bds}% zd?;UVJ+O}wF+EpqjQ&qs(%q0G%5_itvsAmb`rbdwM=n+NscBl(g|iMKaANgtojmd@ zGsNB0g7JjQH;P2az4om_GYhl$)WyMSOOSY1YdiVCoXL;xZTDLQ2qxWrZxg;RBK2eH z(XJ!Zxh32>TbhEWmCK2mO4@UHsA3(7NyQ)8ao+m8(D~i`#VL)@SE)d|l_coJ06Jwe z0UEI-ZAqw-x1vIk(o2zEET6!Z;xZBMA@^S8Fay`y$KTAPlFwF&9Fv|(vk#ZLs>FsP zelBM!P95d*@Nep`_VO&q3UIgnZf$9*cX;0D!#M!hlI8w-GToTtP%~X2vdeM!NNIkd z>!>G8(Cg5|!CQ1H5t~JF$1~}H0VH+gtUvph?d6kPa*qXB_)bPQYSJ{HKX?|o10jIQ z3I#!ED$1~Vhk{y-6|Os#yKcMO2hTS{-_XuLOE5_X1HKB%hRs1=V~KeznDEqNq+CM< zj{)`N!R4^^DJNG*&UvvjqmaG@!;sB27LsvIJwz%^rNVg`_+Hu^?X+V!7mH{8b+#d{ zLKD*wBSsmEzpZ2HA-Gjde*r2a@(iJ_?|5yA`Z>X4@FtsGM|0|SoD4Q{Nwg%}b2I0+ 
z$xE$@fm}J8^$!@vwVuq$!W*nV)_u}39(5EQB;$c>Ba5{xoo;krKtUz*pB`Az)L7rz z)I4pU*9s*L@@5sh5&-pC=~t26_7UZCKTWVg)Y2V0 z+o}5AxyG8*jl`a9cibY0at`F*atNv0C2|%># z`tNWVvvvxItQnZCUxVLEk)SH3-_z8G@c$$NV^%_uAEdV%4#3jsrB4?ppl}cGD~+^(hY|q-Ab%G&c$qZ95r(t8 zc1aC5$)@3Ao6^43LwTUJM8zVEA<1MZr;j!#Hsn)%>+jEolOphWJuji|H2An$(>&LW zp~8BH6tXv%zM&eT6RRcF^OBY=VmIM~+&iSK3Qo)=CHzh_nMiB%vaCwUt@7rJ4&E1z zn|!c&T*?{?-ZdqACS$>-E3oOyIeMpVDjsSxRI$Tn$`-0ry_@e!KI#!iTvyOTAxZZm z*a|8YaR6-?L*E=)kJ~aiVr2-`zSE?W-kJ!!S zCzZr&fz$g>zO>48vUW$<_iRI4cg#A=exc)ZXLT;Q@xS%06R7uoH1n3#^!F=DX;3w>NL7uwjY9lMP6fw}Y$geT#ZONl^bMvM9DvMBU%>LMU7gvtbFR2W`UR-f1-=K?6gE6^sy7el5puZn(<8ff z(T;wi{0>A+(c(1=mLo&l)nGf8lz8zwEp$c~?xf`TL2e zgAI!D_qq|SSP+wTwwP2R5X(;puMDtv?h0#{#*|Ar-|=t$I4^t4>vC%`F`sP0ZdP&! zmiyJ=*>$C{QMHI|GO^yc%JSo?eRjmQw03V?X8NN8!fiAZSlRs89nR(-TXqrxA;6f7tyYBpn*z(&l3zOm;Un(j zGp=#%Xw9|%H|%JWwjhr$?nd_*yk?@6CWiO>=b^@v7whcQK$!n!ZpnKZucw1Nl9)-X z004Rti~@0PS7`?5en}GICZ>5ruyoaOd71w@{RxEt_R&_#u}LQZ>gw(VBWL|YVavy@ z)bB%mHuy7YEeO|COz>6fKI;w_%8~(pJGGKG)2>veRHPuu{G#FKB1a1cJr}ni;#Rce zR^ImjV=A0=dLRqrHU4||Y~x!Z2$9y&!uz$-C=zQJ2jG77&fieG>`rqknBb1DW&FGf zkLpA`z8+Rr4ORJ^4N^-A%*}nD=EJTNd?XHTh(P6R9R8f@QSq;`OZl3WdR<8eux7=e zQc2`n9{AX%3cxArN2z&lfsL`pLthE4tmEzEuU z>K8UEi7E_QOsU4{J+o9dzZh_bq{UX>^x*eI>Bs}X_Y$(9q7&$|-mBL1Nal&nRvwdm z(I2S_9M}?7lFGSfp-M*=qUB>MPAq{bld-(-+^@w3`=_{u02`!NT}e?maEM(!{5HSAz_-w0L!}p7rHP zpTe6^D6D(gKe+x04GlY&$%$IefsKjyE(CiS$UxbG4oKW?+P>ne4T8&gvorKVR5==W z96YT)^g)GygofUR5&ufGBcx#OGX0q&xk`P#w7$v!9zzJ0a=`b`vvQ;zogcM}vVSJE z_a{W-O+O+z$UvaW3fUpQ3XAfd4{}j^nT9HJ$2+@274RMtem<`t!C5U#!L;SZiv?`Q z?ifNgmz80DaMy0yx%ZgoQ6$- zr|RnH$64zEbA~7OZzp;0)U1nS>$MyD^BX6*`#Os%JYXJ{8YV z)s7Sr&_Os?+SA{YsFxwDXei(KF)D88$EFR^d$Dpr&d|E1%#o5rp^AP8N}oR3fIk@> ztFaS@0Rcq`aeO57_9q9BQ-|UwVlF_2(1T<3@Y(oG7gIbmv4>K_|FZcVjMNAH{v@pk z^3nv6S!Dj-bSK(xHr>U2#`G2Jh1{4EdCiOlV)X7L4@_e<13%`G_?b&!tTs3lZ{yx} z_aL%kPl&sU#{nfG9;>WO81t2YGy6aJlqg}wC5ynf+dZU-pXr9Y*Fz)6# z|IAWliHX9Y)$AE?(KNK#kJUvg;M7y~?VCfqcQ`kgOD(*p`5yK8 zwg5$f2kF~PQJag7sVC~s;G`iPqQfCRY?jLk7e17yheTF5iFTNo?2GlMEBnoYL~Qk; zev%mR8XRD7fB1#AGG<~=9h{b4@V&VEg|dwQ$$2GfFZ5;oxM+vdXz<7tb#f(Zd`jqu z6xjc2FXLE-q=1dU$6IFW@7#+lH@S_sIMveY^iwu@IUSQ;Yove3_aAtt7B@Lrtr}FE zD*XoX8gY7IQcv{WQ&&+kL%1q@IgkUO*${(=0U7c(U+t3(4Utcgo zEL^MPdw_qA=aV16_CK8D_9?k`wvjZuw+DNgS!lt{L}lWGssm)>0kTNTlu?NvS~TUW-oP%SQx7S&E>aP?=te= zG_mUWym;jWWCpKzsuN7(u9J$TOm!$fPojVg`G^DfCC@1qUPqk;{wk(R`2*rtwfUL^ z8xcH6TEZw6n58fA_dHdbFKDUNhw1W~4B+ z4qT3Zj-x-o{G@-{gr=z$nv@SV8Z=e1lq6U-U&c?BNDt1tv6ZPNmV>c z+T=fIC$hjt5kJi!W`?eKebNxZeSy8!p4x`qB>GMIE-j`2Chy;xN}|KTLl0b2d@__o zSyzH0jdQv|g*?fBflc;!#mvy01b^P6$o1RII?E~? zSD}W!a>g|KA+Uz6W^PV?0ex0(aE|({X2Lp%>*LW;QIz}_(&ybod+2}wgD&vMz_`#3 z>9H+#P612eC!fYe%}l;LzuykSfIN4ZyJm*7L=f#+013%{`U6X}A*MK-(&Wypo!aKQ z6#EDUSjxJ+tc3Jsq{p-?1&F2OEvgWb3X{}JblI}n1pc7}snH6TuUOqR=SQ9QQcBde*j^qXW|>YF#4 zA820*d1wX+&p}@*Ye0K{I;N%18W^Naw*Fu>1};O|BEUanuQw|XXtNIsMs)UW8AUJ! 
zc;_+!hTrdmXF?kJU2-#arF{gj>D>S!F@f+-ziy6BqR&21$`hxn#_%EJNNq3mdwKgU zR<^8@wY^v#f=pfXMty)9_ntF@0a8D9VJ2EjAd(P&A4^iMYtC$tI+1dEpi<%K+~eCT zB&fh$yc4hP1UJ=n<50^qL-Kv6X)#e7+1bl=QYpwu;B~QV8@pGqQm+F zEuxT5e~#mZ>ClDs=`J*UZf?qZi4u&Vv(){j(I-tkwzC@}NkOra%iC&#{^+YXXX4cHEF1A zo^ChM3US8V`$n<{(KmDpy?*ie@2&hsz1Hx(^k!weZ7PK++vvWw{3#X>_5&D-;CcW6 zoHEu`*T~b#900WGv0XytiN~^lTG!4#@RnW_jb>jr>WOS+LY!WHbb|AM5w|COPH_pMuO*8b`Lal4h6o?lSYQ z&|K#0aQmK4^F{K`eFH4MDbUjA@_R_#&3SSsXv-h!U1AqjNaWsPKm!bnsSf<=@CPjd^~Xw zYL7f5OGWv6sR%E=81s*qp zQ{_i?OS^fAnt!^uq_^;LD>HoajBOJ15+&ckAA@tDGYH>|Nwu%m9g*USe+OAPyO&9V zF+nqegz-SbqNP2;G@QtpR+hDj2(VPF9vNlB@X5tL1YZi_N+QG5 zQ+9ablim7=@ex|udJjC!nccIM6dMhr0-+%Xm$XyU@72`yDs&~lia@-d2(6ylPEPv= z(l|M{kjDDo_dHR>BOP>r&#C*%hw!a?&fqgLCTyP*PvvD&eBKL=h>HT9c)$m-^(-Z-`vIEag+e90rn@tD7PS?n04#f_GPg-v(kk2h;R_? zB`9?ED#P*%zc-yjI-*tl5jH73(@PkQcpXSp%c=tYi57XsdkhvnpbiWCU0;!!rPKGw zc?__Q!qn54(O$X|qs9AZHQG$-t6ee-pvJ5cx=CU6)}aCd(g5!6PKJ-P=}Yyan)v!h z%yYnXEJYUv8cy9MJk}}&gz62XXECkB@6m##vB1}@WrJ5mA8sB={#C^5U^mnJ=&NL! zOg+u(I;*1pd8nq-O-LD0Bw3l#`|9XS!C3Usrcp1U{fp3T4@aQ`^%IgW4cI>R6bX;v zo*0Fi9nVAB{-WaPy<(8*n=B*Tg!bRx+pIbsidOUU;-&*Ne9HC>$$0o^$tl|>8pO7k zOYk@j%q}A<&`~6et42x=z%Q`4mpp46N4?yH*jY&Wo7vXJpVk7W_1nqnsnYYFQ}mzV zss-!!27`Bwr3$>&YowuO&*RBevAd5140BM0?4O}Da|Ml1X-Z?PWbKZgFo>(w!LhmM zjp_a@#KDt}a!eWT!0aJuQ;V?T=B-sh?6*JbTc?C?wMtZ-aBi>ygn z&LtxUTG?-dICXEaL#JR{lGtI_9*!xf`Z|?O4k_VhPnBzG97xs}qno=W&$lR-mcP4J z*o3b`coOoj{RR#p2WC!;cqcq`oAvEvZw|lN?tCNe_=9VQ-r9(14>&ZoS@0b*ncZd- z+Xw{$C76@Y$Ob>lUVNubbkCJZL1v7L$ibHc{3re9gj`4lWqYEwCDA{OyeDE68`4S* z5qh^eEv_%?js({k^KP!HM9~iPO;C0X)%{&bnNfC3pQxNIuB{A{!od0YW{E{##~AgN z8P9LBL%peE za-|_s8$v=)$N}qF7jmg@WQR>OkDJKh9K$jX&pU)Kb^ifUhS8M&0Jg^%1LaN9-o?(1 z-A3_GNx=4dCYOf}P^y3Q|CAWX?(MYSQULxp2w7FDowIO=&#k}Kfrk_?Os_18bgG5zEX4_FJ<|Vj^TA2Sx;wK6T#JWelbLbvT*`8Guj*R z_x{82NfZTkyXBLZ(E@T*^n(nhm9D<*B?e@#aQ`8Rm-6pMAq~H&)w4CGFbpRA%(4fB zHQQ`KRIe|`b}_Xsm9z2;KyyW@_b0QR-%o4{Gcg=%qd!kQ*}Yv!q_a@0zB^~st`gmF zR^V0s5E!0%$Ca*%f+Tp|4rYiY`;7$5z|+V=2N}-BB(~zcM}wsEK0Qr#+B=1^pwu;37#Xn)RA8} z`UF50f3Tuw_hHtg?OSNUrn&!cZ{tb**J@Ij3Z)xhD=S1{RISi#G~g4T4mEQkK`_1? 
z@_e&dZH*RP;59SlJxUDQoBBKp)+sftksg%M=O+1T!5OC#HQC!!#j3b)O`6c{ZxbHm zU{nUJ@U}&&I=SSg5Qdh7Y{*!1RDIfP*S4zk)w)&EnaBX%4O0J|eNijkqMaCEEnTw) zJl1=U{(7S5r&3ol39#x&Mh%(BTuLlW6&2{t-MtOIRCBM4(xx9D4 z{@62qK5F=OhOHIazqEUQ7t?k{csB$<7z`phxyJ+PN=(d5RM~}ic#h%&YsE_5Nnwqj z%KF|I=G<|jF&#YA{2Oc3W+Q2Z`-)A!r)bz_Z((%q?_a-#YAQXpAv1@^WOhRzO*6Nm z@M1G{rdF|d;Wr*Xehe~)0v3D2#-=U``x2O_ebak`DaRGd-0A&;zTPj_lG^yl0(y|Z z%e^tj?PE%<(u3HH*}}@SJ(;i09n}OJl|jldtgYA!$Rf4HiDJjb{TQmK#ITpI{^NYDr!4!?)yA(q0+%Vr z&y7?~$)hjz-x$rj>&p!xUG+O2w--Gv>T9!UM1QR^fxGVfauk0q(oIibgaw|j%J0NF z5Uc%Y64|i(Ri@CHfY0Rb7`f;-8LK==hAUirp?P)t2uqnehxoTAYR33|uAP3Jws8Kl zoZXM6Um7Jpv{~*1<@T2vgc4PfzMw8M3Jw@zZoCjKh3NzI*Q>lLTpwW%opEK)BZi{1ziZinB+4?DG&^ z5pN1x8_BuBFgmXv6U$s5d}O<3(<3 z1ToWT6KvgLsY=CFM{ymMX~TTLiLx=w*qm z#xbqIOFHbXT0P}}zCFsmcwiXbJ0Cf!zU{g2@r;;3Z-C+xKdNY{Apht<^5*RD_Y3$GBZ zA2iRmnGfULbIw*{*-bLdw-393W6vn%3BzNwJr1`ES9m5}mgf1PCByBsT~Yz{DMCr^Bz!!ebM zI9Qu)l+h)+o_wFK(aLDzftFm(#@f0Ln9Dh^5Sa325Lw>G>71esHG35ZW2Pr0_6fF} zt+PiTZtxTPI+@hYeo5Ev}?mWGcZI?V4UIzoHR z(P<#27(i#itY-~YVEx_UjHcFu=-fj^&AB!{FB`q~jqR%*`tFB^_c*Y0{!x4Hrqb7q zleIp&Lm$m1Ps*j-C>hVwTdxRLA|Z`k-R2TqIN;gTYzOwGw}aJ*`OLCd_ad*%X+@KZZ5)QD;YJsSVwJH^Ie-HU ze+DBAg%&=wX7}MJj!L9s1GyMHgwerKDf5n=!i@!y9j!awFcwn`L)X@w+)g18z^$0}^rF1d8oqEun#vijMJRm%d&`n-AH6zxpbGKIY_EN@^f?NY zGj)DdiaA~4jYQ+PH;vGfOVj(@cvo^5%-)%9sN^=pDJ5Txt<-MfKA9i`ThbE?DKU1Y zj(?)9|4ap1^v_BlTMM1z0EkchnpLd9vTwQa<%EJI-KM7{!p4n_iB{~Og2-liCpU-=fl*aAY*VSxZw97TWOri~5 z*LR!`CWq%5{mHP8^d8mKu5a@6pEUm#PiT`2*xvSr0ZOYY5wBfiTZxm3@kN8ISfQ`5 zt9}d&tY>*Y_C$PYB7s@r6RfHw1hHX-aWV5O9KfidQSH$%x##17YF4U=h@w!R%GyzgJRzkl4kQY+cPc&|0 z1zUaGDKMM1$G10evMJ-K;E}&4n)qUB$sy&Fl&~q+q4(RTonp*MQ@JodSqI9NAzzN} zCaKQf{4t_53JzEwU3@$eu}|aefs&NtzD*in>2AME-!Z+qaKB|tT$*bZ74qdsB!TxA zbR+GRBBfrafG_q=WnrsjoMw2xEO%5cCIormv?$2D`+>$E;Y zIv^wNLbc&q&OiMgTB?-Vz&*1kt}dC!iFAN0a+{B1O7xOu5Cx+((m-Gvyp#S!wkX4B zP03Zx`ub7vDv8#m@NR zw(hHC&x^`{O$Z}?9b?8Uh9*~#IC~s3O+sq%(+!h>x?W0FJe#fVe3^s(#e=w4^!h&w zb>|T7t7Jw0l+|w}Dbk+xp3rZloz{G_u*(g9pTf#_Zow#QHcW65lKJADoDwr}7A6%@ z_J<_YL>BgQy+de}GF&8Ibxa=194y! z-aIWWNd($c&_l2wc;;^_>%0T1;qbEE&d5fZS;tReJp}TjC|&@H0K~(EpuV4IA-$c?kr-E1vMnz5jHpviVu*eV-3 z3}>2l`o=EX{jojKZ*80>r5mrDA7f{oKs5zdF^|&8*cHrzM@ndEc?phFlqsZ%tLCEc z1-qJ13$#-3q0)y@GE*nCbgyxVG-pkPcB4ALP5R=Fd$>NVV)%s0uVPx{6+ilW42*cC z`E}YA%@$OadlH=I=^5mcw|t$b`tCb2h3q=iJ#uBXx#AI`pDKsK)kdJ3GJl+X7s_sk z6>Nd)CP^}#1yLpaF^#tCxxF=*-DP+&AdY|3i;WH&^V~D<{JEL))M2s${8MbtC_IUn z4xAO_A#9cf6%6Q**Ls>y>%~WSrg0h=y3y%EO@ok4z@Ql@WP;X}>-QR(y_*^J8}0Rl zN*wUcPdUb)hH;fBg z7gd>l<-!PyrS;+7pWW<1lYF1WWt9Ay{(YV<44KoBvL_cv+d@X0t{lfk~wa)d8zw zQ!v$-Owl5)d8AezKbit*OM+fsl|4Ma&kuYFr3RmRYc?)v6;uWqC;^1;Jht@%g61F?|THs}zXt^LWM=X|h+Qqg(|V z-3SJ*qt%1o&q0q33^-qQNdosC{O1}1gC=0>f3E!JeIdr*wBG_K|2II5<59-{mPZxAx;y>v z1n~d&F%&&9e0Vzyx~0LqAAC`*CcB+n7y@#OJ%)fo{NI~1KK{78dw`y(WsnJ(@y-Lc zciO|@XKjl<3kFZp>)1DhEB^wrgYhNgKJLQfVB5<2TvgOb(^<7ivT*gfr3O4&S>dAE zFLKzh5D5EHsInfoAVVLv*!e-v@B`a&yFKf-k-gDZ3n4}u>QCi8`_nUl z5xcq8g#HfQfZLG=u_?W)xRWA=ytp~`&)a|V94Bj1-{;D2$!z7AgpV}MD1jHRUQd;? 
zf@e1G??*K1xwZ*k0&Kw{9hih10=C4U+tWC^`gDq)(13al*oYG+g#{#**h6&sRe4eo z6z;L`9zx$NlMFvUX9eR)AQ@V!@XdQ@Joz-x!2KnlkHzIR>W>O&ha~KFFsm9DV}B*(i+%PM!og--hc<|KfH{mb7plKo?5?o zJDLWD?8f;x2oYqs)EW_Z&7iQVtUC>cu^)sF zbjf>mQbBu5V)zc)VOuicxNEbWbu;|Y(I0{=b=tlz4GsA5MVYW%MG3A!inc6Yd=k6L zA#L{#5RJuRyBdlG&|%rZaOH9e$OxE>gGZ!nM}l#tWYmg;mU%MtKI`o1$h~N87CT_( zP8tfuM^{qSlHx(~jWBI-5))o^56nKC@UQOdaFTPjS0J2xMjH=lugU=x(0 zEF}-wmMpxG!Qh~`MaN5?yykk@wMBn)fY^4-L_?Bt7o(0V9a^>?a#(VSo0O+qE;J0* zlqcteJ_a^#M@th7NbDUekJ*}uso+&v_B6#zYd)F|i4xpHGS%~h^K4*}>;BIVD|bDBg5>PLoFnRD;e5ii)InM2SksFABG?vq0sp-xKZ~69=ZJnj z$&-&d%Ge(OH5^H7cRcJy zscG7u4}7R$-?Z-Y%ilSg!t89mq1;Wk%@YnV5=gmm>e>7V z|M=qOLkG3aoQy;~dz?wbvaVCiWPAzhRD;rUzs4PUu3y;o&@Ul5yPRe`KepEh1IeoP zYAo4{e>ztlCT(ae5b(X`wys%18|EABz1mVT@)luq1D?rayKH4)jUx+;1MC``$W2)I zCh3JZcm$`aRGU6b9OCX<(=|VNaI}8xxtjU7+-7*gbXZo_x(4(yg6m9BqUZRw8RPkr zF(8SlS2I3jM7~Q%Xpgnw3ZOrP`Dbkv%6+d~>NA{`(hTz}lkLh-{>3w4I-D#|s4yl0 z^MuT{*M`t=#NMe;HbsdXWX?5wZ|D=^{Hlz-!)S$Z&;0?l9m@-kJV-fM2-KzWzvviE zZhcGlMofB0LDF+31{;V;X!u1`dkYQxH6jJDrd)uK1p)krESvY9dgDBIlBK?yNG0DR zH4UUHzThE?d=~&aJ_71)A;PF!9rL8b%mtx6M{|e)yq1~*)!fM)SaZDi6N*?uk1Z-t zlbcU-9Da&zM0E?7*pkI*DYBXdR3mmlbMgVtAED29h{R* zVpV}qbH(jIM2J@57&)TWgys1T7hevn-fGtd+>OPqNQwGbJ{2_uG5Si)ux3cRHkxAZ z3fT{S>?XwD$||`l!|U~b(HTvc^C4m=Q{xnRSL^&t!H)(CR2p%QelEAZqcxq<-)Ko9 zdgfdvwX{du(YtK=NswfvTbOz6nLnPS+iJNXyIAf9t4=rVzu0?L1B;l0)Qrw3A zN){ucKfK7evY5ke;PKQax?R6(_l(e z3$187ZL&}#q5RA8B9A`RC1U($GwIDLE2t~J%StZ~>7XDp^>@^D_fjNLSj1cfzB(*2P;evUSw&^lW@d%P5%q}h zC}V$P*P?vjI?1IHJ?(y=?5##1VK`?X$kWF?zxOdGg|0##Pp2Oq&q#NZv#ekf2L~qc zzj+K9D=G`I`g4MCWyrF$>2Q^4U6x`f=vj9jcqe-R))yE4rh?rT`t%UNSOo1Uo#V(r zyx!CL>a#|0_bQ|@kf7A0)vW7%GZkr?u}?z&cQof81t!&>>yAF-$oi(H3}F53u-zAp z$5vF75F|IuM~vVAA-eH^GwbR_LbkYXkVbBy58HjMa6qfWW58ey10~k2hP@YS7>Q=xzrY_t1@bK zVsgHha`>m-byCb?QOP0P1?|_Z$-s7k442pI#ZFjQy4xX7ltL{X%Sh9Y3ZII zGzr>iWTXkT0n2M4A^yn|A1Gp1Mn^Bj{$Bs+G-Ed3ZoNW7N1XUjgH!=M``mPO?moI-_f8KvJYR^b8s}a zvh4miFyGKy14yp`da9 zrnYA>f!%Tw{x5o?J_*TmEZ{A_3-@%eSnxEIe#&bJL4X!nFv5*|-ykN%!)y}24omvW zoU0E|HU$pMD=!CopanN&wcenhbw6P_PPaJI<#nJT(l3BcVwKxG)}__vy{vs#_kf^g z{tROznP6Yg!{{H{Fz!q1yt*n1CTv#S^`aw_5Cr#{Um_?;X_S z_w5UZq6i2my$1yaqzFiF0hK0FlwJfB5b3>_Py_@*01=Q5(xexuBE1*s(o0Cdh_r+n zAjJ3ad(NC&e&@b3=gz$|@67ur^YG-^d$qmR+G~B5rmg-+%UdN8vP3dhhwTuaAO#F> zjJOWY*tC})asP%r9bNEA+_P&LDrbf_$|Ko6?2+1tJ_o#-k9)#GRhu6i7=JR}+*Mnx z+$5Q)CX*S^hESZ_o!A0H-h&L*`GQygK&;E0)*?qeO2@Qz%X{j>_`wC8^h{9LI5trC zM^yHae8?%vH_lt=m6=eCJ41M#;OwN;SlMP^MAy?sId%{Cv0=ZpXKgW7zc150HA`Cgti(?bkvo$&>2{x>=UYYjfnfHXHF3x2DH-$ zWF$T;cEDd}n&mk5n`s3gSNVQY_Z2_{u{(>ZqaRa(MY`ij^{~GE3u7ss>!0iSrG^AQ zA2xJRd^S!TZbnx*R$Zn6Xuo1%7*uGo|w&KsSUb%fe3UT1YuPf*rS@VbGi3gfM}KEAYqyyu!3iTJK5J zfI(|4idJYSDaj>HWYE^UY8e>RYRdVJwsocw`=Ez#HBF@*s}uLlxy+B*V(2obc;V4N zd6!G8?$Xi{kCp26tTbT_`I`~u9bE|wA>)e;mE&)NjHG|OI+!soE-AcrCt5C|-iaE_ z{pQvwb#%)>T#zAFvJ?ejagbdZt66{bx~V>xBqoZc-s^YI37Roihl4-M#QGf!0v=k}(5o zE24LddMBa2RH0HrKy27cd?G|?hB-=%kzazQ9KI`}z18(BV#EpqWzAntfXjZ*?K+M+ z)C|m0=DMe}+%xRn+`b$MVn6;RQ%yDj?(_MYQXMbeM*psYmzE`!ogW0N3LflNz^YVH z6&@+XokU^0bEAVes!b|{n9UYgO_c7u=v#ORyX-ow!9!Tj+HKXBUXu^-L?l^UU>*DB zEEI661YWBcj+txSTuSp-rUsfgYKZUxU3V9IlW#$!xSJt_uW!;_ipobDyz#mEJ-G&7CmL zgiNDGs;S@FIhMaZa9(J(J6N{n7+NM{hh0!$e!)~i^v8%%KoFHR=YR14*Yb@L+o3XN zJEKxo)IW6hAda5YO{r@iK9kb}hn?s!t^DF-$F?VV78(x`uk@w{ptG+TN~x+vSq-y9 zkTjUd#f*6_8(Q{tybVaLCUyQaPGPyQuqmX@yL*#%l!aQ+i2b-9WclP6x_Ez`p}1gO zu7hJsS!*`(GxSa4r^5Ufl8@S`f3Pw8e=;0m3*+GwHTQ`uU;4cYuEn;r+xyVmCa8kg z?WDN^=zv>}WYw>no){D=n-J#o6&mYOcwnPHk8~RWn_EdK@3_Oq7DpQhne!y~?p_p{zO9*SS1S>s6#`I~%)0%vLQ!?A{e@5ZbLYS?Au#H>p~8kL{Z8 zgz1At^+^#?L+l$<&Eo#l1(-t=*}FZSAKwg+VMR)42dan12f^xHPQ><#^nRLZjX!{i 
zHjmPUg#=&6YA(faU*XiPyFtg5$P|%fC-F;Bdiof8V{HMax)vkBW^&)f)*IX!OaJRZ z=fz4#cjGt5LcBc2@-1DL1qF;^$VreEz*s;Jlj1*=Z3@k1FfI;+V43Kv}TfDJ)=JAaUYOmD*0tm- z43)XwzJ`vKdf&T4vbo_s5QJTeEf5pw3elCiaaD>sI_G#i=n2UM@eHb}wZt0$(M}t5 z%paGRQl2>f*|Hy_tP@>%bqa#?FUIKbUW&0#kQD)-sN(?Y*<+)dew4j+#&Lc8Zsp9* z!>Tl7;nxsw&lhSt7g(A;#x?r8KO}PL))cr9M{eX-baw# z$n_4=-;9lm`){8I8kt4=)MQcRo4Zhagz#Ng2l;Tvo*nS7*)7cZXbhvsFhFNJrBy0S zeb`^_Hdt~ebkvQE2L&(K#Fy(RPaogszR=W~iL8Yiz@Aixvy79T^O3#}NvT9wUJ))S zacJkCDM;H{e5$MAUhgf&cufIl#?p42RAk3XCYWG~R%S$d2MPK$IEodI^gmxf&DZVd81rP<1D@ZBk-ju;C)F{79vJ9(&_ zu?R3-#F10xo8EfNM&o-E9YMtQ4j^+9rIORS4AE15K<0jz80k>xjOq-jZQAd zB^(*WG*!T9h-z~d)?4SMVqQA>(;N9d*ARTpfnlEJCQDoifF^6e?FGicc(eWQv`|IeQB;ibyk)aC1C>`=3_cPv}_m$ssYW z{G@mCUHN5{!q(vCd{PeC2s~T8&z~%MPu{?`baAU4Jbhy6K>Hy|Oph;O^5e}hQQU=Y zYUpao7yBX7#Q00g_qMmrKh;0lsiPB%e0QZ&u_p_9{=2g8TVtKLC+3$= zQWLWRR{znAjpP_jN|XBr-+)ES_%DH}5aQq1MU&rO&8NVt-<_~~^0e$WG$A>c+hYtT z{rWZR#3c%3&;!Sb5XN3FS4s!dquK`o1#0x8&ClKn_vWnKyYw&Ya`E5Ya1!23_Pwgw z9rj)oMesBq?``XtPt6PKZ!6xQImgF-l>vK-Pa+tZJAW$#00eyySg~U5l1iW;N-Y!Uewg+VJ{v zirIH@MTF!o-`a?qhSQa&_wRmoXlK5A`V0rbLLP++Ts$<~nZZ>`| zo1*Z2?{L3*W?+1zEZOC%PxwfbMF1+j?i;QL**cGZw7xOpEyMM4v)+&m|4F?cbWy-O zxIM1zxJCZ${`xq*PN27!uyw1XBYSe6pWaB5K8q7jS4|Q$F9fr~+-r)GNJfaK>QHGr z<>hEik0mnLyOj~~lA=M>qFH*)#s}P3DdfE+hOblio%fMJmS$`A!}1MW2Q4qULJW8O zt~OPEdj{#Fy014;4jMVcY<{FhL~kF8k3n1d?{c=Nej7R=dfna-XGgai4rI<1tP48E9-2Hliceaj$Xlrk-cPa5bR7>y3 zNO&iX$19)B$n*N_q*p`d3j1PK%DetRs$R$0_)M+dv zc%6T*tY~#Dy8H0-dKz04a}#B#NPwY5QB{x_uB-6P*B@gwUzy@aMwsGU6~D^?*)`h? zfyk&8Hfuqf$0JRo{p>ciSKrm~T;|kYlBi%-TZHpw<34N1^32gbalewMYMn(LvNf*c zgNo4gv+R|Hu{Mmus=f&Y1@VhVO>Hdo(|1R2YBYuuH}i;auINd zxAM!?o8epCkf*p%dH%gR$j5$3n5g|Vs(L9 z_nu<54*5FJgsKN2A2hy;Tn8_ z%C3qbM@PVUby%+OCo%}H3zwz!1M4_kU#pjhk8DrA)udv=a3`LYYqlq$-56BBUCkB3 zTs6=wtDj4SCq4}UgB(p8`iF#{d^ww=USjEAk21Kv8OUzerV!Zzbk{z4Kl5UFD*#wh zjp)|br#J2@H>b~X^pt2JT(%Qei(9K>a~RridWieX_OqPCQ&z6E=5HX2j4%^Plg0d` zAH%oaTPq?*)kbyEd*NX?wyl&Ce;%&zx+d?st-EXns{t=hK-oJxZvsg79F+- z%gA$`xCYl~i`yB)lyNYNtu(!;W#ZD6np4~U-IC-SF-NI^Txy{4oXRrJS=P}enpi#c zK$LSQ+$7Y=8onZP{=CI-0s$P-_TKi!TZ`sCFlc- zj$5S?<6tsm^6^|=PO(qFbx(tf9P6IQg3-3xSWm;wkMmc; z>%`iH>Q@DD+n(hMj?c||UJbkKb2l$g$UBVR4jcPH*`PWZksI(+qD@n`4l7t^zU6qc zskW|#agL^e*_CLD-YIrfiKM>_we|zu-#%EWw z&W)&FW7|l|&$=&Q4u3~)9#AF+sYRaJac7bUwu*cCoRT~=%_W;o;lnfzAf{fDo;1eB zjOa)~G(51pJTPulO=&9*=G8`7TdDuGvgG<@PEs#^`O3W}$_Oxa2H$Y_?pK zQgC$i;vHCk=3^H7Wxqz=5+P`(4v3<_Y?;G_-FHJ0gY0IZiaj9@2xcHe*P=};5jJng$qp(r5 zQ*Y^RV~7Y|qJA=j-Q4efx2bnm`wyF@hQ|6R9Z9RYuOk%hS*m5RVs0iiy*UkPeW$WV zAu^b`p4M_2KKlTZ%;#TLLp9y$?@D{tHYQ4z!RN=1OQkIa@1gu}$g(_O-qKhoHQ|=T$Yq)FipF&mJbr zsjtkYc}~x*V-I-^{Mkh)7P?3e>Y0T`P98G^5v`~P(UXAWVeqi(JXyaYkO%P(XJlNR*3W*B&rLg8X+rgQm<@waM6 zZ-yCC9Y-FI;)VQpf)UG7L>~ysTW@p(l-Aw=+QGX3*ymr%K^nj?p13|s{Feg4ttKFc z2*AKiHsg|kv?o(+$C;fHc>E5-B~X0ZD+!F>R_i#I0@Fg*kyxWYAi-Y%0FpB>Ml1n( zB50Nf?}yU@Hh7f@dLfNIKOWOe?TL$5F{HE$GvvR1`O22VExJg zxGS%}6+V^|Yxqa5e?X)_VPkmVk>|BV0TE15!mHb@1{Qz#+Gp4T8#l z`2Xo1JS`cZ8|GgCwEk#K02lR_1H_~f5EB{bWW8Sm@CI5jr zO~2KB z-TtP@!rEYsvaGxcns;zfnUvUr#wjr42yzYYgjFDXHR=mkG~dd2+vmc#9>!P1&Ka1! 
[GIT binary patch: base85-encoded literal payload elided — not human-readable; blob terminates with `literal 0` / `HcmV?d00001`]