diff --git a/examples/add_configuration/cerebellum_segment_options.json b/examples/add_configuration/cerebellum_segment_options.json new file mode 100644 index 0000000..96ce75e --- /dev/null +++ b/examples/add_configuration/cerebellum_segment_options.json @@ -0,0 +1,10 @@ +{ + "local_linear_register": true, + "non_linear_pairwise": false, + "simple_fusion": false, + "non_linear_register_level": 2, + "resample_order": 2, + "resample_baa": true, + "library_preselect":10, + "fuse_options": {"patch":1,"search":1,"threshold":0.0,"gco_energy":null} +} \ No newline at end of file diff --git a/examples/add_configuration/cv_cerebellum.json b/examples/add_configuration/cv_cerebellum.json new file mode 100644 index 0000000..56d9731 --- /dev/null +++ b/examples/add_configuration/cv_cerebellum.json @@ -0,0 +1,3 @@ +{ + "library":"manual_library_cerebellum.lst" +} \ No newline at end of file diff --git a/examples/add_configuration/manual_library_cerebellum.json b/examples/add_configuration/manual_library_cerebellum.json new file mode 100644 index 0000000..5abb6c7 --- /dev/null +++ b/examples/add_configuration/manual_library_cerebellum.json @@ -0,0 +1,46 @@ +{ + "reference_model": "../../models/icbm152_model_09c/mni_icbm152_t1_tal_nlin_sym_09c.mnc", + "reference_mask": "../../models/icbm152_model_09c/mni_icbm152_t1_tal_nlin_sym_09c_mask.mnc", + "library":"manual_library_cerebellum.lst", + "build_remap": [ [101,1], + [103,2], + [105,3], + [107,4], + [108,5], + [109,6], + [113,7], + [115,8], + [116,9], + [117,10], + [119,11], + [121,12], + [123,13], + [125,14], + [131,15], + [133,16], + [135,17], + [137,18], + [138,19], + [139,20], + [143,21], + [145,22], + [146,23], + [147,24], + [149,25], + [153,26], + [155,27], + [171,28], + [200,29], + [201,30], + [300,31], + [301,32], + [400,33], + [500,34] ] , + "classes": 35, + "linear_register": false, + "local_linear_register": true, + "non_linear_register": true, + "non_linear_register_level": 2, + "resample_baa": true, + "resample_order": 2 +} diff --git a/examples/real_tests/test_grading/cv.json b/examples/real_tests/test_grading/cv.json new file mode 100644 index 0000000..54e03a0 --- /dev/null +++ b/examples/real_tests/test_grading/cv.json @@ -0,0 +1,8 @@ +{ + "validation_library":"snipe_library.lst", + "iterations":-1, + "cv":1, + "fuse_variant":"fuse", + "cv_variant":"cv", + "regularize_variant":"reg_1" +} diff --git a/examples/real_tests/test_grading/cv_results.R b/examples/real_tests/test_grading/cv_results.R new file mode 100644 index 0000000..8ec3d98 --- /dev/null +++ b/examples/real_tests/test_grading/cv_results.R @@ -0,0 +1,70 @@ +library(ggplot2) +library(jsonlite) +library(grid) +library(plyr) +theme_set(theme_bw(base_size = 14, base_family = "Arial")) + +cv_slow<-fromJSON("test_cv/cv_stats.json") +cv_fast<-fromJSON("test_cv_fast/cv_stats.json") +cv_lin<-fromJSON("test_cv_lin/cv_stats.json") +cv_v_slow<-fromJSON("test_cv_slow/cv_stats.json") +cv_v_slow_1<-fromJSON("test_cv_slow_1/cv_stats.json") + +cv_kappa<-data.frame( + cv_slow=cv_slow$gkappa, + cv_fast=cv_fast$gkappa, + cv_lin=cv_lin$gkappa, + cv_v_slow=cv_v_slow$gkappa, + cv_v_slow_1=cv_v_slow_1$gkappa + ) + +cvv<-stack(cv_kappa) + +names(cvv)=c('GenKappa','Method') + +png('cv_kappa.png',width=800,height=400,type='cairo') + +ggplot(data=cvv,aes(x=Method,y=GenKappa))+ + geom_boxplot(notch=T)+ + theme_bw()+ + theme( + axis.text = element_text(face = 'bold', vjust = 0.2, size = 18), + axis.title = element_text(face = 'bold', vjust = 0.2, size = 20), + plot.margin = unit(c(0.2,2.8,0.2,0.2), "cm") + ) 
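+# Per-structure grading plots: for every labelled structure, stack the
+# per-subject grading values from each cross-validation variant and
+# compare them across groups in a faceted boxplot below.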
+ + +slen=length(names(cv_slow$result)) +lcv <- vector(mode = "list", length = slen) + +for(l in seq(slen)) { + i=names(cv_slow$result)[l] + cv_grading<-data.frame( + grad_slow=cv_slow$result[,i]$grad, + grad_fast=cv_fast$result[,i]$grad, + grad_lin=cv_lin$result[,i]$grad, + grad_v_slow=cv_v_slow$result[,i]$grad, + grad_v_slow_1=cv_v_slow_1$result[,i]$grad + ) + lcv[[l]]<-stack(cv_grading) + names(lcv[[l]])=c('Grading','Method') + lcv[[l]]$group=rep(cv_slow$group,length(names(cv_grading))) + lcv[[l]]$struct=rep(i,length(lcv[[l]]$group)) +} + +cvv<-rbind.fill(lcv) +cvv$struct<-as.factor(as.numeric(cvv$struct)) +cvv$group<-as.factor(cvv$group) + +png('cv_grading.png',width=800,height=800,type='cairo') + +ggplot(data=cvv,aes(x=group,y=Grading,colour=Method))+ + geom_boxplot(notch=T)+ + theme_bw()+ + facet_grid(struct~Method)+ + geom_abline(intercept=0,slope=0,colour='red',lty=2)+ + theme( + axis.text = element_text(face = 'bold', vjust = 0.2, size = 18), + axis.title = element_text(face = 'bold', vjust = 0.2, size = 20), + plot.margin = unit(c(0.2,2.8,0.2,0.2), "cm") + ) diff --git a/examples/real_tests/test_grading/grade.json b/examples/real_tests/test_grading/grade.json new file mode 100644 index 0000000..e750095 --- /dev/null +++ b/examples/real_tests/test_grading/grade.json @@ -0,0 +1,35 @@ +{ + "initial_local_register": false, + "non_linear_pairwise": false, + "non_linear_register": true, + "non_linear_register_ants": true, + + "non_linear_register_level": 2, + "non_linear_register_start": 8, + + "non_linear_register_options": { + "conf": {"8":100,"4":40,"2":40,"1": 20 }, + "blur": {"8":8 ,"4":4 ,"2":2, "1": 1 }, + "shrink": {"8":8 ,"4":4 ,"2":2, "1": 1 }, + + "transformation": "SyN[ .25, 1.0 , 1.0 ]", + "use_histogram_matching": true, + "cost_function":"CC", + "cost_function_par":"1,3,Regular,1.0" + }, + + "simple_fusion": false, + "resample_order": 1, + "resample_baa": true, + "library_preselect": -1, + "segment_symmetric": false, + + "fuse_options": + { + "patch": 1, + "search": 1, + "threshold": 0.0, + "top": 4 + } + +} diff --git a/examples/real_tests/test_grading/library_description.json b/examples/real_tests/test_grading/library_description.json new file mode 100644 index 0000000..7f4ac27 --- /dev/null +++ b/examples/real_tests/test_grading/library_description.json @@ -0,0 +1,50 @@ +{ + "reference_model": "snipe_library/NC/T1/ADNI.stx_011_S_0002_m00_bbox_snipe.mnc", + "reference_mask": "snipe_library/whole.mnc", + + "reference_local_model" : null, + "reference_local_mask" : null, + "library":"snipe_library.lst", + + "build_remap": [ [2,1], + [4,2], + [19,3], + [21,4]], + + "build_flip_remap": null, + "parts": 0, + "classes": 5, + "groups": 2, + "build_symmetric": false, + "build_symmetric_flip": false, + "symmetric_lut": null, + "denoise": false, + "denoise_beta": null, + + "initial_register": false, + "initial_local_register": false, + + "non_linear_register": true, + "non_linear_register_type": "ants", + + "non_linear_register_level": 2, + "non_linear_register_start": 8, + + "non_linear_register_options": { + "conf": {"8":100,"4":40,"2":40,"1": 20 }, + "blur": {"8":8 ,"4":4 ,"2":2, "1": 1 }, + "shrink": {"8":8 ,"4":4 ,"2":2, "1": 1 }, + + "transformation": "SyN[ .25, 1.0 , 1.0 ]", + "use_histogram_matching": true, + "cost_function":"CC", + "cost_function_par":"1,3,Regular,1.0" + }, + + "resample_order": 1, + "resample_baa": true, + "extend_boundary": 4, + "op_mask": "E[2] D[4]", + + "create_patch_norm_lib": false +} diff --git a/examples/real_tests/test_grading/run_test.sh 
b/examples/real_tests/test_grading/run_test.sh new file mode 100755 index 0000000..828b1dd --- /dev/null +++ b/examples/real_tests/test_grading/run_test.sh @@ -0,0 +1,131 @@ +#! /bin/sh +set -e + +PREFIX=$(pwd)/../../python + +export PYTHONPATH=$PREFIX:$PYTHONPATH + + +cat - > library_description.json < cv.json < grade.json < library_description.json < cv.json < grade.json < library_description.json < cv.json < grade_lin.json < library_description.json < cv.json < grade.json < library_description.json < cv.json < grade_slow.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < cv_re.json <>ec_library_bbox_re.csv +done + + +if [ ! -e test_cv_nl2_re/cv_2_stats.json ];then + +export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1 +python -m scoop -n $PARALLEL -vvv \ + $PREFIX/iplScoopFusionSegmentation.py \ + --output test_cv_nl2_re \ + --debug \ + --segment test_lib_nl2 \ + --cv cv_re.json \ + --options segment.json \ + --cleanup --ext +fi + + +if [ ! -e test_cv_nl2_re_ec/cv_2_stats.json ];then +PARALLEL=3 +export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1 +python -m scoop -n $PARALLEL -vvv \ + $PREFIX/iplScoopFusionSegmentation.py \ + --output test_cv_nl2_re_ec \ + --debug \ + --segment test_lib_nl2 \ + --cv cv_re.json \ + --options segment.json \ + --cleanup --ext \ + --train-ec ec_train.json +fi + +cat - > cv_re_2.json < ec_train2.json < library_description.json < cv.json < segment.json < library_description.json < cv.json < segment.json < library_description.json < cv.json < segment.json < library_description.json < cv.json < segment.json < library_description_${V}.json < cv_${V}.json < segment_${V}.json < ec_train_${V}.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < + int_par_count=3, + File "/home/vfonov/src/nihpd_pipeline/python/ipl/model/regress.py", line 364, in regress_csv + initial_def_model=initial_def_model) + File "/home/vfonov/src/nihpd_pipeline/python/ipl/model/regress.py", line 190, in regress + futures.wait(r, return_when=futures.ALL_COMPLETED) + File "build/bdist.linux-x86_64/egg/scoop/futures.py", line 397, in wait + for _ in _waitAll(*fs): + File "build/bdist.linux-x86_64/egg/scoop/futures.py", line 364, in _waitAll + for f in _waitAny(future): + File "build/bdist.linux-x86_64/egg/scoop/futures.py", line 341, in _waitAny + raise childFuture.exceptionValue +mincError: mincError('ERROR: command ['minctracc', '/tmp/iplMincTools7UZrh7/vglhuzobject_0_0.mnc_int_approx.002_blur_2.0.mnc', 
'/tmp/iplMincTools7UZrh7/Or3K6Robject_0_0_blur_2.0.mnc', '-clobber', '-nonlinear', 'corrcoeff', '-weight', '1', '-stiffness', '1', '-similarity', '0.3', '-sub_lattice', '6', '-iterations', '10', '-lattice_diam', '12.0', '12.0', '12.0', '-step', '4.0', '4.0', '4.0', '-transformation', '/tmp/iplMincTools0xSAdH/pXWAYninit.xfm', '-source_mask', 'tmp_regress_std/2/object_0_0.mnc_int_approx.002_mask.mnc', '-model_mask', 'data/mask_0_0.mnc', '/tmp/iplMincTools7UZrh7/l8AeXTobject_0_0.mnc_int_approx.002_object_0_0_5.xfm'] failed 255! +Message: Error in minctracc in file /home/vfonov/src/minc-toolkit-itk4/mni_autoreg/minctracc/Volume/init_lattice.c, line 551 +Cannot calculate size of volume 1 +. +Traceback (most recent call last): + File "build/bdist.linux-x86_64/egg/scoop/_control.py", line 122, in runFuture + uniqueReference = [cb.groupID for cb in future.callback][0] +IndexError: list index out of range +') +AT:[('build/bdist.linux-x86_64/egg/scoop/_control.py', 127, 'runFuture', 'future.resultValue = future.callable(*future.args, **future.kargs)'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/model/registration.py', 661, 'non_linear_register_step_regress_std', 'downsample=downsample,'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/registration.py', 475, 'non_linear_register_full', 'outputs=[tmp_xfm] )'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/minc_tools.py', 399, 'command', 'raise mincError("ERROR: command {} failed {}!\\nMessage: {}\\n{}".format(str(cmds),str(outvalue),output_stderr,traceback.format_exc()))'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/minc_tools.py', 48, '__init__', 'self.stack = traceback.extract_stack()')] +Traceback (most recent call last): + File "/usr/lib/python2.7/runpy.py", line 162, in _run_module_as_main + "__main__", fname, loader, pkg_name) + File "/usr/lib/python2.7/runpy.py", line 72, in _run_code + exec code in run_globals + File "build/bdist.linux-x86_64/egg/scoop/bootstrap/__main__.py", line 302, in + File "build/bdist.linux-x86_64/egg/scoop/bootstrap/__main__.py", line 92, in main + File "build/bdist.linux-x86_64/egg/scoop/bootstrap/__main__.py", line 290, in run + File "build/bdist.linux-x86_64/egg/scoop/bootstrap/__main__.py", line 271, in futures_startup + File "build/bdist.linux-x86_64/egg/scoop/futures.py", line 64, in _startup + File "build/bdist.linux-x86_64/egg/scoop/_control.py", line 253, in runController +ipl.minc_tools.mincError: mincError('ERROR: command ['minctracc', '/tmp/iplMincTools7UZrh7/vglhuzobject_0_0.mnc_int_approx.002_blur_2.0.mnc', '/tmp/iplMincTools7UZrh7/Or3K6Robject_0_0_blur_2.0.mnc', '-clobber', '-nonlinear', 'corrcoeff', '-weight', '1', '-stiffness', '1', '-similarity', '0.3', '-sub_lattice', '6', '-iterations', '10', '-lattice_diam', '12.0', '12.0', '12.0', '-step', '4.0', '4.0', '4.0', '-transformation', '/tmp/iplMincTools0xSAdH/pXWAYninit.xfm', '-source_mask', 'tmp_regress_std/2/object_0_0.mnc_int_approx.002_mask.mnc', '-model_mask', 'data/mask_0_0.mnc', '/tmp/iplMincTools7UZrh7/l8AeXTobject_0_0.mnc_int_approx.002_object_0_0_5.xfm'] failed 255! +Message: Error in minctracc in file /home/vfonov/src/minc-toolkit-itk4/mni_autoreg/minctracc/Volume/init_lattice.c, line 551 +Cannot calculate size of volume 1 +. 
+Traceback (most recent call last): + File "build/bdist.linux-x86_64/egg/scoop/_control.py", line 122, in runFuture + uniqueReference = [cb.groupID for cb in future.callback][0] +IndexError: list index out of range +') +AT:[('build/bdist.linux-x86_64/egg/scoop/_control.py', 127, 'runFuture', 'future.resultValue = future.callable(*future.args, **future.kargs)'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/model/registration.py', 661, 'non_linear_register_step_regress_std', 'downsample=downsample,'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/registration.py', 475, 'non_linear_register_full', 'outputs=[tmp_xfm] )'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/minc_tools.py', 399, 'command', 'raise mincError("ERROR: command {} failed {}!\\nMessage: {}\\n{}".format(str(cmds),str(outvalue),output_stderr,traceback.format_exc()))'), ('/home/vfonov/src/nihpd_pipeline/python/ipl/minc_tools.py', 48, '__init__', 'self.stack = traceback.extract_stack()')] +[2015-10-28 12:39:46,385] launcher (127.0.0.1:60389) INFO Root process is done. +[2015-10-28 12:39:46,385] launcher (127.0.0.1:60389) INFO Finished cleaning spawned subprocesses. diff --git a/examples/synthetic_tests/test_lng_model/make_lin_graph.sh b/examples/synthetic_tests/test_lng_model/make_lin_graph.sh new file mode 100755 index 0000000..ed100e3 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/make_lin_graph.sh @@ -0,0 +1,34 @@ +#! /bin/sh + +tempdir=`mktemp -t testXXXX -d` +trap "rm -rf $tempdir" 0 1 2 15 + +rm -f $tempdir/lst + +for i in $(seq 0 8);do + + mincpik --image_range 0 100 data_lin/object_0_${i}.mnc $tempdir/object_0_${i}.miff + + echo $tempdir/object_0_${i}.miff >> $tempdir/lst + echo $i + + for it in $(seq 2 20);do + printf -v in "tmp_regress_LCC_lin_4mm/%d/object_0_%d.mnc_int_approx.%03d.mnc" $it $i ${it} + mincpik --image_range 0 100 $in $tempdir/${it}_${i}.miff + convert -shave 4x4 $tempdir/${it}_${i}.miff $tempdir/${it}_${i}.miff + echo $tempdir/${it}_${i}.miff >> $tempdir/lst + echo $it $i + done +done + +echo "-label Inp null:" >> $tempdir/lst + +for it in $(seq -w 2 20);do + mincpik --image_range 0 20 tmp_regress_LCC_lin_4mm/model_intensity.0${it}_RMS.mnc $tempdir/${it}_RMS.miff + echo "-label $it $tempdir/${it}_RMS.miff" >> $tempdir/lst + echo $it +done + + +montage -geometry 152x152+1+1 -background black -fill white -tile 20x10 -pointsize 40 $(cat $tempdir/lst) \ + lin_progression.png diff --git a/examples/synthetic_tests/test_lng_model/prepare_test_data.sh b/examples/synthetic_tests/test_lng_model/prepare_test_data.sh new file mode 100755 index 0000000..d40ff13 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/prepare_test_data.sh @@ -0,0 +1,41 @@ +#! 
/bin/sh + +object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 80 80 80 -step 1 1 1 -start -40 -40 -40" + +# make growing "object" +# 1st group: body growth 4% each step, hand growth 8% +# 2nd group: body growth 8% each step, hand growth 8% + +tempdir=`mktemp -t testXXXX -d` +trap "rm -rf $tempdir" 0 1 2 15 + +mkdir -p data + +rm -f subjects.lst + +for i in $(seq 0 8);do + + main_dim_1=$(echo "10*1.04^${i}"|bc -l) + main_dim_2=$(echo "10*1.08^${i}"|bc -l) + + handle_width=$(echo "20*1.08^${i}"|bc -l) + handle_height=$(echo "5*1.08^${i}"|bc -l) + + make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_1} ${main_dim_1} ${main_dim_1} $tempdir/ellipse_1.mnc -clob + make_phantom $object_opts -ellipse -center 10 0 0 -width ${handle_width} ${handle_height} ${handle_height} $tempdir/ellipse_2.mnc -clob + make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_2} ${main_dim_2} ${main_dim_2} $tempdir/ellipse_3.mnc -clob + + mincmath -max $tempdir/ellipse_1.mnc $tempdir/ellipse_2.mnc data/object_0_$i.mnc -clob + itk_morph --threshold 1 --exp 'D[2]' data/object_0_$i.mnc $tempdir/mask_0_$i.mnc --clob + mincresample -nearest -like data/object_0_$i.mnc $tempdir/mask_0_$i.mnc data/mask_0_$i.mnc + + mincmath -max $tempdir/ellipse_3.mnc $tempdir/ellipse_2.mnc data/object_1_$i.mnc -clob + itk_morph --threshold 1 --exp 'D[2]' data/object_1_$i.mnc $tempdir/mask_1_$i.mnc --clob + mincresample -nearest -like data/object_1_$i.mnc $tempdir/mask_1_$i.mnc data/mask_1_$i.mnc + + echo data/object_0_$i.mnc,data/mask_0_$i.mnc,1.0,1.0,0,$i >> subjects.lst + echo data/object_1_$i.mnc,data/mask_1_$i.mnc,1.0,1.0,1,$i >> subjects.lst + +done + +cut -d , -f 1,2 subjects.lst > subjects_cut.lst diff --git a/examples/synthetic_tests/test_lng_model/prepare_test_data_ldd.sh b/examples/synthetic_tests/test_lng_model/prepare_test_data_ldd.sh new file mode 100755 index 0000000..c2ad94b --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/prepare_test_data_ldd.sh @@ -0,0 +1,36 @@ +#! 
/bin/sh + +object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 100 100 100 -step 1 1 1 -start -50 -50 -50" + +tempdir=`mktemp -t testXXXX -d` +trap "rm -rf $tempdir" 0 1 2 15 +out=data_ldd +mkdir -p $out + +rm -f subjects_ldd.lst + +for i in $(seq 0 8);do + + main_dim_1=50 + main_dim_2=30 + pos=$(echo "40-${i}*2"|bc -l) + + make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_1} ${main_dim_1} ${main_dim_1} $tempdir/ellipse_1.mnc -clob + make_phantom $object_opts -ellipse -center $pos 0 0 -width ${main_dim_2} ${main_dim_2} ${main_dim_2} $tempdir/ellipse_2.mnc -clob + + minccalc -express 'clamp(A[0]-A[1],0,100)' $tempdir/ellipse_1.mnc $tempdir/ellipse_2.mnc $tempdir/object_0_$i.mnc -clob + + itk_morph --threshold 50 $tempdir/object_0_$i.mnc $tempdir/object_0_${i}_b.mnc + itk_distance --signed $tempdir/object_0_${i}_b.mnc $tempdir/object_0_${i}_bd.mnc + mincresample -nearest -like $tempdir/object_0_$i.mnc $tempdir/object_0_${i}_bd.mnc $tempdir/object_0_${i}_bd_.mnc -clob + minccalc -express '(A[0]<0?sin(A[0]*3.14/3)*10:exp(-A[0]/4))*10+A[1]+1.0' $tempdir/object_0_${i}_bd_.mnc $tempdir/object_0_$i.mnc ${out}/object_0_${i}.mnc + + + itk_morph --exp 'D[3]' $tempdir/object_0_${i}_b.mnc $tempdir/mask_0_$i.mnc --clob + mincresample -nearest -like ${out}/object_0_$i.mnc $tempdir/mask_0_$i.mnc ${out}/mask_0_$i.mnc -clob + + echo ${out}/object_0_$i.mnc,${out}/mask_0_$i.mnc,1.0,1.0,$i >> subjects_ldd.lst + #exit +done + +cut -d , -f 1,2 subjects_ldd.lst > subjects_ldd_cut.lst diff --git a/examples/synthetic_tests/test_lng_model/prepare_test_data_lin.sh b/examples/synthetic_tests/test_lng_model/prepare_test_data_lin.sh new file mode 100755 index 0000000..65920c8 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/prepare_test_data_lin.sh @@ -0,0 +1,41 @@ +#! 
/bin/sh + +object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 80 80 80 -step 1 1 1 -start -40 -40 -40" + +# make a linearly growing "object" +# 1st group: body grows +1 per step, handle grows +2 width and +1 height per step +# 2nd group: body grows +0.5 per step, same handle growth + +tempdir=`mktemp -t testXXXX -d` +trap "rm -rf $tempdir" 0 1 2 15 +out=data_lin +mkdir -p data_lin + +rm -f subjects_lin.lst + +for i in $(seq 0 8);do + + main_dim_1=$(echo "10+${i}"|bc -l) + main_dim_2=$(echo "10+${i}/2"|bc -l) + + handle_width=$(echo "20+${i}*2"|bc -l) + handle_height=$(echo "5+${i}"|bc -l) + + make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_1} ${main_dim_1} ${main_dim_1} $tempdir/ellipse_1.mnc -clob + make_phantom $object_opts -ellipse -center 10 0 0 -width ${handle_width} ${handle_height} ${handle_height} $tempdir/ellipse_2.mnc -clob + make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_2} ${main_dim_2} ${main_dim_2} $tempdir/ellipse_3.mnc -clob + + mincmath -max $tempdir/ellipse_1.mnc $tempdir/ellipse_2.mnc ${out}/object_0_$i.mnc -clob + itk_morph --threshold 1 --exp 'D[2]' ${out}/object_0_$i.mnc $tempdir/mask_0_$i.mnc --clob + mincresample -nearest -like ${out}/object_0_$i.mnc $tempdir/mask_0_$i.mnc ${out}/mask_0_$i.mnc + + mincmath -max $tempdir/ellipse_3.mnc $tempdir/ellipse_2.mnc ${out}/object_1_$i.mnc -clob + itk_morph --threshold 1 --exp 'D[2]' ${out}/object_1_$i.mnc $tempdir/mask_1_$i.mnc --clob + mincresample -nearest -like ${out}/object_1_$i.mnc $tempdir/mask_1_$i.mnc ${out}/mask_1_$i.mnc + + echo ${out}/object_0_$i.mnc,${out}/mask_0_$i.mnc,1.0,1.0,0,$i >> subjects_lin.lst + echo ${out}/object_1_$i.mnc,${out}/mask_1_$i.mnc,1.0,1.0,1,$i >> subjects_lin.lst + +done + +cut -d , -f 1,2 subjects_lin.lst > subjects_lin_cut.lst diff --git a/examples/synthetic_tests/test_lng_model/prepare_test_data_rot.sh b/examples/synthetic_tests/test_lng_model/prepare_test_data_rot.sh new file mode 100755 index 0000000..9ee47dc --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/prepare_test_data_rot.sh @@ -0,0 +1,45 @@ +#!
/bin/sh + +object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 80 80 80 -step 1 1 1 -start -40 -40 -40" + +# make a fixed "object" (body + handle) +# and rotate it around the z axis in 10 degree steps +# covering -40 to +40 degrees + +tempdir=`mktemp -t testXXXX -d` +trap "rm -rf $tempdir" 0 1 2 15 +out=data_rot +mkdir -p data_rot + +rm -f subjects_rot.lst + +main_dim_1=10 +main_dim_2=10 + +handle_width=20 +handle_height=5 + +make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_1} ${main_dim_1} ${main_dim_1} $tempdir/ellipse_1.mnc -clob +make_phantom $object_opts -ellipse -center 10 0 0 -width ${handle_width} ${handle_height} ${handle_height} $tempdir/ellipse_2.mnc -clob +make_phantom $object_opts -ellipse -center 0 0 0 -width ${main_dim_2} ${main_dim_2} ${main_dim_2} $tempdir/ellipse_3.mnc -clob + +mincmath -max $tempdir/ellipse_1.mnc $tempdir/ellipse_2.mnc $tempdir/object_0_.mnc -clob +minccalc -express 'A[0]+1.0' $tempdir/object_0_.mnc $tempdir/object_0.mnc -clob +itk_morph --threshold 4 --exp 'D[2]' $tempdir/object_0.mnc $tempdir/mask_0.mnc --clob + +mincmath -max $tempdir/ellipse_3.mnc $tempdir/ellipse_2.mnc $tempdir/object_1.mnc -clob +itk_morph --threshold 4 --exp 'D[2]' $tempdir/object_1.mnc $tempdir/mask_1.mnc --clob + + +for i in $(seq 0 8);do + + param2xfm -rotations 0 0 $(($i*10-40)) $tempdir/rot_$i.xfm + + itk_resample --transform $tempdir/rot_$i.xfm $tempdir/object_0.mnc ${out}/object_0_$i.mnc --clob + itk_resample --transform $tempdir/rot_$i.xfm $tempdir/mask_0.mnc ${out}/mask_0_$i.mnc --byte --labels --like ${out}/object_0_$i.mnc --clob + + echo ${out}/object_0_$i.mnc,${out}/mask_0_$i.mnc,1.0,1.0,$(($i-4)) >> subjects_rot.lst + +done + +cut -d , -f 1,2 subjects_rot.lst > subjects_rot_cut.lst diff --git a/examples/synthetic_tests/test_lng_model/run_all_ldd.sh b/examples/synthetic_tests/test_lng_model/run_all_ldd.sh new file mode 100755 index 0000000..8e8119c --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/run_all_ldd.sh @@ -0,0 +1,6 @@ +#!
/bin/sh + +python -m scoop -n 4 test_lng_model_LCC_ldd.py +python -m scoop -n 4 test_model_std.py +python -m scoop -n 4 test_model_SSD_ldd.py +python -m scoop -n 4 test_lng_model_LCC_ldd.py diff --git a/examples/synthetic_tests/test_lng_model/scoop_test_ldd.py b/examples/synthetic_tests/test_lng_model/scoop_test_ldd.py new file mode 100644 index 0000000..50a1082 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/scoop_test_ldd.py @@ -0,0 +1,19 @@ +from scoop import futures, shared + +import iplScoopGenerateModel as gm + +if __name__ == '__main__': + # setup data for parallel processing + gm.generate_ldd_model_csv('subjects_cut.lst', + work_prefix='tmp_ldd', + options={'symmetric':False, + 'refine':True, + 'protocol': [{'iter':4,'level':8}, + {'iter':4,'level':4}, + ], + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:20,16:20,8:20,4:20,2:20,1:20 } } + } + + ) diff --git a/examples/synthetic_tests/test_lng_model/subjects_1.lst b/examples/synthetic_tests/test_lng_model/subjects_1.lst new file mode 100644 index 0000000..6586f68 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/subjects_1.lst @@ -0,0 +1,18 @@ +data/object_0_0.mnc,data/mask_0_0.mnc,1.0 +data/object_1_0.mnc,data/mask_1_0.mnc,1.0 +data/object_0_1.mnc,data/mask_0_1.mnc,1.0 +data/object_1_1.mnc,data/mask_1_1.mnc,1.0 +data/object_0_2.mnc,data/mask_0_2.mnc,1.0 +data/object_1_2.mnc,data/mask_1_2.mnc,1.0 +data/object_0_3.mnc,data/mask_0_3.mnc,1.0 +data/object_1_3.mnc,data/mask_1_3.mnc,1.0 +data/object_0_4.mnc,data/mask_0_4.mnc,1.0 +data/object_1_4.mnc,data/mask_1_4.mnc,1.0 +data/object_0_5.mnc,data/mask_0_5.mnc,1.0 +data/object_1_5.mnc,data/mask_1_5.mnc,1.0 +data/object_0_6.mnc,data/mask_0_6.mnc,1.0 +data/object_1_6.mnc,data/mask_1_6.mnc,1.0 +data/object_0_7.mnc,data/mask_0_7.mnc,1.0 +data/object_1_7.mnc,data/mask_1_7.mnc,1.0 +data/object_0_8.mnc,data/mask_0_8.mnc,1.0 +data/object_1_8.mnc,data/mask_1_8.mnc,1.0 diff --git a/examples/synthetic_tests/test_lng_model/subjects_ldd.lst b/examples/synthetic_tests/test_lng_model/subjects_ldd.lst new file mode 100644 index 0000000..a13c941 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/subjects_ldd.lst @@ -0,0 +1,9 @@ +data_ldd/object_0_0.mnc,data_ldd/mask_0_0.mnc,1.0,0 +data_ldd/object_0_1.mnc,data_ldd/mask_0_1.mnc,1.0,1 +data_ldd/object_0_2.mnc,data_ldd/mask_0_2.mnc,1.0,2 +data_ldd/object_0_3.mnc,data_ldd/mask_0_3.mnc,1.0,3 +data_ldd/object_0_4.mnc,data_ldd/mask_0_4.mnc,1.0,4 +data_ldd/object_0_5.mnc,data_ldd/mask_0_5.mnc,1.0,5 +data_ldd/object_0_6.mnc,data_ldd/mask_0_6.mnc,1.0,6 +data_ldd/object_0_7.mnc,data_ldd/mask_0_7.mnc,1.0,7 +data_ldd/object_0_8.mnc,data_ldd/mask_0_8.mnc,1.0,8 diff --git a/examples/synthetic_tests/test_lng_model/subjects_ldd_cut.lst b/examples/synthetic_tests/test_lng_model/subjects_ldd_cut.lst new file mode 100644 index 0000000..b83fdef --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/subjects_ldd_cut.lst @@ -0,0 +1,9 @@ +data_ldd/object_0_0.mnc,data_ldd/mask_0_0.mnc +data_ldd/object_0_1.mnc,data_ldd/mask_0_1.mnc +data_ldd/object_0_2.mnc,data_ldd/mask_0_2.mnc +data_ldd/object_0_3.mnc,data_ldd/mask_0_3.mnc +data_ldd/object_0_4.mnc,data_ldd/mask_0_4.mnc +data_ldd/object_0_5.mnc,data_ldd/mask_0_5.mnc +data_ldd/object_0_6.mnc,data_ldd/mask_0_6.mnc +data_ldd/object_0_7.mnc,data_ldd/mask_0_7.mnc +data_ldd/object_0_8.mnc,data_ldd/mask_0_8.mnc diff --git a/examples/synthetic_tests/test_lng_model/subjects_lin.lst b/examples/synthetic_tests/test_lng_model/subjects_lin.lst new file mode 100644 index 0000000..e9312c8 
--- /dev/null +++ b/examples/synthetic_tests/test_lng_model/subjects_lin.lst @@ -0,0 +1,18 @@ +data_lin/object_0_0.mnc,data_lin/mask_0_0.mnc,1.0,1.0,0,0 +data_lin/object_1_0.mnc,data_lin/mask_1_0.mnc,1.0,1.0,1,0 +data_lin/object_0_1.mnc,data_lin/mask_0_1.mnc,1.0,1.0,0,1 +data_lin/object_1_1.mnc,data_lin/mask_1_1.mnc,1.0,1.0,1,1 +data_lin/object_0_2.mnc,data_lin/mask_0_2.mnc,1.0,1.0,0,2 +data_lin/object_1_2.mnc,data_lin/mask_1_2.mnc,1.0,1.0,1,2 +data_lin/object_0_3.mnc,data_lin/mask_0_3.mnc,1.0,1.0,0,3 +data_lin/object_1_3.mnc,data_lin/mask_1_3.mnc,1.0,1.0,1,3 +data_lin/object_0_4.mnc,data_lin/mask_0_4.mnc,1.0,1.0,0,4 +data_lin/object_1_4.mnc,data_lin/mask_1_4.mnc,1.0,1.0,1,4 +data_lin/object_0_5.mnc,data_lin/mask_0_5.mnc,1.0,1.0,0,5 +data_lin/object_1_5.mnc,data_lin/mask_1_5.mnc,1.0,1.0,1,5 +data_lin/object_0_6.mnc,data_lin/mask_0_6.mnc,1.0,1.0,0,6 +data_lin/object_1_6.mnc,data_lin/mask_1_6.mnc,1.0,1.0,1,6 +data_lin/object_0_7.mnc,data_lin/mask_0_7.mnc,1.0,1.0,0,7 +data_lin/object_1_7.mnc,data_lin/mask_1_7.mnc,1.0,1.0,1,7 +data_lin/object_0_8.mnc,data_lin/mask_0_8.mnc,1.0,1.0,0,8 +data_lin/object_1_8.mnc,data_lin/mask_1_8.mnc,1.0,1.0,1,8 diff --git a/examples/synthetic_tests/test_lng_model/subjects_lin_cut.lst b/examples/synthetic_tests/test_lng_model/subjects_lin_cut.lst new file mode 100644 index 0000000..eba2b93 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/subjects_lin_cut.lst @@ -0,0 +1,18 @@ +data_lin/object_0_0.mnc,data_lin/mask_0_0.mnc +data_lin/object_1_0.mnc,data_lin/mask_1_0.mnc +data_lin/object_0_1.mnc,data_lin/mask_0_1.mnc +data_lin/object_1_1.mnc,data_lin/mask_1_1.mnc +data_lin/object_0_2.mnc,data_lin/mask_0_2.mnc +data_lin/object_1_2.mnc,data_lin/mask_1_2.mnc +data_lin/object_0_3.mnc,data_lin/mask_0_3.mnc +data_lin/object_1_3.mnc,data_lin/mask_1_3.mnc +data_lin/object_0_4.mnc,data_lin/mask_0_4.mnc +data_lin/object_1_4.mnc,data_lin/mask_1_4.mnc +data_lin/object_0_5.mnc,data_lin/mask_0_5.mnc +data_lin/object_1_5.mnc,data_lin/mask_1_5.mnc +data_lin/object_0_6.mnc,data_lin/mask_0_6.mnc +data_lin/object_1_6.mnc,data_lin/mask_1_6.mnc +data_lin/object_0_7.mnc,data_lin/mask_0_7.mnc +data_lin/object_1_7.mnc,data_lin/mask_1_7.mnc +data_lin/object_0_8.mnc,data_lin/mask_0_8.mnc +data_lin/object_1_8.mnc,data_lin/mask_1_8.mnc diff --git a/examples/synthetic_tests/test_lng_model/subjects_nomask.lst b/examples/synthetic_tests/test_lng_model/subjects_nomask.lst new file mode 100644 index 0000000..841997e --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/subjects_nomask.lst @@ -0,0 +1,18 @@ +data/object_0_0.mnc,,1.0,1.0,0,0 +data/object_1_0.mnc,,1.0,1.0,1,0 +data/object_0_1.mnc,,1.0,1.0,0,1 +data/object_1_1.mnc,,1.0,1.0,1,1 +data/object_0_2.mnc,,1.0,1.0,0,2 +data/object_1_2.mnc,,1.0,1.0,1,2 +data/object_0_3.mnc,,1.0,1.0,0,3 +data/object_1_3.mnc,,1.0,1.0,1,3 +data/object_0_4.mnc,,1.0,1.0,0,4 +data/object_1_4.mnc,,1.0,1.0,1,4 +data/object_0_5.mnc,,1.0,1.0,0,5 +data/object_1_5.mnc,,1.0,1.0,1,5 +data/object_0_6.mnc,,1.0,1.0,0,6 +data/object_1_6.mnc,,1.0,1.0,1,6 +data/object_0_7.mnc,,1.0,1.0,0,7 +data/object_1_7.mnc,,1.0,1.0,1,7 +data/object_0_8.mnc,,1.0,1.0,0,8 +data/object_1_8.mnc,,1.0,1.0,1,8 diff --git a/examples/synthetic_tests/test_lng_model/subjects_rot.lst b/examples/synthetic_tests/test_lng_model/subjects_rot.lst new file mode 100644 index 0000000..7f433fb --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/subjects_rot.lst @@ -0,0 +1,9 @@ +data_rot/object_0_0.mnc,data_rot/mask_0_0.mnc,1.0,1.0,-4 +data_rot/object_0_1.mnc,data_rot/mask_0_1.mnc,1.0,1.0,-3 
+data_rot/object_0_2.mnc,data_rot/mask_0_2.mnc,1.0,1.0,-2 +data_rot/object_0_3.mnc,data_rot/mask_0_3.mnc,1.0,1.0,-1 +data_rot/object_0_4.mnc,data_rot/mask_0_4.mnc,1.0,1.0,0 +data_rot/object_0_5.mnc,data_rot/mask_0_5.mnc,1.0,1.0,1 +data_rot/object_0_6.mnc,data_rot/mask_0_6.mnc,1.0,1.0,2 +data_rot/object_0_7.mnc,data_rot/mask_0_7.mnc,1.0,1.0,3 +data_rot/object_0_8.mnc,data_rot/mask_0_8.mnc,1.0,1.0,4 diff --git a/examples/synthetic_tests/test_lng_model/subjects_rot_1.lst b/examples/synthetic_tests/test_lng_model/subjects_rot_1.lst new file mode 100644 index 0000000..ea7ff36 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/subjects_rot_1.lst @@ -0,0 +1,9 @@ +data_rot/object_0_0.mnc,data_rot/mask_0_0.mnc,1.0,1.0 +data_rot/object_0_1.mnc,data_rot/mask_0_1.mnc,1.0,1.0 +data_rot/object_0_2.mnc,data_rot/mask_0_2.mnc,1.0,1.0 +data_rot/object_0_3.mnc,data_rot/mask_0_3.mnc,1.0,1.0 +data_rot/object_0_4.mnc,data_rot/mask_0_4.mnc,1.0,1.0 +data_rot/object_0_5.mnc,data_rot/mask_0_5.mnc,1.0,1.0 +data_rot/object_0_6.mnc,data_rot/mask_0_6.mnc,1.0,1.0 +data_rot/object_0_7.mnc,data_rot/mask_0_7.mnc,1.0,1.0 +data_rot/object_0_8.mnc,data_rot/mask_0_8.mnc,1.0,1.0 diff --git a/examples/synthetic_tests/test_lng_model/subjects_rot_2.lst b/examples/synthetic_tests/test_lng_model/subjects_rot_2.lst new file mode 100644 index 0000000..1f4dc65 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/subjects_rot_2.lst @@ -0,0 +1,9 @@ +data_rot/object_0_0.mnc,data_rot/mask_0_0.mnc,1.0,-4 +data_rot/object_0_1.mnc,data_rot/mask_0_1.mnc,1.0,-3 +data_rot/object_0_2.mnc,data_rot/mask_0_2.mnc,1.0,-2 +data_rot/object_0_3.mnc,data_rot/mask_0_3.mnc,1.0,-1 +data_rot/object_0_4.mnc,data_rot/mask_0_4.mnc,1.0,0 +data_rot/object_0_5.mnc,data_rot/mask_0_5.mnc,1.0,1 +data_rot/object_0_6.mnc,data_rot/mask_0_6.mnc,1.0,2 +data_rot/object_0_7.mnc,data_rot/mask_0_7.mnc,1.0,3 +data_rot/object_0_8.mnc,data_rot/mask_0_8.mnc,1.0,4 diff --git a/examples/synthetic_tests/test_lng_model/subjects_rot_2a.lst b/examples/synthetic_tests/test_lng_model/subjects_rot_2a.lst new file mode 100644 index 0000000..5a9bb24 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/subjects_rot_2a.lst @@ -0,0 +1,9 @@ +data_rot/object_0_0.mnc,data_rot/mask_all.mnc,1.0,-4 +data_rot/object_0_1.mnc,data_rot/mask_all.mnc,1.0,-3 +data_rot/object_0_2.mnc,data_rot/mask_all.mnc,1.0,-2 +data_rot/object_0_3.mnc,data_rot/mask_all.mnc,1.0,-1 +data_rot/object_0_4.mnc,data_rot/mask_all.mnc,1.0,0 +data_rot/object_0_5.mnc,data_rot/mask_all.mnc,1.0,1 +data_rot/object_0_6.mnc,data_rot/mask_all.mnc,1.0,2 +data_rot/object_0_7.mnc,data_rot/mask_all.mnc,1.0,3 +data_rot/object_0_8.mnc,data_rot/mask_all.mnc,1.0,4 diff --git a/examples/synthetic_tests/test_lng_model/subjects_rot_cut.lst b/examples/synthetic_tests/test_lng_model/subjects_rot_cut.lst new file mode 100644 index 0000000..e05215d --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/subjects_rot_cut.lst @@ -0,0 +1,9 @@ +data_rot/object_0_0.mnc,data_rot/mask_0_0.mnc +data_rot/object_0_1.mnc,data_rot/mask_0_1.mnc +data_rot/object_0_2.mnc,data_rot/mask_0_2.mnc +data_rot/object_0_3.mnc,data_rot/mask_0_3.mnc +data_rot/object_0_4.mnc,data_rot/mask_0_4.mnc +data_rot/object_0_5.mnc,data_rot/mask_0_5.mnc +data_rot/object_0_6.mnc,data_rot/mask_0_6.mnc +data_rot/object_0_7.mnc,data_rot/mask_0_7.mnc +data_rot/object_0_8.mnc,data_rot/mask_0_8.mnc diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model.py b/examples/synthetic_tests/test_lng_model/test_lng_model.py new file mode 100644 index 0000000..49f07c3 
--- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model.py @@ -0,0 +1,37 @@ +from scoop import futures, shared +import os + +import iplScoopGenerateModel as gm + +if __name__ == '__main__': + # setup data for parallel processing + os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + + gm.regress_ldd_csv( + 'subjects_fix_vel.lst', + work_prefix='tmp_regress_nr_b0', + options={ + 'protocol': [ + {'iter':16, 'level':8, 'blur_int': None, 'blur_vel': 4 }, + #{'iter':4, 'level':4, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 }, + 'hist_match':True, + 'max_step': 4.0 }, + + 'start_level':8, + 'refine': False, + 'cleanup': False, + 'debug': True, + 'debias': True, + }, + #regress_model=['data/object_0_4.mnc'], + model='data/object_0_4.mnc', + mask='data/mask_0_4.mnc', + int_par_count=3, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_LCC.py b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC.py new file mode 100644 index 0000000..4bfe248 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC.py @@ -0,0 +1,40 @@ +from scoop import futures, shared +import os + +import iplScoopGenerateModel as gm + +if __name__ == '__main__': + # setup data for parallel processing + os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + + gm.regress_ldd_csv( + 'subjects.lst', + work_prefix='tmp_regress_LCC_nr_nd_2', + options={ + 'protocol': [ + {'iter':4, 'level':4, 'blur_int': None, 'blur_vel': None }, + {'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 }, + 'hist_match':True, + 'max_step': 2.0, + 'LCC':True }, + + 'start_level': 8, + 'refine': False, + 'cleanup': False, + 'debug': True, + 'debias': False, + 'qc': True, + 'incremental': False + }, + #regress_model=['data/object_0_4.mnc'], + model='data/object_0_4.mnc', + mask='data/mask_0_4.mnc', + int_par_count=1, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_1.py b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_1.py new file mode 100644 index 0000000..7abcde8 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_1.py @@ -0,0 +1,40 @@ +from scoop import futures, shared +import os + +import iplScoopGenerateModel as gm + +if __name__ == '__main__': + # setup data for parallel processing + os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + + gm.regress_ldd_csv( + 'subjects_1.lst', + work_prefix='tmp_regress_LCC_1', + options={ + 'protocol': [ + {'iter':4, 'level':4, 'blur_int': None, 'blur_vel': None }, + {'iter':4, 'level':2, 'blur_int': None, 'blur_vel': None }, + {'iter':4, 'level':1, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 }, + 'hist_match':False, + 'max_step': 2.0, + 'LCC':True }, + + 'start_level': 8, + 'refine': False, + 'cleanup': False, + 'debug': True, + 'debias': False, + 'qc': True, + 'incremental': False + }, + #regress_model=['data/object_0_4.mnc'], +
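# model/mask: the middle time point (index 4) of the 0..8 synthetic series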
model='data/object_0_4.mnc', + mask='data/mask_0_4.mnc', + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_ldd.py b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_ldd.py new file mode 100644 index 0000000..b5e570f --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_ldd.py @@ -0,0 +1,40 @@ +from scoop import futures, shared +import os +import iplScoopGenerateModel as gm +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + # setup data for parallel processing + + gm.regress_ldd_csv( + 'subjects_ldd.lst', + work_prefix='tmp_regress_LCC_ldd_sym2', + options={ + 'protocol': [ + #{'iter':10, 'level':8, 'blur_int': None, 'blur_vel': None }, + {'iter':10, 'level':4, 'blur_int': None, 'blur_vel': None }, + #{'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:200, 16:200, 8:200, 4:200, 2:40 }, + 'LCC':True }, + + 'start_level': 8, + 'refine': True, + 'cleanup':False, + 'debug': True, + 'debias': False, + 'qc': True, + 'incremental': True, + 'remove0':True, + 'sym':True, + }, + #regress_model=['data/object_0_4.mnc'], + model='data_ldd/object_0_4.mnc', + mask='data_ldd/mask_0_4.mnc', + int_par_count=1, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_lin.py b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_lin.py new file mode 100644 index 0000000..ce6534e --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_lin.py @@ -0,0 +1,40 @@ +from scoop import futures, shared + +import iplScoopGenerateModel as gm +import os + +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + # setup data for parallel processing + + gm.regress_ldd_csv( + 'subjects_lin.lst', + work_prefix='tmp_regress_LCC_lin_4mm', + options={ + 'protocol': [ + {'iter':20, 'level':4, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 }, + 'hist_match':True, + 'max_step': 2.0, + 'LCC':True }, + + 'start_level': 8, + 'refine': True, + 'cleanup': False, + 'debug': True, + 'debias': True, + 'qc': True, + 'incremental': False + }, + #regress_model=['data/object_0_4.mnc'], + model='data/object_0_4.mnc', + mask='data/mask_0_4.mnc', + int_par_count=1, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot.py b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot.py new file mode 100644 index 0000000..42f8775 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot.py @@ -0,0 +1,38 @@ +from scoop import futures, shared +import os +import iplScoopGenerateModel as gm +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + # setup data for parallel processing + + gm.regress_ldd_csv( + 'subjects_rot.lst', + work_prefix='tmp_regress_LCC_rot_inc', + options={ + 'protocol': [ + {'iter':2, 'level':8, 'blur_int': None, 'blur_vel': None }, + {'iter':2, 'level':4, 'blur_int': None, 'blur_vel': None }, + {'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 
'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 }, + 'LCC':True }, + + 'start_level': 8, + 'refine': True, + 'cleanup': False, + 'debug': True, + 'debias': False, + 'qc': True, + 'incremental': True + }, + #regress_model=['data/object_0_4.mnc'], + model='data_rot/object_0_4.mnc', + mask='data_rot/mask_0_4.mnc', + int_par_count=1, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot_m.py b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot_m.py new file mode 100644 index 0000000..837d202 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_LCC_rot_m.py @@ -0,0 +1,38 @@ +from scoop import futures, shared +import os +import iplScoopGenerateModel as gm +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + # setup data for parallel processing + + gm.regress_ldd_csv( + 'subjects_rot_2.lst', + work_prefix='tmp_regress_LCC_rot_inc_2ba_std', + options={ + 'protocol': [ + {'iter':16, 'level':4, 'blur_int': None, 'blur_vel': None }, + #{'iter':2, 'level':4, 'blur_int': None, 'blur_vel': None }, + #{'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 }, + 'LCC':False }, + + 'start_level': 4, + 'refine': True, + 'cleanup': False, + 'debug': True, + 'debias': True, + 'qc': True, + 'incremental': True + }, + #regress_model=['data/object_0_4.mnc'], + model='data_rot/object_0_4.mnc', + mask='data_rot/mask_0_4.mnc', + int_par_count=1, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_lim.py b/examples/synthetic_tests/test_lng_model/test_lng_model_lim.py new file mode 100644 index 0000000..9e060da --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_lim.py @@ -0,0 +1,32 @@ +from scoop import futures, shared + +import iplScoopGenerateModel as gm + +if __name__ == '__main__': + # setup data for parallel processing + gm.regress_ldd_csv('subjects_lim.lst', + work_prefix='tmp_regress_lim_nr_v2', + options={ + 'protocol': [ + {'iter':4, 'level':4, 'blur_int': None, 'blur_vel': 4 }, + {'iter':4, 'level':4, 'blur_int': None, 'blur_vel': 2 }, + {'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + {'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:40, 16:40, 8:40, 4:40, 2:40, 1:40 }, + 'hist_match':True, + 'max_step': 4.0 }, + 'start_level':16, + 'refine': False, + 'cleanup': False, + 'debug': True, + 'debias': True, + 'qc': True, + }, + #regress_model=['data/object_0_4.mnc'], + model='data/object_0_4.mnc', + mask='data/mask_0_4.mnc', + int_par_count=1, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_lim_nd.py b/examples/synthetic_tests/test_lng_model/test_lng_model_lim_nd.py new file mode 100644 index 0000000..45bf85c --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_lim_nd.py @@ -0,0 +1,33 @@ +from scoop import futures, shared + +import iplScoopGenerateModel as gm + +if __name__ == '__main__': + # setup data for parallel processing + gm.regress_ldd_csv('subjects_lim.lst', + work_prefix='tmp_regress_lim_nr_nd', + options={ + 'protocol': [ + {'iter':8, 'level':4 }, + #{'iter':4, 'level':4 }, + #{'iter':4, 'level':2 }, + ], + 'parameters': 
{'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 }, + 'hist_match':True, + 'max_step': 4.0 }, + 'start_level':16, + 'refine': False, + 'blur_int_model': None, + 'blur_vel_model': 4, + 'cleanup': False, + 'debug': True, + 'debias': True, + 'qc': True, + }, + #regress_model=['data/object_0_4.mnc'], + model='data/object_0_4.mnc', + mask='data/mask_0_4.mnc', + int_par_count=1, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_nomask.py b/examples/synthetic_tests/test_lng_model/test_lng_model_nomask.py new file mode 100644 index 0000000..b7566fa --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_nomask.py @@ -0,0 +1,26 @@ +from scoop import futures, shared + +import iplScoopGenerateModel as gm + +if __name__ == '__main__': + # setup data for parallel processing + gm.regress_ldd_csv('subjects_nomask.lst', + work_prefix='tmp_regress_nomask', + options={ + 'protocol': [ + {'iter':4, 'level':8 }, + {'iter':4, 'level':4 }, + #{'iter':4, 'level':2 }, + ], + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 32:40, 16:40, 8:40, 4:40, 2:40 }, + 'hist_match':True, + 'max_step': 4.0 }, + 'start_level':16, + 'refine':False, + }, + regress_model=['data/object_0_4.mnc'], + mask=None, + int_par_count=1, + ) diff --git a/examples/synthetic_tests/test_lng_model/test_lng_model_std.py b/examples/synthetic_tests/test_lng_model/test_lng_model_std.py new file mode 100644 index 0000000..73d9c0e --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_lng_model_std.py @@ -0,0 +1,53 @@ +from iplMincTools import mincTools,mincError +import traceback +import os +import sys + +from scoop import futures, shared + +import iplScoopGenerateModel as gm +# setup data for parallel processing +# have to be at global level + +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + try: + + res=gm.regress_csv( + 'subjects.lst', + work_prefix='tmp_regress_std_dd_nr_nd', + options={ + 'protocol': [ + {'iter':4, 'level':4, 'blur_int': None, 'blur_def': None }, + {'iter':4, 'level':2, 'blur_int': None, 'blur_def': None }, + {'iter':4, 'level':1, 'blur_int': None, 'blur_def': None }, + ], + 'start_level':8, + 'refine': False, + 'cleanup': False, + 'debug': True, + 'debias': False, + 'qc': True, + 'nl_mode': 'dd', + }, + #regress_model=['data/object_0_4.mnc'], + model='data/object_0_4.mnc', + mask='data/mask_0_4.mnc', + int_par_count=1, + ) + + # + + + except mincError as e: + print "Exception in regress_csv:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in regress_csv:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/examples/synthetic_tests/test_lng_model/test_model_LCC_ldd.py b/examples/synthetic_tests/test_lng_model/test_model_LCC_ldd.py new file mode 100644 index 0000000..760a8fe --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_model_LCC_ldd.py @@ -0,0 +1,38 @@ +from scoop import futures, shared +import os +import iplScoopGenerateModel as gm + +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + # setup data for parallel processing + + gm.generate_ldd_model_csv( + 'subjects_ldd_cut.lst', + work_prefix='tmp_avg_LCC_ldd', + options={ + 'protocol': [ + {'iter':4, 'level':8, 'blur_int': None, 'blur_vel': None }, + {'iter':4, 'level':4, 'blur_int': None, 'blur_vel':
None }, + #{'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 8:100, 4:100, 2:40 }, + 'LCC':True }, + + 'start_level': 8, + 'refine': True, + 'cleanup':False, + 'debug': True, + 'debias': True, + 'qc': True, + 'incremental': True + }, + #regress_model=['data/object_0_4.mnc'], + model='data_ldd/object_0_0.mnc', + mask='data_ldd/mask_0_0.mnc', + ) diff --git a/examples/synthetic_tests/test_lng_model/test_model_SSD_ldd.py b/examples/synthetic_tests/test_lng_model/test_model_SSD_ldd.py new file mode 100644 index 0000000..2dc0d31 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_model_SSD_ldd.py @@ -0,0 +1,38 @@ +from scoop import futures, shared +import os +import iplScoopGenerateModel as gm + +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + # setup data for parallel processing + + gm.generate_ldd_model_csv( + 'subjects_ldd_cut.lst', + work_prefix='tmp_avg_SSD_ldd', + options={ + 'protocol': [ + {'iter':4, 'level':8, 'blur_int': None, 'blur_vel': None }, + {'iter':4, 'level':4, 'blur_int': None, 'blur_vel': None }, + #{'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'parameters': {'smooth_update':2, + 'smooth_field':2, + 'conf': { 8:200, 4:200, 2:40 }, + 'LCC':False }, + + 'start_level': 8, + 'refine': True, + 'cleanup':False, + 'debug': True, + 'debias': True, + 'qc': True, + 'incremental': True + }, + #regress_model=['data/object_0_4.mnc'], + model='data_ldd/object_0_0.mnc', + mask='data_ldd/mask_0_0.mnc', + ) diff --git a/examples/synthetic_tests/test_lng_model/test_model_std.py b/examples/synthetic_tests/test_lng_model/test_model_std.py new file mode 100644 index 0000000..d6ad547 --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_model_std.py @@ -0,0 +1,33 @@ +from scoop import futures, shared +import os +import iplScoopGenerateModel as gm + +os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS']='1' + +if __name__ == '__main__': + # setup data for parallel processing + + gm.generate_nonlinear_model_csv( + 'subjects_ldd_cut.lst', + work_prefix='tmp_avg_std', + options={ + 'protocol': [ + {'iter':4, 'level':8, }, + {'iter':4, 'level':4, }, + #{'iter':16, 'level':2, 'blur_int': None, 'blur_vel': None }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 2 }, + #{'iter':4, 'level':2, 'blur_int': None, 'blur_vel': 1 }, + ], + + 'start_level': 8, + 'refine': True, + 'cleanup':False, + 'debug': True, + 'debias': True, + 'qc': True, + 'incremental': True + }, + #regress_model=['data/object_0_4.mnc'], + model='data_ldd/object_0_0.mnc', + mask='data_ldd/mask_0_0.mnc', + ) diff --git a/examples/synthetic_tests/test_lng_model/test_regression.py b/examples/synthetic_tests/test_lng_model/test_regression.py new file mode 100644 index 0000000..70df80a --- /dev/null +++ b/examples/synthetic_tests/test_lng_model/test_regression.py @@ -0,0 +1,104 @@ +#! 
/usr/bin/env python + +import minc +import sys +import os +import pyezminc +import numpy as np + +from sklearn import linear_model + +if __name__ == "__main__": + + inp=pyezminc.parallel_input_iterator() + out=pyezminc.parallel_output_iterator() + + + design_matrix=np.array( [ [ 1, -0.5,-4], + [ 1, 0.5 ,-4], + [ 1, -0.5,-3], + [ 1, 0.5 ,-3], + [ 1, -0.5,-2], + [ 1, 0.5 ,-2], + [ 1, -0.5,-1], + [ 1, 0.5 ,-1], + [ 1, -0.5, 0], + [ 1, 0.5 , 0], + [ 1, -0.5, 1], + [ 1, 0.5 , 1], + [ 1, -0.5, 2], + [ 1, 0.5 , 2], + [ 1, -0.5, 3], + [ 1, 0.5 , 3], + [ 1, -0.5, 4], + [ 1, 0.5 , 4]] ) + + inp.open([ 'tmp_regress/8/object_0_0.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_1.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_2.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_3.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_4.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_5.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_6.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_7.mnc.008_vel.mnc', + 'tmp_regress/8/object_0_8.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_0.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_1.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_2.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_3.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_4.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_5.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_6.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_7.mnc.008_vel.mnc', + 'tmp_regress/8/object_1_8.mnc.008_vel.mnc', + ]) + + out.open(["tmp_regress/fit_{}.mnc".format(i) for i in range(design_matrix.shape[1])], + 'tmp_regress/8/object_0_0.mnc.008_vel.mnc' ) + + out_error=pyezminc.output_iterator_real(None) + out_error.open("tmp_regress/fit_error.mnc",reference_file="tmp_regress/8/object_1_8.mnc.008.mnc") + + inp.begin() + out.begin() + out_error.begin() + + # allocate sum + v1=np.zeros(shape=[design_matrix.shape[0]], dtype=np.float64, order='C') + v2=np.zeros(shape=[design_matrix.shape[0]], dtype=np.float64, order='C') + v3=np.zeros(shape=[design_matrix.shape[0]], dtype=np.float64, order='C') + + # allocate work space + qqq=np.empty_like(v1) + + clf=linear_model.LinearRegression(fit_intercept=False) + + while not inp.last(): + # assume that we are dealing with 3D vectors + # TODO: add check somewhere to make sure it is the case + v1=inp.value(v1);inp.next() + v2=inp.value(v2);inp.next() + v3=inp.value(v3) + + # put things together + y=np.column_stack((v1,v2,v3)) + x=design_matrix + + clf.fit(x,y) + + out.value(np.ravel(clf.coef_[0,:]));out.next() + out.value(np.ravel(clf.coef_[1,:]));out.next() + out.value(np.ravel(clf.coef_[2,:]));out.next() + + out_error.value(clf.score(x,y));out_error.next() + + inp.next() + + print out.progress() + print inp.progress() + print out_error.progress() + + del inp + del out + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on;hl python diff --git a/examples/synthetic_tests/test_model_creation/.gitignore b/examples/synthetic_tests/test_model_creation/.gitignore new file mode 100644 index 0000000..ceff4c0 --- /dev/null +++ b/examples/synthetic_tests/test_model_creation/.gitignore @@ -0,0 +1,7 @@ +*.pyc +*.mnc +tmp +tmp_nl +tmp_sym +tmp_nl_sym +tmp_* \ No newline at end of file diff --git a/examples/synthetic_tests/test_model_creation/big_subjects.lst b/examples/synthetic_tests/test_model_creation/big_subjects.lst new file mode 100644 index 0000000..53af808 --- /dev/null +++ b/examples/synthetic_tests/test_model_creation/big_subjects.lst @@ -0,0 +1,9 @@ +test_data/big_ellipse_1.mnc,test_data/big_mask.mnc 
+test_data/big_ellipse_2.mnc,test_data/big_mask.mnc +test_data/big_ellipse_3.mnc,test_data/big_mask.mnc +test_data/big_ellipse_4.mnc,test_data/big_mask.mnc +test_data/big_ellipse_5.mnc,test_data/big_mask.mnc +test_data/big_ellipse_6.mnc,test_data/big_mask.mnc +test_data/big_ellipse_7.mnc,test_data/big_mask.mnc +test_data/big_ellipse_8.mnc,test_data/big_mask.mnc +test_data/big_ellipse_9.mnc,test_data/big_mask.mnc diff --git a/examples/synthetic_tests/test_model_creation/prepare_test_data.sh b/examples/synthetic_tests/test_model_creation/prepare_test_data.sh new file mode 100755 index 0000000..cadeb68 --- /dev/null +++ b/examples/synthetic_tests/test_model_creation/prepare_test_data.sh @@ -0,0 +1,37 @@ +#! /bin/sh +mkdir -p test_data + +object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 50 50 50 -step 2 2 2 -start -50 -50 -50" +mask_opts="-byte -real_range 0 1 -background 0 -edge_value 1 -fill_value 1 -no_partial -nelements 50 50 50 -step 2 2 2 -start -50 -50 -50" + +# make bunch of ellipses +make_phantom $object_opts -ellipse -center -10 0 0 -width 20 10 10 test_data/ellipse_1.mnc +make_phantom $object_opts -ellipse -center 0 0 0 -width 20 10 10 test_data/ellipse_2.mnc +make_phantom $object_opts -ellipse -center 10 0 0 -width 20 10 10 test_data/ellipse_3.mnc + +make_phantom $object_opts -ellipse -center 0 -10 0 -width 10 20 10 test_data/ellipse_4.mnc +make_phantom $object_opts -ellipse -center 0 0 0 -width 10 20 10 test_data/ellipse_5.mnc +make_phantom $object_opts -ellipse -center 0 10 0 -width 10 20 10 test_data/ellipse_6.mnc + +make_phantom $object_opts -ellipse -center 0 0 -10 -width 10 10 20 test_data/ellipse_7.mnc +make_phantom $object_opts -ellipse -center 0 0 0 -width 10 10 20 test_data/ellipse_8.mnc +make_phantom $object_opts -ellipse -center 0 0 10 -width 10 10 20 test_data/ellipse_9.mnc + +# make mask +make_phantom $mask_opts -rectangle -center 0 0 0 -width 50 50 50 test_data/mask.mnc + + +# make reference +make_phantom $mask_opts -ellipse -center 0 0 0 -width 15 15 15 test_data/ref.mnc + +cat - >subjects.lst <big_subjects.lst <>>> xfmavg tmp_lsq6_downsample/4/ellipse_1.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_2.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_3.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_4.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_5.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_6.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_7.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_8.mnc_corr.004.xfm tmp_lsq6_downsample/4/ellipse_9.mnc_corr.004.xfm test_std.xfm + +Transform_Type = Linear; +Linear_Transform = + 9.99990658e-01 1.41336445e-03 -4.08492676e-03 3.23595092e-01 + -1.47889049e-03 9.99869585e-01 -1.60819040e-02 -3.64787875e-01 + 4.06166498e-03 1.60877979e-02 9.99862333e-01 -5.46621577e-01; diff --git a/examples/synthetic_tests/test_registration/.gitignore b/examples/synthetic_tests/test_registration/.gitignore new file mode 100644 index 0000000..49c63a4 --- /dev/null +++ b/examples/synthetic_tests/test_registration/.gitignore @@ -0,0 +1,5 @@ +# ignore all automatically generated files and outputs +*.xfm +*.mnc +*.txt +*.log diff --git a/examples/synthetic_tests/test_registration/prepare_test_data.sh b/examples/synthetic_tests/test_registration/prepare_test_data.sh new file mode 100755 index 0000000..46478a2 --- /dev/null +++ b/examples/synthetic_tests/test_registration/prepare_test_data.sh @@ -0,0 +1,29 @@ +#! 
/bin/sh + + +#tempdir=`mktemp -t test -d` +#trap "rm -rf $tempdir" 0 1 2 15 +tempdir=data +mkdir -p $tempdir + +object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 97 101 103 -step 4 4 4 -start -200 -200 -200" + +object_opts2="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 97 101 103 -step 1 1 1 -start -50 -50 -50" + + +# make bunch of ellipses +make_phantom $object_opts -ellipse -center 0 0 0 -width 150 150 150 $tempdir/ellipse_0.mnc +make_phantom $object_opts -ellipse -center 0 20 0 -width 100 150 100 $tempdir/ellipse_1.mnc +make_phantom $object_opts -ellipse -center 0 -20 0 -width 100 150 100 $tempdir/ellipse_2.mnc + +for i in $(seq 0 2);do + fast_blur --fwhm 8 $tempdir/ellipse_$i.mnc $tempdir/ellipse_${i}_blur.mnc +done + +make_phantom $object_opts2 -ellipse -center 0 0 0 -width 37 37 37 $tempdir/ellipse_0_.mnc +make_phantom $object_opts2 -ellipse -center 0 5 0 -width 25 37 25 $tempdir/ellipse_1_.mnc +make_phantom $object_opts2 -ellipse -center 0 -5 0 -width 25 37 25 $tempdir/ellipse_2_.mnc + +for i in $(seq 0 2);do + fast_blur --fwhm 4 $tempdir/ellipse_${i}_.mnc $tempdir/ellipse_${i}_blur_.mnc +done diff --git a/examples/synthetic_tests/test_registration/test_ants.sh b/examples/synthetic_tests/test_registration/test_ants.sh new file mode 100755 index 0000000..69a86a9 --- /dev/null +++ b/examples/synthetic_tests/test_registration/test_ants.sh @@ -0,0 +1,22 @@ +#! /bin/sh + +in1=data/ellipse_0_blur.mnc +in2=data/ellipse_2_blur.mnc + +# run Exponential +antsRegistration --collapse-output-transforms 0 -d 3 \ + --float 0 --verbose 1 --minc 1 \ + -c '[1000x1000x1000,1e-7,100]' \ + --transform 'Exponential[0.2,1.0,1.0]' \ + -m "CC[$in1,$in2,1.0,4,Regular,0.1]" \ + -s 8x4x2 -f 8x4x2 \ + -o "[test_exp_,test_exp_in1.mnc,test_exp_in2.mnc]" + +# run SyN +antsRegistration --collapse-output-transforms 0 -d 3 \ + --float 0 --verbose 1 --minc 1 \ + -c '[1000x1000x1000,1e-7,100]' \ + --transform 'SyN[0.2,1.0,1.0]' \ + -m "CC[$in1,$in2,1.0,4,Regular,0.1]" \ + -s 8x4x2 -f 8x4x2 \ + -o "[test_syn_,test_syn_in1.mnc,test_syn_in2.mnc]" \ No newline at end of file diff --git a/examples/synthetic_tests/test_registration/test_ldd_reg.py b/examples/synthetic_tests/test_registration/test_ldd_reg.py new file mode 100755 index 0000000..58c77c7 --- /dev/null +++ b/examples/synthetic_tests/test_registration/test_ldd_reg.py @@ -0,0 +1,47 @@ +#! 
/usr/bin/env python + + +import shutil +import os +import sys +import csv +import traceback +import argparse +import json +import tempfile +import re +import copy +import random + +# MINC stuff +from iplMincTools import mincTools,mincError + +if __name__=='__main__': + with mincTools() as minc: + + + for s in range(0,8): + for g in range(0,8): + par={'conf':{}, + 'smooth_update':s, + 'smooth_field':g, + 'update_rule':1, + 'grad_type':0, + 'max_step':2.0 } + + xfm="test_{}_{}_ldd.xfm".format(s,g) + grid="test_{}_{}_ldd_grid_0.mnc".format(s,g) + grid_m="test_{}_{}_ldd_grid_m.mnc".format(s,g) + test_out="test_{}_{}_ldd_test.mnc".format(s,g) + test_qc="test_{}_{}_ldd_test.jpg".format(s,g) + + minc.non_linear_register_ldd( + "data/ellipse_0_blur.mnc","data/ellipse_1_blur.mnc","test_{}_{}_vel.mnc".format(s,g), + output_xfm=xfm, start=8,level=2,parameters=par) + + minc.grid_magnitude(grid,grid_m) + + minc.resample_smooth("data/ellipse_0_blur.mnc",test_out,transform=xfm) + + minc.qc("data/ellipse_1_blur.mnc",test_qc,mask=test_out,image_range=[0,100],mask_range=[0,100]) + \ No newline at end of file diff --git a/examples/synthetic_tests/test_registration/test_nl_reg.py b/examples/synthetic_tests/test_registration/test_nl_reg.py new file mode 100755 index 0000000..b70dee3 --- /dev/null +++ b/examples/synthetic_tests/test_registration/test_nl_reg.py @@ -0,0 +1,143 @@ +#! /usr/bin/env python + + +import shutil +import os +import sys +import csv +import traceback +import argparse +import json +import tempfile +import re +import copy +import random + +# MINC stuff +from iplMincTools import mincTools,mincError + +elx_par1=""" +(FixedInternalImagePixelType "float") +(MovingInternalImagePixelType "float") +(FixedImageDimension 3) +(MovingImageDimension 3) +(UseDirectionCosines "true") + +(ShowExactMetricValue "false") + +(Registration "MultiResolutionRegistration") +(Interpolator "BSplineInterpolator" ) +(ResampleInterpolator "FinalBSplineInterpolator" ) +(Resampler "DefaultResampler" ) + +(FixedImagePyramid "FixedSmoothingImagePyramid") +(MovingImagePyramid "MovingSmoothingImagePyramid") + +(Optimizer "AdaptiveStochasticGradientDescent") +(Transform "BSplineTransform") +(Metric "AdvancedNormalizedCorrelation") + +(FinalGridSpacingInPhysicalUnits 32) + +(HowToCombineTransforms "Compose") + +(ErodeMask "false") + +(NumberOfResolutions 2) + +(ImagePyramidSchedule 8 8 8 4 4 4 ) + +(MaximumNumberOfIterations 200 200 200 ) +(MaximumNumberOfSamplingAttempts 3) + +(NumberOfSpatialSamples 1024 ) + +(NewSamplesEveryIteration "true") +(ImageSampler "Random" ) + +(BSplineInterpolationOrder 1) + +(FinalBSplineInterpolationOrder 1) + +(DefaultPixelValue 0) + +(WriteResultImage "false") + +// The pixel type and format of the resulting deformed moving image +(ResultImagePixelType "float") +(ResultImageFormat "mnc") +""" + + +elx_par2=""" +(FixedInternalImagePixelType "float") +(MovingInternalImagePixelType "float") +(FixedImageDimension 3) +(MovingImageDimension 3) +(UseDirectionCosines "true") + +(ShowExactMetricValue "false") + +(Registration "MultiResolutionRegistration") +(Interpolator "BSplineInterpolator" ) +(ResampleInterpolator "FinalBSplineInterpolator" ) +(Resampler "DefaultResampler" ) + +(FixedImagePyramid "FixedSmoothingImagePyramid") +(MovingImagePyramid "MovingSmoothingImagePyramid") + +(Optimizer "AdaptiveStochasticGradientDescent") +(Transform "BSplineTransform") +(Metric "AdvancedNormalizedCorrelation") + +(FinalGridSpacingInPhysicalUnits 2) + +(HowToCombineTransforms "Compose") + +(ErodeMask "false") + 
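+// finer-resolution variant of elx_par1: final grid spacing 2 instead of 32,
+// with a 4/2 pyramid schedule (see below)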
+(NumberOfResolutions 2) + +(ImagePyramidSchedule 4 4 4 2 2 2 ) + +(MaximumNumberOfIterations 200 200 200 ) +(MaximumNumberOfSamplingAttempts 3) + +(NumberOfSpatialSamples 1024 ) + +(NewSamplesEveryIteration "true") +(ImageSampler "Random" ) + +(BSplineInterpolationOrder 1) + +(FinalBSplineInterpolationOrder 1) + +(DefaultPixelValue 0) + +(WriteResultImage "false") + +// The pixel type and format of the resulting deformed moving image +(ResultImagePixelType "float") +(ResultImageFormat "mnc") +""" + + +if __name__=='__main__': + with mincTools() as minc: + minc.register_elastix("data/ellipse_0_blur.mnc","data/ellipse_1_blur.mnc", + output_par="test_4mm_0_1_par.txt",output_xfm="test_4mm_0_1.xfm",parameters=elx_par1) + + minc.grid_magnitude("test_4mm_0_1_grid_0.mnc","test_4mm_0_1_grid_m.mnc") + + minc.register_elastix("data/ellipse_0_blur.mnc","data/ellipse_1_blur.mnc",output_par="test_4mm_0_1_2_par.txt", + output_xfm="test_4mm_0_1_2.xfm",parameters=elx_par1,init_xfm="test_4mm_0_1.xfm") + + minc.grid_magnitude("test_4mm_0_1_2_grid_0.mnc","test_4mm_0_1_2_grid_m.mnc") + + minc.register_elastix("data/ellipse_0_blur.mnc","data/ellipse_1_blur.mnc",output_par="test_4mm_0_1_3_par.txt", + output_xfm="test_4mm_0_1_3.xfm",parameters=elx_par1,init_par="test_4mm_0_1_par.txt") + + minc.grid_magnitude("test_4mm_0_1_3_grid_0.mnc","test_4mm_0_1_3_grid_m.mnc") + + minc.register_elastix("data/ellipse_0_blur_.mnc","data/ellipse_1_blur_.mnc",output_par="test_1mm_0_1_par.txt",output_xfm="test_1mm_0_1.xfm",parameters=elx_par1) + minc.grid_magnitude("test_1mm_0_1_grid_0.mnc","test_1mm_0_1_grid_m.mnc") \ No newline at end of file diff --git a/examples/synthetic_tests/test_segmentation/.gitignore b/examples/synthetic_tests/test_segmentation/.gitignore new file mode 100644 index 0000000..c30390d --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/.gitignore @@ -0,0 +1,6 @@ +# ignore all automatically generated files and outputs +*.xfm +*.mnc +*.txt +*.log +test_* \ No newline at end of file diff --git a/examples/synthetic_tests/test_segmentation/cv.json b/examples/synthetic_tests/test_segmentation/cv.json new file mode 100644 index 0000000..0eac201 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/cv.json @@ -0,0 +1,9 @@ +{ + "validation_library":"seg_subjects.lst", + "iterations":10, + "cv":2, + "fuse_variant":"fuse_1", + "ec_variant":"ec_xgb", + "cv_variant":"cv_xgb", + "regularize_variant":"reg_1" +} diff --git a/examples/synthetic_tests/test_segmentation/cv_ants.json b/examples/synthetic_tests/test_segmentation/cv_ants.json new file mode 100644 index 0000000..66d71d3 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/cv_ants.json @@ -0,0 +1,5 @@ +{ + "validation_library":"seg_subjects.lst", + "iterations":-1, + "cv":1 +} diff --git a/examples/synthetic_tests/test_segmentation/cv_ants2.json b/examples/synthetic_tests/test_segmentation/cv_ants2.json new file mode 100644 index 0000000..7879706 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/cv_ants2.json @@ -0,0 +1,9 @@ +{ + "validation_library":"seg_subjects.lst", + "iterations":-1, + "cv":1, + "fuse_variant":"fuse", + "ec_variant":"ec2", + "cv_variant":"cv2", + "regularize_variant":"gc" +} diff --git a/examples/synthetic_tests/test_segmentation/cv_nl.json b/examples/synthetic_tests/test_segmentation/cv_nl.json new file mode 100644 index 0000000..66d71d3 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/cv_nl.json @@ -0,0 +1,5 @@ +{ + "validation_library":"seg_subjects.lst", + "iterations":-1, + "cv":1 
+}
diff --git a/examples/synthetic_tests/test_segmentation/cv_triple.json b/examples/synthetic_tests/test_segmentation/cv_triple.json
new file mode 100644
index 0000000..4ff5132
--- /dev/null
+++ b/examples/synthetic_tests/test_segmentation/cv_triple.json
@@ -0,0 +1,9 @@
+{
+  "validation_library":"seg_subjects_triple.lst",
+  "iterations":10,
+  "cv":2,
+  "fuse_variant":"fuse_1",
+  "ec_variant":"ec_1",
+  "cv_variant":"cv_1",
+  "regularize_variant":"reg_1"
+}
diff --git a/examples/synthetic_tests/test_segmentation/dumb_segment.py b/examples/synthetic_tests/test_segmentation/dumb_segment.py
new file mode 100755
index 0000000..6b1b039
--- /dev/null
+++ b/examples/synthetic_tests/test_segmentation/dumb_segment.py
@@ -0,0 +1,71 @@
+#! /usr/bin/env python
+
+# standard library
+import argparse
+# minc
+import minc
+# numpy
+import numpy as np
+
+def coords(string):
+    c=[float(i) for i in string.split(',')]
+
+    if len(c)!=3 :
+        raise argparse.ArgumentTypeError('Expect three coordinates')
+    return c
+
+def parse_options():
+    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+                                     description='Create a simple synthetic segmentation: split an object into octants around a center point')
+
+    parser.add_argument('--center',type=coords,
+                        default=[0.0,0.0,0.0],action = 'store',
+                        help="Center coordinate")
+
+    parser.add_argument('input',
+                        help='Input image')
+
+    parser.add_argument('output',
+                        help='Output image')
+
+    options = parser.parse_args()
+
+    return options
+
+
+def dumb_segment(img, center):
+    # voxel index grids along each axis
+    c=np.mgrid[ 0:img.shape[0] ,
+                0:img.shape[1] ,
+                0:img.shape[2] ]
+
+    # labels 1..8, one per octant around the center voxel
+    seg=( c[2]>center[0] )*1+\
+        ( c[1]>center[1] )*2+\
+        ( c[0]>center[2] )*4+ 1
+
+    # low-intensity voxels become background (label 0)
+    seg[ img < 50 ] = 0
+
+    return np.asarray(seg,dtype=np.int32 )
+
+if __name__ == "__main__":
+    options = parse_options()
+    print(repr(options))
+    img = minc.Image(options.input)
+    # convert the world-space center to voxel coordinates
+    center_vox=[(options.center[i]-img.start()[i])/img.spacing()[i] for i in range(3)]
+    print(repr(center_vox))
+    seg=dumb_segment(img.data, center_vox)
+
+    minc.Label( data=seg ).save(name=options.output, imitate=options.input)
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/examples/synthetic_tests/test_segmentation/ec_train.json b/examples/synthetic_tests/test_segmentation/ec_train.json
new file mode 100644
index 0000000..7eeb5a1
--- /dev/null
+++ b/examples/synthetic_tests/test_segmentation/ec_train.json
@@ -0,0 +1,14 @@
+{
+  "method"  : "xgb",
+  "method2" : "xgb",
+
+  "border_mask": true,
+  "border_mask_width": 3,
+  "patch_size": 0,
+  "use_coord": false,
+  "use_joint": false,
+  "use_raw": true,
+
+  "train_rounds": 3,
+  "train_cv": 2
+}
diff --git a/examples/synthetic_tests/test_segmentation/ec_train_ants2.json b/examples/synthetic_tests/test_segmentation/ec_train_ants2.json
new file mode 100644
index 0000000..d53f5a9
--- /dev/null
+++ b/examples/synthetic_tests/test_segmentation/ec_train_ants2.json
@@ -0,0 +1,20 @@
+{
+  "method" : "AdaBoost",
+  "method_n" : 200,
+  "border_mask": true,
+  "border_mask_width": 2,
+  "use_coord": true,
+  "use_joint": true,
+  "patch_size": 1,
+  "use_raw": true,
+
+  "normalize_input": false,
+  "primary_features": 1,
+  "max_samples": -1,
+  "sample_pick": "first",
+
+  "antialias_labels": false,
+  "blur_labels": 2,
+  "expit_labels": 2,
+  "normalize_labels": true
+}
diff --git
a/examples/synthetic_tests/test_segmentation/ec_train_triple.json b/examples/synthetic_tests/test_segmentation/ec_train_triple.json new file mode 100644 index 0000000..be61d47 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/ec_train_triple.json @@ -0,0 +1,11 @@ +{ + "method" : "AdaBoost", + "method_n" : 100, + "border_mask": true, + "border_mask_width": 3, + "use_coord": true, + "use_joint": true, + + "train_rounds": 3, + "train_cv": 2 +} diff --git a/examples/synthetic_tests/test_segmentation/library_description.json b/examples/synthetic_tests/test_segmentation/library_description.json new file mode 100644 index 0000000..c6d3590 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/library_description.json @@ -0,0 +1,21 @@ +{ + "reference_model": "data/ellipse_0_blur.mnc", + "reference_mask": "data/ellipse_0_mask.mnc", + "reference_local_model" : null, + "reference_local_mask" : null, + "library":"seg_subjects.lst", + "build_remap": [ [1, 1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,7], [8,8] ], + "build_flip_remap": null, + "parts": 0, + "classes": 9, + "build_symmetric": false, + "build_symmetric_flip": false, + "symmetric_lut": null, + "denoise": false, + "denoise_beta": null, + "linear_register": false, + "local_linear_register": true, + "non_linear_register": false, + "resample_order": 2, + "resample_baa": true +} diff --git a/examples/synthetic_tests/test_segmentation/library_description_ants.json b/examples/synthetic_tests/test_segmentation/library_description_ants.json new file mode 100644 index 0000000..c730e0c --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/library_description_ants.json @@ -0,0 +1,22 @@ +{ + "reference_model": "data/ellipse_0_blur.mnc", + "reference_mask": "data/ellipse_0_mask.mnc", + "reference_local_model" : null, + "reference_local_mask" : null, + "library":"seg_subjects.lst", + "build_remap": [ [1, 1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,7], [8,8] ], + "build_flip_remap": null, + "parts": 0, + "classes": 9, + "build_symmetric": false, + "build_symmetric_flip": false, + "symmetric_lut": null, + "denoise": false, + "denoise_beta": null, + "initial_register": null, + "initial_local_register": null, + "non_linear_register": true, + "resample_order": 2, + "resample_baa": true, + "non_linear_register_ants": true +} diff --git a/examples/synthetic_tests/test_segmentation/library_description_nl.json b/examples/synthetic_tests/test_segmentation/library_description_nl.json new file mode 100644 index 0000000..182f9ff --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/library_description_nl.json @@ -0,0 +1,22 @@ +{ + "reference_model": "data/ellipse_0_blur.mnc", + "reference_mask": "data/ellipse_0_mask.mnc", + "reference_local_model" : null, + "reference_local_mask" : null, + "library":"seg_subjects.lst", + "build_remap": [ [1, 1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,7], [8,8] ], + "build_flip_remap": null, + "parts": 0, + "classes": 9, + "build_symmetric": false, + "build_symmetric_flip": false, + "symmetric_lut": null, + "denoise": false, + "denoise_beta": null, + "linear_register": false, + "local_linear_register": true, + "non_linear_register": false, + "resample_order": 2, + "resample_baa": true, + "non_linear_register_level": 4 +} diff --git a/examples/synthetic_tests/test_segmentation/library_description_triple.json b/examples/synthetic_tests/test_segmentation/library_description_triple.json new file mode 100644 index 0000000..cf1b169 --- /dev/null +++ 
b/examples/synthetic_tests/test_segmentation/library_description_triple.json @@ -0,0 +1,23 @@ +{ + "reference_model": "data/ellipse_0_blur.mnc", + "reference_mask": "data/ellipse_0_mask.mnc", + "reference_add": [ "data/ellipse_0_blur.mnc", "data/ellipse_0_blur.mnc"], + "reference_local_model" : null, + "reference_local_mask" : null, + "library":"seg_subjects_triple.lst", + "build_remap": [ [1, 1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,7], [8,8] ], + "build_flip_remap": null, + "parts": 0, + "classes": 9, + "build_symmetric": false, + "build_symmetric_flip": false, + "symmetric_lut": null, + "denoise": false, + "denoise_beta": null, + "linear_register": false, + "local_linear_register": true, + "non_linear_register": false, + "resample_order": 2, + "resample_baa": true, + "modalities" : 3 +} diff --git a/examples/synthetic_tests/test_segmentation/prepare_test_data.sh b/examples/synthetic_tests/test_segmentation/prepare_test_data.sh new file mode 100755 index 0000000..d307172 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/prepare_test_data.sh @@ -0,0 +1,50 @@ +#! /bin/sh + + +#tempdir=`mktemp -t test -d` +#trap "rm -rf $tempdir" 0 1 2 15 +tempdir=data +mkdir -p $tempdir + +object_opts="-short -real_range 0 100 -background 0 -edge_value 100 -fill_value 100 -nelements 97 101 103 -step 4 4 4 -start -200 -200 -200" + +seg_opts="-byte -real_range 0 1 -background 0 -edge_value 1 -fill_value 1 -no_partial -nelements 100 100 100 -step 4 4 4 -start -200 -200 -200" + + +make_phantom $object_opts -ellipse -center 0 0 0 -width 150 150 150 $tempdir/ellipse_0.mnc + +# make bunch of ellipses +make_phantom $object_opts -ellipse -center -10 0 0 -width 150 100 100 $tempdir/ellipse_1.mnc +make_phantom $object_opts -ellipse -center 10 0 0 -width 150 100 100 $tempdir/ellipse_2.mnc + +make_phantom $object_opts -ellipse -center 0 -10 0 -width 100 150 100 $tempdir/ellipse_3.mnc +make_phantom $object_opts -ellipse -center 0 10 0 -width 100 150 100 $tempdir/ellipse_4.mnc + +make_phantom $object_opts -ellipse -center 0 0 -10 -width 100 100 150 $tempdir/ellipse_5.mnc +make_phantom $object_opts -ellipse -center 0 0 10 -width 100 100 150 $tempdir/ellipse_6.mnc + + +for i in $(seq 0 6);do + fast_blur --fwhm 8 $tempdir/ellipse_$i.mnc $tempdir/ellipse_${i}_blur.mnc +done + +# make segmentations +./dumb_segment.py $tempdir/ellipse_0.mnc $tempdir/ellipse_0_seg.mnc --center 0,0,0 +./dumb_segment.py $tempdir/ellipse_1.mnc $tempdir/ellipse_1_seg.mnc --center " -10,0,0" +./dumb_segment.py $tempdir/ellipse_2.mnc $tempdir/ellipse_2_seg.mnc --center 10,0,0 +./dumb_segment.py $tempdir/ellipse_3.mnc $tempdir/ellipse_3_seg.mnc --center 0,-10,0 +./dumb_segment.py $tempdir/ellipse_4.mnc $tempdir/ellipse_4_seg.mnc --center 0,10,0 +./dumb_segment.py $tempdir/ellipse_5.mnc $tempdir/ellipse_5_seg.mnc --center 0,0,-10 +./dumb_segment.py $tempdir/ellipse_6.mnc $tempdir/ellipse_6_seg.mnc --center 0,0,10 + + +# create reference mask +itk_morph --threshold 10 --exp 'D[2]' $tempdir/ellipse_0_blur.mnc $tempdir/ellipse_0_mask.mnc + +rm -f seg_subjects.lst + +for i in $(seq 0 6);do + echo $tempdir/ellipse_${i}_blur.mnc,$tempdir/ellipse_${i}_seg.mnc >> seg_subjects.lst +done + + diff --git a/examples/synthetic_tests/test_segmentation/run_test.sh b/examples/synthetic_tests/test_segmentation/run_test.sh new file mode 100755 index 0000000..7d06f88 --- /dev/null +++ b/examples/synthetic_tests/test_segmentation/run_test.sh @@ -0,0 +1,105 @@ +#! 
/bin/sh + + +PREFIX=$(pwd)/../../python + +export PYTHONPATH=$PREFIX:$PYTHONPATH + + +cat - > library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description.json < cv.json < segment.json < ec_train.json < library_description_ants.json < cv_ants.json < segment_ants.json < library_description_ants.json < cv_ants2.json < segment_ants2.json < ec_train_ants2.json < library_description_ants.json < cv_ants3.json < segment_ants.json < ec_train_ants3.json < library_description_ants.json < cv_ants4.json < segment_ants.json < ec_train_ants4.json < library_description_ants.json < cv_ants5.json < segment_ants.json < ec_train_ants5.json < library_description_ants.json < cv_ants2.json < elastix_lin.txt < segment_ants2.json < ec_train_ants2.json < library_description_nl.json < cv_nl.json < segment_nl_ext.json < library_description_nl.json < cv_nl.json < segment_nl.json < ec_train_nl.json < library_description.json < cv.json < segment_nnls.json < ec_train.json < library_description_triple.json < cv_triple.json < segment_triple.json < ec_train_triple.json < library_description.json < cv.json < segment.json < ec_train.json < " + echo "Usefull environment variables:" + echo "MNI_DATAPATH - location of MNI datasets ( /opt/minc/share )" + echo " should include icbm152_model_09c and beast-library-1.1" + echo "PARALLEL - number of paralell processes to use" + exit 1 +fi + +# setup variables +MNI_DATAPATH=${MNI_DATAPATH:-/opt/minc/share} +PARALLEL=${PARALLEL:-1} + + +icbm_model_dir=$MNI_DATAPATH/icbm152_model_09c +beast_model_dir=$MNI_DATAPATH/beast-library-1.1 + + +if [ ! -d $icbm_model_dir ];then + echo "Missing $icbm_model_dir" + exit 1 +fi + +if [ ! -d $beast_model_dir ];then + echo "Missing $beast_model_dir" + exit 1 +fi + +pipeline_dir=$(dirname $0)/.. +data_dir=$(dirname $0) + +#export PYTHONPATH=$(realpath $pipeline_dir/python) +export PYTHONPATH=$pipeline_dir/python +#export PATH=$pipeline_dir/bin:$PATH +export OMP_NUM_THREADS=1 +export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1 + +### Run pipeline for NC scan rescan data ### +python -m scoop -vvv -n $PARALLEL $pipeline_dir/python/iplLongitudinalPipeline.py \ + -L \ + -l $data_dir/nc_scan_rescan_validation_list.csv \ + -o $output_dir \ + --model-dir=$icbm_model_dir \ + --model-name=mni_icbm152_t1_tal_nlin_sym_09c \ + --beast-dir=$beast_model_dir diff --git a/ipl/__init__.py b/ipl/__init__.py new file mode 100755 index 0000000..6cfae61 --- /dev/null +++ b/ipl/__init__.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# +# @author Vladimir S. FONOV +# @date 12/10/2014 +# +# Run fusion segmentation + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/ants_registration.py b/ipl/ants_registration.py new file mode 100755 index 0000000..d12417f --- /dev/null +++ b/ipl/ants_registration.py @@ -0,0 +1,667 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV
+# @date 29/06/2015
+#
+# registration tools
+
+
+from __future__ import print_function
+
+import os
+import sys
+import shutil
+import tempfile
+import subprocess
+import re
+import fcntl
+import traceback
+import collections
+import math
+
+# command-line interface
+import argparse
+
+# local stuff
+import minc_tools
+
+
+# hack to make it work on Python 3
+try:
+    unicode = unicode
+except NameError:
+    # 'unicode' is undefined, must be Python 3
+    str = str
+    unicode = str
+    bytes = bytes
+    basestring = (str,bytes)
+else:
+    # 'unicode' exists, must be Python 2
+    str = str
+    unicode = unicode
+    bytes = str
+    basestring = basestring
+
+
+def ants_linear_register(
+    source,
+    target,
+    output_xfm,
+    parameters=None,
+    source_mask=None,
+    target_mask=None,
+    init_xfm=None,
+    objective=None,
+    conf=None,
+    debug=False,
+    close=False,
+    work_dir=None,
+    downsample=None,
+    verbose=0
+    ):
+    """perform linear registration with ANTs"""
+
+    # TODO: make use of parameters
+
+    if parameters is None:
+        parameters={}
+
+    with minc_tools.mincTools(verbose=verbose) as minc:
+        if not minc.checkfiles(inputs=[source,target], outputs=[output_xfm]):
+            return
+
+        s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+        t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+
+        source_lr=source
+        target_lr=target
+
+        source_mask_lr=source_mask
+        target_mask_lr=target_mask
+        # figure out what to do here:
+        with minc_tools.cache_files(work_dir=work_dir,context='reg') as tmp:
+
+            if downsample is not None:
+                source_lr=tmp.cache(s_base+'_'+str(downsample)+'.mnc')
+                target_lr=tmp.cache(t_base+'_'+str(downsample)+'.mnc')
+
+                minc.resample_smooth(source,source_lr,unistep=downsample)
+                minc.resample_smooth(target,target_lr,unistep=downsample)
+
+                if source_mask is not None:
+                    source_mask_lr=tmp.cache(s_base+'_mask_'+str(downsample)+'.mnc')
+                    minc.resample_labels(source_mask,source_mask_lr,unistep=downsample,datatype='byte')
+                if target_mask is not None:
+                    target_mask_lr=tmp.cache(t_base+'_mask_'+str(downsample)+'.mnc')
+                    minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte')
+
+            iterations=parameters.get('affine-iterations','10000x10000x10000x10000x10000')
+
+            default_gradient_descent_option='0.5x0.95x1.e-5x1.e-4'
+            if close: default_gradient_descent_option='0.05x0.5x1.e-4x1.e-4'
+            gradient_descent_option=parameters.get('gradient_descent_option',default_gradient_descent_option)
+
+            mi_option=parameters.get('mi-option','32x16000')
+            use_mask=parameters.get('use_mask',True)
+            use_histogram_matching=parameters.get('use_histogram_matching',False)
+            affine_metric=parameters.get('metric_type','MI')
+            affine_rigid=parameters.get('rigid',False)
+            winsorize_intensity=parameters.get('winsorize-image-intensities',None)
+
+            cost_function_par='1,4'
+
+            cmd=['ANTS','3']
+
+            cmd.extend(['-m','{}[{},{},{}]'.format('CC',source_lr,target_lr,cost_function_par)])
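+            # '-i 0' requests zero deformable iterations, so ANTS performs
+            # a purely affine registration here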
            cmd.extend(['-i','0'])
+            cmd.extend(['--number-of-affine-iterations',iterations])
+            cmd.extend(['--affine-gradient-descent-option', gradient_descent_option])
+            cmd.extend(['--MI-option', mi_option])
+            cmd.extend(['--affine-metric-type', affine_metric])
+
+            if affine_rigid:
+                cmd.append('--rigid-affine')
+
+            cmd.extend(['-o',output_xfm])
+
+            inputs=[source_lr,target_lr]
+            if target_mask_lr is not None and use_mask:
+                inputs.append(target_mask_lr)
+                cmd.extend(['-x',target_mask_lr])
+
+            if use_histogram_matching:
+                cmd.append('--use-Histogram-Matching')
+
+            if winsorize_intensity is not None:
+                if isinstance(winsorize_intensity, dict):
+                    cmd.extend(['--winsorize-image-intensities',str(winsorize_intensity.get('low',5)),str(winsorize_intensity.get('high',95))])
+                else:
+                    cmd.append('--winsorize-image-intensities')
+
+            if init_xfm is not None:
+                cmd.extend(['--initial-affine',init_xfm])
+
+            outputs=[output_xfm] # TODO: add inverse xfm ?
+            minc.command(cmd, inputs=inputs, outputs=outputs)
+
+
+
+def non_linear_register_ants(
+    source, target, output_xfm,
+    target_mask=None,
+    init_xfm  =None,
+    parameters=None,
+    downsample=None,
+    verbose=0
+    ):
+    """perform non-linear registration using ANTs; WARNING: will also create an inverted xfm named output_invert.xfm"""
+
+    with minc_tools.mincTools(verbose=verbose) as minc:
+
+        if parameters is None:
+            #print("Using default ANTS parameters")
+            parameters={}
+
+
+        if not minc.checkfiles(inputs=[source,target],
+                               outputs=[output_xfm ]):
+            return
+
+        cost_function=parameters.get('cost_function','CC')
+        cost_function_par=parameters.get('cost_function_par','1,2')
+
+        reg=parameters.get('regularization','Gauss[2,0.5]')
+        iterations=parameters.get('iter','20x20x0')
+        transformation=parameters.get('transformation','SyN[0.25]')
+        affine_iterations=parameters.get('affine-iterations','0x0x0')
+        use_mask=parameters.get('use_mask',True)
+        use_histogram_matching=parameters.get('use_histogram_matching',False)
+
+        cmd=['ANTS','3']
+
+        s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+        t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+
+        source_lr=source
+        target_lr=target
+
+        target_mask_lr=target_mask
+
+        if downsample is not None:
+            # no cache_files context here, so use minc.tmp for the temporaries
+            source_lr=minc.tmp(s_base+'_'+str(downsample)+'.mnc')
+            target_lr=minc.tmp(t_base+'_'+str(downsample)+'.mnc')
+
+            minc.resample_smooth(source,source_lr,unistep=downsample)
+            minc.resample_smooth(target,target_lr,unistep=downsample)
+
+            if target_mask is not None:
+                target_mask_lr=minc.tmp(t_base+'_mask_'+str(downsample)+'.mnc')
+                minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte')
+
+
+        cmd.extend(['-m','{}[{},{},{}]'.format(cost_function,source_lr,target_lr,cost_function_par)])
+        cmd.extend(['-i',iterations])
+        cmd.extend(['-t',transformation])
+        cmd.extend(['-r',reg])
+        cmd.extend(['--number-of-affine-iterations',affine_iterations])
+        cmd.extend(['-o',output_xfm])
+
+        inputs=[source_lr,target_lr]
+        if target_mask_lr is not None and use_mask:
+            inputs.append(target_mask_lr)
+            cmd.extend(['-x',target_mask_lr])
+
+        if use_histogram_matching:
+            cmd.append('--use-Histogram-Matching')
+
+        outputs=[output_xfm] # TODO: add inverse xfm ?
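+        # inputs/outputs are declared so minc.command can track the files
+        # involved, presumably mirroring the checkfiles() test above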
+
+        #print(repr(cmd))
+
+        minc.command(cmd, inputs=inputs, outputs=outputs)
+
+
+def non_linear_register_ants2(
+    source, target, output_xfm,
+    target_mask=None,
+    source_mask=None,
+    init_xfm  =None,
+    parameters=None,
+    downsample=None,
+    start     =None,
+    level     =32.0,
+    verbose   =0
+    ):
+    """perform non-linear registration using ANTs; WARNING: will also create an inverted xfm named output_invert.xfm"""
+    if start is None:
+        start=level
+
+    with minc_tools.mincTools(verbose=verbose) as minc:
+
+        if parameters is None:
+            #TODO add more options here
+            parameters={'conf':{},
+                        'blur':{},
+                        'shrink':{}
+                       }
+        else:
+            if not 'conf'   in parameters: parameters['conf']   = {}
+            if not 'blur'   in parameters: parameters['blur']   = {}
+            if not 'shrink' in parameters: parameters['shrink'] = {}
+
+        prog=''
+        shrink=''
+        blur=''
+        for i in range(int(math.log(start)/math.log(2)),-1,-1):
+            res=2**i
+            if res>=level:
+                prog+=  str(parameters['conf'].  get(res,parameters['conf'].  get(str(res),20)))
+                shrink+=str(parameters['shrink'].get(res,parameters['shrink'].get(str(res),2**i)))
+                blur+=  str(parameters['blur'].  get(res,parameters['blur'].  get(str(res),2**i)))
+            if res>level:
+                prog+='x'
+                shrink+='x'
+                blur+='x'
+
+        if not minc.checkfiles(inputs=[source,target],
+                               outputs=[output_xfm ]):
+            return
+
+        prog+=','+parameters.get('convergence','1.e-6,10')
+
+        output_base=output_xfm.rsplit('.xfm',1)[0]
+
+        cost_function=parameters.get('cost_function','CC')
+        cost_function_par=parameters.get('cost_function_par','1,2,Regular,1.0')
+
+        transformation=parameters.get('transformation','SyN[ .25, 2, 0.5 ]')
+        use_mask=parameters.get('use_mask',True)
+        use_histogram_matching=parameters.get('use_histogram_matching',False)
+        use_float=parameters.get('use_float',False)
+
+        winsorize_intensity=parameters.get('winsorize-image-intensities',None)
+
+        cmd=['antsRegistration','--minc','-a','--dimensionality','3']
+
+        s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+        t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+
+        source_lr=source
+        target_lr=target
+        source_mask_lr=source_mask
+        target_mask_lr=target_mask
+
+        if downsample is not None:
+            source_lr=minc.tmp(s_base+'_'+str(downsample)+'.mnc')
+            target_lr=minc.tmp(t_base+'_'+str(downsample)+'.mnc')
+
+            minc.resample_smooth(source,source_lr,unistep=downsample)
+            minc.resample_smooth(target,target_lr,unistep=downsample)
+
+            if source_mask is not None:
+                source_mask_lr=minc.tmp(s_base+'_mask_'+str(downsample)+'.mnc')
+                minc.resample_labels(source_mask,source_mask_lr,unistep=downsample,datatype='byte')
+            if target_mask is not None:
+                target_mask_lr=minc.tmp(t_base+'_mask_'+str(downsample)+'.mnc')
+                minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte')
+
+        cmd.extend(['--metric','{}[{},{},{}]'.format(cost_function,source_lr,target_lr,cost_function_par)])
+        cmd.extend(['--convergence','[{}]'.format(prog)])
+        cmd.extend(['--shrink-factors',shrink])
+        cmd.extend(['--smoothing-sigmas',blur])
+        cmd.extend(['--transform',transformation])
+
+        cmd.extend(['--output',output_base])
+        #cmd.extend(['--save-state',output_xfm])
+
+        if init_xfm is not None:
+            cmd.extend(['--initial-fixed-transform',init_xfm])
+
+        inputs=[source_lr,target_lr]
+
+        if target_mask_lr is not None and source_mask_lr is not None and use_mask:
+            inputs.extend([source_mask_lr, target_mask_lr])
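+            # antsRegistration expects the mask pair as -x '[fixed_mask,moving_mask]'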
            cmd.extend(['-x','[{},{}]'.format(source_mask_lr, target_mask_lr)])
+
+        if use_histogram_matching:
+            cmd.append('--use-histogram-matching')
+
+        if winsorize_intensity is not None:
+            if isinstance(winsorize_intensity, dict):
+                cmd.extend(['--winsorize-image-intensities',str(winsorize_intensity.get('low',1)),str(winsorize_intensity.get('high',99))])
+            else:
+                cmd.append('--winsorize-image-intensities')
+
+        if use_float:
+            cmd.append('--float')
+
+        if verbose>0:
+            cmd.extend(['--verbose','1'])
+
+        outputs=[output_xfm] # TODO: add inverse xfm ?
+
+
+        print(">>>\n{}\n>>>>".format(' '.join(cmd)))
+
+        minc.command(cmd, inputs=inputs, outputs=outputs)
+
+def linear_register_ants2(
+    source, target, output_xfm,
+    target_mask=None,
+    source_mask=None,
+    init_xfm  =None,
+    parameters=None,
+    downsample=None,
+    close=False,
+    verbose=0
+    ):
+    """perform linear registration using ANTs"""
+    #TODO: implement close
+
+
+    with minc_tools.mincTools(verbose=verbose) as minc:
+
+
+        if parameters is None:
+            #TODO add more options here
+            parameters={
+                'conf':{},
+                'blur':{},
+                'shrink':{}
+            }
+        else:
+            if not 'conf'   in parameters: parameters['conf']   = {}
+            if not 'blur'   in parameters: parameters['blur']   = {}
+            if not 'shrink' in parameters: parameters['shrink'] = {}
+
+        levels=parameters.get('levels',3)
+        prog=''
+        shrink=''
+        blur=''
+
+        for i in range(levels,0,-1):
+            _i=str(i)
+            prog+=  str(parameters['conf'].  get(i,parameters['conf'].  get(_i,10000)))
+            shrink+=str(parameters['shrink'].get(i,parameters['shrink'].get(_i,2**i)))
+            blur+=  str(parameters['blur'].  get(i,parameters['blur'].  get(_i,2**i)))
+
+            if i>1:
+                prog+='x'
+                shrink+='x'
+                blur+='x'
+        # TODO: make it a parameter?
+        prog+=','+parameters.get('convergence','1.e-8,20')
+
+        if not minc.checkfiles(inputs=[source,target],
+                               outputs=[output_xfm ]):
+            return
+
+        output_base=output_xfm.rsplit('.xfm',1)[0]
+
+        cost_function=parameters.get('cost_function','Mattes')
+        cost_function_par=parameters.get('cost_function_par','1,32,regular,0.3')
+
+        transformation=parameters.get('transformation','affine[ 0.1 ]')
+        use_mask=parameters.get('use_mask',True)
+        use_histogram_matching=parameters.get('use_histogram_matching',False)
+        winsorize_intensity=parameters.get('winsorize-image-intensities',None)
+        use_float=parameters.get('use_float',False)
+        initialize_fixed=parameters.get('initialize_fixed',None)
+        initialize_moving=parameters.get('intialize_moving',None)
+
+        cmd=['antsRegistration','--collapse-output-transforms', '0', '--minc','-a','--dimensionality','3']
+
+        s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+        t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+
+        source_lr=source
+        target_lr=target
+        source_mask_lr=source_mask
+        target_mask_lr=target_mask
+
+        if downsample is not None:
+            source_lr=minc.tmp(s_base+'_'+str(downsample)+'.mnc')
+            target_lr=minc.tmp(t_base+'_'+str(downsample)+'.mnc')
+
+            minc.resample_smooth(source,source_lr,unistep=downsample)
+            minc.resample_smooth(target,target_lr,unistep=downsample)
+
+            if source_mask is not None:
+                source_mask_lr=minc.tmp(s_base+'_mask_'+str(downsample)+'.mnc')
+                minc.resample_labels(source_mask,source_mask_lr,unistep=downsample,datatype='byte')
+            if target_mask is not None:
+                target_mask_lr=minc.tmp(t_base+'_mask_'+str(downsample)+'.mnc')
+                minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte')
+
+
+        cmd.extend(['--metric','{}[{},{},{}]'.format(cost_function,source_lr,target_lr,cost_function_par)])
+        cmd.extend(['--convergence','[{}]'.format(prog)])
+        cmd.extend(['--shrink-factors',shrink])
+        cmd.extend(['--smoothing-sigmas',blur])
+        cmd.extend(['--transform',transformation])
+        cmd.extend(['--output',output_base])
+        #cmd.extend(['--save-state',output_xfm])
+
+        if init_xfm is not None:
+            cmd.extend(['--initial-fixed-transform',init_xfm])
+            # this is a hack in an attempt to make the initial linear transform
+            # work as expected; currently it looks like the center of the
+            # transform (i.e. the center of rotation) is messed up :(
+            # and it causes lots of problems
+            cmd.extend(['--initialize-transforms-per-stage','1'])
+        elif initialize_fixed is not None:
+            cmd.extend(['--initial-fixed-transform',"[{},{},{}]".format(source_lr,target_lr,str(initialize_fixed))])
+        elif not close:
+            cmd.extend(['--initial-fixed-transform',"[{},{},{}]".format(source_lr,target_lr,'0')])
+
+        if initialize_moving is not None:
+            cmd.extend(['--initial-moving-transform',"[{},{},{}]".format(source_lr,target_lr,str(initialize_moving))])
+        elif not close:
+            cmd.extend(['--initial-moving-transform',"[{},{},{}]".format(source_lr,target_lr,'0')])
+
+        inputs=[source_lr,target_lr]
+
+        if target_mask_lr is not None and source_mask_lr is not None and use_mask:
+            inputs.extend([source_mask_lr, target_mask_lr])
+            cmd.extend(['-x','[{},{}]'.format(source_mask_lr, target_mask_lr)])
+
+        if use_histogram_matching:
+            cmd.append('--use-histogram-matching')
+
+        if winsorize_intensity is not None:
+            if isinstance(winsorize_intensity, dict):
+                cmd.extend(['--winsorize-image-intensities',str(winsorize_intensity.get('low',1)),str(winsorize_intensity.get('high',99))])
+            else:
+                cmd.append('--winsorize-image-intensities')
+
+        if use_float:
+            cmd.append('--float')
+
+        if verbose>0:
+            cmd.extend(['--verbose','1'])
+
+        outputs=[output_xfm] # TODO: add inverse xfm ?
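+        # with the defaults above, the assembled call looks roughly like
+        # (illustration only, paths abbreviated):
+        #   antsRegistration --collapse-output-transforms 0 --minc -a \
+        #     --dimensionality 3 --metric Mattes[src,trg,1,32,regular,0.3] \
+        #     --convergence [10000x10000x10000,1.e-8,20] --shrink-factors 8x4x2 \
+        #     --smoothing-sigmas 8x4x2 --transform affine[ 0.1 ] --output <base>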
+ minc.command(cmd, inputs=inputs, outputs=outputs,verbose=verbose) + + +def parse_options(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Run ANTs registration" ) + + parser.add_argument("source", + help="Source file") + + parser.add_argument("target", + help="Target file") + + parser.add_argument("--output", + help="Output transformation file, MINC xfm format", + default=None) + + parser.add_argument("--source_mask", + default= None, + help="Source mask") + + parser.add_argument("--target_mask", + default= None, + help="Target mask") + + parser.add_argument("--init", + default = None, + help="Initial transformation, minc format") + + parser.add_argument("--downsample", + default = None, + help="Downsample to given voxel size ", + type=float) + + parser.add_argument("--start", + default = 32, + help="Start level ", + type=float) + + parser.add_argument("--level", + default = 2, + help="Final level ", + type=float) + + parser.add_argument("--iter", + default = '20x20x20x20x20', + help="Non-linear iterations ") + + + parser.add_argument("--cost", + default="Mattes", + help="Cost Function", + choices=[ "Mattes", + "CC", + "MI", + "MeanSquares", + "Demons", + "GC"]) + + parser.add_argument("--par", + default="1,32,regular,0.3", + help="Cost Function parameters", + ) + + parser.add_argument("--nl", + dest="nl", + action="store_true", + help="Use nonlinear mode", + default=False) + + parser.add_argument("--lin", + dest="nl", + action="store_false", + help="Use linear mode", + default=False) + + parser.add_argument("--close", + dest="close", + action="store_true", + help="Start close", + default=False) + + parser.add_argument("--verbose", + default = 0, + help="Verbosity level ", + type=int) + + parser.add_argument("--transform", + default=None, + help="Transform options, default affine[0.1] for linear and SyN[.25,2,0.5] for nonlinear") + + options = parser.parse_args() + return options + + +if __name__ == "__main__": + options = parse_options() + + if options.source is None or options.target is None: + print("Error in arguments, run with --help") + print(repr(options)) + else: + + parameters= { 'conf': {}, + 'blur': {}, + 'shrink':{}, + 'convergence':'1.e-8,20', + 'cost_function':options.cost, + 'cost_function_par':options.par, + 'use_histogram_matching':False, + 'transformation':'affine[ 0.1 ]' + } + + if options.nl: + + conf=options.iter.split('x') + + for (i,j) in zip(range(int(math.log(options.start)/math.log(2)),-1,-1),conf): + res=2**i + if res>=options.level: + parameters['conf'][str(res)]=j + + if options.transform is not None: + parameters['transformation']=options.transform + else: + parameters['transformation']='SyN[.25,2,0.5]' + + non_linear_register_ants2( + options.source, options.target, + options.output, + source_mask= options.source_mask, + target_mask= options.target_mask, + init_xfm = options.init, + parameters = parameters, + downsample = options.downsample, + start = options.start, + level = options.level, + verbose = options.verbose + ) + else: + if options.transform is not None: + parameters['transformation']=options.transform + + linear_register_ants2( + options.source, options.target, + options.output, + source_mask= options.source_mask, + target_mask= options.target_mask, + init_xfm = options.init, + parameters = parameters, + downsample = options.downsample, + close = options.close, + verbose = options.verbose + ) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs 
on;word-wrap-column 80 diff --git a/ipl/create_pairwise_registrations.py b/ipl/create_pairwise_registrations.py new file mode 100755 index 0000000..7700a85 --- /dev/null +++ b/ipl/create_pairwise_registrations.py @@ -0,0 +1,336 @@ +#! /usr/bin/env python + + +import shutil +import os +import json + +from iplMincTools import mincTools,mincError +from scoop import futures, shared + +def generate_xfm_model(i , j, xfm1, xfm2, mri1, mri2, mask1, mask2, seg1, seg2, output_base,step=2,baa=False): + with mincTools(verbose=2) as minc: + # all xfms are mapping subject to common space, so to map one subject to another it will be xfm1 * xfm2^1 + minc.xfminvert(xfm2,minc.tmp('xfm1.xfm')) + # concatenate xfms + minc.xfmconcat([xfm1,minc.tmp('xfm1.xfm')],minc.tmp('xfm1_dot_xfm2_inv.xfm')) + # normalize xfms + minc.xfm_normalize(minc.tmp('xfm1_dot_xfm2_inv.xfm'),mri1,output_base+'_map.xfm',step=step) + + # resample mris + minc.resample_smooth(mri2,minc.tmp('mri2.mnc'),transform=output_base+'_map.xfm',invert_transform=True) + # resample segs + minc.resample_labels(seg2,minc.tmp('seg2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + minc.resample_labels(mask2,minc.tmp('mask2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + # calculate CC, MI + cc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='cc') + nmi = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='nmi') + ncc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='ncc') + msq = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='msq') + # calculate label overlap + gtc = minc.label_similarity(seg1,minc.tmp('seg2.mnc'),method='gtc') + + # write out result + with open(output_base+'_similarity.txt','w') as f: + f.write("{},{},{},{},{},{},{}\n".format(i,j,cc,ncc,nmi,msq,gtc)) + +def generate_xfm_direct_minctracc(i , j, mri1, mri2, mask1, mask2, seg1, seg2, output_base,step=2,baa=False): + with mincTools(verbose=2) as minc: + # normalize xfms + minc.non_linear_register_full(mri1,mri2,output_base+'_map.xfm',level=step,source_mask=mask1,target_mask=mask2) + + # resample mris + minc.resample_smooth(mri2,minc.tmp('mri2.mnc'),transform=output_base+'_map.xfm',invert_transform=True) + # resample segs + minc.resample_labels(seg2,minc.tmp('seg2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + minc.resample_labels(mask2,minc.tmp('mask2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + # calculate CC, MI + cc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='cc') + nmi = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='nmi') + ncc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='ncc') + msq = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='msq') + # calculate label overlap + gtc = minc.label_similarity(seg1,minc.tmp('seg2.mnc'),method='gtc') + + # write out result + with open(output_base+'_similarity.txt','w') as f: + f.write("{},{},{},{},{},{},{}\n".format(i,j,cc,ncc,nmi,msq,gtc)) + + +def generate_xfm_direct_ANTS_CC(i , j, mri1, mri2, mask1, mask2, seg1, seg2, output_base,baa=False,step=2): + with mincTools(verbose=2) as minc: + # normalize xfms + 
param_cc={'cost_function':'CC','iter':'40x40x40x00'} + + minc.non_linear_register_ants(mri1,mri2,minc.tmp('transform.xfm'),target_mask=mask2,parameters=param_cc) + minc.xfm_normalize(minc.tmp('transform.xfm'),mri1,output_base+'_map.xfm',step=step) + + # resample mris + minc.resample_smooth(mri2,minc.tmp('mri2.mnc'),transform=output_base+'_map.xfm',invert_transform=True) + # resample segs + minc.resample_labels(seg2,minc.tmp('seg2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + minc.resample_labels(mask2,minc.tmp('mask2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + # calculate CC, MI + cc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='cc') + nmi = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='nmi') + ncc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='ncc') + msq = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='msq') + # calculate label overlap + gtc = minc.label_similarity(seg1,minc.tmp('seg2.mnc'),method='gtc') + + # write out result + with open(output_base+'_similarity.txt','w') as f: + f.write("{},{},{},{},{},{},{}\n".format(i,j,cc,ncc,nmi,msq,gtc)) + + +def generate_xfm_direct_ANTS_MI(i , j, mri1, mri2, mask1, mask2, seg1, seg2, output_base,baa=False,step=2): + with mincTools(verbose=2) as minc: + # normalize xfms + param_mi={'cost_function':'MI','iter':'40x40x40x00','cost_function_par':'1,32'} + + minc.non_linear_register_ants(mri1,mri2,minc.tmp('transform.xfm'),target_mask=mask2,parameters=param_mi) + minc.xfm_normalize(minc.tmp('transform.xfm'),mri1,output_base+'_map.xfm',step=step) + + # resample mris + minc.resample_smooth(mri2,minc.tmp('mri2.mnc'),transform=output_base+'_map.xfm',invert_transform=True) + # resample segs + minc.resample_labels(seg2,minc.tmp('seg2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + minc.resample_labels(mask2,minc.tmp('mask2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + # calculate CC, MI + cc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='cc') + nmi = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='nmi') + ncc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='ncc') + msq = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='msq') + # calculate label overlap + gtc = minc.label_similarity(seg1,minc.tmp('seg2.mnc'),method='gtc') + + # write out result + with open(output_base+'_similarity.txt','w') as f: + f.write("{},{},{},{},{},{},{}\n".format(i,j,cc,ncc,nmi,msq,gtc)) + +def generate_xfm_direct_elastix_cc(i , j, mri1, mri2, mask1, mask2, seg1, seg2, output_base,baa=False,step=2): + with mincTools(verbose=2) as minc: + + param_cc=""" +(FixedInternalImagePixelType "float") +(MovingInternalImagePixelType "float") +(FixedImageDimension 3) +(MovingImageDimension 3) +(UseDirectionCosines "true") + +(ShowExactMetricValue "false") + +(Registration "MultiResolutionRegistration") +(Interpolator "BSplineInterpolator" ) +(ResampleInterpolator "FinalBSplineInterpolator" ) +(Resampler "DefaultResampler" ) + +(FixedImagePyramid "FixedSmoothingImagePyramid") +(MovingImagePyramid "MovingSmoothingImagePyramid") + +(Optimizer "AdaptiveStochasticGradientDescent") 
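+// B-spline deformation optimised with ASGD against normalised correlation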
+(Transform "BSplineTransform") +(Metric "AdvancedNormalizedCorrelation") + +(FinalGridSpacingInPhysicalUnits 4) + +(HowToCombineTransforms "Compose") + +(ErodeMask "false") + +(NumberOfResolutions 3) + +(ImagePyramidSchedule 8 8 8 4 4 4 2 2 2) + +(MaximumNumberOfIterations 2000 2000 2000 ) +(MaximumNumberOfSamplingAttempts 3) + +(NumberOfSpatialSamples 1024 1024 4096 ) + +(NewSamplesEveryIteration "true") +(ImageSampler "Random" ) + +(BSplineInterpolationOrder 1) + +(FinalBSplineInterpolationOrder 3) + +(DefaultPixelValue 0) + +(WriteResultImage "false") + +// The pixel type and format of the resulting deformed moving image +(ResultImagePixelType "float") +(ResultImageFormat "mnc") +""" + # normalize xfms + minc.register_elastix(mri1,mri2,output_xfm=minc.tmp('transform.xfm'),source_mask=mask1,target_mask=mask2,parameters=param_cc) + minc.xfm_normalize(minc.tmp('transform.xfm'),mri1,output_base+'_map.xfm',step=step) + + # resample mris + minc.resample_smooth(mri2,minc.tmp('mri2.mnc'),transform=output_base+'_map.xfm',invert_transform=True) + # resample segs + minc.resample_labels(seg2,minc.tmp('seg2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + minc.resample_labels(mask2,minc.tmp('mask2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + # calculate CC, MI + cc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='cc') + nmi = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='nmi') + ncc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='ncc') + msq = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='msq') + # calculate label overlap + gtc = minc.label_similarity(seg1,minc.tmp('seg2.mnc'),method='gtc') + + # write out result + with open(output_base+'_similarity.txt','w') as f: + f.write("{},{},{},{},{},{},{}\n".format(i,j,cc,ncc,nmi,msq,gtc)) + + +def generate_xfm_direct_elastix_mi(i , j, mri1, mri2, mask1, mask2, seg1, seg2, output_base,baa=False,step=2): + with mincTools(verbose=2) as minc: + + param_mi=""" +(FixedInternalImagePixelType "float") +(MovingInternalImagePixelType "float") +(FixedImageDimension 3) +(MovingImageDimension 3) +(UseDirectionCosines "true") + +(ShowExactMetricValue "false") + +(Registration "MultiResolutionRegistration") +(Interpolator "BSplineInterpolator" ) +(ResampleInterpolator "FinalBSplineInterpolator" ) +(Resampler "DefaultResampler" ) + +(FixedImagePyramid "FixedSmoothingImagePyramid") +(MovingImagePyramid "MovingSmoothingImagePyramid") + +(Optimizer "AdaptiveStochasticGradientDescent") +(Transform "BSplineTransform") +(Metric "AdvancedMattesMutualInformation") + +(FinalGridSpacingInPhysicalUnits 4) + +(HowToCombineTransforms "Compose") + +(ErodeMask "false") + +(NumberOfResolutions 3) + +(ImagePyramidSchedule 8 8 8 4 4 4 2 2 2) + +(MaximumNumberOfIterations 2000 2000 2000 ) +(MaximumNumberOfSamplingAttempts 3) + +(NumberOfSpatialSamples 1024 1024 4096 ) + +(NewSamplesEveryIteration "true") +(ImageSampler "Random" ) + +(BSplineInterpolationOrder 1) + +(FinalBSplineInterpolationOrder 3) + +(DefaultPixelValue 0) + +(WriteResultImage "false") + +// The pixel type and format of the resulting deformed moving image +(ResultImagePixelType "float") +(ResultImageFormat "mnc") +""" + # normalize xfms + minc.register_elastix(mri1,mri2,output_xfm=minc.tmp('transform.xfm'),source_mask=mask1,target_mask=mask2,parameters=param_mi) + 
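+        # normalise the elastix B-spline result to a dense grid transform at
+        # 'step' mm, matching the _map.xfm outputs of the other methods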
minc.xfm_normalize(minc.tmp('transform.xfm'),mri1,output_base+'_map.xfm',step=step) + + # resample mris + minc.resample_smooth(mri2,minc.tmp('mri2.mnc'),transform=output_base+'_map.xfm',invert_transform=True) + # resample segs + minc.resample_labels(seg2,minc.tmp('seg2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + minc.resample_labels(mask2,minc.tmp('mask2.mnc'),transform=output_base+'_map.xfm',invert_transform=True,baa=baa) + # calculate CC, MI + cc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='cc') + nmi = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='nmi') + ncc = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='ncc') + msq = minc.similarity(mri1,minc.tmp('mri2.mnc'),ref_mask=mask1,sample_mask=minc.tmp('mask2.mnc'),method='msq') + # calculate label overlap + gtc = minc.label_similarity(seg1,minc.tmp('seg2.mnc'),method='gtc') + + # write out result + with open(output_base+'_similarity.txt','w') as f: + f.write("{},{},{},{},{},{},{}\n".format(i,j,cc,ncc,nmi,msq,gtc)) + + +if __name__ == '__main__': + model='model_nl' + output='pairwise' + input_prefix='minc_prep_bbox/' + step_size=2 + + model_results={} + + with open(model+os.sep+'results.json','r') as f: + model_results=json.load(f) + + if not os.path.exists(output): + os.makedirs(output) + # generate fake seg and mri names + #TODO replace with CSV file input + mri= [input_prefix+k['name'] for k in model_results['scan']] + mask=[input_prefix+k['name'].rstrip('.mnc')+'_mask.mnc' for k in model_results['scan']] + seg= [input_prefix+k['name'].rstrip('.mnc')+'_glm.mnc' for k in model_results['scan']] + + print(repr(mri)) + print(repr(mask)) + print(repr(seg)) + rr=[] + + # generate uniform file names! 
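+    # one output prefix per method: A=model-space composition, B=minctracc,
+    # C=ANTS(CC), D=ANTS(MI), E=elastix(CC), F=elastix(MI)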
+ for (i,j) in enumerate(model_results['xfm']): + for (k,t) in enumerate(model_results['xfm']): + if i!=k: + if not os.path.exists(output+os.sep+'A_{:02d}_{:02d}_map.xfm'.format(i,k) ) : + rr.append( futures.submit( generate_xfm_model,i,k, + j['xfm'],t['xfm'], + mri[i],mri[k], + mask[i],mask[k], + seg[i],seg[k], + output+os.sep+'A_{:02d}_{:02d}'.format(i,k) ) ) + + if not os.path.exists(output+os.sep+'B_{:02d}_{:02d}_map.xfm'.format(i,k) ) : + rr.append( futures.submit( generate_xfm_direct_minctracc,i,k, + mri[i],mri[k], + mask[i],mask[k], + seg[i],seg[k], + output+os.sep+'B_{:02d}_{:02d}'.format(i,k), + step=2) ) + + if not os.path.exists(output+os.sep+'C_{:02d}_{:02d}_map.xfm'.format(i,k) ) : + rr.append( futures.submit( generate_xfm_direct_ANTS_CC,i,k, + mri[i],mri[k], + mask[i],mask[k], + seg[i],seg[k], + output+os.sep+'C_{:02d}_{:02d}'.format(i,k) ) ) + + if not os.path.exists( output+os.sep+'D_{:02d}_{:02d}_map.xfm'.format(i,k) ) : + rr.append( futures.submit( generate_xfm_direct_ANTS_MI,i,k, + mri[i],mri[k], + mask[i],mask[k], + seg[i],seg[k], + output+os.sep+'D_{:02d}_{:02d}'.format(i,k) ) ) + + if not os.path.exists( output+os.sep+'E_{:02d}_{:02d}_map.xfm'.format(i,k) ) : + rr.append( futures.submit( generate_xfm_direct_elastix_cc,i,k, + mri[i],mri[k], + mask[i],mask[k], + seg[i],seg[k], + output+os.sep+'E_{:02d}_{:02d}'.format(i,k) ) ) + + if not os.path.exists( output+os.sep+'F_{:02d}_{:02d}_map.xfm'.format(i,k) ) : + rr.append( futures.submit( generate_xfm_direct_elastix_mi,i,k, + mri[i],mri[k], + mask[i],mask[k], + seg[i],seg[k], + output+os.sep+'F_{:02d}_{:02d}'.format(i,k) ) ) + + futures.wait(rr, return_when=futures.ALL_COMPLETED) +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/dd_registration.py b/ipl/dd_registration.py new file mode 100644 index 0000000..d8d2126 --- /dev/null +++ b/ipl/dd_registration.py @@ -0,0 +1,272 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV
+# @date 29/06/2015
+#
+# registration tools
+
+
+from __future__ import print_function
+
+import os
+import sys
+import shutil
+import tempfile
+import subprocess
+import re
+import fcntl
+import traceback
+import collections
+import math
+
+# local stuff
+import minc_tools
+
+
+# hack to make string types work on both Python 2 and Python 3
+try:
+    unicode = unicode
+except NameError:
+    # 'unicode' is undefined, must be Python 3
+    str = str
+    unicode = str
+    bytes = bytes
+    basestring = (str,bytes)
+else:
+    # 'unicode' exists, must be Python 2
+    str = str
+    unicode = unicode
+    bytes = str
+    basestring = basestring
+
+
+def non_linear_register_ldd(
+    source, target,
+    output_velocity,
+    output_xfm=None,
+    source_mask=None,
+    target_mask=None,
+    init_xfm=None,
+    init_velocity=None,
+    level=2,
+    start=32,
+    parameters=None,
+    work_dir=None,
+    downsample=None
+    ):
+    """Use log-diffeomorphic demons to run registration"""
+
+    with minc_tools.mincTools() as minc:
+        if not minc.checkfiles(inputs=[source,target],
+                               outputs=[output_velocity]):
+            return
+        if parameters is None:
+            parameters={'conf':{},
+                        'smooth_update':2,
+                        'smooth_field':2,
+                        'update_rule':1,
+                        'grad_type':0,
+                        'max_step':2.0,
+                        'hist_match':True,
+                        'LCC': False }
+
+        LCC=parameters.get('LCC',False)
+
+        source_lr=source
+        target_lr=target
+        source_mask_lr=source_mask
+        target_mask_lr=target_mask
+
+        if downsample is not None:
+            s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+            t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+            source_lr=minc.tmp(s_base+'_'+str(downsample)+'.mnc')
+            target_lr=minc.tmp(t_base+'_'+str(downsample)+'.mnc')
+
+            minc.resample_smooth(source,source_lr,unistep=downsample)
+            minc.resample_smooth(target,target_lr,unistep=downsample)
+
+            # the original had this block duplicated for target_mask only,
+            # with both copies writing to the s_base file name; downsample
+            # each mask into its own file instead
+            if source_mask is not None:
+                source_mask_lr=minc.tmp(s_base+'_mask_'+str(downsample)+'.mnc')
+                minc.resample_labels(source_mask,source_mask_lr,unistep=downsample,datatype='byte')
+            if target_mask is not None:
+                target_mask_lr=minc.tmp(t_base+'_mask_'+str(downsample)+'.mnc')
+                minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte')
+
+        prog=''
+
+        for i in range(int(math.log(start)/math.log(2)),-1,-1):
+            res=2**i
+            if res>=level:
+                prog+=str(parameters['conf'].get(res,20))
+            else:
+                prog+='0'
+            if i>0:
+                prog+='x'
+
+        inputs=[source,target]
+        cmd=None
+
+        if LCC:
+            cmd=['rpiLCClogDemons',
+                 '-f',source_lr,'-m', target_lr,
+                 '--output-transform', output_velocity,
+                 '-S',str(parameters.get('tradeoff',0.15)),
+                 '-u',str(parameters.get('smooth_update',2)),
+                 '-d',str(parameters.get('smooth_field',2)),
+                 '-C',str(parameters.get('smooth_similarity',3)),
+                 '-b',str(parameters.get('bending_weight',1)),
+                 '-x',str(parameters.get('harmonic_weight',0)),
+                 '-r',str(parameters.get('update_rule',2)),
+                 '-g',str(parameters.get('grad_type',0)),
+                 '-l',str(parameters.get('max_step',2.0)),
+                 '-a',prog ]
+
+            if parameters.get('hist_match',True):
+                cmd.append('--use-histogram-matching')
+
+            # generate program
+            if source_mask_lr is not None:
+                cmd.extend(['--mask-image', source_mask_lr])
+                inputs.append(source_mask_lr)
+
+            if init_velocity is not None:
+                cmd.extend(['--initial-transform',init_velocity])
+                inputs.append(init_velocity)
+        else:
+            cmd=['LogDomainDemonsRegistration',
+                 '-f',source_lr,'-m', target_lr,
+                 '--outputVel-field', output_velocity,
+                 '-g',str(parameters.get('smooth_update',2)),
+                 '-s',str(parameters.get('smooth_field',2)),
+                 '-a',str(parameters.get('update_rule',1)),
+                 '-t',str(parameters.get('grad_type',0)),
+                 '-l',str(parameters.get('max_step',2.0)),
+                 '-i',prog ]
+
+            if parameters.get('hist_match',True):
+                cmd.append('--use-histogram-matching')
+
+            # generate program
+            if source_mask_lr is not None:
+                cmd.extend(['--fixed-mask', source_mask_lr])
+                inputs.append(source_mask_lr)
+
+            if target_mask_lr is not None:
+                cmd.extend(['--moving-mask', target_mask_lr])
+                inputs.append(target_mask_lr)
+
+            if init_velocity is not None:
+                cmd.extend(['--input-field',init_velocity])
+                inputs.append(init_velocity)
+
+            if init_xfm is not None:
+                cmd.extend(['--input-transform',init_xfm])
+                inputs.append(init_xfm)
+
+        # define outputs before appending the optional deformation field;
+        # the original appended to 'outputs' before it was initialized
+        outputs=[output_velocity]
+
+        if output_xfm is not None:
+            cmd.extend(['--outputDef-field',output_xfm])
+            outputs.append(output_xfm)
+
+        minc.command(cmd, inputs=inputs, outputs=outputs)
+        # todo add dependency for masks
+
+
+def non_linear_register_dd(
+    source,
+    target,
+    output_xfm,
+    source_mask=None,
+    target_mask=None,
+    init_xfm=None,
+    level=4,
+    start=32,
+    parameters=None,
+    work_dir=None,
+    downsample=None
+    ):
+    """perform incremental non-linear registration with diffeomorphic demons"""
+
+    with minc_tools.mincTools() as minc:
+        if not minc.checkfiles(inputs=[source,target],
+                               outputs=[output_xfm]):
+            return
+
+        if parameters is None:
+            parameters={'conf':{},
+                        'smooth_update':2,
+                        'smooth_field':2,
+                        'update_rule':0,
+                        'grad_type':0,
+                        'max_step':2.0,
+                        'hist_match':True }
+
+        source_lr=source
+        target_lr=target
+        source_mask_lr=source_mask
+        target_mask_lr=target_mask
+
+        if downsample is not None:
+            s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+            t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+            source_lr=minc.tmp(s_base+'_'+str(downsample)+'.mnc')
+            target_lr=minc.tmp(t_base+'_'+str(downsample)+'.mnc')
+
+            minc.resample_smooth(source,source_lr,unistep=downsample)
+            minc.resample_smooth(target,target_lr,unistep=downsample)
+
+            # as above: downsample each mask into its own file
+            if source_mask is not None:
+                source_mask_lr=minc.tmp(s_base+'_mask_'+str(downsample)+'.mnc')
+                minc.resample_labels(source_mask,source_mask_lr,unistep=downsample,datatype='byte')
+            if target_mask is not None:
+                target_mask_lr=minc.tmp(t_base+'_mask_'+str(downsample)+'.mnc')
+                minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte')
+
+        prog=''
+
+        for i in range(int(math.log(start)/math.log(2)),-1,-1):
+            res=2**i
+            if res>=level:
+                prog+=str(parameters['conf'].get(res,20))
+            else:
+                prog+='0'
+            if i>0:
+                prog+='x'
+
+        inputs=[source_lr,target_lr]
+        cmd=['DemonsRegistration',
+             '-f',source_lr,'-m', target_lr,
+             '--outputDef-field', output_xfm,
+             '-g',str(parameters.get('smooth_update',2)),
+             '-s',str(parameters.get('smooth_field',2)),
+             '-a',str(parameters.get('update_rule',0)),
+             '-t',str(parameters.get('grad_type',0)),
+             '-l',str(parameters.get('max_step',2.0)),
+             '-i',prog ]
+
+        if parameters.get('hist_match',True):
+            cmd.append('--use-histogram-matching')
+
+        if source_mask_lr is not None:
+            cmd.extend(['--fixed-mask', source_mask_lr])
+            inputs.append(source_mask_lr)
+
+        if target_mask_lr is not None:
+            cmd.extend(['--moving-mask', target_mask_lr])
+            inputs.append(target_mask_lr)
+
+        if init_xfm is not None:
+            cmd.extend(['--input-transform',init_xfm])
+            inputs.append(init_xfm)
+
+        outputs=[output_xfm]
+
+        minc.command(cmd, inputs=inputs, outputs=outputs)
+        # todo add dependency for masks
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80
diff --git a/ipl/elastix_registration.py b/ipl/elastix_registration.py
new file mode 100755
index 0000000..cf1547f
--- /dev/null
+++ b/ipl/elastix_registration.py
@@ -0,0 +1,729 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date 29/06/2015
+#
+# registration tools
+
+
+from __future__ import print_function
+
+import os
+import sys
+import shutil
+import tempfile
+import subprocess
+import re
+import fcntl
+import traceback
+import collections
+import math
+
+# command-line interface
+import argparse
+
+# local stuff
+import minc_tools
+
+
+# hack to make string types work on both Python 2 and Python 3
+try:
+    unicode = unicode
+except NameError:
+    # 'unicode' is undefined, must be Python 3
+    str = str
+    unicode = str
+    bytes = bytes
+    basestring = (str,bytes)
+else:
+    # 'unicode' exists, must be Python 2
+    str = str
+    unicode = unicode
+    bytes = str
+    basestring = basestring
+
+
+def parse_tags(tag):
+    tags=[]
+    volumes=1
+    with open(tag,'r') as f:
+        started=False
+        for line in f:
+            line=line.rstrip('\r\n')
+
+            if not started:
+                m = re.match(".*Volumes = (\S)",line)
+
+                if re.match(".*Points =",line):
+                    started=True
+                    continue
+                elif m is not None :
+                    volumes=int(m.group(1))
+            else:
+                if re.match('.*;',line) is not None: # this is the last line
+                    line=line.replace(';','')
+                c=line.split(' ')
+                if len(c[0])==0:
+                    c.pop(0) # protection against an empty first parameter
+                tags.append([float(i) for i in c])
+
+    return (volumes,tags)
+
+
+def tag2elx(tags,out1,out2):
+    (vols,tags)=parse_tags(tags)
+
+    with open(out1,'w') as f:
+        f.write("point\n{}\n".format(len(tags)))
+        for i in tags:
+            f.write("{} {} {}\n".format(i[0],i[1],i[2]))
+
+    if vols>1:
+        with open(out2,'w') as f:
+            f.write("point\n{}\n".format(len(tags)))
+            for i in tags:
+                f.write("{} {} {}\n".format(i[3],i[4],i[5]))
+
+    return vols
+
+
+def nl_xfm_to_elastix(xfm, elastix_par):
+    """Convert MINC style xfm into elastix style registration parameters.
+    Assuming that the xfm file is strictly non-linear, with a single non-linear deformation field
+    """
+    # TODO: make a proper parsing of XFM file
+    with minc_tools.mincTools() as minc:
+        grid=xfm.rsplit('.xfm',1)[0]+'_grid_0.mnc'
+        if not os.path.exists(grid):
+            print("nl_xfm_to_elastix error!")
+            raise minc_tools.mincError("Unfortunately currently only a very primitive way of dealing with MINC XFM files is implemented\n{}".format(traceback.format_exc()))
+
+        with open(elastix_par,'w') as f:
+            f.write("(Transform \"DeformationFieldTransform\")\n")
+            f.write("(DeformationFieldInterpolationOrder 0)\n")
+            f.write("(DeformationFieldFileName \"{}\")\n".format(grid))
+        return elastix_par
+
+
+def lin_xfm_to_elastix(xfm,elastix_par):
+    """Convert MINC style xfm into elastix style registration parameters.
+    Assuming that the xfm file is strictly linear
+    """
+    with minc_tools.mincTools() as minc:
+        minc.command(['itk_convert_xfm',xfm,minc.tmp('input.txt')],
+                     inputs=[xfm],outputs=[minc.tmp('input.txt')])
+        # parsing text transformation
+        param=None
+        fix_param=None
+
+        with open(minc.tmp('input.txt'),'r') as f:
+            for ln in f:
+                if re.match('^Parameters: ', ln):
+                    param=ln.split(' ')
+                if re.match('^FixedParameters: ', ln):
+                    fix_param=ln.split(' ')
+        param.pop(0)
+        fix_param.pop(0)
+        # write to the requested output file, so that it survives
+        # the cleanup of the temporary directory
+        with open(elastix_par,'w') as f:
+            f.write('''(Transform "AffineTransform")
+(NumberOfParameters 12)
+(TransformParameters {})
+(InitialTransformParametersFileName "NoInitialTransform")
+(HowToCombineTransforms "Compose")
+
+// EulerTransform specific
+(CenterOfRotationPoint {})
+'''.format(' '.join(param),' '.join(fix_param)))
+        return elastix_par
+
+
+def nl_elastix_to_xfm(elastix_par, xfm, downsample_grid=None, nl=True ):
+    """Convert elastix transformation file into MINC XFM file"""
+    with minc_tools.mincTools() as minc:
+        threads=os.environ.get('ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS',1)
+        cmd=['transformix', '-tp', elastix_par, '-out', minc.tempdir,'-xfm', xfm, '-q', '-threads', str(threads)]
+
+        if nl:
+            cmd.extend(['-def', 'all'])
+            if downsample_grid is not None:
+                cmd.extend(['-sub',str(downsample_grid)])
+
+        minc.command(cmd, inputs=[elastix_par], outputs=[xfm])
+        return xfm
+
+
+def register_elastix(
+    source, target,
+    output_par = None,
+    output_xfm = None,
+    source_mask= None,
+    target_mask= None,
+    init_xfm   = None,
+    init_par   = None,
+    parameters = None,
+    work_dir   = None,
+    downsample = None,
+    downsample_grid=None,
+    nl         = True,
+    output_log = None,
+    tags       = None,
+    verbose    = 0):
+    """Run elastix with given parameters
+    Arguments:
+    source -- source image (fixed image in elastix notation)
+    target -- target, or reference image (moving image in elastix notation)
+
+    Keyword arguments:
+    output_par  -- output transformation in elastix format
+    output_xfm  -- output transformation in MINC XFM format
+    source_mask -- source mask
+    target_mask -- target mask
+    init_xfm    -- initial transform in XFM format
+    init_par    -- initial transform in elastix format
+    parameters  -- parameters for transformation;
+        if it is a string starting with @, the rest is the name of a text file
+        containing parameters in elastix format;
+        any other string is treated directly as transformation parameters in
+        elastix format;
+        if it is a dictionary:
+         for non-linear mode (nl==True):
+            "optimizer",  "AdaptiveStochasticGradientDescent" (default for nonlinear)
+                          "CMAEvolutionStrategy" (default for linear)
+                          "ConjugateGradient"
+                          "ConjugateGradientFRPR"
+                          "FiniteDifferenceGradientDescent"
+                          "QuasiNewtonLBFGS"
+                          "RegularStepGradientDescent"
+                          "RSGDEachParameterApart"
+
+            "transform",  "BSplineTransform" (default for nonlinear mode)
+                          "SimilarityTransform" (default for linear)
+                          "AffineTransform"
+                          "AffineDTITransform"
+                          "EulerTransform"
+                          "MultiBSplineTransformWithNormal"
+                          "TranslationTransform"
+
+            "metric",     "AdvancedNormalizedCorrelation" (default)
+                          "AdvancedMattesMutualInformation"
+                          "NormalizedMutualInformation"
+                          "AdvancedKappaStatistic"
+                          "KNNGraphAlphaMutualInformation"
+
+            "resolutions", 3 - number of resolution steps
+            "pyramid", "8 8 8 4 4 4 2 2 2" - downsampling schedule
+            "iterations", 4000 - number of iterations
+            "samples", 4096 - number of samples
+            "sampler",    "Random" (default)
+                          "Full"
+                          "RandomCoordinate"
+                          "Grid" TODO: add SampleGridSpacing
+                          "RandomSparseMask"
+
+            "grid_spacing", 10 - grid spacing in mm
+            "max_step", "1.0" - maximum step (mm)
+
+         for linear mode (nl==False):
+            "optimizer", "CMAEvolutionStrategy" - optimizer
+            "transform", "SimilarityTransform" - transform
+            "metric", "AdvancedNormalizedCorrelation" - cost function
+            "resolutions", 3 - number of resolutions
+            "pyramid", "8 8 8 4 4 4 2 2 2" - resampling schedule
+            "iterations", 4000 - number of iterations
+            "samples", 4096 - number of samples
+            "sampler", "Random" - sampler
+            "max_step", "1.0" - max step
+            "automatic_transform_init", True - perform automatic transform initialization
+            "automatic_transform_init_method" - type of automatic transform initialization method:
+                          "CenterOfGravity" (default)
+                          "GeometricalCenter" - based on the center of the image
+    work_dir    -- Work directory
+    downsample  -- Downsample input images
+    downsample_grid -- Downsample output nl-deformation
+    nl          -- flag to indicate that the non-linear version is running
+    output_log  -- output log
+    """
+    with minc_tools.mincTools(verbose=2) as minc:
+        s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+        t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+
+        source_lr=source
+        target_lr=target
+
+        source_mask_lr=source_mask
+        target_mask_lr=target_mask
+        use_mask=True
+
+        if (init_par is not None) and (init_xfm is not None):
+            print("register_elastix: init_xfm={} init_par={}".format(repr(init_xfm),repr(init_par)))
+            raise minc_tools.mincError("Specify either init_xfm or init_par")
+
+        outputs=[]
+        if output_par is not None: outputs.append(output_par)
+        if output_xfm is not None: outputs.append(output_xfm)
+
+        if len(outputs)>0 and (not minc.checkfiles( inputs=[source,target],
+                                                    outputs=outputs )):
+            return
+
+        threads=os.environ.get('ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS',1)
+
+        if parameters is None:
+            parameters={}
+        #print("Running elastix with parameters:{}".format(repr(parameters)))
+        # figure out what to do here:
+        with minc_tools.cache_files(work_dir=work_dir,context='elastix') as tmp:
+
+            if init_xfm is not None:
+                if nl:
+                    init_par=nl_xfm_to_elastix(init_xfm, tmp.cache('init.txt'))
+                else:
+                    init_par=lin_xfm_to_elastix(init_xfm, tmp.cache('init.txt'))
+
+            # a fitting we shall go...
+            if downsample is not None:
+                source_lr=tmp.cache(s_base+'_'+str(downsample)+'.mnc')
+                target_lr=tmp.cache(t_base+'_'+str(downsample)+'.mnc')
+
+                minc.resample_smooth(source,source_lr,unistep=downsample)
+                minc.resample_smooth(target,target_lr,unistep=downsample)
+
+                if source_mask is not None:
+                    source_mask_lr=tmp.cache(s_base+'_mask_'+str(downsample)+'.mnc')
+                    minc.resample_labels(source_mask,source_mask_lr,unistep=downsample,datatype='byte')
+
+                if target_mask is not None:
+                    # use t_base here: the original used s_base, so the
+                    # downsampled target mask overwrote the source mask
+                    target_mask_lr=tmp.cache(t_base+'_mask_'+str(downsample)+'.mnc')
+                    minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte')
+
+            par_file=tmp.cache('parameters.txt')
+            measure_mode=False
+            # parameters can also be stored in a file
+            if isinstance(parameters, dict):
+                use_mask=parameters.get('use_mask',True)
+                measure_mode=parameters.get('measure',False)
+                def_iterations=4000
+
+                if measure_mode:
+                    def_iterations=1
+                    parameters['iterations']=1
+
+                with open(par_file,'w') as p:
+                    if nl:
+                        p.write('''
+(FixedInternalImagePixelType "float")
+(MovingInternalImagePixelType "float")
+(FixedImageDimension 3)
+(MovingImageDimension 3)
+(UseDirectionCosines "true")
+
+(Registration "MultiResolutionRegistration")
+(Interpolator "BSplineInterpolator" )
+(ResampleInterpolator "FinalBSplineInterpolator" )
+(Resampler "DefaultResampler" )
+(ShowExactMetricValue {exact_metric})
+
+(FixedImagePyramid "FixedSmoothingImagePyramid")
+(MovingImagePyramid "MovingSmoothingImagePyramid")
+
+(Optimizer "{optimizer}")
+(Transform "{transform}")
+(Metric "{metric}")
+(MaximumStepLength {max_step})
+
+(FinalGridSpacingInPhysicalUnits {grid_spacing})
+
+(HowToCombineTransforms "Compose")
+
+(ErodeMask "false")
+
+(NumberOfResolutions {resolutions})
+
+(ImagePyramidSchedule {pyramid} )
+
+(MaximumNumberOfIterations {iterations} )
+(MaximumNumberOfSamplingAttempts 10)
+
+(NumberOfSpatialSamples {samples} )
+
+(NewSamplesEveryIteration "{new_samples}")
+(ImageSampler "{sampler}" )
+
+(BSplineInterpolationOrder 1)
+
+(FinalBSplineInterpolationOrder 1)
+
+(DefaultPixelValue 0)
+
+(WriteResultImage "false")
+
+// The pixel type and
format of the resulting deformed moving image +(ResultImagePixelType "float") +(ResultImageFormat "mnc") +'''.format( optimizer= parameters.get('optimizer','AdaptiveStochasticGradientDescent'), + transform= parameters.get('transform','BSplineTransform'), + metric= parameters.get('metric','AdvancedNormalizedCorrelation'), + resolutions=parameters.get('resolutions',3), + pyramid= parameters.get('pyramid','8 8 8 4 4 4 2 2 2'), + iterations=parameters.get('iterations',def_iterations), + samples= parameters.get('samples',4096), + sampler= parameters.get('sampler',"Random"), + grid_spacing=parameters.get('grid_spacing',10), + max_step =parameters.get('max_step',"1.0"), + exact_metric=str(parameters.get("exact_metric",False)).lower(), + new_samples=str(parameters.get("new_samples",True)).lower(), + )) + else: + p.write(''' +(FixedInternalImagePixelType "float") +(MovingInternalImagePixelType "float") +(FixedImageDimension 3) +(MovingImageDimension 3) +(UseDirectionCosines "true") + +(AutomaticTransformInitialization "{automatic_transform_init}") +(AutomaticTransformInitializationMethod "{automatic_transform_init_method}") +(AutomaticScalesEstimation "true") +(AutomaticParameterEstimation "true") +(MaximumStepLength {max_step}) + +(Registration "MultiResolutionRegistration") +(Interpolator "BSplineInterpolator" ) +(ResampleInterpolator "FinalBSplineInterpolator" ) +(Resampler "DefaultResampler" ) +(ShowExactMetricValue {exact_metric}) + +(FixedImagePyramid "FixedSmoothingImagePyramid") +(MovingImagePyramid "MovingSmoothingImagePyramid") + +(Optimizer "{optimizer}") +(Transform "{transform}") +(Metric "{metric}") + +(HowToCombineTransforms "Compose") + +(ErodeMask "false") + +(NumberOfResolutions {resolutions}) + +(ImagePyramidSchedule {pyramid} ) + +(MaximumNumberOfIterations {iterations} ) +(RequiredRatioOfValidSamples 0.01) +(MaximumNumberOfSamplingAttempts 10) + +(NumberOfSpatialSamples {samples} ) + +(NewSamplesEveryIteration "{new_samples}") +(ImageSampler "{sampler}" ) + +(BSplineInterpolationOrder 1) + +(FinalBSplineInterpolationOrder 1) + +(DefaultPixelValue 0) + +(WriteResultImage "false") + +// The pixel type and format of the resulting deformed moving image +(ResultImagePixelType "float") +(ResultImageFormat "mnc") + '''.format( + optimizer=parameters.get('optimizer','CMAEvolutionStrategy'), + transform=parameters.get('transform','SimilarityTransform'), + metric=parameters.get('metric','AdvancedNormalizedCorrelation'), + resolutions=parameters.get('resolutions', 3 ), + pyramid=parameters.get('pyramid','8 8 8 4 4 4 2 2 2'), + iterations=parameters.get('iterations',def_iterations), + samples=parameters.get('samples',4096), + sampler=parameters.get('sampler',"Random"), + max_step=parameters.get('max_step',"1.0"), + automatic_transform_init=str(parameters.get("automatic_transform_init",True)).lower(), # to convert True to true + automatic_transform_init_method=parameters.get("automatic_transform_init_method","CenterOfGravity"), + exact_metric=str(parameters.get("exact_metric",False)).lower(), + new_samples=str(parameters.get("new_samples",True)).lower(), + )) + # + if 'grid_spacing' in parameters: p.write("(SampleGridSpacing {})\n".format(parameters['grid_spacing'])) + #if 'exact_metric' in parameters: p.write("(ShowExactMetricValue {})\n".format(parameters['exact_metric'])) + if 'exact_metric_spacing' in parameters: p.write("(ExactMetricSampleGridSpacing {})\n".format(parameters['exact_metric_spacing'])) + else: + if parameters[0]=="@": + par_file=parameters.split("@",1)[1] + 
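+                    # NB: by convention a parameters string starting with '@'
+                    # means "@<file>" - read the elastix parameters from that
+                    # file (see the docstring of register_elastix above)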
#print("Using:{}".format(par_file)) + else: + with open(par_file,'w') as p: + p.write(parameters) + + cmd=['elastix', + '-f', source_lr , '-m', target_lr, + '-out', tmp.tempdir+os.sep , '-p', par_file, + '-threads', str(threads)] # , '-q' + + if measure_mode: + cmd.append('-M') + + if verbose<1: + cmd.append('-q') + + inputs=[source_lr , target_lr] + + if init_par is not None: + cmd.extend(['-t0',init_par]) + inputs.append(init_par) + + if source_mask is not None and use_mask: + cmd.extend( ['-fMask',source_mask_lr] ) + inputs.append(source_mask_lr) + + if target_mask is not None and use_mask: + cmd.extend( ['-mMask',target_mask_lr] ) + inputs.append(target_mask_lr) + + if tags is not None: + vols=tag2elx(tags,tmp.cache(s_base+'_tags.txt'),tmp.cache(t_base+'_tags.txt')) + inputs.append(tmp.cache(s_base+'_tags.txt') ) + cmd.extend(['-fp',tmp.cache(s_base+'_tags.txt')] ) + shutil.copyfile(tmp.cache(s_base+'_tags.txt'),"source.tag") + + if vols>1: + inputs.append(tmp.cache(t_base+'_tags.txt') ) + cmd.extend(['-mp',tmp.cache(t_base+'_tags.txt')] ) + shutil.copyfile(tmp.cache(t_base+'_tags.txt'),"target.tag") + + outputs=[ tmp.tempdir+os.sep+'TransformParameters.0.txt' ] + + outcome=None + + if measure_mode: + # going to read the output of iterations + out_=minc.execute_w_output(cmd).split("\n") + for l,j in enumerate(out_): + if re.match("^1\:ItNr\s2\:Metric\s.*",j): + outcome=float(out_[l+1].split("\t")[1]) + #print(out_[l]) + #print(out_[l+1]) + break + else: + # + print("Elastix output:\n{}".format("\n".join(out_))) + raise minc_tools.mincError("Elastix didn't report measure") + else: + minc.command(cmd, inputs=inputs, outputs=outputs, verbose=verbose) + + if output_par is not None: + shutil.copyfile( tmp.tempdir+os.sep+'TransformParameters.0.txt' , output_par ) + + if output_xfm is not None: + nl_elastix_to_xfm( tmp.tempdir+os.sep+'TransformParameters.0.txt', + output_xfm, + downsample_grid=downsample_grid, + nl=nl) + + if output_log is not None: + shutil.copyfile(tmp.tempdir+os.sep+'elastix.log',output_log) + + return outcome + + +def parse_options(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Run elastix registration") + + parser.add_argument("--verbose", + action="store_true", + default=False, + help="Be verbose", + dest="verbose") + + parser.add_argument("source", + help="Source file") + + parser.add_argument("target", + help="Target file") + + parser.add_argument("--output_par", + help="Output transformation file, elastix format", + default=None) + + parser.add_argument("--output_xfm", + help="Output transformation file, MINC xfm format", + default=None) + + parser.add_argument("--source_mask", + default= None, + help="Source mask") + parser.add_argument("--target_mask", + default= None, + help="Target mask") + parser.add_argument("--init_xfm", + default = None, + help="Initial transformation, minc format") + parser.add_argument("--init_par", + default = None, + help="Initial transformation elastix format") + + parser.add_argument("--optimizer", + default="AdaptiveStochasticGradientDescent", + help="Elastix optimizer", + choices=["AdaptiveStochasticGradientDescent", + "CMAEvolutionStrategy" , + "ConjugateGradient", + "ConjugateGradientFRPR", + "FiniteDifferenceGradientDescent", + "QuasiNewtonLBFGS", + "RegularStepGradientDescent", + "RSGDEachParameterApart"] + ) + + parser.add_argument("--transform", + default="BSplineTransform", + help="Elastix transform", + choices=[ "BSplineTransform", + "SimilarityTransform", + 
"AffineTransform", + "AffineDTITransform", + "EulerTransform", + "MultiBSplineTransformWithNormal", + "TranslationTransform"] + ) + + parser.add_argument("--metric", + default="AdvancedNormalizedCorrelation", + help="Elastix metric", + choices=[ "AdvancedNormalizedCorrelation", + "AdvancedMattesMutualInformation", + "NormalizedMutualInformation", + "AdvancedKappaStatistic", + "KNNGraphAlphaMutualInformation", + "AdvancedMeanSquares"]) + + parser.add_argument("--resolutions", + default=3, + type=int, + help="Number of resolutions") + + parser.add_argument("--pyramid", + default="8 8 8 4 4 4 2 2 2", + help="Downsampling program") + + parser.add_argument("--iterations", + default=4000, + help="Number of iterations per level") + + parser.add_argument("--samples", + default=4096, + help="Number of samples") + + parser.add_argument("--sampler", + default="Random", + help="Elastix sampler") + + parser.add_argument("--grid_spacing", + default=10, + type=float, + help="Final node-distance for B-Splines") + + parser.add_argument("--max_step", + default="1.0", + help="Elastix maximum optimizer step") + + parser.add_argument("--work_dir", + default = None, + help="Work directory") + + parser.add_argument("--downsample", + default = None, + help="Downsample to given voxel size ", + type=float) + + parser.add_argument("--downsample_grid", + default=None, + help="Downsample output grid by factor", + type=int) + + parser.add_argument("--tags", + default=None, + help="tags") + + parser.add_argument("--nl", + dest="nl", + action="store_true", + help="Use nonlinear mode", + default=False) + + parser.add_argument("--lin", + dest="nl", + action="store_false", + help="Use linear mode", + default=False) + + parser.add_argument("--output_log", + default = None, + help="Output log file") + + parser.add_argument("-M","--measure", + default = False, + action = "store_true", + help = "Measure mode", + dest="measure") + + parser.add_argument("--close", + dest="close", + action="store_true", + help="Do not initialize transform", + default=False) + + options = parser.parse_args() + return options + + +if __name__ == "__main__": + options = parse_options() + + if options.source is None or options.target is None: + print("Error in arguments, run with --help") + print(repr(options)) + else: + if not options.nl and options.transform=="BSplineTransform": + options.transform="SimilarityTransform" + + parameters= { + "optimizer": options.optimizer, + "transform": options.transform, + "metric": options.metric, + "resolutions": options.resolutions, + "pyramid": options.pyramid, + "iterations": options.iterations, + "samples": options.samples, + "sampler": options.sampler, + "grid_spacing":options.grid_spacing, + "max_step": options.max_step, + "measure": options.measure, + "automatic_transform_init": not options.close + } + + out=register_elastix( + options.source, options.target, + output_par = options.output_par, + output_xfm = options.output_xfm, + source_mask= options.source_mask, + target_mask= options.target_mask, + init_xfm = options.init_xfm, + init_par = options.init_par, + parameters = parameters, + work_dir = options.work_dir, + downsample = options.downsample, + downsample_grid=options.downsample_grid, + nl = options.nl, + output_log = options.output_log, + tags = options.tags, + verbose = 2) + if options.measure: + print(out) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/grading/__init__.py b/ipl/grading/__init__.py new file mode 
100644 index 0000000..edb624a --- /dev/null +++ b/ipl/grading/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# +# image grading functions + +# internal funcions +from .structures import MriDataset +from .structures import MriTransform +from .labels import split_labels_seg +from .labels import merge_labels_seg +from .resample import resample_file +from .resample import resample_split_segmentations +from .resample import warp_rename_seg +from .resample import warp_sample +from .resample import concat_resample +from .registration import linear_registration +from .registration import non_linear_registration +from .model import create_local_model +from .model import create_local_model_flip +from .filter import apply_filter +from .filter import make_border_mask +from .filter import generate_flip_sample +from .library import save_library_info +from .library import load_library_info +from .train import generate_library +from .fuse import fusion_grading +from .cross_validation import cv_fusion_grading +from .cross_validation import run_grading_experiment +from .analysis import calc_similarity_stats + +__all__= ['generate_library', + 'load_library_info', + 'cv_fusion_grading', + 'fusion_grading' ] + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/grading/analysis.py b/ipl/grading/analysis.py new file mode 100644 index 0000000..22686fa --- /dev/null +++ b/ipl/grading/analysis.py @@ -0,0 +1,165 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + + +def calc_similarity_stats(input_ground_truth, + input_segmentation, + output_stats=None, + relabel=None ): + ''' + Calculate similarity stats + ''' + stats={} + + stats[ 'sample' ] = input_segmentation + stats[ 'ground_truth' ] = input_ground_truth + + with mincTools() as m: + sim = m.execute_w_output( + ['volume_gtc_similarity', input_ground_truth, input_segmentation,'--csv'] + ).rstrip("\n").split(',') + + stats['gkappa'] = float(sim[0]) + stats['gtc'] = float(sim[1]) + stats['akappa'] = float(sim[2]) + + sim = m.execute_w_output( + [ 'volume_similarity', input_ground_truth, input_segmentation,'--csv'] + ).split("\n") + + ka={} + se={} + sp={} + js={} + + for i in sim: + q=i.split(',') + if len(q)==5: + l=int(q[0]) + + if relabel is not None: + l=relabel[l] + + ka[l] = float( q[1] ) + se[l] = float( q[2] ) + sp[l] = float( q[3] ) + js[l] = float( q[4] ) + + stats['ka']=ka + stats['se']=se + stats['sp']=sp + stats['js']=js + + if output_stats is not None: + with open(output_stats,'w') as f: + f.write("{},{},{},{}\n".format(stats['sample'],stats['gkappa'],stats['gtc'],stats['akappa'])) + + return stats + +def create_grading_map( + output_grading, + output_map, + lin_xfm=None, + nl_xfm=None, + template=None ): + try: + with mincTools( verbose=2 ) as m: + xfm=None + + if lin_xfm is not None and nl_xfm is not None: + xfm=m.tmp('concat.xfm') + m.xfmconcat([lin_xfm,nl_xfm],xfm) + elif lin_xfm is not None: + xfm=lin_xfm + else: + xfm=nl_xfm + + m.resample_smooth(output_grading,output_map, + transform=xfm, + like=template, + order=2, + datatype='short') + + except mincError as e: + print("Exception in split_labels:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in split_labels:{}".format(sys.exc_info()[0])) + traceback.print_exc( 
file=sys.stdout)
+        raise
+
+
+def create_error_map(input_ground_truth,
+                     input_segmentation,
+                     output_maps,
+                     lin_xfm=None,
+                     nl_xfm=None,
+                     template=None,
+                     label_list=[] ):
+    try:
+        with mincTools( verbose=2 ) as m:
+            # go over labels and calculate errors per label
+            for (i,l) in enumerate(label_list):
+                # extract label error
+                out=m.tmp(str(l)+'.mnc')
+                xfm=None
+
+                m.calc([input_segmentation, input_ground_truth],
+                       "abs(A[0]-{})<0.5&&abs(A[1]-{})>0.5 || abs(A[0]-{})>0.5&&abs(A[1]-{})<0.5 ? 1:0".format(l,l,l,l),
+                       out, datatype='-byte')
+
+                if lin_xfm is not None and nl_xfm is not None:
+                    xfm=m.tmp(str(l)+'.xfm')
+                    m.xfmconcat([lin_xfm,nl_xfm],xfm)
+                elif lin_xfm is not None:
+                    xfm=lin_xfm
+                else:
+                    xfm=nl_xfm
+
+                m.resample_smooth(out,output_maps[i],
+                                  transform=xfm,
+                                  like=template,
+                                  order=1,
+                                  datatype='byte')
+
+    except mincError as e:
+        print("Exception in create_error_map:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in create_error_map:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+def average_error_maps(maps, out_avg):
+    try:
+        with mincTools( verbose=2 ) as m:
+            print("average_error_maps {} {}".format(repr(maps),repr(out_avg)))
+            m.average(maps, out_avg, datatype='-short')
+    except mincError as e:
+        print("Exception in average_error_maps:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in average_error_maps:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/cross_validation.py b/ipl/grading/cross_validation.py
new file mode 100644
index 0000000..aad8387
--- /dev/null
+++ b/ipl/grading/cross_validation.py
@@ -0,0 +1,387 @@
+import shutil
+import os
+import sys
+import csv
+import copy
+import json
+import random
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+# scoop parallel execution
+from scoop import futures, shared
+
+from .fuse import *
+from .structures import *
+from .resample import *
+from .filter import *
+from .analysis import *
+
+
+def run_grading_experiment( input_scan,
+                            input_seg,
+                            grading_library,
+                            output_experiment,
+                            grading_parameters={},
+                            debug=False,
+                            mask=None,
+                            work_dir=None,
+                            fuse_variant='fuse',
+                            add=[],
+                            cleanup=False,
+                            group=None,
+                            grading=None
+                            ):
+    """run a grading experiment: perform grading and compare with ground truth
+
+    Arguments:
+    input_scan -- input scan object MriDataset
+    input_seg  -- input segmentation file name (ground truth)
+    grading_library -- grading library object
+    output_experiment -- prefix for output
+
+    Keyword arguments:
+    grading_parameters -- parameters for the grading algorithm
+    debug -- debug flag (default False)
+    mask  -- mask file name to restrict grading (default None)
+    work_dir -- work directory (default None - use output_experiment)
+    fuse_variant -- name of fusion parameters (default 'fuse')
+    add -- additional modalities [T2w, PDw etc]
+    cleanup -- flag to clean most of the temporary files
+    """
+    try:
+        relabel=grading_library.get("label_map",None)
+
+        if relabel is not None and isinstance(relabel, list) :
+            _r={i[0]:i[1] for i in relabel}
+            relabel=_r
+
+        if debug:
+            if not os.path.exists(os.path.dirname(output_experiment)):
+                os.makedirs(os.path.dirname(output_experiment))
+            with open(output_experiment+'_par.json','w') as f:
+                json.dump(grading_parameters,f,indent=1)
+
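+        # fusion_grading() (defined in fuse.py later in this patch) does the
+        # actual work; it returns the fused segmentation, the voxel-wise
+        # grading map, the per-structure volume/grading measurements and an
+        # info dictionary describing the intermediate files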
(output_seg, output_grad, output_volumes, output_info) = fusion_grading( + input_scan, + grading_library, + output_experiment, + input_mask=mask, + parameters=grading_parameters, + debug=debug, + work_dir=work_dir, + fuse_variant=fuse_variant, + add=add, + cleanup=cleanup) + + stats = calc_similarity_stats( input_seg, + output_seg, + output_stats = output_experiment+'_stats.csv', + relabel = relabel) + + stats['group']=group + stats['grading']=grading + stats['result']=output_volumes + + name=os.path.basename(input_scan).rsplit('.mnc',1)[0] + grading_map=work_dir+os.sep+fuse_variant+'_'+name+'_grading_nl.mnc' + + lin_xfm=None + nl_xfm=None + + if output_info['bbox_initial_xfm'] is not None: + lin_xfm=output_info['bbox_initial_xfm'].xfm + + if output_info['nonlinear_xfm'] is not None: + nl_xfm=output_info['nonlinear_xfm'].xfm + + create_grading_map(output_grad, grading_map, + lin_xfm=lin_xfm, + nl_xfm=nl_xfm, + template=grading_library.get('local_model',None)) + + output_info['stats']=stats + output_info['output']=output_seg + output_info['ground_truth']=input_seg + output_info['grading_map']=grading_map + output_info['group']=group + output_info['grading']=grading + output_info['volumes']=output_volumes + + with open(output_experiment+'_out.json','w') as f: + json.dump(output_info,f,indent=1, cls=GMRIEncoder) + + with open(output_experiment+'_stats.json','w') as f: + json.dump(stats,f,indent=1, cls=GMRIEncoder) + + return (stats, output_info) + + except mincError as e: + print("Exception in run_grading_experiment:{}".format( str(e)) ) + traceback.print_exc( file=sys.stderr ) + raise + + except : + print("Exception in run_grading_experiment:{}".format( sys.exc_info()[0]) ) + traceback.print_exc( file=sys.stderr ) + raise + + +def loo_cv_fusion_grading(validation_library, + grading_library, + output, + grading_parameters, + debug=False, + fuse_variant='fuse', + cv_variant='cv', + cleanup=False, + cv_iter=None): + '''Run leave-one-out cross-validation experiment''' + # for each N subjects run segmentation and compare + # Right now run LOOCV + if not os.path.exists(output): + try: + os.makedirs(output) + except: + pass # assume directory was created by competing process + + results=[] + results_json=[] + + modalities=grading_library.get('modalities',1)-1 + + print("cv_iter={}".format(repr(cv_iter))) + + for (i,j) in enumerate(validation_library): + n = os.path.basename(j[0]).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + output_experiment = output+os.sep+n+'_'+cv_variant + + validation_sample = j[0] + validation_segment = j[1] + + validation_group = int( j[-2] ) + validation_grading = float(j[-1] ) + + add=j[2:2+modalities] + + experiment_grading_library=copy.deepcopy(grading_library) + + # remove sample + experiment_grading_library['library']=[ _i for _i in grading_library['library'] if _i[2].find(n)<0 ] + + if (cv_iter is None) or (i == cv_iter): + results.append( futures.submit( + run_grading_experiment, + validation_sample, validation_segment, + experiment_grading_library, + output_experiment, + grading_parameters=grading_parameters, + debug=debug, + work_dir=output+os.sep+'work_'+n+'_'+fuse_variant, + fuse_variant=fuse_variant, + add=add, + cleanup=cleanup, + group=validation_group, + grading=validation_grading + )) + else: + results_json.append( (output_experiment+'_stats.json', + output_experiment+'_out.json') ) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + stat_results=[] + output_results=[] + + if cv_iter is None: + stat_results = [ _i.result()[0] for _i in results ] + 
+        output_results= [ _i.result()[1] for _i in results ]
+
+    elif cv_iter==-1:
+        # load previously computed results from the json files
+        for _i in results_json:
+            with open(_i[0],'r') as _f:
+                stat_results.append(json.load(_f))
+            with open(_i[1],'r') as _f:
+                output_results.append(json.load(_f))
+
+    return (stat_results, output_results)
+
+
+def full_cv_fusion_grading(validation_library,
+                           grading_library,
+                           output,
+                           grading_parameters,
+                           cv_iterations,
+                           cv_exclude,
+                           debug=False,
+                           fuse_variant='fuse',
+                           cv_variant='cv',
+                           cleanup=False,
+                           cv_iter=None):
+    '''Run full cross-validation experiment with random exclusion'''
+    if cv_iter is not None:
+        # raising a bare string is invalid in Python 3
+        raise NotImplementedError("cv_iter is not implemented for full CV")
+
+    validation_library_idx=range(len(validation_library))
+    # randomly exclude samples, repeat
+    results=[]
+    if not os.path.exists(output):
+        try:
+            os.makedirs(output)
+        except:
+            pass # assume directory was created by competing process
+
+    modalities=grading_library.get('modalities',1)-1
+
+    for i in range( cv_iterations ):
+        rem_list=[]
+        ran_file=output+os.sep+('random_{}_{}.json'.format(cv_variant,i))
+
+        # the exclusion list is saved, so that an interrupted experiment can
+        # be restarted with the same random sub-sampling
+        if not os.path.exists( ran_file ):
+            rem_list=random.sample( validation_library_idx, cv_exclude )
+
+            with open( ran_file ,'w') as f:
+                json.dump(rem_list,f)
+        else:
+            with open( ran_file ,'r') as f:
+                rem_list=json.load(f)
+
+        # list of subjects
+        rem_items=[ validation_library[j] for j in rem_list ]
+
+        rem_n=[os.path.basename(j[0]).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] for j in rem_items]
+        rem_lib=[]
+
+        for j in rem_n:
+            rem_lib.extend( [ k for (k,t) in enumerate( grading_library['library'] ) if t[2].find(j)>=0 ] )
+
+        if debug: print(repr(rem_lib))
+        rem_lib=set(rem_lib)
+        # prepare exclusion list
+        experiment_grading_library=copy.deepcopy(grading_library)
+
+        experiment_grading_library['library']=\
+            [ k for j,k in enumerate( grading_library['library'] ) if j not in rem_lib ]
+
+        for j,k in enumerate(rem_items):
+            output_experiment=output+os.sep+('{}_{}_{}'.format(i,rem_n[j],cv_variant))
+            work_dir=output+os.sep+('work_{}_{}_{}'.format(i,rem_n[j],fuse_variant))
+
+            results.append( futures.submit(
+                run_grading_experiment, k[0], k[1],
+                experiment_grading_library,
+                output_experiment,
+                grading_parameters=grading_parameters,
+                debug=debug,
+                work_dir=work_dir,
+                fuse_variant=fuse_variant,
+                add=k[4:4+modalities],
+                cleanup=cleanup,
+                group=int(k[-2]),
+                grading=float(k[-1])
+                ))
+
+    futures.wait(results, return_when=futures.ALL_COMPLETED)
+    stat_results  = [ i.result()[0] for i in results ]
+    output_results= [ i.result()[1] for i in results ]
+
+    return ( stat_results, output_results )
+
+
+def cv_fusion_grading( cv_parameters,
+                       grading_library,
+                       output,
+                       grading_parameters,
+                       debug=False,
+                       cleanup=False,
+                       cv_iter=None):
+    '''Run cross-validation experiment:
+    for each of N subjects run grading and compare with the known answer.
+    Right now runs LOOCV or random CV
+    '''
+
+    # TODO: implement more realistic, random schemes
+    validation_library=cv_parameters['validation_library']
+
+    # maximum number of iterations
+    cv_iterations=cv_parameters.get('iterations',-1)
+
+    # number of samples to exclude
+    cv_exclude=cv_parameters.get('cv',1)
+
+    # used to distinguish different versions of label fusion
+    fuse_variant=cv_parameters.get('fuse_variant','fuse')
+
+    # used to distinguish different versions of cross-validation
+    cv_variant=cv_parameters.get('cv_variant','cv')
+
+    cv_output=output+os.sep+cv_variant+'_stats.json'
+    res_output=output+os.sep+cv_variant+'_res.json'
+
+    # the original tested 'validation_library is not list', which is true
+    # even for a list instance; check the type properly instead
+    if not isinstance(validation_library, list):
+        with open(validation_library,'r') as f:
+            validation_library=list(csv.reader(f))
+
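+    # Expected validation library CSV layout (inferred from the indexing in
+    # loo_cv_fusion_grading / full_cv_fusion_grading):
+    #   scan, segmentation, [additional modalities ...], group, grading
+    # where group is parsed as int and grading as float; e.g. a hypothetical
+    # two-modality row:
+    #   sub01_t1.mnc,sub01_seg.mnc,sub01_t2.mnc,1,0.75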
print("Validation library:",validation_library) + stat_results=None + output_results=None + + if cv_iter is not None: + cv_iter=int(cv_iter) + + if cv_iterations==-1 and cv_exclude==1: # simle LOO cross-validation + (stat_results, output_results) = loo_cv_fusion_grading(validation_library, + grading_library, + output, grading_parameters, + debug=debug, + cleanup=cleanup, + fuse_variant=fuse_variant, + cv_variant=cv_variant, + cv_iter=cv_iter) + else: # arbitrary number of iterations + (stat_results, output_results) = full_cv_fusion_grading(validation_library, + grading_library, + output, grading_parameters, + cv_iterations, cv_exclude, + debug=debug, + cleanup=cleanup, + fuse_variant=fuse_variant, + cv_variant=cv_variant, + cv_iter=cv_iter) + + if cv_iter is None or cv_iter==-1: + # build glim-image tables (?) + results=[] + + #for GLIM image + with open(output+os.sep+cv_variant+'_grading.glim','w') as f: + for k in output_results: + group=k['group'] + grading=k['grading'] + grading_map=k['grading_map'] + f.write("{} {} {}\n".format(grading_map,1.0,grading)) + + #for RMINC image + with open(output+os.sep+cv_variant+'_grading.csv','w') as f: + f.write("grading_map,group,grading\n") + for k in output_results: + group=k['group'] + grading=k['grading'] + grading_map=k['grading_map'] + f.write("{},{},{}\n".format(grading_map,group,grading)) + + #TODO: run glim-image or RMINC here + + with open(cv_output,'w') as f: + json.dump(stat_results, f, indent=1 ) + + with open(res_output,'w') as f: + json.dump(output_results, f, indent=1, cls=GMRIEncoder) + + return stat_results + else: + # we assume that results will be available later + return None + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/grading/filter.py b/ipl/grading/filter.py new file mode 100644 index 0000000..34d59c1 --- /dev/null +++ b/ipl/grading/filter.py @@ -0,0 +1,248 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +import ipl.minc_hl as hl + + +def filter_sample(input,output,filters,model=None): + apply_filter(input.scan,output.scan,filters,model=model.scan,input_mask=input.mask,model_mask=model.mask) + # TODO: parallelalize? + for (i,j) in enumerate( input.add ): + apply_filter(input.add[i],output.add[i],filters,model=model.add[i],input_mask=i.mask,model_mask=model.mask) + + +def apply_filter(input, output, filters, model=None, input_mask=None, model_mask=None,input_labels=None,model_labels=None): + output_scan=input + try: + if filters is not None : + with mincTools() as m: + if filters.get('denoise',False): + # TODO: choose between ANLM and NLM here? 
+                    m.anlm(output_scan,m.tmp('denoised.mnc'),
+                           beta  =filters.get('beta',0.5),
+                           patch =filters.get('patch',1),
+                           search=filters.get('search',1),
+                           regularize=filters.get('regularize',None))
+
+                    output_scan=m.tmp('denoised.mnc')
+
+                if filters.get('normalize',False) and model is not None:
+                    if filters.get('nuyl',False):
+                        m.nuyl_normalize(output_scan,model,m.tmp('normalized.mnc'),source_mask=input_mask,target_mask=model_mask)
+                    else:
+                        m.volume_pol(output_scan,model,m.tmp('normalized.mnc'),source_mask=input_mask,target_mask=model_mask)
+
+                    output_scan = m.tmp('normalized.mnc')
+
+                # TODO: implement more filters
+                patch_norm = filters.get('patch_norm',None)
+
+                if patch_norm is not None:
+                    print("Running patch normalization")
+                    db  = patch_norm.get('db',None)
+                    idx = patch_norm.get('idx',None)
+                    thr = patch_norm.get('threshold',None)
+                    spl = patch_norm.get('spline',None)
+                    med = patch_norm.get('median',None)
+                    it  = patch_norm.get('iterations',None)
+                    # original condition read "idx and not None", clearly a typo
+                    if db is not None and idx is not None:
+                        # have all the pieces
+                        m.patch_norm(output_scan, m.tmp('patch_norm.mnc'),
+                                     index=idx, db=db, threshold=thr, spline=spl,
+                                     median=med, field=m.tmp('patch_norm_field.mnc'),
+                                     iterations=it)
+                        output_scan = m.tmp('patch_norm.mnc')
+
+                label_norm = filters.get('label_norm',None)
+
+                if label_norm is not None and input_labels is not None and model_labels is not None:
+                    print("Running label norm:{}".format(repr(label_norm)))
+                    norm_order=label_norm.get('order',3)
+                    norm_median=label_norm.get('median',True)
+                    hl.label_normalize(output_scan,input_labels,model,model_labels,out=m.tmp('label_norm.mnc'),order=norm_order,median=norm_median)
+                    output_scan = m.tmp('label_norm.mnc')
+
+                shutil.copyfile(output_scan,output)
+        else:
+            shutil.copyfile(input,output)
+    except mincError as e:
+        print("Exception in apply_filter:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in apply_filter:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+def make_border_mask( input, output, width=1, labels=1):
+    '''Extract a border along the label edges'''
+    try:
+        if not os.path.exists(output):
+            with mincTools() as m:
+                if labels==1:
+                    m.binary_morphology(input,"D[{}]".format((width+1)//2),m.tmp('d.mnc'))
+                    m.binary_morphology(input,"E[{}]".format(width//2),m.tmp('e.mnc'))
+                    m.calc([m.tmp('d.mnc'),m.tmp('e.mnc')],'A[0]>0.5&&A[1]<0.5?1:0',output)
+                else: # have to split up labels and then create a mask of all borders
+                    split_labels(input,labels, m.tmp('split'))
+                    borders=[]
+                    for i in range(1,labels):
+                        l='{}_{:02d}.mnc'  .format(m.tmp('split'),i)
+                        d='{}_{:02d}_d.mnc'.format(m.tmp('split'),i)
+                        e='{}_{:02d}_e.mnc'.format(m.tmp('split'),i)
+                        b='{}_{:02d}_b.mnc'.format(m.tmp('split'),i)
+                        m.binary_morphology(l,"D[{}]".format((width+1)//2),d)
+                        m.binary_morphology(l,"E[{}]".format(width//2),e)
+                        m.calc([d,e],'A[0]>0.5&&A[1]<0.5?1:0',b)
+                        borders.append(b)
+                    m.math(borders,'max',m.tmp('max'),datatype='-float')
+                    m.reshape(m.tmp('max'),output,datatype='byte',
+                              image_range=[0,1],valid_range=[0,1])
+
+    except mincError as e:
+        print("Exception in make_border_mask:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in make_border_mask:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+def split_labels(input, n_labels, output_prefix,
+                 antialias=False, blur=None,
+                 expit=None, normalize=False ):
+    try:
+        with mincTools() as m:
+            inputs=[ input ]
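+            # itk_split_labels expands the '%02d' pattern into one output
+            # volume per label; the same names are pre-computed below so
+            # that minc.command() can verify the outputs (index 00 appears
+            # to be the background label - cf. make_border_mask above,
+            # which skips it)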
outputs=['{}_{:02d}.mnc'.format(output_prefix,i) for i in range(n_labels) ] + + cmd=['itk_split_labels',input,'{}_%02d.mnc'.format(output_prefix), + '--missing',str(n_labels)] + if antialias: + cmd.append('--antialias') + if normalize: + cmd.append('--normalize') + if blur is not None: + cmd.extend(['--blur',str(blur)]) + if expit is not None: + cmd.extend(['--expit',str(expit)]) + m.command(cmd, inputs=inputs, outputs=outputs) + #return outputs + except mincError as e: + print("Exception in split_labels:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in split_labels:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def generate_flip_sample(input, labels_datatype='byte'): + '''generate flipped version of sample''' + try: + with mincTools() as m: + m.flip_volume_x(input.scan,input.scan_f) + + for (i,j) in enumerate(input.add): + m.flip_volume_x(input.add[i],input.add_f[i]) + + if input.mask is not None: + m.flip_volume_x(input.mask, input.mask_f, labels=True) + + #for i in input.add: + # m.flip_volume_x(i, input.seg_f, labels=True,datatype=labels_datatype) + except mincError as e: + print("Exception in generate_flip_sample:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in generate_flip_sample:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +def create_unflip_remap(remap,remap_flip): + if remap is not None and remap_flip is not None: + # convert both into dict + _remap={ int( i[0] ):int(i[1]) for i in remap } + _remap_flip={ int(i[0]):int(i[1]) for i in remap_flip } + _rr={} + for i,j in _remap.items(): + if i in _remap_flip: + _rr[j]=_remap_flip[i] + return _rr + else: + return None + +def log_transform_sample(input, output, threshold=1.0): + try: + with mincTools() as m: + m.calc([input.scan],'A[0]>{}?log(A[0]):0.0'.format(threshold), + output.scan) + except mincError as e: + print("Exception in log_transform_sample:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in log_transform_sample:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def create_patch_norm_db( input_samples, + patch_norm_db, + patch_norm_idx, + pct=0.1, + patch=2, + sub=1): + try: + with mincTools() as m: + patch_lib=os.path.dirname(input_samples[0].scan)+os.sep+'patch_lib.lst' + inputs=[] + outputs=[patch_norm_db] + + with open(patch_lib,'w') as f: + for i in input_samples: + f.write( os.path.basename( i.scan ) ) + f.write("\n") + inputs.append(i.scan) + + cmd=['create_feature_database', + patch_lib, patch_norm_db, + '--patch', + '--patch-radius', str(patch), + '--subsample', str(sub), + '--random', str(pct), + '--log', + '--threshold', str(1.0), + ] + + m.command(cmd, inputs=inputs, outputs=outputs) + + cmd=['refine_feature_database', + patch_norm_db, patch_norm_idx + ] + m.command(cmd, inputs=[patch_norm_db], outputs=[patch_norm_idx]) + + except mincError as e: + print("Exception in create_patch_norm_db:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in create_patch_norm_db:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/grading/fuse.py b/ipl/grading/fuse.py new file mode 100644 index 0000000..0c58a4a --- /dev/null +++ b/ipl/grading/fuse.py @@ -0,0 +1,830 @@ +# 
-*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + +import shutil +import os +import sys +import csv +import copy +import re +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +# scoop parallel execution +from scoop import futures, shared + +from .filter import * +from .structures import * +from .registration import * +from .resample import * +from .preselect import * +from .qc import * +from .fuse_grading import * + +import traceback + +def seg_to_volumes(seg, output_json, label_map=None,grad=None,median=False): + with mincTools( verbose=2 ) as m: + _out=m.label_stats(seg,label_defs=label_map,volume=grad,median=median) + # convert to a dictionary + # label_id, volume, mx, my, mz,[mean/median] + out={i[0]: { 'volume':i[1], 'x':i[2], 'y':i[3], 'z': i[4], 'grad':i[5] } for i in _out } + + with open(output_json,'w') as f: + json.dump(out,f,indent=1) + return out + +def fusion_grading( input_scan, + library_description, + output_segment, + input_mask=None, + parameters={}, + exclude=[], + work_dir=None, + debug=False, + ec_variant=None, + fuse_variant=None, + regularize_variant=None, + add=[], + cleanup=False, + cleanup_xfm=False, + exclude_re=None): + """Apply fusion segmentation""" + + if debug: + print( "Segmentation parameters:") + print( repr(parameters) ) + + out_variant='' + if fuse_variant is not None: + out_variant+=fuse_variant + + if regularize_variant is not None: + out_variant+='_'+regularize_variant + + if ec_variant is not None: + out_variant+='_'+ec_variant + + if work_dir is None: + work_dir=output_segment+os.sep+'work_segment' + + if not os.path.exists(work_dir): + os.makedirs(work_dir) + + work_lib_dir= work_dir+os.sep+'library' + work_lib_dir_f=work_dir+os.sep+'library_f' + + if not os.path.exists(work_lib_dir): + os.makedirs(work_lib_dir) + + if not os.path.exists(work_lib_dir_f): + os.makedirs(work_lib_dir_f) + + library_nl_samples_avail=library_description['nl_samples_avail'] + library_modalities=library_description.get('modalities',1)-1 + + # perform symmetric segmentation + segment_symmetric= parameters.get('segment_symmetric', False ) + + # read filter paramters + pre_filters= parameters.get('pre_filters', None ) + post_filters= parameters.get('post_filters', parameters.get( 'filters', None )) + + + # perform local linear registration + do_initial_register = parameters.get( 'initial_register', + parameters.get( 'linear_register', {})) + + if do_initial_register is not None and isinstance(do_initial_register,dict): + initial_register = do_initial_register + do_initial_register = True + else: + initial_register={} + + inital_reg_type = parameters.get( 'initial_register_type', + parameters.get( 'linear_register_type', + initial_register.get('type','-lsq12'))) + + inital_reg_ants = parameters.get( 'initial_register_ants', + parameters.get( 'linear_register_ants', False)) + + inital_reg_options = parameters.get( 'initial_register_options', + initial_register.get('options',None) ) + + inital_reg_downsample = parameters.get( 'initial_register_downsample', + initial_register.get('downsample',None)) + + inital_reg_use_mask = parameters.get( 'initial_register_use_mask', + initial_register.get('use_mask',False)) + + initial_reg_objective = initial_register.get('objective','-xcorr') + + # perform local linear registration + do_initial_local_register = parameters.get( 'initial_local_register', + parameters.get( 'local_linear_register', {}) ) + if do_initial_local_register is not None and isinstance(do_initial_local_register,dict): + 
initial_local_register=do_initial_local_register + do_initial_local_register=True + else: + initial_local_register={} + + local_reg_type = parameters.get( 'local_register_type', + initial_local_register.get('type','-lsq12')) + + local_reg_ants = parameters.get( 'local_register_ants', False) + + local_reg_opts = parameters.get( 'local_register_options', + initial_local_register.get('options',None)) + + local_reg_bbox = parameters.get( 'local_register_bbox', + initial_local_register.get('bbox',False )) + + local_reg_downsample = parameters.get( 'local_register_downsample', + initial_local_register.get('downsample',None)) + + local_reg_use_mask = parameters.get( 'local_register_use_mask', + initial_local_register.get('use_mask',True)) + + local_reg_objective = initial_local_register.get('objective','-xcorr') + # if non-linear registraiton should be performed for library creation + do_nonlinear_register=parameters.get('non_linear_register', False ) + + # if non-linear registraiton should be performed with ANTS + do_nonlinear_register_ants=parameters.get('non_linear_register_ants',False ) + nonlinear_register_type = parameters.get( 'non_linear_register_type',None) + if nonlinear_register_type is None: + if do_nonlinear_register_ants: + nonlinear_register_type='ants' + + # if non-linear registraiton should be performed pairwise + do_pairwise =parameters.get('non_linear_pairwise', False ) + + # if pairwise registration should be performed using ANTS + do_pairwise_ants =parameters.get('non_linear_pairwise_ants', True ) + pairwise_register_type = parameters.get( 'non_linear_pairwise_type',None) + if pairwise_register_type is None: + if do_pairwise_ants: + pairwise_register_type='ants' + + # should we use ANTs + library_preselect= parameters.get('library_preselect', 10) + library_preselect_step= parameters.get('library_preselect_step', None) + library_preselect_method= parameters.get('library_preselect_method', 'MI') + + + nlreg_level = parameters.get('non_linear_register_level', 2) + nlreg_start = parameters.get('non_linear_register_start', 16) + nlreg_options = parameters.get('non_linear_register_options', None) + nlreg_downsample = parameters.get('non_linear_register_downsample', None) + + + pairwise_level = parameters.get('pairwise_level', 2) + pairwise_start = parameters.get('pairwise_start', 16) + pairwise_options = parameters.get('pairwise_options', None) + + fuse_options = parameters.get('fuse_options', None) + + resample_order = parameters.get('resample_order', 2) + label_resample_order= parameters.get( 'label_resample_order',resample_order) + + resample_baa = parameters.get('resample_baa', True) + + use_median = parameters.get('use_median', False) + # QC image paramters + qc_options = parameters.get('qc_options', None) + + # special case for training error correction, assume input scan is already pre-processed + run_in_bbox = parameters.get('run_in_bbox', False) + + classes_number = library_description['classes_number'] + groups = library_description['groups'] + seg_datatype = 'byte' + + output_info = {} + + sample= MriDataset(scan=input_scan, seg=None, + mask=input_mask, protect=True, + add=add) + # get parameters + model = MriDataset(scan=library_description['model'], + mask=library_description['model_mask'], + add= library_description.get('model_add',[]) ) + + local_model = MriDataset(scan=library_description['local_model'], + mask=library_description['local_model_mask'], + scan_f=library_description.get('local_model_flip',None), + 
mask_f=library_description.get('local_model_mask_flip',None), + seg= library_description.get('local_model_seg',None), + seg_f= library_description.get('local_model_seg_flip',None), + add= library_description.get('local_model_add',[]), + add_f= library_description.get('local_model_add_flip',[]), + ) + + library = library_description['library'] + + sample_modalities=len(add) + + print("\n\n") + print("Sample modalities:{}".format(sample_modalities)) + print("\n\n") + # apply the same steps as used in library creation to perform segmentation: + + # global + initial_xfm=None + nonlinear_xfm=None + bbox_sample=None + nl_sample=None + bbox_linear_xfm=None + + sample_filtered=MriDataset(prefix=work_dir, name='flt_'+sample.name, add_n=sample_modalities ) + + # QC file + # TODO: allow for alternative location, extension + sample_qc=work_dir+os.sep+'qc_'+sample.name+'_'+out_variant+'.jpg' + + if run_in_bbox: + segment_symmetric=False + do_initial_register=False + do_initial_local_register=False + # assume filter already applied! + pre_filters=None + post_filters=None + + if segment_symmetric: + # need to flip the inputs + flipdir=work_dir+os.sep+'flip' + if not os.path.exists(flipdir): + os.makedirs(flipdir) + + sample.scan_f=flipdir+os.sep+os.path.basename(sample.scan) + sample.add_f=['' for (i,j) in enumerate(sample.add)] + + for (i,j) in enumerate(sample.add): + sample.add_f[i]=flipdir+os.sep+os.path.basename(sample.add[i]) + + if sample.mask is not None: + sample.mask_f=flipdir+os.sep+'mask_'+os.path.basename(sample.scan) + generate_flip_sample( sample ) + + if pre_filters is not None: + apply_filter( sample.scan, + sample_filtered.scan, + pre_filters, + model=model.scan, + model_mask=model.mask) + + if sample.mask is None: + sample_filtered.mask=None + # hack + sample_filtered.add=sample.add + sample=sample_filtered + else: + sample_filtered=None + + output_info['sample_filtered']=sample_filtered + + if do_initial_register: + initial_xfm=MriTransform(prefix=work_dir, name='init_'+sample.name ) + + if inital_reg_type=='elx' or inital_reg_type=='elastix' : + elastix_registration( sample, + model, initial_xfm, + symmetric=segment_symmetric, + parameters=inital_reg_options, + nl=False, + downsample=inital_reg_downsample + ) + elif inital_reg_type=='ants' or inital_reg_ants: + linear_registration( sample, + model, initial_xfm, + symmetric=segment_symmetric, + reg_type=inital_reg_type, + linreg=inital_reg_options, + ants=True, + downsample=inital_reg_downsample + ) + else: + linear_registration( sample, + model, initial_xfm, + symmetric=segment_symmetric, + reg_type=inital_reg_type, + linreg=inital_reg_options, + downsample=inital_reg_downsample, + objective=initial_reg_objective + ) + + output_info['initial_xfm']=initial_xfm + + + # local + bbox_sample = MriDataset(prefix=work_dir, name='bbox_init_'+sample.name, + add_n=sample_modalities ) + + + if do_initial_local_register: + bbox_linear_xfm=MriTransform(prefix=work_dir, name='bbox_init_'+sample.name ) + + if local_reg_type=='elx' or local_reg_type=='elastix' : + elastix_registration( sample, + local_model, + bbox_linear_xfm, + symmetric=segment_symmetric, + init_xfm=initial_xfm, + resample_order=resample_order, + parameters=local_reg_opts, + bbox=local_reg_bbox, + downsample=local_reg_downsample + ) + elif local_reg_type=='ants' or local_reg_ants: + linear_registration( sample, + local_model, + bbox_linear_xfm, + init_xfm=initial_xfm, + symmetric=segment_symmetric, + reg_type=local_reg_type, + linreg=local_reg_opts, + 
resample_order=resample_order, + ants=True, + close=True, + bbox=local_reg_bbox, + downsample=local_reg_downsample + ) + else: + linear_registration( sample, + local_model, + bbox_linear_xfm, + init_xfm=initial_xfm, + symmetric=segment_symmetric, + reg_type=local_reg_type, + linreg=local_reg_opts, + resample_order=resample_order, + close=True, + bbox=local_reg_bbox, + downsample=local_reg_downsample, + objective=local_reg_objective + ) + + else: + bbox_linear_xfm=initial_xfm + + output_info['bbox_initial_xfm']=bbox_linear_xfm + bbox_sample.mask=None + bbox_sample.seg=None + bbox_sample.seg_f=None + + warp_sample(sample, local_model, bbox_sample, + transform=bbox_linear_xfm, + symmetric=segment_symmetric, + symmetric_flip=segment_symmetric,# need to flip symmetric dataset + resample_order=resample_order, + filters=post_filters, + ) + + output_info['bbox_sample']=bbox_sample + + # TODO: run local intensity normalization + + # 3. run non-linear registration if needed + if do_nonlinear_register: + nl_sample=MriDataset(prefix=work_dir, name='nl_'+sample.name, add_n=sample_modalities ) + nonlinear_xfm=MriTransform(prefix=work_dir, name='nl_'+sample.name ) + + + if nonlinear_register_type=='elx' or nonlinear_register_type=='elastix' : + elastix_registration( bbox_sample, local_model, + nonlinear_xfm, + symmetric=segment_symmetric, + level=nlreg_level, + start_level=nlreg_start, + parameters=nlreg_options, + nl=True, + downsample=nlreg_downsample ) + elif nonlinear_register_type=='ants' or do_nonlinear_register_ants: + non_linear_registration( bbox_sample, local_model, + nonlinear_xfm, + symmetric=segment_symmetric, + level=nlreg_level, + start_level=nlreg_start, + parameters=nlreg_options, + ants=True, + downsample=nlreg_downsample ) + else: + non_linear_registration( bbox_sample, local_model, + nonlinear_xfm, + symmetric=segment_symmetric, + level=nlreg_level, + start_level=nlreg_start, + parameters=nlreg_options, + ants=False, + downsample=nlreg_downsample ) + + print("\n\n\nWarping the sample!:{}\n\n\n".format(bbox_sample)) + nl_sample.seg=None + nl_sample.seg_f=None + nl_sample.mask=None + + warp_sample(bbox_sample, local_model, nl_sample, + transform=nonlinear_xfm, + symmetric=segment_symmetric, + resample_order=resample_order) + + output_info['nl_sample']=nl_sample + else: + nl_sample=bbox_sample + + output_info['nonlinear_xfm']=nonlinear_xfm + + if exclude_re is not None: + _exclude_re=re.compile(exclude_re) + selected_library=[i for i in library if not _exclude_re.match(i[2]) and i[2] not in exclude] + else: + selected_library=[i for i in library if i[2] not in exclude] + + selected_library_f=[] + + if segment_symmetric: # fill up with all entries + selected_library_f=selected_library + + # library pre-selection if needed + # we need balanced number of samples for each group + if library_preselect>0 and library_preselect < len(selected_library): + loaded=False + loaded_f=False + + if os.path.exists(work_lib_dir+os.sep+'sel_library.json'): + with open(work_lib_dir+os.sep+'sel_library.json','r') as f: + selected_library=json.load(f) + loaded=True + + if segment_symmetric and os.path.exists(work_lib_dir_f+os.sep+'sel_library.json'): + with open(work_lib_dir_f+os.sep+'sel_library.json','r') as f: + selected_library_f=json.load(f) + loaded_f=True + + if do_nonlinear_register: + if not loaded: + selected_library=preselect(nl_sample, + selected_library, + method=library_preselect_method, + number=library_preselect, + use_nl=library_nl_samples_avail, + step=library_preselect_step, + 
lib_add_n=library_modalities, + groups=groups) + if segment_symmetric: + if not loaded_f: + selected_library_f=preselect(nl_sample, + selected_library, + method=library_preselect_method, + number=library_preselect, + use_nl=library_nl_samples_avail, + flip=True, + step=library_preselect_step, + lib_add_n=library_modalities, + groups=groups) + else: + if not loaded: + selected_library=preselect(bbox_sample, + selected_library, + method=library_preselect_method, + number=library_preselect, + use_nl=False, + step=library_preselect_step, + lib_add_n=library_modalities, + groups=groups) + if segment_symmetric: + if not loaded_f: + selected_library_f=preselect(bbox_sample, selected_library, + method=library_preselect_method, + number=library_preselect, + use_nl=False,flip=True, + step=library_preselect_step, + lib_add_n=library_modalities, + groups=groups) + + if not loaded: + with open(work_lib_dir+os.sep+'sel_library.json','w') as f: + json.dump(selected_library,f) + + if not loaded_f: + if segment_symmetric: + with open(work_lib_dir_f+os.sep+'sel_library.json','w') as f: + json.dump(selected_library_f,f) + + output_info['selected_library']=selected_library + if segment_symmetric: + output_info['selected_library_f']=selected_library_f + + selected_library_scan=[] + selected_library_xfm=[] + selected_library_warped2=[] + selected_library_xfm2=[] + + selected_library_scan_f=[] + selected_library_xfm_f=[] + selected_library_warped_f=[] + selected_library_warped2_f=[] + selected_library_xfm2_f=[] + + for (i,j) in enumerate(selected_library): + d=MriDataset(scan=j[2],seg=j[3], add=j[4:4+library_modalities],group=int(j[0]), grading=float(j[1]) ) + + selected_library_scan.append(d) + + selected_library_warped2.append( MriDataset(name=d.name, prefix=work_lib_dir, add_n=sample_modalities,group=int(j[0]), grading=float(j[1]) )) + selected_library_xfm2.append( MriTransform(name=d.name,prefix=work_lib_dir )) + + if library_nl_samples_avail: + selected_library_xfm.append( MriTransform(xfm=j[4+library_modalities], xfm_inv=j[5+library_modalities] ) ) + + output_info['selected_library_warped2']=selected_library_warped2 + output_info['selected_library_xfm2']=selected_library_xfm2 + if library_nl_samples_avail: + output_info['selected_library_xfm']=selected_library_xfm + + if segment_symmetric: + for (i,j) in enumerate(selected_library_f): + d=MriDataset(scan=j[2],seg=j[3], add=j[4:4+library_modalities], group=int(j[0]), grading=float(j[1]) ) + selected_library_scan_f.append(d) + selected_library_warped2_f.append(MriDataset(name=d.name, prefix=work_lib_dir_f, add_n=sample_modalities )) + selected_library_xfm2_f.append(MriTransform( name=d.name, prefix=work_lib_dir_f )) + + if library_nl_samples_avail: + selected_library_xfm_f.append( MriTransform(xfm=j[4+library_modalities], xfm_inv=j[5+library_modalities] )) + + output_info['selected_library_warped2_f']=selected_library_warped2_f + output_info['selected_library_xfm2_f']=selected_library_xfm2_f + if library_nl_samples_avail: + output_info['selected_library_xfm_f']=selected_library_xfm_f + + # nonlinear registration to template or individual + + if do_pairwise: # Right now ignore precomputed transformations + results=[] + if debug: + print("Performing pairwise registration") + + for (i,j) in enumerate(selected_library): + # TODO: make clever usage of precomputed transform if available + if pairwise_register_type=='elx' or pairwise_register_type=='elastix' : + results.append( futures.submit( + elastix_registration, + bbox_sample, + selected_library_scan[i], + 
selected_library_xfm2[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + nl=True, + output_inv_target=selected_library_warped2[i], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + elif pairwise_register_type=='ants' or do_pairwise_ants: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan[i], + selected_library_xfm2[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=True, + output_inv_target=selected_library_warped2[i], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + else: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan[i], + selected_library_xfm2[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=False, + output_inv_target=selected_library_warped2[i], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + + if segment_symmetric: + for (i,j) in enumerate(selected_library_f): + # TODO: make clever usage of precomputed transform if available + if pairwise_register_type=='elx' or pairwise_register_type=='elastix' : + results.append( futures.submit( + elastix_registration, + bbox_sample, + selected_library_scan_f[i], + selected_library_xfm2_f[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + nl=True, + output_inv_target=selected_library_warped2_f[i], + warp_seg=True, + flip=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + elif pairwise_register_type=='ants' or do_pairwise_ants: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan_f[i], + selected_library_xfm2_f[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=True, + output_inv_target=selected_library_warped2_f[i], + warp_seg=True, + flip=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + else: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan_f[i], + selected_library_xfm2_f[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=False, + output_inv_target=selected_library_warped2_f[i], + warp_seg=True, + flip=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + # TODO: do we really need to wait for result here? + futures.wait(results, return_when=futures.ALL_COMPLETED) + else: + + results=[] + + for (i, j) in enumerate(selected_library): + + lib_xfm=None + if library_nl_samples_avail: + lib_xfm=selected_library_xfm[i] + + results.append( futures.submit( + concat_resample, + selected_library_scan[i], + lib_xfm , + nonlinear_xfm, + selected_library_warped2[i], + resample_order=resample_order, + label_resample_order=label_resample_order, + resample_baa=resample_baa + ) ) + + if segment_symmetric: + for (i, j) in enumerate(selected_library_f): + lib_xfm=None + if library_nl_samples_avail: + lib_xfm=selected_library_xfm_f[i] + + results.append( futures.submit( + concat_resample, + selected_library_scan_f[i], + lib_xfm, + nonlinear_xfm, + selected_library_warped2_f[i], + resample_order=resample_order, + label_resample_order=label_resample_order, + resample_baa=resample_baa, + flip=True + ) ) + # TODO: do we really need to wait for result here? 
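+        # Note: the loops above fan work out through scoop futures; a minimal
+        # sketch of the same fan-out/fan-in pattern (illustrative only, `work`
+        # is a hypothetical function):
+        #   from scoop import futures
+        #   jobs = [futures.submit(work, i) for i in items]
+        #   futures.wait(jobs, return_when=futures.ALL_COMPLETED)
+        #   values = [j.result() for j in jobs]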
+ futures.wait(results, return_when=futures.ALL_COMPLETED) + + results=[] + + sample_seg=MriDataset(name='bbox_seg_' + sample.name+out_variant, prefix=work_dir ) + sample_grad=MriDataset(name='bbox_grad_' + sample.name+out_variant, prefix=work_dir ) + + results.append( futures.submit( + fuse_grading, + bbox_sample, + sample_seg, + selected_library_warped2, + flip=False, + classes_number=classes_number, + fuse_options=fuse_options, + model=local_model, + debug=debug, + fuse_variant=fuse_variant, + groups=groups + )) + + if segment_symmetric: + results.append( futures.submit( + fuse_grading, + bbox_sample, + sample_seg, + selected_library_warped2_f, + flip=True, + classes_number=classes_number, + fuse_options=fuse_options, + model=local_model, + debug=debug, + fuse_variant=fuse_variant, + groups=groups + )) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + output_info['fuse']=results[0].result() + if segment_symmetric: + output_info['fuse_f']=results[1].result() + + if qc_options: + # generate QC images + output_info['qc'] = generate_qc_image(sample_seg, + bbox_sample, + sample_qc, + options=qc_options, + model=local_model, + symmetric=segment_symmetric, + labels=library_description['classes_number']) + # cleanup if need + if cleanup: + shutil.rmtree(work_lib_dir) + shutil.rmtree(work_lib_dir_f) + if nl_sample is not None: + nl_sample.cleanup() + + if cleanup_xfm: + if nonlinear_xfm is not None: + nonlinear_xfm.cleanup() + + if not run_in_bbox: + # TODO: apply error correction here + # rename labels to final results + sample_seg_native=MriDataset(name='seg_' + sample.name+out_variant, prefix=work_dir ) + + warp_rename_seg( sample_seg, sample, sample_seg_native, + transform=bbox_linear_xfm, invert_transform=True, + lut=library_description['map'] , + symmetric=segment_symmetric, + symmetric_flip=segment_symmetric, + use_flipped=segment_symmetric, # needed to flip .seg_f back to right orientation + flip_lut=library_description['flip_map'], + resample_baa=resample_baa, + resample_order=label_resample_order, + datatype=seg_datatype ) + + warp_sample(sample_seg, sample, sample_seg_native, + transform=bbox_linear_xfm, invert_transform=True, + symmetric=segment_symmetric, + symmetric_flip=segment_symmetric,# need to flip symmetric dataset + resample_order=resample_order) + + output_info['sample_seg_native']=sample_seg_native + + if segment_symmetric: + # TODO: join left and right if needed + #raise "Not implemented yet!" + # join sample_seg_native.seg and sample_seg_native.seg_f into a single file + join_left_right(sample_seg_native, output_segment+'_seg.mnc',output_segment+'_grad.mnc', datatype=seg_datatype) + else: + shutil.copyfile(sample_seg_native.seg, output_segment+'_seg.mnc') + shutil.copyfile(sample_seg_native.scan, output_segment+'_grad.mnc') + + output_info['output_segment']=output_segment+'_seg.mnc' + output_info['output_grading']=output_segment+'_grad.mnc' + + volumes=seg_to_volumes( output_segment+'_seg.mnc', + output_segment+'_vol.json', + label_map=library_description.get('label_map',None), + grad=output_segment+'_grad.mnc', + median=use_median) + + output_info['output_volumes']=volumes + output_info['output_volumes_json']=output_segment+'_vol.json' + + # TODO: cleanup more here (?) + + return (output_segment+'_seg.mnc', output_segment+'_grad.mnc', volumes, output_info) + else: # special case, needed to train error correction TODO: remove? 
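+        # in this branch the outputs stay in the bbox (local model) space:
+        # volumes are computed directly from the fused segmentation and grading
+        # map, without warping anything back to native space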
+        volumes=seg_to_volumes(sample_seg.seg,
+                               output_segment+'_vol.json',
+                               grad=sample_seg.scan,
+                               median=use_median)
+        return (sample_seg.seg, sample_seg.scan, volumes, output_info)
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/fuse_grading.py b/ipl/grading/fuse_grading.py
new file mode 100644
index 0000000..62bb10e
--- /dev/null
+++ b/ipl/grading/fuse_grading.py
@@ -0,0 +1,208 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import copy
+import re
+import json
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+import ipl.minc_hl as hl
+
+# scoop parallel execution
+from scoop import futures, shared
+
+from .filter import *
+from .structures import *
+from .registration import *
+from .resample import *
+from .preselect import *
+from .qc import *
+
+import traceback
+
+def fuse_grading( sample, output, library,
+                  fuse_options={},
+                  flip=False,
+                  classes_number=2,
+                  model=None,
+                  debug=False,
+                  fuse_variant='',
+                  work_dir=None,
+                  groups=None):
+    try:
+        final_out_seg =output.seg
+        final_out_grad=output.scan
+
+        scan=sample.scan
+        add_scan=sample.add
+        output_info={}
+
+        if flip:
+            scan=sample.scan_f
+            add_scan=sample.add_f
+            final_out_seg =output.seg_f
+            final_out_grad=output.scan_f
+
+        if not os.path.exists( final_out_grad ):
+            with mincTools( verbose=2 ) as m:
+                patch=0
+                search=0
+                threshold=0
+                iterations=0
+                gco_optimize=False
+                nnls=False
+                gco_diagonal=False
+                label_norm=None
+                select_top=None
+                weights=None
+                beta=None
+
+                if fuse_options is not None:
+                    patch=      fuse_options.get('patch',     0)
+                    search=     fuse_options.get('search',    0)
+                    threshold=  fuse_options.get('threshold', 0.0)
+                    iterations= fuse_options.get('iter',      3)
+                    weights=    fuse_options.get('weights',   None)
+                    nnls=       fuse_options.get('nnls',      False)
+                    label_norm= fuse_options.get('label_norm',None)
+                    select_top= fuse_options.get('top',       None)
+                    beta=       fuse_options.get('beta',      None)
+
+                if work_dir is None:
+                    work_dir=os.path.dirname(output.seg)
+
+                dataset_name=sample.name
+
+                if flip:
+                    dataset_name+='_f'
+
+                output_info['work_dir']=work_dir
+                output_info['dataset_name']=dataset_name
+
+                out_seg_fuse = work_dir+os.sep+dataset_name+'_'+fuse_variant+'.mnc'
+                out_dist     = work_dir+os.sep+dataset_name+'_'+fuse_variant+'_dist.mnc'
+                out_grading  = final_out_grad
+
+                output_info['out_seg']=final_out_seg
+                output_info['out_grading']=out_grading
+                output_info['out_dist']=out_dist
+
+                if label_norm is not None:
+                    print("Using label_norm:{}".format(repr(label_norm)))
+                    # need to create rough labeling and average
+                    segs=['multiple_volume_similarity']
+                    segs.extend([ i.seg for i in library ])
+                    segs.extend(['--majority', m.tmp('maj_seg.mnc'), '--bg'] )
+                    m.execute(segs)
+
+                    scans=[ i.scan for i in library ]
+                    m.median(scans,m.tmp('median.mnc'))
+
+                    norm_order =label_norm.get('order',3)
+                    norm_median=label_norm.get('median',True)
+
+                    n_scan=work_dir+os.sep+dataset_name+'_'+fuse_variant+'_norm.mnc'
+
+                    if flip:
+                        n_scan=work_dir+os.sep+dataset_name+'_'+fuse_variant+'_f_norm.mnc'
+
+                    hl.label_normalize(scan,m.tmp('maj_seg.mnc'),m.tmp('median.mnc'),m.tmp('maj_seg.mnc'),out=n_scan,order=norm_order,median=norm_median)
+                    scan=n_scan
+
+                if patch==0 and search==0: # perform simple majority voting
+                    # create majority voted model segmentation, for ANIMAL segmentation if needed
+                    # TODO: figure out what it means for grading
+                    segs=['multiple_volume_similarity']
+                    segs.extend([ i.seg for i in library ])
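+                    # the assembled command has the form (illustrative):
+                    #   multiple_volume_similarity seg_1.mnc ... seg_N.mnc --majority <out> --bg
+                    # i.e. a voxel-wise majority vote over all library labels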
+                    segs.extend(['--majority', out_seg_fuse, '--bg'] )
+                    m.execute(segs)
+                else:
+                    # create text file for the training library
+                    train_lib=os.path.dirname(library[0].seg)+os.sep+sample.name+'.lst'
+
+                    if flip:
+                        train_lib=os.path.dirname(library[0].seg)+os.sep+sample.name+'_f.lst'
+
+                    output_info['train_lib']=train_lib
+
+                    with open(train_lib,'w') as f:
+                        for i in library:
+                            ss=[ os.path.basename(i.scan) ]
+                            ss.extend([os.path.basename(j) for j in i.add])
+                            ss.append(os.path.basename(i.seg))
+                            ss.append(str(i.grading))
+                            ss.append(str(i.group))
+                            f.write(",".join(ss))
+                            f.write("\n")
+
+                    outputs=[]
+
+                    if len(add_scan)>0:
+                        segs=['itk_patch_morphology_mc',
+                              scan,
+                              '--train',    train_lib,
+                              '--search',   str(search),
+                              '--patch',    str(patch),
+                              '--discrete', str(classes_number),
+                              '--adist',    out_dist,
+                              '--grading',  out_grading]
+
+                        if weights is not None:
+                            segs.extend(['--weights',weights])
+
+                        segs.extend(add_scan)
+                        segs.extend(['--output', final_out_seg])
+                    else:
+                        segs=['itk_patch_morphology', scan,
+                              '--train',     train_lib,
+                              '--search',    str(search),
+                              '--patch',     str(patch),
+                              '--discrete',  str(classes_number),
+                              '--iter',      str(iterations),
+                              '--adist',     out_dist,
+                              '--threshold', str(threshold),
+                              '--grading',   out_grading,
+                              '--verbose' ]
+                        segs.append(final_out_seg)
+
+                    if beta is not None:
+                        segs.extend(['--beta',str(beta)])
+                    if sample.mask is not None:
+                        segs.extend(['--mask', sample.mask])
+                    if select_top is not None:
+                        segs.extend(['--top',str(select_top)])
+                    if groups is not None:
+                        segs.extend(['--groups',str(groups)])
+
+                    outputs=[ final_out_seg, out_grading, out_dist ]
+
+                    m.command(segs, inputs=[sample.scan], outputs=outputs)
+                    print(' '.join(segs))
+        return output_info
+    except mincError as e:
+        print("Exception in fuse_grading:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in fuse_grading:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+def join_left_right(sample,output_seg,output_grad=None,datatype=None):
+    with mincTools() as m:
+        cmd=['itk_merge_discrete_labels',sample.seg,sample.seg_f,output_seg]
+        if datatype is not None:
+            cmd.append('--'+datatype)
+        m.command(cmd,inputs=[sample.seg,sample.seg_f],outputs=[output_seg])
+        if output_grad is not None:
+            # TODO: figure out how to merge gradings
+            print("Can't merge gradings yet!")
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/labels.py b/ipl/grading/labels.py
new file mode 100644
index 0000000..c6ed099
--- /dev/null
+++ b/ipl/grading/labels.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+def split_labels_seg(sample):
+    '''split up one multi-label segmentation into a set of single-label files'''
+    try:
+        with mincTools() as m:
+            if sample.seg is not None:
+                base=sample.seg.rsplit('.mnc',1)[0]+'_%03d.mnc'
+                sample.seg_split=m.split_labels(sample.seg,base)
+            if sample.seg_f is not None:
+                base=sample.seg_f.rsplit('.mnc',1)[0]+'_%03d.mnc'
+                sample.seg_f_split=m.split_labels(sample.seg_f,base)
+    except mincError as e:
+        print("Exception in split_labels_seg:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in split_labels_seg:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def merge_labels_seg(sample):
+    '''merge multiple single-label segmentations into a single file'''
+    try:
+        with mincTools() as m:
+            if any(sample.seg_split):
+                if sample.seg is None:
+                    sample.seg=sample.seg_split[0].rsplit('_000.mnc',1)[0]+'.mnc'
+                m.merge_labels(sample.seg_split,sample.seg)
+            if any(sample.seg_f_split):
+                if sample.seg_f is None:
+                    sample.seg_f=sample.seg_f_split[0].rsplit('_000.mnc',1)[0]+'.mnc'
+                m.merge_labels(sample.seg_f_split,sample.seg_f)
+    except mincError as e:
+        print("Exception in merge_labels_seg:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in merge_labels_seg:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/library.py b/ipl/grading/library.py
new file mode 100644
index 0000000..5b7cea6
--- /dev/null
+++ b/ipl/grading/library.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import copy
+import json
+import os
+import sys
+import traceback
+
+def save_library_info(library_description, output, name='library.json'):
+    """Save library information into directory, using predefined file structure
+    Arguments:
+    library_description -- dictionary with library description
+    output -- output directory
+
+    Keyword arguments:
+    name -- optional name of .json file, relative to the output directory, default 'library.json'
+    """
+    try:
+        tmp_library_description=copy.deepcopy(library_description)
+        tmp_library_description.pop('prefix',None)
+
+        for i in ['local_model','local_model_mask', 'local_model_flip',
+                  'local_model_mask_flip','local_model_seg']:
+            if tmp_library_description[i] is not None:
+                tmp_library_description[i]=os.path.relpath(tmp_library_description[i],output)
+
+        for (j, i) in enumerate(tmp_library_description['local_model_add']):
+            tmp_library_description['local_model_add'][j]=os.path.relpath(i, output)
+
+        for (j, i) in enumerate(tmp_library_description['local_model_add_flip']):
+            tmp_library_description['local_model_add_flip'][j]=os.path.relpath(i, output)
+
+        for i in ['model','model_mask']:
+            # if it starts with the same prefix, remove it
+            if os.path.dirname(tmp_library_description[i])==output \
+               or tmp_library_description[i][0]!=os.sep:
+                tmp_library_description[i]=os.path.relpath(tmp_library_description[i],output)
+
+        for (j, i) in enumerate(tmp_library_description['model_add']):
+            if os.path.dirname(i)==output:
+                tmp_library_description['model_add'][j]=os.path.relpath(i, output)
+
+        for (j, i) in enumerate(tmp_library_description['library']):
+            for (k,t) in enumerate(i):
+                if k>1: # skip group and grading
+                    tmp_library_description['library'][j][k]=os.path.relpath(t, output)
+
+        with open(output+os.sep+name,'w') as f:
+            json.dump(tmp_library_description,f,indent=1)
+    except :
+        print("Error saving library information into:{} {}".format(output,sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stderr)
+        raise
+
+def load_library_info(prefix, name='library.json'):
+    """Load library information from directory, using predefined file structure
+    Arguments:
+    prefix -- directory path
+
+    Keyword arguments:
+    name -- optional name of .json file, relative to the input directory, default 'library.json'
+    """
+    try:
+        library_description={}
+        with open(prefix+os.sep+name,'r') as f:
+            library_description=json.load(f)
+
+        library_description['prefix']=prefix
+
+        for i in ['local_model','local_model_mask', 'local_model_flip',
+                  'local_model_mask_flip','local_model_seg']:
+            if library_description[i] is not None: library_description[i]=prefix+os.sep+library_description[i]
+
+        try:
+            for (j, i) in enumerate(library_description['local_model_add']):
+                library_description['local_model_add'][j]=prefix+os.sep+i
+
+            for (j, i) in enumerate(library_description['local_model_add_flip']):
+                library_description['local_model_add_flip'][j]=prefix+os.sep+i
+        except KeyError:
+            pass
+
+        for (j, i) in enumerate(library_description['library']):
+            for (k,t) in enumerate(i):
+                if k>1: # skip group and grading
+                    library_description['library'][j][k]=prefix+os.sep+t
+
+        for i in ['model','model_mask']:
+            # if it starts with '/' assume it's an absolute path
+            if library_description[i] is not None and library_description[i][0]!=os.sep:
+                library_description[i]=prefix+os.sep+library_description[i]
+
+        try:
+            for (j, i) in enumerate(library_description['model_add']):
+                if library_description['model_add'][j][0]!='/':
+                    library_description['model_add'][j]=prefix+os.sep+i
+        except KeyError:
+            pass
+
+        return library_description
+    except :
+        print("Error loading library information from:{} {}".format(prefix,sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stderr)
+        raise
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/model.py b/ipl/grading/model.py
new file mode 100644
index 0000000..c326a5c
--- /dev/null
+++ b/ipl/grading/model.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+def create_fake_mask(in_scan, out_mask, op=None ):
+    try:
+        with mincTools() as m :
+            if op is None :
+                m.calc([in_scan], 'A[0]>0.5?1:0', out_mask, labels=True)
+            else :
+                m.binary_morphology(in_scan, op, out_mask, binarize_threshold=0.5)
+    except mincError as e:
+        print("Exception in create_fake_mask:{}".format(repr(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in create_fake_mask:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def create_local_model(tmp_lin_samples, model, local_model,
+                       extend_boundary=4,
+                       op=None,
+                       symmetric=False ):
+    '''create an average segmentation and use it to create local model'''
+    try:
+        with mincTools() as m:
+            segs=['multiple_volume_similarity']
+            segs.extend([ i.seg for i in tmp_lin_samples ])
+
+            if symmetric: segs.extend([ i.seg_f for i in tmp_lin_samples ])
+
+            segs.extend(['--majority', m.tmp('majority.mnc')] )
+            m.execute(segs)
+            maj=m.tmp('majority.mnc')
+
+            if op is not None:
+                m.binary_morphology(maj, op, m.tmp('majority_op.mnc'),binarize_threshold=0.5)
+                maj=m.tmp('majority_op.mnc')
+
+            # TODO: replace mincreshape/mincbbox with something more sensible
+            out=m.execute_w_output(['mincbbox', '-threshold', '0.5', '-mincreshape', maj ]).rstrip("\n").split(' ')
+
+            s=[ int(i) for i in out[1].split(',') ]
+            c=[ int(i) for i in out[3].split(',') ]
+
+            start=[s[0]-extend_boundary,   s[1]-extend_boundary,   s[2]-extend_boundary ]
+            ext=  [c[0]+extend_boundary*2, c[1]+extend_boundary*2, c[2]+extend_boundary*2]
+
+            # reshape the mask
+            m.execute(['mincreshape',
+                       '-start','{},{},{}'.format(start[0], start[1], start[2]),
+                       '-count','{},{},{}'.format(ext[0],   ext[1],   ext[2] ),
+                       maj, local_model.mask, '-byte' ] )
+
+            m.resample_smooth(model.scan, local_model.scan, like=local_model.mask, order=0)
+            m.resample_labels(m.tmp('majority.mnc'),local_model.seg, like=local_model.mask, order=0)
+
+            for (i,j) in enumerate(model.add):
+                m.resample_smooth(model.add[i], local_model.add[i], like=local_model.mask, order=0)
+
+    except mincError as e:
+        print("Exception in create_local_model:{}".format(repr(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in create_local_model:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def create_local_model_flip(local_model, model, remap={},
+                            extend_boundary=4, op=None ):
+    try:
+        with mincTools() as m:
+            m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0])
+            m.resample_labels(local_model.seg, m.tmp('flip_seg.mnc'),
+                              transform=m.tmp('flip_x.xfm'),
+                              order=0, remap=remap, like=model.scan)
+
+            seg=m.tmp('flip_seg.mnc')
+
+            if op is not None:
+                m.binary_morphology(seg, op, m.tmp('flip_seg_op.mnc'),binarize_threshold=0.5)
+                seg=m.tmp('flip_seg_op.mnc')
+
+            # TODO: replace mincreshape/mincbbox with something more sensible
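+            # sketch of the parsing below (same pattern as in create_local_model
+            # above), assuming `mincbbox -threshold 0.5 -mincreshape` prints
+            #   ... -start sx,sy,sz -count cx,cy,cz ...
+            # out[1] holds the start voxel, out[3] the extent; the box is then
+            # grown by extend_boundary voxels on every side:
+            #   start = s - extend_boundary ;  count = c + 2*extend_boundary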
out=m.execute_w_output(['mincbbox', '-threshold', '0.5', '-mincreshape', seg ]).rstrip("\n").split(' ') + + s=[ int(i) for i in out[1].split(',') ] + c=[ int(i) for i in out[3].split(',') ] + + start=[s[0]-extend_boundary, s[1]-extend_boundary ,s[2]-extend_boundary ] + ext= [c[0]+extend_boundary*2, c[1]+extend_boundary*2 ,c[2]+extend_boundary*2] + # reshape the mask + m.execute(['mincreshape', + '-start','{},{},{}'.format(start[0], start[1], start[2]), + '-count','{},{},{}'.format(ext[0], ext[1], ext[2] ), + seg, + local_model.mask_f, + '-byte' ] ) + + m.resample_smooth(local_model.scan, local_model.scan_f, + like=local_model.mask_f, order=0, transform=m.tmp('flip_x.xfm')) + + for (i,j) in enumerate(model.add_f): + m.resample_smooth(model.add[i], local_model.add_f[i], + like=local_model.mask_f, order=0, transform=m.tmp('flip_x.xfm')) + + except mincError as e: + print("Exception in create_local_model_flip:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in create_local_model_flip:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/grading/preselect.py b/ipl/grading/preselect.py new file mode 100644 index 0000000..9b0c79b --- /dev/null +++ b/ipl/grading/preselect.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + +import shutil +import os +import sys +import csv +import copy +import re +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +# scoop parallel execution +from scoop import futures, shared + +from .filter import * +from .structures import * +from .registration import * +from .resample import * + +import traceback + + +def preselect(sample, + library, + method='MI', + number=10, + mask=None, + use_nl=False, + flip=False, + step=None, + lib_add_n=0, + groups=None): + '''calculate requested similarity function and return top number of elements from the library''' + results=[] + column=2 # skip over grading and group + + # TODO: use multiple modalities for preselection? 
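+    # each library row is assumed to be laid out as
+    #   [group, grading, scan, seg, add_1..add_N, xfm, xfm_inv, nl_scan, ...]
+    # (see how rows are unpacked in fusion_grading), so column 2 is the linearly
+    # resampled scan, and with use_nl the comparison column moves to
+    # 6+lib_add_n, presumably the non-linearly warped scan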
+    if use_nl:
+        column=6+lib_add_n
+
+    for (i,j) in enumerate(library):
+        results.append( futures.submit(
+            calculate_similarity, sample, MriDataset(scan=j[column]), method=method, mask=mask, flip=flip, step=step
+            ) )
+    futures.wait(results, return_when=futures.ALL_COMPLETED)
+
+    val=[ (j.result(), int(library[i][0]), library[i] ) for (i,j) in enumerate(results)]
+
+    if groups is None:
+        val_sorted=sorted(val, key=lambda s: s[0] )
+        return [ i[2] for i in val_sorted[ 0:number ] ]
+    else:
+        s_number=number//groups
+        res=[]
+
+        for i in range(groups):
+            val_sorted=sorted( [v for v in val if v[1]==i], key=lambda s: s[0] )
+            res.extend( val_sorted[0:s_number] )
+
+        return [ i[2] for i in res ]
+
+def calculate_similarity(sample1, sample2,
+                         mask=None, method='MI',
+                         flip=False, step=None):
+    try:
+        with mincTools() as m:
+            scan=sample1.scan
+
+            if flip:
+                scan=sample1.scan_f
+
+            # figure out the step size; minctracc works extremely slowly when the
+            # step size is smaller than the file's step size
+            info_sample1=m.mincinfo( sample1.scan )
+
+            cmds=[ 'minctracc', scan, sample2.scan, '-identity' ]
+
+            if method=='MI':
+                cmds.extend( ['-nmi', '-blur_pdf', '9'] )
+            else:
+                cmds.append( '-xcorr' )
+
+            if step is None:
+                step= max( abs( info_sample1['xspace'].step ),
+                           abs( info_sample1['yspace'].step ),
+                           abs( info_sample1['zspace'].step ) )
+
+            cmds.extend([
+                '-step', str(step), str(step), str(step),
+                '-simplex', '1',
+                '-tol', '0.01',
+                '-lsq6',
+                '-est_center',
+                '-clob',
+                m.tmp('similarity.xfm')
+            ])
+
+            if mask is not None:
+                cmds.extend( ['-source_mask', mask])
+
+            output=re.search( r'^Final objective function value = (\S+)', m.execute_w_output(cmds, verbose=0), flags=re.MULTILINE).group(1)
+
+            return float(output)
+
+    except mincError as e:
+        print("Exception in calculate_similarity:{}".format( str(e)) )
+        traceback.print_exc( file=sys.stdout )
+        raise
+
+    except :
+        print("Exception in calculate_similarity:{}".format( sys.exc_info()[0]) )
+        traceback.print_exc( file=sys.stdout )
+        raise
diff --git a/ipl/grading/qc.py b/ipl/grading/qc.py
new file mode 100644
index 0000000..cfd9adc
--- /dev/null
+++ b/ipl/grading/qc.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import copy
+import re
+import json
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+import traceback
+
+def make_contours(input, output, width=1):
+    """Convert a multi-label image into another multi-label image containing label borders only
+    Arguments:
+    input -- input minc file
+    output -- output file
+
+    Keyword arguments:
+    width -- width of the border to leave behind, default 1 (voxels)
+    """
+    with mincTools() as m:
+        m.command(['c3d', input,'-split',
+                   '-foreach',
+                   '-dup', '-erode', '1','{}x{}x{}'.format(width,width,width), '-scale', '-1',
+                   '-add',
+                   '-endfor',
+                   '-merge',
+                   '-type', 'short','-o',output],
+                  inputs=[input],outputs=[output],
+                  verbose=True)
+
+def generate_qc_image(sample_seg,
+                      sample,
+                      sample_qc,
+                      options={},
+                      model=None,
+                      symmetric=False,
+                      labels=2,
+                      title=None):
+    """Generate QC image for multi-label segmentation
+    Arguments:
+    sample_seg -- input segmentation
+    sample -- input file
+    sample_qc -- output QC file
+
+    Keyword arguments:
+    options -- options as dictionary with the following keys:
+        lut_file -- LUT file for minclookup, default None
+        spectral_mask -- boolean, if spectral mask should be used, default False
+        dicrete_mask -- boolean, if discrete mask should be used, default False
+        image_range -- list of two real values
+        clamp -- boolean, if range clamp should be used
+        big -- boolean, request a large QC image
+        contours -- boolean, show label contours instead of filled labels
+        contour_width -- width of the contour border, default 1 (voxels)
+        crop -- number of voxels to crop from the edges, default None
+    model -- reference model, default None
+    symmetric -- boolean, if symmetric QC is needed
+    labels -- integer, number of labels present, default 2
+    title -- QC image title
+    """
+    try:
+        # TODO: implement advanced features
+        qc_lut=options.get('lut_file',None)
+        spectral_mask=options.get('spectral_mask',False)
+        dicrete_mask=options.get('dicrete_mask',False)
+        image_range=options.get('image_range',None)
+        clamp=options.get('clamp',False)
+        big=options.get('big',False)
+        contours=options.get('contours',False)
+        contour_width=options.get('contour_width',1)
+        crop=options.get('crop',None)
+
+        if qc_lut is not None:
+            spectral_mask=False
+            dicrete_mask=True
+
+        with mincTools() as m:
+            seg=sample_seg.seg
+            seg_f=sample_seg.seg_f
+            scan=sample.scan
+            scan_f=sample.scan_f
+
+            if crop is not None:
+                # remove voxels from the edge
+                m.autocrop(scan,m.tmp('scan.mnc'),isoexpand=-crop)
+                scan=m.tmp('scan.mnc')
+                m.resample_labels(seg,m.tmp('seg.mnc'),like=scan)
+                seg=m.tmp('seg.mnc')
+
+                if symmetric:
+                    m.autocrop(scan_f,m.tmp('scan_f.mnc'),isoexpand=-crop)
+                    scan_f=m.tmp('scan_f.mnc')
+                    m.resample_labels(seg_f,m.tmp('seg_f.mnc'),like=scan)
+                    seg_f=m.tmp('seg_f.mnc')
+
+            if contours:
+                make_contours(seg,m.tmp('seg_contours.mnc'),width=contour_width)
+                seg=m.tmp('seg_contours.mnc')
+                if symmetric:
+                    make_contours(seg_f,m.tmp('seg_f_contours.mnc'),width=contour_width)
+                    seg_f=m.tmp('seg_f_contours.mnc')
+
+            if symmetric:
+                m.qc( scan,
+                      m.tmp('qc.png'),
+                      mask=seg,
+                      mask_range=[0,labels-1],
+                      big=True,
+                      clamp=clamp,
+                      image_range=image_range,
+                      spectral_mask=spectral_mask,
+                      dicrete_mask=dicrete_mask,
+                      mask_lut=qc_lut)
+
+                m.qc( scan_f,
+                      m.tmp('qc_f.png'),
+                      mask=seg_f,
+                      mask_range=[0,labels-1],
+                      image_range=image_range,
+                      big=True,
+                      clamp=clamp,
+                      spectral_mask=spectral_mask,
+                      dicrete_mask=dicrete_mask,
+                      title=title,
+                      mask_lut=qc_lut)
+
+                m.command(['montage','-tile','2x1','-geometry','+1+1',
+                           m.tmp('qc.png'),m.tmp('qc_f.png'),sample_qc],
+                          inputs=[m.tmp('qc.png'),m.tmp('qc_f.png')],
+                          outputs=[sample_qc])
+            else:
+                m.qc( scan,
+                      sample_qc,
+                      mask=seg,
+                      mask_range=[0,labels-1],
+                      image_range=image_range,
+                      big=True,
+                      mask_lut=qc_lut,
+                      spectral_mask=spectral_mask,
+                      dicrete_mask=dicrete_mask,
+                      clamp=clamp,
+                      title=title)
+
+        return [sample_qc]
+    except mincError as e:
+        print("Exception in generate_qc_image:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in generate_qc_image:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/registration.py b/ipl/grading/registration.py
new file mode 100644
index 0000000..2ca1467
--- /dev/null
+++ b/ipl/grading/registration.py
@@ -0,0 +1,638 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+import ipl.registration
+import ipl.ants_registration
+import ipl.elastix_registration
+
+def linear_registration(
+    sample,
+    model,
+    output_xfm,
+    output_sample=None,
+    output_invert_xfm=None,
+    init_xfm=None,
+    symmetric=False,
+    ants=False,
+    reg_type='-lsq12',
+    objective='-xcorr',
+    linreg=None,
+    work_dir=None,
+    close=False,
+    warp_seg=False,
+    resample_order=2,
+    resample_aa=None,
+    resample_baa=False,
+    downsample=None,
+    bbox=False
+    ):
+    """perform linear registration to the model, and calculate the inverse"""
+    try:
+        _init_xfm=None
+        _init_xfm_f=None
+
+        if init_xfm is not None:
+            _init_xfm=init_xfm.xfm
+            if symmetric:
+                _init_xfm_f=init_xfm.xfm_f
+
+        with mincTools() as m:
+            if not m.checkfiles(inputs=[sample.scan], outputs=[output_xfm.xfm]): return
+
+            #if _init_xfm is None:
+            #    _init_xfm=_init_xfm_f=m.tmp('identity.xfm')
+            #    m.param2xfm(m.tmp('identity.xfm'))
+
+            scan=sample.scan
+            scan_f=sample.scan_f
+            mask=sample.mask
+            mask_f=sample.mask_f
+
+            _output_xfm=output_xfm.xfm
+            _output_xfm_f=output_xfm.xfm_f
+
+            if bbox:
+                scan=m.tmp('scan.mnc')
+                m.resample_smooth(sample.scan, scan, like=model.scan, transform=_init_xfm)
+                if sample.mask is not None:
+                    mask=m.tmp('mask.mnc')
+                    m.resample_labels(sample.mask, mask, like=model.scan, transform=_init_xfm)
+
+                _init_xfm=None
+                close=True
+                _output_xfm=m.tmp('output.xfm')
+                if symmetric:
+                    scan_f=m.tmp('scan_f.mnc')
+                    m.resample_smooth(sample.scan_f, scan_f, like=model.scan, transform=_init_xfm_f)
+                    if sample.mask_f is not None:
+                        mask_f=m.tmp('mask_f.mnc')
+                        m.resample_labels(sample.mask_f, mask_f, like=model.scan, transform=_init_xfm_f)
+                    _init_xfm_f=None
+                    _output_xfm_f=m.tmp('output_f.xfm')
+
+            if symmetric:
+                if ants:
+                    ipl.ants_registration.linear_register_ants2(
+                        scan,
+                        model.scan,
+                        _output_xfm,
+                        source_mask=mask,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm,
+                        parameters=linreg,
+                        close=close,
+                        downsample=downsample,
+                        )
+                    ipl.ants_registration.linear_register_ants2(
+                        scan_f,
+                        model.scan,
+                        _output_xfm_f,
+                        source_mask=mask_f,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm_f,
+                        parameters=linreg,
+                        close=close,
+                        downsample=downsample,
+                        )
+                else:
+                    ipl.registration.linear_register(
+                        scan,
+                        model.scan,
+                        _output_xfm,
+                        source_mask=mask,
+                        target_mask=model.mask,
+                        init_xfm=_init_xfm,
+                        objective=objective,
+                        parameters=reg_type,
+                        conf=linreg,
+                        close=close,
+                        downsample=downsample,
+                        )
+
+                    ipl.registration.linear_register(
+                        scan_f,
+                        model.scan,
+                        _output_xfm_f,
+                        source_mask=mask_f,
target_mask=model.mask, + init_xfm=_init_xfm_f, + objective=objective, + parameters=reg_type, + conf=linreg, + close=close, + downsample=downsample, + ) + else: + if ants: + ipl.ants_registration.linear_register_ants2( + scan, + model.scan, + _output_xfm, + source_mask=mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=linreg, + close=close, + downsample=downsample + ) + else: + ipl.registration.linear_register( + scan, + model.scan, + _output_xfm, + source_mask=mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=reg_type, + conf=linreg, + close=close, + downsample=downsample, + ) + if bbox : + if init_xfm is not None: + m.xfmconcat([init_xfm.xfm,_output_xfm],output_xfm.xfm) + if symmetric: + m.xfmconcat([init_xfm.xfm_f,_output_xfm_f],output_xfm.xfm_f) + else: + shutil.copyfile(_output_xfm,output_xfm.xfm) + if symmetric: + shutil.copyfile(_output_xfm_f,output_xfm.xfm_f) + + if output_invert_xfm is not None: + m.xfminvert(output_xfm.xfm, output_invert_xfm.xfm) + if symmetric: + m.xfminvert(output_xfm.xfm_f, output_invert_xfm.xfm_f) + + if output_sample is not None: + m.resample_smooth(sample.scan, output_sample.scan, + transform=output_xfm.xfm, + like=model.scan, + order=resample_order) + if warp_seg: + m.resample_labels(sample.seg, output_sample.seg, + transform=output_xfm.xfm, + aa=resample_aa, + order=resample_order, + like=model.scan, + baa=resample_baa) + + if symmetric: + m.resample_smooth(sample.scan_f, output_sample.scan_f, + transform=output_xfm.xfm_f, + like=model.scan, + order=resample_order) + if warp_seg: + m.resample_labels(sample.seg_f, output_sample.seg_f, + transform=output_xfm.xfm_f, + aa=resample_aa, + order=resample_order, + like=model.scan, + baa=resample_baa) + + return True + except mincError as e: + print("Exception in linear_registration:{} {}".format(sample.name,str(e))) + traceback.print_exc(file=sys.stderr) + raise + except : + print("Exception in linear_registration:{} {}".format(sample.name,sys.exc_info()[0])) + traceback.print_exc(file=sys.stderr) + raise + + +def elastix_registration( + sample, + model, + output_xfm, + output_sample=None, + output_invert_xfm=None, + init_xfm=None, + symmetric=False, + work_dir=None, + warp_seg=False, + resample_order=2, + resample_aa=None, + resample_baa=False, + downsample=None, + downsample_grid=None, + parameters=None, + bbox=False, + nl=False + ): + """perform elastix registration to the model, and calculate inverse""" + try: + + with mincTools() as m: + + if not m.checkfiles(inputs=[sample.scan], outputs=[output_xfm.xfm]): return + + _init_xfm=None + _init_xfm_f=None + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + mask=sample.mask + mask_f=sample.mask_f + model_mask=model.mask + + if mask is None: + model_mask=None + + scan=sample.scan + scan_f=sample.scan_f + + _output_xfm=output_xfm.xfm + _output_xfm_f=output_xfm.xfm_f + + if bbox: + scan=m.tmp('scan.mnc') + m.resample_smooth(sample.scan, scan, like=model.scan, transform=_init_xfm) + if sample.mask is not None: + mask=m.tmp('mask.mnc') + m.resample_labels(sample.mask, mask, like=model.scan, transform=_init_xfm) + _init_xfm=None + close=True + _output_xfm=m.tmp('output.xfm') + + if symmetric: + scan_f=m.tmp('scan_f.mnc') + m.resample_smooth(sample.scan_f, scan_f, like=model.scan, transform=_init_xfm_f) + if sample.mask_f is not None: + mask_f=m.tmp('mask_f.mnc') + m.resample_labels(sample.mask_f, mask_f, like=model.scan, transform=_init_xfm_f) + _init_xfm_f=None + 
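+                    # bbox mode note: the inputs were resampled into model space
+                    # with the initial transform above, so registration starts
+                    # from identity here and the initial transform is
+                    # concatenated back onto the result below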
_output_xfm_f=m.tmp('output_f.xfm') + + #TODO: update elastix registration to downsample xfm? + if symmetric: + ipl.elastix_registration.register_elastix( + scan, + model.scan, + output_xfm=_output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + downsample=downsample, + downsample_grid=downsample_grid, + parameters=parameters, + nl=nl + ) + ipl.elastix_registration.register_elastix( + scan_f, + model.scan, + output_xfm=_output_xfm_f, + source_mask=mask_f, + target_mask=model_mask, + init_xfm=_init_xfm_f, + downsample=downsample, + downsample_grid=downsample_grid, + parameters=parameters, + nl=nl + ) + else: + ipl.elastix_registration.register_elastix( + scan, + model.scan, + output_xfm=_output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + downsample=downsample, + downsample_grid=downsample_grid, + parameters=parameters, + nl=nl + ) + + if bbox : + if init_xfm is not None: + m.xfmconcat([init_xfm.xfm,_output_xfm],output_xfm.xfm) + if symmetric: + m.xfmconcat([init_xfm.xfm_f,_output_xfm_f],output_xfm.xfm_f) + else: + shutil.copyfile(_output_xfm,output_xfm.xfm) + if symmetric: + shutil.copyfile(_output_xfm_f,output_xfm.xfm_f) + + + if output_invert_xfm is not None: + m.xfminvert(output_xfm.xfm, output_invert_xfm.xfm) + if symmetric: + m.xfminvert(output_xfm.xfm_f, output_invert_xfm.xfm_f) + + if output_sample is not None: + m.resample_smooth(sample.scan, output_sample.scan, + transform=output_xfm.xfm, + like=model.scan, order=resample_order) + if warp_seg: + m.resample_labels(sample.seg, output_sample.seg, + transform=output_xfm.xfm, + aa=resample_aa, order=resample_order, + like=model.scan, baa=resample_baa) + + if symmetric: + m.resample_smooth(sample.scan_f, output_sample.scan_f, + transform=output_xfm.xfm_f, + like=model.scan, order=resample_order) + if warp_seg: + m.resample_labels(sample.seg_f, output_sample.seg_f, + transform=output_xfm.xfm_f, + aa=resample_aa, order=resample_order, + like=model.scan, baa=resample_baa) + + return True + except mincError as e: + print("Exception in elastix_registration:{} {}".format(sample.name,str(e))) + traceback.print_exc(file=sys.stderr) + raise + except : + print("Exception in elastix_registration:{} {}".format(sample.name,sys.exc_info()[0])) + traceback.print_exc(file=sys.stderr) + raise + + + +def non_linear_registration( + sample, + model, + output, + output_sample=None, + output_invert=True, + init_xfm=None, + level=2, + start_level=8, + symmetric=False, + parameters=None, + work_dir=None, + ants=False, + warp_seg=False, + resample_order=2, + resample_aa=None, + resample_baa=False, + output_inv_target=None, + flip=False, + downsample=None, + ): + """perform linear registration to the model, and calculate inverse""" + + try: + _init_xfm=None + _init_xfm_f=None + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + with mincTools() as m: + + if not m.checkfiles(inputs=[sample.scan], outputs=[output.xfm]): return + + if symmetric: + # TODO: split up into two jobs? 
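+                # every transform computed below is passed through
+                # m.xfm_normalize(), which stores the deformation resampled to a
+                # regular grid with step `level`; the minctracc path obtains the
+                # inverse with invert=True, while the ANTs path reuses the
+                # *_inverse.xfm that ANTs writes next to the forward transform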
+ if not os.path.exists( output.xfm ) or \ + not os.path.exists( output.xfm_f ) : + if ants: + ipl.ants_registration.non_linear_register_ants2( + sample.scan, + model.scan, + m.tmp('forward')+'.xfm', + target_mask=model.mask, + parameters=parameters, + downsample=downsample, + level=level, + start=start_level, + #work_dir=work_dir + ) + ipl.ants_registration.non_linear_register_ants2( + sample.scan_f, + model.scan, + m.tmp('forward_f')+'.xfm', + target_mask=model.mask, + parameters=parameters, + downsample=downsample, + level=level, + start=start_level, + #work_dir=work_dir + ) + else: + ipl.registration.non_linear_register_full( + sample.scan, + model.scan, + m.tmp('forward')+'.xfm', + #source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start_level, + downsample=downsample, + #work_dir=work_dir + ) + ipl.registration.non_linear_register_full( + sample.scan_f, + model.scan, + m.tmp('forward_f')+'.xfm', + #source_mask=sample.mask_f, + target_mask=model.mask, + init_xfm=_init_xfm_f, + parameters=parameters, + level=level, + start=start_level, + downsample=downsample, + #work_dir=work_dir + ) + m.xfm_normalize(m.tmp('forward')+'.xfm',model.scan,output.xfm,step=level) + #TODO: regularize here + m.xfm_normalize(m.tmp('forward_f')+'.xfm',model.scan,output.xfm_f,step=level) + + if output_invert: + if ants: + m.xfm_normalize(m.tmp('forward')+'_inverse.xfm', model.scan, output.xfm_inv, step=level ) + m.xfm_normalize(m.tmp('forward_f')+'_inverse.xfm',model.scan, output.xfm_f_inv, step=level ) + else: + m.xfm_normalize(m.tmp('forward')+'.xfm', model.scan, output.xfm_inv, step=level, invert=True) + m.xfm_normalize(m.tmp('forward_f')+'.xfm',model.scan, output.xfm_f_inv, step=level, invert=True) + else: + if not os.path.exists( output.xfm ) : + if flip: + if ants: + ipl.ants_registration.non_linear_register_ants2( + sample.scan_f, + model.scan, + m.tmp('forward')+'.xfm', + target_mask=model.mask, + parameters=parameters, + downsample=downsample, + level=level, + start=start_level, + #work_dir=work_dir + ) + else: + ipl.registration.non_linear_register_full( + sample.scan_f, + model.scan, + m.tmp('forward')+'.xfm', + #source_mask=sample.mask_f, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start_level, + downsample=downsample, + #work_dir=work_dir + ) + else: + if ants: + ipl.ants_registration.non_linear_register_ants2( + sample.scan, + model.scan, + m.tmp('forward')+'.xfm', + target_mask=model.mask, + parameters=parameters, + downsample=downsample, + level=level, + start=start_level, + #work_dir=work_dir + ) + else: + ipl.registration.non_linear_register_full( + sample.scan, + model.scan, + m.tmp('forward')+'.xfm', + #source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start_level, + downsample=downsample, + #work_dir=work_dir + ) + m.xfm_normalize(m.tmp('forward')+'.xfm', model.scan, output.xfm, step=level) + + if output_invert: + if ants: # ANTS produces forward and invrese + m.xfm_normalize(m.tmp('forward')+'_inverse.xfm', model.scan, output.xfm_inv, step=level ) + else: + m.xfm_normalize(m.tmp('forward')+'.xfm', model.scan, output.xfm_inv, step=level, invert=True) + + if output_sample is not None: + m.resample_smooth(sample.scan, output_sample.scan, + transform=output.xfm_inv, + like=model.scan, + order=resample_order, + invert_transform=True) + + for (i,j) in enumerate(sample.add): + 
m.resample_smooth(sample.add[i], output_sample.add[i],
+                                  transform=output.xfm_inv,
+                                  like=model.scan,
+                                  order=resample_order,
+                                  invert_transform=True)
+
+            if warp_seg:
+                m.resample_labels(sample.seg, output_sample.seg,
+                                  transform=output.xfm_inv,
+                                  aa=resample_aa,
+                                  order=resample_order,
+                                  like=model.scan,
+                                  invert_transform=True,
+                                  baa=resample_baa)
+
+            if symmetric:
+                m.resample_smooth(sample.scan_f, output_sample.scan_f,
+                                  transform=output.xfm_f_inv,
+                                  like=model.scan,
+                                  invert_transform=True,
+                                  order=resample_order)
+
+                for (i,j) in enumerate(sample.add_f):
+                    m.resample_smooth(sample.add_f[i], output_sample.add_f[i],
+                                      transform=output.xfm_f_inv,
+                                      like=model.scan,
+                                      order=resample_order,
+                                      invert_transform=True)
+
+                if warp_seg:
+                    m.resample_labels(sample.seg_f, output_sample.seg_f,
+                                      transform=output.xfm_f_inv,
+                                      aa=resample_aa,
+                                      order=resample_order,
+                                      like=model.scan,
+                                      invert_transform=True,
+                                      baa=resample_baa )
+
+        if output_inv_target is not None:
+            m.resample_smooth(model.scan, output_inv_target.scan,
+                              transform=output.xfm,
+                              like=sample.scan,
+                              order=resample_order,
+                              invert_transform=True)
+
+            for (i,j) in enumerate(output_inv_target.add):
+                m.resample_smooth(model.add[i], output_inv_target.add[i],
+                                  transform=output.xfm,
+                                  like=sample.scan,
+                                  order=resample_order,
+                                  invert_transform=True)
+
+            if warp_seg:
+                m.resample_labels(model.seg, output_inv_target.seg,
+                                  transform=output.xfm,
+                                  aa=resample_aa,
+                                  order=resample_order,
+                                  like=sample.scan,
+                                  invert_transform=True,
+                                  baa=resample_baa)
+
+            if symmetric:
+                m.resample_smooth(model.scan, output_inv_target.scan_f,
+                                  transform=output.xfm_f,
+                                  like=sample.scan,
+                                  invert_transform=True,
+                                  order=resample_order)
+
+                for (i,j) in enumerate(output_inv_target.add):
+                    m.resample_smooth(model.add_f[i], output_inv_target.add_f[i],
+                                      transform=output.xfm_f,
+                                      like=sample.scan,
+                                      invert_transform=True,
+                                      order=resample_order)
+
+                if warp_seg:
+                    m.resample_labels(model.seg, output_inv_target.seg_f,
+                                      transform=output.xfm_f,
+                                      aa=resample_aa,
+                                      order=resample_order,
+                                      like=sample.scan,
+                                      invert_transform=True,
+                                      baa=resample_baa )
+
+    except mincError as e:
+        print("Exception in non_linear_registration:{} {}".format(sample.name,repr(e)))
+        traceback.print_exc(file=sys.stderr)
+        raise
+    except :
+        print("Exception in non_linear_registration:{} {}".format(sample.name,sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stderr)
+        raise
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/resample.py b/ipl/grading/resample.py
new file mode 100644
index 0000000..74db66b
--- /dev/null
+++ b/ipl/grading/resample.py
@@ -0,0 +1,296 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
FONOV +# @date +# + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +from .filter import * + + +# scoop parallel execution +from scoop import futures, shared + + +def resample_file(input,output,xfm=None,like=None,order=4,invert_transform=False): + '''resample input file using proveded transformation''' + try: + with mincTools() as m: + m.resample_smooth(input,output,xfm=xfm,like=like,order=order,invert_transform=invert_transform) + except mincError as e: + print("Exception in resample_file:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in resample_file:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +def resample_split_segmentations(input, output,xfm=None, like=None, order=4, invert_transform=False, symmetric=False): + '''resample individual segmentations, using parallel execution''' + results=[] + base=input.seg.rsplit('.mnc',1)[0] + for (i,j) in input.seg_split.items(): + if not output.seg_split.has_key(i): + output.seg_split[i]='{}_{:03d}.mnc'.format(base,i) + + results.append(futures.submit( + resample_file,j,output.seg_split[i],xfm=xfm,like=like,order=order,invert_transform=invert_transform + )) + if symmetric: + base=input.seg_f.rsplit('.mnc',1)[0] + for (i,j) in input.seg_f_split.items(): + if not output.seg_f_split.has_key(i): + output.seg_split[i]='{}_{:03d}.mnc'.format(base,i) + + results.append(futures.submit( + resample_file,j,output.seg_f_split[i],xfm=xfm,like=like,order=order,invert_transform=invert_transform + )) + futures.wait(results, return_when=futures.ALL_COMPLETED) + + +def warp_rename_seg( sample, model, output, + transform=None, + symmetric=False, + symmetric_flip=False, + lut=None, + flip_lut=None, + resample_order=2, + resample_aa=None, + resample_baa=False, + invert_transform=False, + use_flipped=False, + datatype=None): + try: + with mincTools() as m: + xfm=None + if transform is not None: + xfm=transform.xfm + + if symmetric: + xfm_f=transform.xfm_f + + m.resample_labels(sample.seg, output.seg, + transform=xfm, + aa=resample_aa, + order=resample_order, + remap=lut, + like=model.scan, + invert_transform=invert_transform, + datatype=datatype, + baa=resample_baa) + + if symmetric: + + seg_f=sample.seg + + if use_flipped: + seg_f=sample.seg_f + + if symmetric_flip: + m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0]) + xfm_f=m.tmp('flip_x.xfm') + + if transform is not None: + m.xfmconcat( [m.tmp('flip_x.xfm'), transform.xfm_f ], m.tmp('transform_flip.xfm') ) + xfm_f=m.tmp('transform_flip.xfm') + + m.resample_labels(seg_f, output.seg_f, + transform=xfm_f, + aa=resample_aa, + order=resample_order, + remap=flip_lut, + like=model.scan, + invert_transform=invert_transform, + datatype=datatype, + baa=resample_baa) + + output.mask=None + output.mask_f=None + + except mincError as e: + print("Exception in warp_rename_seg:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + + except : + print("Exception in warp_rename_seg:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def warp_sample( sample, + model, + output, + transform=None, + symmetric=False, + symmetric_flip=False, + resample_order=None, + use_flipped=False, + invert_transform=False, + filters=None): + # TODO: add filters here + try: + with mincTools() as m: + xfm=None + xfm_f=None + seg_output=output.seg + seg_output_f=output.seg_f + + #if seg_output is None: + #seg_output=model.seg + + #if 
seg_output_f is None: + #seg_output_f=model.seg + + if transform is not None: + xfm=transform.xfm + if symmetric: + xfm_f=transform.xfm_f + + output_scan=output.scan + + if filters is not None: + output_scan=m.tmp('sample.mnc') + + m.resample_smooth(sample.scan, output_scan, transform=xfm, like=model.scan, order=resample_order, invert_transform=invert_transform) + + if filters is not None: + # TODO: maybe move it to a separate stage? + # HACK: assuming that segmentation was already warped! + apply_filter(output_scan, output.scan, filters, model=model.scan, input_mask=output.mask, input_labels=seg_output, model_labels=model.seg) + + for (i,j) in enumerate( sample.add ): + output_scan = output.add[i] + if filters is not None: + output_scan=m.tmp('sample_{}.mnc').format(i) + + m.resample_smooth(sample.add[i], output_scan, transform=xfm, like=model.scan, order=resample_order,invert_transform=invert_transform) + + if filters is not None: + # TODO: maybe move it to a separate stage? + # TODO: apply segmentations for seg-based filtering + apply_filter(output_scan, output.add[i], filters, model=model.scan, input_mask=output.mask, input_labels=seg_output, model_labels=model.seg) + + if symmetric: + scan_f=sample.scan + if use_flipped: + scan_f=sample.scan + + if symmetric_flip: + m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0]) + xfm_f=m.tmp('flip_x.xfm') + + if transform is not None: + m.xfmconcat( [m.tmp('flip_x.xfm'), transform.xfm_f ], m.tmp('transform_flip.xfm') ) + xfm_f=m.tmp('transform_flip.xfm') + + output_scan_f=output.scan_f + if filters is not None: + output_scan_f=m.tmp('sample_f.mnc') + + m.resample_smooth(scan_f, output_scan_f, transform=xfm_f, like=model.scan, order=resample_order,invert_transform=invert_transform) + + if filters is not None: + # TODO: maybe move it to a separate stage? + apply_filter(output_scan_f, output.scan_f, filters, model=model.scan, input_mask=output.mask_f, input_labels=seg_output_f, model_labels=model.seg) + + for (i,j) in enumerate( sample.add_f ): + output_scan_f = output.add_f[i] + if filters is not None: + output_scan_f=m.tmp('sample_f_{}.mnc').format(i) + + m.resample_smooth( sample.add_f[i], output_scan_f, transform=xfm_f, like=model.scan, order=resample_order,invert_transform=invert_transform) + + if filters is not None: + # TODO: maybe move it to a separate stage? 
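As an aside, the symmetric branch above always builds its flipped-space transform the same way: compose a left-right mirror with the forward transform. A standalone sketch of that composition, using only `mincTools` calls that already appear above (`param2xfm`, `xfmconcat`); the file names are hypothetical and MINC command-line tools must be installed:

```python
from ipl.minc_tools import mincTools

# Sketch: compose a left-right flip with a forward transform, as in the
# symmetric_flip branches of warp_rename_seg / warp_sample.
# 'forward.xfm' and 'forward_flip.xfm' are hypothetical paths.
with mincTools() as m:
    m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0])  # mirror across x=0
    m.xfmconcat([m.tmp('flip_x.xfm'), 'forward.xfm'], 'forward_flip.xfm')
```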
+ apply_filter( output_scan_f, output.add_f[i], filters, model=model.scan, input_mask=output.mask_f, input_labels=seg_output_f, model_labels=model.seg) + + output.mask=None + output.mask_f=None + + except mincError as e: + print("Exception in warp_sample:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in warp_sample:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +def concat_resample(lib_scan, + xfm_lib, + xfm_sample, + output, + model=None, + resample_aa=None, + resample_order=2, + label_resample_order=2, + resample_baa=False, + flip=False ): + '''Cocnatenate inv(xfm2) and inv(xfm1) and resample scan''' + try: + + if not os.path.exists(output.seg) or \ + not os.path.exists(output.scan) : + with mincTools() as m: + _model=None + + if model is not None: + _model=model.scan + + full_xfm=None + + if xfm_lib is not None and xfm_sample is not None: + if flip: + m.xfmconcat([ xfm_sample.xfm_f, xfm_lib.xfm_inv ], m.tmp('Full.xfm') ) + else: + m.xfmconcat([ xfm_sample.xfm, xfm_lib.xfm_inv ], m.tmp('Full.xfm') ) + full_xfm=m.tmp('Full.xfm') + elif xfm_lib is not None: + full_xfm=xfm_lib.xfm_inv + elif xfm_sample is not None: + if flip: + full_xfm=xfm_sample.xfm_f + else: + full_xfm=xfm_sample.xfm + + m.resample_labels(lib_scan.seg, output.seg, + transform=full_xfm, + aa=resample_aa, + order=label_resample_order, + like=_model, + invert_transform=True, + baa=resample_baa ) + + m.resample_smooth(lib_scan.scan, output.scan, + transform=full_xfm, + order=resample_order, + like=_model, + invert_transform=True) + + for (i,j) in enumerate(lib_scan.add): + m.resample_smooth(lib_scan.add[i], output.add[i], + transform=full_xfm, + order=resample_order, + like=_model, + invert_transform=True) + except mincError as e: + print("Exception in concat_resample:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in concat_resample:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/grading/structures.py b/ipl/grading/structures.py new file mode 100644 index 0000000..7f7dad9 --- /dev/null +++ b/ipl/grading/structures.py @@ -0,0 +1,171 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV
+# @date
+#
+# data structures used in grading package
+
+import shutil
+import os
+import sys
+import tempfile
+import traceback
+import json
+
+
+class MriDataset(object):
+    ''' Scan sample with segmentation and mask'''
+    def __init__(self, prefix=None, name=None, scan=None, mask=None, seg=None,
+                 scan_f=None, mask_f=None, seg_f=None, protect=False,
+                 add=[], add_n=None,
+                 add_f=[], group=None, grading=None ):
+        self.prefix=prefix
+        self.name=name
+        self.scan=scan
+        self.mask=mask
+        self.seg=seg
+        self.protect=protect
+        self.seg_split={}
+        self.group=group
+        self.grading=grading
+
+        self.scan_f = scan_f
+        self.mask_f = mask_f
+        self.seg_f  = seg_f
+        self.seg_f_split={}
+        self.add    = add
+        self.add_f  = add_f
+
+        if self.name is None :
+            if scan is not None:
+                self.name=os.path.basename(scan).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+                if self.prefix is None:
+                    self.prefix=os.path.dirname(self.scan)
+            else:
+                if self.prefix is None:
+                    raise ValueError("trying to create dataset without name and prefix")
+                (_h, _name) = tempfile.mkstemp(suffix='.mnc', dir=prefix)
+                os.close(_h)
+                self.name=os.path.relpath(_name,prefix)
+                os.unlink(_name)
+
+        if scan is None:
+            if self.prefix is not None:
+                self.scan=self.prefix+os.sep+self.name+'.mnc'
+                self.mask=self.prefix+os.sep+self.name+'_mask.mnc'
+                self.seg=self.prefix+os.sep+self.name+'_seg.mnc'
+                self.scan_f=self.prefix+os.sep+self.name+'_f.mnc'
+                self.mask_f=self.prefix+os.sep+self.name+'_f_mask.mnc'
+                self.seg_f=self.prefix+os.sep+self.name+'_f_seg.mnc'
+
+                if add_n is not None:
+                    self.add=[self.prefix+os.sep+self.name+'_{}.mnc'.format(i) for i in range(add_n)]
+                    self.add_f=[self.prefix+os.sep+self.name+'_{}_f.mnc'.format(i) for i in range(add_n)]
+                else:
+                    self.add=[]
+                    self.add_f=[]
+    #------
+
+    def __repr__(self):
+        return "MriDataset(\n prefix=\"{}\",\n name=\"{}\",\n scan=\"{}\",\n scan_f=\"{}\",\n mask=\"{}\",\n mask_f=\"{}\",\n seg=\"{}\",\n seg_f=\"{}\",\n protect={},\n add={},\n add_f={},\n group={},\n grading={})".\
+               format(self.prefix,self.name,self.scan,self.scan_f,self.mask,self.mask_f,self.seg,self.seg_f,repr(self.protect),repr(self.add),repr(self.add_f),self.group,self.grading)
+
+    def cleanup(self):
+        if not self.protect:
+            for i in (self.scan, self.mask, self.seg, self.scan_f, self.mask_f, self.seg_f ):
+                if i is not None and os.path.exists(i):
+                    os.unlink(i)
+
+            for (i,j) in self.seg_split.items():
+                if os.path.exists(j):
+                    os.unlink(j)
+
+            for (i,j) in self.seg_f_split.items():
+                if os.path.exists(j):
+                    os.unlink(j)
+
+            for (i,j) in enumerate(self.add):
+                if os.path.exists(j):
+                    os.unlink(j)
+    # ------------
+
+
+class MriTransform(object):
+    '''Transformation'''
+    def __init__(self, prefix=None, name=None, xfm=None, protect=False, xfm_f=None, xfm_inv=None, xfm_f_inv=None, nl=False ):
+        self.prefix=prefix
+        self.name=name
+
+        self.xfm=xfm
+        self.grid=None
+
+        self.xfm_f=xfm_f
+        self.grid_f=None
+
+        self.xfm_inv=xfm_inv
+        self.grid_inv=None
+
+        self.xfm_f_inv=xfm_f_inv
+        self.grid_f_inv=None
+
+        self.protect=protect
+        self.nl=nl
+
+        if name is None and xfm is None:
+            raise ValueError("Undefined name and xfm")
+
+        if name is None and xfm is not None:
+            self.name=os.path.basename(xfm).rsplit('.xfm',1)[0]
+
+            if self.prefix is None:
+                self.prefix=os.path.dirname(self.xfm)
+
+        if xfm is None:
+            if self.prefix is not None:
+                self.xfm=       self.prefix+os.sep+self.name+'.xfm'
+                self.grid=      self.prefix+os.sep+self.name+'_grid_0.mnc'
+
+                self.xfm_f=     self.prefix+os.sep+self.name+'_f.xfm'
+                self.grid_f=    self.prefix+os.sep+self.name+'_f_grid_0.mnc'
+
+                self.xfm_inv=   self.prefix+os.sep+self.name+'_invert.xfm'
+                self.grid_inv=  self.prefix+os.sep+self.name+'_invert_grid_0.mnc'
+
+                self.xfm_f_inv= self.prefix+os.sep+self.name+'_f_invert.xfm'
+                self.grid_f_inv=self.prefix+os.sep+self.name+'_f_invert_grid_0.mnc'
+
+    def __repr__(self):
+        return 'MriTransform(prefix="{}",name="{}")'.\
+               format(self.prefix, self.name )
+
+    def cleanup(self):
+        if not self.protect:
+            for i in (self.xfm, self.grid, self.xfm_f, self.grid_f, self.xfm_inv, self.grid_inv, self.xfm_f_inv, self.grid_f_inv ):
+                if i is not None and os.path.exists(i):
+                    os.unlink(i)
+
+class GMRIEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, MriTransform):
+            return {'name':obj.name,
+                    'xfm' :obj.xfm,
+                    'xfm_f':obj.xfm_f,
+                    'xfm_inv' :obj.xfm_inv,
+                    'xfm_f_inv':obj.xfm_f_inv,
+                    'prefix':obj.prefix
+                   }
+        elif isinstance(obj, MriDataset):
+            return {'name':obj.name,
+                    'scan':obj.scan,
+                    'mask':obj.mask,
+                    'scan_f':obj.scan_f,
+                    'mask_f':obj.mask_f,
+                    'prefix':obj.prefix,
+                    'add':obj.add,
+                    'add_f':obj.add_f,
+                    'group':obj.group,
+                    'grading':obj.grading,
+                   }
+        # Let the base class default method raise the TypeError
+        return json.JSONEncoder.default(self, obj)
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/grading/train.py b/ipl/grading/train.py
new file mode 100644
index 0000000..1bdcc29
--- /dev/null
+++ b/ipl/grading/train.py
@@ -0,0 +1,653 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+from __future__ import print_function
+
+import shutil
+import os
+import sys
+import csv
+import copy
+import traceback
+
+# MINC stuff
+# from ipl.minc_tools import mincTools,mincError
+
+# scoop parallel execution
+from scoop import futures, shared
+
+from .filter import *
+from .structures import *
+from .registration import *
+from .resample import *
+from .model import *
+from .library import *
+
+
+def inv_dict(d):
+    return { v:k for (k,v) in d.items() }
+
+def generate_library(parameters, output, debug=False, cleanup=False):
+    '''Actual generation of the grading library'''
+    try:
+        if debug: print(repr(parameters))
+
+        # read parameters
+        reference_model           = parameters[ 'reference_model']
+        reference_mask            = parameters.get( 'reference_mask', None)
+        reference_model_add       = parameters.get( 'reference_model_add', [])
+
+        reference_local_model     = parameters.get( 'reference_local_model', None)
+        reference_local_mask      = parameters.get( 'reference_local_mask', None)
+
+        reference_local_model_flip= parameters.get( 'reference_local_model_flip', None)
+        reference_local_mask_flip = parameters.get( 'reference_local_mask_flip', None)
+
+        library                   = parameters[ 'library' ]
+
+        work_dir                  = parameters.get( 'workdir', output+os.sep+'work')
+
+        train_groups              = parameters[ 'groups']
+
+        # should we build symmetric model
+        build_symmetric           = parameters.get( 'build_symmetric', False)
+
+        # should we build symmetric flipped model
+        build_symmetric_flip      = parameters.get( 'build_symmetric_flip', False)
+
+        # lookup table for renaming labels for more compact representation
+        build_remap               = parameters.get( 'build_remap', {})
+
+        # lookup table for renaming labels for more compact representation,
+        # when building symmetrized library
+        build_flip_remap          = parameters.get( 'build_flip_remap', {})
+
+        # lookup table for renaming labels for more compact representation,
+        # when building symmetrized library
+        build_unflip_remap        = parameters.get( 'build_unflip_remap', {})
+
+        if not build_unflip_remap and build_flip_remap and build_remap:
+            build_unflip_remap = create_unflip_remap(build_remap, build_flip_remap)
+
+        # label map
+        label_map                 = parameters.get( 'label_map', None)
+
+        # perform filtering as final stage of the library creation
+        pre_filters               = parameters.get( 'pre_filters', None )
+        post_filters              = parameters.get( 'post_filters', parameters.get( 'filters', None ))
+
+        resample_order            = parameters.get( 'resample_order', 2)
+        label_resample_order      = parameters.get( 'label_resample_order', resample_order)
+
+        # use boundary anti-aliasing filter when resampling labels
+        resample_baa              = parameters.get( 'resample_baa', True)
+
+        # perform label warping to create final library
+        do_warp_labels            = parameters.get( 'warp_labels', False)
+
+        # extend bounding box to reduce boundary effects
+        extend_boundary           = parameters.get( 'extend_boundary', 4)
+
+        # extend masks
+        #dilate_mask              = parameters.get( 'dilate_mask', 3)
+        op_mask                   = parameters.get( 'op_mask', 'E[2] D[4]')
+
+        # if linear registration should be performed
+        do_initial_register       = parameters.get( 'initial_register',
+                                        parameters.get( 'linear_register', {}))
+
+        if do_initial_register is not None and isinstance(do_initial_register, dict):
+            initial_register      = do_initial_register
+            do_initial_register   = True
+        else:
+            initial_register      = {}
+
+        inital_reg_type           = parameters.get( 'initial_register_type',
+                                        parameters.get( 'linear_register_type',
+                                            initial_register.get('type','-lsq12')))
+
+        inital_reg_ants           = parameters.get( 'initial_register_ants',
+                                        parameters.get( 'linear_register_ants', False))
+
+        inital_reg_options        = parameters.get( 'initial_register_options',
+                                        initial_register.get('options',None) )
+
+        inital_reg_downsample     = parameters.get( 'initial_register_downsample',
+                                        initial_register.get('downsample',None))
+
+        inital_reg_use_mask       = parameters.get( 'initial_register_use_mask',
+                                        initial_register.get('use_mask',False))
+
+        initial_reg_objective     = initial_register.get('objective','-xcorr')
+
+        # perform local linear registration
+        do_initial_local_register = parameters.get( 'initial_local_register',
+                                        parameters.get( 'local_linear_register', {}) )
+        if do_initial_local_register is not None and isinstance(do_initial_local_register, dict):
+            initial_local_register=do_initial_local_register
+            do_initial_local_register=True
+        else:
+            initial_local_register={}
+
+        local_reg_type            = parameters.get( 'local_register_type',
+                                        initial_local_register.get('type','-lsq12'))
+
+        local_reg_ants            = parameters.get( 'local_register_ants', False)
+
+        local_reg_opts            = parameters.get( 'local_register_options',
+                                        initial_local_register.get('options',None))
+
+        local_reg_bbox            = parameters.get( 'local_register_bbox',
+                                        initial_local_register.get('bbox',False ))
+
+        local_reg_downsample      = parameters.get( 'local_register_downsample',
+                                        initial_local_register.get('downsample',None))
+
+        local_reg_use_mask        = parameters.get( 'local_register_use_mask',
+                                        initial_local_register.get('use_mask',True))
+
+        local_reg_objective       = initial_local_register.get('objective','-xcorr')
+
+        # if non-linear registration should be performed for library creation
+        do_nonlinear_register     = parameters.get( 'non_linear_register', False)
+
+        # if non-linear registration should be performed with ANTS
+        do_nonlinear_register_ants= parameters.get( 'non_linear_register_ants', False)
+        nlreg_level               = parameters.get( 'non_linear_register_level', 2)
+        nlreg_start               = parameters.get( 'non_linear_register_start', 16)
+        nlreg_options             = parameters.get( 'non_linear_register_options', None)
+        nlreg_downsample          = parameters.get( 'non_linear_register_downsample', None)
+
+        nonlinear_register_type   = parameters.get( 'non_linear_register_type', None)
+        if nonlinear_register_type is None:
+            if do_nonlinear_register_ants:
+                nonlinear_register_type='ants'
+
+
+
+        modalities                = parameters.get( 'modalities', 1 ) - 1
+
+        create_patch_norm_lib     = parameters.get( 'create_patch_norm_lib', False)
+        patch_norm_lib_pct        = parameters.get( 'patch_norm_lib_pct', 0.1 )
+        patch_norm_lib_sub        = parameters.get( 'patch_norm_lib_sub', 1 )
+        patch_norm_lib_patch      = parameters.get( 'patch_norm_lib_patch', 2 ) # 5x5x5 patches
+
+        # prepare directories
+        if not os.path.exists(output):
+            os.makedirs(output)
+
+        if not os.path.exists(work_dir):
+            os.makedirs(work_dir)
+
+        # 0. go over input samples, prepare variables
+        input_samples=[]
+        filtered_samples=[]
+        lin_xfm=[]
+        lin_samples=[]
+        tmp_lin_samples=[]
+        bbox_lin_xfm=[]
+        #nl_xfm=[]
+        #bbox_samples=[]
+
+        final_samples=[]
+        warped_samples=[]
+        final_transforms=[]
+        tmp_log_samples=[]
+
+        patch_norm_db  = output + os.sep + 'patch_norm.db'
+        patch_norm_idx = output + os.sep + 'patch_norm.idx'
+
+        # identity xfm
+        identity_xfm = MriTransform(prefix=work_dir, name='identity' )
+        with mincTools() as m:
+            m.param2xfm(identity_xfm.xfm)
+            m.param2xfm(identity_xfm.xfm_f)
+
+        # check if library is a list; if it is not, assume it's a reference to a csv file
+        if not isinstance(library, list):
+            with open(library,'r') as f:
+                library=list(csv.reader(f))
+
+        # setup files
+        model = MriDataset(scan=reference_model, mask=reference_mask, add=reference_model_add)
+
+        for (j,i) in enumerate(library):
+            scan=i[0]
+            seg=i[1]
+            add=i[2:modalities+2] # additional modalities
+            group=None
+            grading=None
+
+            mask = work_dir + os.sep + 'fake_mask_' + os.path.basename(scan)
+            create_fake_mask(seg, mask)
+
+            if len(i)>modalities+2: # assume that the extra columns are group and grading
+                group=  int(i[modalities+2])
+                grading=float(i[modalities+3])
+
+            sample = MriDataset(scan=scan, seg=seg, mask=mask, protect=True, add=add, group=group, grading=grading)
+            input_samples.append( sample )
+            filtered_samples.append( MriDataset( prefix=work_dir, name='flt_'+sample.name, add_n=modalities, group=group, grading=grading ) )
+
+            lin_xfm.append( MriTransform(prefix=work_dir, name='lin_'+sample.name ) )
+            bbox_lin_xfm.append( MriTransform(prefix=work_dir, name='lin_bbox_'+sample.name ) )
+            lin_samples.append( MriDataset( prefix=work_dir, name='lin_'+sample.name, add_n=modalities, group=group, grading=grading ) )
+            tmp_lin_samples.append( MriDataset( prefix=work_dir, name='tmp_lin_'+ sample.name, add_n=modalities, group=group, grading=grading ) )
+            tmp_log_samples.append( MriDataset( prefix=work_dir, name='tmp_log_'+ sample.name, group=group, grading=grading ) )
+            final_samples.append( MriDataset( prefix=output, name=sample.name, add_n=modalities, group=group, grading=grading ) )
+            warped_samples.append( MriDataset( prefix=output, name='nl_'+sample.name, add_n=modalities, group=group, grading=grading ) )
+            final_transforms.append( MriTransform(prefix=output, name='nl_'+sample.name ) )
+
+        # temp array
+        results=[]
+
+        if pre_filters is not None:
+            # apply pre-filtering before other stages
+            filter_all=[]
+
+            for (j,i) in enumerate(input_samples):
+                # a HACK?
+ filtered_samples[j].seg = input_samples[j].seg + filtered_samples[j].group = input_samples[j].group + filtered_samples[j].grading = input_samples[j].grading + filtered_samples[j].mask = input_samples[j].mask + + filter_all.append( futures.submit( + filter_sample, input_samples[j], filtered_samples[j], pre_filters, model=model + )) + + futures.wait(filter_all, return_when=futures.ALL_COMPLETED) + else: + filtered_samples=input_samples + + if build_symmetric: + # need to flip the inputs + flipdir=work_dir+os.sep+'flip' + if not os.path.exists(flipdir): + os.makedirs(flipdir) + flip_all=[] + + labels_datatype='short'# TODO: determine optimal here + #if largest_label>255:labels_datatype='short' + + for (j,i) in enumerate(filtered_samples): + i.scan_f=flipdir+os.sep+os.path.basename(i.scan) + i.add_f=[] + for (k,j) in enumerate(i.add): + i.add_f.append(flipdir+os.sep+os.path.basename(i.add[k])) + + if i.mask is not None: + i.mask_f=flipdir+os.sep+'mask_'+os.path.basename(i.scan) + + flip_all.append( futures.submit( generate_flip_sample, i, labels_datatype=labels_datatype ) ) + + futures.wait(flip_all, return_when=futures.ALL_COMPLETED) + + # 1. run global linear registration if nedded + if do_initial_register: + for (j,i) in enumerate(filtered_samples): + if inital_reg_type=='elx' or inital_reg_type=='elastix' : + results.append( futures.submit( + elastix_registration, i, model, lin_xfm[j], + symmetric=build_symmetric, + parameters=inital_reg_options, + ) ) + elif inital_reg_type=='ants' or inital_reg_ants: + results.append( futures.submit( + linear_registration, i, model, lin_xfm[j], + symmetric=build_symmetric, + linreg=inital_reg_options, + ants=True + ) ) + else: + results.append( futures.submit( + linear_registration, i, model, lin_xfm[j], + symmetric=build_symmetric, + reg_type=inital_reg_type, + linreg=inital_reg_options, + objective=initial_reg_objective + ) ) + # TODO: do we really need to wait for result here? + futures.wait( results, return_when=futures.ALL_COMPLETED ) + # TODO: determine if we need to resample input files here + lin_samples=input_samples + else: + lin_samples=input_samples + + # 2. for each part run linear registration, apply flip and do symmetric too + # 3. 
perform local linear registrtion and local intensity normalization if needed + # create a local reference model + local_model=None + local_model_ovl=None + local_model_avg=None + local_model_sd=None + + if reference_local_model is None : + local_model =MriDataset( prefix=output, name='local_model', add_n=modalities ) + local_model_ovl=MriDataset( prefix=output, name='local_model_ovl' ) + local_model_avg=MriDataset( prefix=output, name='local_model_avg', add_n=modalities ) + local_model_sd =MriDataset( prefix=output, name='local_model_sd', add_n=modalities ) + + if not os.path.exists( local_model.scan ): + for (j,i) in enumerate( filtered_samples ): + xfm=None + if do_initial_register: + xfm=lin_xfm[j] + + results.append( futures.submit( + warp_rename_seg, i, model, tmp_lin_samples[j], + transform=xfm, + symmetric=build_symmetric, + symmetric_flip=build_symmetric, + lut=build_remap, + flip_lut=build_flip_remap, + resample_order=0, + resample_baa=False # This is quick and dirty part + ) ) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + create_local_model(tmp_lin_samples, model, local_model, extend_boundary=extend_boundary, op=op_mask) + + if not os.path.exists(local_model.scan_f) and build_symmetric and build_symmetric_flip: + create_local_model_flip(local_model, model, remap=build_unflip_remap, op=op_mask) + else: + local_model=MriDataset(scan=reference_local_model, mask=reference_local_mask) + + local_model.scan_f=reference_local_model_flip + local_model.mask_f=reference_local_mask_flip + + if do_initial_local_register: + for (j,i) in enumerate(lin_samples): + init_xfm=None + if do_initial_register: + init_xfm=lin_xfm[j] + + if local_reg_type=='elx' or local_reg_type=='elastix' : + results.append( futures.submit( + elastix_registration, i, local_model, bbox_lin_xfm[j], + init_xfm=init_xfm, + symmetric=build_symmetric, + parameters=local_reg_opts, + bbox=local_reg_bbox + ) ) + elif local_reg_type=='ants' or local_reg_ants: + results.append( futures.submit( + linear_registration, i, local_model, bbox_lin_xfm[j], + init_xfm=init_xfm, + symmetric=build_symmetric, + reg_type=local_reg_type, + linreg=local_reg_opts, + close=True, + ants=True, + bbox=local_reg_bbox + ) ) + else: + if not do_initial_register: + init_xfm=identity_xfm # to avoid strange initialization errors + + results.append( futures.submit( + linear_registration, i, local_model, bbox_lin_xfm[j], + init_xfm=init_xfm, + symmetric=build_symmetric, + reg_type=local_reg_type, + linreg=local_reg_opts, + close=True, + bbox=local_reg_bbox, + objective=local_reg_objective + ) ) + + # TODO: do we really need to wait for result here? 
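All of the registration loops here farm jobs out through SCOOP and collect them with `futures.wait`, exactly as in the call that follows. A toy sketch of that submit/wait pattern, runnable on its own under `python -m scoop` (`_toy` is a hypothetical stand-in for the registration functions):

```python
from scoop import futures

def _toy(x):
    # hypothetical stand-in for linear_registration / elastix_registration
    return x * x

if __name__ == '__main__':
    jobs = [futures.submit(_toy, i) for i in range(4)]          # fan out
    futures.wait(jobs, return_when=futures.ALL_COMPLETED)       # barrier
    print([j.result() for j in jobs])                           # [0, 1, 4, 9]
```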
+ futures.wait(results, return_when=futures.ALL_COMPLETED ) + else: + bbox_lin_xfm=lin_xfm + + + # create bbox samples + results=[] + for (j, i) in enumerate(input_samples): + xfm=None + + if i.mask is None: + final_samples[j].mask=None + + if i.mask_f is None: + final_samples[j].mask_f=None + + if do_initial_local_register or do_initial_register: + xfm=bbox_lin_xfm[j] + # + results.append( futures.submit( + warp_rename_seg, i, local_model, final_samples[j], + transform=xfm, + symmetric=build_symmetric, + symmetric_flip=build_symmetric, + lut=build_remap, + flip_lut=build_flip_remap, + resample_order=label_resample_order, + resample_baa=resample_baa + ) ) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + results=[] + for (j, i) in enumerate(input_samples): + xfm=None + if do_initial_local_register or do_initial_register: + xfm=bbox_lin_xfm[j] + + results.append( futures.submit( + warp_sample, i, local_model, final_samples[j], + transform=xfm, + symmetric=build_symmetric, + symmetric_flip=build_symmetric, + resample_order=resample_order, + filters=post_filters, + ) ) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + if create_patch_norm_lib: + #for (j, i) in enumerate(final_samples): + # results.append( futures.submit( + # log_transform_sample, i , tmp_log_samples[j] ) ) + # + # futures.wait(results, return_when=futures.ALL_COMPLETED) + + create_patch_norm_db( final_samples, patch_norm_db, + patch_norm_idx, + pct=patch_norm_lib_pct, + sub=patch_norm_lib_sub, + patch=patch_norm_lib_patch) + results=[] + if do_nonlinear_register: + for (j, i) in enumerate(final_samples): + # TODO: decide what to do with mask + i.mask=None + + if nonlinear_register_type=='elx' or nonlinear_register_type=='elastix' : + results.append( futures.submit( + elastix_registration, + i, + local_model, + final_transforms[j], + symmetric=build_symmetric, + level=nlreg_level, + parameters=nlreg_options, + output_sample=warped_samples[j], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa, + nl=True, + downsample=nlreg_downsample + ) ) + elif nonlinear_register_type=='ants' or do_nonlinear_register_ants: + results.append( futures.submit( + non_linear_registration, + i, + local_model, + final_transforms[j], + symmetric=build_symmetric, + level=nlreg_level, + parameters=nlreg_options, + output_sample=warped_samples[j], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa, + ants=True, + downsample=nlreg_downsample + ) ) + else: + results.append( futures.submit( + non_linear_registration, + i, + local_model, + final_transforms[j], + symmetric=build_symmetric, + level=nlreg_level, + parameters=nlreg_options, + output_sample=warped_samples[j], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa, + ants=False, + downsample=nlreg_downsample + ) ) + + final_samples[j].mask=None + # TODO: do we really need to wait for result here? 
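Note that every `MriDataset` created in this stage derives its file names purely from `prefix` and `name` (see ipl/grading/structures.py above), which is what lets the loops above refer to `final_samples[j].seg` and friends before anything is written. A minimal sketch, with a hypothetical directory:

```python
from ipl.grading.structures import MriDataset

# Sketch: file-name derivation in MriDataset; '/tmp/lib' is a hypothetical path.
ds = MriDataset(prefix='/tmp/lib', name='subj01', add_n=1)
print(ds.scan)  # /tmp/lib/subj01.mnc
print(ds.seg)   # /tmp/lib/subj01_seg.mnc
print(ds.add)   # ['/tmp/lib/subj01_0.mnc']
```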
+ futures.wait(results, return_when=futures.ALL_COMPLETED) + + with mincTools() as m: + # a hack, to replace a rough model with a new one + if os.path.exists(local_model.seg): + os.unlink(local_model.seg) + + # create majority voted model segmentation, for ANIMAL segmentation if needed + segs=['multiple_volume_similarity'] + + segs.extend([ i.seg for i in warped_samples ]) + if build_symmetric: segs.extend([ i.seg_f for i in warped_samples ]) + + segs.extend(['--majority', local_model.seg, '--bg', '--overlap', local_model_ovl.scan ] ) + m.command(segs,inputs=[],outputs=[local_model.seg,local_model_ovl.scan]) + + avg=['mincaverage','-float'] + avg.extend([ i.scan for i in warped_samples ]) + if build_symmetric: avg.extend([ i.scan_f for i in warped_samples ]) + avg.extend([local_model_avg.scan, '-sdfile', local_model_sd.scan ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.scan,local_model_sd.scan]) + + for i in range(modalities): + avg=['mincaverage','-float'] + avg.extend([ j.add[i] for j in warped_samples ]) + if build_symmetric: avg.extend([ j.add_f[i] for j in warped_samples ]) + + avg.extend([local_model_avg.add[i], '-sdfile', local_model_sd.add[i] ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.add[i],local_model_sd.add[i]]) + else: + with mincTools() as m: + # a hack, to replace a rough model with a new one + if os.path.exists(local_model.seg): + os.unlink(local_model.seg) + + # create majority voted model segmentation, for ANIMAL segmentation if needed + segs=['multiple_volume_similarity'] + segs.extend([ i.seg for i in final_samples ]) + + if build_symmetric: segs.extend([ i.seg_f for i in final_samples ]) + + segs.extend(['--majority', local_model.seg, '--bg','--overlap', local_model_ovl.scan] ) + m.command(segs,inputs=[],outputs=[local_model.seg,local_model_ovl.scan]) + + avg=['mincaverage','-float'] + avg.extend([ i.scan for i in final_samples ]) + if build_symmetric: avg.extend([ i.scan_f for i in final_samples ]) + avg.extend([local_model_avg.scan, '-sdfile', local_model_sd.scan ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.scan,local_model_sd.scan]) + + for i in range(modalities): + avg=['mincaverage','-float'] + avg.extend([ j.add[i] for j in final_samples ]) + if build_symmetric: avg.extend([ j.add_f[i] for j in final_samples ]) + avg.extend([local_model_avg.add[i], '-sdfile', local_model_sd.add[i] ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.add[i],local_model_sd.add[i]]) + + # number of classes including bg + classes_number=2 + # 6. 
create training library description + with mincTools() as m: + classes_number=int(m.execute_w_output(['mincstats', '-q', '-max',local_model.seg ]).rstrip("\n"))+1 + + library_description={} + # library models + library_description['model'] = model.scan + library_description['model_mask'] = model.mask + library_description['model_add'] = model.add + + library_description['local_model'] = local_model.scan + library_description['local_model_add']= local_model.add + library_description['local_model_mask']=local_model.mask + library_description['local_model_seg']= local_model.seg + + # library parameters + library_description['map']=inv_dict(dict(build_remap)) + library_description['classes_number']= classes_number + library_description['nl_samples_avail']=do_nonlinear_register + library_description['modalities']=modalities+1 + library_description['groups']=train_groups + library_description['label_map'] = label_map + + if build_symmetric and build_symmetric_flip: + library_description['local_model_flip'] =local_model.scan_f + library_description['local_model_add_flip'] =local_model.add_f + library_description['local_model_mask_flip']=local_model.mask_f + library_description['local_model_seg_flip'] =local_model.seg_f + library_description['flip_map']=inv_dict(dict(build_flip_remap)) + else: + library_description['local_model_flip']=None + library_description['local_model_add_flip']=[] + library_description['local_model_mask_flip']=None + library_description['flip_map']={} + + library_description['library']=[] + + for (j, i) in enumerate(final_samples): + ss=[i.group,i.grading] + ss.extend([i.scan, i.seg ]) + ss.extend(i.add) + + if do_nonlinear_register: + ss.extend( [ final_transforms[j].xfm, final_transforms[j].xfm_inv, warped_samples[j].scan, warped_samples[j].seg ]) + + library_description['library'].append(ss) + + if build_symmetric: + ss=[i.group,i.grading] + ss.extend([i.scan_f, i.seg_f ]) + ss.extend(i.add_f) + + if do_nonlinear_register: + ss.extend( [ final_transforms[j].xfm_f, final_transforms[j].xfm_f_inv, warped_samples[j].scan_f, warped_samples[j].seg_f ]) + + library_description['library'].append(ss) + + save_library_info( library_description, output) + # cleanup + if cleanup: + shutil.rmtree(work_dir) + + except mincError as e: + print("Exception in generate_library:{}".format(str(e)),file=sys.stderr) + traceback.print_exc(file=sys.stderr) + raise + except : + print("Exception in generate_library:{}".format(sys.exc_info()[0]),file=sys.stderr) + traceback.print_exc(file=sys.stderr) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/lp/__init__.py b/ipl/lp/__init__.py new file mode 100644 index 0000000..ef7a225 --- /dev/null +++ b/ipl/lp/__init__.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# +# @author Vladimir S. FONOV +# @date 14/08/2015 +# +# Longitudinal pipeline preprocessing + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/lp/aqc.py b/ipl/lp/aqc.py new file mode 100644 index 0000000..7f5fe83 --- /dev/null +++ b/ipl/lp/aqc.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV
+# @date 14/08/2015
+#
+# Longitudinal pipeline automatic QC
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+#from ipl.minc_qc import qc,qc_field_contour
+
+
+
+def make_aqc_nu(t1w_field,aqc_nu,options={}):
+    pass
+
+def make_aqc_stx(t1w_tal,model_outline,aqc_tal,options={}):
+    with mincTools() as m:
+        m.aqc(t1w_tal.scan, aqc_tal.fname,
+              slices=options.get("slices",3))
+
+def make_aqc_add(t1w_tal,add_tal,aqc,options={}):
+    pass
+
+def make_aqc_mask(t1w_tal,aqc_mask,options={}):
+    pass
+
+def make_aqc_cls(t1w_tal,tal_cls,aqc_cls,options={}):
+    pass
+
+def make_aqc_lobes( t1w_tal, tal_lob,aqc_lob,options={}):
+    pass
\ No newline at end of file
diff --git a/ipl/lp/iter_pipeline.py b/ipl/lp/iter_pipeline.py
new file mode 100644
index 0000000..c34d164
--- /dev/null
+++ b/ipl/lp/iter_pipeline.py
@@ -0,0 +1,368 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date 14/08/2015
+#
+# Longitudinal pipeline preprocessing
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+# local stuff
+from .structures import *
+from .preprocess import *
+from .utils import *
+from .registration import *
+from .resample import *
+from .segment import *
+from .qc import *
+
+def iter_step( t1w_scan,
+               iteration,
+               output_dir,
+               prev_iter={},
+               options =None,
+               t2w_scan=None,
+               pdw_scan=None,
+               work_dir=None,
+               subject_id=None,
+               timepoint_id=None,
+               corr_t1w=None,
+               corr_t2w=None,
+               corr_pdw=None ):
+    """
+    drop-in replacement for the standard pipeline
+
+    Arguments:
+        t1w_scan     -- `MriScan` for T1w scan
+        iteration    -- iteration number
+        output_dir   -- string pointing to output directory
+
+    Keyword arguments:
+        prev_iter    -- information from previous iteration
+        options      -- pipeline options (dict)
+        t2w_scan     -- T2w scan
+        pdw_scan     -- PDw scan
+        work_dir     -- string pointing to work directory, default None - use output_dir
+        subject_id   -- ID of subject
+        timepoint_id -- ID of timepoint
+    """
+    try:
+        print("running iter_step options={}".format(repr(options)))
+
+        if options is None:
+            # TODO: load defaults from a settings file?
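Regarding the TODO above: the hard-coded defaults that follow could instead be read from a JSON file, matching the configuration style used elsewhere in the package. A minimal sketch ('iter_options.json' is a hypothetical path, not a file shipped with the code):

```python
import json

def load_options(path):
    # Sketch for the TODO above: read the pipeline options dict from a
    # JSON file instead of hard-coding it; 'path' is supplied by the caller.
    with open(path, 'r') as f:
        return json.load(f)

# options = load_options('iter_options.json')
```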
+ if iteration >0 : + options= { + 'model': 'mni_icbm152_t1_tal_nlin_sym_09c', + 'model_dir': '/opt/minc/share/icbm152_model_09c', + 't1w_nuc': {}, + 't2w_nuc': {}, + 'pdw_nuc': {}, + 't1w_stx': { + 'type':'ants', + 'resample':False, + #'options': { + #'levels': 3, + #'conf': {'3':1000,'2':1000,'1':1000}, + #'blur': {'3':8, '2':4, '1': 2 }, + #'shrink':{'3':8, '2':4, '1': 2 }, + #'convergence':'1.e-8,20', + #'cost_function':'MI', + #'cost_function_par':'1,32,random,0.3', + #'transformation':'similarity[ 0.3 ]', + #} + }, + 'stx': { + 'noscale':False, + }, + 'beast': { 'beastlib': '/opt/minc/share/beast-library-1.1' }, + 'tissue_classify': {}, + 'lobe_segment': {}, + 'nl': True, + 'lobes': True, + 'cls' : True, + 'qc': True, + 'denoise': {}, + + } + else: + options= { + 'model': 'mni_icbm152_t1_tal_nlin_sym_09c', + 'model_dir': '/opt/minc/share/icbm152_model_09c', + 't1w_nuc': {}, + 't2w_nuc': {}, + 'pdw_nuc': {}, + 't1w_stx': { + 'type':'ants', + 'resample':False, + #'options': { + #'levels': 2, + #'conf': {'2':1000,'1':1000}, + #'blur': {'2':4, '1': 2 }, + #'shrink':{'2':4, '1': 2 }, + #'convergence':'1.e-8,20', + #'cost_function':'MI', + #'cost_function_par':'1,32,random,0.3', + #'transformation':'similarity[ 0.3 ]', + #} + }, + 'stx': { + 'noscale':False, + }, + 'beast': { 'beastlib': '/opt/minc/share/beast-library-1.1' }, + 'tissue_classify': {}, + 'lobe_segment': {}, + 'nl': True, + 'lobes': True, + 'cls' : True, + 'qc': True, + 'denoise': {}, + + } + + dataset_id=subject_id + + if dataset_id is None: + dataset_id=t1w_scan.name + + if timepoint_id is not None: + dataset_id=dataset_id+'_'+timepoint_id + + # generate model reference + model_dir =options['model_dir'] + model_name=options['model'] + + model_t1w=MriScan(scan=model_dir+os.sep+options['model']+'.mnc', + mask=model_dir+os.sep+options['model']+'_mask.mnc') + + model_outline=MriScan(scan=model_dir+os.sep+options['model']+'_outline.mnc', + mask=None) + + lobe_atlas_dir=options.get('lobe_atlas_dir',None) + lobe_atlas_defs=options.get('lobe_atlas_defs',None) + + if lobe_atlas_dir is None: + lobe_atlas_dir=model_dir + os.sep + model_name + '_atlas'+os.sep + + if lobe_atlas_defs is None: + lobe_atlas_defs=model_dir + os.sep + model_name + '_atlas'+os.sep+'lobe_defs.csv' + if not os.path.exists(lobe_atlas_defs): + lobe_atlas_defs=None + + if work_dir is None: + work_dir=output_dir+os.sep+'work_'+dataset_id + + run_qc=options.get('qc',True) + run_nl=options.get('nl',True) + run_cls=options.get('cls',True) + run_lobes=options.get('lobes',True) + denoise_parameters=options.get('denoise',None) + create_unscaled=options.get('stx',{}).get('noscale',False) + + clp_dir=work_dir+os.sep+'clp' + tal_dir=work_dir+os.sep+'tal' + nl_dir =work_dir+os.sep+'nl' + cls_dir=work_dir+os.sep+'tal_cls' + qc_dir =work_dir+os.sep+'qc' + lob_dif=work_dir+os.sep+'lob' + vol_dir=work_dir+os.sep+'vol' + + # create all + create_dirs([clp_dir,tal_dir,nl_dir,cls_dir,qc_dir,lob_dif,vol_dir]) + + # files produced by pipeline + # native space + t1w_den=MriScan(prefix=clp_dir, name='den_'+dataset_id, modality='t1w', mask=None, iter=iteration) + t1w_field=MriScan(prefix=clp_dir,name='fld_'+dataset_id, modality='t1w', mask=None, iter=iteration) + t1w_nuc=MriScan(prefix=clp_dir, name='n4_' +dataset_id, modality='t1w', mask=None, iter=iteration) + t1w_clp=MriScan(prefix=clp_dir, name='clamp_'+dataset_id, modality='t1w', mask=None, iter=iteration) + # warp cls and mask back into native space + native_t1w_cls=MriScan(prefix=clp_dir, name='cls_'+dataset_id, modality='t1w', 
iter=iteration) + # stereotaxic space + t1w_tal_xfm=MriTransform(prefix=tal_dir,name='tal_xfm_'+dataset_id, iter=iteration) + t1w_tal_noscale_xfm=MriTransform(prefix=tal_dir,name='tal_noscale_xfm_'+dataset_id, iter=iteration) + unscale_xfm=MriTransform(prefix=tal_dir,name='unscale_xfm_'+dataset_id, iter=iteration) + + t1w_tal=MriScan(prefix=tal_dir, name='tal_'+dataset_id, modality='t1w', iter=iteration) + prev_t1w_xfm=None + t1w_tal_noscale=MriScan(prefix=tal_dir, name='tal_noscale_'+dataset_id,modality='t1w', iter=iteration) + + # tissue classification results + tal_cls=MriScan(prefix=cls_dir, name='cls_'+dataset_id, iter=iteration) + # lobe segmentation results + tal_lob=MriScan(prefix=lob_dif, name='lob_'+dataset_id, iter=iteration) + + # nl space + nl_xfm= MriTransform(prefix=nl_dir, name='nl_'+dataset_id, iter=iteration) + + # QC files + qc_tal= MriQCImage(prefix=qc_dir,name='tal_t1w_'+dataset_id, iter=iteration) + qc_mask=MriQCImage(prefix=qc_dir,name='tal_mask_'+dataset_id,iter=iteration) + qc_cls= MriQCImage(prefix=qc_dir,name='tal_cls_'+dataset_id, iter=iteration) + qc_lob= MriQCImage(prefix=qc_dir,name='tal_lob_'+dataset_id, iter=iteration) + qc_nu= MriQCImage(prefix=qc_dir,name='nu_'+dataset_id, iter=iteration) + + # AUX files + lob_volumes=MriAux(prefix=vol_dir,name='vol_'+dataset_id, iter=iteration) + lob_volumes_json=MriAux(prefix=vol_dir,name='vol_'+dataset_id,suffix='.json', iter=iteration) + summary_file=MriAux(prefix=work_dir,name='summary_'+dataset_id,suffix='.json', iter=iteration) + + print("Iteration step dataset:{} iteration:{}".format(dataset_id,iteration)) + + # actual processing steps + # 1. preprocessing + if prev_iter is not None: + t1w_scan.mask=prev_iter['native_t1w_cls'].mask + t1w_den.mask =prev_iter['native_t1w_cls'].mask + t1w_nuc.mask =prev_iter['native_t1w_cls'].mask + t1w_clp.mask =prev_iter['native_t1w_cls'].mask + prev_t1w_xfm =prev_iter['t1w_tal_xfm'] + print("Previous iteration:") + print(repr(prev_iter)) + + iter_summary={ + 'iter': iteration, + 'input_t1w': t1w_scan, + 'output_dir': output_dir, + 'dataset_id': dataset_id, + "t1w_field": t1w_field, + "t1w_nuc": t1w_nuc, + "t1w_clp": t1w_clp, + "t1w_tal_xfm": t1w_tal_xfm, + "t1w_tal_noscale_xfm":t1w_tal_noscale_xfm, + "t1w_tal": t1w_tal, + "t1w_tal_noscale":t1w_tal_noscale, + + "corr_t1w": corr_t1w, + "corr_t2w": corr_t2w, + "corr_pdw": corr_pdw, + } + + + if denoise_parameters is not None: + # reuse old denoising + if prev_iter is not None : + t1w_den=prev_iter.get('t1w_den',None) + t1w_den.mask=prev_iter['native_t1w_cls'].mask + else: + denoise(t1w_scan, t1w_den, parameters=denoise_parameters) + + iter_summary["t1w_den"]=t1w_den + + # non-uniformity correction + estimate_nu(t1w_den, t1w_field, + parameters=options.get('t1w_nuc',{})) + if run_qc: + draw_qc_nu(t1w_field,qc_nu) + iter_summary["qc_nu"]=qc_nu + + # apply field + apply_nu(t1w_den, t1w_field, t1w_nuc, + parameters=options.get('t1w_nuc',{})) + else: + # non-uniformity correction + estimate_nu(t1w_scan, t1w_field, + parameters=options.get('t1w_nuc',{})) + + if run_qc: + draw_qc_nu(t1w_field,qc_nu) + iter_summary["qc_nu"]=qc_nu + + # apply field + apply_nu(t1w_scan, t1w_field, t1w_nuc, + parameters=options.get('t1w_nuc',{})) + + # normalize intensity + normalize_intensity(t1w_nuc, t1w_clp, + parameters=options.get('t1w_clp',{}), + model=model_t1w) + # TODO coregister other modalities here? 
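The native-space preprocessing above always runs the same chain: denoise, estimate the non-uniformity field, apply it, then normalize intensity. A condensed sketch using the call signatures shown above; the import locations are assumed from the `from .structures import *` / `from .preprocess import *` lines, and the paths are hypothetical:

```python
from ipl.lp.structures import MriScan                          # assumed location
from ipl.lp.preprocess import denoise, estimate_nu, apply_nu   # assumed location

scan = MriScan(prefix='/tmp/out', name='subj01',     modality='t1w')
den  = MriScan(prefix='/tmp/out', name='den_subj01', modality='t1w')
fld  = MriScan(prefix='/tmp/out', name='fld_subj01', modality='t1w')
nuc  = MriScan(prefix='/tmp/out', name='n4_subj01',  modality='t1w')

denoise(scan, den, parameters={})       # optional denoising step
estimate_nu(den, fld, parameters={})    # estimate the non-uniformity field
apply_nu(den, fld, nuc, parameters={})  # apply the correction
```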
+ + # register to STX space + lin_registration(t1w_clp, model_t1w, t1w_tal_xfm, + parameters=options.get('t1w_stx',{}), + init_xfm=prev_t1w_xfm, + corr_xfm=corr_t1w) + + warp_scan(t1w_clp,model_t1w,t1w_tal,transform=t1w_tal_xfm,corr_xfm=corr_t1w) + + if run_qc: + draw_qc_stx(t1w_tal,model_outline,qc_tal) + iter_summary["qc_tal"]=qc_tal + + # run beast to create brain mask + extract_brain_beast(t1w_tal,parameters=options.get('beast'),model=model_t1w) + if run_qc: + draw_qc_mask(t1w_tal,qc_mask) + iter_summary["qc_mask"]=qc_mask + + # create unscaled version + if create_unscaled: + xfm_remove_scale(t1w_tal_xfm, t1w_tal_noscale_xfm, unscale=unscale_xfm) + iter_summary["t1w_tal_noscale_xfm"]=t1w_tal_noscale_xfm + #warp scan to create unscaled version + warp_scan(t1w_clp,model_t1w,t1w_tal_noscale,transform=t1w_tal_noscale_xfm,corr_xfm=corr_t1w) + # warping mask from tal space to unscaled tal space + warp_mask(t1w_tal, model_t1w, t1w_tal_noscale, transform=unscale_xfm) + iter_summary["t1w_tal_noscale"]=t1w_tal_noscale + + # perform non-linear registration + if run_nl: + nl_registration(t1w_tal, model_t1w, nl_xfm, + parameters=options.get('nl_reg',{})) + iter_summary["nl_xfm"]=nl_xfm + + # run tissue classification + if run_nl and run_cls: + classify_tissue(t1w_tal, tal_cls, model_name=model_name, + model_dir=model_dir, xfm=nl_xfm, + parameters=options.get('tissue_classify',{})) + iter_summary["tal_cls"]=tal_cls + if run_qc: + draw_qc_cls(t1w_tal,tal_cls,qc_cls) + iter_summary["qc_cls"]=qc_cls + + warp_cls_back(t1w_tal, tal_cls, t1w_tal_xfm, t1w_nuc, native_t1w_cls,corr_xfm=corr_t1w) + iter_summary["native_t1w_cls"]=native_t1w_cls + + # run lobe segmentation + if run_nl and run_cls and run_lobes: + segment_lobes( tal_cls, nl_xfm, tal_lob, + model=model_t1w, + lobe_atlas_dir=lobe_atlas_dir, + parameters=options.get('lobe_segment',{})) + + iter_summary["tal_lob"]=tal_lob + if run_qc: + draw_qc_lobes( t1w_tal, tal_lob,qc_lob) + iter_summary["qc_lob"]=qc_lob + + # calculate volumes + extract_volumes(tal_lob, tal_cls, t1w_tal_xfm, lob_volumes, + subject_id=subject_id, timepoint_id=timepoint_id , lobedefs=lobe_atlas_defs) + + extract_volumes(tal_lob, tal_cls, t1w_tal_xfm, lob_volumes_json, + produce_json=True,subject_id=subject_id, timepoint_id=timepoint_id,lobedefs=lobe_atlas_defs) + + iter_summary["lob_volumes"]= lob_volumes + iter_summary["lob_volumes_json"]=lob_volumes_json + + save_summary(iter_summary,summary_file.fname) + return iter_summary + + except mincError as e: + print("Exception in iter_step:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in iter_step:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/lp/pipeline.py b/ipl/lp/pipeline.py new file mode 100644 index 0000000..bb36d81 --- /dev/null +++ b/ipl/lp/pipeline.py @@ -0,0 +1,638 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date 14/08/2015 +# +# Longitudinal pipeline preprocessing + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError,temp_files + +# local stuff +from .structures import * +from .preprocess import * +from .utils import * +from .registration import * +from .resample import * +from .segment import * +from .qc import * +from .aqc import * + + +def standard_pipeline(info, + output_dir, + options =None, + work_dir=None): + """ + drop-in replacement for the standard pipeline + + Argumets: t1w_scan `MriScan` for T1w scan + output_dir string pointing to output directory + + Kyword arguments: + work_dir string pointing to work directory , default None - use output_dir + """ + try: + with temp_files() as tmp: + if options is None: + # TODO: load defaults from a settings file? + options= { + 'model': 'mni_icbm152_t1_tal_nlin_sym_09c', + 'model_dir': '/opt/minc/share/icbm152_model_09c', + + 't1w_nuc': {}, + 'add_nuc': {}, + + 't1w_clp': {}, + 'add_clp': {}, + + 't1w_stx': { + 'type':'ants', + 'resample':False, + #'options': { + #'levels': 2, + #'conf': {'2':1000,'1':1000}, + #'blur': {'2':4, '1': 2 }, + #'shrink':{'2':4, '1': 2 }, + #'convergence':'1.e-8,20', + #'cost_function':'MI', + #'cost_function_par':'1,32,random,0.3', + #'transformation':'similarity[ 0.3 ]', + #} + }, + + 'stx': { + 'noscale':False, + 'nuc': None, + }, + + 'beast': { 'beastlib': '/opt/minc/share/beast-library-1.1' }, + 'brain_nl_seg': None, + 'tissue_classify': {}, + 'lobe_segment': {}, + + 'nl': True, + 'lobes': True, + 'cls' : True, + + 'qc': { + 'nu': False, + 't1w_stx': True, + 'add_stx': True, + 'cls': True, + 'lob': True + }, + + 'aqc': { + 'nu': False, + 't1w_stx': False, + 'add_stx': False, + 'cls': False, + 'lob': False, + 'slices': 3 + }, + + 'denoise': {}, + } + + # setup parameters + subject_id = info['subject'] + timepoint_id = info.get('visit', None) + t1w_scan = info['t1w'] + add_scans = info.get('add', None) + init_t1w_lin_xfm = info.get('init_t1w_lin_xfm', None) + + + corr_t1w = info.get('corr_t1w', None) + corr_add = info.get('corr_add', None) + + dataset_id=subject_id + + if dataset_id is None: + dataset_id=t1w_scan.name + + if timepoint_id is not None: + dataset_id=dataset_id+'_'+timepoint_id + + model_name=None + model_dir=None + + #print(json.dumps(options,indent=2)) + + # generate model reference + if info.get('model_dir',None) is not None: + model_dir =info['model_dir'] + model_name=info['model'] + else: + model_dir =options['model_dir'] + model_name=options['model'] + + model_t1w=MriScan(scan=model_dir+os.sep+options['model']+'.mnc', + mask=model_dir+os.sep+options['model']+'_mask.mnc') + + model_outline=MriScan(scan=model_dir+os.sep+options['model']+'_outline.mnc', + mask=None) + + lobe_atlas_dir =options.get('lobe_atlas_dir',None) + lobe_atlas_defs=options.get('lobe_atlas_defs',None) + + if lobe_atlas_dir is None: + lobe_atlas_dir=model_dir + os.sep + model_name + '_atlas'+os.sep + + if lobe_atlas_defs is None: + lobe_atlas_defs=model_dir + os.sep + model_name + '_atlas'+os.sep+'lobe_defs.csv' + if not os.path.exists(lobe_atlas_defs): + lobe_atlas_defs=None + + if work_dir is None: + work_dir=output_dir+os.sep+'work_'+dataset_id + + run_qc = options.get('qc',{}) + run_aqc = options.get('aqc',{}) + run_nl = options.get('nl',True) + run_cls = options.get('cls',True) + run_lobes = options.get('lobes',True) + + if isinstance(run_qc, bool): # fix for old version of options + run_qc={} + if isinstance(run_aqc, bool): # fix 
for old version of options + run_aqc={} + + denoise_parameters = options.get('denoise',None) + nuc_parameters = options.get('t1w_nuc',{}) + clp_parameters = options.get('t1w_clp',{}) + stx_parameters = options.get('t1w_stx',{}) + + create_unscaled = stx_parameters.get('noscale',False) + stx_nuc = stx_parameters.get('nuc',None) + stx_disable = stx_parameters.get('disable',False) + + clp_dir = work_dir+os.sep+'clp' + tal_dir = work_dir+os.sep+'tal' + nl_dir = work_dir+os.sep+'nl' + cls_dir = work_dir+os.sep+'tal_cls' + qc_dir = work_dir+os.sep+'qc' + aqc_dir = work_dir+os.sep+'aqc' + lob_dif = work_dir+os.sep+'lob' + vol_dir = work_dir+os.sep+'vol' + + # create all + create_dirs([clp_dir,tal_dir,nl_dir,cls_dir,qc_dir,aqc_dir,lob_dif,vol_dir]) + + # files produced by pipeline + # native space + t1w_den=MriScan(prefix=clp_dir, name='den_'+dataset_id, modality='t1w', mask=None) + t1w_field=MriScan(prefix=clp_dir,name='fld_'+dataset_id, modality='t1w', mask=None) + t1w_nuc=MriScan(prefix=clp_dir, name='n4_'+dataset_id, modality='t1w', mask=None) + t1w_clp=MriScan(prefix=clp_dir, name='clamp_'+dataset_id, modality='t1w', mask=None) + + # stereotaxic space + t1w_tal_xfm=MriTransform(prefix=tal_dir,name='tal_xfm_'+dataset_id) + t1w_tal_noscale_xfm=MriTransform(prefix=tal_dir,name='tal_noscale_xfm_'+dataset_id) + unscale_xfm=MriTransform(prefix=tal_dir,name='unscale_xfm_'+dataset_id) + + t1w_tal=MriScan(prefix=tal_dir, name='tal_'+dataset_id, modality='t1w') + t1w_tal_fld=MriScan(prefix=tal_dir, name='tal_fld_'+dataset_id, modality='t1w') + + t1w_tal_noscale=MriScan(prefix=tal_dir, name='tal_noscale_'+dataset_id,modality='t1w') + + t1w_tal_par=MriAux(prefix=tal_dir,name='tal_par_t1w_'+dataset_id) + t1w_tal_log=MriAux(prefix=tal_dir,name='tal_log_t1w_'+dataset_id) + + # tissue classification results + tal_cls=MriScan(prefix=cls_dir, name='cls_'+dataset_id) + native_t1w_cls=MriScan(prefix=clp_dir, name='cls_'+dataset_id, modality='t1w') + # lobe segmentation results + tal_lob=MriScan(prefix=lob_dif, name='lob_'+dataset_id) + + # nl space + nl_xfm=MriTransform(prefix=nl_dir, name='nl_'+dataset_id) + + # QC files + qc_tal= MriQCImage(prefix=qc_dir,name='tal_t1w_'+dataset_id) + qc_mask=MriQCImage(prefix=qc_dir,name='tal_mask_'+dataset_id) + qc_cls= MriQCImage(prefix=qc_dir,name='tal_cls_'+dataset_id) + qc_lob= MriQCImage(prefix=qc_dir,name='tal_lob_'+dataset_id) + qc_nu= MriQCImage(prefix=qc_dir,name='nu_'+dataset_id) + + # QC files + aqc_tal= MriQCImage(prefix=aqc_dir,name='tal_t1w_'+dataset_id,suffix='') + aqc_mask=MriQCImage(prefix=aqc_dir,name='tal_mask_'+dataset_id,suffix='') + aqc_cls= MriQCImage(prefix=aqc_dir,name='tal_cls_'+dataset_id,suffix='') + aqc_lob= MriQCImage(prefix=aqc_dir,name='tal_lob_'+dataset_id,suffix='') + aqc_nu= MriQCImage(prefix=aqc_dir,name='nu_'+dataset_id,suffix='') + + # AUX files + lob_volumes=MriAux(prefix=vol_dir,name='vol_'+dataset_id) + lob_volumes_json=MriAux(prefix=vol_dir,name='vol_'+dataset_id,suffix='.json') + summary_file=MriAux(prefix=work_dir,name='summary_'+dataset_id,suffix='.json') + + + + iter_summary={ + 'subject': subject_id, + 'timepoint': timepoint_id, + 'dataset_id': dataset_id, + + 'input_t1w': t1w_scan, + 'input_add': add_scans, + + 'output_dir': output_dir, + + "t1w_field": t1w_field, + "t1w_nuc": t1w_nuc, + "t1w_clp": t1w_clp, + + "t1w_tal_xfm": t1w_tal_xfm, + "t1w_tal": t1w_tal, + "t1w_tal_noscale":t1w_tal_noscale, + + "corr_t1w": corr_t1w, + "corr_add": corr_add + } + + # actual processing steps + # 1. 
preprocessing + if denoise_parameters is not None: + denoise(t1w_scan, t1w_den, parameters=denoise_parameters) + t1w_den.mask=t1w_scan.mask + else: + t1w_den=t1w_scan + + iter_summary["t1w_den"]=t1w_den + + if nuc_parameters is not None: + # non-uniformity correction + print("Running N4") + + estimate_nu(t1w_den, t1w_field, + parameters=nuc_parameters, + model=model_t1w) + if run_qc is not None and run_qc.get('nu',False): + draw_qc_nu(t1w_field,qc_nu,options=run_qc) + iter_summary["qc_nu"]=qc_nu + if run_aqc is not None and run_aqc.get('nu',False): + make_aqc_nu(t1w_field,aqc_nu,options=run_aqc) + iter_summary["aqc_nu"]=aqc_nu + + # apply field + apply_nu(t1w_den, t1w_field, t1w_nuc, + parameters=nuc_parameters) + t1w_nuc.mask=t1w_den.mask + else: + t1w_nuc=t1w_den + t1w_field=None + + iter_summary["t1w_field"] = t1w_field + iter_summary["t1w_nuc"] = t1w_nuc + + ################ + # normalize intensity + + if clp_parameters is not None: + normalize_intensity(t1w_nuc, t1w_clp, + parameters=options.get('t1w_clp',{}), + model=model_t1w) + t1w_clp.mask=t1w_nuc.mask + else: + t1w_clp=t1w_nuc + + iter_summary["t1w_clp"] = t1w_clp + + #### + if add_scans is not None: + iter_summary["add_den"] = [] + iter_summary["add_field"] = [] + iter_summary["add_nuc"] = [] + iter_summary["add_clp"] = [] + iter_summary["add_xfm"] = [] + + prev_co_xfm=None + + for i,c in enumerate(add_scans): + # get add options + #TODO do it per modality + add_options = options.get('add',options) + + add_denoise_parameters = add_options.get('denoise',denoise_parameters) + add_nuc_parameters = add_options.get('nuc' ,nuc_parameters) + add_clp_parameters = add_options.get('clp' ,clp_parameters) + add_stx_parameters = add_options.get('stx' ,stx_parameters) + add_model_dir = add_options.get('model_dir',model_dir) + add_model_name = add_options.get('model' ,model_name) + + add_denoise_parameters = add_options.get('{}_denoise'.format(c.modality),add_denoise_parameters) + add_nuc_parameters = add_options.get('{}_nuc' .format(c.modality),add_nuc_parameters) + add_clp_parameters = add_options.get('{}_clp' .format(c.modality),add_clp_parameters) + add_stx_parameters = add_options.get('{}_stx' .format(c.modality),add_stx_parameters) + add_model_dir = add_options.get('{}_model_dir'.format(c.modality),add_model_dir) + add_model_name = add_options.get('{}_model' .format(c.modality),add_model_name) + + add_model = MriScan(scan=add_model_dir+os.sep+add_model_name+'.mnc', + mask=model_t1w.mask) + + den = MriScan(prefix=clp_dir, name='den_' +dataset_id, modality=c.modality, mask=None) + field = MriScan(prefix=clp_dir, name='fld_' +dataset_id, modality=c.modality, mask=None) + nuc = MriScan(prefix=clp_dir, name='n4_' +dataset_id, modality=c.modality, mask=None) + clp = MriScan(prefix=clp_dir, name='clamp_'+dataset_id, modality=c.modality, mask=None) + + add_qc_nu = MriQCImage(prefix=qc_dir, name='nu_' + c.modality+'_' + dataset_id) + add_aqc_nu= MriQCImage(prefix=aqc_dir, name='nu_' + c.modality+'_' + dataset_id) + co_xfm= MriTransform(prefix=clp_dir, name='xfm_'+ c.modality+'_' + dataset_id) + + co_par=MriAux(prefix=clp_dir, name='xfm_par_'+ c.modality+'_'+dataset_id) + co_log=MriAux(prefix=clp_dir, name='xfm_log_'+ c.modality+'_'+dataset_id) + + corr_xfm=None + if corr_add is not None: + corr_xfm=corr_add[i] + + # denoising + if add_denoise_parameters is not None: + denoise(c, den, parameters=add_denoise_parameters) + iter_summary["add_den"].append(den) + den.mask=c.mask # maybe transfer mask from t1w ? 
+                else:
+                    den=c
+
+                # non-uniformity correction
+                if add_nuc_parameters is not None:
+                    estimate_nu(den, field, parameters=add_nuc_parameters, model=add_model)
+                    if run_qc is not None and run_qc.get('nu',False):
+                        draw_qc_nu(field,add_qc_nu,options=run_qc)
+                        iter_summary["qc_nu_"+c.modality]=add_qc_nu
+                    if run_aqc is not None and run_aqc.get('nu',False):
+                        make_aqc_nu(field,add_aqc_nu,options=run_aqc)
+                        iter_summary["aqc_nu_"+c.modality]=add_aqc_nu
+                    # apply the estimated field
+                    apply_nu(den, field, nuc, parameters=add_nuc_parameters)
+                    nuc.mask=den.mask
+                else:
+                    nuc=den
+
+                iter_summary["add_field"].append(field)
+                iter_summary["add_nuc"].append(nuc)
+
+                # intensity normalization, using the per-modality parameters
+                if add_clp_parameters is not None:
+                    normalize_intensity(nuc, clp,
+                                        parameters=add_clp_parameters,
+                                        model=add_model)
+                    clp.mask=nuc.mask
+                else:
+                    clp=nuc
+
+                iter_summary["add_clp"].append(clp)
+
+                # co-registering to T1w
+                if add_stx_parameters.get('independent',False) or (prev_co_xfm is None):
+                    # run co-registration unless an earlier one can be reused
+                    intermodality_co_registration(clp, t1w_clp, co_xfm,
+                                                  parameters=add_stx_parameters,
+                                                  corr_xfm=corr_xfm,
+                                                  corr_ref=corr_t1w,
+                                                  par=co_par,
+                                                  log=co_log)
+                    prev_co_xfm=co_xfm
+                else:
+                    co_xfm=prev_co_xfm
+
+                iter_summary["add_xfm"].append(co_xfm)
+
+        if not stx_disable:
+            # register to STX space
+            lin_registration(t1w_clp, model_t1w, t1w_tal_xfm,
+                             parameters=stx_parameters,
+                             corr_xfm=corr_t1w,
+                             par=t1w_tal_par,
+                             log=t1w_tal_log,
+                             init_xfm=init_t1w_lin_xfm)
+
+        if stx_nuc is not None:
+            tmp_t1w=MriScan(prefix=tmp.tempdir, name='tal_'+dataset_id, modality='t1w')
+            tmp_t1w_n4=MriScan(prefix=tmp.tempdir, name='tal_n4_'+dataset_id, modality='t1w')
+
+            warp_scan(t1w_clp, model_t1w, tmp_t1w,
+                      transform=t1w_tal_xfm,
+                      corr_xfm=corr_t1w,
+                      parameters=stx_parameters)
+            tmp_t1w.mask=None
+            tmp_t1w_n4.mask=None
+
+            estimate_nu(tmp_t1w, t1w_tal_fld,
+                        parameters=stx_nuc)
+
+            apply_nu(tmp_t1w, t1w_tal_fld, tmp_t1w_n4,
+                     parameters=stx_nuc)
+
+            #TODO: maybe apply region-based intensity normalization here?
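+            # A possible region-based approach (illustrative sketch only, not enabled):
+            # hl.nuyl_normalize2() from ipl/minc_hl.py (added in this change) could be
+            # used here instead of the global polynomial normalization below, e.g.:
+            #   import ipl.minc_hl as hl
+            #   hl.nuyl_normalize2(tmp_t1w_n4.scan, model_t1w.scan, t1w_tal.scan,
+            #                      source_mask=t1w_tal.mask, target_mask=model_t1w.mask)
+            # which masks are appropriate here is exactly the open question in the TODO above.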
+            normalize_intensity(tmp_t1w_n4, t1w_tal,
+                                parameters=clp_parameters,
+                                model=model_t1w)
+
+            iter_summary['t1w_tal_fld']=t1w_tal_fld
+
+        else:
+            warp_scan(t1w_clp, model_t1w, t1w_tal,
+                      transform=t1w_tal_xfm,
+                      corr_xfm=corr_t1w,
+                      parameters=stx_parameters)
+
+
+        if add_scans is not None:
+            iter_summary["add_stx_xfm"] = []
+            iter_summary["add_tal_fld"] = []
+            iter_summary["add_tal"]     = []
+
+            for i,c in enumerate(add_scans):
+                add_options = options.get('add',options)
+
+                add_stx_parameters = add_options.get('stx',      stx_parameters)
+                add_clp_parameters = add_options.get('clp',      clp_parameters)
+                add_model_dir      = add_options.get('model_dir',model_dir)
+                add_model_name     = add_options.get('model',    model_name)
+
+                add_stx_parameters = add_options.get('{}_stx'.format(c.modality),      add_stx_parameters)
+                add_clp_parameters = add_options.get('{}_clp'.format(c.modality),      add_clp_parameters)
+                add_model_dir      = add_options.get('{}_model_dir'.format(c.modality),add_model_dir)
+                add_model_name     = add_options.get('{}_model'.format(c.modality),    add_model_name)
+
+                add_model=MriScan(scan=add_model_dir+os.sep+add_model_name+'.mnc',
+                                  mask=model_t1w.mask)
+
+                add_stx_nuc = add_stx_parameters.get('nuc',None)
+
+                stx_xfm=MriTransform(prefix=tal_dir, name='xfm_'+c.modality+'_'+dataset_id)
+
+                clp=iter_summary["add_clp"][i]
+                xfm=iter_summary["add_xfm"][i]
+                tal_fld=MriScan(prefix=tal_dir, name='tal_fld_'+dataset_id, modality=c.modality)
+                tal=MriScan(prefix=tal_dir, name='tal_'+dataset_id, modality=c.modality)
+
+                xfm_concat( [xfm,t1w_tal_xfm], stx_xfm )
+                iter_summary["add_stx_xfm"].append(stx_xfm)
+
+                corr_xfm=None
+                if corr_add is not None:
+                    corr_xfm=corr_add[i]
+
+                if add_stx_nuc is not None:
+                    tmp_=MriScan(prefix=tmp.tempdir, name='tal_'+dataset_id, modality=c.modality)
+                    tmp_n4=MriScan(prefix=tmp.tempdir, name='tal_n4_'+dataset_id, modality=c.modality)
+
+                    warp_scan(clp, model_t1w, tmp_,
+                              transform=stx_xfm,
+                              corr_xfm=corr_xfm,
+                              parameters=add_stx_parameters)
+
+                    tmp_.mask=None
+                    tmp_n4.mask=None
+
+                    estimate_nu(tmp_, tal_fld,
+                                parameters=add_stx_nuc)
+
+                    apply_nu(tmp_, tal_fld, tmp_n4, parameters=add_stx_nuc)
+
+                    #TODO: maybe apply region-based intensity normalization here?
+ normalize_intensity(tmp_n4, tal, + parameters=add_clp_parameters, + model=add_model) + + iter_summary["add_tal_fld"].append(tal_fld) + + else: + warp_scan(clp,model_t1w, tal, + transform=stx_xfm, + corr_xfm=corr_xfm, + parameters=add_stx_parameters) + + iter_summary["add_tal"].append(tal) + + if run_qc is not None and run_qc.get('t1w_stx',True): + draw_qc_stx(t1w_tal,model_outline,qc_tal,options=run_qc) + iter_summary["qc_tal"]=qc_tal + + if add_scans is not None: + iter_summary["qc_add"]=[] + for i,c in enumerate(add_scans): + qc=MriQCImage(prefix=qc_dir,name='tal_'+c.modality+'_'+dataset_id) + if run_qc is not None and run_qc.get('add_stx',True): + draw_qc_add(t1w_tal,iter_summary["add_tal"][i],qc,options=run_qc) + iter_summary["qc_add"].append(qc) + + if run_aqc is not None and run_aqc.get('t1w_stx',True): + make_aqc_stx(t1w_tal,model_outline,aqc_tal,options=run_aqc) + iter_summary["aqc_tal"]=aqc_tal + + if add_scans is not None: + iter_summary["aqc_add"]=[] + for i,c in enumerate(add_scans): + aqc=MriQCImage(prefix=aqc_dir,name='tal_'+c.modality+'_'+dataset_id) + if run_aqc is not None and run_aqc.get('add_stx',True): + make_aqc_add(t1w_tal,iter_summary["add_tal"][i],aqc,options=run_aqc) + iter_summary["aqc_add"].append(aqc) + + # run beast to create brain mask + beast_parameters=options.get('beast',None) + if beast_parameters is not None: + extract_brain_beast(t1w_tal,parameters=beast_parameters,model=model_t1w) + if run_qc is not None and run_qc.get('beast',True): + draw_qc_mask(t1w_tal,qc_mask,options=run_qc) + iter_summary["qc_mask"]=qc_mask + if run_aqc is not None and run_aqc.get('beast',True): + make_aqc_mask(t1w_tal,aqc_mask,options=run_aqc) + iter_summary["aqc_mask"]=aqc_mask + + else: + #extract_brain_nlreg(t1w_tal,parameters=options.get('brain_nl_seg',{}),model=model_t1w) + # if we have initial mask, keep using that! 
+ if t1w_clp.mask is not None: + warp_mask(t1w_clp,model_t1w, t1w_tal, + transform=t1w_tal_xfm, + corr_xfm=corr_t1w, + parameters=options.get('t1w_stx',{})) + t1w_tal.mask=None + pass + + + # create unscaled version + if create_unscaled: + xfm_remove_scale(t1w_tal_xfm, t1w_tal_noscale_xfm, unscale=unscale_xfm) + iter_summary["t1w_tal_noscale_xfm"]=t1w_tal_noscale_xfm + #warp scan to create unscaled version + warp_scan(t1w_clp, model_t1w, t1w_tal_noscale, transform=t1w_tal_noscale_xfm, corr_xfm=corr_t1w) + # warping mask from tal space to unscaled tal space + warp_mask(t1w_tal, model_t1w, t1w_tal_noscale, transform=unscale_xfm) + iter_summary["t1w_tal_noscale"]=t1w_tal_noscale + + # perform non-linear registration + if run_nl: + nl_registration(t1w_tal, model_t1w, nl_xfm, + parameters=options.get('nl_reg',{})) + iter_summary["nl_xfm"]=nl_xfm + + # run tissue classification + if run_nl and run_cls: + classify_tissue(t1w_tal, tal_cls, model_name=model_name, + model_dir=model_dir, xfm=nl_xfm, + parameters=options.get('tissue_classify',{})) + + warp_cls_back (t1w_tal, tal_cls, t1w_tal_xfm, t1w_nuc, native_t1w_cls,corr_xfm=corr_t1w) + warp_mask_back(t1w_tal, t1w_tal_xfm, t1w_nuc, native_t1w_cls,corr_xfm=corr_t1w) + iter_summary["native_t1w_cls"]=native_t1w_cls + iter_summary["tal_cls"]=tal_cls + if run_qc is not None and run_qc.get('cls',True): + draw_qc_cls(t1w_tal,tal_cls,qc_cls,options=run_qc) + if run_aqc is not None and run_aqc.get('cls',True): + make_aqc_cls(t1w_tal,tal_cls,aqc_cls,options=run_aqc) + else: + # just warp mask back + if beast_parameters is not None: + warp_mask_back(t1w_tal, t1w_tal_xfm, t1w_nuc, native_t1w_cls,corr_xfm=corr_t1w) + native_t1w_cls.scan=None + iter_summary["tal_cls"]=tal_cls + + + # run lobe segmentation + if run_nl and run_cls and run_lobes: + segment_lobes( tal_cls, nl_xfm, tal_lob, + model=model_t1w, + lobe_atlas_dir=lobe_atlas_dir, + parameters=options.get('lobe_segment',{})) + iter_summary["tal_lob"]=tal_lob + + if run_qc is not None and run_qc.get('lob',True): + draw_qc_lobes( t1w_tal, tal_lob,qc_lob,options=run_qc) + iter_summary["qc_lob"]=qc_lob + if run_aqc is not None and run_aqc.get('lob',True): + make_aqc_lobes( t1w_tal, tal_lob,aqc_lob,options=run_aqc) + iter_summary["aqc_lob"]=aqc_lob + + # calculate volumes + extract_volumes(tal_lob, tal_cls, t1w_tal_xfm, lob_volumes, + subject_id=subject_id, timepoint_id=timepoint_id , lobedefs=lobe_atlas_defs) + + extract_volumes(tal_lob, tal_cls, t1w_tal_xfm, lob_volumes_json, + produce_json=True,subject_id=subject_id, timepoint_id=timepoint_id,lobedefs=lobe_atlas_defs) + + iter_summary["lob_volumes"]= lob_volumes + iter_summary["lob_volumes_json"]=lob_volumes_json + + save_summary(iter_summary,summary_file.fname) + return iter_summary + + except mincError as e: + print("Exception in iter_step:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in iter_step:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/lp/preprocess.py b/ipl/lp/preprocess.py new file mode 100644 index 0000000..b31edb6 --- /dev/null +++ b/ipl/lp/preprocess.py @@ -0,0 +1,175 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date 14/08/2015 +# +# Longitudinal pipeline preprocessing + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +import ipl.minc_hl as hl + +def fix_spacing(scan): + """make sure all spacing in 3D volume are regular + + Arguments: `scan` scan to be fixed + """ + with mincTools() as minc: + for s in ['xspace', 'yspace', 'zspace']: + spacing = minc.query_attribute( scan, s + ':spacing' ) + + if spacing.count( 'irregular' ): + minc.set_attribute( scan, s + ':spacing', 'regular__' ) + return scan + +def denoise(in_scan, out_scan, parameters={}): + """Apply patch-based denoising + + Arguments: in `MriScan` input + out `MriScan` output + parameters `dict` of parameters + """ + use_anlm=parameters.get('anlm', False ) + denoise_beta=parameters.get('beta', 0.7 ) + patch=parameters.get('patch', 2 ) + search=parameters.get('search', 2 ) + regularize=parameters.get('regularize', None ) + with mincTools() as minc: + if use_anlm: + minc.anlm( in_scan.scan, out_scan.scan, beta=denoise_beta, patch=patch, search=search ) + else: + minc.nlm( in_scan.scan, out_scan.scan, beta=denoise_beta, patch=patch, search=search ) + # TODO: maybe USE anlm sometimes? + + +def estimate_nu(in_scan, out_field, parameters={},model=None): + """Estimate non-uniformity correction field + + Arguments: in `MriScan` input + out_field `MriScan` output + parameters `dict` of parameters + """ + with mincTools() as minc: + # + #print("Running N4, parameters={}".format(repr(parameters))) + #traceback.print_stack() + weight_mask=None + init_xfm=None # TODO: maybe add as a parameter, in case manual registration was done? + if in_scan.mask is not None and os.path.exists(in_scan.mask): + weight_mask=in_scan.mask + else: + #TODO: maybe use some kind of threshold here instead of built-in? 
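+            # One way the TODO could be addressed (sketch only, not active code):
+            # derive a rough weight mask with the same bimodal (Otsu-like) binarization
+            # helper that the 'use_stx_mask' branch below relies on, e.g.:
+            #   minc.binary_morphology(in_scan.scan, '', minc.tmp('nu_weight.mnc'),
+            #                          binarize_bimodal=True)
+            #   weight_mask=minc.tmp('nu_weight.mnc')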
+ pass + + if not minc.checkfiles(inputs=[in_scan.scan], outputs=[out_field.scan]): + return + + if parameters.get('disable',False): + minc.calc([in_scan.scan],'1.0',out_field.scan,datatype='-float') + else: + if parameters.get('use_stx_mask',False) and model is not None: + # method from Gabriel + minc.winsorize_intensity(in_scan.scan,minc.tmp('trunc_t1.mnc')) + minc.binary_morphology(minc.tmp('trunc_t1.mnc'),'',minc.tmp('otsu_t1.mnc'),binarize_bimodal=True) + minc.defrag(minc.tmp('otsu_t1.mnc'),minc.tmp('otsu_defrag_t1.mnc')) + minc.autocrop(minc.tmp('otsu_defrag_t1.mnc'),minc.tmp('otsu_defrag_expanded_t1.mnc'),isoexpand='50mm') + minc.binary_morphology(minc.tmp('otsu_defrag_expanded_t1.mnc'),'D[25] E[25]',minc.tmp('otsu_expanded_closed_t1.mnc')) + minc.resample_labels(minc.tmp('otsu_expanded_closed_t1.mnc'),minc.tmp('otsu_closed_t1.mnc'),like=minc.tmp('trunc_t1.mnc')) + + minc.calc([minc.tmp('trunc_t1.mnc'),minc.tmp('otsu_closed_t1.mnc')], 'A[0]*A[1]', minc.tmp('trunc_masked_t1.mnc')) + minc.calc([in_scan.scan,minc.tmp('otsu_closed_t1.mnc')],'A[0]*A[1]' ,minc.tmp('masked_t1.mnc')) + + minc.linear_register( minc.tmp('trunc_masked_t1.mnc'), model.scan, minc.tmp('stx.xfm'), + init_xfm=init_xfm, objective='-nmi',conf='bestlinreg_new') + + minc.resample_labels( model.mask, minc.tmp('brainmask_t1.mnc'), + transform=minc.tmp('stx.xfm'), invert_transform=True, + like=minc.tmp('otsu_defrag_t1.mnc') ) + + minc.calc([minc.tmp('otsu_defrag_t1.mnc'),minc.tmp('brainmask_t1.mnc')],'A[0]*A[1]',minc.tmp('weightmask_t1.mnc')) + + minc.n4(minc.tmp('masked_t1.mnc'), + output_field=out_field.scan, + shrink=parameters.get('shrink',4), + iter=parameters.get('iter','200x200x200x200'), + weight_mask=minc.tmp('weightmask_t1.mnc'), + mask=minc.tmp('otsu_closed_t1.mnc'), + distance=parameters.get('distance',200), + datatype=parameters.get('datatype',None) + ) + else: + minc.n4(in_scan.scan, + output_field=out_field.scan, + weight_mask=weight_mask, + shrink=parameters.get('shrink',4), + datatype=parameters.get('datatype',None), + iter=parameters.get('iter','200x200x200'), + distance=parameters.get('distance',200)) + +def apply_nu(in_scan, field, out_scan, parameters={}): + """ Apply non-uniformity correction + """ + with mincTools() as minc: + if not minc.checkfiles(inputs=[field.scan],outputs=[out_scan.scan]): + return + minc.resample_smooth(field.scan,minc.tmp('fld.mnc'),like=in_scan.scan,order=1) + minc.calc([in_scan.scan,minc.tmp('fld.mnc')], + 'A[0]/A[1]', out_scan.scan) + + +def normalize_intensity(in_scan, out_scan, + parameters={}, + model=None): + """ Perform global intensity scale normalization + """ + # TODO: make output exp file + with mincTools() as minc: + + if not minc.checkfiles(inputs=[in_scan.scan],outputs=[out_scan.scan]): + return + + order = parameters.get('order',1) + _model=None + + # + if model is None: + _model = parameters.get('model',None) + else: + _model = model.scan + + if _model is None: + raise mincError('Need model ') + + scan_mask = None + model_mask = None + + if in_scan.mask is not None and model is not None: + scan_mask = in_scan.mask + model_mask = model.mask + + if parameters.get('disable',False): + # just bypass + shutil.copyfile(in_scan.scan,out_scan.scan) + elif parameters.get('nuyl',False): + minc.nuyl_normalize(in_scan.scan,_model,out_scan.scan, + source_mask=scan_mask, + target_mask=model_mask) + elif parameters.get('nuyl2',False): + hl.nuyl_normalize2( in_scan.scan,_model,out_scan.scan, + #source_mask=input_mask,target_mask=model_mask, + 
fwhm=parameters.get('nuyl2_fwhm',2.0), + iterations=parameters.get('nuyl2_iter',4), + ) + else: + minc.volume_pol(in_scan.scan, _model, out_scan.scan, + order=order, + source_mask=scan_mask, + target_mask=model_mask) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/lp/qc.py b/ipl/lp/qc.py new file mode 100644 index 0000000..4af72ac --- /dev/null +++ b/ipl/lp/qc.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date 14/08/2015 +# +# Longitudinal pipeline resampling + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +from ipl.minc_qc import qc,qc_field_contour + + +def draw_qc_stx(in_scan,in_outline,out_qc,options={}): + if options.get('big'): + with mincTools() as m: + m.qc(in_scan.scan,out_qc.fname, + big=True,mask=in_outline.scan, + mask_range=[0.0,1.0]) + else: + qc(in_scan.scan,out_qc.fname, + mask=in_outline.scan, + mask_range=[0.0,1.0], + mask_bg=0.5, use_max=True) + + +def draw_qc_mask(in_scan,out_qc,options={}): + if options.get('big'): + with mincTools() as m: + m.qc(in_scan.scan,out_qc.fname, + big=True,mask=in_scan.mask, + mask_range=[0.0,1.0]) + else: + qc(in_scan.scan,out_qc.fname, + mask=in_scan.mask, + mask_range=[0.0,1.0], + mask_bg=0.5, use_max=True) + +def draw_qc_cls(in_scan,in_cls,out_qc,options={}): + if options.get('big'): + with mincTools() as m: + m.qc(in_scan.scan,out_qc.fname, + big=True,mask=in_cls.scan, + mask_range=[0.0,3.5], + spectral_mask=True) + else: + qc(in_scan.scan,out_qc.fname, + mask=in_cls.scan, + mask_range=[0.0,3.5], + mask_cmap='spectral', + mask_bg=0.5, use_max=True) + + +def draw_qc_lobes(in_scan,in_lobes,out_qc,options={}): + if options.get('big'): + with mincTools() as m: + m.qc(in_scan.scan,out_qc.fname, + big=True,mask=in_lobes.scan, + spectral_mask=True) + else: + qc(in_scan.scan,out_qc.fname, + mask=in_lobes.scan, + mask_cmap='spectral', + mask_bg=0.5, use_max=True) + + +def draw_qc_add(in_scan1,in_scan2,out_qc,options={}): + if options.get('big'): + with mincTools() as m: + m.qc(in_scan1.scan,out_qc.fname, + big=True,red=True, + mask=in_scan2.scan, + green_mask=True) + else: + qc(in_scan1.scan,out_qc.fname, + mask=in_scan2.scan, + image_cmap='red', + mask_cmap='green', + mask_bg=0.5, use_max=True) + +def draw_qc_nu(in_field,out_qc,options={}): + qc_field_contour(in_field.scan,out_qc.fname, + image_cmap='jet') + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80 diff --git a/ipl/lp/registration.py b/ipl/lp/registration.py new file mode 100644 index 0000000..3b191e7 --- /dev/null +++ b/ipl/lp/registration.py @@ -0,0 +1,333 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV
+# @date 14/08/2015
+#
+# Longitudinal pipeline registration
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+import ipl.registration
+import ipl.ants_registration
+import ipl.elastix_registration
+
+
+def lin_registration(scan, model, out_xfm, init_xfm=None, parameters={}, corr_xfm=None, par=None, log=None):
+    """Perform linear registration
+
+    """
+    with mincTools() as m:
+
+        if not m.checkfiles(inputs=[scan.scan,model.scan],outputs=[out_xfm.xfm]):
+            return
+
+        use_inverse    = parameters.get('inverse',   False)
+        lin_mode       = parameters.get('type',      'ants')
+        options        = parameters.get('options',   None)
+        downsample     = parameters.get('downsample',None)
+        close          = parameters.get('close',     False)
+        resample       = parameters.get('resample',  False)
+        objective      = parameters.get('objective', '-xcorr')
+        use_model_mask = parameters.get('use_model_mask',False)
+
+        print("Running lin_registration with parameters:{}".format(repr(parameters)))
+
+        _init_xfm=None
+        _in_scan = scan.scan
+        _in_mask = scan.mask
+
+        _in_model      = model.scan
+        _in_model_mask = model.mask
+        _out_xfm       = out_xfm.xfm
+
+        if init_xfm is not None:
+            _init_xfm=init_xfm.xfm
+
+        if corr_xfm is not None:
+            # apply distortion correction before linear registration,
+            # but then don't include it into the linear XFM
+            _in_scan=m.tmp('corr_scan.mnc')
+            m.resample_smooth(scan.scan,_in_scan, transform=corr_xfm.xfm)
+            if scan.mask is not None:
+                _in_mask=m.tmp('corr_scan_mask.mnc')
+                m.resample_labels(scan.mask,_in_mask, transform=corr_xfm.xfm, like=_in_scan)
+
+        if init_xfm is not None and resample:
+            #_init_xfm=init_xfm.xfm
+            _init_xfm=None
+            _out_xfm=m.tmp('out.xfm')
+            m.resample_smooth(_in_scan,m.tmp('scan_scan.mnc'), transform=init_xfm.xfm, like=model.scan)
+            _in_scan=m.tmp('scan_scan.mnc')
+            if scan.mask is not None:
+                m.resample_labels(scan.mask, m.tmp('scan_mask.mnc'), transform=init_xfm.xfm, like=model.scan)
+                _in_mask=m.tmp('scan_mask.mnc')
+
+        print("lin_registration: mode={} init_xfm={} scan_mask={} use_inverse={}".format(lin_mode,_init_xfm,scan.mask,use_inverse))
+
+        _model_mask=None
+
+        # use the model mask even if the scan mask is unspecified
+        # (mostly to run experiments)
+        if use_model_mask or _in_mask is not None:
+            _model_mask=model.mask
+
+        _save_out_xfm=_out_xfm
+        if use_inverse:
+            # swap source and target; the resulting transform is inverted at the end
+            _save_out_xfm=_out_xfm
+            _out_xfm=m.tmp('inverted_out.xfm')
+
+            save_in_scan=_in_scan
+            save_in_mask=_in_mask
+
+            _in_scan=_in_model
+            _in_mask=_in_model_mask
+
+            _in_model=save_in_scan
+            _in_model_mask=save_in_mask
+
+
+        if lin_mode=='ants':
+            ipl.ants_registration.linear_register_ants2(
+                _in_scan,
+                _in_model,
+                _out_xfm,
+                source_mask=_in_mask,
+                target_mask=_model_mask,
+                init_xfm=_init_xfm,
+                parameters=options,
+                close=close,
+                downsample=downsample,
+                )
+        elif lin_mode=='elx':
+            output_par=None
+            output_log=None
+
+            if par is not None:
+                output_par=par.fname
+
+            if log is not None:
+                output_log=log.fname
+
+            ipl.elastix_registration.register_elastix(
+                _in_scan,
+                _in_model,
+                output_xfm=_out_xfm,
+                source_mask=_in_mask,
+                target_mask=_model_mask,
+                init_xfm=_init_xfm,
+                downsample=downsample,
+                parameters=options,
+                nl=False,
+                output_log=output_log,
+                output_par=output_par
+                )
+        elif lin_mode=='mritotal':
+            # going to use mritotal directly
+            #m.command()
+            model_name=os.path.basename(model.scan).rsplit('.mnc',1)[0]
+            model_dir=os.path.dirname(model.scan)
+            # TODO: add more options?
+            cmd=['mritotal','-model',model_name,'-modeldir',model_dir, _in_scan, _out_xfm]
+            if options is not None:
+                cmd.extend(options)
+
+            m.command(cmd,
+                      inputs=[_in_scan],
+                      outputs=[_out_xfm])
+        else:
+            ipl.registration.linear_register(
+                _in_scan,
+                _in_model,
+                _out_xfm,
+                source_mask=_in_mask,
+                target_mask=_model_mask,
+                init_xfm=_init_xfm,
+                objective=objective,
+                downsample=downsample,
+                conf=options,
+                parameters=lin_mode
+                )
+
+        if use_inverse:  # need to invert the transform back
+            m.xfminvert(_out_xfm,_save_out_xfm)
+            _out_xfm=_save_out_xfm
+
+        if init_xfm is not None and resample:
+            m.xfmconcat([init_xfm.xfm,_out_xfm],out_xfm.xfm)
+
+
+def intermodality_co_registration(scan, ref, out_xfm,
+                                  init_xfm=None,
+                                  parameters={},
+                                  corr_xfm=None,
+                                  corr_ref=None,
+                                  par=None, log=None):
+    with mincTools() as m:
+
+        if not m.checkfiles(inputs=[scan.scan,ref.scan],outputs=[out_xfm.xfm]):
+            return
+
+        lin_mode   = parameters.get('type',      'ants')
+        options    = parameters.get('options',   None)
+        downsample = parameters.get('downsample',None)
+        close      = parameters.get('close',     True)
+        resample   = parameters.get('resample',  False)
+        objective  = parameters.get('objective', '-nmi')
+        nl         = parameters.get('nl',        False)
+
+        print("Running intermodality_co_registration with parameters:{}".format(repr(parameters)))
+
+        _init_xfm=None
+        _in_scan=scan.scan
+        _in_mask=scan.mask
+        _out_xfm=out_xfm.xfm
+
+        if init_xfm is not None:
+            _init_xfm=init_xfm.xfm
+
+        if corr_xfm is not None:
+            # apply distortion correction before linear registration,
+            # but then don't include it into the linear XFM
+            _in_scan=m.tmp('corr_scan.mnc')
+            m.resample_smooth(scan.scan,_in_scan, transform=corr_xfm.xfm)
+            if scan.mask is not None:
+                _in_mask=m.tmp('corr_scan_mask.mnc')
+                m.resample_labels(scan.mask,_in_mask, transform=corr_xfm.xfm, like=_in_scan)
+
+        if init_xfm is not None and resample:
+            #_init_xfm=init_xfm.xfm
+            _init_xfm=None
+            _out_xfm=m.tmp('out.xfm')
+            m.resample_smooth(_in_scan,m.tmp('scan_scan.mnc'), transform=init_xfm.xfm, like=ref.scan)
+            _in_scan=m.tmp('scan_scan.mnc')
+            if scan.mask is not None:
+                m.resample_labels(scan.mask, m.tmp('scan_mask.mnc'), transform=init_xfm.xfm, like=ref.scan)
+                _in_mask=m.tmp('scan_mask.mnc')
+
+        print("intermodality_co_registration: mode={} init_xfm={} scan_mask={}".format(lin_mode,_init_xfm,scan.mask))
+
+        if lin_mode=='ants':
+            ipl.ants_registration.linear_register_ants2(
+                _in_scan,
+                ref.scan,
+                _out_xfm,
+                source_mask=_in_mask,
+                target_mask=ref.mask,
+                init_xfm=_init_xfm,
+                parameters=options,
+                close=close,
+                downsample=downsample,
+                )
+        elif lin_mode=='elx':
+            output_par=None
+            output_log=None
+
+            if par is not None:
+                output_par=par.fname
+
+            if log is not None:
+                output_log=log.fname
+
+            ipl.elastix_registration.register_elastix(
+                _in_scan,
+                ref.scan,
+                output_xfm=_out_xfm,
+                source_mask=_in_mask,
+                target_mask=ref.mask,
+                init_xfm=_init_xfm,
+                downsample=downsample,
+                parameters=options,
+                nl=nl,
+                output_log=output_log,
+                output_par=output_par
+                )
+        else:
+            ipl.registration.linear_register(
+                _in_scan,
+                ref.scan,
+                _out_xfm,
+                source_mask=_in_mask,
+                target_mask=ref.mask,
+                init_xfm=_init_xfm,
+                objective=objective,
+                downsample=downsample,
+                conf=options,
+                parameters=lin_mode,
+                close=close
+                )
+
+        if init_xfm is not None and resample:
+            m.xfmconcat([init_xfm.xfm,_out_xfm],out_xfm.xfm)
+
+
+def nl_registration(scan, model, out_xfm, init_xfm=None, parameters={}):
+    """Perform non-linear registration
+
+    """
+    nl_mode=parameters.get('type','ants')
+    options=parameters.get('options',None)
+    downsample=parameters.get('downsample',None)
+    level=parameters.get('level',2)
+    start=parameters.get('start_level',32)
+
+    with mincTools() as m:
+
+        if not m.checkfiles(inputs=[scan.scan,model.scan],outputs=[out_xfm.xfm]):
+            return
+
+        _init_xfm=None
+        if init_xfm is not None:
+            _init_xfm=init_xfm.xfm
+
+        if nl_mode=='ants':
+            ipl.ants_registration.non_linear_register_ants2(
+                scan.scan,
+                model.scan,
+                out_xfm.xfm,
+                source_mask=scan.mask,
+                target_mask=model.mask,
+                init_xfm=_init_xfm,
+                parameters=options,
+                downsample=downsample,
+                level=level,
+                start=start,
+                )
+        elif nl_mode=='elx':
+            ipl.elastix_registration.register_elastix(
+                scan.scan,
+                model.scan,
+                output_xfm=out_xfm.xfm,
+                source_mask=scan.mask,
+                target_mask=model.mask,
+                init_xfm=_init_xfm,
+                downsample=downsample,
+                parameters=options,
+                nl=True
+                )
+        else:
+            objective='-xcorr'
+            if options is not None:
+                objective=options.get('objective','-xcorr')
+
+            ipl.registration.non_linear_register_full(
+                scan.scan,
+                model.scan,
+                out_xfm.xfm,
+                source_mask=scan.mask,
+                target_mask=model.mask,
+                init_xfm=_init_xfm,
+                objective=objective,
+                downsample=downsample,
+                parameters=options,
+                level=level,
+                start=start,
+                )
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/lp/resample.py b/ipl/lp/resample.py
new file mode 100644
index 0000000..28ed8c6
--- /dev/null
+++ b/ipl/lp/resample.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date 14/08/2015
+#
+# Longitudinal pipeline resampling
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+def warp_scan(sample, reference, output_scan, transform=None, parameters={}, corr_xfm=None):
+    with mincTools() as m:
+        xfm=None
+        xfms=[]
+
+        if corr_xfm is not None:
+            xfms.append(corr_xfm.xfm)
+        if transform is not None:
+            xfms.append(transform.xfm)
+
+        if len(xfms)==0:
+            xfm=None
+        elif len(xfms)==1:
+            xfm=xfms[0]
+        else:
+            m.xfmconcat(xfms,m.tmp('concatenated.xfm'))
+            xfm=m.tmp('concatenated.xfm')
+
+        resample_order=parameters.get('resample_order',4)
+
+        m.resample_smooth(sample.scan, output_scan.scan,
+                          transform=xfm, like=reference.scan,
+                          order=resample_order)
+
+
+def warp_mask(sample, reference, output_scan, transform=None, parameters={}, corr_xfm=None):
+    with mincTools() as m:
+        xfm=None
+        xfms=[]
+
+        if corr_xfm is not None:
+            xfms.append(corr_xfm.xfm)
+        if transform is not None:
+            xfms.append(transform.xfm)
+
+        if len(xfms)==0:
+            xfm=None
+        elif len(xfms)==1:
+            xfm=xfms[0]
+        else:
+            m.xfmconcat(xfms,m.tmp('concatenated.xfm'))
+            xfm=m.tmp('concatenated.xfm')
+
+        resample_order=parameters.get('resample_order',4)
+        m.resample_labels(sample.mask, output_scan.mask, transform=xfm, like=reference.scan, order=resample_order)
+
+
+def warp_cls_back(t1w_tal, tal_cls, t1w_tal_xfm, reference, native_t1w_cls, parameters={}, corr_xfm=None):
+    with mincTools() as m:
+        resample_order=parameters.get('resample_order',0)
+        resample_baa  =parameters.get('resample_baa',False)
+
+        xfm=t1w_tal_xfm.xfm
+        if corr_xfm is not None:
+            m.xfmconcat([corr_xfm.xfm,t1w_tal_xfm.xfm],m.tmp('concatenated.xfm'))
+            xfm=m.tmp('concatenated.xfm')
+
+        m.resample_labels(tal_cls.scan, native_t1w_cls.scan,
+                          transform=xfm,
+                          like=reference.scan,
+                          order=resample_order,
+                          baa=resample_baa,
+                          invert_transform=True)
+
+def warp_mask_back(t1w_tal, t1w_tal_xfm, reference, native_t1w_cls, parameters={}, corr_xfm=None):
+    with mincTools() as m:
+        resample_order=parameters.get('resample_order',0)
+        resample_baa  =parameters.get('resample_baa',False)
+
+        xfm=t1w_tal_xfm.xfm
+        if corr_xfm is not None:
+            m.xfmconcat([corr_xfm.xfm,t1w_tal_xfm.xfm],m.tmp('concatenated.xfm'))
+            xfm=m.tmp('concatenated.xfm')
+
+        m.resample_labels(t1w_tal.mask, native_t1w_cls.mask,
+                          transform=xfm,
+                          like=reference.scan,
+                          order=resample_order,
+                          baa=resample_baa,
+                          invert_transform=True)
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/lp/segment.py b/ipl/lp/segment.py
new file mode 100644
index 0000000..b4fdb48
--- /dev/null
+++ b/ipl/lp/segment.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date 14/08/2015
+#
+# Longitudinal pipeline segmentation
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+from .registration import nl_registration
+from .structures import MriTransform
+
+def extract_brain_beast(scan, parameters={}, model=None):
+    """extract brain using BEaST """
+    with mincTools() as m:
+        # TODO: come up with a better default?
+        beast_lib=parameters.get('beastlib','/opt/minc/share/beast-library-1.1')
+        beast_res=parameters.get('resolution',2)
+        beast_mask=beast_lib+os.sep+'union_mask.mnc'
+
+        if m.checkfiles(inputs=[scan.scan], outputs=[scan.mask]):
+            tmp_in=m.tmp('like_beast.mnc')
+            m.resample_smooth(scan.scan,tmp_in,like=beast_mask)
+            # run additional intensity normalization
+            if parameters.get('normalize',True) and model is not None:
+                m.volume_pol(tmp_in,model.scan,m.tmp('like_beast_norm.mnc'))
+                tmp_in=m.tmp('like_beast_norm.mnc')
+
+            # run beast
+            beast_v10_template = beast_lib + os.sep \
+                + 'intersection_mask.mnc'
+            beast_v10_margin = beast_lib + os.sep + 'margin_mask.mnc'
+
+            beast_v10_intersect = beast_lib + os.sep \
+                + 'intersection_mask.mnc'
+
+            # perform segmentation
+            m.run_mincbeast(tmp_in,m.tmp('beast_mask.mnc'),
+                            beast_lib=beast_lib, beast_res=beast_res)
+
+            m.resample_labels(m.tmp('beast_mask.mnc'),scan.mask,like=scan.scan)
+
+def extract_brain_nlreg(scan, parameters={}, model=None):
+    """extract brain using non-linear registration to the template"""
+    with mincTools() as m:
+        if m.checkfiles(inputs=[scan.scan], outputs=[scan.mask]):
+            tmp_xfm=MriTransform(prefix=m.tempdir, name='nl_'+scan.name)
+            nl_registration(scan, model, tmp_xfm, parameters=parameters)
+            # warp template atlas to subject's scan
+            m.resample_labels(model.mask,scan.mask, transform=tmp_xfm.xfm, invert_transform=True)
+
+
+def classify_tissue(scan, cls,
+                    model_name=None,
+                    model_dir=None,
+                    parameters={},
+                    xfm=None ):
+    """Tissue classification
+    """
+    with mincTools() as m:
+        m.classify_clean([scan.scan], cls.scan,
+                         mask=scan.mask, model_dir=model_dir,
+                         model_name=model_name, xfm=xfm.xfm)
+
+
+def segment_lobes(tal_cls, nl_xfm, tal_lob, model=None, lobe_atlas_dir=None,
+                  parameters={}):
+    """Lobe segmentation
+    """
+    with mincTools() as m:
+        m.lobe_segment(tal_cls.scan,tal_lob.scan,
+                       nl_xfm=nl_xfm.xfm, template=model.scan,
+                       atlas_dir=lobe_atlas_dir)
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/lp/structures.py b/ipl/lp/structures.py
new file mode 100644
index 0000000..f35f423
--- /dev/null
+++ b/ipl/lp/structures.py
@@ -0,0 +1,265 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S.
FONOV +# @date +# + +# Longitudinal pipeline data structures + +import shutil +import os +import sys +import traceback +import json + + +class MriScan(object): + """Represents a 3D volume as an object on disk + (optionally) a mask + """ + def __init__(self, + prefix = None, name = None, modality = None, + iter = None, scan = None, mask = '', + protect = False ): + self.prefix=prefix + self.name=name + self.iter=iter + self.protect=protect + self.modality=modality + + if scan is None : + if self.iter is None: + if self.modality is not None: self.scan=self.prefix+os.sep+self.name+'_'+self.modality+'.mnc' + else: self.scan=self.prefix+os.sep+self.name+'.mnc' + else: + if self.modality is not None: self.scan=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_'+self.modality+'.mnc' + else: self.scan=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_'+'.mnc' + else: + self.scan=scan + + if mask=='': + if self.iter is None: + self.mask=self.prefix+os.sep+self.name+'_mask.mnc' + else: + self.mask=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_mask.mnc' + else: + self.mask=mask + + if self.name is None: + self.name=os.path.basename(self.scan) + + if self.prefix is None: + self.prefix=os.path.dirname(self.scan) + + def __repr__(self): + return 'MriScan(prefix="{}", name="{}", modality="{}", iter="{}",scan="{}",mask="{}",protect={})'.\ + format(self.prefix,self.name,self.modality,repr(self.iter),self.scan,self.mask,repr(self.protect)) + + def cleanup(self,verbose=False): + if not self.protect: + for i in (self.scan, self.mask ): + if i is not None and os.path.exists(i): + if verbose: + print("Removing:{}".format(i)) + os.unlink(i) + + +class MriTransform(object): + """Represents transformation + """ + def __init__(self, prefix, name, iter=None, nl=False, xfm=None, grid=None): + self.prefix=prefix + self.name=name + self.iter=iter + self.nl=nl + self.xfm=xfm + self.grid=grid + + if self.xfm is None: + if self.iter is None: + self.xfm= self.prefix+os.sep+self.name+'.xfm' + else: + self.xfm= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'.xfm' + + if self.grid is None and xfm is None and nl: + if self.iter is None: + self.grid= self.prefix+os.sep+self.name+'_grid_0.mnc' + else: + self.grid= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_grid_0.mnc' + + + def __repr__(self): + return 'MriTransform(prefix="{}",name="{}",iter="{}",nl={})'.\ + format(self.prefix,self.name,repr(self.iter),self.nl) + + def cleanup(self, verbose=False): + for i in (self.xfm, self.grid ): + if i is not None and os.path.exists(i): + if verbose: + print("Removing:{}".format(i)) + os.unlink(i) + + +class MriQCImage(object): + """Represents QC image (.jpg) + """ + def __init__(self, prefix, name, iter=None, fname=None, suffix='.jpg'): + self.prefix=prefix + self.name=name + self.iter=iter + self.fname=fname + self.suffix=suffix + + if self.fname is None: + if self.iter is None: + self.fname=self.prefix+os.sep+self.name+self.suffix + else: + self.fname=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+self.suffix + + def __repr__(self): + return 'MriQCImage(prefix="{}",name="{}",iter="{}",fname={})'.\ + format(self.prefix,self.name,repr(self.iter),self.fname) + + def cleanup(self, verbose=False): + #TODO: implement? 
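+        # A minimal implementation could mirror MriTransform.cleanup() above
+        # (sketch only; QC images may be worth keeping for visual review):
+        #   if self.fname is not None and os.path.exists(self.fname):
+        #       os.unlink(self.fname)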
+        pass
+
+
+class MriAux(object):
+    """Represents an auxiliary file (text)
+    """
+    def __init__(self, prefix, name, iter=None, fname=None, suffix='.txt'):
+        self.prefix=prefix
+        self.name=name
+        self.iter=iter
+        self.fname=fname
+        self.suffix=suffix
+
+        if self.fname is None:
+            if self.iter is None:
+                self.fname=self.prefix+os.sep+self.name+self.suffix
+            else:
+                self.fname=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+self.suffix
+
+    def __repr__(self):
+        return 'MriAux(prefix="{}",name="{}",iter="{}",fname={})'.\
+               format(self.prefix,self.name,repr(self.iter),self.fname)
+
+    def cleanup(self, verbose=False):
+        #TODO: implement?
+        pass
+
+
+class PipelineEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, MriTransform):
+            return {'name':obj.name,
+                    'iter':obj.iter,
+                    'xfm' :obj.xfm,
+                    'grid':obj.grid,
+                    'nl'  :obj.nl,
+                    'type':'transform',
+                   }
+
+        if isinstance(obj, MriScan):
+            return {'name':obj.name,
+                    'modality':obj.modality,
+                    'iter':obj.iter,
+                    'scan':obj.scan,
+                    'mask':obj.mask,
+                    'type':'scan',
+                   }
+        if isinstance(obj, MriQCImage):
+            return {'name':obj.name,
+                    'iter':obj.iter,
+                    'fname':obj.fname,
+                    'type':'qc_image',
+                   }
+
+        if isinstance(obj, MriAux):
+            return {'name':obj.name,
+                    'iter':obj.iter,
+                    'fname':obj.fname,
+                    'type':'aux'
+                   }
+        # Let the base class default method raise the TypeError
+        return json.JSONEncoder.default(self, obj)
+
+
+def save_summary(summary, out_file):
+    with open(out_file,'w') as f:
+        json.dump(summary, f, indent=1, cls=PipelineEncoder, sort_keys=True)
+
+def save_pipeline_output(summary, out_file):
+    save_summary(summary, out_file)
+
+def convert_summary(in_dict):
+    ret={}
+    # iterate over all entries, assuming they should contain only
+    # recognized types
+    for i,j in in_dict.items():
+        if isinstance(j, dict):
+            if j.get('type',None)=='aux':
+                ret[i]=MriAux(
+                    os.path.dirname(j.get('fname','.')),
+                    name=j.get('name',None),
+                    iter=j.get('iter',None),
+                    fname=j.get('fname',None))
+
+            elif j.get('type',None)=='qc_image':
+                ret[i]=MriQCImage(
+                    os.path.dirname(j.get('fname','.')),
+                    j.get('name',''),
+                    iter=j.get('iter',None),
+                    fname=j.get('fname',None),
+                    suffix='.'+j.get('fname','.jpg').rsplit('.',1)[-1],
+                    )
+
+            elif j.get('type',None)=='scan':
+                ret[i]=MriScan(
+                    prefix=os.path.dirname(j.get('fname','.')),
+                    name=j.get('name',''),
+                    iter=j.get('iter',None),
+                    scan=j.get('scan',None),
+                    mask=j.get('mask',''),
+                    modality=j.get('modality','')
+                    )
+
+            elif j.get('type',None)=='transform':
+                ret[i]=MriTransform(
+                    os.path.dirname(j.get('fname','.')),
+                    j.get('name',''),
+                    iter=j.get('iter',None),
+                    xfm=j.get('xfm',None),
+                    grid=j.get('grid',None),
+                    nl=j.get('nl',False)
+                    )
+
+            else: # just copy it!
+                ret[i]=j
+
+        else:
+            ret[i]=j
+    return ret
+
+def load_summary(in_file):
+    tmp=None
+    with open(in_file,'r') as f:
+        tmp=json.load(f)
+    ret=convert_summary(tmp)
+    return ret
+
+def load_pipeline_output(in_file):
+    tmp=None
+    with open(in_file,'r') as f:
+        tmp=json.load(f)
+    ret=[]
+    for i in tmp:
+        o=convert_summary(i)
+        o['output']=convert_summary(o['output'])
+        ret.append(o)
+    return ret
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/lp/utils.py b/ipl/lp/utils.py
new file mode 100644
index 0000000..24d03d6
--- /dev/null
+++ b/ipl/lp/utils.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date 14/08/2015
+#
+# Longitudinal pipeline utilities
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+import json
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+
+def create_dirs(dirs):
+    for i in dirs:
+        if not os.path.exists(i):
+            os.makedirs(i)
+
+
+def xfm_remove_scale(in_xfm, out_xfm, unscale=None):
+    """remove scaling factors from a linear XFM
+
+    """
+    _unscale=None
+    if unscale is not None:
+        _unscale=unscale.xfm
+
+    with mincTools() as minc:
+        minc.xfm_noscale(in_xfm.xfm,out_xfm.xfm,unscale=_unscale)
+
+
+def xfm_concat(in_xfms, out_xfm):
+    """Concatenate multiple transforms
+
+    """
+    with mincTools() as minc:
+        minc.xfmconcat([ i.xfm for i in in_xfms],out_xfm.xfm)
+
+
+def extract_volumes(in_lob, in_cls, tal_xfm, out,
+                    produce_json=False,
+                    subject_id=None,
+                    timepoint_id=None,
+                    lobedefs=None):
+    """Convert lobe segmentation to volumetric measurements
+
+    """
+    with mincTools() as minc:
+        vol_lobes= minc.label_stats( in_lob.scan, label_defs=lobedefs )
+        vol_cls  = minc.label_stats( in_cls.scan )
+        params=minc.xfm2param(tal_xfm.xfm)
+        vol_scale=params['scale'][0]*params['scale'][1]*params['scale'][2]
+
+        volumes={ k[0]:k[1]*vol_scale for k in vol_lobes }
+        _vol_cls = { k[0]: k[1]*vol_scale for k in vol_cls }
+        # TODO: figure out what to do when keys are missing, i.e. something is definitely wrong
+        volumes['CSF']=_vol_cls.get(1,0.0)
+        volumes['GM'] =_vol_cls.get(2,0.0)
+        volumes['WM'] =_vol_cls.get(3,0.0)
+
+        volumes['ICC']=volumes['CSF']+volumes['GM']+volumes['WM']
+
+        if subject_id is not None:
+            volumes['id']=subject_id
+
+        if timepoint_id is not None:
+            volumes['timepoint']=timepoint_id
+
+        volumes['scale']=vol_scale
+
+        # save either as a text file or as json
+        with open(out.fname,'w') as f:
+            if produce_json:
+                json.dump(volumes,f,indent=1)
+            else:
+                for i,j in volumes.items():
+                    f.write("{} {}\n".format(i,j))
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/minc_hl.py b/ipl/minc_hl.py
new file mode 100755
index 0000000..3733ff0
--- /dev/null
+++ b/ipl/minc_hl.py
@@ -0,0 +1,267 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S.
FONOV +# @date 4/01/2016 +# +# high level tools + +from __future__ import print_function + +import os +import sys +import shutil +import tempfile +import traceback + +#from sigtools.modifiers import kwoargs,autokwoargs + +# local stuff +from ipl.minc_tools import mincTools,mincError +#from ipl.optfunc import optfunc +#from clize import run + +# numpy & scipy +#from scipy import stats +import numpy as np +from sklearn import linear_model + +try: + # needed to read and write XFM files + import pyezminc +except: + pass + +try: + # needed for matrix log and exp + import scipy.linalg +except: + pass + +def label_normalize(sample, sample_labels, ref, ref_labels, out=None,sample_mask=None, ref_mask=None,median=False,order=3,debug=False): + '''Use label-based intensity normalization''' + with mincTools() as minc: + if not mincTools.checkfiles(outputs=[out]): return + + ref_stats = {i[0]:i[5] for i in minc.label_stats(ref_labels, volume=ref, mask=ref_mask,median=median)} + sample_stats= {i[0]:i[5] for i in minc.label_stats(sample_labels,volume=sample,mask=sample_mask,median=median)} + x=[] + y=[] + + for i in ref_stats: + # use 0-intercept + if i in sample_stats: + #x.append( [1.0, sample_stats[i], sample_stats[i]*sample_stats[i] ] ) + x.append( sample_stats[i] ) + y.append( ref_stats[i] ) + #print('{} -> {}'.format(sample_stats[i],ref_stats[i])) + # FIX origin? (HACK) + x.append(0.0) + y.append(0.0) + # run linear regression + clf = linear_model.LinearRegression() + __x=np.array(x) + + _x=np.column_stack( ( np.power(__x,i) for i in range(1,order+1) ) ) + _y=np.array( y ) + #print(_x) + #print(_y) + clf.fit(_x, _y) + + if debug: + import matplotlib.pyplot as plt + print('Coefficients: \n', clf.coef_) + #print('[0.0 100.0] -> {}'.format(clf.predict([[1.0,0.0,0.0], [1.0,100.0,100.0*100.0]] ))) + + plt.scatter(_x[:,0], _y, color='black') + #plt.plot(_x[:,0], clf.predict(_x), color='blue', linewidth=3) + prx=np.linspace(0,100,20) + prxp=np.column_stack( ( np.power(prx,i) for i in range(1,order+1) ) ) + plt.plot( prx , clf.predict( prxp ), color='red', linewidth=3) + + plt.xticks(np.arange(0,100,5)) + plt.yticks(np.arange(0,100,5)) + + plt.show() + # create command-line for minccalc + cmd='' + for i in range(order): + if i==0: + cmd+='A[0]*{}'.format(clf.coef_[i]) + else: + cmd+='+'+'*'.join(['A[0]']*(i+1))+'*{}'.format(clf.coef_[i]) + if out is not None: + minc.calc([sample],cmd,out) + return cmd + +def nuyl_normalize2( + source,target, + output, + source_mask=None, + target_mask=None, + linear=False, + iterations=4, + filter_gradients=True, + fwhm=2.0, + verbose=0, + remove_bg=False, + ): + """normalize intensities, using areas with uniform intensity """ + with mincTools(verbose=verbose) as minc: + if not mincTools.checkfiles(outputs=[output]): return + # create gradient maps + + if filter_gradients: + minc.blur(source,minc.tmp('source_grad.mnc'),fwhm,gmag=True,output_float=True) + minc.blur(target,minc.tmp('target_grad.mnc'),fwhm,gmag=True,output_float=True) + # create masks of areas with low gradient + minc.binary_morphology(minc.tmp('source_grad.mnc'),'D[1] I[0]',minc.tmp('source_grad_mask.mnc'),binarize_bimodal=True) + source_mask=minc.tmp('source_grad_mask.mnc') + + minc.binary_morphology(minc.tmp('target_grad.mnc'),'D[1] I[0]',minc.tmp('target_grad_mask.mnc'),binarize_bimodal=True) + target_mask=minc.tmp('target_grad_mask.mnc') + + if remove_bg: + minc.binary_morphology(source,'D[8]',minc.tmp('source_mask.mnc'),binarize_bimodal=True) + 
minc.binary_morphology(target,'D[8]',minc.tmp('target_mask.mnc'),binarize_bimodal=True) + minc.calc([source_mask,minc.tmp('source_mask.mnc')],'A[0]>0.5&&A[1]>0.5?1:0',minc.tmp('source_grad_mask2.mnc')) + minc.calc([target_mask,minc.tmp('target_mask.mnc')],'A[0]>0.5&&A[1]>0.5?1:0',minc.tmp('target_grad_mask2.mnc')) + source_mask=minc.tmp('source_grad_mask2.mnc') + target_mask=minc.tmp('target_grad_mask2.mnc') + + if source_mask is not None: + minc.resample_labels(source_mask,minc.tmp('source_mask.mnc'),like=minc.tmp('source_grad_mask.mnc')) + minc.calc([minc.tmp('source_grad_mask.mnc'),minc.tmp('source_mask.mnc')],'A[0]>0.5&&A[1]>0.5?1:0',minc.tmp('source_mask2.mnc')) + source_mask=minc.tmp('source_mask2.mnc') + + if target_mask is not None: + minc.resample_labels(target_mask,minc.tmp('target_mask.mnc'),like=minc.tmp('target_grad_mask.mnc')) + minc.calc([minc.tmp('target_grad_mask.mnc'),minc.tmp('target_mask.mnc')],'A[0]>0.5&&A[1]>0.5?1:0',minc.tmp('target_mask2.mnc')) + target_mask=minc.tmp('target_mask2.mnc') + + # now run iterative normalization + for i in range(iterations): + if (i+1)==iterations: out=output + else: out=minc.tmp('{}.mnc'.format(i)) + + minc.nuyl_normalize(source,target,out,source_mask=source_mask,target_mask=target_mask,linear=linear) + source=out + + # done here? + +def patch_normalize(sample, sample_labels, ref, ref_labels, out=None,sample_mask=None, ref_mask=None,median=False,order=3,debug=False): + '''Use label-based intensity normalization''' + with mincTools() as minc: + if not mincTools.checkfiles(outputs=[out]): return + + ref_stats = {i[0]:i[5] for i in minc.label_stats(ref_labels, volume=ref, mask=ref_mask,median=median)} + sample_stats= {i[0]:i[5] for i in minc.label_stats(sample_labels,volume=sample,mask=sample_mask,median=median)} + x=[] + y=[] + + for i in ref_stats: + # use 0-intercept + if i in sample_stats: + #x.append( [1.0, sample_stats[i], sample_stats[i]*sample_stats[i] ] ) + x.append( sample_stats[i] ) + y.append( ref_stats[i] ) + #print('{} -> {}'.format(sample_stats[i],ref_stats[i])) + # FIX origin? 
(HACK) + x.append(0.0) + y.append(0.0) + # run linear regression + clf = linear_model.LinearRegression() + __x=np.array(x) + + _x=np.column_stack( ( np.power(__x,i) for i in range(1,order+1) ) ) + _y=np.array( y ) + #print(_x) + #print(_y) + clf.fit(_x, _y) + + if debug: + import matplotlib.pyplot as plt + print('Coefficients: \n', clf.coef_) + #print('[0.0 100.0] -> {}'.format(clf.predict([[1.0,0.0,0.0], [1.0,100.0,100.0*100.0]] ))) + + plt.scatter(_x[:,0], _y, color='black') + #plt.plot(_x[:,0], clf.predict(_x), color='blue', linewidth=3) + prx=np.linspace(0,100,20) + prxp=np.column_stack( ( np.power(prx,i) for i in range(1,order+1) ) ) + plt.plot( prx , clf.predict( prxp ), color='red', linewidth=3) + + plt.xticks(np.arange(0,100,5)) + plt.yticks(np.arange(0,100,5)) + + plt.show() + # create command-line for minccalc + cmd='' + for i in range(order): + if i==0: + cmd+='A[0]*{}'.format(clf.coef_[i]) + else: + cmd+='+'+'*'.join(['A[0]']*(i+1))+'*{}'.format(clf.coef_[i]) + if out is not None: + minc.calc([sample],cmd,out) + return cmd + + +def xfmavg(inputs,output): + # TODO: handle inversion flag correctly + all_linear=True + all_nonlinear=True + input_xfms=[] + if not mincTools.checkfiles(inputs=inputs, + outputs=[output ]): + return + for j in inputs: + x=pyezminc.read_transform(j) + if x[0][0] and len(x)==1 and (not x[0][1]): + # this is a linear matrix + input_xfms.append(x[0]) + else: + all_linear&=False + # strip identity matrixes + nl=[] + _identity=np.asmatrix(np.identity(4)) + _eps=1e-6 + for i in x: + if i[0]: + if scipy.linalg.norm(_identity-i[2])>_eps: # this is non-identity matrix + all_nonlinear&=False + else: + nl.append(i) + if len(nl)!=1: + all_nonlinear&=False + else: + input_xfms.append(nl[0]) + if all_linear: + acc=np.asmatrix(np.zeros([4,4],dtype=np.complex)) + for i in input_xfms: + acc+=scipy.linalg.logm(i[2]) + acc/=len(input_xfms) + out_xfm=[(True,False,scipy.linalg.expm(acc).real)] + pyezminc.write_transform(output,out_xfm) + elif all_nonlinear: + input_grids=[] + for i in input_xfms: + input_grids.append(i[2]) + output_grid=output.rsplit('.xfm',1)[0]+'_grid_0.mnc' + with mincTools(verbose=2) as m: + m.average(input_grids,output_grid) + out_xfm=[(False,False,output_grid)] + print("xfmavg output:{}".format(repr(out_xfm))) + pyezminc.write_transform(output,out_xfm) + else: + raise Exception("Mixed XFM files provided as input") + + + +if __name__ == '__main__': + #optfunc.run([nuyl_normalize2,label_normalize]) + # TODO: re-implement using optparse + pass + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on + diff --git a/ipl/minc_qc.py b/ipl/minc_qc.py new file mode 100755 index 0000000..401921f --- /dev/null +++ b/ipl/minc_qc.py @@ -0,0 +1,449 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV
+# @date 11/21/2011
+#
+# Tools for creating QC images
+
+from __future__ import print_function
+
+import numpy as np
+import numpy.ma as ma
+
+import scipy
+import matplotlib
+matplotlib.use('AGG')
+
+import matplotlib.pyplot as plt
+import matplotlib.gridspec as gridspec
+#from minc2.simple import minc2_file
+from minc2_simple import minc2_file
+
+import matplotlib.cm as cmx
+import matplotlib.colors as colors
+import argparse
+
+
+try:
+    unicode = unicode
+except NameError:
+    # 'unicode' is undefined, must be Python 3
+    str = str
+    unicode = str
+    bytes = bytes
+    basestring = (str,bytes)
+else:
+    # 'unicode' exists, must be Python 2
+    str = str
+    unicode = unicode
+    bytes = str
+    basestring = basestring
+
+
+def alpha_blend(si, so, ialpha, oalpha):
+    """Perform alpha-blending
+    """
+    si_rgb   = si[..., :3]
+    si_alpha = si[..., 3]*ialpha
+
+    so_rgb   = so[..., :3]
+    so_alpha = so[..., 3]*oalpha
+
+    out_alpha = si_alpha + so_alpha * (1. - si_alpha)
+
+    out_rgb = (si_rgb * si_alpha[..., None]
+               + so_rgb * so_alpha[..., None] * (1. - si_alpha[..., None])) / out_alpha[..., None]
+
+    out = np.zeros_like(si)
+    out[..., :3] = out_rgb
+    out[..., 3]  = out_alpha
+
+    return out
+
+
+def max_blend(si, so):
+    """Perform max-blending
+    """
+    return np.maximum(si,so)
+
+def over_blend(si, so, ialpha, oalpha):
+    """Perform over-blending
+    """
+    si_rgb   = si[..., :3]
+    si_alpha = si[..., 3]*ialpha
+
+    so_rgb   = so[..., :3]
+    so_alpha = so[..., 3]*oalpha
+
+    out_alpha = np.maximum(si_alpha , so_alpha )
+
+    out_rgb = si_rgb * (si_alpha[..., None]-so_alpha[..., None]) + so_rgb * so_alpha[..., None]
+
+    out = np.zeros_like(si)
+    out[..., :3] = out_rgb
+    out[..., 3]  = out_alpha
+
+    return out
+
+
+def qc(
+    input,
+    output,
+    image_range=None,
+    mask=None,
+    mask_range=None,
+    title=None,
+    image_cmap='gray',
+    mask_cmap='red',
+    samples=5,
+    mask_bg=None,
+    use_max=False,
+    use_over=False,
+    show_image_bar=False,   # TODO: implement this?
+    show_overlay_bar=False,
+    dpi=100,
+    ialpha=0.8,
+    oalpha=0.2,
+    format=None
+    ):
+    """QC image generation, drop-in replacement for minc_qc.pl
+    Arguments:
+        input  -- input minc file
+        output -- output QC graphics file
+
+    Keyword arguments:
+        image_range -- (optional) intensity range for image
+        mask        -- (optional) input mask file
+        mask_range  -- (optional) mask file range
+        title       -- (optional) QC title
+        image_cmap  -- (optional) color map name for image,
+                       possibilities: red, green, blue and anything from matplotlib
+        mask_cmap   -- (optional) color map for mask, default red
+        samples     -- number of slices to show, default 5
+        mask_bg     -- (optional) level for mask to treat as background
+        use_max     -- (optional) use 'max' colour mixing
+        use_over    -- (optional) use 'over' colour mixing
+        show_image_bar   -- show color bar for intensity range, default False
+        show_overlay_bar -- show color bar for mask intensity range, default False
+        dpi    -- graphics file DPI, default 100
+        ialpha -- alpha channel for colour mixing of main image
+        oalpha -- alpha channel for colour mixing of mask image
+    """
+
+    #_img=minc.Image(input)
+    #_idata=_img.data
+    _img=minc2_file(input)
+    _img.setup_standard_order()
+    _idata=_img.load_complete_volume(minc2_file.MINC2_FLOAT)
+    _idims=_img.representation_dims()
+
+    data_shape=_idata.shape
+    spacing=[_idims[0].step,_idims[1].step,_idims[2].step]
+
+    _ovl=None
+    _odata=None
+    omin=0
+    omax=1
+
+    if mask is not None:
+        _ovl=minc2_file(mask)
+        _ovl.setup_standard_order()
+        _ovl_data=_ovl.load_complete_volume(minc2_file.MINC2_FLOAT)
+        if _ovl_data.shape != data_shape:
+            raise ValueError("Overlay shape does not match image! Ovl={} Image={}".format(repr(_ovl_data.shape),repr(data_shape)))
+        if mask_range is None:
+            omin=np.nanmin(_ovl_data)
+            omax=np.nanmax(_ovl_data)
+        else:
+            omin=mask_range[0]
+            omax=mask_range[1]
+        _odata=_ovl_data
+
+        if mask_bg is not None:
+            _odata=ma.masked_less(_odata, mask_bg)
+
+    slices=[]
+
+    # setup ranges
+    vmin=vmax=0.0
+    if image_range is not None:
+        vmin=image_range[0]
+        vmax=image_range[1]
+    else:
+        vmin=np.nanmin(_idata)
+        vmax=np.nanmax(_idata)
+
+    cm = plt.get_cmap(image_cmap)
+    cmo= plt.get_cmap(mask_cmap)
+    cmo.set_bad('k',alpha=0.0)
+
+    cNorm = colors.Normalize(vmin=vmin, vmax=vmax)
+    oNorm = colors.Normalize(vmin=omin, vmax=omax)
+
+    scalarMap  = cmx.ScalarMappable(norm=cNorm, cmap=cm)
+    oscalarMap = cmx.ScalarMappable(norm=oNorm, cmap=cmo)
+    aspects = []
+
+    # axial slices
+    for j in range(0,samples):
+        # integer division: the slice index has to be an int
+        i=(data_shape[0]//samples)*j+(data_shape[0]%samples)//2
+        si=scalarMap.to_rgba(_idata[i , : ,:])
+
+        if _ovl is not None:
+            so=oscalarMap.to_rgba(_odata[i , : ,:])
+            if use_max:    si=max_blend(si,so)
+            elif use_over: si=over_blend(si,so, ialpha, oalpha)
+            else:          si=alpha_blend(si, so, ialpha, oalpha)
+        slices.append( si )
+        aspects.append( spacing[0]/spacing[1] )
+    # coronal slices
+    for j in range(0,samples):
+        i=(data_shape[1]//samples)*j+(data_shape[1]%samples)//2
+        si=scalarMap.to_rgba(_idata[: , i ,:])
+
+        if _ovl is not None:
+            so=oscalarMap.to_rgba(_odata[: , i ,:])
+            if use_max:    si=max_blend(si,so)
+            elif use_over: si=over_blend(si,so, ialpha, oalpha)
+            else:          si=alpha_blend(si, so, ialpha, oalpha)
+        slices.append( si )
+        aspects.append( spacing[2]/spacing[0] )
+
+    # sagittal slices
+    for j in range(0,samples):
+        i=(data_shape[2]//samples)*j+(data_shape[2]%samples)//2
+        si=scalarMap.to_rgba(_idata[: , : , i])
+        if _ovl is not None:
+            so=oscalarMap.to_rgba(_odata[: , : , i])
+            if use_max:    si=max_blend(si,so)
+            elif use_over: si=over_blend(si,so, ialpha, oalpha)
+            else:          si=alpha_blend(si, so, ialpha, oalpha)
+        slices.append( si )
+        aspects.append( spacing[2]/spacing[1] )
+
+    w, h = plt.figaspect(3.0/samples)
+    fig = plt.figure(figsize=(w,h))
+
+    #outer_grid = gridspec.GridSpec((len(slices)+1)/2, 2, wspace=0.0, hspace=0.0)
+    ax=None
+    imgplot=None
+    for i,j in enumerate(slices):
+        # integer division: the grid location has to be an int
+        ax = plt.subplot2grid( (3, samples), (i//samples, i%samples) )
+        imgplot = ax.imshow(j,origin='lower',cmap=cm, aspect=aspects[i])
+        ax.set_xticks([])
+        ax.set_yticks([])
+        ax.title.set_visible(False)
+    # show for the last plot
+    if show_image_bar:
+        cbar = fig.colorbar(imgplot)
+
+    if title is not None:
+        plt.suptitle(title,fontsize=20)
+        plt.subplots_adjust(wspace = 0.0 ,hspace=0.0)
+    else:
+        plt.subplots_adjust(top=1.0,bottom=0.0,left=0.0,right=1.0,wspace = 0.0 ,hspace=0.0)
+
+    #fig.tight_layout()
+    #plt.show()
+    plt.savefig(output, bbox_inches='tight', dpi=dpi, format=format)
+    plt.close()
+    plt.close('all')
+
+def qc_field_contour(
+    input,
+    output,
+    image_range=None,
+    title=None,
+    image_cmap='gray',
+    samples=5,
+    show_image_bar=False,   # TODO: implement this?
+    dpi=100,
+    format=None
+    ):
+    """show field contours
+    """
+
+    _img=minc2_file(input)
+    _img.setup_standard_order()
+    _idata=_img.load_complete_volume(minc2_file.MINC2_FLOAT)
+    _idims=_img.representation_dims()
+
+    data_shape=_idata.shape
+    spacing=[_idims[0].step,_idims[1].step,_idims[2].step]
+
+    slices=[]
+
+    # setup ranges
+    vmin=vmax=0.0
+    if image_range is not None:
+        vmin=image_range[0]
+        vmax=image_range[1]
+    else:
+        vmin=np.nanmin(_idata)
+        vmax=np.nanmax(_idata)
+
+    cm = plt.get_cmap(image_cmap)
+
+    cNorm = colors.Normalize(vmin=vmin, vmax=vmax)
+
+    scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
+
+    for j in range(0,samples):
+        i=(data_shape[0]//samples)*j+(data_shape[0]%samples)//2
+        si=_idata[i , : ,:]
+        slices.append( si )
+
+    for j in range(0,samples):
+        i=(data_shape[1]//samples)*j+(data_shape[1]%samples)//2
+        si=_idata[: , i ,:]
+        slices.append( si )
+
+    for j in range(0,samples):
+        i=(data_shape[2]//samples)*j+(data_shape[2]%samples)//2
+        si=_idata[: , : , i]
+        slices.append( si )
+
+    w, h = plt.figaspect(3.0/samples)
+    fig = plt.figure(figsize=(w,h))
+
+    #outer_grid = gridspec.GridSpec((len(slices)+1)/2, 2, wspace=0.0, hspace=0.0)
+    ax=None
+    imgplot=None
+    for i,j in enumerate(slices):
+        ax = plt.subplot2grid( (3, samples), (i//samples, i%samples) )
+        imgplot = ax.contour(j,origin='lower', cmap=cm, norm=cNorm, levels=np.linspace(vmin,vmax,20))
+        #plt.clabel(imgplot, inline=1, fontsize=8)
+        ax.set_xticks([])
+        ax.set_yticks([])
+        ax.title.set_visible(False)
+    # show for the last plot
+    if show_image_bar:
+        cbar = fig.colorbar(imgplot)
+
+    if title is not None:
+        plt.suptitle(title,fontsize=20)
+        plt.subplots_adjust(wspace = 0.0 ,hspace=0.0)
+    else:
+        plt.subplots_adjust(top=1.0,bottom=0.0,left=0.0,right=1.0,wspace = 0.0 ,hspace=0.0)
+
+    plt.savefig(output, bbox_inches='tight', dpi=dpi)
+    plt.close('all')
+
+
+# register custom colour maps
+plt.register_cmap(cmap=colors.LinearSegmentedColormap('red',
+    {'red':   ((0.0, 0.0, 0.0),
+               (1.0, 1.0, 1.0)),
+
+     'green': ((0.0, 0.0, 0.0),
+               (1.0, 0.0, 0.0)),
+
+     'blue':  ((0.0, 0.0, 0.0),
+               (1.0, 0.0, 0.0)),
+
+     'alpha': ((0.0, 0.0, 1.0),
+               (1.0, 1.0, 1.0))
+    }))
+
+plt.register_cmap(cmap=colors.LinearSegmentedColormap('green',
+    {'green': ((0.0, 0.0, 0.0),
+               (1.0, 1.0, 1.0)),
+
+     'red':   ((0.0, 0.0, 0.0),
+               (1.0, 0.0, 0.0)),
+
'blue': ((0.0, 0.0, 0.0), + (1.0, 0.0, 0.0)), + + 'alpha': ((0.0, 0.0, 1.0), + (1.0, 1.0, 1.0)) + })) + +plt.register_cmap(cmap=colors.LinearSegmentedColormap('blue', + {'blue': ((0.0, 0.0, 0.0), + (1.0, 1.0, 1.0)), + + 'red': ((0.0, 0.0, 0.0), + (1.0, 0.0, 0.0)), + + 'green': ((0.0, 0.0, 0.0), + (1.0, 0.0, 0.0)), + + 'alpha': ((0.0, 0.0, 1.0), + (1.0, 1.0, 1.0)) + })) + +def parse_options(): + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description='Make QC image') + + parser.add_argument("--debug", + action="store_true", + dest="debug", + default=False, + help="Print debugging information" ) + + parser.add_argument("--contour", + action="store_true", + dest="contour", + default=False, + help="Make contour plot" ) + + parser.add_argument("--bar", + action="store_true", + dest="bar", + default=False, + help="Show colour-bar" ) + + parser.add_argument("--cmap", + dest="cmap", + default=None, + help="Colour map" ) + + parser.add_argument("--mask", + dest="mask", + default=None, + help="Add mask" ) + + parser.add_argument("--over", + dest="use_over", + action="store_true", + default=False, + help="Overplot" ) + + parser.add_argument("--max", + dest="use_max", + action="store_true", + default=False, + help="Use max mixing" ) + + parser.add_argument("input", + help="Input minc file") + + parser.add_argument("output", + help="Output QC file") + + options = parser.parse_args() + + if options.debug: + print(repr(options)) + + return options + +if __name__ == '__main__': + options = parse_options() + if options.input is not None and options.output is not None: + if options.contour: + qc_field_contour(options.input,options.output,show_image_bar=options.bar,image_cmap=options.cmap) + else: + qc(options.input,options.output,mask=options.mask,use_max=options.use_max,use_over=options.use_over,mask_bg=0.5) + else: + print("Refusing to run without input data, run --help") + exit(1) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80 diff --git a/ipl/minc_tools.py b/ipl/minc_tools.py new file mode 100755 index 0000000..dda18c1 --- /dev/null +++ b/ipl/minc_tools.py @@ -0,0 +1,2112 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV
+# @date 11/21/2011
+#
+# Generic minc tools
+
+from __future__ import print_function
+
+import os
+import sys
+import shutil
+import tempfile
+import subprocess
+import re
+import fcntl
+import traceback
+import collections
+import math
+
+import inspect
+
+# local stuff
+import registration
+import ants_registration
+import dd_registration
+import elastix_registration
+
+# hack to make it work on Python 3
+try:
+    unicode = unicode
+except NameError:
+    # 'unicode' is undefined, must be Python 3
+    str = str
+    unicode = str
+    bytes = bytes
+    basestring = (str, bytes)
+else:
+    # 'unicode' exists, must be Python 2
+    str = str
+    unicode = unicode
+    bytes = str
+    basestring = basestring
+
+def get_git_hash():
+    _script_dir=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
+    _hash_code=''
+    outvalue=1  # stays non-zero if the git call fails
+    try:
+        p=subprocess.Popen(['git', '-C', _script_dir, 'rev-parse', '--short', '--verify', 'HEAD'],
+                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        (output, outerr)=p.communicate()
+        _hash_code=output.decode()
+        outvalue=p.wait()
+    except OSError:
+        _hash_code='Unknown'
+    if not outvalue == 0:
+        _hash_code='Unknown'
+    return _hash_code.rstrip("\n")
+
+class mincError(Exception):
+    """MINC tools general error"""
+    def __init__(self, value='ERROR'):
+        self.value = value
+        self.stack = traceback.extract_stack()
+
+    def __repr__(self):
+        return "mincError:{}\nAT:{}".format(self.value, self.stack)
+
+    def __str__(self):
+        return self.__repr__()
+
+
+class temp_files(object):
+    """Class to keep track of temp files"""
+
+    def __init__(self, tempdir=None, prefix=None):
+
+        self.tempdir = tempdir
+        self.clean_tempdir = False
+        self.tempfiles = {}
+        if not self.tempdir:
+            if prefix is None:
+                prefix='iplMincTools'
+            self.tempdir = tempfile.mkdtemp(prefix=prefix, dir=os.environ.get('TMPDIR', None))
+            self.clean_tempdir = True
+
+        if not os.path.exists(self.tempdir):
+            os.makedirs(self.tempdir)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.do_cleanup()
+
+    def __del__(self):
+        self.do_cleanup()
+
+    def do_cleanup(self):
+        """remove temporary directory if present"""
+        if self.clean_tempdir and self.tempdir is not None:
+            shutil.rmtree(self.tempdir)
+            self.clean_tempdir=False
+
+    def temp_file(self, suffix='', prefix=''):
+        """create a temporary file name (a new name on every call)"""
+
+        (h, name) = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=self.tempdir)
+        os.close(h)
+        os.unlink(name)
+        return name
+
+    def tmp(self, name):
+        """return path of a temp file named name"""
+        try:
+            return self.tempfiles[name]
+        except KeyError:
+            self.tempfiles[name] = self.temp_file(suffix=name)
+            return self.tempfiles[name]
+
+    def temp_dir(self, suffix='', prefix=''):
+        """Create temporary directory for processing"""
+
+        name = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=self.tempdir)
+        return name
+
+    @property
+    def dir(self):
+        return self.tempdir
+
+class cache_files(temp_files):
+    """Class to keep track of work files"""
+    def __init__(self, work_dir=None, context='', tempdir=None):
+        self._locks={}
+        super(cache_files, self).__init__(tempdir=tempdir)
+        self.work_dir=work_dir
+        self.context=context  # TODO: something more clever here?
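+        # A hypothetical usage sketch for this class (the names below are
+        # placeholders, not part of the original code): cache() hands out a
+        # lockable path under work_dir and unlock() releases it, e.g.
+        #     cf = cache_files(work_dir='work', context='fuse')
+        #     fname = cf.cache('average', suffix='.mnc')
+        #     try:
+        #         pass  # ... produce fname ...
+        #     finally:
+        #         cf.unlock(fname)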
+        self.cache_dir=None
+        if self.work_dir is not None:
+            self.cache_dir=self.work_dir+os.sep+context+os.sep
+            if not os.path.exists(self.cache_dir):
+                os.makedirs(self.cache_dir)
+
+
+    def cache(self, name, suffix=''):
+        """Allocate a name in the cache, if the cache was set up,
+        and lock the file, so that another process has to wait before using the same file name
+
+        Important: call unlock() on the result
+        """
+        # TODO: something more clever here?
+        fname=''
+        if self.work_dir is not None:
+            fname=self.cache_dir+os.sep+name+suffix
+            lock_name=fname+'.lock'
+            f=self._locks[lock_name]=open(lock_name, 'a')
+            fcntl.lockf(f.fileno(), fcntl.LOCK_EX)
+        else:
+            fname=self.tmp(name+suffix)
+
+        return fname
+
+
+    def unlock(self, fname):
+        # TODO: something more clever here?
+        lock_name=fname+'.lock'
+        try:
+            f=self._locks[lock_name]
+
+            if f is not None:
+                fcntl.lockf(f.fileno(), fcntl.LOCK_UN)
+                f.close()
+
+            del self._locks[lock_name]
+
+        except KeyError:
+            pass
+
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.do_cleanup()
+
+
+    def do_cleanup(self):
+        """unlock lock files"""
+        for f in self._locks.keys():
+            if self._locks[f] is not None:
+                fcntl.flock(self._locks[f].fileno(), fcntl.LOCK_UN)
+                self._locks[f].close()
+        self._locks={}
+        super(cache_files, self).do_cleanup()
+
+class mincTools(temp_files):
+    """minc toolkit interface, mostly basic tools"""
+
+    def __init__(self, tempdir=None, resample=None, verbose=2, prefix=None):
+        super(mincTools, self).__init__(tempdir=tempdir, prefix=prefix)
+        # TODO: add some options?
+        self.resample = resample
+        self.verbose = verbose
+
+    def __enter__(self):
+        return super(mincTools, self).__enter__()
+
+    def __exit__(self, type, value, traceback):
+        return super(mincTools, self).__exit__(type, value, traceback)
+
+    @staticmethod
+    def checkfiles(inputs=None, outputs=None, timecheck=False, verbose=1):
+        """Check that inputs exist and whether outputs are up to date"""
+
+        itime = -1  # number of seconds since epoch
+        inputs_exist = True
+
+        if inputs is not None:
+            if isinstance(inputs, basestring):  # input is a single string, not a list
+                if not os.path.exists(inputs):
+                    inputs_exist = False
+                    raise mincError(' ** Error: Input does not exist! :: {}'.format(str(inputs)))
+                else:
+                    itime = os.path.getmtime(inputs)
+            else:
+                for i in inputs:
+                    if not os.path.exists(i):
+                        inputs_exist = False
+                        print(' ** Error: One input does not exist! :: {}'.format(i), file=sys.stderr)
+                        raise mincError(' ** Error: One input does not exist! :: {}'.format(i))
+                    else:
+                        timer = os.path.getmtime(i)
+                        if timer < itime or itime < 0:
+                            itime = timer
+
+        # Check if outputs exist AND are newer than inputs
+
+        outExists = False
+        otime = -1
+        exists=[]
+        if outputs is not None:
+            if isinstance(outputs, basestring):
+                outExists = os.path.exists(outputs)
+                if outExists:
+                    otime = os.path.getmtime(outputs)
+                    exists.append(outputs)
+            else:
+                for o in outputs:
+                    outExists = os.path.exists(o)
+                    if outExists:
+                        exists.append(o)
+                        timer = os.path.getmtime(o)
+                        if timer > otime:
+                            otime = timer
+                    if not outExists:
+                        break
+
+        if outExists:
+            if timecheck and itime > 0 and otime > 0 and otime < itime:
+                if verbose > 1:
+                    print(' -- Warning: Output exists but is older than input! Redoing command', file=sys.stderr)
+                    print(' otime ' + str(otime) + ' < itime ' + str(itime), file=sys.stderr)
+                return True
+            else:
+                if verbose > 1:
+                    print(' -- Skipping: Output Exists:{}'.format(repr(exists)), file=sys.stderr)
+                return False
+        return True
+
+    @staticmethod
+    def execute(cmds, verbose=1):
+        """
+        Execute a command line and wait for it to finish
+        Arguments:
+        cmds: list containing the command line
+
+        Keyword arguments:
+        verbose: verbosity level, 0 suppresses the command echo
+
+        return: exit code (0); raises mincError on failure
+        """
+        output_stderr=""
+        output=""
+        outvalue=0
+        if verbose > 0:
+            print(repr(cmds))
+        try:
+            if verbose < 2:
+                with open(os.devnull, "w") as fnull:
+                    p=subprocess.Popen(cmds, stdout=fnull, stderr=subprocess.PIPE)
+            else:
+                p=subprocess.Popen(cmds, stderr=subprocess.PIPE)
+
+            (output, output_stderr)=p.communicate()
+            outvalue=p.wait()
+
+        except OSError:
+            print("ERROR: command {} Error:{}!\nMessage: {}\n{}".format(str(cmds), str(outvalue), output_stderr, traceback.format_exc()), file=sys.stderr)
+            raise mincError("ERROR: command {} Error:{}!\nMessage: {}\n{}".format(str(cmds), str(outvalue), output_stderr, traceback.format_exc()))
+        if not outvalue == 0:
+            print("ERROR: command {} failed {}!\nMessage: {}\n{}".format(str(cmds), str(outvalue), output_stderr, traceback.format_exc()), file=sys.stderr)
+            raise mincError("ERROR: command {} failed {}!\nMessage: {}\n{}".format(str(cmds), str(outvalue), output_stderr, traceback.format_exc()))
+        return outvalue
+
+    @staticmethod
+    def execute_w_output(cmds, verbose=0):
+        """
+        Execute a command line, wait for it to finish and capture its standard output
+
+        cmds: list containing the command line
+        verbose: verbosity level, 0 suppresses the command echo
+
+        return: the captured standard output; raises mincError on failure
+        """
+        output=''
+        outvalue=0
+
+        if verbose > 0:
+            print(repr(cmds))
+        try:
+            p=subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            (output, outerr)=p.communicate()
+            if verbose > 0:
+                print(output.decode())
+            outvalue=p.wait()
+        except OSError as e:
+            print("ERROR: command {} Error:{}!\n{}".format(repr(cmds), str(e), traceback.format_exc()), file=sys.stderr)
+            raise mincError("ERROR: command {} Error:{}!\n{}".format(repr(cmds), str(e), traceback.format_exc()))
+        if not outvalue == 0:
+            print("Command: {} generated output:{} {}\nError:{}".format(' '.join(cmds), outvalue, output, outerr), file=sys.stderr)
+            raise mincError("ERROR: command {} failed {}!\nError:{}\n{}".format(repr(cmds), str(outvalue), outerr, traceback.format_exc()))
+        return output.decode()
+
+    @staticmethod
+    def command(cmds, inputs=None, outputs=None, timecheck=False, verbose=1):
+        """
+        Execute a command line and wait for it to finish, testing inputs and outputs
+
+        cmds: list containing the command line
+        inputs: list of files that must exist before the command is executed
+        outputs: list of files that should exist when the command finishes
+        verbose: if 0 no message will appear
+        timecheck: the command won't be executed if the output exists and is newer than the input file.
+
+        return: exit code (0, also when the outputs are already up to date); raises mincError on failure
+        """
+
+        if verbose > 0:
+            print(repr(cmds))
+
+        if not mincTools.checkfiles(inputs=inputs, outputs=outputs,
+                                    verbose=verbose, timecheck=timecheck):
+            return 0
+        outvalue=0
+        output_stderr=""
+        output=""
+        use_shell=not isinstance(cmds, list)
+        try:
+            if verbose < 2:
+                with open(os.devnull, "w") as fnull:
+                    p=subprocess.Popen(cmds, stdout=fnull, stderr=subprocess.PIPE, shell=use_shell)
+            else:
+                p=subprocess.Popen(cmds, stderr=subprocess.PIPE, shell=use_shell)
+
+            (output, output_stderr)=p.communicate()
+            outvalue=p.wait()
+
+        except OSError:
+            print("ERROR: command {} Error:{}!\nMessage: {}\n{}".format(str(cmds), str(outvalue), output_stderr, traceback.format_exc()), file=sys.stderr)
+            raise mincError("ERROR: command {} Error:{}!\nMessage: {}\n{}".format(str(cmds), str(outvalue), output_stderr, traceback.format_exc()))
+        if not outvalue == 0:
+            print("ERROR: command {} failed {}!\nMessage: {}\n{}".format(str(cmds), str(outvalue), output_stderr, traceback.format_exc()), file=sys.stderr)
+            raise mincError("ERROR: command {} failed {}!\nMessage: {}\n{}".format(str(cmds), str(outvalue), output_stderr, traceback.format_exc()))
+
+        outExists = False
+        if outputs is None:
+            outExists = True
+        elif isinstance(outputs, basestring):
+            outExists = os.path.exists(outputs)
+        else:
+            for o in outputs:
+                outExists = os.path.exists(o)
+                if not outExists:
+                    break
+
+        if not outExists:
+            raise mincError('ERROR: Command did not produce output: {}!'.format(str(cmds)))
+
+        return outvalue
+
+    @staticmethod
+    def qsub(comm, queue='all.q', name=None, logfile=None, depends=None):
+        """
+        Send the job to the SGE queue
+        TODO: improve dependencies and so on
+        """
+
+        if not name:
+            name = comm[0]
+        try:
+            qsub_comm = [
+                'qsub', '-cwd',
+                '-N', name,
+                '-j', 'y',
+                '-V', '-q',
+                queue,
+                ]
+            path = ''
+            if logfile:
+                path = os.path.abspath(logfile)
+                qsub_comm.extend(['-o', path])
+            if depends:
+                qsub_comm.extend(['-hold_jid', depends])
+
+            print(' - Name    ' + name)
+            print(' - Queue   ' + queue)
+            print(' - Cmd     ' + ' '.join(comm))
+            print(' - logfile ' + path)
+
+            cmds="#!/bin/bash\nhostname\n"
+            cmds+=' '.join(comm)+"\n"
+
+            p=subprocess.Popen(qsub_comm,
+                               stdin=subprocess.PIPE,
+                               stderr=subprocess.STDOUT)
+
+            # pass bytes to communicate(), works on both Python 2 and 3
+            p.communicate(cmds.encode('utf-8'))
+            # TODO: check error code?
+        finally:
+            pass
+
+    @staticmethod
+    def qsub_pe(comm, pe='all.pe', slots=1, name=None, logfile=None, depends=None):
+        """
+        Send the job to the SGE queue, using a parallel environment
+        TODO: improve dependencies and so on
+        """
+
+        if not name:
+            name = comm[0]
+        try:
+            qsub_comm = [
+                'qsub', '-cwd',
+                '-N', name,
+                '-j', 'y',
+                '-V', '-pe',
+                pe, str(slots)
+                ]
+            path = ''
+            if logfile:
+                path = os.path.abspath(logfile)
+                qsub_comm.extend(['-o', path])
+            if depends:
+                qsub_comm.extend(['-hold_jid', depends])
+
+            print(' - Name    ' + name)
+            print(' - PE      ' + pe)
+            print(' - Slots   ' + str(slots))
+            print(' - Cmd     ' + ' '.join(comm))
+            print(' - logfile ' + path)
+
+            cmds="#!/bin/bash\nhostname\n"
+            cmds+=' '.join(comm)+"\n"
+
+            p=subprocess.Popen(qsub_comm,
+                               stdin=subprocess.PIPE,
+                               stderr=subprocess.STDOUT)
+
+            p.communicate(cmds.encode('utf-8'))
+            # TODO: check error code?
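+            # One possible way to address the TODO above (a sketch, not part
+            # of the original code): Popen exposes the submission exit code
+            # after communicate(), so something like
+            #     if p.returncode != 0:
+            #         raise mincError('qsub submission failed for job {}'.format(name))
+            # would surface submission failures to the caller.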
+        finally:
+            pass
+
+    @staticmethod
+    def query_dimorder(input):
+        '''read the dimension order of the image inside a minc file'''
+
+        i = subprocess.Popen(['mincinfo', '-vardims', 'image', input],
+                             stdout=subprocess.PIPE).communicate()
+        return i[0].decode().rstrip('\n').split(' ')
+
+    @staticmethod
+    def query_attribute(input, attribute):
+        '''read the value of an attribute inside a minc file'''
+
+        i = subprocess.Popen(['mincinfo', '-attvalue', attribute, input],
+                             stdout=subprocess.PIPE).communicate()
+        return i[0].decode().rstrip('\n').rstrip(' ')
+
+    @staticmethod
+    def set_attribute(input, attribute, value):
+        '''set the value of an attribute inside a minc file;
+        if value is None - delete the attribute
+        '''
+        if value is None:
+            mincTools.execute(['minc_modify_header', input, '-delete', attribute])
+        elif isinstance(value, basestring):
+            mincTools.execute(['minc_modify_header', input, '-sinsert', attribute + '=' + value])
+        else:
+            # assume that it's a number
+            mincTools.execute(['minc_modify_header', input, '-dinsert', attribute + '=' + str(value)])
+
+    @staticmethod
+    def mincinfo(input):
+        """read basic information about a minc file
+        Arguments:
+            input -- input minc file
+        Returns a dict with one entry per dimension
+        """
+        # TODO: make this robust to errors!
+        _image_dims = subprocess.Popen(['mincinfo', '-vardims', 'image', input],
+                                       stdout=subprocess.PIPE).communicate()[0].decode().rstrip('\n').rstrip(' ').split(' ')
+
+        _req=['mincinfo']
+        for i in _image_dims:
+            _req.extend(['-dimlength', i,
+                         '-attvalue', '{}:start'.format(i),
+                         '-attvalue', '{}:step'.format(i),
+                         '-attvalue', '{}:direction_cosines'.format(i)])
+        _req.append(input)
+        _info= subprocess.Popen(_req,
+                                stdout=subprocess.PIPE).communicate()[0].decode().rstrip('\n').rstrip(' ').split("\n")
+
+        diminfo=collections.namedtuple('dimension', ['length', 'start', 'step', 'direction_cosines'])
+
+        _result={}
+        for i, j in enumerate(_image_dims):
+            _result[j]=diminfo(length=int(_info[i*4]),
+                               start=float(_info[i*4+1]),
+                               step=float(_info[i*4+2]),
+                               direction_cosines=[float(k) for k in _info[i*4+3].rstrip(' ').split(' ')])
+
+        return _result
+
+    def ants_linear_register(self, source, target, output_xfm, **kwargs):
+        """perform linear registration with ANTs, obsolete"""
+        return ants_registration.ants_linear_register(source, target, output_xfm, **kwargs)
+
+    def linear_register(self, source, target, output_xfm, **kwargs):
+        """perform linear registration"""
+        return registration.linear_register(source, target, output_xfm, **kwargs)
+
+    def linear_register_to_self(self, source, target, output_xfm, **kwargs):
+        """perform linear registration to self"""
+        return registration.linear_register_to_self(source, target, output_xfm, **kwargs)
+
+    def nl_xfm_to_elastix(self, xfm, elastix_par):
+        """Convert MINC style xfm into elastix style registration parameters"""
+        return elastix_registration.nl_xfm_to_elastix(xfm, elastix_par)
+
+    def nl_elastix_to_xfm(self, elastix_par, xfm, **kwargs):
+        """Convert elastix style parameter file into a nonlinear xfm file"""
+        return elastix_registration.nl_elastix_to_xfm(elastix_par, xfm, **kwargs)
+
+    def register_elastix(self, source, target, **kwargs):
+        """Perform registration with elastix"""
+        return elastix_registration.register_elastix(source, target, **kwargs)
+
+    def non_linear_register_ants(self, source, target, output_xfm, **kwargs):
+        """perform non-linear registration using ANTs,
+        WARNING: will create an inverted xfm named output_invert.xfm
+        """
+        return
ants_registration.non_linear_register_ants(source, target, output_xfm, **kwargs) + + def non_linear_register_ldd( + self, + source, target, + output_velocity, + **kwargs ): + """Use log-diffeomorphic demons to run registration""" + return dd_registration.non_linear_register_ldd(source,target,output_velocity,**kwargs) + + def non_linear_register_full( + self, + source, target, output_xfm, + **kwargs + ): + """perform non-linear registration""" + return registration.non_linear_register_full(source,target,output_xfm,**kwargs) + + def non_linear_register_increment( + self, source, target, output_xfm,** kwargs + ): + """perform incremental non-linear registration""" + return registration.non_linear_register_increment(source, target, output_xfm,** kwargs) + + def resample_smooth( + self, + input, + output, + transform=None, + like=None, + order=4, + uniformize=None, + unistep=None, + invert_transform=False, + resample=None, + datatype=None, + ): + """resample an image, interpreting voxels as intnsities + + Arguments: + input -- input minc file + output -- output minc file + transform -- (optional) transformation file + like -- (optional) reference file for sampling + order -- interpolation order for B-Splines , default 4 + uniformize -- (optional) uniformize volume to have identity direction + cosines and uniform sampling + unistep -- (optional) resample volume to have uniform steps + invert_transform -- invert input transform, default False + resample -- (optional) resample type, variants: + 'sinc', 'linear', 'cubic','nearest' - mincresample + otherwise use itk_resample + datatype -- output minc file data type, variants + 'byte','short','long','float','double' + """ + if os.path.exists(output): + return + + if not resample: + resample = self.resample + if resample == 'sinc': + cmd = ['mincresample', input, output, '-sinc', '-q'] + if transform: + cmd.extend(['-transform', transform]) + if like: + cmd.extend(['-like', like]) + else: + cmd.append('-use_input_sampling') + if invert_transform: + cmd.append('-invert_transform') + if uniformize: + raise mincError('Not implemented!') + if datatype: + cmd.append('-' + datatype) + self.command(cmd, inputs=[input], outputs=[output]) + elif resample == 'linear': + cmd = ['mincresample', input, output, '-trilinear', '-q'] + if transform: + cmd.extend(['-transform', transform]) + if like: + cmd.extend(['-like', like]) + else: + cmd.append('-use_input_sampling') + if invert_transform: + cmd.append('-invert_transform') + if uniformize: + raise mincError('Not implemented!') + if datatype: + cmd.append('-' + datatype) + self.command(cmd, inputs=[input], outputs=[output]) + elif resample == 'cubic': + cmd = ['mincresample', input, output, '-tricubic', '-q'] + if transform: + cmd.extend(['-transform', transform]) + if like: + cmd.extend(['-like', like]) + else: + cmd.append('-use_input_sampling') + if invert_transform: + cmd.append('-invert_transform') + if uniformize: + raise mincError('Not implemented!') + if datatype: + cmd.append('-' + datatype) + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + elif resample == 'nearest': + cmd = ['mincresample', input, output, '-nearest', '-q'] + if transform: + cmd.extend(['-transform', transform]) + if like: + cmd.extend(['-like', like]) + else: + cmd.append('-use_input_sampling') + if invert_transform: + cmd.append('-invert_transform') + if uniformize: + raise mincError('Not implemented!') + if datatype: + cmd.append('-' + datatype) + self.command(cmd, inputs=[input], outputs=[output], 
verbose=self.verbose) + else: + cmd = ['itk_resample', input, output, '--order', str(order)] + if transform: + cmd.extend(['--transform', transform]) + if like: + cmd.extend(['--like', like]) + if invert_transform: + cmd.append('--invert_transform') + if uniformize: + cmd.extend(['--uniformize', str(uniformize)]) + if unistep: + cmd.extend(['--unistep', str(unistep)]) + if datatype: + cmd.append('--' + datatype) + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + def resample_labels( + self, + input, + output, + transform=None, + like=None, + invert_transform=False, + order=None, + datatype=None, + remap=None, + aa=None, + baa=False, + uniformize=None, + unistep=None, + ): + """resample an image with discrete labels""" + if datatype is None: + datatype='byte' + + cmd = ['itk_resample', input, output, '--labels'] + + + if remap is not None: + if isinstance(remap, list): + remap=dict(remap) + + if isinstance(remap, dict): + if any(remap): + _remap="" + for (i,j) in remap.items(): _remap+='{} {};'.format(i,j) + cmd.extend(['--lut-string', _remap ]) + else: + cmd.extend(['--lut-string', str(remap) ]) + if transform is not None: + cmd.extend(['--transform', transform]) + if like is not None: + cmd.extend(['--like', like]) + if invert_transform: + cmd.append('--invert_transform') + if order is not None: + cmd.extend(['--order',str(order)]) + if datatype is not None: + cmd.append('--' + datatype) + if aa is not None: + cmd.extend(['--aa',str(aa)]) + if baa : + cmd.append('--baa') + if uniformize: + cmd.extend(['--uniformize', str(uniformize)]) + if unistep: + cmd.extend(['--unistep', str(unistep)]) + + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + + def resample_smooth_logspace( + self, + input, + output, + velocity=None, + like=None, + order=4, + invert_transform=False, + datatype=None, + ): + """resample an image """ + if os.path.exists(output): + return + + cmd = ['log_resample', input, output, '--order', str(order)] + if velocity: + cmd.extend(['--log_transform', velocity]) + if like: + cmd.extend(['--like', like]) + if invert_transform: + cmd.append('--invert_transform') + if datatype: + cmd.append('--' + datatype) + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + def resample_labels_logspace( + self, + input, + output, + velocity=None, + like=None, + invert_transform=False, + order=None, + datatype=None, + ): + """resample an image with discrete labels""" + if datatype is None: + datatype='byte' + + cmd = ['log_resample', input, output, '--labels'] + + + if velocity is not None: + cmd.extend(['--log_transform', velocity]) + if like is not None: + cmd.extend(['--like', like]) + if invert_transform: + cmd.append('--invert_transform') + if order is not None: + cmd.extend(['--order',str(order)]) + if datatype is not None: + cmd.append('--' + datatype) + + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + + def xfminvert(self, input, output): + """invert transformation""" + + self.command(['xfminvert', input, output], inputs=[input], + outputs=[output],verbose=self.verbose) + + def xfmavg( + self, + inputs, + output, + nl=False, + ): + """average transformations""" + + cmd = ['xfmavg'] + cmd.extend(inputs) + cmd.append(output) + if nl: + cmd.append('-ignore_linear') + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + + def xfmconcat(self, inputs, output): + """concatenate transformations""" + + cmd = ['xfmconcat'] + cmd.extend(inputs) + 
cmd.append(output) + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + + def xfm_v0_scaling(self, inputs, output): + """concatenate transformations""" + + cmd = ['xfm_v0_scaling.pl'] + cmd.extend(inputs) + cmd.append(output) + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + + def average( + self, + inputs, + output, + sdfile=None, + datatype=None, + ): + """average images""" + + cmd = ['mincaverage', '-q', '-clob'] + cmd.extend(inputs) + cmd.append(output) + + if sdfile: + cmd.extend(['-sdfile', sdfile]) + if datatype: + cmd.append(datatype) + cmd.extend(['-max_buffer_size_in_kb', '1000000', '-copy_header']) + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + def median( + self, + inputs, + output, + madfile=None, + datatype=None, + ): + """average images""" + + cmd = ['minc_median', '--clob'] + cmd.extend(inputs) + cmd.append(output) + + if madfile: + cmd.extend(['--mad', madfile]) + if datatype: + cmd.append(datatype) + + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + + def calc( + self, + inputs, + expression, + output, + datatype=None, + labels=False + ): + """apply mathematical expression to image(s)""" + + cmd = ['minccalc', '-copy_header','-q', '-clob', '-express', expression] + + if datatype: + cmd.append(datatype) + if labels: + cmd.append('-labels') + + cmd.extend(inputs) + cmd.append(output) + + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + def math( + self, + inputs, + operation, + output, + datatype=None, + ): + """apply mathematical operation to image(s)""" + + cmd = ['mincmath', '-q', '-clob', '-copy_header', '-'+operation] + + if datatype: + cmd.append(datatype) + + cmd.extend(inputs) + cmd.append(output) + + self.command(cmd, inputs=inputs, outputs=[output], verbose=self.verbose) + + + def stats(self, input, + stats, mask=None, + mask_binvalue=1, + val_floor=None, + val_ceil=None, + val_range=None, + single_value=True): + args=['mincstats',input,'-q'] + + if isinstance(stats, list): + args.extend(stats) + else: + args.append(stats) + + if mask is not None: + args.extend(['-mask',mask,'-mask_binvalue',str(mask_binvalue)]) + if val_floor is not None: + args.extend(['-floor',str(val_floor)]) + if val_ceil is not None: + args.extend(['-ceil',str(val_ceil)]) + if val_range is not None: + args.extend(['-range',str(val_range[0]),str(val_range[1])]) + + r=self.execute_w_output(args,verbose=self.verbose) + if single_value : + return float(r) + else: + return [float(i) for i in r.split(' ')] + + def similarity(self, reference, sample, ref_mask=None, sample_mask=None,method="msq"): + """Calculate image similarity metric""" + args=['itk_similarity',reference,sample,'--'+method] + + if ref_mask is not None: + args.extend(['--src_mask',ref_mask]) + if sample_mask is not None: + args.extend(['--target_mask',sample_mask]) + + r=self.execute_w_output(args,verbose=self.verbose) + return float(r) + + + def label_similarity(self, reference, sample, method="gkappa"): + """Calculate image similarity metric""" + args=['volume_gtc_similarity',reference, sample,'--'+method] + r=self.execute_w_output(args,verbose=self.verbose) + return float(r) + + def noise_estimate(self, input): + '''Estimate file noise (absolute)''' + args=['noise_estimate',input] + r=self.execute_w_output(args,verbose=self.verbose) + return float(r) + + def snr_estimate(self, input): + '''Estimate file SNR''' + args=['noise_estimate',input,'--snr'] + 
r=self.execute_w_output(args, verbose=self.verbose)
+        return float(r)
+
+    def log_average(self, inputs, output):
+        """perform log-average (geometric average)"""
+        # range() instead of xrange(), which does not exist on Python 3
+        tmp = ['log(A[%d])' % i for i in range(len(inputs))]
+        self.calc(inputs, 'exp((%s)/%d)' % ('+'.join(tmp), len(inputs)),
+                  output, datatype='-float')
+
+
+    def param2xfm(self, output, scales=None, translation=None, rotations=None, shears=None):
+        cmd = ['param2xfm', '-clobber', output]
+
+        if translation is not None:
+            cmd.extend(['-translation', str(translation[0]), str(translation[1]), str(translation[2])])
+        if rotations is not None:
+            cmd.extend(['-rotations', str(rotations[0]), str(rotations[1]), str(rotations[2])])
+        if scales is not None:
+            cmd.extend(['-scales', str(scales[0]), str(scales[1]), str(scales[2])])
+        if shears is not None:
+            cmd.extend(['-shears', str(shears[0]), str(shears[1]), str(shears[2])])
+
+        self.command(cmd, inputs=[], outputs=[output], verbose=self.verbose)
+
+
+    def flip_volume_x(self, input, output, labels=False, datatype=None):
+        '''flip along x axis'''
+        if not os.path.exists(self.tmp('flip_x.xfm')):
+            self.param2xfm(self.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0])
+        if labels:
+            self.resample_labels(input, output, order=0, transform=self.tmp('flip_x.xfm'), datatype=datatype)
+        else:
+            self.resample_smooth(input, output, order=0, transform=self.tmp('flip_x.xfm'), datatype=datatype)
+
+
+    def volume_pol(self, source, target, output,
+                   source_mask=None, target_mask=None,
+                   order=1, expfile=None, datatype=None):
+        """normalize intensities"""
+
+        if (expfile is None or os.path.exists(expfile)) and os.path.exists(output):
+            return
+
+        rm_expfile = False
+        if not expfile:
+            expfile = self.temp_file(suffix='.exp')
+            rm_expfile = True
+        try:
+            cmd = ['volume_pol',
+                   source, target,
+                   '--order', str(order),
+                   '--expfile', expfile,
+                   '--noclamp', '--clob']
+            if source_mask:
+                cmd.extend(['--source_mask', source_mask])
+            if target_mask:
+                cmd.extend(['--target_mask', target_mask])
+            self.command(cmd, inputs=[source, target],
+                         outputs=[expfile], verbose=self.verbose)
+            exp = open(expfile).read().rstrip()
+            cmd = ['minccalc', '-q', '-expression', exp, source, output]
+            if datatype:
+                cmd.append(datatype)
+            self.command(cmd, inputs=[source, target], outputs=[output], verbose=self.verbose)
+        finally:
+            if rm_expfile and os.path.exists(expfile):
+                os.unlink(expfile)
+
+    def nuyl_normalize(self, source, target, output,
+                       source_mask=None, target_mask=None,
+                       linear=False, steps=10):
+        """normalize intensities
+        Arguments:
+        source - input image
+        target - reference image
+        output - output image
+
+        Optional Arguments:
+        source_mask - input image mask (used for calculating the intensity mapping)
+        target_mask - reference image mask
+        linear - use linear intensity model (False)
+        steps - number of steps in the piece-wise linear approximation (10)
+        """
+        cmd = ['minc_nuyl', source, target, '--clob', output, '--steps', str(steps)]
+        if source_mask:
+            cmd.extend(['--source-mask', source_mask])
+        if target_mask:
+            cmd.extend(['--target-mask', target_mask])
+        if linear:
+            cmd.append('--linear')
+
+        self.command(cmd, inputs=[source, target],
+                     outputs=[output], verbose=self.verbose)
+
+
+    def nu_correct(self, input,
+                   output_imp=None, output_field=None, output_image=None,
+                   mask=None, mri3t=False, normalize=False,
+                   distance=None, downsample_field=None, datatype=None):
+        """apply N3"""
+
+        if (output_image is None or os.path.exists(output_image)) and \
+           (output_imp is None or os.path.exists(output_imp)) and \
+           (output_field is None or os.path.exists(output_field)):
+            return
+
+        output_imp_ = output_imp
+
+        # allocate a temporary mapping file when the caller did not ask for one
+        if output_imp_ is None:
+            output_imp_ = self.temp_file(suffix='.imp')
+
+        if output_field is not None:
+            output_field_tmp=self.temp_file(suffix='.mnc')
+
+        output_image_ = output_image
+
+        if not output_image_:
+            output_image_ = self.temp_file(suffix='nuc.mnc')
+
+        cmd = [
+            'nu_estimate',
+            '-stop', '0.00001',
+            '-fwhm', '0.1',
+            '-iterations', '1000',
+            input, output_imp_,
+            ]
+
+        if normalize:
+            cmd.append('-normalize_field')
+
+        if mask is not None:
+            cmd.extend(['-mask', mask])
+
+        if distance is not None:
+            cmd.extend(['-distance', str(distance)])
+        elif mri3t:
+            cmd.extend(['-distance', '50'])
+
+        try:
+            self.command(cmd, inputs=[input], outputs=[output_imp_],
+                         verbose=self.verbose)
+
+            cmd=['nu_evaluate',
+                 input, '-mapping',
+                 output_imp_,
+                 output_image_]
+
+            if mask is not None:
+                cmd.extend(['-mask', mask])
+
+            if output_field is not None:
+                cmd.extend(['-field', output_field_tmp])
+
+            self.command(cmd, inputs=[input], outputs=[output_image_],
+                         verbose=self.verbose)
+
+            if output_field is not None:
+                self.resample_smooth(output_field_tmp, output_field, datatype=datatype, unistep=downsample_field)
+
+        finally:
+            if output_imp is None:
+                os.unlink(output_imp_)
+            if output_image is None:
+                os.unlink(output_image_)
+
+    def n4(self, input,
+           output_corr=None, output_field=None,
+           mask=None, distance=200,
+           shrink=None, weight_mask=None,
+           datatype=None, iter=None,
+           sharpening=None, threshold=None,
+           downsample_field=None):
+
+        outputs=[]
+        if output_corr is not None:
+            outputs.append(output_corr)
+
+        if output_field is not None:
+            outputs.append(output_field)
+
+        if not self.checkfiles(inputs=[input], outputs=outputs,
+                               verbose=self.verbose):
+            return
+
+        _out=self.temp_file(suffix='.mnc')
+        _out_fld=self.temp_file(suffix='.mnc')
+
+        cmd=['N4BiasFieldCorrection', '-d', '3',
+             '-i', input, '--rescale-intensities', '1',
+             '--bspline-fitting', str(distance),
+             '--output', '[{},{}]'.format(_out, _out_fld)]
+
+        if mask is not None:
+            cmd.extend(['--mask-image', mask])
+        if weight_mask is not None:
+            cmd.extend(['--weight-image', weight_mask])
+        if shrink is not None:
+            cmd.extend(['--shrink-factor', str(shrink)])
+        if iter is not None:
+            if threshold is None: threshold=0.0
+            cmd.extend(['--convergence', '[{}]'.format(','.join([str(iter), str(threshold)]))])
+        if sharpening is not None:
+            cmd.extend(['--histogram-sharpening', '[{}]'.format(str(sharpening))])
+        self.command(cmd, inputs=[input], outputs=[_out, _out_fld],
+                     verbose=self.verbose)
+
+        if output_corr is not None:
+            if datatype is not None:
+                self.reshape(_out, output_corr, datatype=datatype)
+                os.unlink(_out)
+            else:
+                shutil.move(_out, output_corr)
+
+        if output_field is not None:
+            if downsample_field is not None:
+                self.resample_smooth(_out_fld, output_field, datatype=datatype, unistep=downsample_field)
+            else:
+                if datatype is not None:
+                    self.reshape(_out_fld, output_field, datatype=datatype)
+                    os.unlink(_out_fld)
+                else:
+                    shutil.move(_out_fld, output_field)
+
+    def difference_n4(self, input, model, output, mask=None, distance=None, iter=None):
+
+        diff = self.temp_file(suffix='.mnc')
+        _output = self.temp_file(suffix='_out.mnc')
+        try:
+            if mask:
+                self.calc([input, model, mask],
+                          'A[2]>0.5?A[0]-A[1]+100:0', diff)
+            else:
+                self.calc([input, model],
+                          'A[0]-A[1]+100', diff)
+
+            self.n4(diff, mask=mask, output_field=_output,
+                    distance=distance,
+                    iter=iter)
+            # fix, because N4 doesn't preserve dimension order
+            self.resample_smooth(_output, output, like=diff)
+
+        finally:
+            os.unlink(diff)
+            os.unlink(_output)
+
+
+    def apply_fld(self, input, fld, output):
+        '''Apply inhomogeneity correction field'''
+        _res_fld=self.temp_file(suffix='.mnc')
+        if not self.checkfiles(inputs=[input], outputs=[output],
+                               verbose=self.verbose):
+            return
+        try:
+            self.resample_smooth(fld, _res_fld, like=input, order=1)
+            self.calc([input, _res_fld],
+                      'A[1]>0.0?A[0]/A[1]:A[0]', output)
+        finally:
+            os.unlink(_res_fld)
+
+
+    def apply_n3_vol_pol(self, input, model, output,
+                         source_mask=None, target_mask=None, bias=None):
+
+        intermediate = input
+        try:
+            if bias:
+                intermediate = self.temp_file(suffix='.mnc')
+                self.calc([input, bias],
+                          'A[1]>0.5&&A[1]<1.5?A[0]/A[1]:A[0]',
+                          intermediate, datatype='-float')
+            self.volume_pol(
+                intermediate,
+                model,
+                output,
+                source_mask=source_mask,
+                target_mask=target_mask,
+                datatype='-short',
+                )
+        finally:
+            if bias:
+                os.unlink(intermediate)
+
+    def difference_n3(self, input, model, output,
+                      mask=None, mri3t=False, distance=None, normalize=True):
+
+        diff = self.temp_file(suffix='.mnc')
+
+        try:
+            if mask:
+                self.calc([input, model, mask],
+                          'A[2]>0.5?A[0]-A[1]+100:0', diff)
+            else:
+                self.calc([input, model],
+                          'A[0]-A[1]+100', diff)
+
+            self.nu_correct(diff, mask=mask, output_field=output,
+                            mri3t=mri3t, distance=distance,
+                            normalize=normalize)
+        finally:
+            os.unlink(diff)
+
+
+    def xfm_normalize(self, input, like, output, step=None, exact=False, invert=False):
+
+        # TODO: convert xfm_normalize.pl to python
+        cmd = ['xfm_normalize.pl', input, '--like', like, output]
+        if step:
+            cmd.extend(['--step', str(step)])
+        if exact:
+            cmd.extend(['--exact'])
+        if invert:
+            cmd.extend(['--invert'])
+
+        self.command(cmd, inputs=[input, like], outputs=[output], verbose=self.verbose)
+
+
+    def xfm_noscale(self, input, output, unscale=None):
+        """remove scaling from linear part of XFM"""
+
+        scale = self.temp_file(suffix='scale.xfm')
+        _unscale=unscale
+        if unscale is None:
+            _unscale = self.temp_file(suffix='unscale.xfm')
+        try:
+            (out, err) = subprocess.Popen(['xfm2param', input],
+                                          stdout=subprocess.PIPE).communicate()
+            # list comprehension instead of filter(): filter() returns an
+            # iterator on Python 3, which cannot be indexed or len()-ed
+            scale_ = [x for x in out.decode().split('\n') if re.match(r'^\-scale', x)]
+            if len(scale_) != 1:
+                raise mincError("Can't extract scale from " + input)
+            scale__ = re.split(r'\s+', scale_[0])
+            cmd = ['param2xfm']
+            cmd.extend(scale__)
+            cmd.extend([scale])
+            self.command(cmd, verbose=self.verbose)
+            self.xfminvert(scale, _unscale)
+            self.xfmconcat([input, _unscale], output)
+        finally:
+            if os.path.exists(scale):
+                os.unlink(scale)
+            if unscale != _unscale and os.path.exists(_unscale):
+                os.unlink(_unscale)
+
+
+    def blur(self, input, output, fwhm,
+             gmag=False, dx=False, dy=False, dz=False, output_float=False):
+        """Apply Gaussian blurring to the input image"""
+
+        cmd = ['fast_blur', input, output, '--fwhm', str(fwhm)]
+        if gmag:
+            cmd.append('--gmag')
+        if dx:
+            cmd.append('--dx')
+        if dy:
+            cmd.append('--dy')
+        if dz:
+            cmd.append('--dz')
+        if output_float:
+            cmd.append('--float')
+        self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose)
+
+    def blur_orig(self, input, output, fwhm,
+                  gmag=False, dx=False, dy=False, dz=False, output_float=False):
+        """Apply Gaussian blurring to the input image"""
+        with temp_files() as tmp:
+            p=tmp.tmp('blur_orig')
+            cmd = ['mincblur', input, p, '-fwhm',
str(fwhm),'-no_apodize'] + if gmag: + cmd.append('-gradient') + if output_float: + cmd.append('-float') + self.command(cmd, inputs=[input], outputs=[p+'_blur.mnc'], verbose=2) + + if gmag: + shutil.move(p+'_dxyz.mnc',output) + else: + shutil.move(p+'_blur.mnc',output) + + + def blur_vectors( + self, + input, + output, + fwhm, + gmag=False, + output_float=False, + dim=3 + ): + """Apply gauissian blurring to the input vector field """ + + if not self.checkfiles(inputs=[input], outputs=[output], + verbose=self.verbose): + return + + with temp_files() as tmp: + b=[] + dimorder=self.query_dimorder(input) + for i in range(dim): + self.reshape(input,tmp.tmp(str(i)+'.mnc'),dimrange='vector_dimension={}'.format(i)) + self.blur(tmp.tmp(str(i)+'.mnc'),tmp.tmp('blur_'+str(i)+'.mnc'),fwhm=fwhm,output_float=output_float,gmag=gmag) + b.append(tmp.tmp('blur_'+str(i)+'.mnc')) + # assemble + cmd=['mincconcat','-concat_dimension','vector_dimension','-quiet'] + cmd.extend(b) + cmd.append(tmp.tmp('output.mnc')) + self.command(cmd,inputs=b,outputs=[],verbose=self.verbose) + self.command(['mincreshape','-dimorder',','.join(dimorder),tmp.tmp('output.mnc'),output,'-quiet'], + inputs=[],outputs=[output],verbose=self.verbose) + # done + + + def nlm(self, + input,output, + beta=0.7, + patch=3, + search=1, + sigma=None, + ): + + if sigma is None: + sigma=self.noise_estimate(input) + + cmd=['itk_minc_nonlocal_filter', + input, output, + #'--beta', str(beta), + '--patch',str(patch), + '--search',str(search) + ] + + cmd.extend(['--sigma',str(sigma*beta)]) + + self.command(cmd, + inputs=[input], outputs=[output], verbose=self.verbose) + + + def anlm(self, + input, output, + beta=0.7, + patch=None, + search=None, + regularize=None, + ): + cmd=['itk_minc_nonlocal_filter', '--clobber', '--anlm', + input, output,'--beta', str(beta),] + + if patch is not None: cmd.extend(['--patch', str(patch)] ) + if search is not None: cmd.extend(['--search', str(search)] ) + if regularize is not None: cmd.extend(['--regularize',str(regularize)]) + + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + + def qc( + self, + input, + output, + image_range=None, + mask=None, + mask_range=None, + title=None, + labels=False, + labels_mask=False, + spectral_mask=False, + big=False, + clamp=False, + bbox=False, + dicrete=False, + dicrete_mask=False, + red=False, + green_mask=False, + cyanred=False, + cyanred_mask=False, + mask_lut=None + ): + + cmd = ['minc_qc.pl', input, output, '--verbose'] + + if image_range is not None: + cmd.extend(['--image-range', str(image_range[0]), + str(image_range[1])]) + if mask is not None: + cmd.extend(['--mask', mask]) + if mask_range is not None: + cmd.extend(['--mask-range', str(mask_range[0]), + str(mask_range[1])]) + if title is not None: + cmd.extend(['--title', title]) + if labels: + cmd.append('--labels') + if labels_mask: + cmd.append('--labels-mask') + if spectral_mask: + cmd.append('--spectral-mask') + if big: + cmd.append('--big') + if clamp: + cmd.append('--clamp') + if bbox: + cmd.append('--bbox') + if labels: + cmd.append('--labels') + if labels_mask: + cmd.append('--labels-mask') + if dicrete: + cmd.append('--discrete') + if dicrete_mask: + cmd.append('--discrete-mask') + if red: + cmd.append('--red') + if green_mask: + cmd.append('--green-mask') + if cyanred: + cmd.append('--cyanred') + if cyanred_mask: + cmd.append('--cyanred-mask') + if mask_lut is not None: + cmd.extend(['--mask-lut',mask_lut]) + self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose) + + 
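+    # A hypothetical usage sketch for the qc() wrapper above; the file names
+    # below are placeholders, not part of the original code:
+    #     with mincTools() as minc:
+    #         minc.qc('native_t1w.mnc', 'qc_t1w.jpg',
+    #                 mask='brain_mask.mnc', big=True, labels_mask=True)
+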
def aqc(self, input, output_prefix, slices=3):
+
+        cmd = ['minc_aqc.pl', input, output_prefix, '--slices', str(slices)]
+
+        self.command(cmd, inputs=[input], outputs=[output_prefix+'_0.jpg'], verbose=self.verbose)
+
+
+    def grid_determinant(self, input, output, datatype=None):
+        cmd=['grid_proc', '--det', input, output]
+        if datatype is not None:
+            cmd.append('--'+datatype)
+        self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose)
+
+    def grid_2_log(self, input, output, datatype=None, exp=False, factor=None):
+        cmd=['grid_2_log', input, output]
+        if datatype is not None:
+            cmd.append('--'+datatype)
+        if exp:
+            cmd.append('--exp')
+        if factor is not None:
+            cmd.extend(['--factor', str(factor)])
+        self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose)
+
+    def grid_magnitude(self, input, output, datatype=None):
+        cmd=['grid_proc', '--mag', input, output]
+        if datatype is not None:
+            cmd.append('--'+datatype)
+        self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose)
+
+    def reshape(self, input, output,
+                normalize=False, datatype=None,
+                image_range=None, valid_range=None,
+                dimorder=None, signed=False, unsigned=False,
+                dimrange=None):
+        """reshape minc files, TODO: add more options to fully support mincreshape"""
+        if signed and unsigned:
+            raise mincError('Attempt to reshape file to have both signed and unsigned datatype')
+        cmd = ['mincreshape', input, output, '-q']
+        if image_range:
+            cmd.extend(['-image_range', str(image_range[0]), str(image_range[1])])
+        if valid_range:
+            cmd.extend(['-valid_range', str(valid_range[0]), str(valid_range[1])])
+        if dimorder:
+            cmd.extend(['-dimorder', ','.join(dimorder)])
+        if datatype:
+            cmd.append('-' + datatype)
+        if normalize:
+            cmd.append('-normalize')
+        if signed:
+            cmd.append('-signed')
+        if unsigned:
+            cmd.append('-unsigned')
+        if dimrange is not None:
+            if type(dimrange) is list:
+                for i in dimrange:
+                    cmd.extend(['-dimrange', i])
+            else:
+                cmd.extend(['-dimrange', dimrange])
+
+        self.command(cmd, inputs=[input], outputs=[output], verbose=self.verbose)
+
+    def split_labels(self, input, output_prefix, normalize=True, lut=None, aa=True, expit=4.0):
+        '''split a multilabel file into a set of files, possibly with an anti-aliasing filter applied'''
+        output_file_pattern=output_prefix+'_%03d.mnc'
+        cmd=['itk_split_labels', input, output_file_pattern]
+        if aa: cmd.append('--aa')
+        if expit > 0: cmd.extend(['--expit', str(expit)])
+        if normalize: cmd.append('--normalize')
+        if lut is not None:
+            if isinstance(lut, list):
+                lut=dict(lut)
+
+            if isinstance(lut, dict):
+                if any(lut):
+                    _lut=""
+                    for (i, j) in lut.items(): _lut+='{} {};'.format(i, j)
+                    cmd.extend(['--lut-string', _lut])
+            else:
+                cmd.extend(['--lut-string', str(lut)])
+        # TODO: figure out how to effectively predict output file names
+        # and report it to the calling program
+        out_=self.execute_w_output(cmd).split("\n")
+        return dict([int(i[0]), i[1]] for i in [j.split(',') for j in out_ if len(j) > 0])
+
+    def merge_labels(self, input, output):
+        '''merge labels using voting'''
+        # allocate the temp CSV name once: temp_file() returns a new name on every call
+        _merge_csv=self.temp_file(suffix='merge.csv')
+        try:
+            data_type='--byte'
+            inputs=[_merge_csv]
+            with open(_merge_csv, 'w') as f:
+                for (i, j) in input.items():
+                    f.write("{},{}\n".format(i, j))
+                    inputs.append(j)
+                    if int(i) > 255: data_type='--short'
+
+            cmd=['itk_merge_labels', '--csv', _merge_csv, output, '--clob', data_type]
+            self.command(cmd, inputs=inputs, outputs=[output])
+        finally:
+            if os.path.exists(_merge_csv):
+                os.unlink(_merge_csv)
+
+    def label_stats(self, input,
+                    bg=False, label_defs=None, volume=None,
+                    median=False, mask=None):
+        '''calculate label statistics: label_id, volume, mx, my, mz, [mean/median]'''
+        _label_file=label_defs
+        cmd=['itk_label_stats', input]
+        if bg: cmd.append('--bg')
+        if label_defs is not None:
+            if isinstance(label_defs, list):
+                _label_file=self.temp_file(suffix='.csv')
+                with open(_label_file, 'w') as f:
+                    for i in label_defs:
+                        f.write("{},{}\n".format(i[0], i[1]))
+            elif isinstance(label_defs, dict):
+                _label_file=self.temp_file(suffix='.csv')
+                with open(_label_file, 'w') as f:
+                    # items() instead of iteritems() for Python 3 compatibility
+                    for i, j in label_defs.items():
+                        f.write("{},{}\n".format(i, j))
+
+            cmd.extend(['--labels', _label_file])
+        if volume is not None:
+            cmd.extend(['--volume', volume])
+        if median:
+            cmd.append('--median')
+
+        if mask is not None:
+            cmd.extend(['--mask', mask])
+
+        _out=self.execute_w_output(cmd).split("\n")
+        _out.pop(0)  # remove header
+        if _label_file != label_defs:
+            os.unlink(_label_file)
+        out=[]
+
+        if label_defs is not None:
+            out=[[(float(j) if k > 0 else j) for k, j in enumerate(i.split(','))] for i in _out if len(i) > 0]
+        else:
+            out=[[(float(j) if k > 0 else int(j)) for k, j in enumerate(i.split(','))] for i in _out if len(i) > 0]
+        return out
+
+    def skullregistration(self, source, target, source_mask, target_mask,
+                          output_xfm, init_xfm=None, stxtemplate_xfm=None):
+        """perform linear registration based on the skull segmentation"""
+
+        temp_dir = self.temp_dir(prefix='skullregistration') + os.sep
+        fit = '-xcorr'
+        try:
+            if init_xfm:
+                resampled_source = temp_dir + 'resampled_source.mnc'
+                resampled_source_mask = temp_dir + 'resampled_source_mask.mnc'
+                self.resample_smooth(source, resampled_source,
+                                     like=target, transform=init_xfm)
+                self.resample_labels(source_mask, resampled_source_mask,
+                                     like=target, transform=init_xfm)
+                source = resampled_source
+                source_mask = resampled_source_mask
+            if stxtemplate_xfm:
+                resampled_target = temp_dir + 'resampled_target.mnc'
+                resampled_target_mask = temp_dir + 'resampled_target_mask.mnc'
+                self.resample_smooth(target, resampled_target,
+                                     transform=stxtemplate_xfm)
+                self.resample_labels(target_mask, resampled_target_mask,
+                                     transform=stxtemplate_xfm)
+                target = resampled_target
+                target_mask = resampled_target_mask
+
+            self.command(['itk_morph', '--exp', 'D[3]', source_mask,
+                          temp_dir + 'dilated_source_mask.mnc'], verbose=self.verbose)
+            self.calc([temp_dir + 'dilated_source_mask.mnc', source],
+                      'A[0]<=0.1 && A[0]>=-0.1 ? A[1]:0',
+                      temp_dir + 'non_brain_source.mnc')
+            self.command(['mincreshape', '-dimrange', 'zspace=48,103',
+                          temp_dir + 'non_brain_source.mnc',
+                          temp_dir + 'non_brain_source_crop.mnc'], verbose=self.verbose)
+            self.command(['itk_morph', '--exp', 'D[3]', target_mask,
+                          temp_dir + 'dilated_target_mask.mnc'], verbose=self.verbose)
+            self.calc([temp_dir + 'dilated_target_mask.mnc', target],
+                      'A[0]<=0.1 && A[0]>=-0.1 ?
A[1]:0', temp_dir + + 'non_brain_target.mnc') + self.command(['mincreshape', '-dimrange', 'zspace=48,103', + temp_dir + 'non_brain_target.mnc', temp_dir + + 'non_brain_target_crop.mnc'], verbose=self.verbose ) + self.command([ + 'bestlinreg_s2', + '-clobber', '-lsq12', source, target, + temp_dir + '1.xfm', + ], verbose=self.verbose ) + self.command([ + 'minctracc', + '-quiet','-clobber', + fit, + '-step', '2', '2', '2', + '-simplex','1', + '-lsq12', + '-model_mask', target_mask, + source, + target, + temp_dir + '2.xfm', + '-transformation', temp_dir + '1.xfm', + ], verbose=self.verbose) + + self.command([ + 'minctracc', + '-quiet','-clobber', + fit, + '-step', '2', '2','2', + '-simplex', '1', + '-lsq12','-transformation', temp_dir + '2.xfm', + temp_dir + 'non_brain_source_crop.mnc', + temp_dir + 'non_brain_target_crop.mnc', + temp_dir + '3.xfm', + ], verbose=self.verbose) + + self.command([ + 'minctracc', + '-quiet', '-clobber', + fit, + '-step', '2', '2', '2', + '-transformation', + temp_dir + '3.xfm', + '-simplex', '1', + '-lsq12', + '-w_scales', '0', '0', '0', + '-w_shear', '0', '0', '0', + '-model_mask', target_mask, + source, + target, + temp_dir + '4.xfm', + ], verbose=self.verbose) + + if init_xfm: + self.command(['xfmconcat', init_xfm, temp_dir + '4.xfm' + , output_xfm, '-clobber'], verbose=self.verbose) + else: + shutil.move(temp_dir + '4.xfm', output_xfm) + finally: + shutil.rmtree(temp_dir) + + def binary_morphology(self, source, expression, target , binarize_bimodal=False, binarize_threshold=None): + cmd=['itk_morph',source,target] + if expression is not None and expression!='': + cmd.extend(['--exp',expression]) + if binarize_bimodal: + cmd.append('--bimodal') + elif binarize_threshold is not None : + cmd.extend(['--threshold',str(binarize_threshold) ]) + self.command(cmd,inputs=[source],outputs=[target], verbose=2) + + def grayscale_morphology(self, source, expression, target ): + cmd=['itk_g_morph',source,'--exp',expression,target] + self.command(cmd,inputs=[source],outputs=[target], verbose=self.verbose) + + + def patch_norm(self, input, output, + index=None, db=None, threshold=0.0, + spline=None, median=None, field=None, + subsample=2, iterations=None ): + + cmd=['flann_patch_normalize.pl',input,output] + if index is None or db is None: + raise mincError("patch normalize need index and db") + cmd.extend(['--db',db,'--index',index]) + + if median is not None: + cmd.extend(['--median',str(median)]) + + if spline is not None: + cmd.extend(['--spline',str(spline)]) + + if iterations is not None: + cmd.extend(['--iter',str(iterations)]) + + cmd.extend(['--subsample',str(subsample)]) + + if field is not None: + cmd.extend(['--field',field]) + + self.command(cmd,inputs=[input],outputs=[output], verbose=self.verbose) + + def autocrop(self,input,output, + isoexpand=None,isoextend=None): + # TODO: repimplement in python + cmd=['autocrop',input,output] + if isoexpand: cmd.extend(['-isoexpand',str(isoexpand)]) + if isoextend: cmd.extend(['-isoextend',str(isoextend)]) + self.command(cmd,inputs=[input],outputs=[output], verbose=self.verbose) + + + def run_mincbeast(self, input_scan, output_mask, + beast_lib=None, beast_conf=None, beast_res=2): + if beast_lib is None: + raise mincError('mincbeast needs location of library') + if beast_conf is None: + beast_conf=beast_lib+os.sep+'default.{}mm.conf'.format(beast_res) + + + cmd = [ + 'mincbeast', + beast_lib, + input_scan, + output_mask, + '-median', + '-fill', + '-conf', + beast_conf, + '-same_resolution'] + + 
self.command(cmd,inputs=[input_scan],outputs=[output_mask], verbose=2) + + + + def classify_clean( + self, input_scans, output_cls, + mask=None, xfm=None, model_dir=None, model_name=None + ): + """ + run classify_clean + """ + # TODO reimplement in python? + + cmd = ['classify_clean', '-clean_tags'] + + cmd.extend(input_scans) + + if mask is not None: cmd.extend(['-mask',mask,'-mask_tag','-mask_classified']) + if xfm is not None: cmd.extend(['-tag_transform',xfm]) + + if model_dir is not None and model_name is not None: + cmd.extend([ + '-tagdir', model_dir, + '-tagfile', "{}_ntags_1000_prob_90_nobg.tag".format(model_name), + '-bgtagfile', "{}_ntags_1000_bg.tag".format(model_name) + ]) + cmd.append(output_cls) + self.command(cmd,inputs=input_scans,outputs=[output_cls], verbose=self.verbose) + + def lobe_segment(self,in_cls,out_lobes, + nl_xfm=None,lin_xfm=None, + atlas_dir=None,template=None): + """ + Run lobe_segment script + """ + # TODO convert to python + identity=self.tmp('identity.xfm') + self.param2xfm(identity) + + if nl_xfm is None: + nl_xfm=identity + if lin_xfm is None: + lin_xfm=identity + + # TODO: setup sensible defaults here? + if atlas_dir is None or template is None: + raise mincError('lobe_segment needs atlas_dir and template') + + cmd = [ + 'lobe_segment', + nl_xfm, + lin_xfm, + in_cls, + out_lobes, + '-modeldir', atlas_dir, + '-template', template, + ] + + self.command(cmd, inputs=[in_cls],outputs=[out_lobes], verbose=self.verbose) + + def xfm2param(self, input): + """extract transformation parameters""" + + out=self.execute_w_output(['xfm2param', input]) + + params_=[ [ float(k) if s>0 else k for s,k in enumerate(re.split('\s+', l))] for l in out.decode().split('\n') if re.match('^\-', l) ] + + return { k[0][1:] :[k[1],k[2],k[3]] for k in params_ } + + + def defrag(self,input,output,stencil=6,max_connect=None,label=1): + cmd = [ + 'mincdefrag', + input,output, str(label),str(stencil) + ] + if max_connect is not None: + cmd.append(str(max_connect)) + self.command(cmd, inputs=[input],outputs=[output], verbose=self.verbose) + + def winsorize_intensity(self,input,output,pct1=1,pct2=95): + # obtain percentile + _threshold_1=self.stats(input,['-pctT',str(pct1)]) + _threshold_2=self.stats(input,['-pctT',str(pct2)]) + self.calc([input],"clamp(A[0],{},{})".format(_threshold_1,_threshold_2),output) + + +if __name__ == '__main__': + pass + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80 diff --git a/ipl/model/__init__.py b/ipl/model/__init__.py new file mode 100644 index 0000000..7076517 --- /dev/null +++ b/ipl/model/__init__.py @@ -0,0 +1,5 @@ +# model generations + + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model/filter.py b/ipl/model/filter.py new file mode 100644 index 0000000..d920b91 --- /dev/null +++ b/ipl/model/filter.py @@ -0,0 +1,505 @@ +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + + +def generate_flip_sample(input): + '''generate flipped version of sample''' + with mincTools() as m: + m.flip_volume_x(input.scan,input.scan_f) + + if input.mask is not None: + m.flip_volume_x(input.mask,input.mask_f,labels=True) + + return True + + +def normalize_sample(input, + output, + model, + bias_field=None, + ): + """Normalize sample intensity""" + + with mincTools() as m: + m.apply_n3_vol_pol( + input.scan, + model.scan, + output.scan, + 
source_mask=input.mask, + target_mask=model.mask, + bias=bias_field, + ) + output.mask=input.mask + return output + + +def average_samples( + samples, + output, + output_sd=None, + symmetric=False, + symmetrize=False, + median=False + ): + """average individual samples""" + try: + with mincTools() as m: + avg = [] + + out_scan=output.scan + out_mask=output.mask + + if symmetrize: + out_scan=m.tmp('avg.mnc') + out_mask=m.tmp('avg_mask.mnc') + + for s in samples: + avg.append(s.scan) + + if symmetric: + for s in samples: + avg.append(s.scan_f) + out_sd=None + + if output_sd: + out_sd=output_sd.scan + + if median: + m.median(avg, out_scan,madfile=out_sd) + else: + m.average(avg, out_scan,sdfile=out_sd) + + if symmetrize: + # TODO: replace flipping of averages with averaging of flipped + # some day + m.flip_volume_x(out_scan,m.tmp('flip.mnc')) + m.average([out_scan,m.tmp('flip.mnc')],output.scan) + + # average masks + if output.mask is not None: + avg = [] + for s in samples: + avg.append(s.mask) + + if symmetric: + for s in samples: + avg.append(s.mask_f) + + if not os.path.exists(output.mask): + + if symmetrize: + m.average(avg,m.tmp('avg_mask.mnc'),datatype='-float') + m.flip_volume_x(m.tmp('avg_mask.mnc'),m.tmp('flip_avg_mask.mnc')) + m.average([m.tmp('avg_mask.mnc'),m.tmp('flip_avg_mask.mnc')],m.tmp('sym_avg_mask.mnc'),datatype='-float') + + m.calc([m.tmp('sym_avg_mask.mnc')],'A[0]>=0.5?1:0',m.tmp('sym_avg_mask_.mnc'),datatype='-byte') + m.reshape(m.tmp('sym_avg_mask_.mnc'),output.mask,image_range=[0,1],valid_range=[0,1]) + else: + m.average(avg,m.tmp('avg_mask.mnc'),datatype='-float') + m.calc([m.tmp('avg_mask.mnc')],'A[0]>=0.5?1:0',m.tmp('avg_mask_.mnc'),datatype='-byte') + m.reshape(m.tmp('avg_mask_.mnc'),output.mask,image_range=[0,1],valid_range=[0,1]) + + + + + return True + except mincError as e: + print "Exception in average_samples:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_samples:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def average_stats( + avg, + sd, + ): + """calculate median sd within mask""" + try: + st=0 + with mincTools(verbose=2) as m: + if avg.mask is not None: + st=float(m.stats(sd.scan,'-median',mask=avg.mask)) + else: + st=float(m.stats(sd.scan,'-median')) + return st + except mincError as e: + print "mincError in average_stats:{}".format(repr(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_stats:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +def calculate_diff_bias_field(sample, model, output, symmetric=False, distance=100, n4=False ): + try: + with mincTools() as m: + if n4: + if model.mask is not None: + m.difference_n4(sample.scan, model.scan, output.scan, mask=model.mask, distance=distance) + else: + m.difference_n4(sample.scan, model.scan, output.scan, distance=distance ) + if symmetric: + if model.mask is not None: + m.difference_n4(sample.scan_f, model.scan, output.scan_f, mask=model.mask, distance=distance) + else: + m.difference_n4(sample.scan_f, model.scan, output.scan_f, distance=distance ) + else: + if model.mask is not None: + m.difference_n3(sample.scan, model.scan, output.scan, mask=model.mask, distance=distance, normalize=True) + else: + m.difference_n3(sample.scan, model.scan, output.scan, distance=distance, normalize=True ) + if symmetric: + if model.mask is not None: + m.difference_n3(sample.scan_f, model.scan, output.scan_f, mask=model.mask, distance=distance, 
normalize=True) + else: + m.difference_n3(sample.scan_f, model.scan, output.scan_f, distance=distance, normalize=True ) + return True + except mincError as e: + print "mincError in average_stats:{}".format(repr(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_stats:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +def average_bias_fields(samples, output, symmetric=False ): + try: + with mincTools() as m: + + avg = [] + + for s in samples: + avg.append(s.scan) + + if symmetric: + for s in samples: + avg.append(s.scan_f) + + m.log_average(avg, output.scan) + return True + except mincError as e: + print "mincError in average_stats:{}".format(repr(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_stats:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +def resample_and_correct_bias( + sample, + transform, + avg_bias, + output, + previous=None, + symmetric=False, + ): + # resample bias field and apply previous estimate + try: + with mincTools() as m: + + m.calc([sample.scan, avg_bias.scan], + 'A[1]>0.1?A[0]/A[1]:1.0', m.tmp('corr_bias.mnc')) + + m.resample_smooth(m.tmp('corr_bias.mnc'), + m.tmp('corr_bias2.mnc'), + like=sample.scan, + transform=transform.xfm, + invert_transform=True) + if previous: + m.calc([previous.scan, m.tmp('corr_bias2.mnc') ], 'A[0]*A[1]', + output.scan, datatype='-float') + else: + shutil.copy(m.tmp('corr_bias2.mnc'), output.scan) + + if symmetric: + m.calc([sample.scan_f, avg_bias.scan], + 'A[1]>0.1?A[0]/A[1]:1.0', m.tmp('corr_bias_f.mnc')) + + m.resample_smooth(m.tmp('corr_bias_f.mnc'), + m.tmp('corr_bias2_f.mnc'), + like=sample.scan, + transform=transform.xfm, + invert_transform=True) + if previous: + m.calc([previous.scan_f, m.tmp('corr_bias2_f.mnc')], + 'A[0]*A[1]', + output.scan_f, datatype='-float') + else: + shutil.copy(m.tmp('corr_bias2_f.mnc'), output.scan) + + return True + except mincError as e: + print "Exception in resample_and_correct_bias:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in resample_and_correct_bias:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def apply_linear_model( + lin_model, + parameters, + output_volume + ): + """build a volume, for a given regression model and parameters""" + try: + with mincTools() as m: + + if lin_model.N!=len(parameters): + raise mincError("Expected: {} parameters, got {}".format(lin_model.N,len(parameters))) + # create minccalc expression + _exp=[] + for i in range(0,lin_model.N): + _exp.append('A[{}]*{}'.format(i,parameters[i])) + exp='+'.join(_exp) + m.calc(lin_model.volume,exp,output_volume) + return True + except mincError as e: + print( "Exception in apply_linear_model:{}".format(str(e)) ) + traceback.print_exc(file=sys.stdout) + raise + except : + print( "Exception in apply_linear_model:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + + +def build_approximation(int_model, + geo_model , + parameters_int, + parameters_def, + output_scan, + output_transform, + noresample=False): + try: + with mincTools() as m: + + intensity=m.tmp('int_model.mnc') + if noresample: + intensity=output_scan.scan + #geometry=m.tmp('geometry_model.mnc') + + # TODO: paralelelize? 
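# [editor's aside: illustrative values, not part of the patch]
# apply_linear_model, invoked just below, synthesizes a volume from the N
# coefficient maps by building a minccalc expression 'A[0]*p0+A[1]*p1+...'
# from the parameter vector; for a hypothetical intercept-plus-age row:
parameters = [1.0, 34.5]
_exp = '+'.join(['A[{0}]*{1}'.format(i, p)
                 for (i, p) in enumerate(parameters)])
assert _exp == 'A[0]*1.0+A[1]*34.5'
# m.calc(lin_model.volume, _exp, output_volume) then evaluates it voxel-wise.
# (N.B. in resample_and_correct_bias above, the symmetric 'else' branch
# copies the flipped field to output.scan; output.scan_f looks intended.)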
+ if int_model.N>0: + apply_linear_model(int_model,parameters_int,intensity) + else: # not modelling intensity + intensity=int_model.volume[0] + + # if we have geometry information + if geo_model is not None and geo_model.N>0 : + apply_linear_model(geo_model, parameters_def, output_transform.grid ) + # create appropriate .xfm file + with open(output_transform.xfm,'w') as f: + f.write( +""" +MNI Transform File +Transform_Type = Linear; +Linear_Transform = + 1 0 0 0 + 0 1 0 0 + 0 0 1 0; +Transform_Type = Grid_Transform; +Displacement_Volume = {}; +""".format(os.path.basename(output_transform.grid)) + ) + + if not noresample: + m.resample_smooth(intensity, output_scan.scan, + transform=output_transform.xfm, + like=int_model.volume[0]) + + if int_model.mask is not None: + if noresample: + shutil.copyfile(int_model.mask, + output_scan.mask) + else: + m.resample_labels(int_model.mask, + output_scan.mask, + transform=output_transform.xfm, + like=int_model.volume[0]) + else: + output_scan.mask=None + else: # not modelling shape! + shutil.copyfile(intensity,output_scan.scan) + if int_model.mask is not None: + shutil.copyfile(int_model.mask, + output_scan.mask) + else: + output_scan.mask=None + output_transform=None + return (output_scan, output_transform) + except mincError as e: + print( "Exception in build_approximation:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print( "Exception in build_approximation:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +def voxel_regression(int_design_matrix, + def_design_matrix, + int_estimate, + def_estimate, + next_int_model, + next_def_model, + int_residual, + def_residual, + blur_int_model=None, + blur_def_model=None, + qc=False): + """Perform voxel-wise regression using given design matrix""" + try: + with mincTools() as m: + #print(repr(next_int_model)) + + # a small hack - assume that input directories are the same + _prefix=def_estimate[0].prefix + _design_vel=_prefix+os.sep+'regression_vel.csv' + _design_int=_prefix+os.sep+'regression_int.csv' + + #nomask=False + #for i in for i in int_estimate: + # if i.mask is None: + # nomask=True + _masks=[i.mask for i in int_estimate] + _inputs=[] + _outputs=[] + _outputs.extend(next_int_model.volume) + _outputs.extend(next_def_model.volume) + + with open(_design_vel,'w') as f: + for (i, l ) in enumerate(def_design_matrix): + f.write(os.path.basename(def_estimate[i].grid)) + f.write(',') + f.write(','.join([str(qq) for qq in l])) + f.write("\n") + _inputs.append(def_estimate[i].grid) + + with open(_design_int,'w') as f: + for (i, l ) in enumerate(int_design_matrix): + f.write(os.path.basename(int_estimate[i].scan)) + f.write(',') + f.write(','.join([str(qq) for qq in l])) + f.write("\n") + _inputs.append(int_estimate[i].scan) + + if not m.checkfiles(inputs=_inputs, outputs=_outputs): + return + + int_model=next_int_model + def_model=next_def_model + + if blur_int_model is not None: + int_model=MriDatasetRegress(prefix=m.tempdir, name='model_int',N=next_int_model.N,nomask=(next_int_model.mask is None)) + + if blur_def_model is not None: + def_model=MriDatasetRegress(prefix=m.tempdir,name='model_def', N=next_def_model.N, nomask=(next_def_model.mask is None)) + + + # regress deformations + m.command(['volumes_lm',_design_vel, def_model.volume[0].rsplit('_0.mnc',1)[0]], + inputs=[_design_vel], + outputs=def_model.volume, + verbose=2) + + + # regress intensity + m.command(['volumes_lm',_design_int, int_model.volume[0].rsplit('_0.mnc',1)[0]], + 
inputs=[_design_int], + outputs=int_model.volume, + verbose=2) + + if blur_def_model is not None: + # blur estimates + for (i,j) in enumerate(def_model.volume): + m.blur_vectors(def_model.volume[i],next_def_model.volume[i],blur_def_model) + # a hack preserve unfiltered RMS volume + shutil.copyfile(def_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + next_def_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + + if blur_int_model is not None: + for (i,j) in enumerate(int_model.volume): + m.blur(int_model.volume[i],next_int_model.volume[i],blur_int_model) + # a hack preserve unfiltered RMS volume + shutil.copyfile(int_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + next_int_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + + # average masks + if next_int_model.mask is not None: + m.average(_masks,m.tmp('avg_mask.mnc'),datatype='-float') + m.calc([m.tmp('avg_mask.mnc')],'A[0]>0.5?1:0',m.tmp('avg_mask_.mnc'),datatype='-byte') + m.reshape(m.tmp('avg_mask_.mnc'),next_int_model.mask,image_range=[0,1],valid_range=[0,1]) + + if qc: + m.qc(next_int_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + next_int_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.jpg' ) + + m.grid_magnitude(next_def_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + m.tmp('def_RMS_mag.mnc')) + m.qc(m.tmp('def_RMS_mag.mnc'), + next_def_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.jpg') + + #cleanup + #os.unlink(_design_vel) + #os.unlink(_design_int) + + + except mincError as e: + print( "Exception in voxel_regression:{}".format(str(e)) ) + traceback.print_exc(file=sys.stdout) + raise + except : + print( "Exception in voxel_regression:{}".format(sys.exc_info()[0]) ) + traceback.print_exc(file=sys.stdout) + raise + + +def average_stats_regression( + current_int_model, current_def_model, + int_residual, def_residual, + ): + """calculate median sd within mask for intensity and velocity""" + try: + sd_int=0.0 + sd_def=0.0 + with mincTools(verbose=2) as m: + m.grid_magnitude(def_residual.scan, m.tmp('mag.mnc')) + if current_int_model.mask is not None: + sd_int=float(m.stats(int_residual.scan,'-median',mask=current_int_model.mask)) + m.resample_smooth(m.tmp('mag.mnc'),m.tmp('mag_.mnc'),like=current_int_model.mask) + sd_def=float(m.stats(m.tmp('mag_.mnc'),'-median',mask=current_int_model.mask)) + else: + sd_int=float(m.stats(int_residual.scan,'-median')) + sd_def=float(m.stats(m.tmp('mag.mnc'),'-median')) + + return (sd_int,sd_def) + except mincError as e: + print "mincError in average_stats_regression:{}".format(repr(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_stats_regression:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model/generate_linear.py b/ipl/model/generate_linear.py new file mode 100644 index 0000000..01e4046 --- /dev/null +++ b/ipl/model/generate_linear.py @@ -0,0 +1,321 @@ +import shutil +import os +import sys +import csv +import traceback +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +from ipl.model.structures import MriDataset, MriTransform, MRIEncoder +from ipl.model.filter import generate_flip_sample, normalize_sample +from ipl.model.filter import average_samples,average_stats +from ipl.model.filter import calculate_diff_bias_field,average_bias_fields +from ipl.model.filter import resample_and_correct_bias +from ipl.model.registration import linear_register_step, 
non_linear_register_step +from ipl.model.registration import average_transforms +from ipl.model.resample import concat_resample, concat_resample_nl + +from scoop import futures, shared + + + +def generate_linear_average( + samples, + initial_model=None, + output_model=None, + output_model_sd=None, + prefix='.', + options={} + ): + """ perform iterative model creation""" + + # use first sample as initial model + if not initial_model: + initial_model = samples[0] + + # current estimate of template + current_model = initial_model + current_model_sd = None + transforms=[] + corr=[] + + bias_fields=[] + corr_transforms=[] + corr_samples=[] + sd=[] + + iterations=options.get('iterations',4) + cleanup=options.get('cleanup',False) + symmetric=options.get('symmetric',False) + reg_type=options.get('reg_type','-lsq12') + objective=options.get('objective','-xcorr') + linreg=options.get('linreg',None) + refine=options.get('refine',False) + biascorr=options.get('biascorr',False) + biasdist=options.get('biasdist',100)# default for 1.5T + qc=options.get('qc',False) + downsample=options.get('downsample',None) + use_n4=options.get('N4',False) + use_median=options.get('median',False) + + models=[] + models_sd=[] + models_bias=[] + + if symmetric: + flipdir=prefix+os.sep+'flip' + if not os.path.exists(flipdir): + os.makedirs(flipdir) + + flip_all=[] + # generate flipped versions of all scans + for (i, s) in enumerate(samples): + _s_name=os.path.basename(s.scan).rsplit('.gz',1)[0] + s.scan_f=prefix+os.sep+'flip'+os.sep+_s_name + + if s.mask is not None: + s.mask_f=prefix+os.sep+'flip'+os.sep+'mask_'+_s_name + + flip_all.append( futures.submit( generate_flip_sample,s ) ) + + futures.wait(flip_all, return_when=futures.ALL_COMPLETED) + + # go through all the iterations + for it in xrange(1,iterations+1): + + # this will be a model for next iteration actually + + # 1 register all subjects to current template + next_model =MriDataset(prefix=prefix, iter=it, name='avg') + next_model_sd =MriDataset(prefix=prefix, iter=it, name='sd') + next_model_bias=MriDataset(prefix=prefix, iter=it, name='bias') + + transforms=[] + + it_prefix=prefix+os.sep+str(it) + if not os.path.exists(it_prefix): + os.makedirs(it_prefix) + + inv_transforms=[] + fwd_transforms=[] + for (i, s) in enumerate(samples): + sample_xfm = MriTransform(name=s.name, prefix=it_prefix,iter=it,linear=True) + sample_inv_xfm = MriTransform(name=s.name+'_inv', prefix=it_prefix,iter=it,linear=True) + + prev_transform = None + prev_bias_field = None + + if it > 1 and refine: + prev_transform = corr_transforms[i] + + if it > 1 and biascorr: + prev_bias_field = bias_fields[i] + + + transforms.append( + futures.submit( + linear_register_step, + s, + current_model, + sample_xfm, + output_invert=sample_inv_xfm, + init_xfm=prev_transform, + symmetric=symmetric, + reg_type=reg_type, + objective=objective, + linreg=linreg, + work_dir=prefix, + bias=prev_bias_field, + downsample=downsample) + ) + inv_transforms.append(sample_inv_xfm) + fwd_transforms.append(sample_xfm) + + + # wait for jobs to finish + futures.wait(transforms, return_when=futures.ALL_COMPLETED) + + # remove information from previous iteration + if cleanup and it>1 : + for s in corr_samples: + s.cleanup(verbose=True) + for x in corr_transforms: + x.cleanup(verbose=True) + + # here all the transforms should exist + avg_inv_transform=MriTransform(name='avg_inv', prefix=it_prefix,iter=it,linear=True) + + # 2 average all transformations + result=futures.submit( + average_transforms, inv_transforms, 
avg_inv_transform, nl=False, symmetric=symmetric + # TODO: maybe make median transforms? + ) + futures.wait([result], return_when=futures.ALL_COMPLETED) + + corr=[] + corr_transforms=[] + corr_samples=[] + # 3 concatenate correction and resample + + for (i, s) in enumerate(samples): + prev_bias_field = None + if it > 1 and biascorr: + prev_bias_field = bias_fields[i] + + c=MriDataset( prefix=it_prefix,iter=it,name=s.name) + x=MriTransform(name=s.name+'_corr',prefix=it_prefix,iter=it,linear=True) + + corr.append(futures.submit( + concat_resample, s, fwd_transforms[i], avg_inv_transform, + c, x, current_model, symmetric=symmetric, qc=qc, bias=prev_bias_field + )) + corr_transforms.append(x) + corr_samples.append(c) + futures.wait(corr, return_when=futures.ALL_COMPLETED) + + # cleanup transforms + if cleanup : + for x in inv_transforms: + x.cleanup() + for x in fwd_transforms: + x.cleanup() + avg_inv_transform.cleanup() + + # 4 average resampled samples to create new estimate + result=futures.submit( + average_samples, corr_samples, next_model, next_model_sd, symmetric=symmetric, symmetrize=symmetric,median=use_median + ) + + if cleanup : + # remove previous template estimate + models.append(next_model) + models_sd.append(next_model_sd) + + futures.wait([result], return_when=futures.ALL_COMPLETED) + + if biascorr: + biascorr_results=[] + new_bias_fields=[] + + for (i, s) in enumerate(samples): + prev_bias_field = None + if it > 1: + prev_bias_field = bias_fields[i] + c=corr_samples[i] + x=corr_transforms[i] + b=MriDataset(prefix=it_prefix,iter=it,name='bias_'+s.name) + biascorr_results.append( futures.submit( + calculate_diff_bias_field, + c, next_model, b, symmetric=symmetric, distance=biasdist, + n4=use_n4 + ) ) + new_bias_fields.append(b) + + futures.wait(biascorr_results, return_when=futures.ALL_COMPLETED) + + result=futures.submit( + average_bias_fields, new_bias_fields, next_model_bias, symmetric=symmetric + ) + futures.wait([result], return_when=futures.ALL_COMPLETED) + biascorr_results=[] + new_corr_bias_fields=[] + for (i, s) in enumerate(samples): + prev_bias_field = None + if it > 1: + prev_bias_field = bias_fields[i] + c=corr_samples[i] + x=corr_transforms[i] + b=new_bias_fields[i] + out=MriDataset(prefix=it_prefix,iter=it,name='c_bias_'+s.name) + biascorr_results.append( futures.submit( + resample_and_correct_bias, b, x , next_model_bias, out, previous=prev_bias_field, symmetric=symmetric + ) ) + new_corr_bias_fields.append( out ) + futures.wait(biascorr_results, return_when=futures.ALL_COMPLETED) + + # swap bias fields + if biascorr: bias_fields=new_bias_fields + + current_model=next_model + current_model_sd=next_model_sd + sd.append( futures.submit(average_stats, next_model, next_model_sd ) ) + + # copy output to the destination + futures.wait(sd, return_when=futures.ALL_COMPLETED) + + with open(prefix+os.sep+'stats.txt','w') as f: + for s in sd: + f.write("{}\n".format(s.result())) + + if cleanup: + # keep the final model + models.pop() + models_sd.pop() + + # delete unneeded models + for m in models: + m.cleanup() + for m in models_sd: + m.cleanup() + + results={ + 'model': current_model, + 'model_sd': current_model_sd, + 'xfm': corr_transforms, + 'biascorr': bias_fields, + 'scan': corr_samples, + 'symmetric': symmetric + } + + with open(prefix+os.sep+'results.json','w') as f: + json.dump(results,f,indent=1,cls=MRIEncoder) + + return results + + + +def generate_linear_model(samples,model=None,mask=None,work_prefix=None,options={}): + internal_sample=[] + + try: + for i in 
samples: + s=MriDataset(scan=i[0],mask=i[1]) + internal_sample.append(s) + + internal_model=None + if model is not None: + internal_model=MriDataset(scan=model,mask=mask) + + if work_prefix is not None and not os.path.exists(work_prefix): + os.makedirs(work_prefix) + + return generate_linear_average(internal_sample,internal_model,prefix=work_prefix,options=options) + except mincError as e: + print "Exception in generate_linear_model:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in generate_linear_model:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def generate_linear_model_csv(input_csv,model=None,mask=None,work_prefix=None,options={}): + internal_sample=[] + + with open(input_csv, 'r') as csvfile: + reader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONE) + for row in reader: + internal_sample.append(MriDataset(scan=row[0],mask=row[1])) + + internal_model=None + if model is not None: + internal_model=MriDataset(scan=model,mask=mask) + + if work_prefix is not None and not os.path.exists(work_prefix): + os.makedirs(work_prefix) + + return generate_linear_average(internal_sample,internal_model,prefix=work_prefix,options=options) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model/generate_nonlinear.py b/ipl/model/generate_nonlinear.py new file mode 100644 index 0000000..56f7563 --- /dev/null +++ b/ipl/model/generate_nonlinear.py @@ -0,0 +1,342 @@ +import shutil +import os +import sys +import csv +import traceback +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +from ipl.model.structures import MriDataset, MriTransform,MRIEncoder +from ipl.model.filter import generate_flip_sample, normalize_sample +from ipl.model.filter import average_samples,average_stats +from ipl.model.filter import calculate_diff_bias_field,average_bias_fields +from ipl.model.filter import resample_and_correct_bias + +from ipl.model.registration import linear_register_step +from ipl.model.registration import non_linear_register_step +from ipl.model.registration import dd_register_step +from ipl.model.registration import ants_register_step +from ipl.model.registration import elastix_register_step +from ipl.model.registration import average_transforms +from ipl.model.resample import concat_resample +from ipl.model.resample import concat_resample_nl + +from scoop import futures, shared + + +def generate_nonlinear_average( + samples, + initial_model =None, + output_model =None, + output_model_sd=None, + prefix='.', + options={}, + skip=0, + stop_early=100000 + ): + """ perform iterative model creation""" + + # use first sample as initial model + if not initial_model: + initial_model = samples[0] + + # current estimate of template + current_model = initial_model + current_model_sd = None + + transforms=[] + corr=[] + + bias_fields=[] + corr_transforms=[] + sd=[] + corr_samples=[] + + protocol=options.get('protocol', [{'iter':4,'level':32}, + {'iter':4,'level':32}] ) + + cleanup= options.get('cleanup',False) + symmetric= options.get('symmetric',False) + parameters= options.get('parameters',None) + refine= options.get('refine',True) + qc= options.get('qc',False) + downsample_= options.get('downsample',None) + use_dd= options.get('use_dd',False) + use_ants= options.get('use_ants',False) + use_elastix= options.get('use_elastix',False) + start_level= options.get('start_level',None) + use_median= options.get('median',False) + + 
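# [editor's aside: hypothetical values, not part of the patch] the options
# read above could be passed to generate_nonlinear_average as, e.g.:
options = {
    'protocol': [{'iter': 4, 'level': 16},  # 4 iterations on a 16mm grid
                 {'iter': 4, 'level': 8}],  # then 4 more at 8mm
    'symmetric': True,     # also fit x-flipped scans, average both fits
    'refine': True,        # seed each iteration with the previous transform
    'cleanup': False,      # keep per-iteration intermediates on disk
    'downsample': 4,       # may also be overridden per protocol stage
}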
models=[] + models_sd=[] + + if symmetric: + flipdir=prefix+os.sep+'flip' + if not os.path.exists(flipdir): + os.makedirs(flipdir) + + flip_all=[] + # generate flipped versions of all scans + for (i, s) in enumerate(samples): + _s_name=os.path.basename(s.scan).rsplit('.gz',1)[0] + s.scan_f=prefix+os.sep+'flip'+os.sep+_s_name + + if s.mask is not None: + s.mask_f=prefix+os.sep+'flip'+os.sep+'mask_'+_s_name + + flip_all.append( futures.submit( generate_flip_sample,s ) ) + + futures.wait(flip_all, return_when=futures.ALL_COMPLETED) + # go through all the iterations + it=0 + for (i,p) in enumerate(protocol): + downsample=p.get('downsample',downsample_) + for j in xrange(1,p['iter']+1): + it+=1 + if it>stop_early: + break + # this will be a model for next iteration actually + + # 1 register all subjects to current template + next_model=MriDataset(prefix=prefix,iter=it,name='avg') + next_model_sd=MriDataset(prefix=prefix,iter=it,name='sd') + transforms=[] + + it_prefix=prefix+os.sep+str(it) + if not os.path.exists(it_prefix): + os.makedirs(it_prefix) + + inv_transforms=[] + fwd_transforms=[] + + start=None + if it==1: + start=start_level + + for (i, s) in enumerate(samples): + sample_xfm=MriTransform(name=s.name,prefix=it_prefix,iter=it) + sample_inv_xfm=MriTransform(name=s.name+'_inv',prefix=it_prefix,iter=it) + + prev_transform = None + prev_bias_field = None + + if it > 1: + if refine: + prev_transform = corr_transforms[i] + else: + start=start_level # TWEAK? + + if it>skip and itskip and it1 : + # remove information from previous iteration + for s in corr_samples: + s.cleanup(verbose=True) + for x in corr_transforms: + x.cleanup(verbose=True) + + # here all the transforms should exist + avg_inv_transform=MriTransform(name='avg_inv', prefix=it_prefix, iter=it) + + # 2 average all transformations + if it>skip and itskip and itskip and itskip and it1: + # remove previous template estimate + models.append(next_model) + models_sd.append(next_model_sd) + + current_model=next_model + current_model_sd=next_model_sd + + if it>skip and it_eps: # this is non-identity matrix + all_nonlinear&=False + else: + # TODO: if grid have to be inverted! + (grid_file,grid_invert)=x.get_grid_transform(1) + input_grids.append(grid_file) + elif x.get_n_type(1)==minc2_xfm.MINC2_XFM_GRID_TRANSFORM: + # TODO: if grid have to be inverted! 
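# [editor's aside] xfmavg, continued below, averages purely linear
# transforms in the matrix-log domain, expm(mean(logm(T_i))) (the
# log-Euclidean mean), and purely nonlinear ones by voxel-wise averaging
# of their displacement grids; mixing the two kinds raises an exception.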
+ (grid_file,grid_invert)=x.get_grid_transform(0) + input_grids.append(grid_file) + + if all_linear: + acc=np.asmatrix(np.zeros([4,4],dtype=np.complex)) + for i in input_xfms: + print(i) + acc+=scipy.linalg.logm(i) + + acc/=len(input_xfms) + acc=np.asarray(scipy.linalg.expm(acc).real,'float64','C') + + x=minc2_xfm() + x.append_linear_transform(acc) + x.save(output) + + elif all_nonlinear: + + output_grid=output.rsplit('.xfm',1)[0]+'_grid_0.mnc' + + with mincTools(verbose=2) as m: + m.average(input_grids,output_grid) + + x=minc2_xfm() + x.append_grid_transform(output_grid, False) + x.save(output) + else: + raise Exception("Mixed XFM files provided as input") + +def linear_register_step( + sample, + model, + output, + output_invert=None, + init_xfm=None, + symmetric=False, + reg_type='-lsq12', + objective='-xcorr', + linreg=None, + work_dir=None, + bias=None, + downsample=None, + avg_symmetric=True + ): + """perform linear registration to the model, and calculate inverse""" + + try: + _init_xfm=None + _init_xfm_f=None + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + with mincTools() as m: + scan=sample.scan + + if bias is not None: + m.calc([sample.scan,bias.scan],'A[0]*A[1]',m.tmp('corr.mnc')) + scan=m.tmp('corr.mnc') + + if symmetric: + scan_f=sample.scan_f + + if bias is not None: + m.calc([sample.scan_f,bias.scan_f],'A[0]*A[1]',m.tmp('corr_f.mnc')) + scan_f=m.tmp('corr_f.mnc') + + _out_xfm=output.xfm + _out_xfm_f=output.xfm_f + + if avg_symmetric: + _out_xfm=m.tmp('straight.xfm') + _out_xfm_f=m.tmp('flipped.xfm') + + ipl.registration.linear_register( + scan, + model.scan, + _out_xfm, + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + objective=objective, + parameters=reg_type, + conf=linreg, + downsample=downsample, + #work_dir=work_dir + ) + ipl.registration.linear_register( + scan_f, + model.scan, + _out_xfm_f, + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm_f, + objective=objective, + parameters=reg_type, + conf=linreg, + downsample=downsample, + #work_dir=work_dir + ) + + if avg_symmetric: + m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0,1.0,1.0] ) + m.xfmconcat([m.tmp('flip_x.xfm'), _out_xfm_f , m.tmp('flip_x.xfm')], m.tmp('double_flipped.xfm')) + + xfmavg([_out_xfm,m.tmp('double_flipped.xfm')],output.xfm) + m.xfmconcat([m.tmp('flip_x.xfm'), output.xfm , m.tmp('flip_x.xfm')], output.xfm_f ) + + else: + ipl.registration.linear_register( + scan, + model.scan, + output.xfm, + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + objective=objective, + parameters=reg_type, + conf=linreg, + downsample=downsample + #work_dir=work_dir + ) + if output_invert is not None: + m.xfminvert(output.xfm, output_invert.xfm) + + if symmetric: + m.xfminvert(output.xfm_f, output_invert.xfm_f) + + return True + except mincError as e: + print "Exception in linear_register_step:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in linear_register_step:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def non_linear_register_step( + sample, + model, + output, + output_invert=None, + init_xfm=None, + level=32, + start=None, + symmetric=False, + parameters=None, + work_dir=None, + downsample=None, + avg_symmetric=True + ): + """perform linear registration to the model, and calculate inverse""" + + try: + _init_xfm=None + _init_xfm_f=None + + if start is None: + start=level + + if init_xfm is not None: + 
_init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + with mincTools() as m: + + if symmetric: + + if m.checkfiles(inputs=[sample.scan,model.scan,sample.scan_f], + outputs=[output.xfm,output.xfm_f]): + + ipl.registration.non_linear_register_full( + sample.scan, + model.scan, + m.tmp('forward.xfm'), + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start, + downsample=downsample, + #work_dir=work_dir + ) + + ipl.registration.non_linear_register_full( + sample.scan_f, + model.scan, + m.tmp('forward_f.xfm'), + source_mask=sample.mask_f, + target_mask=model.mask, + init_xfm=_init_xfm_f, + parameters=parameters, + level=level, + start=start, + downsample=downsample, + #work_dir=work_dir + ) + + if avg_symmetric: + m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0,1.0,1.0] ) + m.xfmconcat([m.tmp('flip_x.xfm'), m.tmp('forward_f.xfm') , m.tmp('flip_x.xfm')], m.tmp('forward_f_f.xfm')) + + m.xfm_normalize(m.tmp('forward.xfm'),model.scan,m.tmp('forward_n.xfm'),step=level) + m.xfm_normalize(m.tmp('forward_f_f.xfm'),model.scan,m.tmp('forward_f_f_n.xfm'),step=level) + + xfmavg([m.tmp('forward_n.xfm'),m.tmp('forward_f_f_n.xfm')],output.xfm) + m.xfmconcat([m.tmp('flip_x.xfm'), output.xfm , m.tmp('flip_x.xfm')], m.tmp('output_f.xfm' )) + m.xfm_normalize(m.tmp('output_f.xfm'),model.scan,output.xfm_f,step=level) + + else: + m.xfm_normalize(m.tmp('forward.xfm'),model.scan,output.xfm,step=level) + m.xfm_normalize(m.tmp('forward_f.xfm'),model.scan,output.xfm_f,step=level) + + else: + if m.checkfiles(inputs=[sample.scan,model.scan], + outputs=[output.xfm]): + + ipl.registration.non_linear_register_full( + sample.scan, + model.scan, + m.tmp('forward.xfm'), + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start, + downsample=downsample, + #work_dir=work_dir + ) + m.xfm_normalize(m.tmp('forward.xfm'),model.scan,output.xfm,step=level) + + if output_invert is not None and m.checkfiles(inputs=[], outputs=[output_invert.xfm]): + m.xfm_normalize(m.tmp('forward.xfm'),model.scan,output_invert.xfm,step=level,invert=True) + if symmetric: + m.xfm_normalize(m.tmp('forward_f.xfm'),model.scan,output_invert.xfm_f,step=level,invert=True) + + return True + except mincError as e: + print "Exception in non_linear_register_step:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in non_linear_register_step:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def dd_register_step( + sample, + model, + output, + output_invert=None, + init_xfm=None, + level=32, + start=None, + symmetric=False, + parameters=None, + work_dir=None, + downsample=None, + avg_symmetric=True + ): + """perform linear registration to the model, and calculate inverse""" + + try: + _init_xfm=None + _init_xfm_f=None + + if start is None: + start=level + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + with mincTools() as m: + + if symmetric: + + if m.checkfiles(inputs=[sample.scan,model.scan,sample.scan_f], + outputs=[output.xfm,output.xfm_f]): + + ipl.dd_registration.non_linear_register_dd( + sample.scan, + model.scan, + m.tmp('forward.xfm'), + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start, + downsample=downsample, + #work_dir=work_dir + ) + + ipl.dd_registration.non_linear_register_dd( + sample.scan_f, + 
model.scan, + m.tmp('forward_f.xfm'), + source_mask=sample.mask_f, + target_mask=model.mask, + init_xfm=_init_xfm_f, + parameters=parameters, + level=level, + start=start, + downsample=downsample, + #work_dir=work_dir + ) + + if avg_symmetric: + m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0,1.0,1.0] ) + m.xfmconcat([m.tmp('flip_x.xfm'), m.tmp('forward_f.xfm') , m.tmp('flip_x.xfm')], m.tmp('forward_f_f.xfm')) + + m.xfm_normalize(m.tmp('forward.xfm'),model.scan,m.tmp('forward_n.xfm'),step=level) + m.xfm_normalize(m.tmp('forward_f_f.xfm'),model.scan,m.tmp('forward_f_f_n.xfm'),step=level) + + xfmavg([m.tmp('forward_n.xfm'),m.tmp('forward_f_f_n.xfm')],output.xfm) + m.xfmconcat([m.tmp('flip_x.xfm'), output.xfm , m.tmp('flip_x.xfm')], m.tmp('output_f.xfm' )) + m.xfm_normalize(m.tmp('output_f.xfm'),model.scan,output.xfm_f,step=level) + + else: + m.xfm_normalize(m.tmp('forward.xfm'),model.scan,output.xfm,step=level) + m.xfm_normalize(m.tmp('forward_f.xfm'),model.scan,output.xfm_f,step=level) + + else: + if m.checkfiles(inputs=[sample.scan,model.scan], + outputs=[output.xfm]): + + ipl.dd_registration.non_linear_register_dd( + sample.scan, + model.scan, + m.tmp('forward.xfm'), + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start, + downsample=downsample, + #work_dir=work_dir + ) + m.xfm_normalize(m.tmp('forward.xfm'),model.scan,output.xfm,step=level) + + if output_invert is not None and m.checkfiles(inputs=[], outputs=[output_invert.xfm]): + m.xfm_normalize(m.tmp('forward.xfm'),model.scan,output_invert.xfm,step=level,invert=True) + if symmetric: + m.xfm_normalize(m.tmp('forward_f.xfm'),model.scan,output_invert.xfm_f,step=level,invert=True) + + return True + except mincError as e: + print "Exception in non_linear_register_step:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in non_linear_register_step:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +def ants_register_step( + sample, + model, + output, + output_invert=None, + init_xfm=None, + level=32, + start=None, + symmetric=False, + parameters=None, + work_dir=None, + downsample=None, + avg_symmetric=True + ): + """perform linear registration to the model, and calculate inverse""" + + try: + _init_xfm=None + _init_xfm_f=None + + if start is None: + start=level + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + with mincTools() as m: + out=m.tmp('forward') + out_f=m.tmp('forward_f') + if symmetric: + + if m.checkfiles(inputs=[sample.scan,model.scan,sample.scan_f], + outputs=[output.xfm, output.xfm_f]): + + ipl.ants_registration.non_linear_register_ants2( + sample.scan, + model.scan, + out+'.xfm', + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start, + downsample=downsample, + #work_dir=work_dir + ) + + ipl.ants_registration.non_linear_register_ants2( + sample.scan_f, + model.scan, + out_f+'.xfm', + source_mask=sample.mask_f, + target_mask=model.mask, + init_xfm=_init_xfm_f, + parameters=parameters, + level=level, + start=start, + downsample=downsample, + #work_dir=work_dir + ) + + if avg_symmetric: + m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0,1.0,1.0] ) + m.xfmconcat([m.tmp('flip_x.xfm'), out_f+'.xfm', m.tmp('flip_x.xfm')], m.tmp('forward_f_f.xfm')) + + m.xfm_normalize(out+'.xfm', model.scan, m.tmp('forward_n.xfm'),step=level) + 
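# [editor's aside: numeric toy, not part of the patch] the
# param2xfm/xfmconcat/xfmavg pattern used by all the *_register_step
# functions maps the transform fitted on the x-flipped scan back into
# unflipped space by conjugating with the flip F = diag(-1,1,1,1), then
# averages it with the straight fit:
import numpy as np
import scipy.linalg

F = np.diag([-1.0, 1.0, 1.0, 1.0])
T = np.eye(4)
T[0, 3] = 2.0     # straight fit: +2mm translation along x
T_f = np.eye(4)
T_f[0, 3] = -2.0  # fit against the flipped scan: mirrored translation
T_ff = np.dot(np.dot(F, T_f), F)  # conjugated back: again +2mm along x

# log-Euclidean average, as in xfmavg's linear branch:
T_avg = scipy.linalg.expm(
    (scipy.linalg.logm(T) + scipy.linalg.logm(T_ff)) / 2.0).real
assert np.allclose(T_avg, T)  # symmetric estimates average to the same fit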
m.xfm_normalize(m.tmp('forward_f_f.xfm'),model.scan,m.tmp('forward_f_f_n.xfm'),step=level) + + xfmavg([m.tmp('forward_n.xfm'),m.tmp('forward_f_f_n.xfm')],output.xfm) + m.xfmconcat([m.tmp('flip_x.xfm'), output.xfm , m.tmp('flip_x.xfm')], m.tmp('output_f.xfm' )) + m.xfm_normalize(out_f+'.xfm',model.scan,output.xfm_f,step=level) + + else: + m.xfm_normalize(out+'.xfm',model.scan,output.xfm,step=level) + m.xfm_normalize(out_f+'.xfm',model.scan,output.xfm_f,step=level) + + else: + if m.checkfiles(inputs=[sample.scan,model.scan], + outputs=[output.xfm]): + + ipl.ants_registration.non_linear_register_ants2( + sample.scan, + model.scan, + out+'.xfm', + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start, + downsample=downsample, + #work_dir=work_dir + ) + m.xfm_normalize(out+'.xfm',model.scan,output.xfm,step=level) + + if output_invert is not None and m.checkfiles(inputs=[], outputs=[output_invert.xfm]): + m.xfm_normalize(out+'_inverse.xfm',model.scan,output_invert.xfm,step=level) + if symmetric: + m.xfm_normalize(out_f+'_inverse.xfm',model.scan,output_invert.xfm_f,step=level) + + return True + except mincError as e: + print "Exception in non_linear_register_step:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in non_linear_register_step:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +def elastix_register_step( + sample, + model, + output, + output_invert=None, + init_xfm=None, + level=32, + start=None, + symmetric=False, + parameters=None, + work_dir=None, + downsample=None, + avg_symmetric=True + ): + """perform linear registration to the model, and calculate inverse""" + + try: + _init_xfm=None + _init_xfm_f=None + + if start is None: + start=level + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + # setup parameters appropriate for given level + elx_parameters=parameters.get(str(level),{}) + downsample_grid=elx_parameters.get('downsample_grid',level/2.0) + + with mincTools() as m: + out=m.tmp('forward') + out_f=m.tmp('forward_f') + if symmetric: + + if m.checkfiles(inputs=[sample.scan,model.scan,sample.scan_f], + outputs=[output.xfm, output.xfm_f]): + + ipl.elastix_registration.register_elastix( + sample.scan, + model.scan, + output_xfm=out+'.xfm', + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=elx_parameters, + downsample_grid=downsample_grid, + downsample=downsample, + nl=True + ) + + ipl.elastix_registration.register_elastix( + sample.scan_f, + model.scan, + output_xfm=out_f+'.xfm', + source_mask=sample.mask_f, + target_mask=model.mask, + init_xfm=_init_xfm_f, + parameters=elx_parameters, + downsample_grid=downsample_grid, + downsample=downsample, + nl=True + ) + + if avg_symmetric: + m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0,1.0,1.0] ) + m.xfmconcat([m.tmp('flip_x.xfm'), out_f+'.xfm', m.tmp('flip_x.xfm')], m.tmp('forward_f_f.xfm')) + + m.xfm_normalize(out+'.xfm', model.scan, m.tmp('forward_n.xfm'),step=level) + m.xfm_normalize(m.tmp('forward_f_f.xfm'),model.scan,m.tmp('forward_f_f_n.xfm'),step=level) + + xfmavg([m.tmp('forward_n.xfm'),m.tmp('forward_f_f_n.xfm')],output.xfm) + m.xfmconcat([m.tmp('flip_x.xfm'), output.xfm , m.tmp('flip_x.xfm')], m.tmp('output_f.xfm' )) + m.xfm_normalize(out_f+'.xfm',model.scan,output.xfm_f,step=level) + + else: + m.xfm_normalize(out+'.xfm', model.scan,output.xfm, step=level) + 
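# [editor's aside: hypothetical dict, not part of the patch] for the
# elastix path, 'parameters' is keyed by the pyramid level as a string and
# 'downsample_grid' defaults to half the level (see the lookup above):
parameters = {'32': {'downsample_grid': 16}, '16': {}}
level = 16
elx_parameters = parameters.get(str(level), {})   # -> {}
downsample_grid = elx_parameters.get('downsample_grid', level / 2.0)  # 8.0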
m.xfm_normalize(out_f+'.xfm',model.scan,output.xfm_f,step=level) + + else: + if m.checkfiles(inputs=[sample.scan,model.scan], + outputs=[output.xfm]): + + ipl.elastix_registration.register_elastix( + sample.scan, + model.scan, + output_xfm=out+'.xfm', + source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=elx_parameters, + downsample_grid=downsample_grid, + downsample=downsample, + #work_dir=work_dir + ) + m.xfm_normalize(out+'.xfm',model.scan,output.xfm,step=level) + + if output_invert is not None and m.checkfiles(inputs=[], outputs=[output_invert.xfm]): + m.xfm_normalize(out+'.xfm',model.scan,output_invert.xfm,step=level,invert=True) + if symmetric: + m.xfm_normalize(out_f+'.xfm',model.scan,output_invert.xfm_f,step=level,invert=True) + + return True + except mincError as e: + print "Exception in non_linear_register_step:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in non_linear_register_step:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +def average_transforms( + samples, + output, + nl=False, + symmetric=False, + invert=False + ): + """average given transformations""" + try: + with mincTools() as m: + avg = [] + out_xfm=output.xfm + + for i in samples: + avg.append(i.xfm) + + if symmetric: + for i in samples: + avg.append(i.xfm_f) + if invert: + out_xfm=m.tmp("average.xfm") + xfmavg(avg, out_xfm) + + if invert: + m.xfminvert(out_xfm, output.xfm) + return True + except mincError as e: + print "Exception in average_transforms:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_transforms:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +def non_linear_register_step_regress_std( + sample, + model_int, + model_def, + output_int, + output_def, + level=32, + start_level=None, + parameters=None, + work_dir=None, + downsample=None, + debug=False, + previous_def=None, + datatype='short', + nl_mode='animal' + ): + """perform linear registration to the model, and calculate new estimate""" + try: + + with mincTools() as m: + if m.checkfiles(inputs=[sample.scan], + outputs=[output_def.xfm]): + + int_approximate = None + def_approximate = None + def_update = None + + if debug: + int_approximate = MriDataset( prefix=output_def.prefix, + name=output_def.name +'_int_approx', + iter=output_def.iter ) + + def_approximate = MriTransform( prefix=output_def.prefix, + name=output_def.name +'_approx', + iter=output_def.iter ) + + def_update = MriTransform( prefix=output_def.prefix, + name=output_def.name +'_update', + iter=output_def.iter ) + else: + int_approximate = MriDataset( prefix=m.tempdir, + name=output_def.name +'_int_approx') + + def_approximate = MriTransform( prefix=m.tempdir, + name=output_def.name +'_approx' ) + + def_update = MriTransform( prefix=m.tempdir, + name=output_def.name +'_update') + + # A hack! 
assume that if initial model is MriDataset it means zero regression coeff + if isinstance(model_int, MriDataset): + int_approximate=model_int + def_approximate=None + else: + (int_approximate, def_approximate) = \ + build_approximation(model_int, + model_def, + sample.par_int, + sample.par_def, + int_approximate, + def_approximate, + noresample=False) + if model_def is None: + def_approximate=None + + if start_level is None: + start_level=level + + init_xfm=None + + # we are refining previous estimate + if previous_def is not None: + ## have to adjust it based on the current estimate + if def_approximate is not None: + init_xfm=m.tmp('init_def.xfm') + m.xfminvert(def_approximate.xfm, m.tmp('approx_inv.xfm')) + m.xfmconcat(previous_def.xfm,m.tmp('approx_inv.xfm')) + m.xfm_normalize(m.tmp('approx_inv.xfm'),int_approximate.scan,m.tmp('init.xfm'),step=level) + init_xfm=m.tmp('init.xfm') + else: + init_xfm=previous_def.xfm + + print("level={} start={}".format(level,start_level)) + print("parameters={}".format(repr(parameters))) + + if nl_mode=='animal': + ipl.registration.non_linear_register_full( + int_approximate.scan, + sample.scan, + def_update.xfm, + source_mask=int_approximate.mask, + target_mask=sample.mask, + init_xfm=init_xfm, + parameters=parameters, + start=start_level, + level=level, + downsample=downsample, + #work_dir=work_dir + ) + elif nl_mode=='ants': + ipl.ants_registration.non_linear_register_ants2( + int_approximate.scan, + sample.scan, + def_update.xfm, + source_mask=int_approximate.mask, + target_mask=sample.mask, + init_xfm=init_xfm, + parameters=parameters, + start=start_level, + level=level, + downsample=downsample, + #work_dir=work_dir + ) + elif nl_mode=='dd': + ipl.dd_registration.non_linear_register_dd( + int_approximate.scan, + sample.scan, + def_update.xfm, + source_mask=int_approximate.mask, + target_mask=sample.mask, + init_xfm=init_xfm, + parameters=parameters, + start=start_level, + level=level, + downsample=downsample, + #work_dir=work_dir + ) + else: #elastix + ipl.elatix_registration.register_elastix( + int_approximate.scan, + sample.scan, + def_update.xfm, + source_mask=int_approximate.mask, + target_mask=sample.mask, + init_xfm=init_xfm, + parameters=parameters, + downsample_grid=level, + downsample=downsample, + #work_dir=work_dir + ) + + + # update estimate, + if def_approximate is not None: + m.xfmconcat([def_approximate.xfm,def_update.xfm],m.tmp('output_def.xfm')) + m.xfm_normalize(m.tmp('output_def.xfm'),int_approximate.scan,output_def.xfm, step=level) + else: + m.xfm_normalize(def_update.xfm,int_approximate.scan,output_def.xfm, step=level) + + if output_int is not None: + # resample intensity + m.resample_smooth(sample.scan, output_int.scan, + transform=output_def.xfm, + invert_transform=True, + datatype='-'+datatype + ) + if sample.mask is not None: + m.resample_labels(sample.mask, output_int.mask, + transform=output_def.xfm, + invert_transform=True) + # done + + except mincError as e: + print "Exception in non_linear_register_step_ldd:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in non_linear_register_step_ldd:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model/regress.py b/ipl/model/regress.py new file mode 100644 index 0000000..d97b486 --- /dev/null +++ b/ipl/model/regress.py @@ -0,0 +1,450 @@ +import shutil +import os +import sys 
+import csv +import traceback +import json + +# MINC stuff +from iplMincTools import mincTools,mincError + +from .structures import MriDataset, MriTransform, MRIEncoder, MriDatasetRegress +from .filter import generate_flip_sample, normalize_sample +from .filter import average_samples,average_stats +from .filter import calculate_diff_bias_field,average_bias_fields +from .filter import resample_and_correct_bias +from .filter import build_approximation +from .filter import average_stats_regression +from .filter import voxel_regression + +from .registration import non_linear_register_step +from .registration import dd_register_step +from .registration import ants_register_step +from .registration import average_transforms +from .registration import non_linear_register_step_regress_std +from .resample import concat_resample_nl + +from scoop import futures, shared + +def regress( + samples, + initial_model=None, + initial_int_model=None, + initial_def_model=None, + output_int_model=None, + output_def_model=None, + output_residuals_int=None, + output_residuals_def=None, + prefix='.', + options={} + ): + """ perform iterative model creation""" + try: + + # make sure all input scans have parameters + N_int=None + N_def=None + + int_design_matrix=[] + def_design_matrix=[] + nomask=False + + for s in samples: + + if N_int is None: + N_int=len(s.par_int) + elif N_int!=len(s.par_int): + raise mincError("Sample {} have inconsisten number of int paramters: {} expected {}".format(repr(s),len(s),N_int)) + + if N_def is None: + N_def=len(s.par_def) + elif N_def!=len(s.par_def): + raise mincError("Sample {} have inconsisten number of int paramters: {} expected {}".format(repr(s),len(s),N_def)) + + int_design_matrix.append(s.par_int) + def_design_matrix.append(s.par_def) + + if s.mask is None: + nomask=True + + #print("Intensity design matrix=\n{}".format(repr(int_design_matrix))) + #print("Velocity design matrix=\n{}".format(repr(def_design_matrix))) + + ref_model=None + # current estimate of template + if initial_model is not None: + current_int_model = initial_model + current_def_model = None + ref_model=initial_model.scan + else: + current_int_model = initial_int_model + current_def_model = initial_def_model + ref_model=initial_int_model.volume[0] + transforms=[] + + full_transforms=[] + + protocol=options.get( + 'protocol', [{'iter':4,'level':32, 'blur_int': None, 'blur_def': None }, + {'iter':4,'level':16, 'blur_int': None, 'blur_def': None }] + ) + + cleanup= options.get('cleanup',False) + cleanup_intermediate= options.get('cleanup_intermediate',False) + + parameters= options.get('parameters',None) + refine= options.get('refine',False) + qc= options.get('qc',False) + downsample = options.get('downsample',None) + start_level= options.get('start_level',None) + debug = options.get('debug',False) + debias = options.get('debias',True) + nl_mode = options.get('nl_mode','animal') + + if parameters is None: + pass + #TODO: make sensible parameters? 
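# [editor's aside: hypothetical design, not part of the patch] every sample
# entering regress() carries its regression covariates: par_int for the
# intensity model and par_def for the deformation model, with lengths that
# must agree across samples (the N_int/N_def checks above). For an
# intercept-plus-age design:
samples = [
    MriDataset(scan='subj01_t1.mnc', mask='subj01_mask.mnc',
               par_int=[1.0, 31.0], par_def=[1.0, 31.0]),
    MriDataset(scan='subj02_t1.mnc', mask='subj02_mask.mnc',
               par_int=[1.0, 44.0], par_def=[1.0, 44.0]),
]
# (N.B. this module imports mincTools from 'iplMincTools' while its siblings
# use 'ipl.minc_tools'; the former looks like a stale module path.)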
+ + int_models=[] + def_models=[] + int_residuals=[] + def_residuals=[] + + int_residual=None + def_residual=None + + prev_def_estimate=None + # go through all the iterations + it=0 + residuals=[] + + for (i,p) in enumerate(protocol): + blur_int_model=p.get('blur_int',None) + blur_def_model=p.get('blur_def',None) + for j in range(1,p['iter']+1): + it+=1 + _start_level=None + if it==1: + _start_level=start_level + # this will be a model for next iteration actually + + it_prefix=prefix+os.sep+str(it) + if not os.path.exists(it_prefix): + os.makedirs(it_prefix) + + next_int_model=MriDatasetRegress(prefix=prefix, name='model_int', iter=it, N=N_int, nomask=nomask) + next_def_model=MriDatasetRegress(prefix=prefix, name='model_def', iter=it, N=N_def, nomask=True) + print("next_int_model={}".format( next_int_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') ) + + int_residual=MriDataset(prefix=prefix, scan=next_int_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + #name=next_int_model.name, iter=it ) + + def_residual=MriDataset(prefix=prefix, scan=next_def_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + #name=next_def_model.name, iter=it ) + + # skip over existing models here! + + if not next_int_model.exists() or \ + not next_def_model.exists() or \ + not int_residual.exists() or \ + not def_residual.exists(): + + int_estimate=[] + def_estimate=[] + r=[] + + + # 1 for each sample generate current approximation + # 2. perform non-linear registration between each sample and sample-specific approximation + # 3. update transformation + # 1+2+3 - all together + for (i, s) in enumerate(samples): + sample_def= MriTransform(name=s.name,prefix=it_prefix,iter=it) + sample_int= MriDataset(name=s.name, prefix=it_prefix,iter=it) + + previous_def=None + + if refine and it>1: + previous_def=prev_def_estimate[i] + + r.append( + futures.submit( + non_linear_register_step_regress_std, + s, + current_int_model, + current_def_model, + None, + sample_def, + parameters=parameters, + level=p['level'], + start_level=_start_level, + work_dir=prefix, + downsample=downsample, + debug=debug, + previous_def=previous_def, + nl_mode=nl_mode + ) + ) + def_estimate.append(sample_def) + #int_estimate.append(sample_int) + + # wait for jobs to finish + futures.wait(r, return_when=futures.ALL_COMPLETED) + avg_inv_transform=None + + if debias: + # here all the transforms should exist + avg_inv_transform=MriTransform(name='avg_inv',prefix=it_prefix,iter=it) + # 2 average all transformations + average_transforms(def_estimate, avg_inv_transform, symmetric=False, invert=True,nl=True) + + corr=[] + corr_transforms=[] + corr_samples=[] + + # 3 concatenate correction and resample + for (i, s) in enumerate(samples): + c=MriDataset(prefix=it_prefix,iter=it,name=s.name) + x=MriTransform(name=s.name+'_corr',prefix=it_prefix,iter=it) + + corr.append(futures.submit(concat_resample_nl, + s, def_estimate[i], avg_inv_transform, + c, x, + current_int_model, + p['level'], + symmetric=False, + qc=qc, + invert_transform=True )) + + corr_transforms.append(x) + corr_samples.append(c) + + futures.wait(corr, return_when=futures.ALL_COMPLETED) + + # 4. perform regression and create new estimate + # 5. calculate residulas (?) 
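# [editor's aside: sketch, not part of the patch; it assumes the external
# 'volumes_lm' tool performs ordinary least squares at every voxel, which
# this patch does not show] steps 4+5 fit coefficient maps plus an RMS
# residual map from the design matrix X and the stack of resampled volumes:
import numpy as np

def per_voxel_ols(X, volumes):
    # X: (n_subjects, n_params); volumes: (n_subjects,) + vol_shape
    n = volumes.shape[0]
    vol_shape = volumes.shape[1:]
    Y = volumes.reshape(n, -1)                    # one column per voxel
    beta = np.linalg.lstsq(X, Y, rcond=None)[0]   # OLS fit, all voxels
    rms = np.sqrt(np.mean((Y - np.dot(X, beta)) ** 2, axis=0))
    return beta.reshape((-1,) + vol_shape), rms.reshape(vol_shape)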
+ # 4+5 + result=futures.submit(voxel_regression, + int_design_matrix, def_design_matrix, + corr_samples, corr_transforms, + next_int_model, next_def_model, + int_residual, def_residual, + blur_int_model=blur_int_model, + blur_def_model=blur_def_model, + qc=qc + ) + + futures.wait([result], return_when=futures.ALL_COMPLETED) + + # 6. cleanup + if cleanup : + print("Cleaning up iteration: {}".format(it)) + for i in def_estimate: + i.cleanup() + for i in corr_samples: + i.cleanup() + if prev_def_estimate is not None: + for i in prev_def_estimate: + i.cleanup() + avg_inv_transform.cleanup() + else: + # files were there, reuse them + print("Iteration {} already performed, skipping".format(it)) + corr_transforms=[] + # this is a hack right now + for (i, s) in enumerate(samples): + x=MriTransform(name=s.name+'_corr',prefix=it_prefix,iter=it) + corr_transforms.append(x) + + int_models.append(current_int_model) + def_models.append(current_def_model) + int_residuals.append(int_residual) + def_residuals.append(def_residual) + + current_int_model=next_int_model + current_def_model=next_def_model + + + result=futures.submit(average_stats_regression, + current_int_model, current_def_model, + int_residual, def_residual ) + residuals.append(result) + + regression_results={ + 'int_model': current_int_model, + 'def_model': current_def_model, + 'int_residuals': int_residual.scan, + 'def_residuals': def_residual.scan, + } + with open(prefix+os.sep+'results_{:03d}.json'.format(it),'w') as f: + json.dump(regression_results,f,indent=1, cls=MRIEncoder) + + # save for next iteration + # TODO: regularize? + prev_def_estimate=corr_transforms # have to use adjusted def estimate + + # copy output to the destination + futures.wait(residuals, return_when=futures.ALL_COMPLETED) + with open(prefix+os.sep+'stats.txt','w') as f: + for s in residuals: + f.write("{}\n".format(s.result())) + + + with open(prefix+os.sep+'results_final.json','w') as f: + json.dump(regression_results, f, indent=1, cls=MRIEncoder) + + + if cleanup_intermediate: + for i in range(len(int_models)-1): + int_models[i].cleanup() + def_models[i].cleanup() + int_residuals[i].cleanup() + def_residuals[i].cleanup() + # delete unneeded models + #shutil.rmtree(prefix+os.sep+'reg') + + return regression_results + except mincError as e: + print "Exception in regress:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in regress:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def regress_csv(input_csv, + int_par_count=None, + model=None, + mask=None, + work_prefix=None, + options={}, + regress_model=None): + """convinience function to run model generation using CSV input file and a fixed init""" + internal_sample=[] + + with open(input_csv, 'r') as csvfile: + reader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONE) + for row in reader: + + par=[ float(i) for i in row[2:] ] + par_def=par + par_int=par + + if int_par_count is not None: + par_int=par[:int_par_count] + par_def=par[int_par_count:] + _mask=row[1] + if _mask=='': + _mask=None + internal_sample.append( MriDataset(scan=row[0], mask=_mask, par_int=par_int, par_def=par_def) ) + + internal_model=None + initial_int_model=None + initial_def_model=None + + if regress_model is None: + if model is not None: + internal_model=MriDataset(scan=model, mask=mask) + else: + # assume that regress_model is an array + initial_int_model=MriDatasetRegress(prefix=work_prefix, name='initial_model_int', N=len(regress_model)) + 
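# [editor's aside: hypothetical file, not part of the patch] the CSV parsed
# at the top of regress_csv holds 'scan,mask,p1,p2,...' per row (an empty
# mask field is allowed), and int_par_count splits the covariates between
# the intensity and deformation models:
#
#   subj01_t1.mnc,subj01_mask.mnc,1.0,31.0,1.0,31.0
#   subj02_t1.mnc,,1.0,44.0,1.0,44.0
#
par = [1.0, 31.0, 1.0, 31.0]
int_par_count = 2
par_int, par_def = par[:int_par_count], par[int_par_count:]
assert par_int == [1.0, 31.0] and par_def == [1.0, 31.0]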
+        initial_int_model.volume=regress_model
+        initial_int_model.mask=mask
+
+        initial_int_model.protect=True
+        initial_def_model=None
+
+
+    if work_prefix is not None and not os.path.exists(work_prefix):
+        os.makedirs(work_prefix)
+
+    return regress( internal_sample,
+                    initial_model=internal_model,
+                    prefix=work_prefix,
+                    options=options,
+                    initial_int_model=initial_int_model,
+                    initial_def_model=initial_def_model)
+
+
+def regress_simple(input_samples,
+                   int_design_matrix,
+                   geo_design_matrix,
+                   model=None,
+                   mask=None,
+                   work_prefix=None,
+                   options={},
+                   regress_model=None):
+    """convenience function to run model generation using a list of input samples and a fixed init"""
+    internal_sample=[]
+
+    for (i,j) in enumerate(input_samples):
+        internal_sample.append( MriDataset(scan=j[0], mask=j[1],
+                                           par_int=int_design_matrix[i],
+                                           par_def=geo_design_matrix[i])
+                              )
+
+    internal_model=None
+    initial_int_model=None
+    initial_def_model=None
+
+    if regress_model is None:
+        if model is not None:
+            internal_model=MriDataset(scan=model, mask=mask)
+    else:
+        # assume that regress_model is an array
+        initial_int_model=MriDatasetRegress(prefix=work_prefix, name='initial_model_int', N=len(regress_model))
+        initial_int_model.volume=regress_model
+        initial_int_model.mask=mask
+
+        initial_int_model.protect=True
+        initial_def_model=None
+
+    if work_prefix is not None and not os.path.exists(work_prefix):
+        os.makedirs(work_prefix)
+
+    return regress( internal_sample,
+                    initial_model=internal_model,
+                    prefix=work_prefix,
+                    options=options,
+                    initial_int_model=initial_int_model,
+                    initial_def_model=initial_def_model)
+
+
+
+def build_estimate(description_json, parameters, output_prefix, int_par_count=None):
+    desc=None
+    with open(description_json, 'r') as f:
+        desc=json.load(f)
+
+    int_parameters=parameters
+    def_parameters=parameters
+
+    if int_par_count is not None:
+        int_parameters=parameters[:int_par_count]
+        def_parameters=parameters[int_par_count:]
+
+    if len(def_parameters)!=len(desc["def_model"]["volume"]) or \
+       len(int_parameters)!=len(desc["int_model"]["volume"]):
+
+        print(desc["int_model"]["volume"])
+        print("int_parameters={}".format(repr(int_parameters)))
+
+        print(desc["def_model"]["volume"])
+        print("def_parameters={}".format(repr(def_parameters)))
+
+        raise mincError("{} inconsistent number of parameters, expected {}".
+ format(repr(int_parameters), + len(desc["def_model"]["volume"]))) + + deformation=MriDatasetRegress(from_dict=desc["def_model"]) + intensity=MriDatasetRegress(from_dict=desc["int_model"]) + + output_scan=MriDataset(prefix=os.path.dirname(output_prefix),name=os.path.basename(output_prefix)) + output_transform=MriTransform(prefix=os.path.dirname(output_prefix),name=os.path.basename(output_prefix)) + + build_approximation(intensity, deformation, + int_parameters, def_parameters, + output_scan, output_transform) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model/resample.py b/ipl/model/resample.py new file mode 100644 index 0000000..8916229 --- /dev/null +++ b/ipl/model/resample.py @@ -0,0 +1,161 @@ +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +from .filter import * +from .structures import * + +# scoop parallel execution +from scoop import futures, shared + + +def concat_resample( + input_mri, + input_transform, + corr_transform, + output_mri, + output_transform, + model, + symmetric=False, + qc=False, + bias=None + ): + """apply correction transformation and resample input""" + try: + with mincTools() as m: + + if not ( os.path.exists(output_mri.scan) and os.path.exists(output_transform.xfm) ): + scan=input_mri.scan + + if bias is not None: + m.calc([input_mri.scan,bias.scan],'A[0]*A[1]',m.tmp('corr.mnc')) + scan=m.tmp('corr.mnc') + + m.xfmconcat([input_transform.xfm, corr_transform.xfm], output_transform.xfm) + m.resample_smooth(scan, output_mri.scan, transform=output_transform.xfm,like=model.scan) + + if input_mri.mask is not None and output_mri.mask is not None: + m.resample_labels(input_mri.mask, + output_mri.mask, + transform=output_transform.xfm, + like=model.scan) + if qc: + m.qc(output_mri.scan,output_mri.scan+'.jpg',mask=output_mri.mask) + else: + if qc: + m.qc(output_mri.scan,output_mri.scan+'.jpg') + + + if symmetric: + scan_f=input_mri.scan_f + + if bias is not None: + m.calc([input_mri.scan_f,bias.scan_f],'A[0]*A[1]',m.tmp('corr_f.mnc')) + scan_f=m.tmp('corr_f.mnc') + + m.xfmconcat([input_transform.xfm_f, corr_transform.xfm], output_transform.xfm_f) + m.resample_smooth(scan_f, output_mri.scan_f, transform=output_transform.xfm_f,like=model.scan) + + if input_mri.mask is not None and output_mri.mask is not None: + m.resample_labels(input_mri.mask_f, + output_mri.mask_f, + transform=output_transform.xfm_f, + like=model.scan) + if qc: + m.qc(output_mri.scan_f,output_mri.scan_f+'.jpg',mask=output_mri.mask_f) + else: + if qc: + m.qc(output_mri.scan_f,output_mri.scan_f+'.jpg') + except mincError as e: + print "Exception in concat_resample:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in concat_resample:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def concat_resample_nl( + input_mri, + input_transform, + corr_transform, + output_mri, + output_transform, + model, + level, + symmetric=False, + qc=False, + invert_transform=False + ): + """apply correction transformation and resample input""" + try: + with mincTools() as m: + tfm=input_transform.xfm + if corr_transform is not None: + m.xfmconcat([input_transform.xfm, corr_transform.xfm], m.tmp('transform.xfm')) + tfm=m.tmp('transform.xfm') + ref=None + if isinstance(model, MriDatasetRegress): ref=model.volume[0] + else: ref=model.scan + + m.xfm_normalize( tfm, ref, output_transform.xfm, 
+ step=level) + + m.resample_smooth(input_mri.scan, output_mri.scan, + transform=output_transform.xfm, + like=ref, + invert_transform=invert_transform) + + if input_mri.mask and output_mri.mask: + m.resample_labels(input_mri.mask, + output_mri.mask, + transform=output_transform.xfm, + like=ref, + invert_transform=invert_transform) + if qc: + m.qc(output_mri.scan,output_mri.scan+'.jpg', + mask=output_mri.mask) + else: + if qc: + m.qc(output_mri.scan,output_mri.scan+'.jpg') + + if symmetric: + tfm_f=input_transform.xfm_f + if corr_transform is not None: + m.xfmconcat( [input_transform.xfm_f, corr_transform.xfm], m.tmp('transform_f.xfm') ) + tfm_f=m.tmp('transform_f.xfm') + m.xfm_normalize( tfm_f, ref, output_transform.xfm_f, step=level ) + m.resample_smooth(input_mri.scan_f, output_mri.scan_f, transform=output_transform.xfm_f, + like=ref, + invert_transform=invert_transform ) + + if input_mri.mask and output_mri.mask: + m.resample_labels(input_mri.mask_f, + output_mri.mask_f, + transform=output_transform.xfm_f, + like=ref, + invert_transform=invert_transform) + + if qc: + m.qc(output_mri.scan_f, output_mri.scan_f+'.jpg', + mask=output_mri.mask_f) + else: + if qc: + m.qc(output_mri.scan_f, output_mri.scan_f+'.jpg') + + + return True + except mincError as e: + print "Exception in concat_resample_nl:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in concat_resample_nl:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model/structures.py b/ipl/model/structures.py new file mode 100644 index 0000000..5aef213 --- /dev/null +++ b/ipl/model/structures.py @@ -0,0 +1,180 @@ +# data structures used in model generation package + +import shutil +import os +import sys +import traceback +import json + +class MriDataset(object): + def __init__(self, prefix=None, name=None, iter=None, scan=None, mask=None, protect=False, par_int=[],par_def=[]): + self.prefix=prefix + self.name=name + self.iter=iter + self.protect=protect + self.scan_f=None + self.mask_f=None + self.par_int=par_int + self.par_def=par_def + + if scan is None: + if self.iter is None: + self.scan=self.prefix+os.sep+self.name+'.mnc' + self.mask=self.prefix+os.sep+self.name+'_mask.mnc' + self.scan_f=self.prefix+os.sep+self.name+'_f.mnc' + self.mask_f=self.prefix+os.sep+self.name+'_f_mask.mnc' + else: + self.scan=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'.mnc' + self.mask=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_mask.mnc' + self.scan_f=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f.mnc' + self.mask_f=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f_mask.mnc' + else: + self.scan=scan + self.mask=mask + + if self.name is None: + self.name=os.path.basename(self.scan) + + if self.prefix is None: + self.prefix=os.path.dirname(self.scan) + + def __repr__(self): + return 'MriDataset(prefix="{}",name="{}",iter="{}",scan="{}",mask="{}",protect={},par_int={},par_def={})'.\ + format(self.prefix,self.name,repr(self.iter),self.scan,self.mask,repr(self.protect),repr(self.par_int),repr(self.par_def)) + + def exists(self): + _ex=True + for i in (self.scan, self.mask, self.scan_f, self.mask_f ): + if i is not None : + _ex&=os.path.exists(i) + return _ex + + def cleanup(self,verbose=False): + if not self.protect: + for i in (self.scan, self.mask, self.scan_f, self.mask_f ): + if i is not None and os.path.exists(i): + if 
verbose: + print("Removing:{}".format(i)) + os.unlink(i) + +class MriTransform(object): + def __init__(self,prefix,name,iter=None,linear=False): + self.prefix=prefix + self.name=name + self.iter=iter + self.xfm_f=None + self.grid_f=None + self.linear=linear + + if self.iter is None: + self.xfm= self.prefix+os.sep+self.name+'.xfm' + self.xfm_f= self.prefix+os.sep+self.name+'_f.xfm' + self.grid= self.prefix+os.sep+self.name+'_grid_0.mnc' + self.grid_f= self.prefix+os.sep+self.name+'_f_grid_0.mnc' + else: + self.xfm= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'.xfm' + self.xfm_f= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f.xfm' + self.grid= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_grid_0.mnc' + self.grid_f= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f_grid_0.mnc' + # just null grids if it is linear + if self.linear: + self.grid=None + self.grid_f=None + + def __repr__(self): + return 'MriTransform(prefix="{}",name="{}",iter="{}")'.\ + format(self.prefix,self.name,repr(self.iter)) + + def cleanup(self,verbose=False): + for i in (self.xfm, self.grid, self.xfm_f, self.grid_f): + if i is not None and os.path.exists(i): + if verbose: + print("Removing:{}".format(i)) + os.unlink(i) + + +class MriDatasetRegress(object): + def __init__(self, prefix=None, name=None, iter=None, N=1, protect=False, from_dict=None, nomask=False): + if from_dict is None: + self.prefix=prefix + self.name=name + self.iter=iter + self.protect=protect + self.N=N + self.volume=[] + + if self.iter is None: + for n in range(0,N): + self.volume.append(self.prefix+os.sep+self.name+'_{}.mnc'.format(n)) + self.mask=self.prefix+os.sep+self.name+'_mask.mnc' + else: + for n in range(0,N): + self.volume.append(self.prefix+os.sep+self.name+'.{:03d}_{}'.format(iter,n)+'.mnc') + self.mask=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_mask.mnc' + if nomask: + self.mask=None + else: # simple hack for now + self.volume=from_dict["volume"] + self.iter=from_dict["iter"] + self.name=from_dict["name"] + self.mask=from_dict["mask"] + self.N=len(self.volume) + + def __repr__(self): + return 'MriDatasetRegress(prefix="{}",name="{}",volume={},mask={},iter="{}",protect={})'.\ + format(self.prefix, self.name, repr(self.volume), self.mask, repr(self.iter), repr(self.protect)) + + def cleanup(self): + if not self.protect: + for i in self.volume: + if i is not None and os.path.exists(i): + os.unlink(i) + for i in [self.mask]: + if i is not None and os.path.exists(i): + os.unlink(i) + + def exists(self): + """ + Check that all files are present + """ + _ex=True + for i in self.volume: + if i is not None : + _ex&=os.path.exists(i) + + for i in [self.mask]: + if i is not None : + _ex&=os.path.exists(i) + + return _ex + + +class MRIEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, MriTransform): + return {'name':obj.name, + 'iter':obj.iter, + 'xfm':obj.xfm, 'grid':obj.grid, + 'xfm_f':obj.xfm_f,'grid_f':obj.grid_f, + 'linear':obj.linear + } + if isinstance(obj, MriDataset): + return {'name':obj.name, + 'iter':obj.iter, + 'scan':obj.scan, + 'mask':obj.mask, + 'scan_f':obj.scan_f, + 'mask_f':obj.mask_f, + 'par_def':obj.par_def, + 'par_int':obj.par_int + } + elif isinstance(obj, MriDatasetRegress): + return {'name': obj.name, + 'iter': obj.iter, + 'volume':obj.volume, + 'mask': obj.mask, + } + # Let the base class default method raise the TypeError + return json.JSONEncoder.default(self, obj) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 
80;show-tabs on diff --git a/ipl/model_ldd/__init__.py b/ipl/model_ldd/__init__.py new file mode 100644 index 0000000..7076517 --- /dev/null +++ b/ipl/model_ldd/__init__.py @@ -0,0 +1,5 @@ +# model generations + + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model_ldd/filter_ldd.py b/ipl/model_ldd/filter_ldd.py new file mode 100644 index 0000000..21e494d --- /dev/null +++ b/ipl/model_ldd/filter_ldd.py @@ -0,0 +1,453 @@ +import shutil +import os +import sys +import csv +import traceback +import copy + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +from .structures_ldd import MriDataset, LDDMriTransform, LDDMRIEncoder,MriDatasetRegress + +def generate_flip_sample(input): + '''generate flipped version of sample''' + with mincTools() as m: + m.flip_volume_x(input.scan,input.scan_f) + + if input.mask is not None: + m.flip_volume_x(input.mask,input.mask_f,labels=True) + + print "Flipped!" + return True + + +def normalize_sample( + input, + output, + model, + bias_field=None, + ): + """Normalize sample intensity""" + + with mincTools() as m: + m.apply_n3_vol_pol( + input.scan, + model.scan, + output.scan, + source_mask=input.mask, + target_mask=model.mask, + bias=bias_field, + ) + output.mask=input.mask + return output + + + +def average_samples( + samples, + output, + output_sd=None, + symmetric=False, + ): + """average individual samples""" + try: + with mincTools() as m: + avg = [] + + for s in samples: + avg.append(s.scan) + + if symmetric: + for s in samples: + avg.append(s.scan_f) + + if output_sd: + m.average(avg, output.scan, sdfile=output_sd.scan) + else: + m.average(avg, output.scan) + + # average masks + if output.mask is not None: + avg = [] + for s in samples: + avg.append(s.mask) + + if symmetric: + for s in samples: + avg.append(s.mask_f) + + if not os.path.exists(output.mask): + m.average(avg,m.tmp('avg_mask.mnc'),datatype='-float') + m.calc([m.tmp('avg_mask.mnc')],'A[0]>0.5?1:0',m.tmp('avg_mask_.mnc'),datatype='-byte') + m.reshape(m.tmp('avg_mask_.mnc'),output.mask,image_range=[0,1],valid_range=[0,1]) + + + return True + except mincError as e: + print "Exception in average_samples:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_samples:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def average_stats( + avg, + sd, + ): + """calculate median sd within mask""" + try: + st=0 + with mincTools(verbose=2) as m: + if avg.mask is not None: + st=float(m.stats(sd.scan,'-median',mask=avg.mask)) + else: + st=float(m.stats(sd.scan,'-median')) + return st + except mincError as e: + print "mincError in average_stats:{}".format(repr(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_stats:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + + +def average_stats_regression( + current_intensity_model, current_velocity_model, + intensity_residual, velocity_residual, + ): + """calculate median sd within mask for intensity and velocity""" + try: + sd_int=0.0 + sd_vel=0.0 + with mincTools(verbose=2) as m: + m.grid_magnitude(velocity_residual.scan,m.tmp('mag.mnc')) + if current_intensity_model.mask is not None: + sd_int=float(m.stats(intensity_residual.scan,'-median',mask=current_intensity_model.mask)) + m.resample_smooth(m.tmp('mag.mnc'),m.tmp('mag_.mnc'),like=current_intensity_model.mask) + 
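+                # the velocity-magnitude map is resampled onto the mask grid
+                # before taking the median, since the two samplings may differ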
+                sd_vel=float(m.stats(m.tmp('mag_.mnc'),'-median',mask=current_intensity_model.mask))
+            else:
+                sd_int=float(m.stats(intensity_residual.scan,'-median'))
+                sd_vel=float(m.stats(m.tmp('mag.mnc'),'-median'))
+
+        return (sd_int,sd_vel)
+    except mincError as e:
+        print("mincError in average_stats_regression:{}".format(repr(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in average_stats_regression:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+
+def calculate_diff_bias_field(sample, model, output, symmetric=False, distance=100 ):
+    try:
+        with mincTools() as m:
+            if model.mask is not None:
+                m.difference_n3(sample.scan, model.scan, output.scan, mask=model.mask, distance=distance, normalize=True)
+            else:
+                m.difference_n3(sample.scan, model.scan, output.scan, distance=distance, normalize=True )
+            if symmetric:
+                # the flipped sample's field goes to the _f output
+                if model.mask is not None:
+                    m.difference_n3(sample.scan_f, model.scan, output.scan_f, mask=model.mask_f, distance=distance, normalize=True)
+                else:
+                    m.difference_n3(sample.scan_f, model.scan, output.scan_f, distance=distance, normalize=True )
+            return True
+    except mincError as e:
+        print("mincError in calculate_diff_bias_field:{}".format(repr(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in calculate_diff_bias_field:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+def average_bias_fields(samples, output, symmetric=False ):
+    try:
+        with mincTools() as m:
+
+            avg = []
+
+            for s in samples:
+                avg.append(s.scan)
+
+            if symmetric:
+                for s in samples:
+                    avg.append(s.scan_f)
+
+            m.log_average(avg, output.scan)
+            return True
+    except mincError as e:
+        print("mincError in average_bias_fields:{}".format(repr(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in average_bias_fields:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+
+def resample_and_correct_bias_ldd(
+        sample,
+        transform,
+        avg_bias,
+        output,
+        previous=None,
+        symmetric=False,
+        ):
+    # resample bias field and apply previous estimate
+    try:
+        with mincTools() as m:
+
+            m.calc([sample.scan, avg_bias.scan],
+                   'A[1]>0.1?A[0]/A[1]:1.0', m.tmp('corr_bias.mnc'))
+
+            m.resample_smooth_logspace(m.tmp('corr_bias.mnc'),
+                                       m.tmp('corr_bias2.mnc'),
+                                       like=sample.scan,
+                                       transform=transform.vel,
+                                       invert_transform=True)
+            if previous:
+                m.calc([previous.scan, m.tmp('corr_bias2.mnc') ], 'A[0]*A[1]',
+                       output.scan, datatype='-float')
+            else:
+                shutil.copy(m.tmp('corr_bias2.mnc'), output.scan)
+
+            if symmetric:
+                m.calc([sample.scan_f, avg_bias.scan],
+                       'A[1]>0.1?A[0]/A[1]:1.0', m.tmp('corr_bias_f.mnc'))
+
+                m.resample_smooth_logspace(m.tmp('corr_bias_f.mnc'),
+                                           m.tmp('corr_bias2_f.mnc'),
+                                           like=sample.scan,
+                                           transform=transform.vel,
+                                           invert_transform=True)
+                if previous:
+                    m.calc([previous.scan_f, m.tmp('corr_bias2_f.mnc')],
+                           'A[0]*A[1]',
+                           output.scan_f, datatype='-float')
+                else:
+                    # the flipped field belongs in the _f output
+                    shutil.copy(m.tmp('corr_bias2_f.mnc'), output.scan_f)
+
+            return True
+    except mincError as e:
+        print("Exception in resample_and_correct_bias_ldd:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in resample_and_correct_bias_ldd:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def apply_linear_model(
+    lin_model,
+    parameters,
+    output_volume
+    ):
+    """build a volume, for a given regression model and parameters"""
+    try:
+        with mincTools() as m:
+
+            if lin_model.N!=len(parameters):
+                raise
mincError("Expected: {} parameters, got {}".format(lin_model.N,len(parameters))) + + # create minccalc expression + _exp=[] + for i in range(0,lin_model.N): + _exp.append('A[{}]*{}'.format(i,parameters[i])) + exp='+'.join(_exp) + m.calc(lin_model.volume,exp,output_volume) + + return True + except mincError as e: + print( "Exception in apply_linear_model:{}".format(str(e)) ) + traceback.print_exc(file=sys.stdout) + raise + except : + print( "Exception in apply_linear_model:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +def build_approximation(intensity_model, geo_model , + parameters_intensity, parameters_velocity, + output_scan, output_transform, + noresample=False, + remove0=False): + try: + with mincTools() as m: + + intensity=m.tmp('intensity_model.mnc') + if noresample: + intensity=output_scan.scan + #geometry=m.tmp('geometry_model.mnc') + + # TODO: paralelelize? + if intensity_model.N>0: + apply_linear_model(intensity_model,parameters_intensity,intensity) + else: # not modelling intensity + intensity=intensity_model.volume[0] + + # if we have geometry information + if geo_model is not None and geo_model.N>0 : + _parameters_velocity=copy.deepcopy(parameters_velocity) + if remove0:_parameters_velocity[0]=0 + apply_linear_model(geo_model, _parameters_velocity, output_transform.vel) + + if not noresample: + m.resample_smooth_logspace(intensity, output_scan.scan, + velocity=output_transform.vel, + like=intensity_model.volume[0]) + + if intensity_model.mask is not None: + if noresample: + shutil.copyfile(intensity_model.mask, + output_scan.mask) + else: + m.resample_labels_logspace(intensity_model.mask, + output_scan.mask, + velocity=output_transform.vel, + like=intensity_model.volume[0]) + else: + output_scan.mask=None + else: # not modelling shape! 
+ shutil.copyfile(intensity,output_scan.scan) + if intensity_model.mask is not None: + shutil.copyfile(intensity_model.mask, + output_scan.mask) + else: + output_scan.mask=None + output_transform=None + + return (output_scan, output_transform) + except mincError as e: + print( "Exception in build_approximation:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print( "Exception in build_approximation:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +def voxel_regression(intensity_design_matrix, + velocity_design_matrix, + intensity_estimate, velocity_estimate, + next_intensity_model, next_velocity_model, + intensity_residual, velocity_residual, + blur_int_model=None, blur_vel_model=None, + qc=False): + """Perform voxel-wise regression using given design matrix""" + try: + with mincTools() as m: + #print(repr(next_intensity_model)) + + # a small hack - assume that input directories are the same + _prefix=velocity_estimate[0].prefix + _design_vel=_prefix+os.sep+'regression_vel.csv' + _design_int=_prefix+os.sep+'regression_int.csv' + + #nomask=False + #for i in for i in intensity_estimate: + # if i.mask is None: + # nomask=True + _masks=[i.mask for i in intensity_estimate] + _inputs=[] + _outputs=[] + _outputs.extend(next_intensity_model.volume) + _outputs.extend(next_velocity_model.volume) + + with open(_design_vel,'w') as f: + for (i, l ) in enumerate(velocity_design_matrix): + f.write(os.path.basename(velocity_estimate[i].vel)) + f.write(',') + f.write(','.join([str(qq) for qq in l])) + f.write("\n") + _inputs.append(velocity_estimate[i].vel) + + with open(_design_int,'w') as f: + for (i, l ) in enumerate(intensity_design_matrix): + f.write(os.path.basename(intensity_estimate[i].scan)) + f.write(',') + f.write(','.join([str(qq) for qq in l])) + f.write("\n") + _inputs.append(intensity_estimate[i].scan) + + if not m.checkfiles(inputs=_inputs, outputs=_outputs): + return + + intensity_model=next_intensity_model + velocity_model=next_velocity_model + + if blur_int_model is not None: + intensity_model=MriDatasetRegress(prefix=m.tempdir, name='model_intensity',N=next_intensity_model.N,nomask=(next_intensity_model.mask is None)) + + if blur_vel_model is not None: + velocity_model=MriDatasetRegress(prefix=m.tempdir,name='model_velocity', N=next_velocity_model.N, nomask=(next_velocity_model.mask is None)) + + + # regress velocity + m.command(['volumes_lm',_design_vel, velocity_model.volume[0].rsplit('_0.mnc',1)[0]], + inputs=[_design_vel], + outputs=velocity_model.volume, + verbose=2) + + # regress intensity + m.command(['volumes_lm',_design_int, intensity_model.volume[0].rsplit('_0.mnc',1)[0]], + inputs=[_design_int], + outputs=intensity_model.volume, + verbose=2) + + if blur_vel_model is not None: + # blur estimates + for (i,j) in enumerate(velocity_model.volume): + m.blur_vectors(velocity_model.volume[i],next_velocity_model.volume[i],blur_vel_model) + # a hack preserve unfiltered RMS volume + shutil.copyfile(velocity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + next_velocity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + + if blur_int_model is not None: + for (i,j) in enumerate(intensity_model.volume): + m.blur(intensity_model.volume[i],next_intensity_model.volume[i],blur_int_model) + # a hack preserve unfiltered RMS volume + shutil.copyfile(intensity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + next_intensity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + + # average masks + if next_intensity_model.mask is 
not None: + m.average(_masks,m.tmp('avg_mask.mnc'),datatype='-float') + m.calc([m.tmp('avg_mask.mnc')],'A[0]>0.5?1:0',m.tmp('avg_mask_.mnc'),datatype='-byte') + m.reshape(m.tmp('avg_mask_.mnc'),next_intensity_model.mask,image_range=[0,1],valid_range=[0,1]) + + if qc: + m.qc(next_intensity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + next_intensity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.jpg' ) + + m.grid_magnitude(next_velocity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc', + m.tmp('velocity_RMS_mag.mnc')) + + m.qc(m.tmp('velocity_RMS_mag.mnc'), + next_velocity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.jpg') + + #cleanup + #os.unlink(_design_vel) + #os.unlink(_design_int) + + + except mincError as e: + print( "Exception in voxel_regression:{}".format(str(e)) ) + traceback.print_exc(file=sys.stdout) + raise + except : + print( "Exception in voxel_regression:{}".format(sys.exc_info()[0]) ) + traceback.print_exc(file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model_ldd/generate_nonlinear_ldd.py b/ipl/model_ldd/generate_nonlinear_ldd.py new file mode 100644 index 0000000..0633ea0 --- /dev/null +++ b/ipl/model_ldd/generate_nonlinear_ldd.py @@ -0,0 +1,260 @@ +import shutil +import os +import sys +import csv +import traceback +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +from .structures_ldd import MriDataset, LDDMriTransform, LDDMRIEncoder +from .filter_ldd import generate_flip_sample, normalize_sample +from .filter_ldd import average_samples,average_stats +from .filter_ldd import calculate_diff_bias_field +from .filter_ldd import average_bias_fields +from .filter_ldd import resample_and_correct_bias_ldd +from .registration_ldd import non_linear_register_step_ldd +from .registration_ldd import average_transforms_ldd +from .resample_ldd import concat_resample_ldd + +from scoop import futures, shared + +def generate_ldd_average( + samples, + initial_model=None, + output_model=None, + output_model_sd=None, + prefix='.', + options={} + ): + """ perform iterative model creation""" + try: + #print(repr(options)) + # use first sample as initial model + if not initial_model: + initial_model = samples[0] + + # current estimate of template + current_model = initial_model + current_model_sd = None + + transforms=[] + corr=[] + + bias_fields=[] + corr_transforms=[] + sd=[] + corr_samples=[] + + protocol=options.get('protocol', [ + {'iter':4,'level':32}, + {'iter':4,'level':16}] + ) + + cleanup=options.get('cleanup',False) + symmetric=options.get('symmetric',False) + parameters=options.get('parameters',None) + refine=options.get('refine',True) + qc=options.get('qc',False) + downsample=options.get('downsample',None) + + models=[] + models_sd=[] + + if symmetric: + flipdir=prefix+os.sep+'flip' + if not os.path.exists(flipdir): + os.makedirs(flipdir) + + flip_all=[] + # generate flipped versions of all scans + for (i, s) in enumerate(samples): + s.scan_f=prefix+os.sep+'flip'+os.sep+os.path.basename(s.scan) + + if s.mask is not None: + s.mask_f=prefix+os.sep+'flip'+os.sep+'mask_'+os.path.basename(s.scan) + + flip_all.append( futures.submit( generate_flip_sample,s ) ) + + futures.wait(flip_all, return_when=futures.ALL_COMPLETED) + # go through all the iterations + it=0 + for (i,p) in enumerate(protocol): + for j in xrange(1,p['iter']+1): + it+=1 + # this will be a model for next iteration actually + + # 1 register all subjects to current template + 
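+                # each iteration: register every sample to the current template,
+                # average the resulting velocity fields to estimate (and remove)
+                # the group bias, resample, then average into the next template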
next_model=MriDataset(prefix=prefix,iter=it,name='avg') + next_model_sd=MriDataset(prefix=prefix,iter=it,name='sd') + transforms=[] + + it_prefix=prefix+os.sep+str(it) + if not os.path.exists(it_prefix): + os.makedirs(it_prefix) + + inv_transforms=[] + fwd_transforms=[] + + for (i, s) in enumerate(samples): + sample_xfm=LDDMriTransform(name=s.name,prefix=it_prefix,iter=it) + + prev_transform = None + prev_bias_field = None + + if it > 1 and refine: + prev_transform = corr_transforms[i] + + transforms.append( + futures.submit( + non_linear_register_step_ldd, + s, + current_model, + sample_xfm, + init_vel=prev_transform, + symmetric=symmetric, + parameters=parameters, + level=p['level'], + work_dir=prefix, + downsample=downsample) + ) + fwd_transforms.append(sample_xfm) + + # wait for jobs to finish + futures.wait(transforms, return_when=futures.ALL_COMPLETED) + + if cleanup and it>1 : + # remove information from previous iteration + for s in corr_samples: + s.cleanup() + for x in corr_transforms: + x.cleanup() + + # here all the transforms should exist + avg_inv_transform=LDDMriTransform(name='avg_inv',prefix=it_prefix,iter=it) + + # 2 average all transformations + average_transforms_ldd(fwd_transforms, avg_inv_transform, symmetric=symmetric, invert=True) + + corr=[] + corr_transforms=[] + corr_samples=[] + + # 3 concatenate correction and resample + for (i, s) in enumerate(samples): + c=MriDataset(prefix=it_prefix,iter=it,name=s.name) + x=LDDMriTransform(name=s.name+'_corr',prefix=it_prefix,iter=it) + + corr.append(futures.submit(concat_resample_ldd, s, + fwd_transforms[i], avg_inv_transform, c, x, current_model.scan, + symmetric=symmetric, qc=qc )) + + corr_transforms.append(x) + corr_samples.append(c) + + futures.wait(corr, return_when=futures.ALL_COMPLETED) + + # 4 average resampled samples to create new estimate + + result=futures.submit(average_samples, corr_samples, next_model, next_model_sd, symmetric=symmetric) + futures.wait([result], return_when=futures.ALL_COMPLETED) + + + if cleanup: + for s in fwd_transforms: + s.cleanup() + + if cleanup and it>1 : + # remove previous template estimate + models.append(next_model) + models_sd.append(next_model_sd) + + current_model=next_model + current_model_sd=next_model_sd + + result=futures.submit(average_stats, next_model, next_model_sd) + sd.append(result) + + # copy output to the destination + futures.wait(sd, return_when=futures.ALL_COMPLETED) + with open(prefix+os.sep+'stats.txt','w') as f: + for s in sd: + f.write("{}\n".format(s.result())) + + results={ + 'model': current_model, + 'model_sd': current_model_sd, + 'vel': corr_transforms, + 'biascorr': None, + 'scan': corr_samples, + 'symmetric': symmetric, + } + + with open(prefix+os.sep+'results.json','w') as f: + json.dump(results,f,indent=1,cls=LDDMRIEncoder) + + if cleanup: + # delete unneeded models + for m in models: + m.cleanup() + for m in models_sd: + m.cleanup() + + return results + except mincError as e: + print "Exception in generate_ldd_average:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in generate_ldd_average:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def generate_ldd_model_csv(input_csv,model=None,mask=None,work_prefix=None,options={}): + internal_sample=[] + + with open(input_csv, 'r') as csvfile: + reader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONE) + for row in reader: + internal_sample.append(MriDataset(scan=row[0],mask=row[1])) + + internal_model=None + if model 
is not None: + internal_model=MriDataset(scan=model,mask=mask) + + if work_prefix is not None and not os.path.exists(work_prefix): + os.makedirs(work_prefix) + + return generate_ldd_average(internal_sample,internal_model, + prefix=work_prefix,options=options) + + +def generate_ldd_model(samples,model=None,mask=None,work_prefix=None,options={}): + internal_sample=[] + try: + #print(repr(options)) + for i in samples: + s=MriDataset(scan=i[0],mask=i[1]) + internal_sample.append(s) + + internal_model=None + if model is not None: + internal_model=MriDataset(scan=model,mask=mask) + + if work_prefix is not None and not os.path.exists(work_prefix): + os.makedirs(work_prefix) + + return generate_ldd_average(internal_sample,internal_model, + prefix=work_prefix,options=options) + + except mincError as e: + print "Exception in generate_ldd_model:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in generate_ldd_model:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model_ldd/registration_ldd.py b/ipl/model_ldd/registration_ldd.py new file mode 100644 index 0000000..090710e --- /dev/null +++ b/ipl/model_ldd/registration_ldd.py @@ -0,0 +1,311 @@ +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +import ipl.dd_registration + +# internal stuff +from .filter_ldd import build_approximation +from .structures_ldd import * + +def non_linear_register_step_ldd( + sample, + model, + output, + init_vel=None, + level=32, + start=None, + symmetric=False, + parameters=None, + work_dir=None, + downsample=None, + ): + """perform linear registration to the model, and calculate inverse""" + + try: + _init_vel=None + _init_vel_f=None + + if start is None: + start=level + + if init_vel is not None: + _init_vel=init_vel.vel + if symmetric: + _init_vel_f=init_vel.vel_f + + with mincTools() as m: + + if symmetric: + + if m.checkfiles(inputs=[sample.scan,model.scan,sample.scan_f], + outputs=[output.vel,output.vel_f]): + + ipl.dd_registration.non_linear_register_ldd( + sample.scan, + model.scan, + output.vel, + source_mask=sample.mask, + target_mask=model.mask, + init_velocity=_init_vel, + parameters=parameters, + start=level, + level=level, + downsample=downsample, + #work_dir=work_dir + ) + ipl.dd_registration.non_linear_register_ldd( + sample.scan_f, + model.scan, + output.vel_f, + source_mask=sample.mask_f, + target_mask=model.mask, + init_velocity=_init_vel_f, + parameters=parameters, + start=level, + level=level, + downsample=downsample, + #work_dir=work_dir + ) + + + else: + if m.checkfiles(inputs=[sample.scan,model.scan], + outputs=[output.vel]): + + ipl.dd_registration.non_linear_register_ldd( + sample.scan, + model.scan, + output.vel, + source_mask=sample.mask, + target_mask=model.mask, + init_velocity=_init_vel, + parameters=parameters, + start=level, + level=level, + downsample=downsample, + #work_dir=work_dir + ) + except mincError as e: + print "Exception in non_linear_register_step_ldd:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in non_linear_register_step_ldd:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def average_transforms_ldd( + samples, + output, + symmetric=False, + invert=False + ): + """average given transformations""" + try: + with mincTools() as m: + 
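+            # stationary velocity fields are averaged voxel-wise; the inverse
+            # of such a field is (approximately) its negation, hence '-A[0]'
+            # below when invert is requested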
avg = [] + if not os.path.exists(output.vel): + out=output.vel + + if invert: + out=m.tmp('avg.mnc') + + for i in samples: + avg.append(i.vel) + + if symmetric: + for i in samples: + avg.append(i.vel_f) + m.average(avg, out) + + if invert: + m.calc([out],'-A[0]',output.vel) + + except mincError as e: + print "Exception in average_transforms_ldd:{}".format(str(e)) + traceback.print_exc(file=sys.stdout) + raise + except : + print "Exception in average_transforms_ldd:{}".format(sys.exc_info()[0]) + traceback.print_exc(file=sys.stdout) + raise + +def non_linear_register_step_regress_ldd( + sample, + model_intensity, + model_velocity, + output_intensity, + output_velocity, + level=32, + start_level=None, + parameters=None, + work_dir=None, + downsample=None, + debug=False, + previous_velocity=None, + datatype='short', + incremental=True, + remove0=False, + sym=False + ): + """perform linear registration to the model, and calculate new estimate""" + try: + + with mincTools() as m: + #print repr(sample) + + if m.checkfiles(inputs=[sample.scan], + outputs=[output_velocity.vel]): + + #velocity_approximate = LDDMriTransform(prefix=m.tempdir,name=sample.name+'_velocity') + #intensity_approximate = MriDataset(prefix=m.tempdir,name=sample.name+'_intensity') + intensity_approximate = None + velocity_approximate = None + velocity_update = None + + if debug: + intensity_approximate = MriDataset( prefix=output_velocity.prefix, + name=output_velocity.name +'_int_approx', + iter=output_velocity.iter ) + + velocity_approximate = LDDMriTransform( prefix=output_velocity.prefix, + name=output_velocity.name +'_approx', + iter=output_velocity.iter ) + + velocity_update = LDDMriTransform( prefix=output_velocity.prefix, + name=output_velocity.name +'_update', + iter=output_velocity.iter ) + else: + intensity_approximate = MriDataset( prefix=m.tempdir, + name=output_velocity.name +'_int_approx') + + velocity_approximate = LDDMriTransform( prefix=m.tempdir, + name=output_velocity.name +'_approx' ) + + velocity_update = LDDMriTransform( prefix=m.tempdir, + name=output_velocity.name +'_update') + + # A hack! 
assume that if initial model is MriDataset it means zero regression coeff + if isinstance(model_intensity, MriDataset): + intensity_approximate=model_intensity + velocity_approximate=None + + else: + (intensity_approximate, velocity_approximate) = \ + build_approximation(model_intensity, + model_velocity, + sample.par_int, + sample.par_vel, + intensity_approximate, + velocity_approximate, + noresample=(not incremental), + remove0=remove0) + if model_velocity is None: + velocity_approximate=None + + if start_level is None: + start_level=level + + # we are refining previous estimate + init_velocity=None + #if velocity_approximate is not None: + #init_velocity=velocity_approximate.vel + if incremental: + if previous_velocity is not None: + ## have to adjust it based on the current estimate + if velocity_approximate is not None: + init_velocity=m.tmp('init_velocity.mnc') + m.calc( [previous_velocity.vel, velocity_approximate.vel ], + 'A[0]-A[1]', init_velocity) + + else: + init_velocity=previous_velocity.vel + else: + if previous_velocity is not None: + init_velocity=previous_velocity.vel + elif velocity_approximate is not None: + init_velocity=velocity_approximate.vel + if sym: + print("Using symmetrization!") + # TODO: parallelalize this + update1=m.tmp('update1.mnc') + m.non_linear_register_ldd( + intensity_approximate.scan, + sample.scan, + update1, + source_mask=intensity_approximate.mask, + target_mask=sample.mask, + init_velocity=init_velocity, + parameters=parameters, + start=start_level, + level=level, + downsample=downsample, + #work_dir=work_dir + ) + update2=m.tmp('update2.mnc') + m.non_linear_register_ldd( + sample.scan, + intensity_approximate.scan, + update2, + source_mask=sample.mask, + target_mask=intensity_approximate.mask, + init_velocity=init_velocity, + parameters=parameters, + start=start_level, + level=level, + downsample=downsample, + #work_dir=work_dir + ) + m.calc([update1,update2],'(A[0]-A[1])/2.0',velocity_update.vel) + else: + m.non_linear_register_ldd( + intensity_approximate.scan, + sample.scan, + velocity_update.vel, + source_mask=intensity_approximate.mask, + target_mask=sample.mask, + init_velocity=init_velocity, + parameters=parameters, + start=start_level, + level=level, + downsample=downsample, + #work_dir=work_dir + ) + + # update estimate, possibility to use link function? 
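+                # in the log-domain the refined transform is the sum of the
+                # current approximation and the incremental update computed above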
+                if incremental and velocity_approximate is not None:
+                    m.calc( [velocity_approximate.vel, velocity_update.vel ], 'A[0]+A[1]', output_velocity.vel,
+                            datatype='-'+datatype)
+                else:
+                    m.calc( [velocity_update.vel ], 'A[0]', output_velocity.vel,
+                            datatype='-'+datatype)
+
+                if output_intensity is not None:
+                    # resample intensity
+                    m.resample_smooth_logspace(sample.scan, output_intensity.scan,
+                                               velocity=output_velocity.vel,
+                                               invert_transform=True,
+                                               datatype='-'+datatype
+                                               )
+
+                    if sample.mask is not None:
+                        m.resample_labels_logspace(sample.mask, output_intensity.mask,
+                                                   velocity=output_velocity.vel,
+                                                   invert_transform=True)
+                # done
+
+    except mincError as e:
+        print("Exception in non_linear_register_step_regress_ldd:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in non_linear_register_step_regress_ldd:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/model_ldd/regress_ldd.py b/ipl/model_ldd/regress_ldd.py
new file mode 100644
index 0000000..612015d
--- /dev/null
+++ b/ipl/model_ldd/regress_ldd.py
@@ -0,0 +1,457 @@
+import shutil
+import os
+import sys
+import csv
+import traceback
+import json
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+
+from .structures_ldd import MriDataset, LDDMriTransform, LDDMRIEncoder,MriDatasetRegress
+from .filter_ldd import generate_flip_sample, normalize_sample
+from .filter_ldd import average_samples,average_stats_regression
+from .filter_ldd import calculate_diff_bias_field
+from .filter_ldd import average_bias_fields
+from .filter_ldd import resample_and_correct_bias_ldd
+from .filter_ldd import build_approximation
+from .filter_ldd import voxel_regression
+from .registration_ldd import non_linear_register_step_ldd
+from .registration_ldd import average_transforms_ldd
+from .registration_ldd import non_linear_register_step_regress_ldd
+from .resample_ldd import concat_resample_ldd
+
+from scoop import futures, shared
+
+
+def regress_ldd(
+    samples,
+    initial_model=None,
+    initial_intensity_model=None,
+    initial_velocity_model=None,
+    output_intensity_model=None,
+    output_velocity_model=None,
+    output_residuals_int=None,
+    output_residuals_vel=None,
+    prefix='.',
+    options={}
+    ):
+    """ perform iterative model creation"""
+    try:
+
+        # make sure all input scans have parameters
+        N_int=None
+        N_vel=None
+
+        intensity_design_matrix=[]
+        velocity_design_matrix=[]
+        nomask=False
+
+        for s in samples:
+
+            if N_int is None:
+                N_int=len(s.par_int)
+            elif N_int!=len(s.par_int):
+                raise mincError("Sample {} has inconsistent number of intensity parameters: {} expected {}".format(repr(s),len(s.par_int),N_int))
+
+            if N_vel is None:
+                N_vel=len(s.par_vel)
+            elif N_vel!=len(s.par_vel):
+                raise mincError("Sample {} has inconsistent number of velocity parameters: {} expected {}".format(repr(s),len(s.par_vel),N_vel))
+
+            intensity_design_matrix.append(s.par_int)
+            velocity_design_matrix.append(s.par_vel)
+
+            if s.mask is None:
+                nomask=True
+
+        #print("Intensity design matrix=\n{}".format(repr(intensity_design_matrix)))
+        #print("Velocity design matrix=\n{}".format(repr(velocity_design_matrix)))
+
+        ref_model=None
+        # current estimate of template
+        if initial_model is not None:
+            current_intensity_model = initial_model
+            current_velocity_model = None
+            ref_model=initial_model.scan
+        else:
+            current_intensity_model = initial_intensity_model
+            current_velocity_model =
initial_velocity_model + ref_model=initial_intensity_model.volume[0] + + transforms=[] + + full_transforms=[] + + protocol=options.get( + 'protocol', [{'iter':4,'level':32, 'blur_int': None, 'blur_vel': None }, + {'iter':4,'level':16, 'blur_int': None, 'blur_vel': None }] + ) + + cleanup= options.get('cleanup',False) + cleanup_intermediate= options.get('cleanup_intermediate',False) + + parameters= options.get('parameters',None) + refine= options.get('refine',False) + qc= options.get('qc',False) + downsample =options.get('downsample',None) + start_level=options.get('start_level',None) + debug =options.get('debug',False) + debias =options.get('debias',True) + incremental=options.get('incremental',True) + remove0 =options.get('remove0',False) + sym =options.get('sym',False) + + if parameters is None: + parameters={ + 'conf':{}, + 'smooth_update':2, + 'smooth_field':2, + 'update_rule':1, + 'grad_type': 0, + 'max_step': 2.0, # This paramter is probably domain specific + 'hist_match':True # this turns out to be very important! + } + + intensity_models=[] + velocity_models=[] + intensity_residuals=[] + velocity_residuals=[] + + intensity_residual=None + velocity_residual=None + + prev_velocity_estimate=None + # go through all the iterations + it=0 + residuals=[] + + for (i,p) in enumerate(protocol): + blur_int_model=p.get('blur_int',None) + blur_vel_model=p.get('blur_vel',None) + for j in range(1,p['iter']+1): + it+=1 + _start_level=None + if it==1: + _start_level=start_level + # this will be a model for next iteration actually + + it_prefix=prefix+os.sep+str(it) + if not os.path.exists(it_prefix): + os.makedirs(it_prefix) + + next_intensity_model=MriDatasetRegress(prefix=prefix, name='model_intensity',iter=it, N=N_int,nomask=nomask) + next_velocity_model=MriDatasetRegress(prefix=prefix, name='model_velocity', iter=it, N=N_vel, nomask=True) + + + intensity_residual=MriDataset(prefix=prefix, scan= next_intensity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + #name=next_intensity_model.name, iter=it ) + + velocity_residual =MriDataset(prefix=prefix, scan= next_velocity_model.volume[0].rsplit('_0.mnc',1)[0]+'_RMS.mnc') + #name=next_velocity_model.name, iter=it ) + + # skip over existing models here! + + if not next_intensity_model.exists() or \ + not next_velocity_model.exists() or \ + not intensity_residual.exists() or \ + not velocity_residual.exists(): + + intensity_estimate=[] + velocity_estimate=[] + r=[] + + + # 1 for each sample generate current approximation + # 2. perform non-linear registration between each sample and sample-specific approximation + # 3. 
update transformation + # 1+2+3 - all together + for (i, s) in enumerate(samples): + sample_velocity= LDDMriTransform(name=s.name,prefix=it_prefix,iter=it) + sample_intensity= MriDataset(name=s.name,prefix=it_prefix,iter=it) + + previous_velocity=None + + if refine and it>1 and (not remove0): + previous_velocity=prev_velocity_estimate[i] + + r.append( + futures.submit( + non_linear_register_step_regress_ldd, + s, + current_intensity_model, + current_velocity_model, + None, + sample_velocity, + parameters=parameters, + level=p['level'], + start_level=_start_level, + work_dir=prefix, + downsample=downsample, + debug=debug, + previous_velocity=previous_velocity, + incremental=incremental, + remove0=remove0, + sym=sym + ) + ) + velocity_estimate.append(sample_velocity) + #intensity_estimate.append(sample_intensity) + + # wait for jobs to finish + futures.wait(r, return_when=futures.ALL_COMPLETED) + avg_inv_transform=None + + if debias: + # here all the transforms should exist + avg_inv_transform=LDDMriTransform(name='avg_inv',prefix=it_prefix,iter=it) + # 2 average all transformations + average_transforms_ldd(velocity_estimate, avg_inv_transform, symmetric=False, invert=True) + + corr=[] + corr_transforms=[] + corr_samples=[] + + # 3 concatenate correction and resample + for (i, s) in enumerate(samples): + c=MriDataset(prefix=it_prefix,iter=it,name=s.name) + x=LDDMriTransform(name=s.name+'_corr',prefix=it_prefix,iter=it) + + corr.append(futures.submit(concat_resample_ldd, + s, velocity_estimate[i], avg_inv_transform, + c, x, + model=ref_model, + symmetric=False, + qc=qc, + invert_transform=True )) + corr_transforms.append(x) + corr_samples.append(c) + + futures.wait(corr, return_when=futures.ALL_COMPLETED) + + # 4. perform regression and create new estimate + # 5. calculate residulas (?) + # 4+5 + result=futures.submit(voxel_regression, + intensity_design_matrix, velocity_design_matrix, + corr_samples, corr_transforms, + next_intensity_model, next_velocity_model, + intensity_residual, velocity_residual, + blur_int_model=blur_int_model, + blur_vel_model=blur_vel_model, + qc=qc + ) + futures.wait([result], return_when=futures.ALL_COMPLETED) + + # 6. 
cleanup
+                if cleanup :
+                    print("Cleaning up iteration: {}".format(it))
+                    for i in velocity_estimate:
+                        i.cleanup()
+                    for i in corr_samples:
+                        i.cleanup()
+                    if prev_velocity_estimate is not None:
+                        for i in prev_velocity_estimate:
+                            i.cleanup()
+                    if debias:
+                        avg_inv_transform.cleanup()
+            else:
+                # files were there, reuse them
+                print("Iteration {} already performed, skipping".format(it))
+                corr_transforms=[]
+                # this is a hack right now
+                for (i, s) in enumerate(samples):
+                    x=LDDMriTransform(name=s.name+'_corr',prefix=it_prefix,iter=it)
+                    corr_transforms.append(x)
+
+            intensity_models.append(current_intensity_model)
+            velocity_models.append(current_velocity_model)
+            intensity_residuals.append(intensity_residual)
+            velocity_residuals.append(velocity_residual)
+
+            current_intensity_model=next_intensity_model
+            current_velocity_model=next_velocity_model
+
+
+            result=futures.submit(average_stats_regression,
+                                  current_intensity_model, current_velocity_model,
+                                  intensity_residual, velocity_residual )
+            residuals.append(result)
+
+            regression_results={
+                'intensity_model':     current_intensity_model,
+                'velocity_model':      current_velocity_model,
+                'intensity_residuals': intensity_residual.scan,
+                'velocity_residuals':  velocity_residual.scan,
+            }
+            with open(prefix+os.sep+'results_{:03d}.json'.format(it),'w') as f:
+                json.dump(regression_results,f,indent=1, cls=LDDMRIEncoder)
+
+            # save for next iteration
+            # TODO: regularize?
+            prev_velocity_estimate=corr_transforms # have to use adjusted velocity estimate
+
+        # copy output to the destination
+        futures.wait(residuals, return_when=futures.ALL_COMPLETED)
+        with open(prefix+os.sep+'stats.txt','w') as f:
+            for s in residuals:
+                f.write("{}\n".format(s.result()))
+
+
+        with open(prefix+os.sep+'results_final.json','w') as f:
+            json.dump(regression_results, f, indent=1, cls=LDDMRIEncoder)
+
+
+        if cleanup_intermediate:
+            for i in range(len(intensity_models)-1):
+                intensity_models[i].cleanup()
+                velocity_models[i].cleanup()
+                intensity_residuals[i].cleanup()
+                velocity_residuals[i].cleanup()
+            # delete unneeded models
+            #shutil.rmtree(prefix+os.sep+'reg')
+
+        return regression_results
+    except mincError as e:
+        print("Exception in regress_ldd:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in regress_ldd:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def regress_ldd_csv(input_csv,
+                    int_par_count=None,
+                    model=None,
+                    mask=None,
+                    work_prefix=None, options={},
+                    regress_model=None):
+    """convenience function to run model generation using a CSV input file and a fixed init"""
+    internal_sample=[]
+
+    with open(input_csv, 'r') as csvfile:
+        reader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
+        for row in reader:
+
+            par=[ float(i) for i in row[2:] ]
+            par_vel=par
+            par_int=par
+
+            if int_par_count is not None:
+                par_int=par[:int_par_count]
+                par_vel=par[int_par_count:]
+            _mask=row[1]
+            if _mask=='':
+                _mask=None
+            internal_sample.append( MriDataset(scan=row[0], mask=_mask, par_int=par_int, par_vel=par_vel) )
+
+    internal_model=None
+    initial_intensity_model=None
+    initial_velocity_model=None
+
+    if regress_model is None:
+        if model is not None:
+            internal_model=MriDataset(scan=model, mask=mask)
+    else:
+        # assume that regress_model is an array
+        initial_intensity_model=MriDatasetRegress(prefix=work_prefix, name='initial_model_intensity', N=len(regress_model))
+        initial_intensity_model.volume=regress_model
+        initial_intensity_model.mask=mask
+
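+        # protect the user-supplied coefficient volumes so cleanup() keeps them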
+        initial_intensity_model.protect=True
+        initial_velocity_model=None
+
+
+    if work_prefix is not None and not os.path.exists(work_prefix):
+        os.makedirs(work_prefix)
+
+    return regress_ldd( internal_sample,
+                        initial_model=internal_model,
+                        prefix=work_prefix,
+                        options=options,
+                        initial_intensity_model=initial_intensity_model,
+                        initial_velocity_model=initial_velocity_model)
+
+
+def regress_ldd_simple(input_samples,
+                       int_design_matrix,
+                       geo_design_matrix,
+                       model=None,
+                       mask=None,
+                       work_prefix=None, options={},
+                       regress_model=None):
+    """convenience function to run model generation using a list of input samples and a fixed init"""
+    internal_sample=[]
+
+    for (i,j) in enumerate(input_samples):
+
+        internal_sample.append( MriDataset(scan=j[0], mask=j[1],
+                                           par_int=int_design_matrix[i],
+                                           par_vel=geo_design_matrix[i])
+                              )
+
+    internal_model=None
+    initial_intensity_model=None
+    initial_velocity_model=None
+
+    if regress_model is None:
+        if model is not None:
+            internal_model=MriDataset(scan=model, mask=mask)
+    else:
+        # assume that regress_model is an array
+        initial_intensity_model=MriDatasetRegress(prefix=work_prefix, name='initial_model_intensity', N=len(regress_model))
+        initial_intensity_model.volume=regress_model
+        initial_intensity_model.mask=mask
+
+        initial_intensity_model.protect=True
+        initial_velocity_model=None
+
+    if work_prefix is not None and not os.path.exists(work_prefix):
+        os.makedirs(work_prefix)
+
+    return regress_ldd( internal_sample,
+                        initial_model=internal_model,
+                        prefix=work_prefix,
+                        options=options,
+                        initial_intensity_model=initial_intensity_model,
+                        initial_velocity_model=initial_velocity_model)
+
+
+
+def build_estimate(description_json, parameters, output_prefix, int_par_count=None):
+    desc=None
+    with open(description_json, 'r') as f:
+        desc=json.load(f)
+
+    intensity_parameters=parameters
+    velocity_parameters=parameters
+
+    if int_par_count is not None:
+        intensity_parameters=parameters[:int_par_count]
+        velocity_parameters=parameters[int_par_count:]
+
+    if len(velocity_parameters)!=len(desc["velocity_model"]["volume"]) or \
+       len(intensity_parameters)!=len(desc["intensity_model"]["volume"]):
+
+        print(desc["intensity_model"]["volume"])
+        print("intensity_parameters={}".format(repr(intensity_parameters)))
+
+        print(desc["velocity_model"]["volume"])
+        print("velocity_parameters={}".format(repr(velocity_parameters)))
+
+        raise mincError("{} inconsistent number of parameters, expected {}".
+ format(repr(intensity_parameters), + len(desc["velocity_model"]["volume"]))) + + velocity=MriDatasetRegress(from_dict=desc["velocity_model"]) + intensity=MriDatasetRegress(from_dict=desc["intensity_model"]) + + output_scan=MriDataset(prefix=os.path.dirname(output_prefix),name=os.path.basename(output_prefix)) + output_transform=LDDMriTransform(prefix=os.path.dirname(output_prefix),name=os.path.basename(output_prefix)) + + build_approximation(intensity, velocity, + intensity_parameters, velocity_parameters, + output_scan, output_transform) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/model_ldd/resample_ldd.py b/ipl/model_ldd/resample_ldd.py new file mode 100644 index 0000000..33b697d --- /dev/null +++ b/ipl/model_ldd/resample_ldd.py @@ -0,0 +1,118 @@ +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +from .filter_ldd import * + + +# scoop parallel execution +from scoop import futures, shared + + +def concat_resample_ldd( + input_mri, + input_transform, + corr_transform, + output_mri, + output_transform, + model, + symmetric=False, + qc=False, + bias=None, + invert_transform=False, + datatype='short' + ): + """apply correction transformation and resample input""" + try: + with mincTools() as m: + + if not ( os.path.exists(output_mri.scan) and os.path.exists(output_transform.vel) ): + scan=input_mri.scan + + if bias is not None: + m.calc([input_mri.scan,bias.scan],'A[0]*A[1]',m.tmp('corr.mnc')) + scan=m.tmp('corr.mnc') + + if corr_transform is not None: + m.calc([input_transform.vel, corr_transform.vel],'A[0]+A[1]', output_transform.vel, datatype='-'+datatype) + else: + # TODO: copy? + m.calc([input_transform.vel ],'A[0]', output_transform.vel, datatype='-'+datatype) + + m.resample_smooth_logspace(scan, output_mri.scan, + velocity=output_transform.vel, + like=model, + invert_transform=invert_transform, + datatype=datatype) + + if input_mri.mask is not None and output_mri.mask is not None: + m.resample_labels_logspace(input_mri.mask, + output_mri.mask, + velocity=output_transform.vel, + like=model, + invert_transform=invert_transform) + if qc: + m.qc(output_mri.scan, output_mri.scan+'.jpg', + mask=output_mri.mask) + else: + if qc: + m.qc(output_mri.scan, output_mri.scan+'.jpg') + + if qc: + m.grid_magnitude(output_transform.vel, + m.tmp('velocity_mag.mnc')) + + m.qc(m.tmp('velocity_mag.mnc'), output_mri.scan+'_vel.jpg') + + if symmetric: + scan_f=input_mri.scan_f + + if bias is not None: + m.calc([input_mri.scan_f,bias.scan_f],'A[0]*A[1]', + m.tmp('corr_f.mnc'),datatype='-'+datatype) + scan_f=m.tmp('corr_f.mnc') + + if corr_transform is not None: + m.calc([input_transform.vel_f, corr_transform.vel],'A[0]+A[1]', output_transform.vel_f, datatype='-'+datatype) + else: + m.calc([input_transform.vel_f],'A[0]', output_transform.vel_f, datatype='-'+datatype) + + m.resample_smooth_logspace(scan_f, output_mri.scan_f, + velocity=output_transform.vel_f, + like=model, + invert_transform=invert_transform, + datatype=datatype) + + if input_mri.mask is not None and output_mri.mask is not None: + m.resample_labels_logspace(input_mri.mask_f, + output_mri.mask_f, + velocity=output_transform.vel_f, + like=model, + invert_transform=invert_transform) + if qc: + m.qc(output_mri.scan_f,output_mri.scan_f+'.jpg', + mask=output_mri.mask_f) + else: + if qc: + m.qc(output_mri.scan_f,output_mri.scan_f+'.jpg') + + if qc: + 
+                    m.grid_magnitude(output_transform.vel_f,
+                                     m.tmp('velocity_mag_f.mnc'))
+
+                    m.qc(m.tmp('velocity_mag_f.mnc'), output_mri.scan_f+'_vel.jpg' )
+
+    except mincError as e:
+        print("Exception in concat_resample_ldd:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in concat_resample_ldd:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/model_ldd/structures_ldd.py b/ipl/model_ldd/structures_ldd.py
new file mode 100644
index 0000000..0317453
--- /dev/null
+++ b/ipl/model_ldd/structures_ldd.py
@@ -0,0 +1,181 @@
+# data structures used in model generation package
+
+import shutil
+import os
+import sys
+import traceback
+import json
+
+class MriDataset(object):
+    """
+    Hold MRI sample together with regression parameters
+    """
+    def __init__(self, prefix=None, name=None, iter=None, scan=None, mask=None, protect=False, par_int=[],par_vel=[]):
+        self.prefix=prefix
+        self.name=name
+        self.iter=iter
+        self.protect=protect
+        self.scan_f=None
+        self.mask_f=None
+        self.par_int=par_int
+        self.par_vel=par_vel
+
+        if scan is None:
+            if self.iter is None:
+                self.scan=self.prefix+os.sep+self.name+'.mnc'
+                self.mask=self.prefix+os.sep+self.name+'_mask.mnc'
+                self.scan_f=self.prefix+os.sep+self.name+'_f.mnc'
+                self.mask_f=self.prefix+os.sep+self.name+'_f_mask.mnc'
+            else:
+                self.scan=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'.mnc'
+                self.mask=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_mask.mnc'
+                self.scan_f=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f.mnc'
+                self.mask_f=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f_mask.mnc'
+        else:
+            self.scan=scan
+            self.mask=mask
+
+        if self.name is None:
+            self.name=os.path.basename(self.scan)
+
+        if self.prefix is None:
+            self.prefix=os.path.dirname(self.scan)
+
+    def __repr__(self):
+        return 'MriDataset(prefix="{}",name="{}",iter="{}",scan="{}",mask="{}",protect={},par_int={},par_vel={})'.\
+               format(self.prefix,self.name,repr(self.iter),self.scan,self.mask,repr(self.protect),repr(self.par_int),repr(self.par_vel))
+
+    def cleanup(self):
+        """
+        Remove files, use if they are not needed anymore
+        """
+        if not self.protect:
+            for i in (self.scan, self.mask, self.scan_f, self.mask_f ):
+                if i is not None and os.path.exists(i):
+                    os.unlink(i)
+
+    def exists(self):
+        _ex=True
+        for i in (self.scan, self.mask, self.scan_f, self.mask_f ):
+            if i is not None :
+                _ex&=os.path.exists(i)
+        return _ex
+
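+# Illustrative sketch (hypothetical paths) of how MriDataset derives file names:
+#
+#   ds = MriDataset(prefix='/tmp/work', name='subj01', iter=2)
+#   # ds.scan   -> /tmp/work/subj01.002.mnc
+#   # ds.mask   -> /tmp/work/subj01.002_mask.mnc
+#   # ds.scan_f -> /tmp/work/subj01.002_f.mnc  (left-right flipped counterpart)
+#   ds.cleanup()  # removes the files unless ds.protect is True
+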
+class MriDatasetRegress(object):
+    def __init__(self, prefix=None, name=None, iter=None, N=1, protect=False, from_dict=None, nomask=False):
+        if from_dict is None:
+            self.prefix=prefix
+            self.name=name
+            self.iter=iter
+            self.protect=protect
+            self.N=N
+            self.volume=[]
+
+            if self.iter is None:
+                for n in range(0,N):
+                    self.volume.append(self.prefix+os.sep+self.name+'_{}.mnc'.format(n))
+                self.mask=self.prefix+os.sep+self.name+'_mask.mnc'
+            else:
+                for n in range(0,N):
+                    self.volume.append(self.prefix+os.sep+self.name+'.{:03d}_{}'.format(iter,n)+'.mnc')
+                self.mask=self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_mask.mnc'
+
+            if nomask:
+                self.mask=None
+        else: # simple hack for now
+            self.volume=from_dict["volume"]
+            self.iter=from_dict["iter"]
+            self.name=from_dict["name"]
+            self.mask=from_dict["mask"]
+            self.N=len(self.volume)
+
+    def __repr__(self):
+        return 'MriDatasetRegress(prefix="{}",name="{}",volume={},mask={},iter="{}",protect={})'.\
+               format(self.prefix, self.name, repr(self.volume), self.mask, repr(self.iter), repr(self.protect))
+
+    def cleanup(self):
+        if not self.protect:
+            for i in self.volume:
+                if i is not None and os.path.exists(i):
+                    os.unlink(i)
+            for i in [self.mask]:
+                if i is not None and os.path.exists(i):
+                    os.unlink(i)
+
+    def exists(self):
+        """
+        Check that all files are present
+        """
+        _ex=True
+        for i in self.volume:
+            if i is not None :
+                _ex&=os.path.exists(i)
+
+        for i in [self.mask]:
+            if i is not None :
+                _ex&=os.path.exists(i)
+
+        return _ex
+
+class LDDMriTransform(object):
+    """
+    Store log-diffeomorphic transformation
+    """
+    def __init__(self,prefix,name,iter=None):
+        self.prefix=prefix
+        self.name=name
+        self.iter=iter
+        self.vel_f=None
+
+        if self.iter is None:
+            self.vel=   self.prefix+os.sep+self.name+'_vel.mnc'
+            self.vel_f= self.prefix+os.sep+self.name+'_f_vel_0.mnc'
+        else:
+            self.vel=   self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_vel.mnc'
+            self.vel_f= self.prefix+os.sep+self.name+'.{:03d}'.format(iter)+'_f_vel.mnc'
+
+    def __repr__(self):
+        return 'LDDMriTransform(prefix="{}",name="{}",iter="{}")'.\
+               format(self.prefix,self.name,repr(self.iter))
+
+    def cleanup(self):
+        for i in (self.vel, self.vel_f):
+            if i is not None and os.path.exists(i):
+                os.unlink(i)
+
+    def exists(self):
+        _ex=True
+        for i in (self.vel, self.vel_f):
+            if i is not None :
+                _ex&=os.path.exists(i)
+        return _ex
+
+
+class LDDMRIEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, LDDMriTransform):
+            return {'name':obj.name,
+                    'iter':obj.iter,
+                    'vel' :obj.vel,
+                    'vel_f':obj.vel_f
+                   }
+        elif isinstance(obj, MriDatasetRegress):
+            return {'name':obj.name,
+                    'iter':obj.iter,
+                    'volume':obj.volume,
+                    'mask':obj.mask
+                   }
+        elif isinstance(obj, MriDataset):
+            return {'name':obj.name,
+                    'iter':obj.iter,
+                    'scan':obj.scan,
+                    'mask':obj.mask,
+                    'scan_f':obj.scan_f,
+                    'mask_f':obj.mask_f,
+                    'par_int':obj.par_int,
+                    'par_vel':obj.par_vel
+                   }
+        # Let the base class default method raise the TypeError
+        return json.JSONEncoder.default(self, obj)
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/qc/metric.py b/ipl/qc/metric.py
new file mode 100755
index 0000000..942d703
--- /dev/null
+++ b/ipl/qc/metric.py
@@ -0,0 +1,153 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S.
FONOV +# @date +# + +from __future__ import print_function + +import argparse +import shutil +import os +import sys +import csv +import copy +import json + +#import minc + +import ipl.elastix_registration +import ipl.minc_tools as minc_tools + +import numpy as np + +def parse_options(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Run registration metric") + + parser.add_argument("--verbose", + action="store_true", + default=False, + help="Be verbose", + dest="verbose") + + parser.add_argument("source", + help="Source file") + + parser.add_argument("target", + help="Target file") + + parser.add_argument("output", + help="Save output in a file") + + parser.add_argument("--exact", + action="store_true", + default=False, + help="Use exact metric", + dest="exact") + + parser.add_argument("--xfm", + help="Apply transform to source before running metric", + default=None) + + parser.add_argument("--random", + help="Apply random transform to source before running metric", + default=False, + action="store_true" + ) + + options = parser.parse_args() + return options + + +def extract_part(inp,outp,info, x=None, y=None, z=None,parts=None): + # + with minc_tools.mincTools() as minc: + ranges=[ + 'zspace={},{}'.format( info['zspace'].length/parts*z , info['zspace'].length/parts ), + 'yspace={},{}'.format( info['yspace'].length/parts*y , info['yspace'].length/parts ), + 'xspace={},{}'.format( info['xspace'].length/parts*x , info['xspace'].length/parts ) + ] + minc.reshape(inp, outp, dimrange=ranges ) + +if __name__ == "__main__": + options = parse_options() + metric = 'NormalizedMutualInformation' + sampler = 'Grid' + if options.source is None or options.target is None or options.output is None: + print("Error in arguments, run with --help") + print(repr(options)) + else: + #src = minc.Image(options.source, dtype=np.float32).data + #trg = minc.Image(options.target, dtype=np.float32).data + measures=[] + with minc_tools.mincTools() as minc: + # + _source=options.source + if options.xfm is not None: + _source=minc.tmp("source.mnc") + minc.resample_smooth(options.source,_source,transform=options.xfm,like=options.target) + + + measures={ + 'source':options.source, + 'target':options.target, + + } + + + if options.random: + xfm=minc.tmp('random.xfm') + rx=np.random.random_sample()*20.0-10.0 + ry=np.random.random_sample()*20.0-10.0 + rz=np.random.random_sample()*20.0-10.0 + tx=np.random.random_sample()*20.0-10.0 + ty=np.random.random_sample()*20.0-10.0 + tz=np.random.random_sample()*20.0-10.0 + minc.param2xfm(xfm,translation=[tx,ty,tz],rotations=[rx,ry,rz]) + measures['rot']=[rx,ry,rz] + measures['tran']=[tx,ty,tz] + _source=minc.tmp("source.mnc") + minc.resample_smooth(options.source,_source,transform=xfm,like=options.target) + + src_info=minc.mincinfo(_source) + trg_info=minc.mincinfo(options.target) + # + parts=3 + os.environ['MINC_COMPRESS']='0' + + parameters={'metric':metric, + 'resolutions':1, + 'pyramid': '1 1 1', + 'measure': True, + 'sampler': sampler, + 'grid_spacing': '3 3 3', + 'exact_metric': options.exact, + 'iterations': 1, + 'new_samples': False, + 'optimizer': "AdaptiveStochasticGradientDescent", + } + # + measures['sim']={'whole':ipl.elastix_registration.register_elastix(_source, options.target, parameters=parameters, nl=False)} + + for z in range(parts): + for y in range(parts): + for x in range(parts): + # + # extract part + src=minc.tmp("src_{}_{}_{}.mnc".format(x,y,z)) + trg=minc.tmp("trg_{}_{}_{}.mnc".format(x,y,z)) + 
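+                        # crop the matching sub-block out of source and target,
+                        # then score it with a single-iteration elastix run;
+                        # with parts=3 this yields a 3x3x3 map of local similarity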
#print(1) + extract_part(_source,src,src_info,x=x,y=y,z=z,parts=parts) + #print(2) + extract_part(options.target,trg,trg_info,x=x,y=y,z=z,parts=parts) + # run elastix measurement + k="{}_{}_{}".format(x,y,z) + measures['sim'][k]=ipl.elastix_registration.register_elastix(src, trg, parameters=parameters, nl=False) + # TODO: parallelize? + with open(options.output,'w') as f: + json.dump(measures,f,indent=2) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/registration.py b/ipl/registration.py new file mode 100644 index 0000000..e265d5f --- /dev/null +++ b/ipl/registration.py @@ -0,0 +1,853 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date 29/06/2015 +# +# registration tools + + +from __future__ import print_function + +import os +import sys +import shutil +import tempfile +import subprocess +import re +import fcntl +import traceback +import collections +import math +import argparse +# local stuff +import minc_tools + + +# hack to make it work on Python 3 +try: + unicode = unicode +except NameError: + # 'unicode' is undefined, must be Python 3 + str = str + unicode = str + bytes = bytes + basestring = (str,bytes) +else: + # 'unicode' exists, must be Python 2 + str = str + unicode = unicode + bytes = str + basestring = basestring + + + +linear_registration_config={ + 'bestlinreg': [ + { "blur" : "blur", + "trans" : ['-est_translations'], + "blur_fwhm" : 16, + "steps" : [8, 8, 8], + "tolerance" : 0.01, + "simplex" : 32 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4, 4, 4], + "tolerance" : 0.004, + "simplex" : 16 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4, 4, 4], + "tolerance" : 0.004, + "simplex" : 8 }, + + { "blur" : "dxyz", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4, 4, 4], + "tolerance" : 0.004, + "simplex" : 4 }, + + { "blur" : "dxyz", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4, 4, 4], + "tolerance" : 0.004, + "simplex" : 2 } + ], + + 'bestlinreg_s': [ + { "blur" : "blur", + "trans" : ['-est_translations'], + "blur_fwhm" : 16, + "steps" : [8,8,8], + "tolerance" : 0.01, + "simplex" : 32 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 16 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4,4,4], + "tolerance" : 0.0001, + "simplex" : 16 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4,4,4], + "tolerance" : 0.0001, + "simplex" : 8 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 2, + "steps" : [2,2,2], + "tolerance" : 0.0005, + "simplex" : 4 } + ], + + 'bestlinreg_s2': [ + { "blur" : "blur", + "trans" : ['-est_translations'], + "blur_fwhm" : 16, + "steps" : [8,8,8], + "tolerance" : 0.01, + "simplex" : 32 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 16 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 8 }, + + { "blur" : "dxyz", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 4 }, + + { "blur" : "dxyz", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 2 } + ], + + 'experiment_1': [ + { "blur" : "blur", + "trans" : ['-est_translations'], + "blur_fwhm" : 8, + "steps" : [8,8,8], + "tolerance" : 0.01, + "simplex" : 32 }, + + { 
"blur" : "blur", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 16 }, + + { "blur" : "blur", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 8 }, + + { "blur" : "dxyz", + "trans" : None, + "blur_fwhm" : 8, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 4 }, + + { "blur" : "dxyz", + "trans" : None, + "blur_fwhm" : 4, + "steps" : [4,4,4], + "tolerance" : 0.004, + "simplex" : 2 } + ], + + 'bestlinreg_new': [ # re-imelementation from Claude's bestlinreg ~ 2016-12-01 + { 'blur' : "blur", # -lsq7 scaling only + 'parameters' : "-lsq6", + 'trans' : ['-est_translations'], + 'blur_fwhm' : 8, + 'steps' : [4, 4, 4], + 'tolerance' : 0.0001, + 'simplex' : 16 }, + + { 'blur' : "blur", # -lsqXX full options + 'parameters' : "-lsq7", + 'trans' : None, + 'blur_fwhm' : 8, + 'steps' : [4, 4, 4], + 'tolerance' : 0.0001, + 'simplex' : 16 }, + + { 'blur' : "blur", + 'trans' : None, + 'blur_fwhm' : 4, + 'steps' : [4, 4, 4], + 'tolerance' : 0.0001, + 'simplex' : 8 }, + + { 'blur' : "blur", + 'trans' : None, + 'blur_fwhm' : 2, + 'steps' : [2, 2, 2], + 'tolerance' : 0.0005, + 'simplex' : 4 } + ] + + + } + + +def linear_register( + source, + target, + output_xfm, + parameters=None, + source_mask=None, + target_mask=None, + init_xfm=None, + objective=None, + conf=None, + debug=False, + close=False, + norot=False, + noshear=False, + noshift=False, + noscale=False, + work_dir=None, + start=None, + downsample=None, + verbose=0 + ): + """Perform linear registration, replacement for bestlinreg.pl script + + Args: + source - name of source minc file + target - name of target minc file + output_xfm - name of output transformation file + parameters - registration parameters (optional), can be + '-lsq6', '-lsq9', '-lsq12' + source_mask - name of source mask file (optional) + target_mask - name of target mask file (optional) + init_xfm - name of initial transformation file (optional) + objective - name of objective function (optional), could be + '-xcorr' (default), '-nmi','-mi' + conf - configuration for iterative algorithm (optional) + array of dict, or a string describing a flawor + bestlinreg (default) + bestlinreg_s + bestlinreg_s2 + bestlinreg_new - Claude's latest and greatest + debug - debug flag (optional) , default False + close - closeness flag (optional) , default False + norot - disable rotation flag (optional) , default False + noshear - disable shear flag (optional) , default False + noshift - disable shift flag (optional) , default False + noscale - disable scale flag (optional) , default False + work_dir - working directory (optional) , default create one in temp + start - initial blurring level, default 16mm from configuration + downsample - downsample initial files to this step size, default None + verbose - verbosity level + Returns: + resulting XFM file + + Raises: + mincError when tool fails + """ + print("linear_register source_mask:{} target_mask:{}".format(source_mask,target_mask)) + + with minc_tools.mincTools(verbose=verbose) as minc: + if not minc.checkfiles(inputs=[source,target], outputs=[output_xfm]): + return + + # python version + if conf is None: + conf = linear_registration_config['bestlinreg'] + elif not isinstance(conf, list): # assume that it is a string + if conf in linear_registration_config: + conf = linear_registration_config[conf] + + if parameters is None: + parameters='-lsq9' + + if objective is None: + objective='-xcorr' + + if not isinstance(conf, list): # assume that it is a 
+def linear_register(
+    source,
+    target,
+    output_xfm,
+    parameters=None,
+    source_mask=None,
+    target_mask=None,
+    init_xfm=None,
+    objective=None,
+    conf=None,
+    debug=False,
+    close=False,
+    norot=False,
+    noshear=False,
+    noshift=False,
+    noscale=False,
+    work_dir=None,
+    start=None,
+    downsample=None,
+    verbose=0
+    ):
+    """Perform linear registration, replacement for bestlinreg.pl script
+
+    Args:
+        source - name of source minc file
+        target - name of target minc file
+        output_xfm - name of output transformation file
+        parameters - registration parameters (optional), can be
+                     '-lsq6', '-lsq9', '-lsq12'
+        source_mask - name of source mask file (optional)
+        target_mask - name of target mask file (optional)
+        init_xfm - name of initial transformation file (optional)
+        objective - name of objective function (optional), could be
+                    '-xcorr' (default), '-nmi', '-mi'
+        conf - configuration for iterative algorithm (optional),
+               array of dict, or a string describing a flavor:
+               bestlinreg (default)
+               bestlinreg_s
+               bestlinreg_s2
+               bestlinreg_new - Claude's latest and greatest
+        debug - debug flag (optional), default False
+        close - closeness flag (optional), default False
+        norot - disable rotation flag (optional), default False
+        noshear - disable shear flag (optional), default False
+        noshift - disable shift flag (optional), default False
+        noscale - disable scale flag (optional), default False
+        work_dir - working directory (optional), default create one in temp
+        start - initial blurring level, default 16mm from configuration
+        downsample - downsample initial files to this step size, default None
+        verbose - verbosity level
+    Returns:
+        resulting XFM file
+
+    Raises:
+        mincError when tool fails
+    """
+    print("linear_register source_mask:{} target_mask:{}".format(source_mask,target_mask))
+
+    with minc_tools.mincTools(verbose=verbose) as minc:
+        if not minc.checkfiles(inputs=[source,target], outputs=[output_xfm]):
+            return
+
+        # python version
+        if conf is None:
+            conf = linear_registration_config['bestlinreg']
+        elif not isinstance(conf, list): # assume that it is a string
+            if conf in linear_registration_config:
+                conf = linear_registration_config[conf]
+
+        if parameters is None:
+            parameters='-lsq9'
+
+        if objective is None:
+            objective='-xcorr'
+
+        if not isinstance(conf, list): # assume that it is a string
+            # assume it's an external program's name
+            # else run internally
+            with minc_tools.mincTools() as m:
+                cmd=[conf,source,target,output_xfm]
+                if source_mask is not None:
+                    cmd.extend(['-source_mask',source_mask])
+                if target_mask is not None:
+                    cmd.extend(['-target_mask',target_mask])
+                if parameters is not None:
+                    cmd.append(parameters)
+                if objective is not None:
+                    cmd.append(objective)
+                if init_xfm is not None:
+                    cmd.extend(['-init_xfm',init_xfm])
+                m.command(cmd, inputs=[source,target], outputs=[output_xfm],verbose=2)
+            return output_xfm
+        else:
+
+            prev_xfm = None
+
+            s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+            t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+
+            source_lr=source
+            target_lr=target
+
+            source_mask_lr=source_mask
+            target_mask_lr=target_mask
+            # figure out what to do here:
+            with minc_tools.cache_files(work_dir=work_dir,context='reg') as tmp:
+                if downsample is not None:
+                    source_lr=tmp.cache(s_base+'_'+str(downsample)+'.mnc')
+                    target_lr=tmp.cache(t_base+'_'+str(downsample)+'.mnc')
+
+                    minc.resample_smooth(source,source_lr, unistep=downsample)
+                    minc.resample_smooth(target,target_lr, unistep=downsample)
+
+                    if source_mask is not None:
+                        source_mask_lr=tmp.cache(s_base+'_mask_'+str(downsample)+'.mnc')
+                        minc.resample_labels(source_mask,source_mask_lr,unistep=downsample,datatype='byte')
+                    if target_mask is not None:
+                        target_mask_lr=tmp.cache(t_base+'_mask_'+str(downsample)+'.mnc')
+                        minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte')
+
+
+                # a fitting we shall go...
+                for (i,c) in enumerate(conf):
+                    _parameters=parameters
+
+                    if 'parameters' in c and parameters!='-lsq6': # emulate Claude's approach
+                        _parameters=c.get('parameters')#'-lsq7'
+
+                    # set up intermediate files
+                    if start is not None and start>c['blur_fwhm']:
+                        continue
+                    elif close and c['blur_fwhm']>8:
+                        continue
+
+                    tmp_xfm = tmp.tmp(s_base+'_'+t_base+'_'+str(i)+'.xfm')
+
+                    tmp_source = source_lr
+                    tmp_target = target_lr
+
+                    if c['blur_fwhm']>0:
+                        tmp_source = tmp.cache(s_base+'_'+c['blur']+'_'+str(c['blur_fwhm'])+'.mnc')
+                        if not os.path.exists(tmp_source):
+                            minc.blur(source_lr,tmp_source,gmag=(c['blur']=='dxyz'), fwhm=c['blur_fwhm'])
+
+                        tmp_target = tmp.cache(t_base+'_'+c['blur']+'_'+str(c['blur_fwhm'])+'.mnc')
+                        if not os.path.exists(tmp_target):
+                            minc.blur(target_lr,tmp_target,gmag=(c['blur']=='dxyz'), fwhm=c['blur_fwhm'])
+
+                    # set up registration
+                    args =[ 'minctracc',
+                            tmp_source, tmp_target,'-clobber',
+                            _parameters ,
+                            objective ,
+                            '-simplex', c['simplex'],
+                            '-tol',     c['tolerance'] ]
+
+                    args.append('-step')
+                    args.extend(c['steps'])
+
+                    # Current transformation at this step
+                    if prev_xfm is not None:
+                        args.extend(['-transformation', prev_xfm])
+                    elif init_xfm is not None:
+                        args.extend(['-transformation',init_xfm,'-est_center'])
+                    elif close:
+                        args.append('-identity')
+                    else:
+                        # Initial transformation will be computed from the Principal axis
+                        # transformation (PAT).
+                        if c['trans']=='-est_translations':
+                            args.extend(c['trans'])
+                        else :
+                            # will use a manual transformation based on the shift of CoM;
+                            # should be identical to '-est_translations', but it's not
+                            com_src=minc.stats(source,['-com','-world_only'],single_value=False)
+                            com_trg=minc.stats(target,['-com','-world_only'],single_value=False)
+                            diff=[com_trg[k]-com_src[k] for k in range(3)]
+                            xfm=tmp.cache(s_base+'_init.xfm')
+                            minc.param2xfm(xfm,translation=diff)
+                            args.extend(['-transformation',xfm])
+
+                    # masks (even if the blurred image is masked, it's still preferable
+                    # to use the mask in minctracc)
+                    if source_mask is not None:
+                        args.extend(['-source_mask',source_mask_lr])
+                    if target_mask is not None:
+                        args.extend(['-model_mask',target_mask_lr])
+
+                    if noshear:
+                        args.extend( ['-w_shear',0,0,0] )
+                    if noscale:
+                        args.extend( ['-w_scales',0,0,0] )
+                    if noshift:
+                        args.extend( ['-w_translations',0,0,0] )
+                    if norot:
+                        args.extend( ['-w_rotations',0,0,0] )
+
+                    # add files and run registration
+                    args.append(tmp_xfm)
+                    minc.command([str(ii) for ii in args],inputs=[tmp_source,tmp_target],outputs=[tmp_xfm])
+
+                    prev_xfm = tmp_xfm
+
+                shutil.copyfile(prev_xfm,output_xfm)
+                return output_xfm
+
+def linear_register_to_self(
+    source,
+    target,
+    output_xfm,
+    parameters=None,
+    mask=None,
+    target_talxfm=None,
+    init_xfm=None,
+    model=None,
+    modeldir=None,
+    close=False,
+    nocrop=False,
+    noautothreshold=False
+    ):
+    """perform linear registration, wrapper around mritoself
+
+    """
+
+    # TODO convert mritoself to python (?)
+    with minc_tools.mincTools() as minc:
+        cmd = ['mritoself', source, target, output_xfm]
+        if parameters is not None:
+            cmd.append(parameters)
+        if mask is not None:
+            cmd.extend(['-mask', mask])
+        if target_talxfm is not None:
+            cmd.extend(['-target_talxfm', target_talxfm])
+        if init_xfm is not None:
+            cmd.extend(['-transform', init_xfm])
+        if model is not None:
+            cmd.extend(['-model', model])
+        if modeldir is not None:
+            cmd.extend(['-modeldir', modeldir])
+        if close:
+            cmd.append('-close')
+        if nocrop:
+            cmd.append('-nocrop')
+        if noautothreshold:
+            cmd.append('-noautothreshold')
+            cmd.append('-nothreshold')
+        minc.command(cmd, inputs=[source, target], outputs=[output_xfm])
+
+
+
+def non_linear_register_full(
+    source, target, output_xfm,
+    source_mask=None,
+    target_mask=None,
+    init_xfm= None,
+    level=4,
+    start=32,
+    parameters=None,
+    work_dir=None,
+    downsample=None
+    ):
+    """perform non-linear registration, multiple levels
+    Args:
+        source - name of source minc file
+        target - name of target minc file
+        output_xfm - name of output transformation file
+        source_mask - name of source mask file (optional)
+        target_mask - name of target mask file (optional)
+        init_xfm - name of initial transformation file (optional)
+        parameters - configuration for iterative algorithm dict (optional)
+        work_dir - working directory (optional), default create one in temp
+        start - initial step size, default 32mm
+        level - final step size, default 4mm
+        downsample - downsample initial files to this step size, default None
+
+    Returns:
+        resulting XFM file
+
+    Raises:
+        mincError when tool fails
+    """
+    with minc_tools.mincTools() as minc:
+
+        if not minc.checkfiles(inputs=[source,target],
+                               outputs=[output_xfm]):
+            return
+
+        if parameters is None:
+            #print("Using default parameters")
+            parameters = {
+                'cost':        'corrcoeff',
+                'weight':      1,
+                'stiffness':   1,
+                'similarity':  0.3,
+                'sub_lattice': 6,
+
+                'conf': [
+                    {'step'       : 32.0,
+                     'blur_fwhm'  : 16.0,
+                     'iterations' : 20,
+                     'blur'       : 'blur',
+                    },
+                    {'step'       : 16.0,
+                     'blur_fwhm'  : 8.0,
+                     'iterations' : 20,
+                     'blur'       : 'blur',
+                    },
+                    {'step'       : 12.0,
+                     'blur_fwhm'  : 6.0,
+                     'iterations' : 20,
+                     'blur'       : 'blur',
+                    },
+                    {'step'       : 8.0,
+                     'blur_fwhm'  : 4.0,
+                     'iterations' : 20,
+                     'blur'       : 'blur',
+                    },
+                    {'step'       : 6.0,
+                     'blur_fwhm'  : 3.0,
+                     'iterations' : 20,
+                     'blur'       : 'blur',
+                    },
+                    {'step'       : 4.0,
+                     'blur_fwhm'  : 2.0,
+                     'iterations' : 10,
+                     'blur'       : 'blur',
+                    },
+                    {'step'       : 2.0,
+                     'blur_fwhm'  : 1.0,
+                     'iterations' : 10,
+                     'blur'       : 'blur',
+                    },
+                    {'step'       : 1.0,
+                     'blur_fwhm'  : 1.0,
+                     'iterations' : 10,
+                     'blur'       : 'blur',
+                    },
+                    {'step'       : 1.0,
+                     'blur_fwhm'  : 0.5,
+                     'iterations' : 10,
+                     'blur'       : 'blur',
+                    },
+                    {'step'       : 0.5,
+                     'blur_fwhm'  : 0.25,
+                     'iterations' : 10,
+                     'blur'       : 'blur',
+                    },
+                ]
+            }
+
+        prev_xfm  = None
+        prev_grid = None
+
+        s_base=os.path.basename(source).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+        t_base=os.path.basename(target).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+
+        source_lr=source
+        target_lr=target
+
+        source_mask_lr=source_mask
+        target_mask_lr=target_mask
+
+        # figure out what to do here:
+        with minc_tools.cache_files(work_dir=work_dir,context='reg') as tmp:
+            # a fitting we shall go...
+            if downsample is not None:
+                source_lr=tmp.cache(s_base+'_'+str(downsample)+'.mnc')
+                target_lr=tmp.cache(t_base+'_'+str(downsample)+'.mnc')
+
+                minc.resample_smooth(source,source_lr,unistep=downsample)
+                minc.resample_smooth(target,target_lr,unistep=downsample)
+
+                if source_mask is not None:
+                    source_mask_lr=tmp.cache(s_base+'_mask_'+str(downsample)+'.mnc')
+                    minc.resample_labels(source_mask,source_mask_lr,unistep=downsample,datatype='byte')
+                if target_mask is not None:
+                    target_mask_lr=tmp.cache(t_base+'_mask_'+str(downsample)+'.mnc')
+                    minc.resample_labels(target_mask,target_mask_lr,unistep=downsample,datatype='byte')
+
+            for (i,c) in enumerate(parameters['conf']):
+
+                if c['step']>start:
+                    continue
+                elif c['step']<level:
+                    break
+
+                # set up intermediate files
+                tmp_xfm  = tmp.tmp(s_base+'_'+t_base+'_'+str(i)+'.xfm')
+                tmp_grid = tmp.tmp(s_base+'_'+t_base+'_'+str(i)+'_grid_0.mnc')
+
+                tmp_source = source_lr
+                tmp_target = target_lr
+
+                if c['blur_fwhm']>0:
+                    tmp_source = tmp.cache(s_base+'_'+c['blur']+'_'+str(c['blur_fwhm'])+'.mnc')
+
+                    if not os.path.exists(tmp_source):
+                        minc.blur(source_lr,tmp_source,gmag=(c['blur']=='dxyz'),fwhm=c['blur_fwhm'])
+                        tmp.unlock(tmp_source)
+
+                    tmp_target = tmp.cache(t_base+'_'+c['blur']+'_'+str(c['blur_fwhm'])+'.mnc')
+                    if not os.path.exists(tmp_target):
+                        minc.blur(target_lr,tmp_target,gmag=(c['blur']=='dxyz'),fwhm=c['blur_fwhm'])
+                        tmp.unlock(tmp_target)
+
+                # set up registration
+                args =['minctracc', tmp_source,tmp_target,'-clobber',
+                       '-nonlinear',  parameters['cost'],
+                       '-weight',     parameters['weight'],
+                       '-stiffness',  parameters['stiffness'],
+                       '-similarity', parameters['similarity'],
+                       '-sub_lattice',parameters['sub_lattice'],
+                      ]
+
+                args.extend(['-iterations',   c['iterations'] ] )
+                args.extend(['-lattice_diam', c['step']*3.0, c['step']*3.0, c['step']*3.0 ] )
+                args.extend(['-step',         c['step'], c['step'], c['step'] ] )
+
+                if c['step']<4:
+                    args.append('-no_super')
+
+                # Current transformation at this step
+                if prev_xfm is not None:
+                    args.extend(['-transformation', prev_xfm])
+                elif init_xfm is not None:
+                    args.extend(['-transformation', init_xfm])
+                else:
+                    args.append('-identity')
+
+                # masks (even if the blurred image is masked, it's still preferable
+                # to use the mask in minctracc)
+                if source_mask is not None:
+                    args.extend(['-source_mask',source_mask_lr])
+                if target_mask is not None:
+                    args.extend(['-model_mask',target_mask_lr])
+
+                # add files and run registration
+                args.append(tmp_xfm)
+
+                minc.command([str(ii) for ii in args],
+                             inputs=[tmp_source,tmp_target],
+                             outputs=[tmp_xfm] )
+
+                prev_xfm  = tmp_xfm
+                prev_grid = tmp_grid
+
+            # done
+            if prev_xfm is None:
+                raise minc_tools.mincError("No iterations were performed!")
+
+            # STOP-gap measure to save space for now
+            # TODO: fix minctracc?
+            # TODO: fix mincreshape too!
+            minc.calc([prev_grid],'A[0]',tmp.tmp('final_grid_0.mnc'),datatype='-float')
+            shutil.move(tmp.tmp('final_grid_0.mnc'),prev_grid)
+
+            minc.param2xfm(tmp.tmp('identity.xfm'))
+            minc.xfmconcat([tmp.tmp('identity.xfm'),prev_xfm],output_xfm)
+            return output_xfm
+
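+# Illustrative call (hypothetical file names): estimate a nonlinear transform
+# from 32 mm down to 2 mm, downsampling the inputs to 2 mm first:
+#
+#   non_linear_register_full('subj.mnc', 'model.mnc', 'subj_nl.xfm',
+#                            source_mask='subj_mask.mnc',
+#                            target_mask='model_mask.mnc',
+#                            start=32, level=2, downsample=2.0)
+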
+def non_linear_register_increment(
+    source,
+    target,
+    output_xfm,
+    source_mask=None,
+    target_mask=None,
+    init_xfm=None,
+    level=4,
+    parameters=None,
+    work_dir=None,
+    downsample=None
+    ):
+    """perform non-linear registration, single increment; right now there is no
+    difference from non_linear_register_full with start and level set to the same value
+    Args:
+        source - name of source minc file
+        target - name of target minc file
+        output_xfm - name of output transformation file
+        source_mask - name of source mask file (optional)
+        target_mask - name of target mask file (optional)
+        init_xfm - name of initial transformation file (optional)
+        parameters - configuration for iterative algorithm dict (optional)
+        work_dir - working directory (optional), default create one in temp
+        level - final step size, default 4mm
+        downsample - downsample initial files to this step size, default None
+
+    Returns:
+        resulting XFM file
+
+    Raises:
+        mincError when tool fails
+    """
+
+    return non_linear_register_full(source,target,output_xfm,
+                source_mask=source_mask,
+                target_mask=target_mask,
+                init_xfm=init_xfm,
+                level=level,
+                start=level,
+                parameters=parameters,
+                work_dir=work_dir,
+                downsample=downsample)
+
+
+
+def parse_options():
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        description="Run minctracc-based registration")
+
+    parser.add_argument("--verbose",
+                        action="store_true",
+                        default=False,
+                        help="Be verbose",
+                        dest="verbose")
+
+    parser.add_argument("source",
+                        help="Source file")
+
+    parser.add_argument("target",
+                        help="Target file")
+
+    parser.add_argument("output_xfm",
+                        help="Output transformation file, xfm format")
+
+    parser.add_argument("--source_mask",
+                        default=None,
+                        help="Source mask")
+
+    parser.add_argument("--target_mask",
+                        default=None,
+                        help="Target mask")
+
+    parser.add_argument("--init_xfm",
+                        default=None,
+                        help="Initial transformation, minc format")
+
+    parser.add_argument("--work_dir",
+                        default=None,
+                        help="Work directory")
+
+    parser.add_argument("--downsample",
+                        default=None,
+                        help="Downsample to given voxel size ",
+                        type=float)
+
+    parser.add_argument("--start",
+                        default=None,
+                        help="Start level of registration 32 for nonlinear, 16 for linear",
+                        type=float)
+
+    parser.add_argument("--level",
+                        default=4.0,
+                        help="Final level of registration (nl)",
+                        type=float)
+
+    parser.add_argument("--nl",
+                        action="store_true",
+                        dest='nl',
+                        help="Use nonlinear mode",
+                        default=False)
+
+    parser.add_argument("--lin",
+                        help="Linear mode, default lsq6",
+                        default='lsq6')
+
+    parser.add_argument("--objective",
+                        default="xcorr",
+                        help="Registration objective function (linear)")
+
+    parser.add_argument("--conf",
+                        default="bestlinreg_s2",
+                        help="Linear registration configuration")
+
+
+    options = parser.parse_args()
+    return options
+
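+# Typical invocations (illustrative file names):
+#   python registration.py subj.mnc model.mnc out_lin.xfm --lin lsq9 --objective nmi
+#   python registration.py subj.mnc model.mnc out_nl.xfm --nl --level 2.0
+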
+
+if __name__ == "__main__":
+    options = parse_options()
+
+    if options.source is None or options.target is None:
+        print("Error in arguments, run with --help")
+        print(repr(options))
+    else:
+
+        if options.nl :
+            if options.start is None:
+                options.start=32.0
+
+            non_linear_register_full(
+                options.source, options.target, options.output_xfm,
+                source_mask= options.source_mask,
+                target_mask= options.target_mask,
+                init_xfm   = options.init_xfm,
+                start      = options.start,
+                level      = options.level,
+                work_dir   = options.work_dir,
+                downsample = options.downsample)
+        else:
+            if options.start is None:
+                options.start=16.0
+            _verbose=0
+            if options.verbose: _verbose=2
+
+            linear_register(
+                options.source, options.target, options.output_xfm,
+                source_mask= options.source_mask,
+                target_mask= options.target_mask,
+                init_xfm   = options.init_xfm,
+                #start     = options.start,
+                work_dir   = options.work_dir,
+                downsample = options.downsample,
+                objective  = '-'+options.objective,
+                conf       = options.conf,
+                parameters = '-'+options.lin,
+                verbose    = _verbose
+                )
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80
diff --git a/ipl/segment/__init__.py b/ipl/segment/__init__.py
new file mode 100644
index 0000000..00817a2
--- /dev/null
+++ b/ipl/segment/__init__.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+# image segmentation functions
+
+# internal functions
+from .error_correction import errorCorrectionTrain
+from .cross_validation import errorCorrectionApply
+from .structures import MriDataset
+from .structures import MriTransform
+from .labels import split_labels_seg
+from .labels import merge_labels_seg
+from .resample import resample_file
+from .resample import resample_split_segmentations
+from .resample import warp_rename_seg
+from .resample import warp_sample
+from .resample import concat_resample
+from .registration import linear_registration
+from .registration import non_linear_registration
+from .model import create_local_model
+from .model import create_local_model_flip
+from .filter import apply_filter
+from .filter import make_border_mask
+from .filter import generate_flip_sample
+from .library import save_library_info
+from .library import load_library_info
+from .train import generate_library
+from .fuse import fusion_segment
+from .train_ec import train_ec_loo
+from .cross_validation import loo_cv_fusion_segment
+from .cross_validation import full_cv_fusion_segment
+from .cross_validation import cv_fusion_segment
+from .cross_validation import run_segmentation_experiment
+from .analysis import calc_similarity_stats
+
+__all__= ['generate_library',
+          'load_library_info',
+          'cv_fusion_segment',
+          'fusion_segment',
+          'train_ec_loo' ]
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/segment/analysis.py b/ipl/segment/analysis.py
new file mode 100644
index 0000000..478ce55
--- /dev/null
+++ b/ipl/segment/analysis.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S.
FONOV +# @date +# + + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + + +def calc_similarity_stats( input_ground_truth, + input_segmentation, + output_stats=None, + relabel=None, + use_labels=None): + ''' + Calculate similarity stats + ''' + stats={} + + stats[ 'sample' ] = input_segmentation + stats[ 'ground_truth' ] = input_ground_truth + + cmd=['volume_gtc_similarity', input_ground_truth, input_segmentation,'--csv'] + + if use_labels: + cmd.extend(['--include', ','.join([str(i) for i in use_labels])]) + + with mincTools() as m: + sim = m.execute_w_output( cmd ).rstrip("\n").split(',') + + stats['gkappa'] = float(sim[0]) + stats['gtc'] = float(sim[1]) + stats['akappa'] = float(sim[2]) + + + sim = m.execute_w_output( + [ 'volume_similarity', input_ground_truth, input_segmentation,'--csv'] + ).split("\n") + + ka={} + se={} + sp={} + js={} + + for i in sim: + q=i.split(',') + if len(q)==5: + l=int(q[0]) + + if relabel is not None: + l=relabel[str(l)] + + ka[l] = float( q[1] ) + se[l] = float( q[2] ) + sp[l] = float( q[3] ) + js[l] = float( q[4] ) + + stats['ka']=ka + stats['se']=se + stats['sp']=sp + stats['js']=js + + if output_stats is not None: + with open(output_stats,'w') as f: + f.write("{},{},{},{}\n".format(stats['sample'],stats['gkappa'],stats['gtc'],stats['akappa'])) + + return stats + +def create_error_map(input_ground_truth, + input_segmentation, + output_maps, + lin_xfm=None, + nl_xfm=None, + template=None, + label_list=[] ): + try: + with mincTools( verbose=2 ) as m: + # go over labels and calculate errors per label + # + for (i,l) in enumerate(label_list): + # extract label error + out=m.tmp(str(l)+'.mnc') + xfm=None + + m.calc([input_segmentation, input_ground_truth], + "abs(A[0]-{})<0.5&&abs(A[1]-{})>0.5 || abs(A[0]-{})>0.5&&abs(A[1]-{})<0.5 ? 
1:0".format(l,l,l,l), + out, datatype='-byte') + + if lin_xfm is not None and nl_xfm is not None: + xfm=m.tmp(str(l)+'.xfm') + m.xfmconcat([lin_xfm,nl_xfm],xfm) + elif lin_xfm is not None: + xfm=lin_xfm + else: + xfm=nl_xfm + + m.resample_smooth(out,output_maps[i], + transform=xfm, + like=template, + order=1, + datatype='byte') + + except mincError as e: + print("Exception in split_labels:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in split_labels:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def average_error_maps(maps, out_avg): + try: + with mincTools( verbose=2 ) as m: + print("average_error_maps {} {}".format(repr(maps),repr(out_avg))) + m.average(maps, out_avg, datatype='-short') + except mincError as e: + print("Exception in split_labels:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in split_labels:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +def max_error_maps(maps, out_max): + try: + with mincTools( verbose=2 ) as m: + print("average_error_maps {} {}".format(repr(maps),repr(out_max))) + m.math(maps, 'max', out_max, datatype='-short') + except mincError as e: + print("Exception in split_labels:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in split_labels:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/cerebellum_qc_v5.lut b/ipl/segment/cerebellum_qc_v5.lut new file mode 100644 index 0000000..4d946f6 --- /dev/null +++ b/ipl/segment/cerebellum_qc_v5.lut @@ -0,0 +1,31 @@ +0 0.0 0.0 0.0 +1 1.0 0.678431372549 0.243137254902 +2 0.0 0.0 0.0 +3 0.847058823529 0.749019607843 0.847058823529 +4 0.858823529412 0.439215686275 0.576470588235 +5 1.0 0.0 1.0 +6 0.0 0.172549019608 0.592156862745 +7 0.0 0.980392156863 0.603921568627 +8 0.878431372549 0.752941176471 0.0 +9 0.803921568627 0.360784313725 0.360784313725 +10 0.372549019608 0.619607843137 0.627450980392 +11 1.0 0.270588235294 0.0 +12 0.690196078431 0.76862745098 0.870588235294 +13 1.0 0.0 0.0 +14 0.0 0.0 1.0 +15 0.0 1.0 1.0 +16 1.0 0.937254901961 0.835294117647 +17 0.803921568627 0.521568627451 0.247058823529 +18 0.0 0.545098039216 0.545098039216 +19 1.0 0.894117647059 0.882352941176 +20 0.866666666667 0.627450980392 0.866666666667 +21 1.0 0.250980392157 0.250980392157 +22 0.294117647059 0.0 0.509803921569 +23 0.235294117647 0.701960784314 0.443137254902 +24 0.933333333333 0.909803921569 0.666666666667 +25 0.898039215686 0.992156862745 0.0 +26 0.172549019608 1.0 0.211764705882 +27 1.0 0.545098039216 1.0 +28 1.0 0.0 0.427450980392 +29 0.823529411765 0.807843137255 0.0 +30 0.988235294118 0.976470588235 0.96862745098 diff --git a/ipl/segment/cross_validation.py b/ipl/segment/cross_validation.py new file mode 100644 index 0000000..d63e993 --- /dev/null +++ b/ipl/segment/cross_validation.py @@ -0,0 +1,504 @@ +import shutil +import os +import sys +import csv +import copy +import json +import random + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +# scoop parallel execution +from scoop import futures, shared + +from .fuse import * +from .structures import * +from .resample import * +from .train_ec import * +from .filter import * +from .analysis import * + +def run_segmentation_experiment( input_scan, + input_seg, + 
+                                 segmentation_library,
+                                 output_experiment,
+                                 segmentation_parameters={},
+                                 debug=False,
+                                 mask=None,
+                                 work_dir=None,
+                                 ec_parameters=None,
+                                 ec_variant='ec',
+                                 fuse_variant='fuse',
+                                 regularize_variant='gc',
+                                 add=[],
+                                 cleanup=False,
+                                 presegment=None,
+                                 train_list=None):
+    """run a segmentation experiment: perform segmentation and compare with ground truth
+
+    Arguments:
+    input_scan -- input scan object MriDataset
+    input_seg -- input segmentation file name (ground truth)
+    segmentation_library -- segmentation library object
+    output_experiment -- prefix for output
+
+    Keyword arguments:
+    segmentation_parameters -- parameters for segmentation algorithm,
+    debug -- debug flag, (default False)
+    mask -- mask file name to restrict segmentation , (default None)
+    work_dir -- work directory, (default None - use output_experiment)
+    ec_parameters -- error correction parameters, (default None)
+    ec_variant -- name of error correction parameters setting , (default 'ec')
+    fuse_variant -- name of fusion parameters, (default 'fuse' )
+    regularize_variant -- name of regularization parameters, (default 'gc')
+    add -- additional modalities [T2w,PDw etc]
+    cleanup -- flag to clean most of the temporary files
+    presegment -- use pre-segmented result (when comparing with external tool)
+    """
+    try:
+        relabel=segmentation_library.get("label_map",None)
+
+        if relabel is not None and isinstance(relabel, list) :
+            _r={i[0]:i[1] for i in relabel}
+            relabel=_r
+
+        if ec_parameters is not None:
+            _ec_parameters=copy.deepcopy(ec_parameters)
+            # let's train error correction!
+
+            if work_dir is not None:
+                fuse_output=work_dir+os.sep+fuse_variant+'_'+regularize_variant
+            else:
+                fuse_output=output_experiment+os.sep+fuse_variant+'_'+regularize_variant
+
+            _ec_parameters['work_dir']=fuse_output
+            _ec_parameters['output']=ec_output=fuse_output+os.sep+ec_variant+'.pickle'
+            _ec_parameters['variant']=ec_variant
+
+            train_ec_loo( segmentation_library,
+                          segmentation_parameters=copy.deepcopy(segmentation_parameters),
+                          ec_parameters=_ec_parameters,
+                          debug=debug,
+                          fuse_variant=fuse_variant,
+                          regularize_variant=regularize_variant,
+                          cleanup=cleanup,
+                          ext=(presegment is not None),
+                          train_list=train_list )
+
+            segmentation_parameters['ec_options']=copy.deepcopy(ec_parameters)
+            segmentation_parameters['ec_options']['training']=ec_output
+
+        if debug:
+            if not os.path.exists(os.path.dirname(output_experiment)):
+                os.makedirs(os.path.dirname(output_experiment))
+            with open(output_experiment+'_par.json','w') as f:
+                json.dump(segmentation_parameters,f,indent=1)
+
+        (output_file, output_info) = fusion_segment(
+                    input_scan,
+                    segmentation_library,
+                    output_experiment,
+                    input_mask=mask,
+                    parameters=segmentation_parameters,
+                    debug=debug,
+                    work_dir=work_dir,
+                    ec_variant=ec_variant,
+                    fuse_variant=fuse_variant,
+                    regularize_variant=regularize_variant,
+                    add=add,
+                    cleanup=cleanup,
+                    presegment=presegment)
+
+        stats = calc_similarity_stats( input_seg, output_file,
+                                       output_stats = output_experiment+'_stats.csv',
+                                       use_labels   = output_info['used_labels'],
+                                       relabel      = relabel )
+
+        remap = segmentation_library.get('map',{})
+        labels_used=[]
+        error_maps=[]
+
+        if any(remap):
+            for (i,j) in remap.items():
+                labels_used.append( int(j) )
+        else:
+            # assume binary mode
+            labels_used=[1]
+
+        for i in labels_used:
+            error_maps.append( work_dir+os.sep+fuse_variant+'_'+regularize_variant+'_error_{:03d}.mnc'.format(i) )
+
+        lin_xfm=None
+        nl_xfm=None
+        if output_info['bbox_initial_xfm'] is not None:
lin_xfm=output_info['bbox_initial_xfm'].xfm + + if output_info['nonlinear_xfm'] is not None: + nl_xfm=output_info['nonlinear_xfm'].xfm + + create_error_map( input_seg, output_file, error_maps, + lin_xfm=lin_xfm, + nl_xfm=nl_xfm, + template=segmentation_library.get('local_model',None), + label_list=labels_used ) + + output_info['stats'] = stats + output_info['output'] = output_file + output_info['ground_truth']= input_seg + output_info['error_maps'] = error_maps + + if presegment is not None: + output_info['presegment']=presegment + + with open(output_experiment+'_out.json','w') as f: + json.dump(output_info,f,indent=1, cls=MRIEncoder) + + with open(output_experiment+'_stats.json','w') as f: + json.dump(stats,f,indent=1, cls=MRIEncoder) + + return (stats, output_info) + + except mincError as e: + print("Exception in run_segmentation_experiment:{}".format( str(e)) ) + traceback.print_exc( file=sys.stdout ) + raise + + except : + print("Exception in run_segmentation_experiment:{}".format( sys.exc_info()[0]) ) + traceback.print_exc( file=sys.stdout ) + raise + + +def loo_cv_fusion_segment(validation_library, + segmentation_library, + output, + segmentation_parameters, + ec_parameters=None, + debug=False, + ec_variant='ec', + fuse_variant='fuse', + cv_variant='cv', + regularize_variant='gc', + cleanup=False, + ext=False, + cv_iter=None): + '''Run leave-one-out cross-validation experiment''' + # for each N subjects run segmentation and compare + # Right now run LOOCV + if not os.path.exists(output): + try: + os.makedirs(output) + except: + pass # assume directory was created by competing process + + results=[] + results_json=[] + + modalities=segmentation_library.get('modalities',1)-1 + print("cv_iter={}".format(repr(cv_iter))) + + for (i,j) in enumerate(validation_library): + + n = os.path.basename(j[0]).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + output_experiment = output+os.sep+n+'_'+cv_variant + + validation_sample = j[0] + validation_segment = j[1] + + presegment=None + add=[] + + if ext: + presegment=j[2] + add=j[3:3+modalities] + else: + add=j[2:2+modalities] + + # remove training sample (?) 
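+        # exclude the current subject from both the validation list and the
+        # segmentation library, so a LOO fold never trains on its own data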
+        _validation_library=validation_library[0:i]
+        _validation_library.extend(validation_library[i+1:len(validation_library)])
+
+
+        experiment_segmentation_library=copy.deepcopy(segmentation_library)
+
+        # remove sample
+        experiment_segmentation_library['library']=[ _i for _i in segmentation_library['library'] if _i[0].find(n)<0 ]
+
+        if (cv_iter is None) or (i == cv_iter):
+            results.append( futures.submit(
+                run_segmentation_experiment,
+                validation_sample, validation_segment,
+                experiment_segmentation_library,
+                output_experiment,
+                segmentation_parameters=segmentation_parameters,
+                debug=debug,
+                work_dir=output+os.sep+'work_'+n+'_'+fuse_variant,
+                ec_parameters=ec_parameters,
+                ec_variant=ec_variant,
+                fuse_variant=fuse_variant,
+                regularize_variant=regularize_variant,
+                add=add,
+                cleanup=cleanup,
+                presegment=presegment,
+                train_list=_validation_library
+                ))
+        else:
+            results_json.append( (output_experiment+'_stats.json',
+                                  output_experiment+'_out.json') )
+
+    print("Waiting for {} jobs".format(len(results)))
+    futures.wait(results, return_when=futures.ALL_COMPLETED)
+
+    stat_results=[]
+    output_results=[]
+
+    if cv_iter is None:
+        stat_results  = [ _i.result()[0] for _i in results ]
+        output_results= [ _i.result()[1] for _i in results ]
+    elif cv_iter==-1:
+        # TODO: load from json files
+        for _i in results_json:
+            if os.path.exists(_i[0]) and os.path.exists(_i[1]):# VF: a hack
+                with open(_i[0],'r') as _f:
+                    stat_results.append(json.load(_f))
+                with open(_i[1],'r') as _f:
+                    output_results.append(json.load(_f))
+            else:
+                if not os.path.exists(_i[0]):
+                    print("Warning: missing file:{}".format(_i[0]))
+                if not os.path.exists(_i[1]):
+                    print("Warning: missing file:{}".format(_i[1]))
+
+    return (stat_results, output_results)
+
+
+def full_cv_fusion_segment(validation_library,
+                           segmentation_library,
+                           output,
+                           segmentation_parameters,
+                           cv_iterations,
+                           cv_exclude,
+                           ec_parameters=None,
+                           debug=False,
+                           ec_variant='ec',
+                           fuse_variant='fuse',
+                           cv_variant='cv',
+                           regularize_variant='gc',
+                           cleanup=False,
+                           ext=False,
+                           cv_iter=None):
+    if cv_iter is not None:
+        raise NotImplementedError("Not Implemented!")
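+
+    # each iteration draws `cv_exclude` subjects at random and sets them aside;
+    # the draw is stored in random_<cv_variant>_<i>.json so that repeated runs
+    # reuse the same folds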
+ + validation_library_idx=range(len(validation_library)) + # randomly exlcude samples, repeat + results=[] + if not os.path.exists(output): + try: + os.makedirs(output) + except: + pass # assume directory was created by competing process + + modalities=segmentation_library.get('modalities',1)-1 + + for i in range( cv_iterations ): + #TODO: save this list in a file + rem_list=[] + ran_file=output+os.sep+ ('random_{}_{}.json'.format(cv_variant,i)) + + if not os.path.exists( ran_file ): + rem_list=random.sample( validation_library_idx, cv_exclude ) + + with open( ran_file , 'w') as f: + json.dump(rem_list,f) + else: + with open( ran_file ,'r') as f: + rem_list=json.load(f) + + # list of subjects + rem_items=[ validation_library[j] for j in rem_list ] + + rem_n=[os.path.basename(j[0]).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] for j in rem_items] + rem_lib=[] + val_lib=[] + + for j in rem_n: + rem_lib.extend( [ k for (k,t) in enumerate( segmentation_library['library'] ) if t[0].find(j)>=0 ] ) + val_lib.extend( [ k for (k,t) in enumerate( validation_library ) if t[0].find(j)>=0 ] ) + + + if debug: print(repr(rem_lib)) + rem_lib=set(rem_lib) + val_lib=set(val_lib) + + #prepare exclusion list + experiment_segmentation_library=copy.deepcopy(segmentation_library) + + experiment_segmentation_library['library']=\ + [ k for j,k in enumerate( segmentation_library['library'] ) if j not in rem_lib ] + + _validation_library=\ + [ k for j,k in enumerate( validation_library ) if j not in val_lib ] + + for j,k in enumerate(rem_items): + + output_experiment=output+os.sep+('{}_{}_{}'.format(i,rem_n[j],cv_variant)) + work_dir=output+os.sep+('work_{}_{}_{}'.format(i,rem_n[j],fuse_variant)) + + validation_sample=k[0] + validation_segment=k[1] + + presegment=None + shift=2 + + if ext: + presegment=k[2] + shift=3 + + results.append( futures.submit( + run_segmentation_experiment, validation_sample, validation_segment, + experiment_segmentation_library, + output_experiment, + segmentation_parameters=segmentation_parameters, + debug=debug, + work_dir=work_dir, + ec_parameters=ec_parameters, + ec_variant=ec_variant, + fuse_variant=fuse_variant, + regularize_variant=regularize_variant, + add=k[shift:shift+modalities], + cleanup=cleanup, + presegment=presegment, + train_list=_validation_library + )) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + stat_results = [ i.result()[0] for i in results ] + output_results = [ i.result()[1] for i in results ] + + return ( stat_results, output_results ) + + +def cv_fusion_segment( cv_parameters, + segmentation_library, + output, + segmentation_parameters, + ec_parameters=None, + debug=False, + cleanup=False, + ext=False, + extlib=None, + cv_iter=None ): + '''Run cross-validation experiment + for each N subjects run segmentation and compare + Right now run LOOCV or random CV + ''' + + # TODO: implement more realistic, random schemes + validation_library=cv_parameters['validation_library'] + + # maximum number of iterations + cv_iterations=cv_parameters.get('iterations',-1) + + # number of samples to exclude + cv_exclude=cv_parameters.get('cv',1) + + # use to distinguish different versions of error correction + ec_variant=cv_parameters.get('ec_variant','ec') + + # use to distinguish different versions of label fusion + fuse_variant=cv_parameters.get('fuse_variant','fuse') + + # use to distinguish different versions of cross-validation + cv_variant=cv_parameters.get('cv_variant','cv') + + # different version of label regularization + 
+    regularize_variant=cv_parameters.get('regularize_variant','gc')
+
+    cv_output=output+os.sep+cv_variant+'_stats.json'
+    res_output=output+os.sep+cv_variant+'_res.json'
+
+    if extlib is not None:
+        validation_library=extlib
+
+    if not isinstance(validation_library, list):
+        with open(validation_library,'r') as f:
+            validation_library=list(csv.reader(f))
+
+    if cv_iter is not None:
+        cv_iter=int(cv_iter)
+
+    stat_results=None
+    output_results=None
+
+
+    if ext:
+        # TODO: move pre-processing here?
+        # pre-process presegmented scans here!
+        # we only need to re-create left-right flipped segmentation
+        pass
+
+
+    if cv_iterations==-1 and cv_exclude==1: # simple LOO cross-validation
+        (stat_results, output_results) = loo_cv_fusion_segment(validation_library,
+                                         segmentation_library,
+                                         output, segmentation_parameters,
+                                         ec_parameters=ec_parameters,
+                                         debug=debug,
+                                         cleanup=cleanup,
+                                         ec_variant=ec_variant,
+                                         fuse_variant=fuse_variant,
+                                         cv_variant=cv_variant,
+                                         regularize_variant=regularize_variant,
+                                         ext=ext,
+                                         cv_iter=cv_iter)
+    else: # arbitrary number of iterations
+        (stat_results, output_results) = full_cv_fusion_segment(validation_library,
+                                         segmentation_library,
+                                         output, segmentation_parameters,
+                                         cv_iterations, cv_exclude,
+                                         ec_parameters=ec_parameters,
+                                         debug=debug,
+                                         cleanup=cleanup,
+                                         ec_variant=ec_variant,
+                                         fuse_variant=fuse_variant,
+                                         cv_variant=cv_variant,
+                                         regularize_variant=regularize_variant,
+                                         ext=ext,
+                                         cv_iter=cv_iter)
+
+    # average error maps
+
+    if cv_iter is None or cv_iter==-1:
+        results=[]
+        output_results_all={'results':output_results}
+        output_results_all['cv_stats']=cv_output
+        output_results_all['error_maps']={}
+        all_error_maps=[]
+
+        for (i,j) in enumerate(output_results[0]['error_maps']):
+            out_avg=output+os.sep+cv_variant+'_error_{:03d}.mnc'.format(i)
+            output_results_all['error_maps'][i]=out_avg
+            all_error_maps.append(out_avg)
+            maps=[ k['error_maps'][i] for k in output_results ]
+            results.append(futures.submit(
+                average_error_maps,maps,out_avg))
+
+        futures.wait(results, return_when=futures.ALL_COMPLETED)
+
+        output_results_all['max_error']=output+os.sep+cv_variant+'_max_error.mnc'
+        max_error_maps(all_error_maps,output_results_all['max_error'])
+
+        with open(cv_output,'w') as f:
+            json.dump(stat_results, f, indent=1 )
+
+        with open(res_output,'w') as f:
+            json.dump(output_results_all, f, indent=1, cls=MRIEncoder)
+
+        return stat_results
+    else:
+        # we assume that results will be available later
+        return None
+
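+# Sketch of a typical driver (illustrative names); cv_parameters normally comes
+# from a small JSON file:
+#
+#   cv_parameters = { "validation_library": "library.lst",
+#                     "iterations": -1, "cv": 1,
+#                     "fuse_variant": "fuse", "cv_variant": "cv",
+#                     "regularize_variant": "reg" }
+#   cv_fusion_segment(cv_parameters, segmentation_library, "output_cv",
+#                     segmentation_parameters)
+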
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/segment/error_correction.py b/ipl/segment/error_correction.py
new file mode 100755
index 0000000..6b87436
--- /dev/null
+++ b/ipl/segment/error_correction.py
@@ -0,0 +1,793 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+# standard library
+import string
+import os
+import argparse
+import pickle
+
+try:
+    import cPickle
+except ImportError:
+    # Python 3: fall back to pickle under the same name
+    import pickle as cPickle
+
+import sys
+import json
+import csv
+# minc
+import minc
+
+# numpy
+import numpy as np
+
+# scikit-learn
+from sklearn import svm
+from sklearn import neighbors
+from sklearn import ensemble
+from sklearn import tree
+#from sklearn import cross_validation
+from sklearn import preprocessing
+from sklearn import dummy
+
+from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import Normalizer
+
+# XGB package
+try:
+    import xgboost as xgb
+except ImportError:
+    pass
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+import traceback
+
+
+def prepare_features(images, coords, mask=None, use_coord=True, use_joint=True, patch_size=1, primary_features=1 ):
+    features=[]
+
+    # add features dependent on coordinates
+
+    image_no=len(images)
+    if primary_features > image_no or primary_features<0 :
+        primary_features=image_no
+    # use with center at 0 and 1.0 at the edge, could have used preprocessing
+    image_idx=0
+    if use_coord:
+        image_idx=3
+        if coords is None:
+            c=np.mgrid[ 0:images[0].shape[0] ,
+                        0:images[0].shape[1] ,
+                        0:images[0].shape[2] ]
+
+            features.append( ( c[2]-images[0].shape[0]/2.0)/ (images[0].shape[0]/2.0) )
+            features.append( ( c[1]-images[0].shape[1]/2.0)/ (images[0].shape[1]/2.0) )
+            features.append( ( c[0]-images[0].shape[2]/2.0)/ (images[0].shape[2]/2.0) )
+
+        else: # assume we have three sets of coords
+            features.append( coords[0] )
+            features.append( coords[1] )
+            features.append( coords[2] )
+
+
+    # add appearance and context images (patch around each voxel)
+    if patch_size>0:
+        for i in range(primary_features) :
+            for x in range(-patch_size,patch_size+1) :
+                for y in range (-patch_size,patch_size+1) :
+                    for z in range(-patch_size,patch_size+1) :
+                        features.append( np.roll( np.roll( np.roll( images[i], shift=x, axis=0 ), shift=y, axis=1), shift=z, axis=2 ) )
+
+        features.extend(images[primary_features:]) # add the rest
+        app_features=primary_features*(patch_size*2+1)*(patch_size*2+1)*(patch_size*2+1)+(image_no-primary_features)
+        primary_features=primary_features*(patch_size*2+1)*(patch_size*2+1)*(patch_size*2+1)
+    else:
+        features.extend(images)
+        app_features=image_no
+
+    # add joint features
+    if use_joint and use_coord:
+        for i in range(primary_features):
+            # multiply appearance features by coordinate features
+            for j in range(3):
+                features.append( features[i+image_idx] * features[j] )
+
+    # extract only what's needed
+    if mask is not None:
+        return [ i[ mask>0 ] for i in features ]
+    else:
+        return [ i for i in features ]
+
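+# Feature-count sketch for the defaults above (use_coord=True, use_joint=True,
+# patch_size=1, primary_features=1, one input image): 3 coordinate features
+# + 3^3 = 27 patch intensities + 27*3 = 81 joint (intensity x coordinate)
+# products = 111 features per voxel.
+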
+
+def convert_image_list(images):
+    '''
+    convert array of images into a single matrix
+    '''
+    s=[]
+    for (i,k) in enumerate(images):
+        s.append(np.column_stack( tuple( np.ravel( j ) for j in k ) ) )
+        print(s[-1].shape)
+
+    return np.vstack( tuple( i for i in s ) )
+
+
+def extract_part(img, partition, part, border):
+    '''
+    extract slice of the image for parallelized execution
+    '''
+    if partition is None or part is None :
+        return img
+    else:
+        strip=img.shape[2]//partition
+        beg=strip*part
+        end=strip*(part+1)
+
+        if part>0:
+            beg-=border
+        if part<(partition-1):
+            end+=border
+        else :
+            end=img.shape[2]
+        return img[:,:,beg:end]
+
+
+def pad_data(img, shape, partition, part, border):
+    if partition is None or part is None :
+        return img
+    else:
+        out=np.zeros(shape,dtype=img.dtype)
+        strip=shape[2]//partition
+
+        beg=strip*part
+        end=strip*(part+1)
+
+        _beg=0
+        _end=img.shape[2]
+
+        if part>0:
+            beg-=border
+
+        if part<(partition-1):
+            end+=border
+        else :
+            end=shape[2]
+
+        out[:,:,beg:end]=img[:,:,_beg:_end]
+        return out
+
+
+def merge_segmentations(inputs, output, partition, parameters):
+    patch_size=parameters.get('patch_size',1)
+    border=patch_size*2
+    out=None
+    strip=None
+    for i in range(len(inputs)):
+        d=minc.Label( inputs[i] ).data
+
+        if out is None:
+            out=np.zeros(d.shape,dtype=np.int32)
+            strip=d.shape[2]//partition
+
+        beg=strip*i
+        end=strip*(i+1)
+
+        if i==(partition-1):
+            end=d.shape[2]
+
+        out[:,:,beg:end]=d[:,:,beg:end]
+
+    out_i=minc.Label( data=out )
+    out_i.save( name=output, imitate=inputs[0])
+
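+# The training/apply settings travel in a plain dict; a minimal sketch
+# (keys as consumed below, values illustrative):
+#
+#   ec_parameters = { "method": "RanForest", "method_n": 40,
+#                     "patch_size": 1, "use_coord": True,
+#                     "use_joint": True, "normalize_input": True }
+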
patch_size=patch_size, + primary_features=primary_features ) ) + + training_images_direct.append( prepare_features( + features, + coords, + mask=mask_diff, + use_coord=use_coord, + use_joint=use_joint, + patch_size=patch_size, + primary_features=primary_features ) ) + + else: + mask_diff=mask + if mask is not None: + training_diff.append( ground[ mask>0 ] ) + else: + training_diff.append( ground ) + + + + training_images.append( prepare_features( + features, + coords, + mask=mask, + use_coord=use_coord, + use_joint=use_joint, + patch_size=patch_size, + primary_features=primary_features ) ) + + if debug: + print("feature size:{}".format(len(training_images[-1]))) + + if i == 0 and parameters.get('dump',False): + print("Dumping feature images...") + for (j,k) in enumerate( training_images[-1] ): + test=np.zeros_like( images[0] ) + test[ mask>0 ]=k + out=minc.Image( data=test ) + out.save( name="dump_{}.mnc".format(j), imitate=inp[0] ) + + # calculate normalization coeffecients + + if debug: print("Done") + + clf=None + clf2=None + + if total_mask_size>0: + training_X = convert_image_list( training_images ) + training_Y = np.ravel( np.concatenate( tuple(j for j in training_diff ) ) ) + + if debug: print("Fitting 1st...") + + if method == "xgb": + clf = None + elif method == "SVM": + clf = svm.SVC() + elif method == "nuSVM": + clf = svm.NuSVC() + elif method == 'NC': + clf = neighbors.NearestCentroid() + elif method == 'NN': + clf = neighbors.KNeighborsClassifier(method_n) + elif method == 'RanForest': + clf = ensemble.RandomForestClassifier(n_estimators=method_n, + n_jobs=method_n_jobs, + max_features=method_max_features, + random_state=method_random) + elif method == 'AdaBoost': + clf = ensemble.AdaBoostClassifier(n_estimators=method_n,random_state=method_random) + elif method == 'AdaBoostPP': + clf = Pipeline(steps=[('normalizer', Normalizer()), + ('AdaBoost', ensemble.AdaBoostClassifier(n_estimators=method_n,random_state=method_random)) + ]) + elif method == 'tree': + clf = tree.DecisionTreeClassifier(random_state=method_random) + elif method == 'ExtraTrees': + clf = ensemble.ExtraTreesClassifier(n_estimators=method_n, + max_features=method_max_features, + n_jobs=method_n_jobs, + random_state=method_random) + elif method == 'Bagging': + clf = ensemble.BaggingClassifier(n_estimators=method_n, + max_features=method_max_features, + n_jobs=method_n_jobs, + random_state=method_random) + elif method == 'dumb': + clf = dummy.DummyClassifier(strategy="constant",constant=0) + else: + clf = svm.LinearSVC() + + #scores = cross_validation.cross_val_score(clf, training_X, training_Y) + #print scores + if method == "xgb": + xg_train = xgb.DMatrix( training_X, label=training_Y) + param = {} + num_round = 100 + # use softmax multi-class classification + param['objective'] = 'multi:softmax' + # scale weight of positive examples + param['eta'] = 0.1 + param['max_depth'] = 8 + param['silent'] = 1 + param['nthread'] = 4 + param['num_class'] = 2 + clf = xgb.train(param, xg_train, num_round) + elif method != 'dumb': + clf.fit( training_X, training_Y ) + + if multilabel>1 and method != 'dumb': + if debug: print("Fitting direct...") + + training_X = convert_image_list( training_images_direct ) + training_Y = np.ravel( np.concatenate( tuple(j for j in training_direct ) ) ) + + if method2 == "xgb": + clf2 = None + if method2 == "SVM": + clf2 = svm.SVC() + elif method2 == "nuSVM": + clf2 = svm.NuSVC() + elif method2 == 'NC': + clf2 = neighbors.NearestCentroid() + elif method2 == 'NN': + clf2 = 
neighbors.KNeighborsClassifier(method2_n)
+            elif method2 == 'RanForest':
+                clf2 = ensemble.RandomForestClassifier(n_estimators=method2_n,
+                        n_jobs=method_n_jobs,
+                        max_features=method_max_features,
+                        random_state=method_random)
+            elif method2 == 'AdaBoost':
+                clf2 = ensemble.AdaBoostClassifier(n_estimators=method2_n,random_state=method_random)
+            elif method2 == 'AdaBoostPP':
+                clf2 = Pipeline(steps=[('normalizer', Normalizer()),
+                                       ('AdaBoost', ensemble.AdaBoostClassifier(n_estimators=method2_n,random_state=method_random))
+                                      ])
+            elif method2 == 'tree':
+                clf2 = tree.DecisionTreeClassifier(random_state=method_random)
+            elif method2 == 'ExtraTrees':
+                clf2 = ensemble.ExtraTreesClassifier(n_estimators=method2_n,
+                          max_features=method_max_features,
+                          n_jobs=method_n_jobs,
+                          random_state=method_random)
+            elif method2 == 'Bagging':
+                clf2 = ensemble.BaggingClassifier(n_estimators=method2_n,
+                          max_features=method_max_features,
+                          n_jobs=method_n_jobs,
+                          random_state=method_random)
+            elif method2 == 'dumb':
+                clf2 = dummy.DummyClassifier(strategy="constant",constant=0)
+            else:
+                clf2 = svm.LinearSVC()
+
+            if method2 == "xgb" :
+                xg_train = xgb.DMatrix( training_X, label=training_Y)
+
+                param = {}
+                num_round = 100
+                # use softmax multi-class classification
+                param['objective'] = 'multi:softmax'
+                # scale weight of positive examples
+                param['eta'] = 0.1
+                param['max_depth'] = 8
+                param['silent'] = 1
+                param['nthread'] = 4
+                param['num_class'] = multilabel
+
+                clf2 = xgb.train(param, xg_train, num_round)
+
+            elif method2 != 'dumb': # guard on method2, not method: this fits the 2nd stage classifier
+                clf2.fit( training_X, training_Y )
+
+            #print(clf.score(training_X,training_Y))
+
+            if debug:
+                print( clf )
+                print( clf2 )
+        else:
+            print("Warning: zero total mask size, using null classifier")
+            clf = dummy.DummyClassifier(strategy="constant",constant=0)
+
+        if method == 'xgb' and method2 == 'xgb':
+            #save
+            clf.save_model(output)
+            clf2.save_model(output+'_2')
+        else:
+            with open(output,'wb') as f:
+                cPickle.dump( [clf, clf2] , f, -1)
+
+    except mincError as e:
+        print("Exception in errorCorrectionTrain:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in errorCorrectionTrain:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def errorCorrectionApply(input_images,
+                         output,
+                         input_mask=None,
+                         parameters=None,
+                         debug=False,
+                         history=None,
+                         input_auto=None,
+                         partition=None,
+                         part=None,
+                         multilabel=1,
+                         debug_files=None ):
+    try:
+        use_coord=parameters.get('use_coord',True)
+        use_joint=parameters.get('use_joint',True)
+        patch_size=parameters.get('patch_size',1)
+        normalize_input=parameters.get('normalize_input',True)
+        primary_features=parameters.get('primary_features',1)
+
+        method  =parameters.get('method','lSVC')
+        method2 =parameters.get('method2',method)
+
+        training=parameters['training']
+
+        clf=None
+        clf2=None
+
+        border=patch_size*2
+
+        if patch_size==0:
+            border=2
+
+        if debug: print( "Running error-correction, input_image:{} training:{} partition:{} part:{} output:{} input_auto:{}".
+                format(repr(input_images), training, partition,part,output,input_auto) )
+
+        if method == 'xgb' and method2 == 'xgb':
+            # need to convert from Unicode
+            _training=str(training)
+            clf = xgb.Booster(model_file=_training)
+            if multilabel>1:
+                clf2 = xgb.Booster(model_file=_training+'_2')
+        else:
+            with open(training, 'rb') as f:
+                c = cPickle.load(f)
+                clf  = c[0]
+                clf2 = c[1]
+
+        if debug:
+            print( clf )
+            print( clf2 )
+            print( "Loading input images..."
) + + input_data=[ minc.Image(k, dtype=np.float32).data for k in input_images ] + shape=input_data[0].shape + + #features = [ extract_part( minc.Image(k, dtype=np.float32).data, partition, part, border) for k in inp[0:-3] ] + #if normalize_input: + #features = [ extract_part( preprocessing.scale( k ), partition, part, border) for k in input_data ] + #else: + features = [ extract_part( k, partition, part, border) for k in input_data ] + + coords=None + + if use_coord: + c=np.mgrid[ 0:shape[0] , 0:shape[1] , 0: shape[2] ] + coords=[ extract_part( (c[j]-shape[j]/2.0)/(shape[j]/2.0), partition, part, border ) for j in range(3) ] + + if debug: + print("Features data size:{}".format(len(features))) + + mask=None + + mask_size=shape[0]*shape[1]*shape[2] + + if input_mask is not None: + mask=extract_part( minc.Label( input_mask ).data, partition, part, border ) + mask_size=np.sum( mask ) + + out_cls = None + out_corr = None + + test_x=convert_image_list ( [ prepare_features( + features, + coords, + mask=mask, + use_coord=use_coord, + use_joint=use_joint, + patch_size=patch_size, + primary_features=primary_features ) + ] ) + + if input_auto is not None: + out_corr = np.copy( extract_part( minc.Label( input_auto ).data, partition, part, border) ) # use input data + out_cls = np.copy( extract_part( minc.Label( input_auto ).data, partition, part, border) ) # use input data + else: + out_corr = np.zeros( shape, dtype=np.int32 ) + out_cls = np.zeros( shape, dtype=np.int32 ) + + if mask_size>0 and not isinstance(clf, dummy.DummyClassifier): + if debug: + print("Running classifier 1 ...") + + if method!='xgb': + pred = np.asarray( clf.predict( test_x ), dtype=np.int32 ) + else: + xg_predict = xgb.DMatrix(test_x) + pred = np.array( clf.predict( xg_predict ), dtype=np.int32 ) + + if debug_files is not None: + out_dbg = np.zeros( shape, dtype=np.int32 ) + if mask is not None: + out_dbg[ mask > 0 ] = pred + else: + out_dbg = pred + + out_dbg=minc.Label( data=pad_data(out_dbg, shape, partition, part, border) ) + out_dbg.save(name=debug_files[0], imitate=input_images[0], history=history) + + + if mask is not None: + out_corr[ mask > 0 ] = pred + else: + out_corr = pred + + if multilabel > 1 and clf2 is not None: + if mask is not None: + mask=np.logical_and(mask>0, out_corr>0) + else: + mask=(out_corr>0) + + if debug: + print("Running classifier 2 ...") + + test_x = convert_image_list ( [ prepare_features( + features, + coords, + mask=mask , + use_coord=use_coord, + use_joint=use_joint, + patch_size=patch_size, + primary_features=primary_features ) + ] ) + if method2!='xgb': + pred = np.asarray( clf2.predict( test_x ), dtype=np.int32 ) + else: + xg_predict = xgb.DMatrix(test_x) + pred = np.array( clf2.predict( xg_predict ), dtype=np.int32 ) + + out_cls[ mask > 0 ] = pred + + if debug_files is not None: + out_dbg = np.zeros( shape, dtype=np.int32 ) + if mask is not None: + out_dbg[ mask > 0 ] = pred + else: + out_dbg = pred + + out_dbg=minc.Label( data=pad_data(out_dbg, shape, partition, part, border) ) + out_dbg.save(name=debug_files[1], imitate=input_images[0], history=history) + + + else: + out_cls=out_corr + + else: + pass # nothing to do! 
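+        # Typical call of this function (editor's sketch; file names are
+        # hypothetical, parameter keys are the ones read above):
+        #
+        #   errorCorrectionApply(['t1.mnc'], 'corrected.mnc',
+        #                        input_mask='roi.mnc', input_auto='auto.mnc',
+        #                        parameters={'training':'model.pickle',
+        #                                    'method':'lSVC','patch_size':1},
+        #                        multilabel=2)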
+
+        if debug:
+            print("Saving output...")
+
+        out=minc.Label( data=pad_data(out_cls, shape, partition, part, border) )
+
+        out.save(name=output, imitate=input_images[0], history=history)
+    except mincError as e:
+        print("Exception in errorCorrectionApply:{}".format(str(e)))
+        traceback.print_exc(file=sys.stdout)
+        raise
+    except :
+        print("Exception in errorCorrectionApply:{}".format(sys.exc_info()[0]))
+        traceback.print_exc(file=sys.stdout)
+        raise
+
+def parse_options():
+    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+                                     description='Perform error-correction learning and application')
+
+    parser.add_argument('--train',
+                        help="Training library in json format, list of lists: [img1[,img2,...],auto,mask,ground]")
+
+    parser.add_argument('--train_csv',
+                        help="Training library in CSV format, one row per sample: img1[,img2,...],auto,mask,ground")
+
+    parser.add_argument('--input',
+                        help="Automatic segmentation to be corrected")
+
+    parser.add_argument('--output',
+                        help="Output image, required for application of method")
+
+    parser.add_argument('--param',
+                        help="Load error-correction parameters from file")
+
+    parser.add_argument('--mask',
+                        help="Region for correction, required for application of method" )
+
+    parser.add_argument('--method',
+                        choices=['SVM','lSVM','nuSVM','NN','RanForest','AdaBoost','tree'],
+                        default='lSVM',
+                        help='Classification algorithm')
+
+    parser.add_argument('-n',
+                        type=int,
+                        help="nearest neighbors",
+                        default=15)
+
+    parser.add_argument('--debug',
+                        action="store_true",
+                        dest="debug",
+                        default=False,
+                        help='Print debugging information' )
+
+    parser.add_argument('--dump',
+                        action="store_true",
+                        dest="dump",
+                        default=False,
+                        help='Dump first sample features (for debugging)' )
+
+    parser.add_argument('--coord',
+                        action="store_true",
+                        dest="coord",
+                        default=False,
+                        help='Use image coordinates as additional features' )
+
+    parser.add_argument('--joint',
+                        action="store_true",
+                        dest="joint",
+                        default=False,
+                        help='Produce joint features between appearance and coordinates' )
+
+    parser.add_argument('--random',
+                        type=int,
+                        dest="random",
+                        help='Provide random state if needed' )
+
+    parser.add_argument('--save',
+                        help='Save training results in a file')
+
+    parser.add_argument('--load',
+                        help='Load training results from a file')
+
+    parser.add_argument('image',
+                        help='Input images', nargs='*')
+
+    options = parser.parse_args()
+
+    return options
+
+
+if __name__ == "__main__":
+    history = minc.format_history(sys.argv)
+
+    options = parse_options()
+
+    parameters={}
+    if options.param is None:
+        parameters['method']=options.method
+        parameters['method_n']=options.n
+        parameters['method_random']=options.random
+
+        parameters['use_coord']=options.coord
+        parameters['use_joint']=options.joint
+    else:
+        # load error-correction parameters from a json file
+        with open(options.param,'r') as f:
+            parameters=json.load(f)
+
+    # load training images
+    if ( (options.train is not None or \
+        options.train_csv is not None) and \
+        options.save is not None) :
+
+        if options.debug: print("Loading training images...")
+
+        train=None
+
+        if options.train is not None:
+            with open(options.train,'r') as f:
+                train=json.load(f)
+        else:
+            with open(options.train_csv,'r') as f:
+                train=list(csv.reader(f))
+
+        errorCorrectionTrain(train,options.save,
+                             parameters=parameters,
+                             debug=options.debug)
+
+
+    elif options.input is not None and \
+         options.image is not None and \
+         options.output is not None:
+
+        if options.load is not None:
+            parameters['training']=options.load
+
+        # options.image is already a list (nargs='*'); the automatic
+        # segmentation goes in via input_auto, the corrected labels
+        # go to --output
+        errorCorrectionApply(
+            options.image,
+            options.output,
+            input_mask=options.mask,
+            input_auto=options.input,
+            parameters=parameters,
+            debug=options.debug,
+            history=history)
+
+
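+    # Example invocations (editor's sketch; file names are hypothetical,
+    # the script is the module containing this __main__ block):
+    #
+    # training:
+    #   python error_correction.py --train train.json --coord --joint \
+    #          --method RanForest --save ec_model.pickle
+    # application:
+    #   python error_correction.py --load ec_model.pickle --input auto.mnc \
+    #          --mask roi.mnc --output corrected.mnc t1.mnc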
+    else:
+        print("Error in arguments, run with --help")
+
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/segment/filter.py b/ipl/segment/filter.py
new file mode 100644
index 0000000..6393723
--- /dev/null
+++ b/ipl/segment/filter.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools,mincError
+import ipl.minc_hl as hl
+
+
+def filter_sample(input,output,filters,model=None):
+
+    apply_filter(input.scan, output.scan, filters,
+                 model=model.scan, input_mask=input.mask,
+                 model_mask=model.mask)
+    # TODO: parallelize?
+    for (i,j) in enumerate( input.add ):
+        apply_filter(input.add[i], output.add[i], filters,
+                     model=model.add[i], input_mask=input.mask,
+                     model_mask=model.mask)
+
+
+def apply_filter(input, output, filters, model=None, input_mask=None, model_mask=None, input_labels=None,model_labels=None):
+    output_scan=input
+    try:
+        if filters is not None :
+
+            with mincTools() as m:
+                if filters.get('denoise',False):
+                    # TODO: choose between ANLM and NLM here?
+                    m.anlm(output_scan,m.tmp('denoised.mnc'),
+                           beta      =filters.get('beta',0.5),
+                           patch     =filters.get('patch',1),
+                           search    =filters.get('search',1),
+                           regularize=filters.get('regularize',None))
+
+                    output_scan = m.tmp('denoised.mnc')
+
+                if filters.get('normalize',False) and model is not None:
+
+                    if filters.get('nuyl',False):
+                        m.nuyl_normalize(output_scan,model,m.tmp('normalized.mnc'),
+                                         source_mask=input_mask,target_mask=model_mask)
+                    elif filters.get('nuyl2',False):
+                        hl.nuyl_normalize2(output_scan,model,m.tmp('normalized.mnc'),
+                                           #source_mask=input_mask,target_mask=model_mask,
+                                           fwhm=filters.get('nuyl2_fwhm',2.0),
+                                           iterations=filters.get('nuyl2_iter',4))
+                    else:
+                        m.volume_pol(output_scan,model, m.tmp('normalized.mnc'),
+                                     source_mask=input_mask,target_mask=model_mask)
+                    output_scan = m.tmp('normalized.mnc')
+
+                # TODO: implement more filters
+                patch_norm = filters.get('patch_norm',None)
+
+                if patch_norm is not None:
+                    print("Running patch normalization")
+                    db  = patch_norm.get('db',None)
+                    idx = patch_norm.get('idx',None)
+                    thr = patch_norm.get('threshold',None)
+                    spl = patch_norm.get('spline',None)
+                    med = patch_norm.get('median',None)
+                    it  = patch_norm.get('iterations',None)
+                    if db is not None and idx is not None:
+                        # have all the pieces
+                        m.patch_norm(output_scan, m.tmp('patch_norm.mnc'),
+                                     index=idx, db=db, threshold=thr, spline=spl,
+                                     median=med, field = m.tmp('patch_norm_field.mnc'),
+                                     iterations=it)
+                        output_scan = m.tmp('patch_norm.mnc')
+
+                label_norm = filters.get('label_norm',None)
+
+                if label_norm is not None and input_labels is not None and model_labels is not None:
+                    print("Running label norm:{}".format(repr(label_norm)))
+                    norm_order=label_norm.get('order',3)
+                    norm_median=label_norm.get('median',True)
+                    hl.label_normalize(output_scan,input_labels,model,model_labels,out=m.tmp('label_norm.mnc'),order=norm_order,median=norm_median)
+                    output_scan = m.tmp('label_norm.mnc')
+
+                shutil.copyfile(output_scan,output)
+        else:
+            shutil.copyfile(input,output)
+    except mincError as e:
+        print("Exception in apply_filter:{}".format(str(e)))
+        traceback.print_exc( file=sys.stdout )
+        raise
+    except :
+        print("Exception in apply_filter:{}".format(sys.exc_info()[0]))
+        traceback.print_exc( file=sys.stdout)
+        raise
+
+
+def make_border_mask( input, output, width=1,labels=1):
+    '''Extract a border along
the edge''' + try: + if not os.path.exists(output): + with mincTools() as m: + if labels==1: + m.binary_morphology(input,"D[{}]".format((width+1)//2),m.tmp('d.mnc')) + m.binary_morphology(input,"E[{}]".format(width//2),m.tmp('e.mnc')) + m.calc([m.tmp('d.mnc'),m.tmp('e.mnc')],'A[0]>0.5&&A[1]<0.5?1:0',output) + else: # have to split up labels and then create a mask of all borders + split_labels(input,labels, m.tmp('split')) + borders=[] + for i in range(1,labels): + l='{}_{:02d}.mnc' .format(m.tmp('split'),i) + d='{}_{:02d}_d.mnc'.format(m.tmp('split'),i) + e='{}_{:02d}_e.mnc'.format(m.tmp('split'),i) + b='{}_{:02d}_b.mnc'.format(m.tmp('split'),i) + m.binary_morphology(l,"D[{}]".format((width+1)//2),d) + m.binary_morphology(l,"E[{}]".format(width//2),e) + m.calc([d,e],'A[0]>0.5&&A[1]<0.5?1:0',b) + borders.append(b) + m.math(borders,'max',m.tmp('max'),datatype='-float') + m.reshape(m.tmp('max'),output,datatype='byte', + image_range=[0,1],valid_range=[0,1]) + + except mincError as e: + print("Exception in make_border_mask:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in make_border_mask:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def split_labels(input, n_labels,output_prefix, + antialias=False, blur=None, + expit=None, normalize=False ): + try: + with mincTools() as m: + inputs=[ input ] + outputs=['{}_{:02d}.mnc'.format(output_prefix,i) for i in range(n_labels) ] + + cmd=['itk_split_labels',input,'{}_%02d.mnc'.format(output_prefix), + '--missing',str(n_labels)] + if antialias: + cmd.append('--antialias') + if normalize: + cmd.append('--normalize') + if blur is not None: + cmd.extend(['--blur',str(blur)]) + if expit is not None: + cmd.extend(['--expit',str(expit)]) + m.command(cmd, inputs=inputs, outputs=outputs) + #return outputs + except mincError as e: + print("Exception in split_labels:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in split_labels:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def generate_flip_sample(input, labels_datatype='byte'): + '''generate flipped version of sample''' + try: + with mincTools() as m: + m.flip_volume_x(input.scan,input.scan_f) + + for (i,j) in enumerate(input.add): + m.flip_volume_x(input.add[i],input.add_f[i]) + + if input.mask is not None: + m.flip_volume_x(input.mask, input.mask_f, labels=True) + + #for i in input.add: + # m.flip_volume_x(i, input.seg_f, labels=True,datatype=labels_datatype) + except mincError as e: + print("Exception in generate_flip_sample:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in generate_flip_sample:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +def create_unflip_remap(remap,remap_flip): + if remap is not None and remap_flip is not None: + # convert both into dict + _remap= { int(i[0]):int(i[1]) for i in remap } + _remap_flip={ int(i[1]):int(i[0]) for i in remap_flip } + _rr={} + + for i,j in _remap.items(): + if j in _remap_flip: + _rr[j]=j + return _rr + else: + return None + +def log_transform_sample(input, output, threshold=1.0): + try: + with mincTools() as m: + m.calc([input.scan],'A[0]>{}?log(A[0]):0.0'.format(threshold), + output.scan) + except mincError as e: + print("Exception in log_transform_sample:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in 
log_transform_sample:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def create_patch_norm_db( input_samples, + patch_norm_db, + patch_norm_idx, + pct=0.1, + patch=2, + sub=1): + try: + with mincTools() as m: + patch_lib=os.path.dirname(input_samples[0].scan)+os.sep+'patch_lib.lst' + inputs=[] + outputs=[patch_norm_db] + + with open(patch_lib,'w') as f: + for i in input_samples: + f.write( os.path.basename( i.scan ) ) + f.write("\n") + inputs.append(i.scan) + + cmd=['create_feature_database', + patch_lib, patch_norm_db, + '--patch', + '--patch-radius', str(patch), + '--subsample', str(sub), + '--random', str(pct), + '--log', + '--threshold', str(1.0), + ] + + m.command(cmd, inputs=inputs, outputs=outputs) + + cmd=['refine_feature_database', + patch_norm_db, patch_norm_idx + ] + m.command(cmd, inputs=[patch_norm_db], outputs=[patch_norm_idx]) + + except mincError as e: + print("Exception in create_patch_norm_db:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in create_patch_norm_db:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/fuse.py b/ipl/segment/fuse.py new file mode 100644 index 0000000..a0262df --- /dev/null +++ b/ipl/segment/fuse.py @@ -0,0 +1,906 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + +import shutil +import os +import sys +import csv +import copy +import re +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +# scoop parallel execution +from scoop import futures, shared + +from .filter import * +from .structures import * +from .registration import * +from .resample import * +from .error_correction import * +from .preselect import * +from .qc import * +from .fuse_segmentations import * +from .library import * +import traceback + +def seg_to_volumes(seg, output_json, label_map=None): + with mincTools( verbose=2 ) as m: + out=m.label_stats(seg,label_defs=label_map) + with open(output_json,'w') as f: + json.dump(out,f,indent=1) + return out + +def invert_lut(inp): + if inp is None: + return None + return { str(j):str(i) for i,j in inp.iteritems()} + + +def fusion_segment( input_scan, + library_description, + output_segment, + input_mask = None, + parameters = {}, + exclude =[], + work_dir = None, + debug = False, + ec_variant = None, + fuse_variant = None, + regularize_variant = None, + add=[], + cleanup = False, + cleanup_xfm = False, + presegment = None, + preprocess_only = False): + """Apply fusion segmentation""" + try: + if debug: + print( "Segmentation parameters:") + print( repr(parameters) ) + print( "presegment={}".format(repr(presegment))) + + out_variant='' + if fuse_variant is not None: + out_variant+=fuse_variant + + if regularize_variant is not None: + out_variant+='_'+regularize_variant + + if ec_variant is not None: + out_variant+='_'+ec_variant + + if work_dir is None: + work_dir=output_segment+os.sep+'work_segment' + + if not os.path.exists(work_dir): + os.makedirs(work_dir) + + work_lib_dir= work_dir+os.sep+'library' + work_lib_dir_f=work_dir+os.sep+'library_f' + + if not os.path.exists(work_lib_dir): + os.makedirs(work_lib_dir) + + if not os.path.exists(work_lib_dir_f): + os.makedirs(work_lib_dir_f) + + library_nl_samples_avail=library_description['nl_samples_avail'] + library_modalities=library_description.get('modalities',1)-1 + + # perform symmetric 
segmentation + segment_symmetric= parameters.get('segment_symmetric', False ) + + # read filter paramters + pre_filters= parameters.get('pre_filters', None ) + post_filters= parameters.get('post_filters', parameters.get( 'filters', None )) + + # if linear registration should be performed + do_initial_register = parameters.get( 'initial_register', + parameters.get( 'linear_register', {})) + + if do_initial_register is not None and isinstance(do_initial_register,dict): + initial_register = do_initial_register + do_initial_register = True + else: + initial_register={} + + inital_reg_type = parameters.get( 'initial_register_type', + parameters.get( 'linear_register_type', + initial_register.get('type','-lsq12'))) + + inital_reg_ants = parameters.get( 'initial_register_ants', + parameters.get( 'linear_register_ants', False)) + + inital_reg_options = parameters.get( 'initial_register_options', + initial_register.get('options',None) ) + + inital_reg_downsample = parameters.get( 'initial_register_downsample', + initial_register.get('downsample',None)) + + inital_reg_use_mask = parameters.get( 'initial_register_use_mask', + initial_register.get('use_mask',False)) + + initial_reg_objective = initial_register.get('objective','-xcorr') + + # perform local linear registration + do_initial_local_register = parameters.get( 'initial_local_register', + parameters.get( 'local_linear_register', {}) ) + if do_initial_local_register is not None and isinstance(do_initial_local_register,dict): + initial_local_register=do_initial_local_register + do_initial_local_register=True + else: + initial_local_register={} + + local_reg_type = parameters.get( 'local_register_type', + initial_local_register.get('type','-lsq12')) + + local_reg_ants = parameters.get( 'local_register_ants', False) + + local_reg_opts = parameters.get( 'local_register_options', + initial_local_register.get('options',None)) + + local_reg_bbox = parameters.get( 'local_register_bbox', + initial_local_register.get('bbox',False )) + + local_reg_downsample = parameters.get( 'local_register_downsample', + initial_local_register.get('downsample',None)) + + local_reg_use_mask = parameters.get( 'local_register_use_mask', + initial_local_register.get('use_mask',True)) + + local_reg_objective = initial_local_register.get('objective','-xcorr') + + # if non-linear registraiton should be performed for library creation + do_nonlinear_register = parameters.get('non_linear_register', False ) + + # generate segmentation library (needed for label fusion, not needed for single atlas based or external tool) + generate_library = parameters.get('generate_library', True ) + + # if non-linear registraiton should be performed pairwise + do_pairwise =parameters.get('non_linear_pairwise', False ) + # if pairwise registration should be performed using ANTS + do_pairwise_ants = parameters.get('non_linear_pairwise_ants', True ) + pairwise_register_type = parameters.get( 'non_linear_pairwise_type',None) + if pairwise_register_type is None: + if do_pairwise_ants: + pairwise_register_type='ants' + + library_preselect= parameters.get('library_preselect', 10) + library_preselect_step= parameters.get('library_preselect_step', None) + library_preselect_method= parameters.get('library_preselect_method', 'MI') + + + # if non-linear registraiton should be performed with ANTS + do_nonlinear_register_ants=parameters.get('non_linear_register_ants',False ) + nlreg_level = parameters.get('non_linear_register_level', 2) + nlreg_start = parameters.get('non_linear_register_start', 16) + 
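+        # The registration keys read above normally come from the
+        # segmentation options .json file (editor's sketch, values
+        # hypothetical):
+        #   { "non_linear_register": true,
+        #     "non_linear_register_level": 2,
+        #     "non_linear_register_start": 16,
+        #     "non_linear_pairwise": false }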
nlreg_options = parameters.get('non_linear_register_options', None) + nlreg_downsample = parameters.get('non_linear_register_downsample', None) + + nonlinear_register_type = parameters.get( 'non_linear_register_type',None) + if nonlinear_register_type is None: + if do_nonlinear_register_ants: + nonlinear_register_type='ants' + + pairwise_level = parameters.get('pairwise_level', 2) + pairwise_start = parameters.get('pairwise_start', 16) + pairwise_options = parameters.get('pairwise_options', None) + + fuse_options = parameters.get('fuse_options', None) + + resample_order = parameters.get('resample_order', 2) + resample_baa = parameters.get('resample_baa', True) + + # error correction parametrs + ec_options = parameters.get('ec_options', None) + + # QC image paramters + qc_options = parameters.get('qc_options', None) + + + # special case for training error correction, assume input scan is already pre-processed + run_in_bbox = parameters.get('run_in_bbox', False) + + classes_number = library_description['classes_number'] + seg_datatype = library_description['seg_datatype'] + gco_energy = library_description['gco_energy'] + + + output_info = {} + + input_sample = MriDataset(scan=input_scan, seg=presegment, + mask=input_mask, protect=True, + add=add) + + sample = input_sample + + # get parameters + model = MriDataset(scan=library_description['model'], + mask=library_description['model_mask'], + add= library_description.get('model_add',[]) ) + + local_model = MriDataset(scan=library_description['local_model'], + mask=library_description['local_model_mask'], + scan_f=library_description.get('local_model_flip',None), + mask_f=library_description.get('local_model_mask_flip',None), + seg= library_description.get('local_model_seg',None), + seg_f= library_description.get('local_model_seg_flip',None), + add= library_description.get('local_model_add',[]), + add_f= library_description.get('local_model_add_flip',[]), + ) + + library = library_description['library'] + + sample_modalities=len(add) + + print("\n\n") + print("Sample modalities:{}".format(sample_modalities)) + print("\n\n") + # apply the same steps as used in library creation to perform segmentation: + + # global + initial_xfm=None + nonlinear_xfm=None + bbox_sample=None + nl_sample=None + bbox_linear_xfm=None + flipdir=work_dir+os.sep+'flip' + + sample_filtered=MriDataset(prefix=work_dir, name='flt_'+sample.name, add_n=sample_modalities ) + + # QC file + # TODO: allow for alternative location, extension + #sample_qc=work_dir+os.sep+'qc_'+sample.name+'_'+out_variant+'.jpg' + sample_qc=output_segment+'_qc.jpg' + + + if run_in_bbox: + segment_symmetric=False # that would depend ? + do_initial_register=False + do_initial_local_register=False + # assume filter already applied! 
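+            # for reference, a pre_filters dict as consumed by apply_filter()
+            # could look like (editor's sketch, values hypothetical):
+            #   {"denoise": True, "beta": 0.5, "normalize": True, "nuyl": True}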
+ pre_filters=None + post_filters=None + + if pre_filters is not None: + apply_filter( sample.scan, + sample_filtered.scan, + pre_filters, + model=model.scan, + model_mask=model.mask) + + #if sample.mask is None: + sample_filtered.mask=sample.mask + # hack + sample_filtered.add=sample.add + sample=sample_filtered + else: + sample_filtered=None + + output_info['sample_filtered']=sample_filtered + + if segment_symmetric: + # need to flip the inputs + if not os.path.exists(flipdir): + os.makedirs(flipdir) + + sample.scan_f=flipdir+os.sep+os.path.basename(sample.scan) + sample.add_f=['' for (i,j) in enumerate(sample.add)] + + for (i,j) in enumerate(sample.add): + sample.add_f[i]=flipdir+os.sep+os.path.basename(sample.add[i]) + + if sample.mask is not None: + sample.mask_f=flipdir+os.sep+'mask_'+os.path.basename(sample.scan) + else: + sample.mask_f=None + + generate_flip_sample( sample ) + + if presegment is None: + sample.seg=None + sample.seg_f=None + + + if do_initial_register is not None: + initial_xfm=MriTransform(prefix=work_dir, name='init_'+sample.name ) + + if inital_reg_type=='elx' or inital_reg_type=='elastix' : + elastix_registration( sample, + model, initial_xfm, + symmetric=segment_symmetric, + parameters=inital_reg_options, + nl=False, + use_mask=inital_reg_use_mask, + downsample=inital_reg_downsample + ) + elif inital_reg_type=='ants' or inital_reg_ants: + linear_registration( sample, + model, initial_xfm, + symmetric=segment_symmetric, + reg_type=inital_reg_type, + linreg=inital_reg_options, + ants=True, + use_mask=inital_reg_use_mask, + downsample=inital_reg_downsample + ) + else: + linear_registration( sample, + model, initial_xfm, + symmetric=segment_symmetric, + reg_type=inital_reg_type, + linreg=inital_reg_options, + downsample=inital_reg_downsample, + use_mask=inital_reg_use_mask, + objective=initial_reg_objective + ) + + output_info['initial_xfm']=initial_xfm + + + # local + bbox_sample = MriDataset(prefix=work_dir, name='bbox_init_'+sample.name, + add_n=sample_modalities ) + # a hack to have sample mask + bbox_sample_mask = MriDataset(prefix=work_dir, name='bbox_init_'+sample.name ) + + + if do_initial_local_register: + bbox_linear_xfm=MriTransform(prefix=work_dir, name='bbox_init_'+sample.name ) + + if local_reg_type=='elx' or local_reg_type=='elastix' : + elastix_registration( sample, + local_model, + bbox_linear_xfm, + symmetric=segment_symmetric, + init_xfm=initial_xfm, + resample_order=resample_order, + parameters=local_reg_opts, + bbox=local_reg_bbox, + use_mask=local_reg_use_mask, + downsample=local_reg_downsample + ) + elif local_reg_type=='ants' or local_reg_ants: + linear_registration( sample, + local_model, + bbox_linear_xfm, + init_xfm=initial_xfm, + symmetric=segment_symmetric, + reg_type=local_reg_type, + linreg=local_reg_opts, + resample_order=resample_order, + ants=True, + close=True, + bbox=local_reg_bbox, + use_mask=local_reg_use_mask, + downsample=local_reg_downsample + ) + else: + linear_registration( sample, + local_model, + bbox_linear_xfm, + init_xfm=initial_xfm, + symmetric=segment_symmetric, + reg_type=local_reg_type, + linreg=local_reg_opts, + resample_order=resample_order, + close=True, + bbox=local_reg_bbox, + use_mask=local_reg_use_mask, + objective=local_reg_objective, + downsample=local_reg_downsample ) + + else: + bbox_linear_xfm=initial_xfm + + output_info['bbox_initial_xfm']=bbox_linear_xfm + + bbox_sample.mask=None + bbox_sample.mask_f=None + + if sample.seg is None: + bbox_sample.seg=None + bbox_sample.seg_f=None + + 
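+        # resample the (filtered) scan and any additional modalities into the
+        # bounding box of the local model, flipping the copies when symmetric
+        # segmentation is enabled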
warp_sample(sample, local_model, bbox_sample, + transform=bbox_linear_xfm, + symmetric=segment_symmetric, + symmetric_flip=segment_symmetric,# need to flip symmetric dataset + resample_order=resample_order, + filters=post_filters, + ) + + if sample.seg is not None: + _lut=None + _flip_lut=None + if not run_in_bbox: # assume that labels are already renamed + _lut=invert_lut(library_description.get("map",None)) + _flip_lut=invert_lut(library_description.get("flip_map",None)) + + warp_rename_seg( sample, local_model, bbox_sample, + transform=bbox_linear_xfm, + symmetric=segment_symmetric, + symmetric_flip=segment_symmetric, + lut = _lut, + flip_lut = _flip_lut, + resample_order=resample_order, + resample_baa=resample_baa) + + output_info['bbox_sample']=bbox_sample + + if preprocess_only: + if cleanup: + shutil.rmtree(work_lib_dir) + shutil.rmtree(work_lib_dir_f) + if os.path.exists(flipdir): + shutil.rmtree(flipdir) + if pre_filters is not None: + sample_filtered.cleanup() + return (None,output_info) + + # 3. run non-linear registration if needed + # TODO: skip if sample presegmented + if do_nonlinear_register: + nl_sample=MriDataset(prefix=work_dir, name='nl_'+sample.name, add_n=sample_modalities ) + nonlinear_xfm=MriTransform(prefix=work_dir, name='nl_'+sample.name ) + + + if nonlinear_register_type=='elx' or nonlinear_register_type=='elastix' : + elastix_registration( bbox_sample, local_model, + nonlinear_xfm, + symmetric=segment_symmetric, + level=nlreg_level, + start_level=nlreg_start, + parameters=nlreg_options, + nl=True, + downsample=nlreg_downsample ) + elif nonlinear_register_type=='ants' or do_nonlinear_register_ants: + non_linear_registration( bbox_sample, local_model, + nonlinear_xfm, + symmetric=segment_symmetric, + level=nlreg_level, + start_level=nlreg_start, + parameters=nlreg_options, + ants=True, + downsample=nlreg_downsample ) + else: + non_linear_registration( bbox_sample, local_model, + nonlinear_xfm, + symmetric=segment_symmetric, + level=nlreg_level, + start_level=nlreg_start, + parameters=nlreg_options, + ants=False, + downsample=nlreg_downsample ) + + print("\n\n\nWarping the sample!:{}\n\n\n".format(bbox_sample)) + nl_sample.seg=None + nl_sample.seg_f=None + nl_sample.mask=None + nl_sample.mask_f=None + + warp_sample(bbox_sample, local_model, nl_sample, + transform=nonlinear_xfm, + symmetric=segment_symmetric, + resample_order=resample_order, + filters=post_filters, + ) + + warp_model_mask(local_model,bbox_sample_mask, + transform=nonlinear_xfm, + symmetric=segment_symmetric, + resample_order=resample_order) + + bbox_sample.mask=bbox_sample_mask.mask + bbox_sample.mask_f=bbox_sample_mask.mask_f + + output_info['bbox_sample']=bbox_sample + output_info['nl_sample']=nl_sample + else: + nl_sample=bbox_sample + # use mask from the model directly? + bbox_sample.mask=local_model.mask + bbox_sample.mask_f=local_model.mask + + output_info['nonlinear_xfm']=nonlinear_xfm + + if generate_library: + # remove excluded samples TODO: use regular expressions for matching? 
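+            # note: exclusion matches on the first column of each library row,
+            # i.e. the scan path must match exactly (editor's note)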
+ selected_library=[i for i in library if i[0] not in exclude] + selected_library_f=[] + + if segment_symmetric: # fill up with all entries + selected_library_f=copy.deepcopy(selected_library) + + # library pre-selection if needed + # TODO: skip if sample presegmented + if library_preselect>0 and library_preselect < len(selected_library): + loaded=False + loaded_f=False + + if os.path.exists(work_lib_dir+os.sep+'sel_library.json'): + with open(work_lib_dir+os.sep+'sel_library.json','r') as f: + selected_library=json.load(f) + loaded=True + + if segment_symmetric and os.path.exists(work_lib_dir_f+os.sep+'sel_library.json'): + with open(work_lib_dir_f+os.sep+'sel_library.json','r') as f: + selected_library_f=json.load(f) + loaded_f=True + + if do_nonlinear_register: + if not loaded: + selected_library=preselect(nl_sample, + selected_library, + method=library_preselect_method, + number=library_preselect, + use_nl=library_nl_samples_avail, + step=library_preselect_step, + lib_add_n=library_modalities) + if segment_symmetric: + if not loaded_f: + selected_library_f=preselect(nl_sample, + selected_library_f, + method=library_preselect_method, + number=library_preselect, + use_nl=library_nl_samples_avail, + flip=True, + step=library_preselect_step, + lib_add_n=library_modalities) + else: + if not loaded: + selected_library=preselect(bbox_sample, + selected_library, + method=library_preselect_method, + number=library_preselect, + use_nl=False, + step=library_preselect_step, + lib_add_n=library_modalities) + if segment_symmetric: + if not loaded_f: + selected_library_f=preselect(bbox_sample, + selected_library_f, + method=library_preselect_method, + number=library_preselect, + use_nl=False,flip=True, + step=library_preselect_step, + lib_add_n=library_modalities) + + if not loaded: + with open(work_lib_dir+os.sep+'sel_library.json','w') as f: + json.dump(selected_library,f) + + if not loaded_f: + if segment_symmetric: + with open(work_lib_dir_f+os.sep+'sel_library.json','w') as f: + json.dump(selected_library_f,f) + + output_info['selected_library']=selected_library + if segment_symmetric: + output_info['selected_library_f']=selected_library_f + + selected_library_scan=[] + selected_library_xfm=[] + selected_library_warped2=[] + selected_library_xfm2=[] + + selected_library_scan_f=[] + selected_library_xfm_f=[] + selected_library_warped_f=[] + selected_library_warped2_f=[] + selected_library_xfm2_f=[] + + for (i,j) in enumerate(selected_library): + d=MriDataset(scan=j[0],seg=j[1], add=j[2:2+library_modalities] ) + + selected_library_scan.append(d) + + selected_library_warped2.append( MriDataset(name=d.name, prefix=work_lib_dir, add_n=sample_modalities )) + selected_library_xfm2.append( MriTransform(name=d.name,prefix=work_lib_dir )) + + if library_nl_samples_avail: + selected_library_xfm.append( MriTransform(xfm=j[2+library_modalities], xfm_inv=j[3+library_modalities] ) ) + + output_info['selected_library_warped2']=selected_library_warped2 + output_info['selected_library_xfm2']=selected_library_xfm2 + if library_nl_samples_avail: + output_info['selected_library_xfm']=selected_library_xfm + + if segment_symmetric: + for (i,j) in enumerate(selected_library_f): + d=MriDataset(scan=j[0],seg=j[1], add=j[2:2+library_modalities] ) + selected_library_scan_f.append(d) + selected_library_warped2_f.append(MriDataset(name=d.name, prefix=work_lib_dir_f, add_n=sample_modalities )) + selected_library_xfm2_f.append(MriTransform( name=d.name, prefix=work_lib_dir_f )) + + if library_nl_samples_avail: + 
selected_library_xfm_f.append( MriTransform(xfm=j[2+library_modalities], xfm_inv=j[3+library_modalities] )) + + output_info['selected_library_warped2_f']=selected_library_warped2_f + output_info['selected_library_xfm2_f']=selected_library_xfm2_f + if library_nl_samples_avail: + output_info['selected_library_xfm_f']=selected_library_xfm_f + + # nonlinear registration to template or individual + + if do_pairwise: # Right now ignore precomputed transformations + results=[] + if debug: + print("Performing pairwise registration") + + for (i,j) in enumerate(selected_library): + # TODO: make clever usage of precomputed transform if available + if pairwise_register_type=='elx' or pairwise_register_type=='elastix' : + results.append( futures.submit( + elastix_registration, + bbox_sample, + selected_library_scan[i], + selected_library_xfm2[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + nl=True, + output_inv_target=selected_library_warped2[i], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + elif pairwise_register_type=='ants' or do_pairwise_ants: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan[i], + selected_library_xfm2[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=True, + output_inv_target=selected_library_warped2[i], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + else: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan[i], + selected_library_xfm2[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=False, + output_inv_target=selected_library_warped2[i], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + + + if segment_symmetric: + for (i,j) in enumerate(selected_library_f): + # TODO: make clever usage of precomputed transform if available + + if pairwise_register_type=='elx' or pairwise_register_type=='elastix' : + results.append( futures.submit( + elastix_registration, + bbox_sample, + selected_library_scan_f[i], + selected_library_xfm2_f[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + nl=True, + output_inv_target=selected_library_warped2_f[i], + warp_seg=True, + flip=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + elif pairwise_register_type=='ants' or do_pairwise_ants: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan_f[i], + selected_library_xfm2_f[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=True, + output_inv_target=selected_library_warped2_f[i], + warp_seg=True, + flip=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + else: + results.append( futures.submit( + non_linear_registration, + bbox_sample, + selected_library_scan_f[i], + selected_library_xfm2_f[i], + level=pairwise_level, + start_level=pairwise_start, + parameters=pairwise_options, + ants=False, + output_inv_target=selected_library_warped2_f[i], + warp_seg=True, + flip=True, + resample_order=resample_order, + resample_baa=resample_baa + ) ) + # TODO: do we really need to wait for result here? 
+ futures.wait(results, return_when=futures.ALL_COMPLETED) + else: + + results=[] + + for (i, j) in enumerate(selected_library): + + lib_xfm=None + if library_nl_samples_avail: + lib_xfm=selected_library_xfm[i] + + results.append( futures.submit( + concat_resample, + selected_library_scan[i], + lib_xfm , + nonlinear_xfm, + selected_library_warped2[i], + resample_order=resample_order, + resample_baa=resample_baa + ) ) + + if segment_symmetric: + for (i, j) in enumerate(selected_library_f): + lib_xfm=None + if library_nl_samples_avail: + lib_xfm=selected_library_xfm_f[i] + + results.append( futures.submit( + concat_resample, + selected_library_scan_f[i], + lib_xfm, + nonlinear_xfm, + selected_library_warped2_f[i], + resample_order=resample_order, + resample_baa=resample_baa, + flip=True + ) ) + # TODO: do we really need to wait for result here? + futures.wait(results, return_when=futures.ALL_COMPLETED) + else: # no library generated + selected_library=[] + selected_library_f=[] + selected_library_warped2=[] + selected_library_warped2_f=[] + + results=[] + + sample_seg=MriDataset(name='bbox_seg_' + sample.name+out_variant, prefix=work_dir ) + sample_seg.mask=None + sample_seg.mask_f=None + + results.append( futures.submit( + fuse_segmentations, + bbox_sample, + sample_seg, + selected_library_warped2, + flip=False, + classes_number=classes_number, + fuse_options=fuse_options, + gco_energy=gco_energy, + ec_options=ec_options, + model=local_model, + debug=debug, + ec_variant=ec_variant, + fuse_variant=fuse_variant, + regularize_variant=regularize_variant + )) + + if segment_symmetric: + results.append( futures.submit( + fuse_segmentations, + bbox_sample, + sample_seg, + selected_library_warped2_f, + flip=True, + classes_number=classes_number, + fuse_options=fuse_options, + gco_energy=gco_energy, + ec_options=ec_options, + model=local_model, + debug=debug, + ec_variant=ec_variant, + fuse_variant=fuse_variant, + regularize_variant=regularize_variant + )) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + output_info['fuse']=results[0].result() + if segment_symmetric: + output_info['fuse_f']=results[1].result() + + if qc_options: + # generate QC images + output_info['qc'] = generate_qc_image(sample_seg, + bbox_sample, + sample_qc, + options=qc_options, + model=local_model, + symmetric=segment_symmetric, + labels=library_description['classes_number']) + # cleanup if need + if cleanup: + shutil.rmtree(work_lib_dir) + shutil.rmtree(work_lib_dir_f) + if os.path.exists(flipdir): + shutil.rmtree(flipdir) + + if nl_sample is not None: + nl_sample.cleanup() + + if pre_filters is not None: + sample_filtered.cleanup() + + if cleanup_xfm: + # TODO: remove more xfms(?) 
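+            # editor's note: the non-linear grid files are usually the largest
+            # intermediate outputs, so removing them here saves disk space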
+ if nonlinear_xfm is not None: + nonlinear_xfm.cleanup() + + if not run_in_bbox: + # TODO: apply error correction here + # rename labels to final results + sample_seg_native=MriDataset(name='seg_' + sample.name+out_variant, prefix=work_dir ) + + warp_rename_seg(sample_seg, input_sample, sample_seg_native, + transform=bbox_linear_xfm, invert_transform=True, + lut=library_description['map'] , + symmetric=segment_symmetric, + symmetric_flip=segment_symmetric, + use_flipped=segment_symmetric, # needed to flip .seg_f back to right orientation + flip_lut=library_description['flip_map'], + resample_baa=resample_baa, + resample_order=resample_order, + datatype=seg_datatype ) + + output_info['sample_seg_native']=sample_seg_native + output_info['used_labels']=make_segmented_label_list(library_description,symmetric=segment_symmetric) + + if segment_symmetric: + join_left_right(sample_seg_native, output_segment+'_seg.mnc', datatype=seg_datatype) + else: + shutil.copyfile(sample_seg_native.seg, output_segment+'_seg.mnc') + + output_info['output_segment']=output_segment+'_seg.mnc' + + output_info['output_volumes']=seg_to_volumes(output_segment+'_seg.mnc', + output_segment+'_vol.json', + label_map=library_description.get('label_map',None)) + + output_info['output_volumes_json']=output_segment+'_vol.json' + + # TODO: cleanup more here (?) + + return (output_segment+'_seg.mnc',output_info) + else: # special case, needed to train error correction + return (sample_seg.seg,output_info) + + except mincError as e: + print("Exception in fusion_segment:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in fusion_segment:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/fuse_segmentations.py b/ipl/segment/fuse_segmentations.py new file mode 100644 index 0000000..af8299c --- /dev/null +++ b/ipl/segment/fuse_segmentations.py @@ -0,0 +1,390 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date +# + +import shutil +import os +import sys +import csv +import copy +import re +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +import ipl.minc_hl as hl + +# scoop parallel execution +from scoop import futures, shared + +from .filter import * +from .structures import * +from .registration import * +from .resample import * +from .error_correction import * +from .preselect import * +from .qc import * + +import traceback + +def fuse_segmentations( sample, output, library, + fuse_options={}, + flip=False, + classes_number=2, + gco_energy=None, + ec_options=None, + model=None, + debug=False, + ec_variant='', + fuse_variant='', + regularize_variant='', + work_dir=None ): + try: + final_out_seg=output.seg + scan=sample.scan + add_scan=sample.add + output_info={} + preseg=sample.seg + + if flip: + scan=sample.scan_f + add_scan=sample.add_f + final_out_seg=output.seg_f + preseg=sample.seg_f + + if not os.path.exists( final_out_seg ): + with mincTools( verbose=2 ) as m: + if work_dir is None: + work_dir=os.path.dirname(output.seg) + + dataset_name=sample.name + + if flip: + dataset_name+='_f' + + out_seg_fuse = work_dir+os.sep+dataset_name+'_'+fuse_variant+'.mnc' + out_prob_base = work_dir+os.sep+dataset_name+'_'+fuse_variant+'_prob' + out_dist = work_dir+os.sep+dataset_name+'_'+fuse_variant+'_dist.mnc' + out_seg_reg = work_dir+os.sep+dataset_name+'_'+fuse_variant+'_'+regularize_variant+'.mnc' + + out_seg_ec = final_out_seg + + output_info['work_dir']=work_dir + output_info['dataset_name']=work_dir + + if ec_options is None: # skip error-correction part + out_seg_reg=out_seg_ec + print("ec_options={}".format(repr(ec_options))) + + output_info['out_seg_reg']=out_seg_reg + output_info['out_seg_fuse']=out_seg_fuse + output_info['out_dist']=out_dist + + probs=[ '{}_{:02d}.mnc'.format(out_prob_base, i) for i in range(classes_number) ] + + output_info['probs']=probs + + + if preseg is None: + patch=0 + search=0 + threshold=0 + iterations=0 + gco_optimize=False + nnls=False + gco_diagonal=False + label_norm=None + ext_tool=None + + if fuse_options is not None: + # get parameters + patch= fuse_options.get('patch', 0) + search= fuse_options.get('search', 0) + threshold= fuse_options.get('threshold', 0.0) + iterations= fuse_options.get('iter', 3) + weights= fuse_options.get('weights', None) + nnls = fuse_options.get('nnls', False) + label_norm = fuse_options.get('label_norm', None) + beta = fuse_options.get('beta', None) + new_prog = fuse_options.get('new', True) + ext_tool = fuse_options.get('ext', None) + + # graph-cut based segmentation + gco_optimize = fuse_options.get('gco', False) + gco_diagonal = fuse_options.get('gco_diagonal', False) + gco_wlabel= fuse_options.get('gco_wlabel', 1.0) + gco_wdata = fuse_options.get('gco_wdata', 1.0) + gco_wintensity=fuse_options.get('gco_wintensity', 0.0) + gco_epsilon =fuse_options.get('gco_epsilon', 1e-4) + + + if label_norm is not None: + print("Using label_norm:{}".format(repr(label_norm))) + # need to create rough labeling and average + segs=['multiple_volume_similarity'] + segs.extend([ i.seg for i in library ]) + segs.extend(['--majority', m.tmp('maj_seg.mnc'), '--bg'] ) + m.execute(segs) + + scans=[ i.scan for i in library ] + m.median(scans,m.tmp('median.mnc')) + + norm_order=label_norm.get('order',3) + norm_median=label_norm.get('median',True) + + n_scan=work_dir+os.sep+dataset_name+'_'+fuse_variant+'_norm.mnc' + + if flip: + n_scan=work_dir+os.sep+dataset_name+'_'+fuse_variant+'_f_norm.mnc' + + 
hl.label_normalize(scan,m.tmp('maj_seg.mnc'),m.tmp('median.mnc'),m.tmp('maj_seg.mnc'),out=n_scan,order=norm_order,median=norm_median) + scan=n_scan + if ext_tool is not None: # will run external segmentation tool! + # ext_tool is expected to be a string with format language specs + segs=ext_tool.format(sample=sample.scan, + mask=sample.mask, + output=out_seg_fuse, + prob_base=out_prob_base, + model_mas=model.mask, + model_atlas=model.seg) + outputs=[out_seg_fuse] + m.command(segs, inputs=[sample.scan], outputs=outputs) + + pass #TODO: finish this + elif patch==0 and search==0: # perform simple majority voting + # create majority voted model segmentation, for ANIMAL segmentation if needed + segs=['multiple_volume_similarity'] + segs.extend([ i.seg for i in library ]) + segs.extend(['--majority', out_seg_fuse, '--bg'] ) + m.execute(segs) + + #TODO:Output fake probs ? + + if gco_energy is not None and gco_optimize: + # todo place this into parameters + split_labels( out_seg_fuse, + classes_number, + out_prob_base, + antialias=True, + blur=1.0, + expit=1.0, + normalize=True ) + else: # run patc-based label fusion + # create text file for the training library + train_lib=os.path.dirname(library[0].seg)+os.sep+sample.name+'.lst' + + if flip: + train_lib=os.path.dirname(library[0].seg)+os.sep+sample.name+'_f.lst' + + output_info['train_lib']=train_lib + + with open(train_lib,'w') as f: + for i in library: + ss=[ os.path.basename(i.scan) ] + ss.extend([os.path.basename(j) for j in i.add]) + ss.append(os.path.basename(i.seg)) + f.write(",".join(ss)) + f.write("\n") + + outputs=[] + + if len(add_scan)>0: + + segs=['itk_patch_morphology_mc', + scan, + '--train', train_lib, + '--search', str(search), + '--patch', str(patch), + '--discrete', str(classes_number), + '--adist', out_dist, + '--prob', out_prob_base ] + + if weights is not None: + segs.extend(['--weights',weights]) + + segs.extend(add_scan) + segs.extend(['--output', out_seg_fuse]) + else: + if nnls: + segs=['itk_patch_segmentation', scan, + '--train', train_lib, + '--search', str(search), + '--patch', str(patch), + '--discrete', str(classes_number), + '--iter', str(iterations), + '--prob', out_prob_base, + '--adist', out_dist, + '--nnls', + '--threshold', str(threshold) ] + else: + if new_prog: + segs=['itk_patch_segmentation','--exp'] + else: + segs=['itk_patch_morphology'] + + segs.extend([scan, + '--train', train_lib, + '--search', str(search), + '--patch', str(patch), + '--discrete', str(classes_number), + '--iter', str(iterations), + '--prob', out_prob_base, + '--adist', out_dist, + '--threshold', str(threshold) ]) + if beta is not None: + segs.extend(['--beta',str(beta)]) + + segs.append(out_seg_fuse) + # plug in additional modalities + + outputs=[ out_seg_fuse ] + outputs.extend(probs) + + if sample.mask is not None: + segs.extend(['--mask', sample.mask]) + + m.command(segs, inputs=[sample.scan], outputs=outputs) + print(' '.join(segs)) + + if gco_energy is not None and gco_optimize: + gco= [ 'gco_classify', '--cooc', gco_energy ] + + gco.extend( probs ) + gco.extend([out_seg_reg, + '--iter', '1000', + '--wlabel', str(gco_wlabel), + '--wdata', str(gco_wdata), + '--epsilon', str(gco_epsilon)]) + + if gco_diagonal: + gco.append('--diagonal') + + if gco_wintensity > 0.0: + gco.extend( ['--intensity',scan, + '--wintensity',str(gco_wintensit)] ) + + if sample.mask is not None: + gco.extend(['--mask', sample.mask]) + + m.command(gco, inputs=probs, outputs=[ out_seg_reg ] ) + else: + shutil.copyfile(out_seg_fuse, out_seg_reg) + else: + 
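+                    # a (pre-)segmentation was supplied with the sample: skip
+                    # label fusion and hand it directly to the error-correction
+                    # stage below (editor's note)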
#shutil.copyfile(preseg, out_seg_reg) + + + if ec_options is None: + shutil.copyfile(preseg,final_out_seg) + out_seg_reg=final_out_seg + else: + out_seg_reg=preseg + + output_info['out_seg_reg']=out_seg_reg + output_info['out_seg_fuse']=out_seg_reg + output_info['out_dist']=None + output_info['prob']=None + #out_seg_reg = preseg + + if ec_options is not None: + # create ec mask + ec_border_mask = ec_options.get( 'border_mask' , True ) + ec_border_mask_width = ec_options.get( 'border_mask_width' , 3 ) + + ec_antialias_labels = ec_options.get( 'antialias_labels' , True ) + ec_blur_labels = ec_options.get( 'blur_labels', 1.0 ) + ec_expit_labels = ec_options.get( 'expit_labels', 1.0 ) + ec_normalize_labels = ec_options.get( 'normalize_labels', True ) + ec_use_raw = ec_options.get( 'use_raw', False ) + ec_split = ec_options.get( 'split', None ) + + train_mask = model.mask + ec_input_prefix = out_seg_reg.rsplit('.mnc',1)[0]+'_'+ec_variant + + if ec_border_mask : + train_mask = ec_input_prefix + '_train_mask.mnc' + make_border_mask( out_seg_reg, train_mask, + width=ec_border_mask_width, labels=classes_number ) + + ec_input=[ scan ] + ec_input.extend(sample.add) + + if classes_number>2 and (not ec_use_raw ): + split_labels( out_seg_reg, classes_number, ec_input_prefix, + antialias=ec_antialias_labels, + blur=ec_blur_labels, + expit=ec_expit_labels, + normalize=ec_normalize_labels ) + + ec_input.extend([ '{}_{:02d}.mnc'.format(ec_input_prefix,i) for i in range(classes_number) ]) # skip background feature ? + else: + ec_input.append( out_seg_reg )# the auto segmentation is + + output_info['out_seg_ec']=out_seg_ec + + if ec_split is None: + if ec_variant is not None: + out_seg_ec_errors1 = work_dir + os.sep + dataset_name + '_' + fuse_variant+'_'+regularize_variant+'_'+ec_variant+'_error1.mnc' + out_seg_ec_errors2 = work_dir + os.sep + dataset_name + '_' + fuse_variant+'_'+regularize_variant+'_'+ec_variant+'_error2.mnc' + + output_info['out_seg_ec_errors1']=out_seg_ec_errors1 + output_info['out_seg_ec_errors2']=out_seg_ec_errors2 + + errorCorrectionApply(ec_input, + out_seg_ec, + input_mask=train_mask, + parameters=ec_options, + input_auto=out_seg_reg, + debug=debug, + multilabel=classes_number, + debug_files=[out_seg_ec_errors1, out_seg_ec_errors2 ] ) + else: + results=[] + parts=[] + + for s in range(ec_split): + out='{}_part_{:d}.mnc'.format(ec_input_prefix,s) + train_part=ec_options['training'].rsplit('.pickle',1)[0] + '_' + str(s) + '.pickle' + ec_options_part=copy.deepcopy(ec_options) + ec_options_part['training']=train_part + + if ec_variant is not None: + out_seg_ec_errors1 = work_dir+os.sep+dataset_name+'_'+fuse_variant+'_'+regularize_variant+'_'+ec_variant+'_error1_'+str(s)+'.mnc' + out_seg_ec_errors2 = work_dir+os.sep+dataset_name+'_'+fuse_variant+'_'+regularize_variant+'_'+ec_variant+'_error2_'+str(s)+'.mnc' + + parts.append(out) + results.append( futures.submit( + errorCorrectionApply, + ec_input, out, + input_mask=train_mask, + parameters=ec_options_part, + input_auto=out_seg_reg, + debug=debug, + partition=ec_split, + part=s, + multilabel=classes_number, + debug_files=[out_seg_ec_errors1,out_seg_ec_errors2] )) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + merge_segmentations(parts, out_seg_ec, ec_split, ec_options) + + return output_info + + except mincError as e: + print("Exception in fuse_segmentations:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in fuse_segmentations:{}".format(sys.exc_info()[0])) + 
traceback.print_exc( file=sys.stdout) + raise + +def join_left_right(sample,output,datatype=None): + with mincTools() as m: + cmd=['itk_merge_discrete_labels',sample.seg,sample.seg_f,output] + if datatype is not None: + cmd.append('--'+datatype) + m.command(cmd,inputs=[sample.seg,sample.seg_f],outputs=[output]) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/labels.py b/ipl/segment/labels.py new file mode 100644 index 0000000..c6ed099 --- /dev/null +++ b/ipl/segment/labels.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + +import shutil +import os +import sys +import csv +import traceback + + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +def split_labels_seg(sample): + ''' split up one multi-label segmentation into a set of files''' + try: + with mincTools() as m: + if sample.seg is not None: + base=sample.seg.rsplit('.mnc',1)[0]+'_%03d.mnc' + sample.seg_split=m.split_labels(sample.seg,base) + if sample.seg_f is not None: + base=sample.seg_f.rsplit('.mnc',1)[0]+'_%03d.mnc' + sample.seg_f_split=m.split_labels(sample.seg_f,base) + except mincError as e: + print("Exception in split_labels_seg:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in split_labels_seg:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + +def merge_labels_seg(sample): + ''' merge multiple segmentations into a single file''' + try: + with mincTools() as m: + if any(sample.seg_split): + if sample.seg is None: + sample.seg=sample.seg_split[0].rsplit('_000.mnc',1)[0]+'.mnc' + m.merge_labels(sample.seg_split,sample.seg) + if any(sample.seg_f_split): + if sample.seg_f is None: + sample.seg_f=sample.seg_f_split[0].rsplit('_000.mnc',1)[0]+'.mnc' + m.merge_labels(sample.seg_f_split,sample.seg_f) + except mincError as e: + print("Exception in merge_labels_seg:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in merge_labels_seg:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/library.py b/ipl/segment/library.py new file mode 100644 index 0000000..4409790 --- /dev/null +++ b/ipl/segment/library.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date +# + +import copy +import json +import os +import sys +import traceback + +def save_library_info(library_description, output,name='library.json'): + """Save library information into directory, using predefined file structure + Arguments: + library_description -- dictionary with library description + output -- output directory + + Keyword arguments: + name -- optional name of .json file, relative to the output directory, default 'library.json' + """ + try: + tmp_library_description=copy.deepcopy(library_description) + tmp_library_description.pop('prefix',None) + + for i in ['local_model','local_model_mask', 'local_model_flip', + 'local_model_mask_flip', + 'local_model_seg','local_model_sd','local_model_avg','local_model_ovl', + 'gco_energy']: + if tmp_library_description[i] is not None: + tmp_library_description[i]=os.path.relpath(tmp_library_description[i],output) + + for (j, i) in enumerate(tmp_library_description['local_model_add']): + tmp_library_description['local_model_add'][j]=os.path.relpath(i, output) + + for (j, i) in enumerate(tmp_library_description['local_model_add_flip']): + tmp_library_description['local_model_add_flip'][j]=os.path.relpath(i, output) + + for i in ['model','model_mask']: + # if it starts with the same prefix, remove it + if os.path.dirname(tmp_library_description[i])==output \ + or tmp_library_description[i][0]!=os.sep: + tmp_library_description[i]=os.path.relpath(tmp_library_description[i],output) + + for (j, i) in enumerate(tmp_library_description['model_add']): + if os.path.dirname(i)==output: + tmp_library_description['model_add'][j]=os.path.relpath(i, output) + + for (j, i) in enumerate(tmp_library_description['library']): + for (k,t) in enumerate(i): + tmp_library_description['library'][j][k]=os.path.relpath(t, output) + + with open(output+os.sep+name,'w') as f: + json.dump(tmp_library_description,f,indent=1) + except : + print("Error saving library information into:{} {}".format(output,sys.exc_info()[0])) + traceback.print_exc(file=sys.stderr) + raise + +def load_library_info(prefix, name='library.json'): + """Load library information from directory, using predefined file structure + Arguments: + prefix -- directory path + + Keyword arguments: + name -- optional name of .json file, relative to the input directory, default 'library.json' + """ + try: + library_description={} + with open(prefix+os.sep+name,'r') as f: + library_description=json.load(f) + + library_description['prefix']=prefix + + for i in ['local_model','local_model_mask', 'local_model_flip', + 'local_model_mask_flip','local_model_seg','gco_energy']: + if library_description[i] is not None: library_description[i]=prefix+os.sep+library_description[i] + + try: + for (j, i) in enumerate(library_description['local_model_add']): + library_description['local_model_add'][j]=prefix+os.sep+i + + for (j, i) in enumerate(library_description['local_model_add_flip']): + library_description['local_model_add_flip'][j]=prefix+os.sep+i + except KeyError: + pass + + for (j, i) in enumerate(library_description['library']): + for (k,t) in enumerate(i): + library_description['library'][j][k]=prefix+os.sep+t + + for i in ['model','model_mask']: + # if it starts with '/' assume it's absolute path + if library_description[i] is not None and library_description[i][0]!=os.sep: + library_description[i]=prefix+os.sep+library_description[i] + try: + for (j, i) in enumerate(library_description['model_add']): + if library_description['model_add'][j][0]!='/': + 
library_description['model_add'][j]=prefix+os.sep+i + except KeyError: + pass + + return library_description + except : + print("Error loading library information from:{} {}".format(prefix,sys.exc_info()[0])) + traceback.print_exc(file=sys.stderr) + raise + +def make_segmented_label_list(library_description,symmetric=False): + """ Make a list of labels that are included in the segmentation library + taking into account flipped labels too if needed + """ + used_labels=set() + + if isinstance(library_description['map'], dict): + for i in library_description['map'].items(): + used_labels.add(int(i[1])) + if symmetric: + for i in library_description['flip_map'].items(): + used_labels.add(int(i[1])) + else: + for i in library_description['map']: + used_labels.add(int(i[1])) + if symmetric: + for i in library_description['flip_map']: + used_labels.add(int(i[1])) + return list(used_labels) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/model.py b/ipl/segment/model.py new file mode 100644 index 0000000..096f0d7 --- /dev/null +++ b/ipl/segment/model.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + +import shutil +import os +import sys +import csv +import traceback + + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + + +def create_local_model(tmp_lin_samples, model, local_model, + extend_boundary=4, + op=None, + symmetric=False ): + '''create an average segmentation and use it to create local model''' + try: + with mincTools() as m: + segs=['multiple_volume_similarity'] + segs.extend([ i.seg for i in tmp_lin_samples ]) + + if symmetric: segs.extend([ i.seg_f for i in tmp_lin_samples ]) + + segs.extend(['--majority', m.tmp('majority.mnc')] ) + m.execute(segs) + maj=m.tmp('majority.mnc') + + if op is not None: + m.binary_morphology(maj, op, m.tmp('majority_op.mnc'),binarize_threshold=0.5) + maj=m.tmp('majority_op.mnc') + + # TODO: replace mincreshape/mincbbox with something more sensible + out=m.execute_w_output(['mincbbox', '-threshold', '0.5', '-mincreshape', maj ]).rstrip("\n").split(' ') + + s=[ int(i) for i in out[1].split(',') ] + c=[ int(i) for i in out[3].split(',') ] + + start=[s[0]-extend_boundary, s[1]-extend_boundary ,s[2]-extend_boundary ] + ext= [c[0]+extend_boundary*2, c[1]+extend_boundary*2 ,c[2]+extend_boundary*2] + + # reshape the mask + m.execute(['mincreshape', + '-start','{},{},{}'.format(start[0], start[1], start[2]), + '-count','{},{},{}'.format(ext[0], ext[1], ext[2] ), + maj , local_model.mask , '-byte' ] ) + + m.resample_smooth(model.scan, local_model.scan, like=local_model.mask, order=0) + m.resample_labels(m.tmp('majority.mnc'),local_model.seg, like=local_model.mask, order=0) + + for (i,j) in enumerate(model.add): + m.resample_smooth(model.add[i], local_model.add[i], like=local_model.mask, order=0) + + except mincError as e: + print("Exception in create_local_model:{}".format(repr(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in create_local_model:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + +def create_local_model_flip(local_model, model, remap={}, + extend_boundary=4, op=None ): + try: + with mincTools() as m: + m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0]) + m.resample_labels(local_model.seg, m.tmp('flip_seg.mnc'), + transform=m.tmp('flip_x.xfm'), + order=0, remap=remap, like=model.scan) + + seg=m.tmp('flip_seg.mnc') + + if op is not 
None: + m.binary_morphology(seg, op, m.tmp('flip_seg_op.mnc'),binarize_threshold=0.5) + seg=m.tmp('flip_seg_op.mnc') + + # TODO: replace mincreshape/mincbbox with something more sensible + out=m.execute_w_output(['mincbbox', '-threshold', '0.5', '-mincreshape', seg ]).rstrip("\n").split(' ') + + s=[ int(i) for i in out[1].split(',') ] + c=[ int(i) for i in out[3].split(',') ] + + start=[s[0]-extend_boundary, s[1]-extend_boundary ,s[2]-extend_boundary ] + ext= [c[0]+extend_boundary*2, c[1]+extend_boundary*2 ,c[2]+extend_boundary*2] + # reshape the mask + m.execute(['mincreshape', + '-start','{},{},{}'.format(start[0], start[1], start[2]), + '-count','{},{},{}'.format(ext[0], ext[1], ext[2] ), + seg, + local_model.mask_f, + '-byte' ] ) + + m.resample_smooth(local_model.scan, local_model.scan_f, + like=local_model.mask_f, order=0, transform=m.tmp('flip_x.xfm')) + + for (i,j) in enumerate(model.add_f): + m.resample_smooth(model.add[i], local_model.add_f[i], + like=local_model.mask_f, order=0, transform=m.tmp('flip_x.xfm')) + + except mincError as e: + print("Exception in create_local_model_flip:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in create_local_model_flip:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/preselect.py b/ipl/segment/preselect.py new file mode 100644 index 0000000..f850e7d --- /dev/null +++ b/ipl/segment/preselect.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# + +import shutil +import os +import sys +import csv +import copy +import re +import json + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +# scoop parallel execution +from scoop import futures, shared + +from .filter import * +from .structures import * +from .registration import * +from .resample import * + +import traceback + + +def preselect(sample, + library, + method='MI', + number=10, + mask=None, + use_nl=False, + flip=False, + step=None, + lib_add_n=0): + '''calculate the requested similarity metric and return the top `number` entries from the library''' + results=[] + column=0 + + # TODO: use multiple modalities for preselection? 
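+ # The library is a list of rows; column 0 is assumed to hold the + # (linearly registered) scan, and when use_nl is set the non-linearly + # warped scan is assumed to sit at column 4+lib_add_n (an assumption, + # for illustration). A minimal usage sketch with a loaded library: + #   best = preselect(sample, library, method='MI', number=10)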
+ if use_nl: + column=4+lib_add_n + + for (i,j) in enumerate(library): + results.append( futures.submit( + calculate_similarity, sample, MriDataset(scan=j[column]), method=method, mask=mask, flip=flip, step=step + ) ) + futures.wait(results, return_when=futures.ALL_COMPLETED) + + val=[ (j.result(), library[i] ) for (i,j) in enumerate(results)] + + val_sorted=sorted(val, key=lambda s: s[0] ) + + return [i[1] for i in val_sorted[ 0:number] ] + + +def calculate_similarity(sample1, sample2, + mask=None, method='MI', + flip=False, step=None): + try: + with mincTools() as m: + scan=sample1.scan + + if flip: + scan=sample1.scan_f + + # figure out the step size; minctracc works extremely slowly when the step size is smaller than the file's step size + info_sample1=m.mincinfo( sample1.scan ) + + cmds=[ 'minctracc', scan, sample2.scan, '-identity' ] + + if method=='MI': + cmds.extend( ['-nmi', '-blur_pdf', '9'] ) + else: + cmds.append( '-xcorr' ) + + if step is None: + step= max( abs( info_sample1['xspace'].step ) , + abs( info_sample1['yspace'].step ) , + abs( info_sample1['zspace'].step ) ) + + cmds.extend([ + '-step', str(step), str(step), str(step), + '-simplex', '1', + '-tol', '0.01', + '-lsq6', + '-est_center', + '-clob', + m.tmp('similarity.xfm') + ]) + + if mask is not None: + cmds.extend( ['-source_mask', mask]) + + output=re.search( r'^Final objective function value = (\S+)' , m.execute_w_output(cmds, verbose=0), flags=re.MULTILINE).group(1) + + return float(output) + + except mincError as e: + print("Exception in calculate_similarity:{}".format( str(e)) ) + traceback.print_exc( file=sys.stdout ) + raise + + except : + print("Exception in calculate_similarity:{}".format( sys.exc_info()[0]) ) + traceback.print_exc( file=sys.stdout ) + raise diff --git a/ipl/segment/qc.py b/ipl/segment/qc.py new file mode 100644 index 0000000..54428c8 --- /dev/null +++ b/ipl/segment/qc.py @@ -0,0 +1,275 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date +# + +import shutil +import os +import sys +import csv +import copy +import re +import json + + +import argparse + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +import traceback + +# local things +from ipl.segment.structures import * + + +def make_contours(input, output, width=1): + """Convert a multi-label image into another multi-label image containing only the label borders + Arguments: + input -- input minc file + output -- output file + + Keyword arguments: + width -- width of the border to leave behind, default 1 (voxels) + """ + with mincTools() as m: + m.command(['c3d', input,'-split', + '-foreach', + '-dup', '-erode', '1' ,'{}x{}x{}'.format(width,width,width), '-scale', '-1', + '-add', + '-endfor', + '-merge', + '-type', 'short','-o',output], + inputs=[input],outputs=[output], + verbose=True) + +def generate_qc_image(sample_seg, + sample, + sample_qc, + options={}, + model=None, + symmetric=False, + labels=2, + title=None): + """Generate QC image for multi-label segmentation + Arguments: + sample_seg -- input segmentation + sample -- input file + sample_qc -- output QC file + + Keyword arguments: + options -- options as a dictionary with the following keys: + lut_file -- LUT file for minclookup, default None + spectral_mask -- boolean, if spectral mask should be used, default False + dicrete_mask -- boolean, if discrete mask should be used, default False + image_range -- list of two real values, intensity display range + clamp -- boolean, if intensity range clamping should be used + big -- boolean, produce a big QC image, default False + contours -- boolean, draw label contours instead of filled labels, default False + contour_width -- width of the contours (voxels), default 1 + crop -- number of voxels to crop from the volume edges, default None + model -- reference model, default None + symmetric -- boolean, if symmetric QC is needed + labels -- integer, number of labels present, default 2 + title -- QC image title + """ + try: + + #TODO: implement advanced features + qc_lut=options.get('lut_file',None) + spectral_mask=options.get('spectral_mask',False) + dicrete_mask=options.get('dicrete_mask',False) + image_range=options.get('image_range',None) + clamp=options.get('clamp',False) + big=options.get('big',False) + contours=options.get('contours',False) + contour_width=options.get('contour_width',1) + crop=options.get('crop',None) + + if qc_lut is not None: + spectral_mask=False + dicrete_mask=True + + with mincTools() as m: + seg=sample_seg.seg + seg_f=sample_seg.seg_f + scan=sample.scan + scan_f=sample.scan_f + + if crop is not None: + # remove voxels from the edge + m.autocrop(scan,m.tmp('scan.mnc'),isoexpand=-crop) + scan=m.tmp('scan.mnc') + m.resample_labels(seg,m.tmp('seg.mnc'),like=scan) + seg=m.tmp('seg.mnc') + + if symmetric: + m.autocrop(scan_f,m.tmp('scan_f.mnc'),isoexpand=-crop) + scan_f=m.tmp('scan_f.mnc') + m.resample_labels(seg_f,m.tmp('seg_f.mnc'),like=scan) + seg_f=m.tmp('seg_f.mnc') + + if contours: + make_contours(seg,m.tmp('seg_contours.mnc'),width=contour_width) + seg=m.tmp('seg_contours.mnc') + if symmetric: + make_contours(seg_f,m.tmp('seg_f_contours.mnc'),width=contour_width) + seg_f=m.tmp('seg_f_contours.mnc') + + if symmetric: + + m.qc( scan, + m.tmp('qc.png'), + mask=seg, + mask_range=[0,labels-1], + big=False, + clamp=clamp, + image_range=image_range, + spectral_mask=spectral_mask, + dicrete_mask=dicrete_mask, + mask_lut=qc_lut) + + m.qc( scan_f, + m.tmp('qc_f.png'), + mask=seg_f, + mask_range=[0,labels-1], + image_range=image_range, + big=False, + clamp=clamp, + spectral_mask=spectral_mask, + dicrete_mask=dicrete_mask, + title=title, + mask_lut=qc_lut) + + m.command(['montage','-tile','2x1','-geometry','+1+1', + 
m.tmp('qc.png'),m.tmp('qc_f.png'),sample_qc], + inputs=[m.tmp('qc.png'),m.tmp('qc_f.png')], + outputs=[sample_qc]) + else: + m.qc( scan, + sample_qc, + mask=seg, + mask_range=[0,labels-1], + image_range=image_range, + big=True, + mask_lut=qc_lut, + spectral_mask=spectral_mask, + dicrete_mask=dicrete_mask, + clamp=clamp, + title=title) + + return [sample_qc] + except mincError as e: + print("Exception in generate_qc_image:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in generate_qc_image:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + + + +def parse_options(): + + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description='Run QC step manually') + + parser.add_argument('--scan', + help="Underlying scan") + + parser.add_argument('--scan_f', + help="flipped scan") + + parser.add_argument('--seg', + help="Segmentation") + + parser.add_argument('--seg_f', + help="flipped segmentation") + + parser.add_argument('--spectral_mask', + action="store_true", + default=False ) + + parser.add_argument('--discrete_mask', + action="store_true", + default=False ) + + parser.add_argument('--clamp', + action="store_true", + default=False ) + + parser.add_argument('--big', + action="store_true", + default=False ) + + parser.add_argument('--contours', + action="store_true", + default=False ) + + parser.add_argument('--contour_width', + default=1, + type=int, + help="contour_width") + + parser.add_argument('--image_range', + nargs=2, + help="Range") + + parser.add_argument('--lut_file', + help="LUT") + + parser.add_argument('--crop', + type=int, + default=None, + help="Crop files") + + parser.add_argument('--labels', + type=int, + default=2, + help="Number of labels") + + parser.add_argument('output') + + return parser.parse_args() + + +#crop=options.get('crop',None) + +if __name__ == '__main__': + options = parse_options() + + if options.output is None or options.scan is None: + print("Provide some input") + exit(1) + + segment_symmetric=False + if options.scan_f is not None: + segment_symmetric=True + + sample_scan=MriDataset(name='scan', scan=options.scan,scan_f=options.scan_f ) + sample_seg=MriDataset(name='seg', seg=options.seg,seg_f=options.seg_f ) + class_number=1 + + qc_options={ + 'lut_file':options.lut_file, + 'spectral_mask':options.spectral_mask, + 'dicrete_mask':options.discrete_mask, + 'image_range':options.image_range, + 'clamp':options.clamp, + 'big':options.big, + 'contours':options.contours, + 'contour_width':options.contour_width, + 'crop':options.crop + } + + generate_qc_image(sample_seg, + sample_scan, + options.output, + options=qc_options, + symmetric=segment_symmetric, + labels=options.labels) + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/registration.py b/ipl/segment/registration.py new file mode 100644 index 0000000..840658b --- /dev/null +++ b/ipl/segment/registration.py @@ -0,0 +1,673 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date +# + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError +import ipl.registration +import ipl.ants_registration +import ipl.elastix_registration + +def linear_registration( + sample, + model, + output_xfm, + output_sample=None, + output_invert_xfm=None, + init_xfm=None, + symmetric=False, + ants=False, + reg_type ='-lsq12', + objective='-xcorr', + linreg=None, + work_dir=None, + close=False, + warp_seg=False, + resample_order=2, + resample_aa=None, + resample_baa=False, + downsample=None, + bbox=False, + use_mask=True + ): + """perform linear registration to the model, and calculate inverse""" + try: + _init_xfm=None + _init_xfm_f=None + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + print("Registering options: {}".format(repr(linreg))) + print("Registering sample :{}".format(repr(sample))) + print("Registering model :{}".format(repr(model))) + + with mincTools() as m: + + #TODO: check more files? + if not m.checkfiles(inputs=[sample.scan], outputs=[output_xfm.xfm]): return + + #if _init_xfm is None: + # _init_xfm=_init_xfm_f=m.tmp('identity.xfm') + # m.param2xfm(m.tmp('identity.xfm')) + + scan=sample.scan + scan_f=sample.scan_f + + mask=sample.mask + mask_f=sample.mask_f + + model_mask=model.mask + model_mask_f=model.mask + + if mask is None: model_mask=None + if mask_f is None: model_mask_f=None + + if not use_mask: + mask=None + model_mask=None + mask_f=None + model_mask_f=None + + _output_xfm =output_xfm.xfm + _output_xfm_f=output_xfm.xfm_f + + if bbox: + print("Running in bbox!\n\n\n") + scan=m.tmp('scan.mnc') + m.resample_smooth(sample.scan, scan, like=model.scan, transform=_init_xfm) + if sample.mask is not None and (not use_mask): + mask=m.tmp('mask.mnc') + m.resample_labels(sample.mask, mask, like=model.scan, transform=_init_xfm) + _init_xfm=None + close=True + _output_xfm=m.tmp('output.xfm') + + if symmetric: + scan_f=m.tmp('scan_f.mnc') + m.resample_smooth(sample.scan_f, scan_f, like=model.scan, transform=_init_xfm_f) + if sample.mask_f is not None and (not use_mask): + mask_f=m.tmp('mask_f.mnc') + m.resample_labels(sample.mask_f, mask_f, like=model.scan, transform=_init_xfm_f) + _init_xfm_f=None + _output_xfm_f=m.tmp('output_f.xfm') + + if symmetric: + if ants: + ipl.ants_registration.linear_register_ants2( + scan, + model.scan, + _output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + parameters=linreg, + close=close, + downsample=downsample, + ) + ipl.ants_registration.linear_register_ants2( + scan_f, + model.scan, + _output_xfm_f, + source_mask=mask_f, + target_mask=model_mask_f, + init_xfm=_init_xfm_f, + parameters=linreg, + close=close, + downsample=downsample, + ) + else: + ipl.registration.linear_register( + scan, + model.scan, + _output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + objective=objective, + parameters=reg_type, + conf=linreg, + close=close, + downsample=downsample, + ) + + ipl.registration.linear_register( + scan_f, + model.scan, + _output_xfm_f, + source_mask=mask_f, + target_mask=model_mask_f, + init_xfm=_init_xfm_f, + objective=objective, + parameters=reg_type, + conf=linreg, + close=close, + downsample=downsample, + ) + else: + if ants: + ipl.ants_registration.linear_register_ants2( + scan, + model.scan, + _output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + parameters=linreg, + close=close, + downsample=downsample, + ) + 
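+ # fall back to the minctracc-based linear registration when ANTs is not requested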
else: + ipl.registration.linear_register( + scan, + model.scan, + _output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + parameters=reg_type, + conf=linreg, + close=close, + downsample=downsample, + ) + if bbox : + if init_xfm is not None: + m.xfmconcat([init_xfm.xfm,_output_xfm],output_xfm.xfm) + if symmetric: + m.xfmconcat([init_xfm.xfm_f,_output_xfm_f],output_xfm.xfm_f) + else: + shutil.copyfile(_output_xfm,output_xfm.xfm) + if symmetric: + shutil.copyfile(_output_xfm_f,output_xfm.xfm_f) + + if output_invert_xfm is not None: + m.xfminvert(output_xfm.xfm, output_invert_xfm.xfm) + if symmetric: + m.xfminvert(output_xfm.xfm_f, output_invert_xfm.xfm_f) + + if output_sample is not None: + m.resample_smooth(sample.scan, output_sample.scan, + transform=output_xfm.xfm, + like=model.scan, + order=resample_order) + if warp_seg: + m.resample_labels(sample.seg, output_sample.seg, + transform=output_xfm.xfm, + aa=resample_aa, + order=resample_order, + like=model.scan, + baa=resample_baa) + + if symmetric: + m.resample_smooth(sample.scan_f, output_sample.scan_f, + transform=output_xfm.xfm_f, + like=model.scan, + order=resample_order) + if warp_seg: + m.resample_labels(sample.seg_f, output_sample.seg_f, + transform=output_xfm.xfm_f, + aa=resample_aa, + order=resample_order, + like=model.scan, + baa=resample_baa) + + return True + except mincError as e: + print("Exception in linear_registration:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in linear_registration:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +def elastix_registration( + sample, + model, + output_xfm, + output_sample=None, + output_invert=True, + init_xfm=None, + symmetric=False, + work_dir=None, + warp_seg=False, + resample_order=2, + resample_aa=None, + resample_baa=False, + downsample=None, + parameters=None, + bbox=False, + nl=False, + level=2, + start_level=None, # not really used + use_mask=True + ): + """perform elastix registration to the model, and calculate inverse""" + try: + + with mincTools() as m: + + #TODO: check more files? + if not m.checkfiles(inputs=[sample.scan], outputs=[output_xfm.xfm]): return + + _init_xfm=None + _init_xfm_f=None + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + mask=sample.mask + mask_f=sample.mask_f + model_mask=model.mask + + if mask is None: + model_mask=None + + if not use_mask: + mask=None + model_mask=None + mask_f=None + model_mask_f=None + + scan=sample.scan + scan_f=sample.scan_f + + _output_xfm=output_xfm.xfm + _output_xfm_f=output_xfm.xfm_f + + if bbox: + scan=m.tmp('scan.mnc') + m.resample_smooth(sample.scan, scan, like=model.scan, transform=_init_xfm) + if sample.mask is not None and (not use_mask): + mask=m.tmp('mask.mnc') + m.resample_labels(sample.mask, mask, like=model.scan, transform=_init_xfm) + _init_xfm=None + close=True + _output_xfm=m.tmp('output.xfm') + + if symmetric: + scan_f=m.tmp('scan_f.mnc') + m.resample_smooth(sample.scan_f, scan_f, like=model.scan, transform=_init_xfm_f) + if sample.mask_f is not None and (not use_mask): + mask_f=m.tmp('mask_f.mnc') + m.resample_labels(sample.mask_f, mask_f, like=model.scan, transform=_init_xfm_f) + _init_xfm_f=None + _output_xfm_f=m.tmp('output_f.xfm') + + #TODO: update elastix registration to downsample xfm? 
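+ # In the symmetric case both the scan and its flipped copy are registered + # to the same model with identical settings, producing output_xfm.xfm and + # output_xfm.xfm_f respectively.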
+ if symmetric: + ipl.elastix_registration.register_elastix( + scan, + model.scan, + output_xfm=_output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + downsample=downsample, + downsample_grid=level, + parameters=parameters, + nl=nl + ) + ipl.elastix_registration.register_elastix( + scan_f, + model.scan, + output_xfm=_output_xfm_f, + source_mask=mask_f, + target_mask=model_mask, + init_xfm=_init_xfm_f, + downsample=downsample, + downsample_grid=level, + parameters=parameters, + nl=nl + ) + else: + ipl.elastix_registration.register_elastix( + scan, + model.scan, + output_xfm=_output_xfm, + source_mask=mask, + target_mask=model_mask, + init_xfm=_init_xfm, + downsample=downsample, + downsample_grid=level, + parameters=parameters, + nl=nl + ) + + if bbox : + if init_xfm is not None: + m.xfmconcat([init_xfm.xfm,_output_xfm],output_xfm.xfm) + if symmetric: + m.xfmconcat([init_xfm.xfm_f,_output_xfm_f],output_xfm.xfm_f) + else: + shutil.copyfile(_output_xfm,output_xfm.xfm) + if symmetric: + shutil.copyfile(_output_xfm_f,output_xfm.xfm_f) + + if output_invert: + if nl: + m.xfm_normalize(output_xfm.xfm, model.scan, output_xfm.xfm_inv, step=level, invert=True) + else: + m.xfminvert(output_xfm.xfm, output_xfm.xfm_inv) + + if symmetric: + if nl: + m.xfm_normalize(output_xfm.xfm_f, model.scan, output_xfm.xfm_f_inv, step=level, invert=True) + else: + m.xfminvert(output_xfm.xfm_f, output_xfm.xfm_f_inv) + + if output_sample is not None: + m.resample_smooth(sample.scan, output_sample.scan, + transform=output_xfm.xfm, + like=model.scan, order=resample_order) + if warp_seg: + m.resample_labels(sample.seg, output_sample.seg, + transform=output_xfm.xfm, + aa=resample_aa, order=resample_order, + like=model.scan, baa=resample_baa) + + if symmetric: + m.resample_smooth(sample.scan_f, output_sample.scan_f, + transform=output_xfm.xfm_f, + like=model.scan, order=resample_order) + if warp_seg: + m.resample_labels(sample.seg_f, output_sample.seg_f, + transform=output_xfm.xfm_f, + aa=resample_aa, order=resample_order, + like=model.scan, baa=resample_baa) + + return True + except mincError as e: + print("Exception in elastix_registration:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in elastix_registration:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + + +def non_linear_registration( + sample, + model, + output, + output_sample=None, + output_invert=True, + init_xfm=None, + level=2, + start_level=8, + symmetric=False, + parameters=None, + work_dir=None, + ants=False, + warp_seg=False, + resample_order=2, + resample_aa=None, + resample_baa=False, + output_inv_target=None, + flip=False, + downsample=None, + ): + """perform non-linear registration to the model, and calculate inverse""" + + try: + _init_xfm=None + _init_xfm_f=None + + if init_xfm is not None: + _init_xfm=init_xfm.xfm + if symmetric: + _init_xfm_f=init_xfm.xfm_f + + + with mincTools() as m: + + #TODO: check more files? + if not m.checkfiles(inputs=[sample.scan], outputs=[output.xfm]): return + + + if symmetric: + # TODO: split up into two jobs? 
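+ # Both forward registrations (the scan and its flipped copy) run + # sequentially inside this job; the resulting displacement fields are + # normalized to the requested grid step right after.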
+ if not os.path.exists( output.xfm ) or \ + not os.path.exists( output.xfm_f ) : + if ants: + ipl.ants_registration.non_linear_register_ants2( + sample.scan, + model.scan, + m.tmp('forward')+'.xfm', + target_mask=model.mask, + parameters=parameters, + downsample=downsample, + level=level, + start=start_level, + #work_dir=work_dir + ) + ipl.ants_registration.non_linear_register_ants2( + sample.scan_f, + model.scan, + m.tmp('forward_f')+'.xfm', + target_mask=model.mask, + parameters=parameters, + downsample=downsample, + level=level, + start=start_level, + #work_dir=work_dir + ) + else: + ipl.registration.non_linear_register_full( + sample.scan, + model.scan, + m.tmp('forward')+'.xfm', + #source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start_level, + downsample=downsample, + #work_dir=work_dir + ) + ipl.registration.non_linear_register_full( + sample.scan_f, + model.scan, + m.tmp('forward_f')+'.xfm', + #source_mask=sample.mask_f, + target_mask=model.mask, + init_xfm=_init_xfm_f, + parameters=parameters, + level=level, + start=start_level, + downsample=downsample, + #work_dir=work_dir + ) + m.xfm_normalize(m.tmp('forward')+'.xfm',model.scan,output.xfm,step=level) + #TODO: regularize here + m.xfm_normalize(m.tmp('forward_f')+'.xfm',model.scan,output.xfm_f,step=level) + + if output_invert: + if ants: + m.xfm_normalize(m.tmp('forward')+'_inverse.xfm', model.scan, output.xfm_inv, step=level ) + m.xfm_normalize(m.tmp('forward_f')+'_inverse.xfm',model.scan, output.xfm_f_inv, step=level ) + else: + m.xfm_normalize(m.tmp('forward')+'.xfm', model.scan, output.xfm_inv, step=level, invert=True) + m.xfm_normalize(m.tmp('forward_f')+'.xfm',model.scan, output.xfm_f_inv, step=level, invert=True) + else: + if not os.path.exists( output.xfm ) : + if flip: + if ants: + ipl.ants_registration.non_linear_register_ants2( + sample.scan_f, + model.scan, + m.tmp('forward')+'.xfm', + target_mask=model.mask, + parameters=parameters, + downsample=downsample, + level=level, + start=start_level, + #work_dir=work_dir + ) + else: + ipl.registration.non_linear_register_full( + sample.scan_f, + model.scan, + m.tmp('forward')+'.xfm', + #source_mask=sample.mask_f, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start_level, + downsample=downsample, + #work_dir=work_dir + ) + else: + if ants: + ipl.ants_registration.non_linear_register_ants2( + sample.scan, + model.scan, + m.tmp('forward')+'.xfm', + target_mask=model.mask, + parameters=parameters, + downsample=downsample, + level=level, + start=start_level, + #work_dir=work_dir + ) + else: + ipl.registration.non_linear_register_full( + sample.scan, + model.scan, + m.tmp('forward')+'.xfm', + #source_mask=sample.mask, + target_mask=model.mask, + init_xfm=_init_xfm, + parameters=parameters, + level=level, + start=start_level, + downsample=downsample, + #work_dir=work_dir + ) + m.xfm_normalize(m.tmp('forward')+'.xfm', model.scan, output.xfm, step=level) + + if output_invert: + if ants: # ANTS produces both forward and inverse transforms + m.xfm_normalize(m.tmp('forward')+'_inverse.xfm', model.scan, output.xfm_inv, step=level ) + else: + m.xfm_normalize(m.tmp('forward')+'.xfm', model.scan, output.xfm_inv, step=level, invert=True) + + if output_sample is not None: + m.resample_smooth(sample.scan, output_sample.scan, + transform=output.xfm_inv, + like=model.scan, + order=resample_order, + invert_transform=True) + + for (i,j) in enumerate(sample.add): + 
m.resample_smooth(sample.add[i], output_sample.add[i], + transform=output.xfm_inv, + like=model.scan, + order=resample_order, + invert_transform=True) + if warp_seg: + m.resample_labels(sample.seg, output_sample.seg, + transform=output.xfm_inv, + aa=resample_aa, + order=resample_order, + like=model.scan, + invert_transform=True, + baa=resample_baa) + + if symmetric: + m.resample_smooth(sample.scan_f, output_sample.scan_f, + transform=output.xfm_f_inv, + like=model.scan, + invert_transform=True, + order=resample_order) + + for (i,j) in enumerate(sample.add_f): + m.resample_smooth(sample.add_f[i], output_sample.add_f[i], + transform=output.xfm_f_inv, + like=model.scan, + order=resample_order, + invert_transform=True) + + if warp_seg: + m.resample_labels(sample.seg_f, output_sample.seg_f, + transform=output.xfm_f_inv, + aa=resample_aa, + order=resample_order, + like=model.scan, + invert_transform=True, + baa=resample_baa ) + + if output_inv_target is not None: + m.resample_smooth(model.scan, output_inv_target.scan, + transform=output.xfm, + like=sample.scan, + order=resample_order, + invert_transform=True) + + for (i,j) in enumerate(output_inv_target.add): + m.resample_smooth(model.add[i], output_inv_target.add[i], + transform=output.xfm, + like=sample.scan, + order=resample_order, + invert_transform=True) + + if warp_seg: + m.resample_labels(model.seg, output_inv_target.seg, + transform=output.xfm, + aa=resample_aa, + order=resample_order, + like=sample.scan, + invert_transform=True, + baa=resample_baa) + + if symmetric: + m.resample_smooth(model.scan, output_inv_target.scan_f, + transform=output.xfm_f, + like=sample.scan, + invert_transform=True, + order=resample_order) + + for (i,j) in enumerate(output_inv_target.add): + m.resample_smooth(model.add_f[i], output_inv_target.add_f[i], + transform=output.xfm_f, + like=sample.scan, + invert_transform=True, + order=resample_order) + + if warp_seg: + m.resample_labels(model.seg, output_inv_target.seg_f, + transform=output.xfm_f, + aa=resample_aa, + order=resample_order, + like=sample.scan, + invert_transform=True, + baa=resample_baa ) + + except mincError as e: + print("Exception in non_linear_registration:{}".format(repr(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in non_linear_registration:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/resample.py b/ipl/segment/resample.py new file mode 100644 index 0000000..5c0ef73 --- /dev/null +++ b/ipl/segment/resample.py @@ -0,0 +1,367 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date +# + +import shutil +import os +import sys +import csv +import traceback + +# MINC stuff +from ipl.minc_tools import mincTools,mincError + +from .filter import * + + +# scoop parallel execution +from scoop import futures, shared + +def create_fake_mask(in_seg, out_mask, op=None ): + try: + with mincTools() as m : + if op is None : + m.calc([in_seg], 'A[0]>0.5?1:0', out_mask, labels=True) + else : + m.binary_morphology(in_seg, op, out_mask, binarize_threshold=0.5) + except mincError as e: + print("Exception in create_fake_mask:{}".format(repr(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in create_fake_mask:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + + +def resample_file(input,output,xfm=None,like=None,order=4,invert_transform=False): + '''resample input file using the provided transformation''' + try: + with mincTools() as m: + m.resample_smooth(input,output,xfm=xfm,like=like,order=order,invert_transform=invert_transform) + except mincError as e: + print("Exception in resample_file:{}".format(str(e))) + traceback.print_exc(file=sys.stdout) + raise + except : + print("Exception in resample_file:{}".format(sys.exc_info()[0])) + traceback.print_exc(file=sys.stdout) + raise + + +def resample_split_segmentations(input, output,xfm=None, like=None, order=4, invert_transform=False, symmetric=False): + '''resample individual segmentations, using parallel execution''' + results=[] + base=input.seg.rsplit('.mnc',1)[0] + for (i,j) in input.seg_split.items(): + if i not in output.seg_split: + output.seg_split[i]='{}_{:03d}.mnc'.format(base,i) + + results.append(futures.submit( + resample_file,j,output.seg_split[i],xfm=xfm,like=like,order=order,invert_transform=invert_transform + )) + if symmetric: + base=input.seg_f.rsplit('.mnc',1)[0] + for (i,j) in input.seg_f_split.items(): + if i not in output.seg_f_split: + output.seg_f_split[i]='{}_{:03d}.mnc'.format(base,i) + + results.append(futures.submit( + resample_file,j,output.seg_f_split[i],xfm=xfm,like=like,order=order,invert_transform=invert_transform + )) + futures.wait(results, return_when=futures.ALL_COMPLETED) + + +def warp_rename_seg( sample, model, output, + transform=None, + symmetric=False, + symmetric_flip=False, + lut=None, + flip_lut=None, + resample_order=2, + resample_aa=None, + resample_baa=False, + invert_transform=False, + use_flipped=False, + datatype=None, + create_mask=False, + op_mask=None): + #TODO: should I warp the mask here too, if present? 
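+ # warp_rename_seg resamples a library segmentation into (local) model + # space, compacting label values via the remap lookup table, e.g. + # lut={101:1, 103:2} (illustrative values only); it can also produce the + # flipped version and derive a fake mask from the warped labels.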
+ try: + print("warp_rename_seg sample={} output={}".format(repr(sample),repr(output))) + with mincTools() as m: + xfm=None + if transform is not None: + xfm=transform.xfm + + if symmetric: + xfm_f=transform.xfm_f + + m.resample_labels(sample.seg, output.seg, + transform=xfm, + aa=resample_aa, + order=resample_order, + remap=lut, + like=model.scan, + invert_transform=invert_transform, + datatype=datatype, + baa=resample_baa) + + if create_mask: + create_fake_mask(output.seg, output.mask, op=op_mask) + elif sample.mask is not None: + m.resample_labels(sample.mask, output.mask, + transform=xfm, + order=resample_order, + like=model.scan, + invert_transform=invert_transform, + datatype=datatype ) + + if symmetric: + + seg_f=sample.seg + + if use_flipped: + seg_f=sample.seg_f + + if symmetric_flip: + m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0]) + xfm_f=m.tmp('flip_x.xfm') + + if transform is not None: + m.xfmconcat( [m.tmp('flip_x.xfm'), transform.xfm_f ], m.tmp('transform_flip.xfm') ) + xfm_f=m.tmp('transform_flip.xfm') + + m.resample_labels(seg_f, output.seg_f, + transform=xfm_f, + aa=resample_aa, + order=resample_order, + remap=flip_lut, + like=model.scan, + invert_transform=invert_transform, + datatype=datatype, + baa=resample_baa) + if create_mask: + create_fake_mask(output.seg_f, output.mask_f, op=op_mask) + elif sample.mask_f is not None: + m.resample_labels(sample.mask_f, output.mask_f, + transform=xfm_f, + order=resample_order, + like=model.scan, + invert_transform=invert_transform, + datatype=datatype ) + except mincError as e: + print("Exception in warp_rename_seg:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + + except : + print("Exception in warp_rename_seg:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def warp_sample( sample, + model, + output, + transform=None, + symmetric=False, + symmetric_flip=False, + resample_order=None, + use_flipped=False, + filters=None): + # TODO: add filters here + try: + with mincTools() as m: + xfm=None + xfm_f=None + seg_output=output.seg + seg_output_f=output.seg_f + + if transform is not None: + xfm=transform.xfm + if symmetric: + xfm_f=transform.xfm_f + + output_scan=output.scan + + if filters is not None: + output_scan=m.tmp('sample.mnc') + + m.resample_smooth(sample.scan, output_scan, transform=xfm, like=model.scan, order=resample_order) + + if filters is not None: + # TODO: maybe move it to a separate stage? + # HACK: assuming that segmentation was already warped! + apply_filter(output_scan, output.scan, filters, model=model.scan, input_mask=output.mask, input_labels=seg_output, model_labels=model.seg) + + for (i,j) in enumerate( sample.add ): + output_scan = output.add[i] + if filters is not None: + output_scan=m.tmp('sample_{}.mnc').format(i) + + m.resample_smooth(sample.add[i], output_scan, transform=xfm, like=model.scan, order=resample_order) + + if filters is not None: + # TODO: maybe move it to a separate stage? 
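+ # The same intensity filtering is applied to each additional modality, + # reusing the already-warped labels and mask as guidance.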
+ # TODO: apply segmentations for seg-based filtering + apply_filter(output_scan, output.add[i], filters, model=model.scan, input_mask=output.mask, input_labels=seg_output, model_labels=model.seg) + + if symmetric: + scan_f=sample.scan + + if use_flipped: + scan_f=sample.scan #TODO: figure out what this is + + if symmetric_flip: + m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0]) + xfm_f=m.tmp('flip_x.xfm') + + if transform is not None: + m.xfmconcat( [m.tmp('flip_x.xfm'), transform.xfm_f ], m.tmp('transform_flip.xfm') ) + xfm_f=m.tmp('transform_flip.xfm') + + output_scan_f=output.scan_f + + if filters is not None: + output_scan_f=m.tmp('sample_f.mnc') + + m.resample_smooth(scan_f, output_scan_f, transform=xfm_f, like=model.scan, order=resample_order) + + if filters is not None: + # TODO: maybe move it to a separate stage? + apply_filter(output_scan_f, output.scan_f, filters, model=model.scan, input_mask=output.mask_f, input_labels=seg_output_f, model_labels=model.seg) + + for (i,j) in enumerate( sample.add_f ): + output_scan_f = output.add_f[i] + if filters is not None: + output_scan_f=m.tmp('sample_f_{}.mnc').format(i) + + m.resample_smooth( sample.add_f[i], output_scan_f, transform=xfm_f, like=model.scan, order=resample_order) + + if filters is not None: + apply_filter( output_scan_f, output.add_f[i], filters, model=model.scan, input_mask=output.mask_f, input_labels=seg_output_f, model_labels=model.seg) + + output.mask=None + output.mask_f=None + + except mincError as e: + print("Exception in warp_sample:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in warp_sample:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + +def warp_model_mask( model, + output, + transform=None, + symmetric=False, + symmetric_flip=False, + resample_order=None): + # TODO: add filters here + try: + with mincTools() as m: + xfm=None + xfm_f=None + + if transform is not None: + xfm=transform.xfm + if symmetric: + xfm_f=transform.xfm_f + + m.resample_labels(model.mask, output.mask, transform=xfm, like=output.scan, invert_transform=True) + + if symmetric: + if symmetric_flip: + m.param2xfm(m.tmp('flip_x.xfm'), scales=[-1.0, 1.0, 1.0]) + xfm_f=m.tmp('flip_x.xfm') + + if transform is not None: + m.xfmconcat( [m.tmp('flip_x.xfm'), transform.xfm_f ], m.tmp('transform_flip.xfm') ) + xfm_f=m.tmp('transform_flip.xfm') + + m.resample_labels(model.mask, output.mask_f, transform=xfm_f, like=output.scan_f, invert_transform=True) + + except mincError as e: + print("Exception in warp_model_mask:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in warp_model_mask:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + + + +def concat_resample(lib_scan, + xfm_lib, + xfm_sample, + output, + model=None, + resample_aa=None, + resample_order=2, + resample_baa=False, + flip=False ): + '''Concatenate inv(xfm2) and inv(xfm1) and resample scan''' + try: + + if not os.path.exists(output.seg) or \ + not os.path.exists(output.scan) : + with mincTools() as m: + _model=None + + if model is not None: + _model=model.scan + + full_xfm=None + + if xfm_lib is not None and xfm_sample is not None: + if flip: + m.xfmconcat([ xfm_sample.xfm_f, xfm_lib.xfm_inv ], m.tmp('Full.xfm') ) + else: + m.xfmconcat([ xfm_sample.xfm, xfm_lib.xfm_inv ], m.tmp('Full.xfm') ) + full_xfm=m.tmp('Full.xfm') + elif xfm_lib is not None: + full_xfm=xfm_lib.xfm_inv + elif xfm_sample is not None: + if flip: + 
full_xfm=xfm_sample.xfm_f + else: + full_xfm=xfm_sample.xfm + + m.resample_labels(lib_scan.seg, output.seg, + transform=full_xfm, + aa=resample_aa, + order=resample_order, + like=_model, + invert_transform=True, + baa=resample_baa ) + + m.resample_smooth(lib_scan.scan, output.scan, + transform=full_xfm, + order=resample_order, + like=_model, + invert_transform=True) + + for (i,j) in enumerate(lib_scan.add): + m.resample_smooth(lib_scan.add[i], output.add[i], + transform=full_xfm, + order=resample_order, + like=_model, + invert_transform=True) + except mincError as e: + print("Exception in concat_resample:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in concat_resample:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/structures.py b/ipl/segment/structures.py new file mode 100644 index 0000000..160f982 --- /dev/null +++ b/ipl/segment/structures.py @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. FONOV +# @date +# +# data structures used in segmentation package + +import shutil +import os +import sys +import tempfile +import traceback +import json + + +class MriDataset(object): + ''' Scan sample with segmentation and mask''' + def __init__(self, prefix=None, name=None, scan=None, mask=None, seg=None, + scan_f=None, mask_f=None, seg_f=None, protect=False, + add=[], add_n=None, + add_f=[] ): + self.prefix=prefix + self.name=name + self.scan=scan + self.mask=mask + self.seg=seg + self.protect=protect + self.seg_split={} + + self.scan_f = scan_f + self.mask_f = mask_f + self.seg_f = seg_f + self.seg_f_split={} + self.add = add + self.add_f = add_f + + if self.name is None : + if scan is not None: + self.name=os.path.basename(scan).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + if self.prefix is None: + self.prefix=os.path.dirname(self.scan) + else: + if self.prefix is None: + raise ValueError("trying to create dataset without name and prefix") + (_h, _name) = tempfile.mkstemp(suffix='.mnc', dir=prefix) + os.close(_h) + self.name=os.path.relpath(_name,prefix) + os.unlink(_name) + + if scan is None: + if self.prefix is not None: + self.scan=self.prefix+os.sep+self.name+'.mnc' + self.mask=self.prefix+os.sep+self.name+'_mask.mnc' + self.seg=self.prefix+os.sep+self.name+'_seg.mnc' + self.scan_f=self.prefix+os.sep+self.name+'_f.mnc' + self.mask_f=self.prefix+os.sep+self.name+'_f_mask.mnc' + self.seg_f=self.prefix+os.sep+self.name+'_f_seg.mnc' + + if add_n is not None: + self.add=[self.prefix+os.sep+self.name+'_{}.mnc'.format(i) for i in range(add_n)] + self.add_f=[self.prefix+os.sep+self.name+'_{}_f.mnc'.format(i) for i in range(add_n)] + else: + self.add=[] + self.add_f=[] + #------ + + def __repr__(self): + return "MriDataset(\n prefix=\"{}\",\n name=\"{}\",\n scan=\"{}\",\n scan_f=\"{}\",\n mask=\"{}\",\n mask_f=\"{}\",\n seg=\"{}\",\n seg_f=\"{}\",\n protect={},\n add={},\n add_f={})".\ + format(self.prefix,self.name,self.scan,self.scan_f,self.mask,self.mask_f,self.seg,self.seg_f,repr(self.protect),repr(self.add),repr(self.add_f)) + + def cleanup(self): + if not self.protect: + for i in (self.scan, self.mask, self.seg, self.scan_f, self.mask_f, self.seg_f ): + if i is not None and os.path.exists(i): + os.unlink(i) + + for (i,j) in self.seg_split.items(): + if os.path.exists(j): + os.unlink(j) + + for (i,j) in self.seg_f_split.items(): + if os.path.exists(j): + os.unlink(j) + + for (i,j) in 
enumerate(self.add): + if os.path.exists(j): + os.unlink(j) + # ------------ + + +class MriTransform(object): + '''Transformation''' + def __init__(self, prefix=None, name=None, xfm=None, protect=False, xfm_f=None, xfm_inv=None, xfm_f_inv=None, nl=False ): + self.prefix=prefix + self.name=name + + self.xfm=xfm + self.grid=None + + self.xfm_f=xfm_f + self.grid_f=None + + self.xfm_inv=xfm_inv + self.grid_inv=None + + self.xfm_f_inv=xfm_f_inv + self.grid_f_inv=None + + self.protect=protect + self.nl=nl + + if name is None and xfm is None: + raise ValueError("Undefined name and xfm") + + if name is None and xfm is not None: + self.name=os.path.basename(xfm).rsplit('.xfm',1)[0] + + if self.prefix is None: + self.prefix=os.path.dirname(self.xfm) + + if xfm is None: + if self.prefix is not None: + self.xfm= self.prefix+os.sep+self.name+'.xfm' + self.grid= self.prefix+os.sep+self.name+'_grid_0.mnc' + + self.xfm_f= self.prefix+os.sep+self.name+'_f.xfm' + self.grid_f= self.prefix+os.sep+self.name+'_f_grid_0.mnc' + + self.xfm_inv= self.prefix+os.sep+self.name+'_invert.xfm' + self.grid_inv= self.prefix+os.sep+self.name+'_invert_grid_0.mnc' + + self.xfm_f_inv= self.prefix+os.sep+self.name+'_f_invert.xfm' + self.grid_f_inv= self.prefix+os.sep+self.name+'_f_invert_grid_0.mnc' + + def __repr__(self): + return 'MriTransform(prefix="{}",name="{}")'.\ + format(self.prefix, self.name ) + + def cleanup(self): + if not self.protect: + for i in (self.xfm, self.grid, self.xfm_f, self.grid_f, self.xfm_inv, self.grid_inv, self.xfm_f_inv, self.grid_f_inv ): + if i is not None and os.path.exists(i): + os.unlink(i) + +class MRIEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, MriTransform): + return {'name':obj.name, + 'xfm' :obj.xfm, + 'xfm_f':obj.xfm_f, + 'xfm_inv' :obj.xfm_inv, + 'xfm_f_inv':obj.xfm_f_inv, + 'prefix':obj.prefix + } + elif isinstance(obj, MriDataset): + return {'name':obj.name, + 'scan':obj.scan, + 'mask':obj.mask, + 'scan_f':obj.scan_f, + 'mask_f':obj.mask_f, + 'prefix':obj.prefix, + 'add':obj.add, + 'add_f':obj.add_f + } + # Let the base class default method raise the TypeError + return json.JSONEncoder.default(self, obj) + + + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/segment/train.py b/ipl/segment/train.py new file mode 100644 index 0000000..bebbf49 --- /dev/null +++ b/ipl/segment/train.py @@ -0,0 +1,672 @@ +# -*- coding: utf-8 -*- +# +# @author Vladimir S. 
FONOV +# @date +# + +from __future__ import print_function + +import shutil +import os +import sys +import csv +import copy + +# MINC stuff +# from ipl.minc_tools import mincTools,mincError + +# scoop parallel execution +from scoop import futures, shared + +from .filter import * +from .structures import * +from .registration import * +from .resample import * +from .error_correction import * +from .model import * +from .library import * + + +def inv_dict(d): + return { v:k for (k,v) in d.items() } + + +def generate_library(parameters, output, debug=False,cleanup=False): + '''Actual generation of the segmentation library''' + try: + if debug: print(repr(parameters)) + + # read parameters + reference_model = parameters[ 'reference_model'] + reference_mask = parameters.get( 'reference_mask', None) + reference_model_add = parameters.get( 'reference_model_add', []) + + reference_local_model = parameters.get( 'reference_local_model', None) + reference_local_mask = parameters.get( 'reference_local_mask', None) + + reference_local_model_flip= parameters.get( 'reference_local_model_flip', None) + reference_local_mask_flip = parameters.get( 'reference_local_mask_flip', None) + + library = parameters[ 'library' ] + + work_dir = parameters.get( 'workdir',output+os.sep+'work') + + # should we build symmetric model + build_symmetric = parameters.get( 'build_symmetric',False) + + # should we build symmetric flipped model + build_symmetric_flip = parameters.get( 'build_symmetric_flip',False) + + # lookup table for renaming labels for more compact representation + build_remap = parameters.get( 'build_remap',{}) + + # lookup table for renaming labels for more compact representation, + # when building symmetrized library + build_flip_remap = parameters.get( 'build_flip_remap',{}) + + # lookup table for renaming labels for more compact representation, + # when building symmetrized library + build_unflip_remap = parameters.get( 'build_unflip_remap',{}) + + if not build_unflip_remap and build_flip_remap and build_remap: + build_unflip_remap = create_unflip_remap(build_remap,build_flip_remap) + + # label map + label_map = parameters.get( 'label_map',None) + classes_number = parameters.get( 'classes',2) + # perform filtering as final stage of the library creation + + pre_filters = parameters.get( 'pre_filters', None ) + post_filters = parameters.get( 'post_filters', parameters.get( 'filters', None )) + + # resampling order used at the final stage of the library creation + resample_order = parameters.get( 'resample_order',2) + + # use boundary anti-aliasing filter when resampling labels + resample_baa = parameters.get( 'resample_baa',True) + + # extend bounding box to reduce boundary effects + extend_boundary = parameters.get( 'extend_boundary',4) + + # extend masks + #dilate_mask = parameters.get( 'dilate_mask',3) + op_mask = parameters.get( 'op_mask','E[2] D[4]') + + # if linear registration should be performed + do_initial_register = parameters.get( 'initial_register', + parameters.get( 'linear_register', {})) + + if do_initial_register is not None and isinstance(do_initial_register,dict): + initial_register = do_initial_register + do_initial_register = True + else: + initial_register={} + + + inital_reg_type = parameters.get( 'initial_register_type', + parameters.get( 'linear_register_type', + initial_register.get('type','-lsq12'))) + + inital_reg_ants = parameters.get( 'initial_register_ants', + parameters.get( 'linear_register_ants', False)) + + inital_reg_options = parameters.get( 'initial_register_options', + 
initial_register.get('options',None) ) + + inital_reg_downsample = parameters.get( 'initial_register_downsample', + initial_register.get('downsample',None)) + + inital_reg_use_mask = parameters.get( 'initial_register_use_mask', + initial_register.get('use_mask',False)) + + initial_reg_objective = initial_register.get('objective','-xcorr') + + # perform local linear registration + do_initial_local_register = parameters.get( 'initial_local_register', + parameters.get( 'local_linear_register', {}) ) + if do_initial_local_register is not None and isinstance(do_initial_local_register,dict): + initial_local_register=do_initial_local_register + do_initial_local_register=True + else: + initial_local_register={} + + local_reg_type = parameters.get( 'local_register_type', + initial_local_register.get('type','-lsq12')) + + local_reg_ants = parameters.get( 'local_register_ants', False) + + local_reg_opts = parameters.get( 'local_register_options', + initial_local_register.get('options',None)) + + local_reg_bbox = parameters.get( 'local_register_bbox', + initial_local_register.get('bbox',False )) + + local_reg_downsample = parameters.get( 'local_register_downsample', + initial_local_register.get('downsample',None)) + + local_reg_use_mask = parameters.get( 'local_register_use_mask', + initial_local_register.get('use_mask',True)) + + local_reg_objective = initial_local_register.get('objective','-xcorr') + + # if non-linear registration should be performed for library creation + do_nonlinear_register = parameters.get( 'non_linear_register',False) + + # if non-linear registration should be performed with ANTS + do_nonlinear_register_ants= parameters.get( 'non_linear_register_ants',False) + nonlinear_register_type = parameters.get( 'non_linear_register_type',None) + if nonlinear_register_type is None: + if do_nonlinear_register_ants: + nonlinear_register_type='ants' + + nlreg_level = parameters.get('non_linear_register_level', 2) + nlreg_start = parameters.get('non_linear_register_start', 16) + nlreg_options = parameters.get('non_linear_register_options', None) + nlreg_downsample = parameters.get('non_linear_register_downsample', None) + + modalities = parameters.get( 'modalities',1 ) - 1 + + create_patch_norm_lib = parameters.get( 'create_patch_norm_lib',False) + patch_norm_lib_pct = parameters.get( 'patch_norm_lib_pct', 0.1 ) + patch_norm_lib_sub = parameters.get( 'patch_norm_lib_sub', 1 ) + patch_norm_lib_patch = parameters.get( 'patch_norm_lib_patch', 2 ) # 5x5x5 patches + + use_fake_masks = parameters.get( 'fake_mask', False ) + + # prepare directories + if not os.path.exists(output): + os.makedirs(output) + + if not os.path.exists(work_dir): + os.makedirs(work_dir) + + # 0. 
+        # 0. go over input samples, prepare variables
+        input_samples    = []
+        filtered_samples = []
+        lin_xfm          = []
+        lin_samples      = []
+        tmp_lin_samples  = []
+        bbox_lin_xfm     = []
+
+        final_samples    = []
+        warped_samples   = []
+        final_transforms = []
+        tmp_log_samples  = []
+
+        patch_norm_db  = output + os.sep + 'patch_norm.db'
+        patch_norm_idx = output + os.sep + 'patch_norm.idx'
+
+        # identity xfm
+        identity_xfm = MriTransform(prefix=work_dir, name='identity' )
+        with mincTools() as m:
+            m.param2xfm(identity_xfm.xfm)
+            m.param2xfm(identity_xfm.xfm_f)
+
+        # if the library is not already a list, assume it's a path to a csv file
+        if not isinstance(library, list):
+            with open(library, 'r') as f:
+                library = list(csv.reader(f))
+
+        # setup files
+        model = MriDataset(scan=reference_model, mask=reference_mask, add=reference_model_add)
+
+        # each row is positional: scan, seg, additional modalities,
+        # optionally followed by a subject-specific mask
+        for (j,i) in enumerate(library):
+            scan = i[0]
+            seg  = i[1]
+            mask = None
+
+            add = i[2:modalities+2] # additional modalities
+
+            if len(i)>modalities+2 : # assume that the extra file is a subject-specific mask
+                mask = i[modalities+2]
+            elif use_fake_masks : # create mask from segmentation
+                mask = work_dir + os.sep + 'fake_mask_' + os.path.basename(scan)
+                create_fake_mask(seg, mask, op=op_mask)
+
+            sample = MriDataset(scan=scan, seg=seg, mask=mask, protect=True, add=add)
+            input_samples.append( sample )
+            filtered_samples.append( MriDataset( prefix=work_dir, name='flt_'+sample.name, add_n=modalities ) )
+
+            lin_xfm.append( MriTransform(prefix=work_dir, name='lin_'+sample.name ) )
+            bbox_lin_xfm.append( MriTransform(prefix=work_dir, name='lin_bbox_'+sample.name ) )
+            lin_samples.append( MriDataset( prefix=work_dir, name='lin_'+sample.name, add_n=modalities ) )
+            tmp_lin_samples.append( MriDataset( prefix=work_dir, name='tmp_lin_'+ sample.name, add_n=modalities ) )
+            tmp_log_samples.append( MriDataset( prefix=work_dir, name='tmp_log_'+ sample.name ) )
+            final_samples.append( MriDataset( prefix=output, name=sample.name, add_n=modalities ) )
+            warped_samples.append( MriDataset( prefix=output, name='nl_'+sample.name, add_n=modalities ) )
+            final_transforms.append( MriTransform(prefix=output, name='nl_'+sample.name ) )
+
+        # list of pending futures
+        results = []
+
+        if pre_filters is not None:
+            # apply pre-filtering before other stages
+            filter_all = []
+
+            for (j,i) in enumerate(input_samples):
+                # a HACK? carry segmentation and mask over unchanged
+                filtered_samples[j].seg  = input_samples[j].seg
+                filtered_samples[j].mask = input_samples[j].mask
+
+                filter_all.append( futures.submit(
+                    filter_sample, input_samples[j], filtered_samples[j], pre_filters, model=model
+                ))
+
+            futures.wait(filter_all, return_when=futures.ALL_COMPLETED)
+        else:
+            filtered_samples = input_samples
+
+        if build_symmetric:
+            # need to flip the inputs
+            flipdir = work_dir+os.sep+'flip'
+            if not os.path.exists(flipdir):
+                os.makedirs(flipdir)
+            flip_all = []
+
+            labels_datatype = 'short' # TODO: determine optimal here
+            #if largest_label>255:labels_datatype='short'
+
+            for (j,i) in enumerate(filtered_samples):
+                i.scan_f = flipdir+os.sep+os.path.basename(i.scan)
+                i.add_f  = []
+
+                for a in i.add:
+                    i.add_f.append(flipdir+os.sep+os.path.basename(a))
+
+                if i.mask is not None:
+                    i.mask_f = flipdir+os.sep+'mask_'+os.path.basename(i.scan)
+                else:
+                    i.mask_f = None
+
+                flip_all.append( futures.submit( generate_flip_sample, i, labels_datatype=labels_datatype ) )
+
+            futures.wait(flip_all, return_when=futures.ALL_COMPLETED)
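+        # The submit/wait pattern used throughout this module mirrors the
+        # concurrent.futures API (scoop provides the same interface), roughly:
+        #
+        #   f = futures.submit(some_function, arg1, arg2)
+        #   futures.wait([f], return_when=futures.ALL_COMPLETED)
+        #   value = f.result()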
+        # 1. run global linear registration if needed
+        if do_initial_register :
+            for (j,i) in enumerate(filtered_samples):
+                if inital_reg_type=='elx' or inital_reg_type=='elastix' :
+                    results.append( futures.submit(
+                        elastix_registration, i, model, lin_xfm[j],
+                        symmetric=build_symmetric,
+                        parameters=inital_reg_options,
+                        downsample=inital_reg_downsample,
+                        use_mask=inital_reg_use_mask
+                        ) )
+                elif inital_reg_type=='ants' or inital_reg_ants:
+                    results.append( futures.submit(
+                        linear_registration, i, model, lin_xfm[j],
+                        symmetric=build_symmetric,
+                        linreg=inital_reg_options,
+                        ants=True,
+                        downsample=inital_reg_downsample,
+                        use_mask=inital_reg_use_mask
+                        ) )
+                else:
+                    results.append( futures.submit(
+                        linear_registration, i, model, lin_xfm[j],
+                        symmetric=build_symmetric,
+                        reg_type=inital_reg_type,
+                        linreg=inital_reg_options,
+                        downsample=inital_reg_downsample,
+                        use_mask=inital_reg_use_mask,
+                        objective=initial_reg_objective
+                        ) )
+            # TODO: do we really need to wait for the result here?
+            futures.wait( results, return_when=futures.ALL_COMPLETED )
+            # TODO: determine if we need to resample input files here
+            #lin_samples=input_samples
+
+        # 2. for each part run linear registration, apply flip and do symmetric too
+
+
+        # 3. perform local linear registration and local intensity normalization if needed
+        # create a local reference model
+        local_model     = None
+        local_model_ovl = None
+        local_model_avg = None
+        local_model_sd  = None
+
+        if reference_local_model is None :
+            local_model     = MriDataset( prefix=output, name='local_model', add_n=modalities )
+            local_model_ovl = MriDataset( prefix=output, name='local_model_ovl' )
+            local_model_avg = MriDataset( prefix=output, name='local_model_avg', add_n=modalities )
+            local_model_sd  = MriDataset( prefix=output, name='local_model_sd', add_n=modalities )
+
+            if not os.path.exists( local_model.scan ):
+                for (j,i) in enumerate( filtered_samples ):
+                    xfm=None
+                    if do_initial_register:
+                        xfm=lin_xfm[j]
+
+                    results.append( futures.submit(
+                        warp_rename_seg, i, model, tmp_lin_samples[j],
+                        transform=xfm,
+                        symmetric=build_symmetric,
+                        symmetric_flip=build_symmetric,
+                        lut=build_remap,
+                        flip_lut=build_flip_remap,
+                        resample_order=0,
+                        resample_baa=False,
+                        create_mask=use_fake_masks,
+                        op_mask=op_mask
+                        ) )
+
+                futures.wait(results, return_when=futures.ALL_COMPLETED)
+                create_local_model(tmp_lin_samples, model, local_model, extend_boundary=extend_boundary, op=op_mask)
+
+            if not os.path.exists(local_model.scan_f) and build_symmetric and build_symmetric_flip:
+                create_local_model_flip(local_model, model, remap=build_unflip_remap, op=op_mask)
+        else:
+            local_model = MriDataset(scan=reference_local_model, mask=reference_local_mask)
+
+            local_model.scan_f = reference_local_model_flip
+            local_model.mask_f = reference_local_mask_flip
+
+            # allocate the averaging outputs here as well, otherwise the
+            # model-averaging stage below would dereference None
+            local_model_ovl = MriDataset( prefix=output, name='local_model_ovl' )
+            local_model_avg = MriDataset( prefix=output, name='local_model_avg', add_n=modalities )
+            local_model_sd  = MriDataset( prefix=output, name='local_model_sd', add_n=modalities )
+
+        if do_initial_local_register:
+            for (j,i) in enumerate(filtered_samples):
+                init_xfm=None
+                if do_initial_register:
+                    init_xfm=lin_xfm[j]
+
+                if local_reg_type=='elx' or local_reg_type=='elastix' :
+                    results.append( futures.submit(
+                        elastix_registration, i, local_model, bbox_lin_xfm[j],
+                        init_xfm=init_xfm,
+                        symmetric=build_symmetric,
+                        parameters=local_reg_opts,
+                        bbox=local_reg_bbox,
+                        downsample=local_reg_downsample,
+                        use_mask=local_reg_use_mask
+                        ) )
+                elif local_reg_type=='ants' or local_reg_ants:
+                    results.append( futures.submit(
+                        linear_registration, i, local_model, bbox_lin_xfm[j],
+                        init_xfm=init_xfm,
+                        symmetric=build_symmetric,
+                        reg_type=local_reg_type,
+                        linreg=local_reg_opts,
+                        close=True,
+                        ants=True,
+                        bbox=local_reg_bbox,
downsample=local_reg_downsample, + use_mask=local_reg_use_mask + ) ) + else: + if not do_initial_register: + init_xfm=identity_xfm # to avoid strange initialization errors + + results.append( futures.submit( + linear_registration, i, local_model, bbox_lin_xfm[j], + init_xfm=init_xfm, + symmetric=build_symmetric, + reg_type=local_reg_type, + linreg=local_reg_opts, + close=True, + bbox=local_reg_bbox, + downsample=local_reg_downsample, + use_mask=local_reg_use_mask, + objective=local_reg_objective + ) ) + + # TODO: do we really need to wait for result here? + futures.wait(results, return_when=futures.ALL_COMPLETED ) + else: + bbox_lin_xfm=lin_xfm + + + # create bbox samples + results=[] + for (j, i) in enumerate(filtered_samples): + xfm=None + + if i.mask is None: + final_samples[j].mask=None + final_samples[j].mask_f=None + + if do_initial_local_register or do_initial_register: + xfm=bbox_lin_xfm[j] + # + results.append( futures.submit( + warp_rename_seg, i, local_model, final_samples[j], + transform=xfm, + symmetric=build_symmetric, + symmetric_flip=build_symmetric, + lut=build_remap, + flip_lut=build_flip_remap, + resample_order=resample_order, + resample_baa=resample_baa, + create_mask=use_fake_masks, + op_mask=op_mask + )) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + results=[] + for (j, i) in enumerate(filtered_samples): + xfm=None + if do_initial_local_register or do_initial_register: + xfm=bbox_lin_xfm[j] + + results.append( futures.submit( + warp_sample, i, local_model, final_samples[j], + transform=xfm, + symmetric=build_symmetric, + symmetric_flip=build_symmetric, + resample_order=resample_order, + filters=post_filters, + )) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + if create_patch_norm_lib: + create_patch_norm_db( final_samples, patch_norm_db, + patch_norm_idx, + pct=patch_norm_lib_pct, + sub=patch_norm_lib_sub, + patch=patch_norm_lib_patch) + results=[] + if do_nonlinear_register: + for (j, i) in enumerate(final_samples): + # TODO: decide what to do with mask + i.mask=None + + if nonlinear_register_type=='elx' or nonlinear_register_type=='elastix' : + results.append( futures.submit( + elastix_registration, + i, + local_model, + final_transforms[j], + symmetric=build_symmetric, + level=nlreg_level, + parameters=nlreg_options, + output_sample=warped_samples[j], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa, + nl=True, + downsample=nlreg_downsample + ) ) + elif nonlinear_register_type=='ants' or do_nonlinear_register_ants: + results.append( futures.submit( + non_linear_registration, + i, + local_model, + final_transforms[j], + symmetric=build_symmetric, + level=nlreg_level, + parameters=nlreg_options, + output_sample=warped_samples[j], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa, + ants=True, + downsample=nlreg_downsample + ) ) + else: + results.append( futures.submit( + non_linear_registration, + i, + local_model, + final_transforms[j], + symmetric=build_symmetric, + level=nlreg_level, + parameters=nlreg_options, + output_sample=warped_samples[j], + warp_seg=True, + resample_order=resample_order, + resample_baa=resample_baa, + ants=False, + downsample=nlreg_downsample + ) ) + final_samples[j].mask=None + # TODO: do we really need to wait for result here? 
+ futures.wait(results, return_when=futures.ALL_COMPLETED) + + with mincTools() as m: + # a hack, to replace a rough model with a new one + if os.path.exists(local_model.seg): + os.unlink(local_model.seg) + + # create majority voted model segmentation, for ANIMAL segmentation if needed + segs=['multiple_volume_similarity'] + + segs.extend([ i.seg for i in warped_samples ]) + if build_symmetric: segs.extend([ i.seg_f for i in warped_samples ]) + + segs.extend(['--majority', local_model.seg, '--bg', '--overlap', local_model_ovl.scan ] ) + m.command(segs,inputs=[],outputs=[local_model.seg,local_model_ovl.scan]) + + avg=['mincaverage','-float'] + avg.extend([ i.scan for i in warped_samples ]) + if build_symmetric: avg.extend([ i.scan_f for i in warped_samples ]) + avg.extend([local_model_avg.scan, '-sdfile', local_model_sd.scan ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.scan,local_model_sd.scan]) + + for i in range(modalities): + avg=['mincaverage','-float'] + avg.extend([ j.add[i] for j in warped_samples ]) + if build_symmetric: avg.extend([ j.add_f[i] for j in warped_samples ]) + + avg.extend([local_model_avg.add[i], '-sdfile', local_model_sd.add[i] ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.add[i],local_model_sd.add[i]]) + else: + with mincTools() as m: + # a hack, to replace a rough model with a new one + if os.path.exists(local_model.seg): + os.unlink(local_model.seg) + + # create majority voted model segmentation, for ANIMAL segmentation if needed + segs=['multiple_volume_similarity'] + segs.extend([ i.seg for i in final_samples ]) + + if build_symmetric: segs.extend([ i.seg_f for i in final_samples ]) + + segs.extend(['--majority', local_model.seg, '--bg','--overlap', local_model_ovl.scan] ) + m.command(segs,inputs=[],outputs=[local_model.seg,local_model_ovl.scan]) + + avg=['mincaverage','-float'] + avg.extend([ i.scan for i in final_samples ]) + if build_symmetric: avg.extend([ i.scan_f for i in final_samples ]) + avg.extend([local_model_avg.scan, '-sdfile', local_model_sd.scan ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.scan,local_model_sd.scan]) + + for i in range(modalities): + avg=['mincaverage','-float'] + avg.extend([ j.add[i] for j in final_samples ]) + if build_symmetric: avg.extend([ j.add_f[i] for j in final_samples ]) + avg.extend([local_model_avg.add[i], '-sdfile', local_model_sd.add[i] ] ) + m.command(avg,inputs=[],outputs=[local_model_avg.add[i],local_model_sd.add[i]]) + + # number of classes including bg + #classes_number=2 + ## 6. 
create training library description
+        #with mincTools() as m:
+        #classes_number=int(m.execute_w_output(['mincstats', '-q', '-max',local_model.seg ]).rstrip("\n"))+1
+
+        library_description={}
+        # library models
+        library_description['model']      = model.scan
+        library_description['model_mask'] = model.mask
+        library_description['model_add']  = model.add
+
+        library_description['local_model']      = local_model.scan
+        library_description['local_model_add']  = local_model.add
+        library_description['local_model_mask'] = local_model.mask
+        library_description['local_model_seg']  = local_model.seg
+        library_description['local_model_avg']  = local_model_avg.scan
+        library_description['local_model_ovl']  = local_model_ovl.scan
+        library_description['local_model_sd']   = local_model_sd.scan
+
+        # library parameters
+        library_description['map']              = inv_dict(dict(build_remap))
+        library_description['classes_number']   = classes_number
+        library_description['nl_samples_avail'] = do_nonlinear_register
+        library_description['modalities']       = modalities+1
+
+        # guard against an empty remap: labels then run from 0 to classes_number-1
+        if library_description['map']:
+            largest_label = max(library_description['map'].values(), key=lambda p: int(p))
+        else:
+            largest_label = classes_number - 1
+
+        library_description['seg_datatype'] = 'short'
+        if int(largest_label) <= 255:
+            library_description['seg_datatype'] = 'byte'
+
+        library_description['gco_energy'] = output+os.sep+'gco_energy.csv'
+        estimate_gco_energy(final_samples, library_description['gco_energy'], classes=classes_number)
+        library_description['label_map'] = label_map
+
+        if build_symmetric and build_symmetric_flip:
+            library_description['local_model_flip']      = local_model.scan_f
+            library_description['local_model_add_flip']  = local_model.add_f
+            library_description['local_model_mask_flip'] = local_model.mask_f
+            library_description['local_model_seg_flip']  = local_model.seg_f
+            library_description['flip_map']              = inv_dict(dict(build_flip_remap))
+        else:
+            library_description['local_model_flip']      = None
+            library_description['local_model_add_flip']  = []
+            library_description['local_model_mask_flip'] = None
+            library_description['flip_map']              = {}
+
+        library_description['library']=[]
+
+        for (j, i) in enumerate(final_samples):
+            ss=[i.scan, i.seg ]
+            ss.extend(i.add)
+
+            if do_nonlinear_register:
+                ss.extend( [ final_transforms[j].xfm, final_transforms[j].xfm_inv, warped_samples[j].scan, warped_samples[j].seg ])
+
+            library_description['library'].append(ss)
+
+            if build_symmetric:
+                ss=[i.scan_f, i.seg_f ]
+                ss.extend(i.add_f)
+
+                if do_nonlinear_register:
+                    ss.extend( [ final_transforms[j].xfm_f, final_transforms[j].xfm_f_inv, warped_samples[j].scan_f, warped_samples[j].seg_f ])
+
+                library_description['library'].append(ss)
+
+        save_library_info( library_description, output)
+        # cleanup
+        if cleanup:
+            shutil.rmtree(work_dir)
+
+    except mincError as e:
+        print("Exception in generate_library:{}".format(str(e)), file=sys.stderr)
+        traceback.print_exc(file=sys.stderr)
+        raise
+    except:
+        print("Exception in generate_library:{}".format(sys.exc_info()[0]), file=sys.stderr)
+        traceback.print_exc(file=sys.stderr)
+        raise
+
+
+def estimate_gco_energy(samples, output, classes=2):
+    '''Estimate the GCO label-interaction energy matrix from the library segmentations'''
+    with mincTools() as m:
+        files=[f.seg for f in samples]
+        cmd=['label_interaction_estimate']
+        cmd.extend(files)
+        cmd.append(output)
+        cmd.extend(['--classes', str(classes)])
+        m.command(cmd, inputs=files, outputs=[output])
+
+# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on
diff --git a/ipl/segment/train_ec.py b/ipl/segment/train_ec.py
new file mode 100644
index 0000000..153ef53
--- /dev/null
+++ b/ipl/segment/train_ec.py
@@ -0,0 +1,398 @@
+# -*- coding: utf-8 -*-
+#
+# @author Vladimir S. FONOV
+# @date
+#
+
+import shutil
+import os
+import sys
+import csv
+import copy
+import json
+import random
+import traceback
+
+# MINC stuff
+from ipl.minc_tools import mincTools, mincError
+
+# scoop parallel execution
+from scoop import futures
+
+from .structures import *
+from .fuse import *
+from .train import *
+from .filter import *
+from .error_correction import *
+
+
+def train_ec_loo( segmentation_library,
+                  segmentation_parameters=None,
+                  ec_parameters=None,
+                  debug=False,
+                  fuse_variant='fuse',
+                  regularize_variant='gc',
+                  ec_variant='ec',
+                  cleanup=False,
+                  ext=False,
+                  train_list=None):
+    '''Train error correction using leave-one-out cross-validation'''
+    # for each of N subjects run segmentation and compare with the manual one
+    try:
+        ec_variant = ec_parameters.get( 'variant' , ec_variant)
+        work_dir   = ec_parameters.get( 'work_dir', segmentation_library['prefix'] + os.sep + fuse_variant )
+        ec_output  = ec_parameters.get( 'output'  , work_dir + os.sep + ec_variant + '.pickle' )
+
+        ec_border_mask          = ec_parameters.get( 'border_mask'      , True )
+        ec_border_mask_width    = ec_parameters.get( 'border_mask_width', 3 )
+        ec_antialias_labels     = ec_parameters.get( 'antialias_labels' , True )
+        ec_blur_labels          = ec_parameters.get( 'blur_labels'      , 1.0 )
+        ec_expit_labels         = ec_parameters.get( 'expit_labels'     , 1.0 )
+        ec_normalize_labels     = ec_parameters.get( 'normalize_labels' , True )
+        ec_use_raw              = ec_parameters.get( 'use_raw'          , False )
+        ec_split                = ec_parameters.get( 'split'            , None )
+
+        ec_train_rounds         = ec_parameters.get( 'train_rounds', -1 )
+        ec_train_cv             = ec_parameters.get( 'train_cv'    , 1 )
+        ec_sample_pick_strategy = ec_parameters.get( 'train_pick'  , 'random' )
+        ec_max_samples          = ec_parameters.get( 'max_samples' , -1 )
+        modalities              = ec_parameters.get( 'train_modalities', segmentation_library.get('modalities',1) ) - 1
+
+        print("\n\n")
+        print("EC modalities:{}".format(modalities))
+        print("train_list={}".format(repr(train_list)))
+        print("ext={}".format(repr(ext)))
+        print("\n\n")
+
+        try:
+            if not os.path.exists(work_dir):
+                os.makedirs(work_dir)
+        except:
+            # may fail when several processes create the directory at once
+            pass
+
+        if (train_list is not None) and not isinstance(train_list, list):
+            print(repr(train_list))
+            with open(train_list, 'r') as f:
+                train_list = list(csv.reader(f))
+
+        # mask of the local model, used as the default training mask
+        local_model_mask = segmentation_library['local_model_mask']
+
+        # disable EC options if present
+        segmentation_parameters['ec_options'] = None
+
+        ec_train      = []
+        ec_train_file = work_dir+os.sep+'train_ec_'+ec_variant+'.json'
+        #ec_train_library = segmentation_library['library']
+        ec_work_dirs  = []
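+
+        # For illustration, a hypothetical minimal ec_parameters dict:
+        #
+        #   ec_parameters = {
+        #       "variant":      "ec",
+        #       "train_rounds": 10,
+        #       "train_cv":     1,
+        #       "border_mask":  True
+        #   }
+        #
+        # any key that is omitted falls back to the defaults read above.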
+
+        if not os.path.exists( ec_train_file ):
+            results=[]
+
+            _train_list=[]
+            # if we have pre-segmented scans, we should pre-process the
+            # training library again (!) and train on the pre-segmented scans
+            if ext and train_list :
+                results2=[]
+
+                for (i,j) in enumerate( train_list ):
+                    n = os.path.basename( j[0] ).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+
+                    output_pre_seg = work_dir+os.sep+'pre_'+n
+                    ec_work_dir    = work_dir+os.sep+'work_pre_'+n
+
+                    #TODO: find out how to select the appropriate segmentation
+                    train_sample     = j[0]
+                    train_segment    = j[1]
+                    train_presegment = j[2]
+                    train_add        = j[ 3: 3 + modalities ]
+
+                    experiment_segmentation_library = copy.deepcopy(segmentation_library)
+                    print("Running pre-processing on {} - {}".format(train_sample, train_presegment))
+
+                    results2.append( futures.submit(
+                        fusion_segment,
+                        train_sample,
+                        experiment_segmentation_library,
+                        work_dir+os.sep+n,
+                        parameters=segmentation_parameters,
+                        debug=True,
+                        work_dir=ec_work_dir,
+                        ec_variant='noec',
+                        fuse_variant=fuse_variant,
+                        regularize_variant=regularize_variant,
+                        add=train_add,
+                        cleanup=cleanup,
+                        presegment=train_presegment,
+                        preprocess_only=True
+                    ))
+                ###
+                print("waiting for {} jobs".format(len(results2)))
+                futures.wait(results2, return_when=futures.ALL_COMPLETED)
+                print("Finished!")
+
+                # now pre-fill the training library with freshly pre-processed samples
+                for (_i,_j) in enumerate(results2):
+                    print("{} - done ".format(_j.result()[1]['bbox_sample'].seg))
+                    sample_id = os.path.basename(train_list[_i][0]).rsplit('.gz',1)[0].rsplit('.mnc',1)[0]
+                    # include into the training list
+                    train_list_i = [ i for i,j in enumerate(segmentation_library['library']) if j[0].find(sample_id)>=0 ]
+                    # the output should be either one sample, or two when the
+                    # symmetrized version is used
+
+                    if len(train_list_i)==1:
+                        # we have a single match!
+                        match = segmentation_library['library'][train_list_i[0]]
+
+                        train = match[0:2]
+                        train.append(_j.result()[1]['bbox_sample'].seg)
+                        train.extend(match[2:len(match)])
+                        _train_list.append(train)
+                    elif len(train_list_i)==2:
+                        # we have straight and flipped samples;
+                        # we assume that straight is first and flipped is second
+                        match = segmentation_library['library'][train_list_i[0]]
+
+                        train = match[0:2]
+                        train.append(_j.result()[1]['bbox_sample'].seg)
+                        train.extend(match[2:len(match)])
+                        _train_list.append(train)
+
+                        # flipped version
+                        match = segmentation_library['library'][train_list_i[1]]
+
+                        train = match[0:2]
+                        train.append(_j.result()[1]['bbox_sample'].seg_f)
+                        train.extend(match[2:len(match)])
+                        _train_list.append(train)
+                    else:
+                        raise Exception("Unexpected number of matches encountered!")
+ + else: + _train_list=segmentation_library['library'] + + + + segmentation_parameters['run_in_bbox']=True + if ec_train_cv == 1 : + print("_train_list={}".format(repr(_train_list))) + if ec_train_rounds > 0 and \ + ec_train_rounds < len( _train_list ): + + if ec_sample_pick_strategy=='random' and ec_max_samples>0: + ec_train_library=random.sample(_train_list,ec_max_samples) + else: + ec_train_library=_train_list[0:ec_max_samples] + else: + ec_train_library=_train_list + + for (_i, _j) in enumerate( ec_train_library ): + n=os.path.basename( _j[0] ).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] + + output_loo_seg=work_dir+os.sep+n + ec_work_dir=work_dir+os.sep+'work_ec_'+n + + #TODO: find out how to select appropriate segmentation + train_sample=_j[0] + train_segment=_j[1] + train_add=[] + + train_presegment=None + + if ext: + train_presegment=_j[2] + train_add=_j[3:3+modalities] + else: + train_add=_j[2:2+modalities] + + experiment_segmentation_library = copy.deepcopy(segmentation_library) + # remove sample + experiment_segmentation_library['library'] = [ i for i in segmentation_library['library'] if i[0].find(n)<0 ] + + results.append( futures.submit( + fusion_segment, + train_sample, + experiment_segmentation_library, + work_dir+os.sep+n, + parameters=segmentation_parameters, + debug=debug, + work_dir=ec_work_dir, + ec_variant='noec', + fuse_variant=fuse_variant, + regularize_variant=regularize_variant, + add=train_add, + cleanup=cleanup, + presegment=train_presegment + )) + + ec_work_dirs.append(ec_work_dir) + else: + validation_library_idx=range(len(_train_list)) + ec_train_library=[] + for i in range( ec_train_rounds ): + ran_file = work_dir + os.sep + ('random_{}_{}.json'.format(ec_variant,i)) + if not os.path.exists( ran_file ): + rem_list=random.sample( validation_library_idx, ec_train_cv ) + with open( ran_file,'w') as f: + json.dump(rem_list,f) + else: + with open( ran_file,'r') as f: + rem_list=json.load(f) + + # ec_sample_pick_strategy=='random' + + # list of subjects + rem_items=[ _train_list[j] for j in rem_list ] + + rem_n=[os.path.basename(j[0]).rsplit('.gz',1)[0].rsplit('.mnc',1)[0] for j in rem_items] + rem_lib=[] + + for j in rem_n: + rem_lib.extend( [ k for (k,t) in enumerate( _train_list ) if t[0].find(j)>=0 ] ) + + if debug: print(repr(rem_lib)) + rem_lib=set(rem_lib) + #prepare exclusion list + experiment_segmentation_library=copy.deepcopy(segmentation_library) + + experiment_segmentation_library[ 'library' ]=\ + [ k for j,k in enumerate( segmentation_library[ 'library' ] ) if j not in rem_lib ] + + for j,k in enumerate( rem_items ): + + output_experiment=work_dir+os.sep+'{}_{}_{}'.format(i,rem_n[j],'ec') + ec_work_dir=work_dir+os.sep+'work_{}_{}_{}'.format(i,rem_n[j],fuse_variant) + + # ??? 
+ sample=[k[0],k[1]] + presegment=None + if ext: + presegment=k[2] + sample.extend(k[3:3+modalities]) + else: + sample.extend(k[2:2+modalities]) + + ec_train_library.append(sample) + + results.append( futures.submit( + fusion_segment, + k[0], + experiment_segmentation_library, + output_experiment, + parameters=segmentation_parameters, + debug=debug, + work_dir=ec_work_dir, + ec_variant='noec', + fuse_variant=fuse_variant, + regularize_variant=regularize_variant, + add=k[2:2+modalities], + cleanup=cleanup, + presegment=presegment + )) + ec_work_dirs.append(ec_work_dir) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + results2=[] + results3=[] + + for (i,j) in enumerate( ec_train_library ): + train_sample=j[0] + train_segment=j[1] + train_add=j[2:2+modalities] + train_mask=local_model_mask + auto_segment=results[i].result()[0] + + # TODO: use the subject-specific mask somehow? + if ec_border_mask: + train_mask=auto_segment.rsplit( '.mnc',1 )[0] + '_' + ec_variant+'_train_mask.mnc' + results2.append( + futures.submit( make_border_mask, + auto_segment, + train_mask, + width=ec_border_mask_width, + labels=experiment_segmentation_library[ 'classes_number' ] + ) ) + + # need to split up multilabel segmentation for training + if experiment_segmentation_library[ 'classes_number' ]>2 and ( not ec_use_raw ) : + print("Splitting into individual files: class_number={} use_raw={}".format(experiment_segmentation_library[ 'classes_number' ],ec_use_raw)) + labels_prefix=auto_segment.rsplit('.mnc', 1)[0] + + results3.append( futures.submit( split_labels, auto_segment, + experiment_segmentation_library['classes_number'], + labels_prefix, + antialias=ec_antialias_labels, + blur=ec_blur_labels, + expit=ec_expit_labels, + normalize=ec_normalize_labels ) ) + + ec_input=[ train_sample ] + ec_input.extend(train_add) + + ec_input.extend(['{}_{:02d}.mnc'.format(labels_prefix,i) for i in range(experiment_segmentation_library['classes_number']) ]) + ec_input.extend([ auto_segment, train_mask, train_segment ]) + ec_train.append( ec_input ) + + else : # binary label + ec_input=[ train_sample ] + ec_input.extend(train_add) + ec_input.extend([ auto_segment, auto_segment, train_mask, train_segment ]) + ec_train.append( ec_input ) + + if ec_border_mask: + futures.wait(results2, return_when=futures.ALL_COMPLETED) + + if experiment_segmentation_library['classes_number']>2 : + futures.wait(results3, return_when=futures.ALL_COMPLETED) + + # TODO run Error correction here + with open(ec_train_file ,'w') as f: + json.dump(ec_train, f ,indent=1) + else: + with open(ec_train_file,'r') as r: + ec_train=json.load(r) + + if ec_split is None : + if not os.path.exists( ec_output ) : + errorCorrectionTrain( ec_train, ec_output , + parameters=ec_parameters, debug=debug, + multilabel=segmentation_library[ 'classes_number' ] ) + else: + results=[] + for s in range(ec_split): + + out=ec_output.rsplit('.pickle',1)[0] + '_' + str(s) + '.pickle' + + if not os.path.exists(out): + results.append( futures.submit( + errorCorrectionTrain, ec_train, out , + parameters=ec_parameters, debug=debug, partition=ec_split, part=s, + multilabel=segmentation_library[ 'classes_number' ] ) ) + + futures.wait(results, return_when=futures.ALL_COMPLETED) + + # TODO: cleanup not-needed files here! 
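+        # note: only the per-sample work directories are removed below; the
+        # trained .pickle files and train_ec_*.json remain in work_dir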
+ if cleanup: + for i in ec_work_dirs: + shutil.rmtree(i) + except mincError as e: + print("Exception in train_ec_loo:{}".format(str(e))) + traceback.print_exc( file=sys.stdout ) + raise + except : + print("Exception in train_ec_loo:{}".format(sys.exc_info()[0])) + traceback.print_exc( file=sys.stdout) + raise + +# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80;show-tabs on diff --git a/ipl/temp_files.py b/ipl/temp_files.py new file mode 100644 index 0000000..e69de29 diff --git a/ipl/test.jpg b/ipl/test.jpg new file mode 100644 index 0000000..ab7134f Binary files /dev/null and b/ipl/test.jpg differ