Merge pull request #457 from dPys/development
Development
dPys authored Dec 15, 2020
2 parents a379cc4 + 14c1e61 commit b729aab
Showing 8 changed files with 1,230 additions and 63 deletions.
3 changes: 2 additions & 1 deletion Dockerfile
@@ -86,7 +86,8 @@ RUN apt-get update -qq \
&& rm -r fsl* \
&& chmod 777 -R $FSLDIR/bin \
&& chmod 777 -R /usr/lib/fsl/5.0 \
&& echo "tmpfs /tmp tmpfs rw,nodev,nosuid,size=10G 0 0" >> /etc/fstab
&& echo "tmpfs /tmp tmpfs rw,nodev,nosuid,size=10G 0 0" >> /etc/fstab \
&& echo "GRUB_CMDLINE_LINUX_DEFAULT="rootflags=uquota,pquota"" >> /etc/default/grub
# && wget --retry-connrefused --waitretry=5 --read-timeout=60 --timeout=60 -t 0 -q -O examples.tar.gz "https://osf.io/ye4vf/download" && tar -xvzf examples.tar.gz -C /tmp \
# && rm -rf examples.tar.gz

2 changes: 1 addition & 1 deletion pynets/__about__.py
@@ -6,7 +6,7 @@
# from ._version import get_versions
# __version__ = get_versions()['version']
# del get_versions
__version__ = "1.0.3"
__version__ = "1.0.4"

__packagename__ = "pynets"
__copyright__ = "Copyright 2016, Derek Pisner"
1,207 changes: 1,187 additions & 20 deletions pynets/cli/pynets_collect.py

Large diffs are not rendered by default.

9 changes: 4 additions & 5 deletions pynets/cli/pynets_run.py
@@ -3515,6 +3515,7 @@ def main():
"PyNets not installed! Ensure that you are referencing the correct"
" site-packages and using Python3.6+"
)
sys.exit(1)

if len(sys.argv) < 1:
print("\nMissing command-line inputs! See help options with the -h"
@@ -3541,11 +3542,9 @@
run_uuid = retval.get("run_uuid", None)

retcode = retcode or int(pynets_wf is None)
if retcode != 0:
sys.exit(retcode)
if retcode == 1:
return retcode

# Clean up master process before running workflow, which may create
# forks
gc.collect()

mgr.shutdown()
Expand All @@ -3555,7 +3554,7 @@ def main():

rmtree(work_dir, ignore_errors=True)

sys.exit(0)
return 0


if __name__ == "__main__":
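
The pynets_run.py hunks replace `sys.exit()` calls inside `main()` with `return` codes, which keeps the function importable and testable; the process exit status is then applied once at the entry point instead of being raised mid-function. A minimal sketch of that pattern, using a hypothetical `build_workflow()` in place of the real PyNets workflow builder:

```python
import sys


def build_workflow(argv):
    """Hypothetical stand-in for the real workflow builder."""
    # Return (workflow, retcode); a None workflow signals a setup failure.
    if not argv:
        return None, 1
    return object(), 0


def main(argv=None):
    argv = sys.argv[1:] if argv is None else argv
    pynets_wf, retcode = build_workflow(argv)
    # As in the diff: propagate a failure code instead of exiting here,
    # so callers (and tests) can inspect the result directly.
    retcode = retcode or int(pynets_wf is None)
    if retcode == 1:
        return retcode
    # ... run the workflow, shut down the manager, remove work_dir ...
    return 0


if __name__ == "__main__":
    # The exit status is set once, at the process boundary.
    sys.exit(main())
```
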
5 changes: 2 additions & 3 deletions pynets/core/utils.py
@@ -1973,16 +1973,15 @@ def run(self):
watchdog_thread.join()
return 0

def _watchdog(self):
WATCHDOG_HARD_KILL_TIMEOUT = 7200
def _watchdog(self, watchdog_timeout=10800):

self.last_progress_time = time.time()

while self.last_progress_time == time.time():
if self.shutdown.wait(timeout=5):
return
last_progress_delay = time.time() - self.last_progress_time
if last_progress_delay < WATCHDOG_HARD_KILL_TIMEOUT:
if last_progress_delay < watchdog_timeout:
continue
try:
signal.signal(signal.SIGQUIT, dumpstacks)
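
In the utils.py hunk, the hard-coded `WATCHDOG_HARD_KILL_TIMEOUT` (7200 s) becomes a `watchdog_timeout` keyword argument with a 10800 s default, so callers can tune how long a run may stall before the watchdog fires. A self-contained sketch of the general pattern, assuming a shutdown event and a last-progress timestamp like those referenced in the hunk; the SIGQUIT stack dump from the original is replaced here by a plain callback:

```python
import threading
import time


class Watchdog:
    """Minimal sketch: fire a callback if no progress is seen for too long."""

    def __init__(self, on_stall, watchdog_timeout=10800):
        self.on_stall = on_stall
        self.watchdog_timeout = watchdog_timeout
        self.shutdown = threading.Event()
        self.last_progress_time = time.time()

    def notify_progress(self):
        # Called by the monitored workload whenever it makes progress.
        self.last_progress_time = time.time()

    def _watchdog(self):
        while True:
            # Poll every 5 s; return promptly once shutdown is requested.
            if self.shutdown.wait(timeout=5):
                return
            if time.time() - self.last_progress_time < self.watchdog_timeout:
                continue
            self.on_stall()
            return

    def start(self):
        thread = threading.Thread(target=self._watchdog, daemon=True)
        thread.start()
        return thread
```

In use, the workload would call `notify_progress()` as it completes units of work and set `shutdown` on exit.
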
34 changes: 17 additions & 17 deletions pynets/dmri/track.py
@@ -69,7 +69,7 @@ def reconstruction(conn_model, gtab, dwi_data, B0_mask):
)
except ValueError:
import sys
sys.exit(0)
sys.exit(1)

del dwi_data

@@ -191,7 +191,7 @@ def prep_tissues(
raise ValueError("Tissue classifier cannot be none.")
except ValueError:
import sys
sys.exit(0)
sys.exit(1)

del gm_data, wm_data, vent_csf_in_dwi_data

@@ -482,8 +482,7 @@ def track_ensemble(
while float(stream_counter) < float(target_samples) and float(ix) < 0.50*float(len(all_combs)):
with Parallel(n_jobs=nthreads, backend='loky',
mmap_mode='r+', temp_folder=joblib_dir,
verbose=0, max_nbytes='50000M',
timeout=timeout) as parallel:
verbose=0, timeout=timeout) as parallel:
out_streams = parallel(
delayed(run_tracking)(
i, recon_path, n_seeds_per_iter, directget, maxcrossing,
@@ -502,14 +501,15 @@

if len(out_streams) < min_streams:
ix += 2
print(f"Fewer than {min_streams} streamlines tracked on last"
f" iteration. Loosening tolerance and anatomical"
f" constraints. Check {tissues4d} or {recon_path}"
f" for errors...")
if track_type != 'particle':
tiss_class = 'wb'
roi_neighborhood_tol = float(roi_neighborhood_tol) * 1.05
min_length = float(min_length) * 0.95
print(f"Fewer than {min_streams} streamlines tracked "
f"on last iteration with cache directory: "
f"{cache_dir}. Loosening tolerance and "
f"anatomical constraints. Check {tissues4d} or "
f"{recon_path} for errors...")
# if track_type != 'particle':
# tiss_class = 'wb'
roi_neighborhood_tol = float(roi_neighborhood_tol) * 1.25
# min_length = float(min_length) * 0.9875
continue
else:
ix -= 1
@@ -531,19 +531,19 @@
)
gc.collect()
print(Style.RESET_ALL)
os.system(f"rm -f {joblib_dir}/*")
os.system(f"rm -rf {joblib_dir}/*")
except BaseException:
os.system(f"rm -f {tmp_files_dir} &")
os.system(f"rm -rf {tmp_files_dir} &")
return None

if ix >= 0.75*len(all_combs) and \
float(stream_counter) < float(target_samples):
print(f"Tractography failed. >{len(all_combs)} consecutive sampling "
f"iterations with few streamlines.")
os.system(f"rm -f {tmp_files_dir} &")
os.system(f"rm -rf {tmp_files_dir} &")
return None
else:
os.system(f"rm -f {tmp_files_dir} &")
os.system(f"rm -rf {tmp_files_dir} &")
print("Tracking Complete: ", str(time.time() - start))

del parallel, all_combs
@@ -730,7 +730,7 @@ def run_tracking(step_curv_combinations, recon_path,
"ERROR: No valid tracking method(s) specified.")
except ValueError:
import sys
sys.exit(0)
sys.exit(1)

# Filter resulting streamlines by those that stay entirely
# inside the brain
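
Across the track.py hunks, error paths now exit with status 1 rather than 0, scratch directories are cleared with `rm -rf` instead of `rm -f`, and the ensemble loop loosens `roi_neighborhood_tol` by 25% (the tissue-classifier and minimum-length relaxations are commented out) whenever an iteration yields fewer than `min_streams` streamlines. A stripped-down sketch of that loosen-and-retry loop around a joblib `Parallel` call, with a hypothetical `sample_streamlines()` standing in for `run_tracking()`:

```python
from joblib import Parallel, delayed


def sample_streamlines(seed_block, roi_neighborhood_tol):
    """Hypothetical stand-in for run_tracking(); returns dummy 'streamlines'."""
    return list(range(max(0, 10 - seed_block)))


def ensemble_track(seed_blocks, target_samples=50, min_streams=5,
                   roi_neighborhood_tol=6.0, nthreads=2, timeout=120):
    stream_counter, ix, streams = 0, 0, []
    while stream_counter < target_samples and ix < 0.5 * len(seed_blocks):
        with Parallel(n_jobs=nthreads, backend="loky", verbose=0,
                      timeout=timeout) as parallel:
            out = parallel(delayed(sample_streamlines)(i, roi_neighborhood_tol)
                           for i in seed_blocks[:nthreads])
        flat = [s for block in out for s in block]
        if len(flat) < min_streams:
            # Too few streamlines: advance the failure counter and loosen the
            # anatomical constraint, as in the diff, then retry.
            ix += 2
            roi_neighborhood_tol *= 1.25
            continue
        ix -= 1  # a productive pass offsets the failure budget
        streams.extend(flat)
        stream_counter += len(flat)
    return streams if stream_counter >= target_samples else None


if __name__ == "__main__":
    print(len(ensemble_track(list(range(20))) or []))
```
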
4 changes: 2 additions & 2 deletions pynets/runconfig.yaml
@@ -209,15 +209,15 @@ resource_dict: # Nipype workflow resource settings
- 'get_fa_node':
- (2, 1)
- 'run_tracking_node':
- (3, 8)
- (3, 10)
- 'thresh_diff_node':
- (1, 1.5)
- 'dsn_node':
- (1, 2)
- 'plot_all_node':
- (1, 2)
- 'streams2graph_node':
- (3, 4)
- (3, 6)
- 'build_multigraphs_node':
- (2, 8)
- 'plot_all_struct_func_node':
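
In the runconfig.yaml hunk, `run_tracking_node` moves from (3, 8) to (3, 10) and `streams2graph_node` from (3, 4) to (3, 6); per the block's comment these are Nipype workflow resource settings, presumably (cores, memory in GB) per node. A small sketch of reading such a block into a `{node: (n_procs, mem_gb)}` map, assuming the section parses as a list of single-key entries whose values are tuple-like strings (the loader below is illustrative, not the actual PyNets config reader):

```python
import ast

import yaml


def load_resource_dict(path="runconfig.yaml"):
    """Collect resource_dict entries such as 'run_tracking_node': [(3, 10)]."""
    with open(path) as f:
        cfg = yaml.safe_load(f)
    resources = {}
    for entry in cfg.get("resource_dict", []):
        # Assumed shape per entry: {node_name: ["(cores, gb)"]}
        for node_name, vals in entry.items():
            cores, mem_gb = ast.literal_eval(str(vals[0]))
            resources[node_name] = (int(cores), float(mem_gb))
    return resources


# e.g. load_resource_dict()["run_tracking_node"] -> (3, 10.0)
```
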
29 changes: 15 additions & 14 deletions pynets/stats/benchmarking.py
@@ -449,19 +449,19 @@ def benchmark_reproducibility(comb, modality, alg, par_dict, disc,
with open(label_file, 'r+') as f:
node_dict = json.load(f)
indices = [i['index'] for i in
node_dict.values()]
node_dict]
if indices == ixs:
coords = [i['coord'] for i in
node_dict.values()]
node_dict]

df_coords = pd.DataFrame(
[str(tuple(x)) for x in
coords]).T
df_coords.columns = [f"rsn-{comb_tuple[0]}_res-{comb_tuple[-2]}_{i}" for i in ixs]
labels = [
list(i['label'].values())[7] for i
list(i['label'])[7] for i
in
node_dict.values()]
node_dict]

df_labels = pd.DataFrame(
labels).T
@@ -520,7 +520,6 @@ def benchmark_reproducibility(comb, modality, alg, par_dict, disc,
df_summary.at[0, f"{lp}_icc"] = np.nan
coord_in = np.nan
label_in = np.nan
del c_icc

dict_sum[f"{lp}_coord"] = coord_in
dict_sum[f"{lp}_label"] = label_in
@@ -593,11 +592,11 @@ def benchmark_reproducibility(comb, modality, alg, par_dict, disc,
if __name__ == "__main__":
__spec__ = "ModuleSpec(name='builtins', loader=<class '_" \
"frozen_importlib.BuiltinImporter'>)"
base_dir = '/scratch/04171/dpisner/HNU/HNU_outs/triple'
#base_dir = '/scratch/04171/dpisner/HNU/HNU_outs/outputs_language'
#base_dir = '/scratch/04171/dpisner/HNU/HNU_outs/triple'
base_dir = '/scratch/04171/dpisner/HNU/HNU_outs/outputs_language'
thr_type = "MST"
icc = False
disc = True
icc = True
disc = False
int_consist = False
target_modality = 'dwi'

@@ -606,8 +605,10 @@ def benchmark_reproducibility(comb, modality, alg, par_dict, disc,
#embedding_types = ['OMNI']
embedding_types = ['OMNI', 'ASE']
modalities = ['func', 'dwi']
rsns = ['kmeans']
#rsns = ['language']
#rsns = ['kmeans', 'triple']
#rsns = ['triple']
#rsns = ['kmeans']
rsns = ['language']
#template = 'CN200'
template = 'MNI152_T1'
mets = ["global_efficiency",
@@ -719,7 +720,7 @@ def tuple_insert(tup, pos, ele):
cache_dir = tempfile.mkdtemp()

with Parallel(
n_jobs=128, require="sharedmem", backend='threading',
n_jobs=-1, require="sharedmem", backend='threading',
verbose=10, max_nbytes='20000M',
temp_folder=cache_dir
) as parallel:
Expand All @@ -734,8 +735,8 @@ def tuple_insert(tup, pos, ele):
# outs = []
# for comb in grid:
# outs.append(benchmark_reproducibility(
# comb, modality, alg, par_dict,
# disc, final_missingness_summary,
# comb, modality, alg, sub_dict_clean,
# disc, final_missingness_summary, icc_tmps_dir,
# ))
df_summary = pd.concat([i for i in outs if i is not None and not i.empty], axis=0)
df_summary = df_summary.dropna(axis=0, how='all')
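
Besides flipping the hard-coded script toggles in benchmarking.py (`icc`/`disc`, `base_dir`, and the `rsns` selection now point at the language-network run), the grid evaluation passes `n_jobs=-1` so joblib uses every available core with the shared-memory threading backend rather than a fixed 128 workers. A minimal, self-contained sketch of that calling pattern (the worker and grid are placeholders, not `benchmark_reproducibility`):

```python
from joblib import Parallel, delayed


def summarize(comb, shared_results):
    """Placeholder worker; with require='sharedmem' every thread sees the same list."""
    shared_results.append(comb)
    return {"comb": comb, "score": sum(comb)}


grid = [(i, j) for i in range(4) for j in range(4)]
shared_results = []

with Parallel(n_jobs=-1, require="sharedmem", backend="threading",
              verbose=0) as parallel:
    outs = parallel(delayed(summarize)(comb, shared_results) for comb in grid)

# One summary per grid cell; shared_results was filled in place by the threads.
print(len(outs), len(shared_results))  # 16 16
```
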
