diff --git a/.github/workflows/test_package.yml b/.github/workflows/test_package.yml
index a87f84f7..84d54cb5 100644
--- a/.github/workflows/test_package.yml
+++ b/.github/workflows/test_package.yml
@@ -27,7 +27,6 @@ jobs:
       run: |
         pip install --upgrade pip
         pip install pytest
-        pip install -r requirements_github.txt
         pip install -e .

     - name: Tests
diff --git a/dwi_ml/data/processing/space/neighborhood.py b/dwi_ml/data/processing/space/neighborhood.py
index e040a6ec..19d6018b 100644
--- a/dwi_ml/data/processing/space/neighborhood.py
+++ b/dwi_ml/data/processing/space/neighborhood.py
@@ -1,6 +1,5 @@
 # -*- coding: utf-8 -*-
 import itertools
-from typing import Union, List

 import numpy as np
 import torch
diff --git a/dwi_ml/data/processing/streamlines/data_augmentation.py b/dwi_ml/data/processing/streamlines/data_augmentation.py
index c3cedb9b..14634525 100644
--- a/dwi_ml/data/processing/streamlines/data_augmentation.py
+++ b/dwi_ml/data/processing/streamlines/data_augmentation.py
@@ -7,7 +7,8 @@
 from dipy.io.stateful_tractogram import StatefulTractogram
 from nibabel.streamlines.tractogram import (PerArrayDict,
                                             PerArraySequenceDict)
 import numpy as np
-from scilpy.tractograms.streamline_operations import resample_streamlines_step_size
+
+from scilpy.tracking.tools import resample_streamlines_step_size
 from scilpy.utils.streamlines import compress_sft
diff --git a/dwi_ml/data/processing/streamlines/post_processing.py b/dwi_ml/data/processing/streamlines/post_processing.py
index 67bed921..4a1d50b8 100644
--- a/dwi_ml/data/processing/streamlines/post_processing.py
+++ b/dwi_ml/data/processing/streamlines/post_processing.py
@@ -1,13 +1,12 @@
 # -*- coding: utf-8 -*-
-import logging
 from typing import List

 import numpy as np
 import torch

-from scilpy.tractograms.uncompress import uncompress
 from scilpy.tractanalysis.tools import \
     extract_longest_segments_from_profile as segmenting_func
+from scilpy.tractanalysis.uncompress import uncompress

 # We could try using nan instead of zeros for non-existing previous dirs...
 DEFAULT_UNEXISTING_VAL = torch.zeros((1, 3), dtype=torch.float32)
diff --git a/dwi_ml/models/embeddings.py b/dwi_ml/models/embeddings.py
index 3c846d14..5597a246 100644
--- a/dwi_ml/models/embeddings.py
+++ b/dwi_ml/models/embeddings.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-from typing import Tuple, Union, List
+from typing import Tuple, List

 import numpy as np
 import torch
diff --git a/dwi_ml/models/projects/learn2track_model.py b/dwi_ml/models/projects/learn2track_model.py
index 4e49e792..68dba721 100644
--- a/dwi_ml/models/projects/learn2track_model.py
+++ b/dwi_ml/models/projects/learn2track_model.py
@@ -11,10 +11,10 @@
     compute_directions, normalize_directions, compute_n_previous_dirs
 from dwi_ml.data.processing.streamlines.sos_eos_management import \
     convert_dirs_to_class
-from dwi_ml.models.embeddings import NoEmbedding, keys_to_embeddings
+from dwi_ml.models.embeddings import NoEmbedding
 from dwi_ml.models.main_models import (
     ModelWithPreviousDirections, ModelWithDirectionGetter,
-    ModelWithNeighborhood, MainModelOneInput, ModelOneInputWithEmbedding)
+    ModelWithNeighborhood, ModelOneInputWithEmbedding)
 from dwi_ml.models.stacked_rnn import StackedRNN

 logger = logging.getLogger('model_logger')  # Same logger as Super.
diff --git a/dwi_ml/models/projects/learn2track_utils.py b/dwi_ml/models/projects/learn2track_utils.py
index 5453f08f..0a805c12 100644
--- a/dwi_ml/models/projects/learn2track_utils.py
+++ b/dwi_ml/models/projects/learn2track_utils.py
@@ -1,7 +1,6 @@
 # -*- coding: utf-8 -*-
 import argparse

-from dwi_ml.models.embeddings import keys_to_embeddings
 from dwi_ml.models.projects.learn2track_model import Learn2TrackModel
diff --git a/dwi_ml/models/projects/transformers_utils.py b/dwi_ml/models/projects/transformers_utils.py
index 414814f0..558944f7 100644
--- a/dwi_ml/models/projects/transformers_utils.py
+++ b/dwi_ml/models/projects/transformers_utils.py
@@ -1,10 +1,8 @@
 # -*- coding: utf-8 -*-
-from dwi_ml.models.embeddings import keys_to_embeddings
 from dwi_ml.models.positional_encoding import (
     keys_to_positional_encodings)
 from dwi_ml.models.projects.transformer_models import (
     AbstractTransformerModel)
-from dwi_ml.models.utils.direction_getters import check_args_direction_getter

 sphere_choices = ['symmetric362', 'symmetric642', 'symmetric724',
                   'repulsion724', 'repulsion100', 'repulsion200']
diff --git a/dwi_ml/testing/projects/transformer_visualisation_utils.py b/dwi_ml/testing/projects/transformer_visualisation_utils.py
index 3fc352f9..26803485 100644
--- a/dwi_ml/testing/projects/transformer_visualisation_utils.py
+++ b/dwi_ml/testing/projects/transformer_visualisation_utils.py
@@ -10,7 +10,7 @@
 from scilpy.io.streamlines import load_tractogram_with_reference
 from scilpy.io.utils import add_reference_arg, add_overwrite_arg, add_bbox_arg
-from scilpy.tractograms.streamline_operations import resample_streamlines_step_size
+from scilpy.tracking.tools import resample_streamlines_step_size
 from scilpy.utils.streamlines import compress_sft

 from dwi_ml.io_utils import add_logging_arg
diff --git a/dwi_ml/training/batch_samplers.py b/dwi_ml/training/batch_samplers.py
index 30d8d5ef..ba612377 100644
--- a/dwi_ml/training/batch_samplers.py
+++ b/dwi_ml/training/batch_samplers.py
@@ -28,8 +28,6 @@
 from typing import List, Tuple, Iterator, Union

 import numpy as np
-import torch
-import torch.multiprocessing
 from torch.utils.data import Sampler

 from dwi_ml.data.dataset.multi_subject_containers import MultiSubjectDataset
diff --git a/dwi_ml/training/trainers.py b/dwi_ml/training/trainers.py
index 297cdeeb..2d989a7a 100644
--- a/dwi_ml/training/trainers.py
+++ b/dwi_ml/training/trainers.py
@@ -4,7 +4,7 @@
 import logging
 import os
 import shutil
-from typing import Union, List, Tuple
+from typing import Union, List

 from comet_ml import (Experiment as CometExperiment, ExistingExperiment)
 import numpy as np
diff --git a/dwi_ml/training/utils/batch_samplers.py b/dwi_ml/training/utils/batch_samplers.py
index 94c46d7d..184658ad 100644
--- a/dwi_ml/training/utils/batch_samplers.py
+++ b/dwi_ml/training/utils/batch_samplers.py
@@ -1,8 +1,6 @@
 # -*- coding: utf-8 -*-
 import argparse
-import logging

-from dwi_ml.experiment_utils.prints import format_dict_to_str
 from dwi_ml.experiment_utils.timer import Timer
 from dwi_ml.training.batch_samplers import DWIMLBatchIDSampler
diff --git a/dwi_ml/training/with_generation/batch_loader.py b/dwi_ml/training/with_generation/batch_loader.py
index 0c386020..19befc10 100644
--- a/dwi_ml/training/with_generation/batch_loader.py
+++ b/dwi_ml/training/with_generation/batch_loader.py
@@ -1,7 +1,5 @@
 # -*- coding: utf-8 -*-
-from typing import List, Dict
-
-import torch
+from typing import Dict

 from dwi_ml.training.batch_loaders import DWIMLBatchLoaderOneInput
diff --git a/dwi_ml/unit_tests/test_dataset.py b/dwi_ml/unit_tests/test_dataset.py
index 75a3ef47..e752afa5 100755
--- a/dwi_ml/unit_tests/test_dataset.py
+++ b/dwi_ml/unit_tests/test_dataset.py
@@ -4,7 +4,6 @@
 import os

 import h5py
-import numpy as np
 import torch

 from dwi_ml.data.dataset.multi_subject_containers import \
diff --git a/dwi_ml/unit_tests/test_submethods_interpolation.py b/dwi_ml/unit_tests/test_submethods_interpolation.py
index 64aeda1a..2f513c1b 100644
--- a/dwi_ml/unit_tests/test_submethods_interpolation.py
+++ b/dwi_ml/unit_tests/test_submethods_interpolation.py
@@ -5,8 +5,7 @@
 import torch

 from dwi_ml.data.processing.space.neighborhood import \
-    prepare_neighborhood_vectors, extend_coordinates_with_neighborhood, \
-    unflatten_neighborhood
+    prepare_neighborhood_vectors, unflatten_neighborhood
 from dwi_ml.data.processing.volume.interpolation import \
     interpolate_volume_in_neighborhood
diff --git a/requirements.txt b/requirements.txt
index 652be44a..ca74400a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,12 +1,9 @@
 # Supported for python 3.10
 # Should work for python > 3.8.
-# ----------
-# Scilpy and dipy must be installed manually first.
-# (or run pip install -r requirements_github.txt)
-# Not adding here to let you use your favorite version.
-# ----------
-
+# Scilpy and comet_ml both require requests. In comet: >=2.18.*,
+# which installs a version >2.28. Adding request version explicitely.
+requests==2.28.*
 bertviz~=1.4.0       # For transformer's visu
 torch==1.13.*
 tqdm==4.64.*
@@ -16,7 +13,7 @@ jupyterlab>=3.6.2    # For transformer's visu
 IProgress>=0.4       # For jupyter with tdqm
 nested_lookup==0.2.25
 nose==1.3.*
-
+scilpy==1.5.post2

 ## Necessary but should be installed with scilpy (Last check: 09/2023):
 future==0.18.*
@@ -27,10 +24,6 @@
 numpy==1.23.*
 scipy==1.9.*

-# Scilpy requires requests==2.28.*, but comet_ml requires requests>=2.18.*,
-# which installs a version >2.28. Adding request version explicitely.
-requests==2.28.*
-
 # --------------- Notes to developers
 # If we upgrade torch, verify if code copied in
diff --git a/requirements_github.txt b/requirements_github.txt
deleted file mode 100644
index 078cf9dd..00000000
--- a/requirements_github.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-
-# ---------------
-# Note to developpers
-# Could not manage to import hot dipy on github.
-# We had to use Emmanuelle Renauld's branch of scilpy, with hot dipy skipped.
-
-# Not installing dipy. Will be installed trough my scilpy.
-git+https://github.com/EmmaRenauld/scilpy.git@BRANCH_OLD_DIPY#egg=scilpy
-# Will install dipy through requirements.
-
diff --git a/scripts_python/tests/test_all_steps_tts.py b/scripts_python/tests/test_all_steps_tts.py
index 41d2eb5a..c37b8d03 100644
--- a/scripts_python/tests/test_all_steps_tts.py
+++ b/scripts_python/tests/test_all_steps_tts.py
@@ -5,11 +5,8 @@
 import pytest
 import tempfile

-import torch
-
 from dwi_ml.unit_tests.utils.expected_values import \
-    (TEST_EXPECTED_VOLUME_GROUPS, TEST_EXPECTED_STREAMLINE_GROUPS,
-     TEST_EXPECTED_SUBJ_NAMES)
+    (TEST_EXPECTED_VOLUME_GROUPS, TEST_EXPECTED_STREAMLINE_GROUPS)
 from dwi_ml.unit_tests.utils.data_and_models_for_tests import fetch_testing_data

 data_dir = fetch_testing_data()