diff --git a/.gitignore b/.gitignore index 80088d214..e88bad0d3 100644 --- a/.gitignore +++ b/.gitignore @@ -89,3 +89,4 @@ docs/auto_examples/ docs/modules/ report.html notebooks/ +.pytest_cache/ diff --git a/.travis.yml b/.travis.yml index 22aaf9223..68b248cf1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,10 @@ language: python +cache: + pip: true + apt: true + directories: + - ~/visbrain_data sudo: false dist: trusty @@ -11,13 +16,15 @@ matrix: include: - env: TEST=standard os: linux + # - env: TEST=standard + # os: linux + # python: '3.7' - env: TEST=flake os: linux - # - env: TEST=examples - # os: linux # - env: TEST=standard # os: osx - # language: generic + - env: TEST=examples + os: linux allow_failures: - env: TEST=flake @@ -42,18 +49,20 @@ before_install: install: # Create the py3 environnement ; - - conda create -q -n testenv python=$TRAVIS_PYTHON_VERSION numpy scipy pip + - conda create -q -n testenv python=$TRAVIS_PYTHON_VERSION numpy scipy pip cython - source activate testenv # Install dependencies : - - if [ "${TEST}" == "standard" ]; then + - if [ "${TEST}" == "standard" ] || [ "${TEST}" == "examples" ]; then pip install codecov pytest pytest-cov pytest-sugar pytest-travis-fold openpyxl xlrd; - pip install mne nibabel pandas tensorpac; + pip install mne nibabel pandas tensorpac scikit-image; pip install git+https://github.com/hbldh/lspopt.git#egg=lspopt; fi; - if [ "${TEST}" == "flake" ]; then pip install flake8 pep8-naming; fi; - # - pip install PyOpenGL PyOpenGL_accelerate + # Install latest version vispy : + - pip install git+https://github.com/vispy/vispy.git + - pip install PyOpenGL PyOpenGL_accelerate # - pip install -q freetype-py husl pypng cassowary imageio # ------------------- VISBRAIN ------------------- - cd ${SRC_DIR} diff --git a/MANIFEST.in b/MANIFEST.in index 4daa20f4f..3c3b6a9f3 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,21 +3,5 @@ include README.rst include LICENSE include requirements.txt -# Brain templates (Don't 
use recursive-include visbrain/data *.npz) : -include visbrain/data/templates/B1.npz -include visbrain/data/templates/B2.npz -include visbrain/data/templates/B3.npz - -# ROI : -include visbrain/data/roi/brodmann.npz -include visbrain/data/roi/aal.npz -include visbrain/data/roi/talairach.npz - -# Eegref : -include visbrain/data/topo/eegref.npz - -# Add visbrain icons : -recursive-include visbrain/data/icons *.svg - # Add url path : -include visbrain/data/data_url.txt +include visbrain/data_url.json diff --git a/Makefile b/Makefile index 429aefd6a..46fa2c1f5 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,4 @@ # simple makefile to simplify repetetive build env management tasks under posix - CTAGS ?= ctags all: clean inplace test @@ -37,8 +36,22 @@ flake: clean-test @flake8 examples: clean - @echo "NOT CONFIGURED YET" - + @for i in examples/brain/*.py examples/objects/*.py;do \ echo "-----------------------------------------------"; \ echo $$i; \ echo "-----------------------------------------------"; \ python $$i --visbrain-show=False; \ echo "\n"; \ done + +examples-full: clean + @for i in examples/*/*.py;do \ echo "-----------------------------------------------"; \ echo $$i; \ echo "-----------------------------------------------"; \ python $$i --visbrain-show=False; \ echo "\n"; \ done pypi: @python setup.py register -r pypi diff --git a/README.rst b/README.rst index 1e4bdcbd6..7fea3e0d1 100644 --- a/README.rst +++ b/README.rst @@ -46,6 +46,7 @@ Visbrain requires : * Matplotlib >= 1.5.5 * PyQt5 * Pillow +* PyOpenGL User installation ----------------- @@ -54,38 +55,5 @@ Install Visbrain : .. code-block:: shell - pip install visbrain + pip install -U visbrain -We also strongly recommend to install *pandas* and *pyopengl* : - -.. code-block:: shell - - pip install pandas PyOpenGL PyOpenGL_accelerate -Modules -======= -..
figure:: https://github.com/EtienneCmb/visbrain/blob/master/docs/picture/visbrain_readme.png - :align: center - -* `Brain `_ : visualize EEG/MEG/Intracranial data, connectivity in a standard MNI 3D brain (see `Brain examples `_). -* `Sleep `_ : visualize and analyze polysomnographic sleep data (see `Sleep examples `_). -* `Signal `_ : data-mining module for time-series inspection (see `Signal examples `_). -* `Topo `_ : display topographical maps (see `Topo examples `_). -* `Figure `_ : figure-layout for high-quality publication-like figures (see `Figure examples `_). -* `Colorbar `_ : colorbar editor (see `Colorbar examples `_). - - -Contribution -============ - -Main developers ---------------- - -* `Etienne Combrisson `_ -* `Raphael Vallat `_ - -With the help of ----------------- - -*Karim Jerbi, Christian O'Reilly, David Meunier, Dmitri Altukchov, Tarek Lajnef, Perrine Ruby, JB Einchenlaub, kevroy314, Annalisa Pascarella, Thomas Thiery, Yann Harel, Anne-Lise Saive, Golnush Alamian* diff --git a/appveyor.yml b/appveyor.yml index 6aa471230..22ac117b0 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -23,12 +23,14 @@ install: - "conda config --set always_yes yes --set changeps1 no" - "conda update -q conda" - "conda info -a" - - "conda create -q -n testenv python=%PYTHON_VERSION% numpy scipy matplotlib pip" + - "conda create -q -n testenv python=%PYTHON_VERSION% numpy scipy matplotlib pip cython" # Activate testing environnement : - "activate testenv" - "python -m pip install --upgrade pip" + # Install latest vispy version : + - pip install git+https://github.com/vispy/vispy.git # Install dependencies : - - "pip install mne nibabel setuptools PyOpenGL PyOpenGL_accelerate pytest pandas openpyxl tensorpac xlrd" + - "pip install mne nibabel setuptools PyOpenGL PyOpenGL_accelerate pytest pandas openpyxl tensorpac xlrd scikit-image" - "pip install git+https://github.com/hbldh/lspopt.git#egg=lspopt" - "dir" # ------------------- VISBRAIN ------------------- diff --git 
a/circle.yml b/circle.yml index 8a9772e7b..212cba5c0 100644 --- a/circle.yml +++ b/circle.yml @@ -1,59 +1,82 @@ -general: - branches: - ignore: - - gh-pages - - -machine: - environment: - # We need to set this variable to let Anaconda take precedence - PATH: "/home/ubuntu/miniconda3/envs/circleenv/bin:/home/ubuntu/miniconda3/bin:$PATH" - DISPLAY: ":99.0" - python: - version: 3.6.0 - -dependencies: - cache_directories: - - "~/miniconda3" - # Various dependencies - pre: - # Get a running Python - - cd ~; - # Disable pyenv (no cleaner way provided by CircleCI as it prepends pyenv version to PATH) - - rm -rf ~/.pyenv; - - rm -rf ~/virtualenvs; - # Get Anaconda and conda-based requirements - - > - if [ ! -d "/home/ubuntu/miniconda3" ]; then - echo "Setting up conda"; - wget -q http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh; - chmod +x ~/miniconda.sh; - ~/miniconda.sh -b -p /home/ubuntu/miniconda3; - conda update --yes --quiet conda; - else - echo "Conda already set up."; - fi - - if ! 
conda env list | grep circleenv; then - conda create -n circleenv --yes pip python=3.6 pip; - else - echo "Conda env already set up."; - fi; - - conda install -n circleenv --yes numpy scipy pip; - - pip install pytest pytest-travis-fold; - - pip install mne nibabel pandas openpyxl tensorpac xlrd; - - pip install git+https://github.com/hbldh/lspopt.git#egg=lspopt; - - ls -al /home/ubuntu/miniconda3; - - ls -al /home/ubuntu/miniconda3/bin; - - echo $PATH; - - echo $CIRCLE_BRANCH - - which python; - - which pip; - - override: - - cd /home/ubuntu/visbrain && pip install -e .; - - /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset; - -test: - override: - - py.test: - timeout: 1500 +version: 2 +jobs: + build: + branches: + ignore: + - gh-pages + only: + - master + - develop + docker: + - image: circleci/python:3.6-jessie + steps: + - checkout + - run: + name: Clean CircleCI + command: | + rm -rf ~/.pyenv; + rm -rf ~/virtualenvs; + - run: + name: Spin up Xvfb + command: | + /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset; + echo "export DISPLAY=:99" >> $BASH_ENV; + - run: sudo apt-get install libgl1-mesa-glx libegl1-mesa libxrandr2 libxrandr2 libxss1 libxcursor1 libxcomposite1 libasound2 libxi6 libxtst6 qt5-default; + - restore_cache: + keys: + - data-cache + - pip-cache + - miniconda-cache + - run: + name: Install miniconda + command: | + if [ ! 
-d "~/miniconda3" ]; then + wget -q http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh; + chmod +x ~/miniconda.sh; + ~/miniconda.sh -b -p ~/miniconda3; + echo "export PATH=~/miniconda3/bin:$PATH" >> $BASH_ENV; + else + echo "Miniconda3 already set up."; + fi + - save_cache: + key: miniconda-cache + paths: + - ~/.cache/miniconda + - run: + name: Setup Python environment + command: | + conda update --yes --quiet conda; + conda create -n testenv --yes python=3.6 numpy scipy pip cython qt==5.9.4 matplotlib==2.2.2 pyqt==5.9.2 ; + source activate testenv; + pip install -U pip; + pip install git+https://github.com/vispy/vispy.git; + pip install pytest pytest-travis-fold; + pip install mne nibabel pandas openpyxl tensorpac xlrd scikit-image; + pip install git+https://github.com/hbldh/lspopt.git#egg=lspopt; + echo $PATH; + echo $CIRCLE_BRANCH; + which python; + which pip; + - save_cache: + key: pip-cache + paths: + - ~/.cache/pip + - run: + name: Install visbrain + command: | + source activate testenv; + python setup.py develop; + - run: + name: run tests + command: | + mkdir test-reports + source activate testenv; + pytest --junitxml=test-reports/junit.xml + - store_test_results: + path: test-reports + - store_artifacts: + path: test-reports + - save_cache: + key: data-cache + paths: + - ~/.visbrain_data diff --git a/docs/_static/visbrain_styles.css b/docs/_static/visbrain_styles.css index 7cbf74af4..7578761c2 100644 --- a/docs/_static/visbrain_styles.css +++ b/docs/_static/visbrain_styles.css @@ -76,12 +76,12 @@ th { /* ############################ ALERT ############################*/ .alert-info { - background-color: #47a7f5; - font-weight: bold; + color: black; + background-color: #c7e4fc; } .alert-warning { - background-color: #FD9500; - font-weight: bold; + color: black; + background-color: #fedfb2; } .alert-primary { background-color: #4b4d39; diff --git a/docs/api.rst b/docs/api.rst index 10e87aab2..022ef6c0c 100644 --- 
a/docs/api.rst +++ b/docs/api.rst @@ -9,12 +9,17 @@ API :depth: 2 -GUI based modules ------------------ +Graphical user interface +------------------------ -:py:mod:`visbrain`: +:py:mod:`visbrain.gui`: + +.. currentmodule:: visbrain.gui + +.. automodule:: visbrain.gui + :no-members: + :no-inherited-members: -.. currentmodule:: visbrain .. autosummary:: :toctree: generated/ @@ -22,7 +27,6 @@ GUI based modules Brain Sleep - Topo Signal Figure @@ -53,6 +57,7 @@ Objects RoiObj SceneObj SourceObj + TopoObj TimeFrequencyObj TimeSeries3DObj VectorObj @@ -97,6 +102,8 @@ I/O download_file path_to_visbrain_data read_stc + write_fig_hyp + get_sleep_stats Miscellaneous ------------- @@ -124,23 +131,9 @@ Miscellaneous color2vb array2colormap -Command line ------------- - -In addition to using Python script, you can also use the following command-lines from a terminal : - -* :ref:`cli_visbrain_sleep` : open the graphical user interface of Sleep. -* :ref:`cli_visbrain_fig_hyp` : export a hypnogram file (**.txt**, **.csv** or **.hyp**) into a high definition colored or black and white image. -* :ref:`cli_visbrain_sleep_stats` : Compute sleep statistics from hypnogram file and export them in csv. - -.. _cli_visbrain_sleep: -.. click:: visbrain.cli:cli_sleep - :prog: visbrain_sleep +.. currentmodule:: visbrain.utils -.. _cli_visbrain_fig_hyp: -.. click:: visbrain.cli:cli_fig_hyp - :prog: visbrain_fig_hyp +.. autosummary:: + :toctree: generated/ -.. _cli_visbrain_sleep_stats: -.. click:: visbrain.cli:cli_sleep_stats - :prog: visbrain_sleep_stats + Colormap diff --git a/docs/brain.rst b/docs/brain.rst index e1e2b3f6a..cd97a9eaf 100644 --- a/docs/brain.rst +++ b/docs/brain.rst @@ -102,7 +102,7 @@ The *Brain* module can be imported as follow : .. 
code-block:: python - from visbrain import Brain + from visbrain.gui import Brain GUI description ~~~~~~~~~~~~~~~ @@ -118,15 +118,6 @@ The *Brain* graphical user interface is subdivided into three main parts : * **Colorbar canvas** (*hide by default*) * **Cross-sections canvas** (*hide by default*) -.. .. note:: - -.. If you want to save the current GUI state (i.e. all buttons properties) and retrieve it later, use the menu File/Save/GUI config and File/Load/GUI config or the associated *Brain* method. - -.. **Examples** -.. * :ref:`sphx_glr_auto_examples_brain_13_load_gui_config.py` - -.. **API** -.. * :ref:`LoadSaveConfig` Settings panel tabs ^^^^^^^^^^^^^^^^^^^ @@ -186,10 +177,6 @@ By default, *Brain* comes with three brain templates respectively B1 (with cereb Further brain templates can be downloaded `here `_. -.. note:: - - **Examples** - * :ref:`sphx_glr_auto_examples_brain_02_brain_using_vertices.py` Sources ~~~~~~~ @@ -204,11 +191,6 @@ Sources can be added to the scene using (x, y, z) MNI coordinates and comes with * **Connectivity :** must be a (N, N) upper triangular array describing how to connect sources * **Time-series and/or pictures** : finally, it's also possible to visualize signals (such as time-series, spectral signals...) and 2-D pictures (time-frequency maps, comodulogram...) -.. note:: - - **Examples** - * :ref:`sphx_glr_auto_examples_brain_03_sources.py` - * :ref:`sphx_glr_auto_examples_brain_09_add_multiple_objects.py` Cortical projection and repartition ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -223,11 +205,6 @@ Cortical projection and repartition Both methods use a **radius** parameter and only vertices with an euclidian distance under **radius** are going to be considered. From the GUI, those functions can be executed from the menu *Project*, from the tab *Sources/Properties/Projection*, using keyboard :ref:`brainshortcuts` or *Brain* methods. -.. 
note:: - - **Examples** - * :ref:`sphx_glr_auto_examples_brain_03_sources.py` - * :ref:`sphx_glr_auto_examples_brain_08_screenshot.py` Connect sources ^^^^^^^^^^^^^^^ @@ -243,11 +220,6 @@ Sources can be connected together using connectivity links. *Brain* provides thr * **Count :** color each connectivity node according to the number of connections to it * **Density :** color each link according to the number of existing links in a controllable sphere. -.. note:: - - **Examples** - * :ref:`sphx_glr_auto_examples_brain_04_connectivity.py` - * :ref:`sphx_glr_auto_examples_brain_09_add_multiple_objects.py` Attach time-series and/or pictures ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -259,11 +231,6 @@ Attach time-series and/or pictures As a complement, *Brain* provides the ability to visualize directly into the MNI brain time-series and pictures. -.. note:: - - **Examples** - * :ref:`sphx_glr_auto_examples_brain_06_add_time_series.py` - * :ref:`sphx_glr_auto_examples_brain_07_add_pictures.py` .. warning:: @@ -290,12 +257,6 @@ Cross-sections Cross-sections correspond to an axial, sagittal and coronal slice of the volume and can either be visualize in 3-D (inside the brain template) or in slitted view -.. note:: - - **Examples** - * :ref:`sphx_glr_auto_examples_brain_00_brain_control.py` - * :ref:`sphx_glr_auto_examples_brain_10_add_nifti_volume.py` - Region Of Interest (ROI) ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -307,27 +268,6 @@ Region Of Interest (ROI) If a volume is provided with corresponding labels, ROIs can be extracted and then be transformed into a mesh, compatible with source's projection methods. -.. note:: - - **Examples** - * :ref:`sphx_glr_auto_examples_brain_05_region_of_interest.py` - * :ref:`sphx_glr_auto_examples_brain_08_screenshot.py` - - -3-D volume rendering -^^^^^^^^^^^^^^^^^^^^ - -.. figure:: picture/picbrain/brain_volume.png - :align: center - - Volume rendering methods of a Nifti volume. - -.. 
note:: - - **Examples** - * :ref:`sphx_glr_auto_examples_brain_00_brain_control.py` - * :ref:`sphx_glr_auto_examples_brain_10_add_nifti_volume.py` - Colorbar control ~~~~~~~~~~~~~~~~ @@ -343,11 +283,6 @@ The colorbar can be controlled for individual objects including : * **Pictures** (*if defined*) * **Projections** (*if defined*) -.. note:: - - **Examples** - * :ref:`sphx_glr_auto_examples_brain_08_screenshot.py` - Examples ~~~~~~~~ diff --git a/docs/conf.py b/docs/conf.py index 20784e2f2..f10feae3e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -20,7 +20,7 @@ from datetime import date import sphinx_bootstrap_theme -from sphinx_gallery.sorting import FileNameSortKey +from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder from numpydoc import numpydoc, docscrape import visbrain @@ -74,6 +74,12 @@ 'gallery_dirs': 'auto_examples', 'backreferences_dir': 'generated', 'default_thumb_file': 'picture/visbrain.png', + 'subsection_order': ExplicitOrder(['../examples/objects', + '../examples/gui_brain', + '../examples/gui_sleep', + '../examples/gui_signal', + '../examples/eeg_meg', + '../examples/figure']), 'within_subsection_order': FileNameSortKey, 'doc_module': ('visbrain'), # 'thumbnail_size': (100, 100), diff --git a/docs/documentation.rst b/docs/documentation.rst index 70ecb0676..445ab1b0c 100644 --- a/docs/documentation.rst +++ b/docs/documentation.rst @@ -41,7 +41,7 @@ Documentation .. toctree:: - :maxdepth: 2 + :maxdepth: 3 :numbered: objects @@ -93,29 +93,6 @@ Documentation sleep -.. ------------------ TOPO MODULE ------------------ -.. raw:: html - - - - - -
- -
-
- -.. toctree:: - :maxdepth: 3 - :numbered: - - topo - .. ------------------ SIGNAL MODULE ------------------ .. raw:: html diff --git a/docs/faq.rst b/docs/faq.rst index 3da0bb264..385db79f0 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -47,7 +47,7 @@ And you should be ready to write your first python script. In your file explorer .. code-block:: python # Import only the Brain module from visbrain - from visbrain import Brain + from visbrain.gui import Brain # Open Brain : Brain().show() @@ -112,7 +112,7 @@ Yes, we provide a set of functions to help you with the compatibility with MNE-P .. code-block:: python import visbrain # import the full package - from visbrain import Brain, Sleep # import modules + from visbrain.gui import Brain, Sleep # import modules from visbrain.objects import BrainObj, SourceObj # import objects .. ----------------------------- JUPYTER ----------------------------- @@ -310,7 +310,7 @@ The :class:`visbrain.objects.BrainObj` comes with several brain templates (i.e B .. code-block:: python - from visbrain import Brain + from visbrain.gui import Brain from visbrain.objects import BrainObj vertices = ... # the array of vertices @@ -379,7 +379,7 @@ Two strategies : .. code-block:: python - from visbrain import Sleep + from visbrain.gui import Sleep data = ... # NumPy array of shape (n_channels, n_time_points) channels = [...] # List of channel names. Could be None diff --git a/docs/figure.rst b/docs/figure.rst index 2c6310404..1f81997dd 100644 --- a/docs/figure.rst +++ b/docs/figure.rst @@ -14,7 +14,7 @@ This module can be used to arange several exported figures in a grid, to control .. code-block:: python - from visbrain import Figure + from visbrain.gui import Figure Example @@ -23,7 +23,7 @@ Example .. 
code-block:: python # Import the Figure module : - from visbrain import Figure + from visbrain.gui import Figure # Set the list of files to load : files = ['front.png', 'top.png', 'connect.png', 'connect.png'] diff --git a/docs/introduction.rst b/docs/introduction.rst index 75a5b1a2d..e1b05a392 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -24,7 +24,7 @@ Structure Visbrain is mainly divided into two branches : -* **Modules** : essentially designed for beginner users, modules comes with a graphical user interface (GUI) for interactions between plotted elements and parameters. +* **Modules** : modules comes with a graphical user interface (GUI) for interactions between plotted elements and parameters. * **Objects** : objects are elementary bricks i.e. one visualization purpose per object. It's mainly designed for advanced users since objects are much more modular. See the :ref:`Objects` documentation and the API :class:`visbrain.objects` ====================== ======================================================= @@ -33,7 +33,6 @@ Module name Description :ref:`BrainModule` Visualizations involving a MNI brain :ref:`SleepModule` Visualize and score polysomnographic data :ref:`SignalModule` Visualize multi-dimensional datasets -:ref:`TopoModule` Topographic representations :ref:`FigureModule` Figure layout ====================== ======================================================= @@ -44,28 +43,28 @@ The visbrain structure is summarized below. 
Structure and hierarchy used in visbrain -Installation -============ +Installation options +==================== Dependencies ------------ -=============================================================== =========== ========================================= -Package Version Purpose -=============================================================== =========== ========================================= -`NumPy `_ >= 1.13 Scientific computing -`SciPy `_ - Mathematics, science, and engineering -`Matplotlib `_ >= 1.5.5 Colors and colormaps integration -`VisPy `_ >= 0.5.2 Graphics rendering -`PyQt5 `_ - Graphical User Interface components -`Pillow `_ - Screenshots and image file format support -=============================================================== =========== ========================================= - -PyQt5 can be installed using either **pip install pyqt5** or **conda install pyqt**. We also strongly recommend to install *pandas* and *pyopengl* : +* NumPy and SciPy (>= 1.13) +* Matplotlib (>= 1.5.5) +* VisPy (>= 0.5.2) +* PyQt5 +* PyOpenGL +* Pillow -.. code-block:: shell +Optional dependencies +--------------------- - pip install pandas PyOpenGL PyOpenGL_accelerate +* Pandas & xlrd : table import / export +* Pillow : export figures +* Nibabel : read nifti files +* MNE-python : alternative to read sleep data files +* Tensorpac : compute and display phase-amplitude coupling +* lspopt : multitaper spectrogram Regular installation -------------------- @@ -86,3 +85,24 @@ If you want to install visbrain in develop mode : git clone https://github.com/EtienneCmb/visbrain.git visbrain/ cd visbrain/ python setup.py develop + +From here you can switch to the latest features using : + +.. code-block:: shell + + git checkout develop + +If you don't want to clone the full package, run : + +.. code-block:: shell + + pip install git+https://github.com/EtienneCmb/visbrain.git + + +Update visbrain +--------------- +You can update visbrain using : + +..
code-block:: shell + + pip install --upgrade visbrain \ No newline at end of file diff --git a/docs/objects.rst b/docs/objects.rst index d4f2422f3..f9621b09f 100644 --- a/docs/objects.rst +++ b/docs/objects.rst @@ -97,3 +97,9 @@ A scene can be imported from :class:`visbrain.objects` and defined as follow : This is a non-exhaustive example. You definitively should take a look at the :class:`visbrain.objects.SceneObj` + + +Complete object tutorial +------------------------ + +For each object, we provide a `complete tutorial `_ with illustration of all functionalities. diff --git a/docs/picture/piccbar/ex_load_existing_config.png b/docs/picture/piccbar/ex_load_existing_config.png deleted file mode 100644 index 3c81f95ec..000000000 Binary files a/docs/picture/piccbar/ex_load_existing_config.png and /dev/null differ diff --git a/docs/picture/piccbar/ex_open_editor.png b/docs/picture/piccbar/ex_open_editor.png deleted file mode 100644 index ed7df0597..000000000 Binary files a/docs/picture/piccbar/ex_open_editor.png and /dev/null differ diff --git a/docs/picture/piceegmeg/ex_eegmeg_meg_inverse.png b/docs/picture/piceegmeg/ex_eegmeg_meg_inverse.png index f0074eb64..69c499c2e 100644 Binary files a/docs/picture/piceegmeg/ex_eegmeg_meg_inverse.png and b/docs/picture/piceegmeg/ex_eegmeg_meg_inverse.png differ diff --git a/docs/picture/picobjects/ex_brain_obj.png b/docs/picture/picobjects/ex_brain_obj.png index 85c50f4e0..6271b8095 100644 Binary files a/docs/picture/picobjects/ex_brain_obj.png and b/docs/picture/picobjects/ex_brain_obj.png differ diff --git a/docs/picture/picobjects/ex_connect_obj.png b/docs/picture/picobjects/ex_connect_obj.png index 2d00acb6d..5a1c43ae5 100644 Binary files a/docs/picture/picobjects/ex_connect_obj.png and b/docs/picture/picobjects/ex_connect_obj.png differ diff --git a/docs/picture/picobjects/ex_cs_obj.png b/docs/picture/picobjects/ex_cs_obj.png deleted file mode 100644 index df5c6227d..000000000 Binary files 
a/docs/picture/picobjects/ex_cs_obj.png and /dev/null differ diff --git a/docs/picture/picobjects/ex_imtfspec_obj.png b/docs/picture/picobjects/ex_imtfspec_obj.png index a0aad62a9..c3ea6a97c 100644 Binary files a/docs/picture/picobjects/ex_imtfspec_obj.png and b/docs/picture/picobjects/ex_imtfspec_obj.png differ diff --git a/docs/picture/picobjects/ex_roi_obj.png b/docs/picture/picobjects/ex_roi_obj.png index 513f77849..07be82070 100644 Binary files a/docs/picture/picobjects/ex_roi_obj.png and b/docs/picture/picobjects/ex_roi_obj.png differ diff --git a/docs/picture/picobjects/ex_source_obj.png b/docs/picture/picobjects/ex_source_obj.png index 63a1abe1d..edef42804 100644 Binary files a/docs/picture/picobjects/ex_source_obj.png and b/docs/picture/picobjects/ex_source_obj.png differ diff --git a/docs/picture/picobjects/ex_topo_obj.png b/docs/picture/picobjects/ex_topo_obj.png new file mode 100644 index 000000000..2f3d4e3b8 Binary files /dev/null and b/docs/picture/picobjects/ex_topo_obj.png differ diff --git a/docs/picture/picobjects/ex_tspic_obj.png b/docs/picture/picobjects/ex_tspic_obj.png new file mode 100644 index 000000000..c7b3f84b4 Binary files /dev/null and b/docs/picture/picobjects/ex_tspic_obj.png differ diff --git a/docs/picture/picobjects/ex_vol_obj.png b/docs/picture/picobjects/ex_vol_obj.png index b1a94ca19..b643463d2 100644 Binary files a/docs/picture/picobjects/ex_vol_obj.png and b/docs/picture/picobjects/ex_vol_obj.png differ diff --git a/docs/picture/picobjects/pic_RoiObj.png b/docs/picture/picobjects/pic_RoiObj.png deleted file mode 100644 index 8be5cae45..000000000 Binary files a/docs/picture/picobjects/pic_RoiObj.png and /dev/null differ diff --git a/docs/picture/picobjects/pic_brain_obj.png b/docs/picture/picobjects/pic_brain_obj.png deleted file mode 100644 index e74199ed7..000000000 Binary files a/docs/picture/picobjects/pic_brain_obj.png and /dev/null differ diff --git a/docs/picture/picobjects/pic_cbar_obj.png 
b/docs/picture/picobjects/pic_cbar_obj.png deleted file mode 100644 index 596a6ac72..000000000 Binary files a/docs/picture/picobjects/pic_cbar_obj.png and /dev/null differ diff --git a/docs/picture/picobjects/pic_connect_obj.png b/docs/picture/picobjects/pic_connect_obj.png deleted file mode 100644 index fb6064050..000000000 Binary files a/docs/picture/picobjects/pic_connect_obj.png and /dev/null differ diff --git a/docs/picture/picobjects/pic_cs_obj.png b/docs/picture/picobjects/pic_cs_obj.png deleted file mode 100644 index 132c56dd6..000000000 Binary files a/docs/picture/picobjects/pic_cs_obj.png and /dev/null differ diff --git a/docs/picture/picobjects/pic_hypno_obj.png b/docs/picture/picobjects/pic_hypno_obj.png deleted file mode 100644 index 30de4b419..000000000 Binary files a/docs/picture/picobjects/pic_hypno_obj.png and /dev/null differ diff --git a/docs/picture/picobjects/pic_image_obj.png b/docs/picture/picobjects/pic_image_obj.png deleted file mode 100644 index 35e566bb7..000000000 Binary files a/docs/picture/picobjects/pic_image_obj.png and /dev/null differ diff --git a/docs/picture/picobjects/pic_picture_obj.png b/docs/picture/picobjects/pic_picture_obj.png deleted file mode 100644 index f7d9e6f28..000000000 Binary files a/docs/picture/picobjects/pic_picture_obj.png and /dev/null differ diff --git a/docs/picture/picobjects/pic_source_obj.png b/docs/picture/picobjects/pic_source_obj.png deleted file mode 100644 index 0f31fa679..000000000 Binary files a/docs/picture/picobjects/pic_source_obj.png and /dev/null differ diff --git a/docs/picture/picobjects/pic_spec_obj.png b/docs/picture/picobjects/pic_spec_obj.png deleted file mode 100644 index dd704c1fc..000000000 Binary files a/docs/picture/picobjects/pic_spec_obj.png and /dev/null differ diff --git a/docs/picture/picobjects/pic_tf_obj.png b/docs/picture/picobjects/pic_tf_obj.png deleted file mode 100644 index 827e310ff..000000000 Binary files a/docs/picture/picobjects/pic_tf_obj.png and /dev/null differ 
diff --git a/docs/picture/picobjects/pic_ts_obj.png b/docs/picture/picobjects/pic_ts_obj.png deleted file mode 100644 index 64f2cec3b..000000000 Binary files a/docs/picture/picobjects/pic_ts_obj.png and /dev/null differ diff --git a/docs/picture/picobjects/pic_vector_obj.png b/docs/picture/picobjects/pic_vector_obj.png deleted file mode 100644 index d68283f85..000000000 Binary files a/docs/picture/picobjects/pic_vector_obj.png and /dev/null differ diff --git a/docs/picture/picobjects/pic_vol_obj.png b/docs/picture/picobjects/pic_vol_obj.png deleted file mode 100644 index 8ebf4462a..000000000 Binary files a/docs/picture/picobjects/pic_vol_obj.png and /dev/null differ diff --git a/docs/picture/pictopo/ex_connectivity.png b/docs/picture/pictopo/ex_connectivity.png index efcfe520a..05040f0e5 100644 Binary files a/docs/picture/pictopo/ex_connectivity.png and b/docs/picture/pictopo/ex_connectivity.png differ diff --git a/docs/picture/pictopo/ex_grid_topoplot.png b/docs/picture/pictopo/ex_grid_topoplot.png deleted file mode 100644 index e3bde6d37..000000000 Binary files a/docs/picture/pictopo/ex_grid_topoplot.png and /dev/null differ diff --git a/docs/picture/pictopo/ex_shared_colorbar.png b/docs/picture/pictopo/ex_shared_colorbar.png deleted file mode 100644 index e057b341c..000000000 Binary files a/docs/picture/pictopo/ex_shared_colorbar.png and /dev/null differ diff --git a/docs/picture/pictopo/ex_topoplot_plotting_properties.png b/docs/picture/pictopo/ex_topoplot_plotting_properties.png index 281ee1747..7239e5991 100644 Binary files a/docs/picture/pictopo/ex_topoplot_plotting_properties.png and b/docs/picture/pictopo/ex_topoplot_plotting_properties.png differ diff --git a/docs/release.rst b/docs/release.rst index 56b8795b4..039ad304f 100644 --- a/docs/release.rst +++ b/docs/release.rst @@ -7,6 +7,33 @@ Changelog :local: :depth: 1 +0.4.3 +----- + +New features +~~~~~~~~~~~~ +* :class:`visbrain.objects.SourceObj.project_sources` can now be projected to a specific 
overlay. + +Improvements +~~~~~~~~~~~~ +* Fix colormap update for every recording modality +* Colormap computed onto the GPU for : spectrogram, phase-amplitude coupling, images, 3D images, brain object, grid signals +* Sorted brain templates in :class:`visbrain.Brain` + remove sulcus as a brain template +* Fewer visible possibilities when importing from the root of visbrain +* Remove all data from the visbrain package +* Include MIST ROI template to the :class:`visbrain.objects.RoiObj` +* Enable to filter ROIs from the Brain GUI + +Bug fixes +~~~~~~~~~ +* Brain scaling in :class:`visbrain.mne.mne_plot_source_estimation` +* Recursive folder creation for brain template +* Select from the GUI brain template build with vertices and faces +* Repeat source localization using the same RoiObj +* Colorbar module has been removed and replaced by CbarObj +* Insert annotation inside Signal +* Smoothing for MEG data (`PR20 `_) + 0.4.1 ----- @@ -17,6 +44,7 @@ New features * Add activations (:class:`visbrain.objects.CrossSecObj.set_activation`) and highlight multiple sources (:class:`visbrain.objects.CrossSecObj.highlight_sources`) inside the :class:`visbrain.objects.CrossSecObj` * Plot MNE sources :class:`visbrain.mne.mne_plot_source_space` + Improvements ~~~~~~~~~~~~ diff --git a/docs/signal.rst b/docs/signal.rst index 65c5b1816..eb414a077 100644 --- a/docs/signal.rst +++ b/docs/signal.rst @@ -77,7 +77,7 @@ The :class:`Signal` module can be imported as follow : .. code-block:: python - from visbrain import Signal + from visbrain.gui import Signal Shortcuts diff --git a/docs/sleep.rst b/docs/sleep.rst index 5e6b5f91b..5757a9fe0 100644 --- a/docs/sleep.rst +++ b/docs/sleep.rst @@ -107,7 +107,7 @@ The :class:`Sleep` module can be imported as follow : .. code-block:: python - from visbrain import Sleep + from visbrain.gui import Sleep GUI description @@ -455,7 +455,7 @@ Don't send anything, just open the interface and you will have a popup window as .. 
code-block:: python # Import the Sleep module from visbrain : - from visbrain import Sleep + from visbrain.gui import Sleep # Run the interface : Sleep().show() @@ -475,7 +475,7 @@ Instead of leaving inputs arguments empty, send the path to the data : .. code-block:: python # Import the Sleep module from visbrain : - from visbrain import Sleep + from visbrain.gui import Sleep # Define where the data are located : dfile = '/home/perso/myfile.eeg' # File for the hypogram : @@ -494,7 +494,7 @@ Finally, it is possible to load several other file formats using `MNE Python pac .. code-block:: python # Import the Sleep module: - from visbrain import Sleep + from visbrain.gui import Sleep # - Biosemi Data Format (BDF) data = 'mybdffile.bdf' # - EGI format @@ -515,7 +515,7 @@ It is possible to manually load raw data and pass them as inputs arguments Sleep from scipy.io import loadmat # Import the Sleep module from visbrain: - from visbrain import Sleep + from visbrain.gui import Sleep # Load your dataset : mat = loadmat('testing_database.mat') # Get the data, sampling frequency and channel names: @@ -746,7 +746,7 @@ Alternatively, if you want to use a configuration when running :class:`Sleep`, y from mne import io # Import the Sleep module: - from visbrain import Sleep + from visbrain.gui import Sleep Sleep(config_file='pathto/myconfig.json') @@ -787,7 +787,7 @@ Annotations can be defined in a `csv file -

Quick description _images/topo_ico.png

-

Topo is a GUI based module for topographic representations.

-
-

- -Checkout the API of the :class:`visbrain.Topo` class. If you need help with the :class:`Topo` module, ask your questions in the dedicated `gitter Topo chat `_ - -.. raw:: html - - _images/ex_topoplot_plotting_properties.png

-
- -.. contents:: Contents - :local: - :depth: 2 - -Main features -~~~~~~~~~~~~~ - -.. raw:: html - -
-
-
- Display topographic map -
    -
  • Find coordinates according to channel names
  • -
  • Add connectivity edges
  • -
  • Support multiple coordinate systems
  • -
  • Highly controllable colorbar
  • -
-
-
-
-
- Grid representation -
    -
  • Display topoplot into a highly controllable grid.
  • -
  • Add either one colorbar per topoplot or one shared colorbar across topoplot
  • -
-
-
-
- - -Import and use Topo -~~~~~~~~~~~~~~~~~~~ - -The :class:`Topo` module can be imported as follow : - -.. code-block:: python - - from visbrain import Topo - -Examples -~~~~~~~~ - -.. include:: generated/visbrain.Topo.examples - -.. raw:: html - -
diff --git a/examples/README.txt b/examples/README.txt index 4315a8720..15de878a0 100644 --- a/examples/README.txt +++ b/examples/README.txt @@ -1,5 +1,9 @@ .. _general_examples: +.. warning:: + Some Visbrain's examples are based on data that need to be downloaded. + Those data are downloaded inside the folder *~/visbrain_data/example_data* + Examples ======== diff --git a/examples/colorbar/00_open_editor.py b/examples/colorbar/00_open_editor.py deleted file mode 100644 index db3ae7e9b..000000000 --- a/examples/colorbar/00_open_editor.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -Open the colorbar editor -======================== - -Open the colorbar editor and pass inputs to control it. - -.. image:: ../../picture/piccbar/ex_open_editor.png -""" -from visbrain import Colorbar - -cb = Colorbar(vmin=.1, under='slateblue', vmax=.8, over='olive', - cmap='viridis', ndigits=4, cblabel='oki !', border=False, - name='Example1') -cb.show() diff --git a/examples/colorbar/01_load_existing_config.py b/examples/colorbar/01_load_existing_config.py deleted file mode 100644 index e56c4240e..000000000 --- a/examples/colorbar/01_load_existing_config.py +++ /dev/null @@ -1,17 +0,0 @@ -""" -Load an existing colorbar configuration file -============================================ - -The configuration is in a config.txt file and is loaded to reproduce the -colorbar configuration. - -Configuration files : -https://www.dropbox.com/s/5o1ph08rmpft200/cbar_config.zip?dl=0 - -.. 
image:: ../../picture/piccbar/ex_load_existing_config.png -""" -from visbrain import Colorbar -from visbrain.io import download_file, path_to_visbrain_data - -download_file('cbar_config.zip', unzip=True, remove_archive=True) -Colorbar(config=path_to_visbrain_data('config_1.txt')).show() diff --git a/examples/colorbar/README.txt b/examples/colorbar/README.txt deleted file mode 100644 index 7fdccda19..000000000 --- a/examples/colorbar/README.txt +++ /dev/null @@ -1,4 +0,0 @@ -Colorbar Examples ------------------ - -Examples demonstrating how to open and configure the Colorbar editor. \ No newline at end of file diff --git a/examples/eeg_meg/conjunction_map.py b/examples/eeg_meg/conjunction_map.py index 37e91d53f..d3a98d169 100644 --- a/examples/eeg_meg/conjunction_map.py +++ b/examples/eeg_meg/conjunction_map.py @@ -10,21 +10,16 @@ .. image:: ../../picture/piceegmeg/ex_eegmeg_conjunction_map.png """ -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import BrainObj -from visbrain.io import path_to_visbrain_data, download_file +from visbrain.io import download_file """Download files if needed """ -file_name_1 = 'lh.sig.nii.gz' -file_name_2 = 'lh.alt_sig.nii.gz' -download_file(file_name_1) -download_file(file_name_2) +file_1 = download_file('lh.sig.nii.gz', astype='example_data') +file_2 = download_file('lh.alt_sig.nii.gz', astype='example_data') -file_1 = path_to_visbrain_data(file=file_name_1) -file_2 = path_to_visbrain_data(file=file_name_2) - -b_obj = BrainObj('inflated', translucent=False) +b_obj = BrainObj('inflated', translucent=False, sulcus=True) b_obj.add_activation(file=file_1, clim=(4., 30.), hide_under=4, cmap='Reds_r', hemisphere='left') b_obj.add_activation(file=file_2, clim=(4., 30.), hide_under=4, cmap='Blues_r', diff --git a/examples/eeg_meg/fmri_activation.py b/examples/eeg_meg/fmri_activation.py index 98e6c2f8b..1221035b1 100644 --- a/examples/eeg_meg/fmri_activation.py +++ b/examples/eeg_meg/fmri_activation.py @@ 
-10,16 +10,13 @@ .. image:: ../../picture/piceegmeg/ex_eegmeg_fmri_activations.png """ -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import BrainObj -from visbrain.io import path_to_visbrain_data, download_file +from visbrain.io import download_file """Download file if needed """ -file_name = 'lh.sig.nii.gz' -download_file(file_name) -file = path_to_visbrain_data(file=file_name) - +file = download_file('lh.sig.nii.gz', astype='example_data') b_obj = BrainObj('inflated', translucent=False, sulcus=True) b_obj.add_activation(file=file, clim=(5., 20.), hide_under=5, cmap='viridis', diff --git a/examples/eeg_meg/forward_solution.py b/examples/eeg_meg/forward_solution.py index 545d6281b..5c806397e 100644 --- a/examples/eeg_meg/forward_solution.py +++ b/examples/eeg_meg/forward_solution.py @@ -31,7 +31,7 @@ # Additional inputs for SourceObj : kw_s_obj = dict(color='blue', symbol='square') # Additional inputs for activations (colormap, clim...) : -kw_activation = dict(cmap='viridis', hide_under=0.3, clim=(0., .6)) +kw_activation = dict(cmap='Reds', hide_under=0., clim=(0., .6)) """Show control : - True -> directly display the Brain interface diff --git a/examples/eeg_meg/meg_inverse_solution.py b/examples/eeg_meg/meg_inverse_solution.py index b5c5293f1..26f830286 100644 --- a/examples/eeg_meg/meg_inverse_solution.py +++ b/examples/eeg_meg/meg_inverse_solution.py @@ -10,17 +10,16 @@ .. 
image:: ../../picture/piceegmeg/ex_eegmeg_meg_inverse.png """ -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import BrainObj -from visbrain.io import path_to_visbrain_data, download_file, read_stc +from visbrain.io import download_file, read_stc """Download file if needed : """ -file = 'meg_source_estimate-lh.stc' -download_file(file) +stc_file = download_file('meg_source_estimate-lh.stc', astype='example_data') # Read the *.stc file : -file = read_stc(path_to_visbrain_data(file=file)) +file = read_stc(stc_file) # Get the data and vertices from the file : data = file['data'][:, 2] @@ -28,7 +27,7 @@ # Define a brain object and add the data to the mesh : b_obj = BrainObj('inflated', translucent=False, hemisphere='left') -b_obj.add_activation(data=data, vertices=vertices, smoothing_steps=5, +b_obj.add_activation(data=data, vertices=vertices, smoothing_steps=15, clim=(13., 22.), hide_under=13., cmap='plasma', hemisphere='left') diff --git a/examples/eeg_meg/source_space.py b/examples/eeg_meg/source_space.py index 064c790df..c89521303 100644 --- a/examples/eeg_meg/source_space.py +++ b/examples/eeg_meg/source_space.py @@ -15,7 +15,7 @@ from mne.datasets import sample -from visbrain import Brain +from visbrain.gui import Brain from visbrain.mne import mne_plot_source_space # Define path : diff --git a/examples/eeg_meg/vector_based_meg_inverse.py b/examples/eeg_meg/vector_based_meg_inverse.py index fb5d0ee78..7219f11ac 100644 --- a/examples/eeg_meg/vector_based_meg_inverse.py +++ b/examples/eeg_meg/vector_based_meg_inverse.py @@ -12,17 +12,16 @@ """ import numpy as np -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import BrainObj, VectorObj -from visbrain.io import path_to_visbrain_data, read_stc, download_file +from visbrain.io import read_stc, download_file """Download file if needed """ -file_name = 'meg_source_estimate-lh.stc' -download_file(file_name) +stc_file = 
download_file('meg_source_estimate-lh.stc', astype='example_data') # Read the *.stc file : -file = read_stc(path_to_visbrain_data(file=file_name)) +file = read_stc(stc_file) # Get the data and vertices from the file : data = file['data'][:, 2] diff --git a/examples/figure/0_GeneratePictures.py b/examples/figure/0_GeneratePictures.py index 4adbea9c7..f14aefa7b 100644 --- a/examples/figure/0_GeneratePictures.py +++ b/examples/figure/0_GeneratePictures.py @@ -6,7 +6,7 @@ pictures are going to be set in a layout in the 1_LayoutExample.py script. """ import numpy as np -from visbrain import Brain +from visbrain.gui import Brain kwargs = {} diff --git a/examples/figure/1_LayoutExample.py b/examples/figure/1_LayoutExample.py index 27507d5a7..d637212c8 100644 --- a/examples/figure/1_LayoutExample.py +++ b/examples/figure/1_LayoutExample.py @@ -2,20 +2,23 @@ Page layout example =================== -Arange pictures in a grid. +Arange pictures in a grid. Download the archive : https://www.dropbox.com/s/jsjct54ynvdjzfq/figure.zip?dl=1 """ -from visbrain import Figure -from visbrain.io import download_file, path_to_visbrain_data +import os -download_file("figure.zip", unzip=True) +from visbrain.gui import Figure +from visbrain.io import download_file + +fig_path = download_file("figure.zip", unzip=True, astype='example_data') +fig_path = fig_path.split("figure.zip")[0] # Files to load : files = ['default.png', 'inside.png', 'count.png', 'density.png', 'repartition.jpg', 'roi.jpg'] -files = [path_to_visbrain_data(k) for k in files] +files = [os.path.join(fig_path, k) for k in files] # Titles : titles = ['Default view', 'Select sources inside', 'Connectivity', diff --git a/examples/brain/00_brain_control.py b/examples/gui_brain/00_brain_control.py similarity index 98% rename from examples/brain/00_brain_control.py rename to examples/gui_brain/00_brain_control.py index 41894b61b..1d22b6ea5 100644 --- a/examples/brain/00_brain_control.py +++ b/examples/gui_brain/00_brain_control.py 
@@ -22,7 +22,7 @@ .. image:: ../../picture/picbrain/ex_brain_control.png """ -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import BrainObj """Visbrain comes with three default templates : diff --git a/examples/brain/01_cross_sections_and_volume.py b/examples/gui_brain/01_cross_sections_and_volume.py similarity index 92% rename from examples/brain/01_cross_sections_and_volume.py rename to examples/gui_brain/01_cross_sections_and_volume.py index cd7df0b4a..258449e5a 100644 --- a/examples/brain/01_cross_sections_and_volume.py +++ b/examples/gui_brain/01_cross_sections_and_volume.py @@ -9,7 +9,7 @@ .. image:: ../../picture/picbrain/ex_crossec_and_volume.png """ -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import CrossSecObj, VolumeObj from visbrain.io import download_file @@ -19,7 +19,7 @@ """Download the file. """ -path = download_file(volume_name) +path = download_file(volume_name, astype='example_data') """Define a cross-section object diff --git a/examples/brain/02_brain_using_vertices.py b/examples/gui_brain/02_brain_using_vertices.py similarity index 93% rename from examples/brain/02_brain_using_vertices.py rename to examples/gui_brain/02_brain_using_vertices.py index 8911107e1..37b6c2627 100644 --- a/examples/brain/02_brain_using_vertices.py +++ b/examples/gui_brain/02_brain_using_vertices.py @@ -9,14 +9,14 @@ """ import numpy as np -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import BrainObj from visbrain.io import download_file """Download and the load the Custom.npz archive. This file contains vertices and faces of a brain template that is not integrated by default in Visbrain. """ -mat = np.load(download_file('Custom.npz')) +mat = np.load(download_file('Custom.npz', astype='example_data')) """Get vertices and faces from the archive. 
diff --git a/examples/brain/03_sources.py b/examples/gui_brain/03_sources.py similarity index 96% rename from examples/brain/03_sources.py rename to examples/gui_brain/03_sources.py index 0395027d0..ed6229bac 100644 --- a/examples/brain/03_sources.py +++ b/examples/gui_brain/03_sources.py @@ -12,7 +12,7 @@ """ import numpy as np -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import SourceObj, BrainObj from visbrain.io import download_file @@ -20,7 +20,7 @@ """Load the xyz coordinates and corresponding subject name """ -mat = np.load(download_file('xyz_sample.npz')) +mat = np.load(download_file('xyz_sample.npz', astype='example_data')) xyz, subjects = mat['xyz'], mat['subjects'] """The "subjects" list is composed of 6 diffrents subjects and here we set one diff --git a/examples/brain/04_connectivity.py b/examples/gui_brain/04_connectivity.py similarity index 93% rename from examples/brain/04_connectivity.py rename to examples/gui_brain/04_connectivity.py index c7dea35e4..45af67ccf 100644 --- a/examples/brain/04_connectivity.py +++ b/examples/gui_brain/04_connectivity.py @@ -13,9 +13,9 @@ from __future__ import print_function import numpy as np -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import SourceObj, ConnectObj -from visbrain.io import download_file, path_to_visbrain_data +from visbrain.io import download_file # Create an empty kwargs dictionnary : kwargs = {} @@ -23,8 +23,7 @@ # ____________________________ DATA ____________________________ # Load the xyz coordinates and corresponding subject name : -download_file('xyz_sample.npz') -mat = np.load(path_to_visbrain_data('xyz_sample.npz')) +mat = np.load(download_file('xyz_sample.npz', astype='example_data')) xyz, subjects = mat['xyz'], mat['subjects'] N = xyz.shape[0] # Number of electrodes diff --git a/examples/brain/05_region_of_interest.py b/examples/gui_brain/05_region_of_interest.py similarity index 89% rename from 
examples/brain/05_region_of_interest.py rename to examples/gui_brain/05_region_of_interest.py index 047e8468b..243c2957b 100644 --- a/examples/brain/05_region_of_interest.py +++ b/examples/gui_brain/05_region_of_interest.py @@ -13,15 +13,17 @@ from __future__ import print_function import numpy as np -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import BrainObj, SourceObj, RoiObj from visbrain.io import download_file """Download the location of sources closed to the thalamus and the power of alpha oscillations """ -s_xyz = np.loadtxt(download_file('thalamus.txt')) -s_data = np.load(download_file('Px.npy')).mean(1) * 10e26 +thalamus_xyz = download_file('thalamus.txt', astype='example_data') +thalamus_data = download_file('Px.npy', astype='example_data') +s_xyz = np.loadtxt(thalamus_xyz) +s_data = np.load(thalamus_data).mean(1) * 10e26 """Create a source object """ diff --git a/examples/brain/06_add_time_series.py b/examples/gui_brain/06_add_time_series.py similarity index 94% rename from examples/brain/06_add_time_series.py rename to examples/gui_brain/06_add_time_series.py index 4a25b0f32..629cfaa2a 100644 --- a/examples/brain/06_add_time_series.py +++ b/examples/gui_brain/06_add_time_series.py @@ -12,13 +12,13 @@ """ import numpy as np -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import TimeSeries3DObj, SourceObj from visbrain.io import download_file # Load the xyz coordinates and corresponding subject name : -s_xyz = np.load(download_file('xyz_sample.npz'))['xyz'] +s_xyz = np.load(download_file('xyz_sample.npz', astype='example_data'))['xyz'] s_xyz = s_xyz[4::25, ...] 
s_text = [str(k) for k in range(s_xyz.shape[0])] s_textsize = 1.5 diff --git a/examples/brain/07_add_pictures.py b/examples/gui_brain/07_add_pictures.py similarity index 94% rename from examples/brain/07_add_pictures.py rename to examples/gui_brain/07_add_pictures.py index 4223e34b6..25cfb9609 100644 --- a/examples/brain/07_add_pictures.py +++ b/examples/gui_brain/07_add_pictures.py @@ -12,14 +12,14 @@ """ import numpy as np -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import Picture3DObj, SourceObj from visbrain.io import download_file kwargs = {} # Load the xyz coordinates and corresponding subject name : -s_xyz = np.load(download_file('xyz_sample.npz'))['xyz'] +s_xyz = np.load(download_file('xyz_sample.npz', astype='example_data'))['xyz'] s_xyz = s_xyz[4::10, ...] n_sources = s_xyz.shape[0] diff --git a/examples/brain/08_screenshot.py b/examples/gui_brain/08_screenshot.py similarity index 97% rename from examples/brain/08_screenshot.py rename to examples/gui_brain/08_screenshot.py index 021cc6dd7..fc2605e2b 100644 --- a/examples/brain/08_screenshot.py +++ b/examples/gui_brain/08_screenshot.py @@ -19,14 +19,14 @@ import os import numpy as np -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import BrainObj, SourceObj, RoiObj from visbrain.io import download_file, path_to_visbrain_data save_pic_path = path_to_visbrain_data(folder='Example_pic') # Load the xyz coordinates and corresponding subject name : -s_xyz = np.load(download_file('xyz_sample.npz'))['xyz'] +s_xyz = np.load(download_file('xyz_sample.npz', astype='example_data'))['xyz'] """Create a source object with random data between [-50,50] """ diff --git a/examples/brain/09_add_multiple_objects.py b/examples/gui_brain/09_add_multiple_objects.py similarity index 97% rename from examples/brain/09_add_multiple_objects.py rename to examples/gui_brain/09_add_multiple_objects.py index 2db6d7547..455b75141 100644 --- 
a/examples/brain/09_add_multiple_objects.py +++ b/examples/gui_brain/09_add_multiple_objects.py @@ -12,7 +12,7 @@ from __future__ import print_function import numpy as np -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import SourceObj, ConnectObj from visbrain.io import download_file @@ -22,7 +22,7 @@ kwargs = {} # Load the xyz coordinates and corresponding subject name : -mat = np.load(download_file('xyz_sample.npz')) +mat = np.load(download_file('xyz_sample.npz', astype='example_data')) s_xyz, subjects = mat['xyz'], mat['subjects'] """ diff --git a/examples/brain/10_add_nifti_volume.py b/examples/gui_brain/10_add_nifti_volume.py similarity index 88% rename from examples/brain/10_add_nifti_volume.py rename to examples/gui_brain/10_add_nifti_volume.py index c9c5cea93..8ef51a63a 100644 --- a/examples/brain/10_add_nifti_volume.py +++ b/examples/gui_brain/10_add_nifti_volume.py @@ -16,14 +16,14 @@ from __future__ import print_function import numpy as np -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import VolumeObj, CrossSecObj, SourceObj from visbrain.io import download_file """Download two NIFTI files """ -path_1 = download_file('GG-853-GM-0.7mm.nii.gz') -path_2 = download_file('GG-853-WM-0.7mm.nii.gz') +path_1 = download_file('GG-853-GM-0.7mm.nii.gz', astype='example_data') +path_2 = download_file('GG-853-WM-0.7mm.nii.gz', astype='example_data') """Define four sources sources and a Source object """ diff --git a/examples/brain/11_add_vectors.py b/examples/gui_brain/11_add_vectors.py similarity index 96% rename from examples/brain/11_add_vectors.py rename to examples/gui_brain/11_add_vectors.py index 06d716624..a4c64d87a 100644 --- a/examples/brain/11_add_vectors.py +++ b/examples/gui_brain/11_add_vectors.py @@ -15,7 +15,7 @@ """ import numpy as np -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import VectorObj, BrainObj, SourceObj from visbrain.io import 
download_file @@ -24,7 +24,7 @@ """ Load the xyz coordinates and corresponding subject name """ -mat = np.load(download_file('xyz_sample.npz')) +mat = np.load(download_file('xyz_sample.npz', astype='example_data')) xyz, subjects = mat['xyz'], mat['subjects'] """The first vector object use the position of a subset of sources as a diff --git a/examples/brain/12_parcellize.py b/examples/gui_brain/12_parcellize.py similarity index 92% rename from examples/brain/12_parcellize.py rename to examples/gui_brain/12_parcellize.py index 89c392f7c..0eebf95a6 100644 --- a/examples/brain/12_parcellize.py +++ b/examples/gui_brain/12_parcellize.py @@ -12,7 +12,7 @@ """ import numpy as np -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import BrainObj from visbrain.io import download_file @@ -21,8 +21,8 @@ file2 = 'rh.aparc.annot' # Download files if needed : -path_to_file1 = download_file(file1) -path_to_file2 = download_file(file2) +path_to_file1 = download_file(file1, astype='example_data') +path_to_file2 = download_file(file2, astype='example_data') # Define a brain object : b_obj = BrainObj('inflated', hemisphere='both', translucent=False, diff --git a/examples/brain/README.txt b/examples/gui_brain/README.txt similarity index 63% rename from examples/brain/README.txt rename to examples/gui_brain/README.txt index 69a2ed6ac..115610132 100644 --- a/examples/brain/README.txt +++ b/examples/gui_brain/README.txt @@ -1,5 +1,5 @@ -Brain Examples --------------- +Graphical user interface : Brain +-------------------------------- Examples demonstrating visualizations on a standard 3D-MNI brain. diff --git a/examples/signal/00_1d_signal.py b/examples/gui_signal/00_1d_signal.py similarity index 97% rename from examples/signal/00_1d_signal.py rename to examples/gui_signal/00_1d_signal.py index e3ebb80c9..3c0ba840a 100644 --- a/examples/signal/00_1d_signal.py +++ b/examples/gui_signal/00_1d_signal.py @@ -17,7 +17,7 @@ .. 
image:: ../../picture/picsignal/ex_1d_signal.png """ -from visbrain import Signal +from visbrain.gui import Signal from visbrain.utils import generate_eeg sf = 512. # sampling frequency diff --git a/examples/signal/01_2d_signals.py b/examples/gui_signal/01_2d_signals.py similarity index 98% rename from examples/signal/01_2d_signals.py rename to examples/gui_signal/01_2d_signals.py index e2c9f987e..537d1ce90 100644 --- a/examples/signal/01_2d_signals.py +++ b/examples/gui_signal/01_2d_signals.py @@ -27,7 +27,7 @@ .. image:: ../../picture/picsignal/ex_2d_signal.png """ -from visbrain import Signal +from visbrain.gui import Signal from visbrain.utils import generate_eeg sf = 512. # sampling frequency diff --git a/examples/signal/02_3d_signals.py b/examples/gui_signal/02_3d_signals.py similarity index 98% rename from examples/signal/02_3d_signals.py rename to examples/gui_signal/02_3d_signals.py index 02a982fa7..c7d213dba 100644 --- a/examples/signal/02_3d_signals.py +++ b/examples/gui_signal/02_3d_signals.py @@ -15,7 +15,7 @@ .. image:: ../../picture/picsignal/ex_3d_signal.png """ from itertools import product -from visbrain import Signal +from visbrain.gui import Signal from visbrain.utils import generate_eeg sf = 512. # sampling frequency diff --git a/examples/signal/03_interface_customization.py b/examples/gui_signal/03_interface_customization.py similarity index 97% rename from examples/signal/03_interface_customization.py rename to examples/gui_signal/03_interface_customization.py index 8b0327dda..8c233bae3 100644 --- a/examples/signal/03_interface_customization.py +++ b/examples/gui_signal/03_interface_customization.py @@ -6,7 +6,7 @@ .. image:: ../../picture/picsignal/ex_custom_interface.png """ -from visbrain import Signal +from visbrain.gui import Signal from visbrain.utils import generate_eeg sf = 512. 
# sampling frequency diff --git a/examples/signal/04_annotations.py b/examples/gui_signal/04_annotations.py similarity index 96% rename from examples/signal/04_annotations.py rename to examples/gui_signal/04_annotations.py index 4f271e475..f3a91bdbc 100644 --- a/examples/signal/04_annotations.py +++ b/examples/gui_signal/04_annotations.py @@ -12,7 +12,7 @@ .. image:: ../../picture/picsignal/ex_annotations.png """ -from visbrain import Signal +from visbrain.gui import Signal from visbrain.utils import generate_eeg sf = 512. # sampling frequency diff --git a/examples/signal/05_screenshot.py b/examples/gui_signal/05_screenshot.py similarity index 97% rename from examples/signal/05_screenshot.py rename to examples/gui_signal/05_screenshot.py index 1d4e39be1..7c7e3194d 100644 --- a/examples/signal/05_screenshot.py +++ b/examples/gui_signal/05_screenshot.py @@ -8,7 +8,7 @@ """ import numpy as np -from visbrain import Signal +from visbrain.gui import Signal sf = 1000. # Sampling-frequency n_pts = 4000 # Number of time points diff --git a/examples/signal/06_butterfly.py b/examples/gui_signal/06_butterfly.py similarity index 96% rename from examples/signal/06_butterfly.py rename to examples/gui_signal/06_butterfly.py index b32f02cce..6d4d9ac70 100644 --- a/examples/signal/06_butterfly.py +++ b/examples/gui_signal/06_butterfly.py @@ -7,7 +7,7 @@ .. image:: ../../picture/picsignal/ex_butterfly.png """ import numpy as np -from visbrain import Signal +from visbrain.gui import Signal sf = 1024. # Sampling frequency diff --git a/examples/signal/README.txt b/examples/gui_signal/README.txt similarity index 59% rename from examples/signal/README.txt rename to examples/gui_signal/README.txt index 2a7a2f19c..d197d6d9d 100644 --- a/examples/signal/README.txt +++ b/examples/gui_signal/README.txt @@ -1,5 +1,5 @@ -Signal Examples ---------------- +Graphical user interface : Signal +--------------------------------- Examples demonstrating how to use the Signal module. 
diff --git a/examples/signal/signal_annotations.txt b/examples/gui_signal/signal_annotations.txt similarity index 100% rename from examples/signal/signal_annotations.txt rename to examples/gui_signal/signal_annotations.txt diff --git a/examples/gui_sleep/README.txt b/examples/gui_sleep/README.txt new file mode 100644 index 000000000..efb50ce4c --- /dev/null +++ b/examples/gui_sleep/README.txt @@ -0,0 +1,8 @@ +Graphical user interface : Sleep +-------------------------------- + +Examples demonstrating how to use Sleep and how to load files. + +.. contents:: Contents + :local: + :depth: 2 \ No newline at end of file diff --git a/examples/sleep/basic_sleep.py b/examples/gui_sleep/basic_sleep.py similarity index 91% rename from examples/sleep/basic_sleep.py rename to examples/gui_sleep/basic_sleep.py index d65e9c0af..3a50682c8 100644 --- a/examples/sleep/basic_sleep.py +++ b/examples/gui_sleep/basic_sleep.py @@ -11,6 +11,6 @@ .. image:: ../../picture/picsleep/ex_basic_sleep.png """ -from visbrain import Sleep +from visbrain.gui import Sleep Sleep().show() diff --git a/examples/gui_sleep/get_sleep_statistics.py b/examples/gui_sleep/get_sleep_statistics.py new file mode 100644 index 000000000..eb39dae6e --- /dev/null +++ b/examples/gui_sleep/get_sleep_statistics.py @@ -0,0 +1,22 @@ +""" +Get sleep statistics +==================== + +Get sleep statistics such as sleep stages duration, duration of the hypnogram. 
+""" +from visbrain.io import download_file, get_sleep_stats + +############################################################################### +# Hypnogram data +############################################################################### +# Download a hypnogram example + +path_to_hypno = download_file("s101_jbe.hyp", astype='example_data') + +############################################################################### +# Get sleep statistics +############################################################################### +# Sleep statistics are going to be printed in the terminal and then saved in a +# `my_stats.csv` + +get_sleep_stats(path_to_hypno, output_file='my_stats.csv') diff --git a/examples/sleep/load_brainvision.py b/examples/gui_sleep/load_brainvision.py similarity index 79% rename from examples/sleep/load_brainvision.py rename to examples/gui_sleep/load_brainvision.py index 3bdd1f3eb..dde2339c7 100644 --- a/examples/sleep/load_brainvision.py +++ b/examples/gui_sleep/load_brainvision.py @@ -10,17 +10,15 @@ .. 
image:: ../../picture/picsleep/ex_LoadBrainVision.png """ import os -from visbrain import Sleep +from visbrain.gui import Sleep from visbrain.io import download_file, path_to_visbrain_data ############################################################################### # LOAD YOUR FILE ############################################################################### -current_path = path_to_visbrain_data() -target_path = os.path.join(current_path, 'sleep_data', 'brainvision') - # Download dataset : -download_file("sleep_brainvision.zip", unzip=True, to_path=target_path) +download_file("sleep_brainvision.zip", unzip=True, astype='example_data') +target_path = path_to_visbrain_data(folder='example_data') dfile = os.path.join(target_path, 'sub-02.vhdr') hfile = os.path.join(target_path, 'sub-02.hyp') diff --git a/examples/sleep/load_edf.py b/examples/gui_sleep/load_edf.py similarity index 79% rename from examples/sleep/load_edf.py rename to examples/gui_sleep/load_edf.py index a068a4878..b073b8ac0 100644 --- a/examples/sleep/load_edf.py +++ b/examples/gui_sleep/load_edf.py @@ -10,16 +10,14 @@ .. 
image:: ../../picture/picsleep/ex_LoadEDF.png """ import os -from visbrain import Sleep +from visbrain.gui import Sleep from visbrain.io import download_file, path_to_visbrain_data ############################################################################### # LOAD YOUR FILE ############################################################################### -current_path = path_to_visbrain_data() -target_path = os.path.join(current_path, 'sleep_data', 'edf') - -download_file('sleep_edf.zip', unzip=True, to_path=target_path) +download_file('sleep_edf.zip', unzip=True, astype='example_data') +target_path = path_to_visbrain_data(folder='example_data') dfile = os.path.join(target_path, 'excerpt2.edf') hfile = os.path.join(target_path, 'Hypnogram_excerpt2.txt') diff --git a/examples/sleep/load_elan.py b/examples/gui_sleep/load_elan.py similarity index 77% rename from examples/sleep/load_elan.py rename to examples/gui_sleep/load_elan.py index 9f59b2019..216e74047 100644 --- a/examples/sleep/load_elan.py +++ b/examples/gui_sleep/load_elan.py @@ -10,17 +10,15 @@ .. 
image:: ../../picture/picsleep/ex_LoadElan.png """ import os -from visbrain import Sleep +from visbrain.gui import Sleep from visbrain.io import download_file, path_to_visbrain_data ############################################################################### # LOAD YOUR FILE ############################################################################### -current_path = path_to_visbrain_data() -target_path = os.path.join(current_path, 'sleep_data', 'elan') - # Download dataset : -download_file("sleep_elan.zip", unzip=True, to_path=target_path) +download_file("sleep_elan.zip", unzip=True, astype='example_data') +target_path = path_to_visbrain_data(folder='example_data') dfile = os.path.join(target_path, 'sub-02.eeg') hfile = os.path.join(target_path, 'sub-02.hyp') diff --git a/examples/sleep/load_matlab.py b/examples/gui_sleep/load_matlab.py similarity index 83% rename from examples/sleep/load_matlab.py rename to examples/gui_sleep/load_matlab.py index 5a5b4b558..5475b6377 100644 --- a/examples/sleep/load_matlab.py +++ b/examples/gui_sleep/load_matlab.py @@ -13,17 +13,15 @@ import numpy as np from scipy.io import loadmat -from visbrain import Sleep +from visbrain.gui import Sleep from visbrain.io import download_file, path_to_visbrain_data ############################################################################### # LOAD YOUR FILE ############################################################################### -current_path = path_to_visbrain_data() -target_path = os.path.join(current_path, 'sleep_data', 'matlab') - # Download matlab file : -download_file("sleep_matlab.zip", unzip=True, to_path=target_path) +download_file("sleep_matlab.zip", unzip=True, astype='example_data') +target_path = path_to_visbrain_data(folder='example_data') # Load the matlab file : mat = loadmat(os.path.join(target_path, 's2_sleep.mat')) diff --git a/examples/sleep/load_rec.py b/examples/gui_sleep/load_rec.py similarity index 69% rename from examples/sleep/load_rec.py rename to 
examples/gui_sleep/load_rec.py index bed60a248..58e208b2c 100644 --- a/examples/sleep/load_rec.py +++ b/examples/gui_sleep/load_rec.py @@ -11,18 +11,14 @@ """ import os -from visbrain import Sleep +from visbrain.gui import Sleep from visbrain.io import download_file, path_to_visbrain_data ############################################################################### # LOAD YOUR FILE ############################################################################### -current_path = path_to_visbrain_data() -target_path = os.path.join(current_path, 'sleep_data', 'rec') - -# Download the rec file : -download_file('sleep_rec.zip', unzip=True, to_path=target_path, - remove_archive=True) +download_file('sleep_rec.zip', unzip=True, astype='example_data') +target_path = path_to_visbrain_data(folder='example_data') dfile = os.path.join(target_path, '1.rec') diff --git a/examples/sleep/load_using_mne.py b/examples/gui_sleep/load_using_mne.py similarity index 86% rename from examples/sleep/load_using_mne.py rename to examples/gui_sleep/load_using_mne.py index c1a2c936c..04da785b5 100644 --- a/examples/sleep/load_using_mne.py +++ b/examples/gui_sleep/load_using_mne.py @@ -17,17 +17,15 @@ """ import os from mne import io -from visbrain import Sleep +from visbrain.gui import Sleep from visbrain.io import download_file, path_to_visbrain_data ############################################################################### # LOAD YOUR FILE ############################################################################### -current_path = path_to_visbrain_data() -target_path = os.path.join(current_path, 'sleep_data', 'brainvision') - # Download dataset : -download_file("sleep_brainvision.zip", unzip=True, to_path=target_path) +download_file("sleep_brainvision.zip", unzip=True, astype='example_data') +target_path = path_to_visbrain_data(folder='example_data') dfile = os.path.join(target_path, 'sub-02.vhdr') hfile = os.path.join(target_path, 'sub-02.hyp') diff --git 
a/examples/gui_sleep/plot_hypnogram.py b/examples/gui_sleep/plot_hypnogram.py new file mode 100644 index 000000000..d76acff7a --- /dev/null +++ b/examples/gui_sleep/plot_hypnogram.py @@ -0,0 +1,32 @@ +""" +Matplotlib plot of an hypnogram +=============================== + +Plot a hypnogram using matplotlib. +""" +from visbrain.io import write_fig_hyp, read_hypno, download_file + +############################################################################### +# Plotting properties +############################################################################### +# Define plotting properties + +grid = True # display the grid +ascolor = True # plt as color or in black and white +file = None # Name of the file to be saved example : 'myfile.png' + +############################################################################### +# Hypnogram data +############################################################################### +# For the illustration, a hypnogram is downloaded + +path_to_hypno = download_file("s101_jbe.hyp", astype='example_data') +data, sf = read_hypno(path_to_hypno) + +############################################################################### +# Plot the hypnogram +############################################################################### +# Plot the hypnogram. 
If file is None, the window is displayed otherwise the +# figure is saved + +write_fig_hyp(data, sf, grid=grid, ascolor=ascolor, file=file) diff --git a/examples/sleep/replace_detection_basic.py b/examples/gui_sleep/replace_detection_basic.py similarity index 93% rename from examples/sleep/replace_detection_basic.py rename to examples/gui_sleep/replace_detection_basic.py index efc870870..490dc7f73 100644 --- a/examples/sleep/replace_detection_basic.py +++ b/examples/gui_sleep/replace_detection_basic.py @@ -23,15 +23,12 @@ import os import numpy as np -from visbrain import Sleep +from visbrain.gui import Sleep from visbrain.io import download_file, path_to_visbrain_data -# Get data path and where to save it : -current_path = path_to_visbrain_data() -target_path = os.path.join(current_path, 'sleep_data', 'edf') - # Download the file : -download_file('sleep_edf.zip', unzip=True, to_path=target_path) +download_file('sleep_edf.zip', unzip=True, astype='example_data') +target_path = path_to_visbrain_data(folder='example_data') # Get data path : dfile = os.path.join(target_path, 'excerpt2.edf') # data diff --git a/examples/sleep/replace_detection_wonambi.py b/examples/gui_sleep/replace_detection_wonambi.py similarity index 94% rename from examples/sleep/replace_detection_wonambi.py rename to examples/gui_sleep/replace_detection_wonambi.py index b332aff10..31cea8360 100644 --- a/examples/sleep/replace_detection_wonambi.py +++ b/examples/gui_sleep/replace_detection_wonambi.py @@ -27,18 +27,15 @@ import os import numpy as np -from visbrain import Sleep +from visbrain.gui import Sleep from visbrain.io import download_file, path_to_visbrain_data from wonambi.detect.spindle import DetectSpindle, detect_Moelle2011 from wonambi.detect.slowwave import DetectSlowWave, detect_Massimini2004 -# Get data path and where to save it : -current_path = path_to_visbrain_data() -target_path = os.path.join(current_path, 'sleep_data', 'edf') - # Download the file : 
-download_file('sleep_edf.zip', unzip=True, to_path=target_path) +download_file('sleep_edf.zip', unzip=True, astype='example_data') +target_path = path_to_visbrain_data(folder='example_data') # Get data path : dfile = os.path.join(target_path, 'excerpt2.edf') # data diff --git a/examples/objects/ex_brain_obj.py b/examples/objects/ex_brain_obj.py index ebd406f44..ab24adc16 100644 --- a/examples/objects/ex_brain_obj.py +++ b/examples/objects/ex_brain_obj.py @@ -1,25 +1,17 @@ """ -Brain object -============ +Brain object (BrainObj) : complete tutorial +=========================================== This example illustrate the main functionalities and inputs of the brain object i.e : - * Use a default brain template + * Use included MNI brain template * Select the hemisphere ('both', 'left', 'right') * Use a translucent or opaque brain + * Project source's activity on the surface of the brain * Parcellize the brain and send data to selected parcellates * Add fMRI activation and MEG inverse solution -List of the brain templates supported by default : - - * B1 - * B2 - * B3 - * Inflated (fsaverage) - * White - * Sphere - .. image:: ../../picture/picobjects/ex_brain_obj.png """ import numpy as np @@ -27,131 +19,222 @@ from visbrain.objects import BrainObj, ColorbarObj, SceneObj, SourceObj from visbrain.io import download_file, read_stc +############################################################################### +# Scene creation +############################################################################### +# The SceneObj is Matplotlib subplot like in which, you can add visbrain's +# objects. 
We first create the scene with a black background, a fixed size -print(""" -# ============================================================================= -# Default scene -# ============================================================================= -""") -CAM_STATE = dict(azimuth=0, # azimuth angle - elevation=90, # elevation angle - ) +# Scene creation +sc = SceneObj(bgcolor='black', size=(1400, 1000)) +# Colorbar default arguments. See `visbrain.objects.ColorbarObj` CBAR_STATE = dict(cbtxtsz=12, txtsz=10., width=.1, cbtxtsh=3., rect=(-.3, -2., 1., 4.)) -sc = SceneObj(camera_state=CAM_STATE, bgcolor=(.1, .1, .1), size=(1400, 1000)) - -print(""" -# ============================================================================= -# Translucent inflated brain template -# ============================================================================= -""") -b_obj_fs = BrainObj('inflated', translucent=True) -b_obj_fs.alpha = 0.03 +KW = dict(title_size=14., zoom=1.2) + +############################################################################### +# .. note:: +# The BrainObj can interact with sources (SourceObj). For example, if the +# source object represent intracranial data (e.g iEEG) those sources can +# be projected on the surface of the brain. This is an important feature +# because intracranial implantations is usually subject dependant and the +# projection is a good way to plot results across subjects. To illustrate +# this feature, we provide a set of intracranial MNI coordinates. 
+ +# Download iEEG coordinates and define some random data +mat = np.load(download_file('xyz_sample.npz', astype='example_data')) +xyz, subjects = mat['xyz'], mat['subjects'] +data = np.random.rand(xyz.shape[0]) + + +############################################################################### +# Basic brain using MNI template +############################################################################### +# By default, Visbrain include several MNI brain templates (B1, B3, B3, +# inflated, white and shere). + +# Translucent inflated BrainObj with both hemispheres displayed +b_obj_fs = BrainObj('inflated', translucent=True, hemisphere='both') +# Add the brain to the scene. Note that `row_span` means that the plot will +# occupy two rows (row 0 and 1) sc.add_to_subplot(b_obj_fs, row=0, col=0, row_span=2, - title='Translucent inflated brain template') + title='Translucent inflated brain template', **KW) -print(""" -# ============================================================================= -# Left and right hemispheres of the white template -# ============================================================================= -""") +############################################################################### +# Select the left or the right hemisphere +############################################################################### +# You can use the `hemisphere` input to select either the 'left', 'right' or +# 'both' hemispheres. 
+ +# Opaque left hemispehre of the white matter b_obj_lw = BrainObj('white', hemisphere='left', translucent=False) sc.add_to_subplot(b_obj_lw, row=0, col=1, rotate='right', - title='Left hemisphere') -b_obj_rw = BrainObj('white', hemisphere='both', translucent=True) - -# Define a source object and project data on the right hemisphere: -mat = np.load(download_file('xyz_sample.npz')) -xyz, subjects = mat['xyz'], mat['subjects'] -data = np.random.rand(xyz.shape[0]) -s_obj = SourceObj('Sources', xyz, data=data, cmap='inferno') + title='Left hemisphere', **KW) + +############################################################################### +# Projection iEEG data on the surface of the brain +############################################################################### +# As explain above, we define a source object and project the source's activity +# on the surface of the brain + +# First, define a brain object used for the projection +b_obj_proj = BrainObj('B3', hemisphere='both', translucent=False) +# Define the source object +s_obj = SourceObj('iEEG', xyz, data=data, cmap='inferno') +# Just for fun, color sources according to the data :) s_obj.color_sources(data=data) -b_obj_rw.project_sources(s_obj, cmap='viridis') -sc.add_to_subplot(s_obj, row=0, col=2) -sc.add_to_subplot(b_obj_rw, row=0, col=2, rotate='left', - title='Right hemisphere', use_this_cam=True) - -print(""" -# ============================================================================= -# Parcellize the brain (using all parcellates) -# ============================================================================= -""") -path_to_file1 = download_file('lh.aparc.a2009s.annot') +# Project source's activity +s_obj.project_sources(b_obj_proj, cmap='plasma') +# Finally, add the source and brain objects to the subplot +sc.add_to_subplot(s_obj, row=0, col=2, title='Project iEEG data', **KW) +sc.add_to_subplot(b_obj_proj, row=0, col=2, rotate='left', use_this_cam=True) +# Finally, add the colorbar : +cb_proj 
= ColorbarObj(s_obj, cblabel='Projection of niEEG data', **CBAR_STATE) +sc.add_to_subplot(cb_proj, row=0, col=3, width_max=200) + +############################################################################### +# .. note:: +# Here, we used s_obj.project_sources(b_obj) to project source's activity +# on the surface. We could also have used to b_obj.project_sources(s_obj) + +############################################################################### +# Parcellize the brain +############################################################################### +# Here, we parcellize the brain (using all parcellated included in the file). +# Note that those parcellates files comes from MNE-python. + +# Download the annotation file of the left hemisphere lh.aparc.a2009s.annot +path_to_file1 = download_file('lh.aparc.a2009s.annot', astype='example_data') +# Define the brain object (now you should know how to do it) b_obj_parl = BrainObj('inflated', hemisphere='left', translucent=False) -# print(b_obj_parl.get_parcellates(path_to_file1)) # available parcellates +# Print parcellates included in the file +print(b_obj_parl.get_parcellates(path_to_file1)) +# Finally, parcellize the brain and add the brain to the scene b_obj_parl.parcellize(path_to_file1) sc.add_to_subplot(b_obj_parl, row=1, col=1, rotate='left', - title='Parcellize using the Desikan Atlas') - -print(""" -# ============================================================================= -# Send data to parcellates -# ============================================================================= -""") -path_to_file2 = download_file('rh.aparc.annot') + title='Parcellize using the Desikan Atlas', **KW) + +############################################################################### +# .. 
note:: +# Those annotations files from MNE-python are only compatibles with the +# inflated, white and sphere templates + +############################################################################### +# Send data to parcellates +############################################################################### +# Again, we download an annotation file, but this time for the right hemisphere +# The difference with the example above, is that this time we send some data +# to some specific parcellates + +# Download the annotation file of the right hemisphere rh.aparc.annot +path_to_file2 = download_file('rh.aparc.annot', astype='example_data') +# Define the brain object (again... I know, this is redundant) b_obj_parr = BrainObj('inflated', hemisphere='right', translucent=False) -# print(b_obj_parr.get_parcellates(path_to_file2)) # available parcellates +# Print parcellates included in the file +print(b_obj_parr.get_parcellates(path_to_file2)) +# From the list of printed parcellates, we only select a few of them select_par = ['paracentral', 'precentral', 'fusiform', 'postcentral', 'superiorparietal', 'superiortemporal', 'inferiorparietal', 'inferiortemporal'] +# Now we define some data for each parcellates (one value per pacellate) data_par = [10., .1, 5., 7., 11., 8., 4., 6.] +# Parcellize the brain with the selected parcellates. The data range is +# between [.1, 11.]. 
Then, we use `vmin` and `vmax` to specify that we want +# every parcellates under vmin to be gray and every parcellates over vmax +# darkred b_obj_parr.parcellize(path_to_file2, select=select_par, hemisphere='right', - cmap='inferno', data=data_par, vmin=1., vmax=10, - under='gray', over='darkred') + cmap='viridis', data=data_par, clim=[.1, 11.], vmin=1., + vmax=10, under='gray', over='darkred') +# Add the brain object to the scene sc.add_to_subplot(b_obj_parr, row=1, col=2, rotate='right', - title='Send data to Desikan-Killiany parcellates') + title='Send data to Desikan-Killiany parcellates', **KW) +# Get the colorbar of the brain object and add it to the scene cb_parr = ColorbarObj(b_obj_parr, cblabel='Data to parcellates', **CBAR_STATE) sc.add_to_subplot(cb_parr, row=1, col=3, width_max=200) -print(""" -# ============================================================================= -# Add a custom brain template -# ============================================================================= -""") -mat = np.load(download_file('Custom.npz')) +############################################################################### +# Custom brain template +############################################################################### +# All of the examples above use MNI brain templates that are included inside +# visbrain. But you can define your own brain template using vertices and faces + +# Download the vertices, faces and normals +mat = np.load(download_file('Custom.npz', astype='example_data')) vert, faces, norms = mat['vertices'], mat['faces'], mat['normals'] -b_obj_custom = BrainObj('Custom', vertices=1000 * vert, faces=faces, +# By default, vertices are in millimeters so we multiply by 1000. +vert *= 1000. +# If your template represent a brain with both hemispheres, you can use the +# `lr_index` to specify which vertices belong to the left or the right +# hemisphere. 
Basically, `lr_index` is a boolean vector of shape (n_vertices,) +# where True reflect locatino of the left hemisphere and False, the right +# hemisphere +lr_index = vert[0, :] <= 0. +# Create the brain object and add it to the scene (this time it's a bit +# different) +b_obj_custom = BrainObj('Custom', vertices=vert, faces=faces, normals=norms, translucent=False) sc.add_to_subplot(b_obj_custom, row=2, col=0, title='Use a custom template', - rotate='left') - -print(""" -# ============================================================================= -# fMRI activation -# ============================================================================= -""") -file = download_file('lh.sig.nii.gz') + rotate='left', **KW) + +############################################################################### +# .. note:: +# If you doesn't have the normals, it's not a big deal because if no +# normals are provided, normals are going to be computed but it's a bit +# slower. Then, you can save your template using `BrainObj.save`. This can +# be convenient to reload your template later. + +############################################################################### +# fMRI activation +############################################################################### +# Add fMRI activations (included in a nii.gz file) to the surface. The provided +# file comes from MNE-python + +# Download the lh.sig.nii.gz file +file = download_file('lh.sig.nii.gz', astype='example_data') +# Define the [...] you know b_obj_fmri = BrainObj('inflated', translucent=False, sulcus=True) +# Add fMRI activation and hide every activation that is under 5. 
b_obj_fmri.add_activation(file=file, clim=(5., 20.), hide_under=5, cmap='viridis', hemisphere='left') sc.add_to_subplot(b_obj_fmri, row=2, col=1, title='Add fMRI activation', - rotate='left') - -print(""" -# ============================================================================= -# MEG inverse solution -# ============================================================================= -""") -file = read_stc(download_file('meg_source_estimate-rh.stc')) + rotate='left', **KW) + +############################################################################### +# MEG inverse solution +############################################################################### +# Finally, plot MEG inverse solution. The provided file comes from MNE-python + +# Dowload meg_source_estimate-rh.stc file and load the data +file = read_stc(download_file('meg_source_estimate-rh.stc', + astype='example_data')) +# Get the data of index 2 and the vertices data = file['data'][:, 2] vertices = file['vertices'] +# You know... b_obj_meg = BrainObj('inflated', translucent=False, hemisphere='right', sulcus=True) +# Add MEG data to the surface and hide every values under 5. b_obj_meg.add_activation(data=data, vertices=vertices, hemisphere='right', - smoothing_steps=5, clim=(7., 17.), hide_under=7., - cmap='plasma', vmin=9, vmax=15.) 
+ smoothing_steps=21, clim=(5., 17.), hide_under=5., + cmap='plasma') +# Add the brain and the colorbar object to the scene sc.add_to_subplot(b_obj_meg, row=2, col=2, title='MEG inverse solution', - rotate='right') + rotate='right', **KW) cb_parr = ColorbarObj(b_obj_meg, cblabel='MEG data', **CBAR_STATE) sc.add_to_subplot(cb_parr, row=2, col=3, width_max=200) -"""Link brain rotations -""" -sc.link((0, 1), (1, 2)) -# sc.link((0, 2), (1, 1)) - -"""Screenshot of the scene -""" +############################################################################### +# "Fun" stuff +############################################################################### +# You can link 3D rotations of subplots which means that if you rotate one +# brain, the other linked object inherit from the same rotations. Finally, you +# can take a screenshot of the scene, without the need to open the window. +# This can be particulary convenient when scenes are included inside loops to +# automatize figure generation. + +# Link the rotation of subplots (row=0, col=1) and (row=1, col=2) +# sc.link((0, 1), (1, 2)) +# Screenshot of the scene # sc.screenshot('ex_brain_obj.png', transparent=True) sc.preview() diff --git a/examples/objects/ex_combine_objects.py b/examples/objects/ex_combine_objects.py index 5a61530d7..89e72c013 100644 --- a/examples/objects/ex_combine_objects.py +++ b/examples/objects/ex_combine_objects.py @@ -17,7 +17,7 @@ """Get the path to Visbrain data and download deep sources """ -mat = np.load(download_file('xyz_sample.npz')) +mat = np.load(download_file('xyz_sample.npz', astype='example_data')) xyz, subjects = mat['xyz'], mat['subjects'] data = np.random.uniform(low=-1., high=1., size=(xyz.shape[0],)) @@ -38,7 +38,7 @@ fMRI activation ============================================================================= """) -file = download_file('lh.sig.nii.gz') +file = download_file('lh.sig.nii.gz', astype='example_data') b_obj_fmri = BrainObj('inflated', translucent=False, sulcus=True) 
b_obj_fmri.add_activation(file=file, clim=(5., 20.), hide_under=5, cmap='viridis', hemisphere='left') @@ -97,7 +97,7 @@ # Connectivity # ============================================================================= """) -arch = np.load(download_file('phase_sync_delta.npz')) +arch = np.load(download_file('phase_sync_delta.npz', astype='example_data')) nodes, edges = arch['nodes'], arch['edges'] c_count = ConnectObj('default', nodes, edges, select=edges > .7, color_by='count', antialias=True, line_width=2., diff --git a/examples/objects/ex_connectivity_obj.py b/examples/objects/ex_connectivity_obj.py index 03bcc81e8..5738dbee4 100644 --- a/examples/objects/ex_connectivity_obj.py +++ b/examples/objects/ex_connectivity_obj.py @@ -1,8 +1,12 @@ """ -Connectivity object -=================== +Connectivity object (ConnectObj) : complete tutorial +==================================================== -Illustration of the main functionalities and inputs of the connctivity object. +Illustration of the main functionalities and inputs of the connctivity object : + + * Color connectivity links according to connectivity strength + * Color connectivity links according to the number of connections per node + * Color connectivity links using custom colors .. 
image:: ../../picture/picobjects/ex_connect_obj.png """ @@ -11,54 +15,102 @@ from visbrain.objects import ConnectObj, SceneObj, SourceObj, BrainObj from visbrain.io import download_file -arch = np.load(download_file('phase_sync_delta.npz')) +############################################################################### +# Download data and define the scene +############################################################################### +# First, we download a connectivity dataset consisting of the location of each +# node (104) and the connectivity strength between every node (104, 104) + +# Download data +arch = np.load(download_file('phase_sync_delta.npz', astype='example_data')) nodes, edges = arch['nodes'], arch['edges'] +# Create the scene with a black background +sc = SceneObj(size=(1500, 600)) -sc = SceneObj(bgcolor=(.1, .1, .1)) +############################################################################### +# Color by connectivity strength +############################################################################### +# First, we download a connectivity dataset consisting of the location of each +# node (iEEG site) and the connectivity strength between those nodes. The first +# coloring method illustrated bellow consist in coloring connections based on +# a colormap -print(""" -# ============================================================================= -# Color by connectivity strength -# ============================================================================= -""") -c_default = ConnectObj('default', nodes, edges, select=edges > .7, - cmap='Spectral_r', line_width=2.) 
+# Coloring method +color_by = 'strength' +# Because we don't want to plot every connections, we only keep connections +# above .7 +select = edges > .7 +# Define the connectivity object +c_default = ConnectObj('default', nodes, edges, select=select, line_width=2., + cmap='Spectral_r', color_by=color_by) +# Then, we define the sources s_obj = SourceObj('sources', nodes, color='#ab4642', radius_min=15.) sc.add_to_subplot(c_default, title='Color by connectivity strength') +# And add connect, source and brain objects to the scene sc.add_to_subplot(s_obj) -sc.add_to_subplot(BrainObj('B1'), use_this_cam=True) +sc.add_to_subplot(BrainObj('B3'), use_this_cam=True) + +############################################################################### +# Color by number of connections per node +############################################################################### +# The next coloring method consist in set a color according to the number of +# connections per node. Here, we also illustrate that colors can also by +# `dynamic` (i.e stronger connections are opaque and weak connections are more +# translucent) -print(""" -# ============================================================================= -# Color by number of connections per node -# ============================================================================= -""") -c_count = ConnectObj('default', nodes, edges, select=edges > .7, - color_by='count', antialias=True, line_width=4., - dynamic=(.1, 1.)) +# Coloring method +color_by = 'count' +# Weak connections -> alpha = .1 // strong connections -> alpha = 1. +dynamic = (.1, 1.) 
+# Define the connectivity and source object +c_count = ConnectObj('default', nodes, edges, select=select, line_width=4., + color_by=color_by, antialias=True, dynamic=dynamic) s_obj_c = SourceObj('sources', nodes, color='olive', radius_min=10., symbol='square') +# And add connect, source and brain objects to the scene sc.add_to_subplot(c_count, row=0, col=1, title='Color by number of connections per node') sc.add_to_subplot(s_obj_c, use_this_cam=True, row=0, col=1) sc.add_to_subplot(BrainObj('B3'), use_this_cam=True, row=0, col=1) -print(""" -# ============================================================================= -# Custom colors -# ============================================================================= -""") +############################################################################### +# Custom colors +############################################################################### +# Finally, you can define your own colors which mean that for a specific +# connectivity strength, you can manually set a unique color. The provided +# dataset has values between [0., 1.] + +# First, we take a copy of the connectivity array edges_copy = edges.copy() -edges_copy[edges_copy >= .85] = 3.1 -edges_copy[np.logical_and(edges_copy >= .8, edges_copy < .9)] = 2.7 -edges_copy[np.logical_and(edges_copy >= .75, edges_copy < .8)] = 1.5 -ccol = {1.5: 'red', 2.7: 'blue', 3.1: 'orange', None: 'lightgray'} +# Then, we force edges to take fixed values +# ==================== ========= =========== +# Condition New value Color +# ==================== ========= =========== +# edges >= 0.8 4. red +# edges in [.78, .8[ 3. orange +# edges in [.74, .78[ 2. blue +# Others - lightgray +# ==================== ========= =========== +edges_copy[edges_copy >= .8] = 4. +edges_copy[np.logical_and(edges_copy >= .78, edges_copy < .8)] = 3. +edges_copy[np.logical_and(edges_copy >= .74, edges_copy < .78)] = 2. +# Now we use a dctionary to set one color per value. 
+ccol = { + None: 'lightgray', + 2.: 'blue', + 3.: 'orange', + 4.: 'red' +} + +# Define the connectivity and source objects c_cuscol = ConnectObj('default', nodes, edges_copy, select=edges > .7, custom_colors=ccol) s_obj_cu = SourceObj('sources', nodes, color='slategray', radius_min=10., symbol='ring') +# Add objects to the scene sc.add_to_subplot(c_cuscol, row=0, col=2, title='Custom colors') sc.add_to_subplot(s_obj_cu, row=0, col=2) sc.add_to_subplot(BrainObj('white'), use_this_cam=True, row=0, col=2) +# Finally, display the scene sc.preview() diff --git a/examples/objects/ex_cross_section_obj.py b/examples/objects/ex_cross_section_obj.py deleted file mode 100644 index 93bf6145b..000000000 --- a/examples/objects/ex_cross_section_obj.py +++ /dev/null @@ -1,33 +0,0 @@ -""" -Cross-section object -==================== - -Illustration and main functionalities and inputs of the cross-section object. - -.. image:: ../../picture/picobjects/ex_cs_obj.png -""" -from visbrain.objects import CrossSecObj, SceneObj -from visbrain.io import download_file - -sc = SceneObj() - -print(""" -# ============================================================================= -# Brodmann area -# ============================================================================= -""") -cs_brod = CrossSecObj('brodmann', interpolation='nearest', - coords=(70., 80., 90.)) -cs_brod.localize_source((-10., -15., 20.)) -sc.add_to_subplot(cs_brod, row=0, col=0, title='Brodmann area') - -print(""" -# ============================================================================= -# Nii.gz file -# ============================================================================= -""") -path = download_file('GG-853-GM-0.7mm.nii.gz') -cs_cust = CrossSecObj(path, coords=(0., 0., 0.), cmap='gist_stern') -sc.add_to_subplot(cs_cust, row=0, col=1, title='Nii.gz file') - -sc.preview() diff --git a/examples/objects/ex_im_tf_spec_obj.py b/examples/objects/ex_im_tf_spec_obj.py index ca5d25bcf..c12633f8c 100644 --- 
a/examples/objects/ex_im_tf_spec_obj.py +++ b/examples/objects/ex_im_tf_spec_obj.py @@ -4,81 +4,101 @@ Use and control image, time-frequency maps and spectrogram. + * Display and configure an image (color, interpolation) + * Compute and display time-frequency properties of a signal (spectrogram, + wavelet based time-frequency maps or multi-taper) + .. image:: ../../picture/picobjects/ex_imtfspec_obj.png """ import numpy as np from visbrain.objects import (ImageObj, TimeFrequencyObj, ColorbarObj, SceneObj) -CBAR_STATE = dict(cbtxtsz=12, txtsz=10., width=.2, rect=(-0.2, -2., 1., 4.)) -sc = SceneObj(size=(1200, 1000)) +############################################################################### +# Scene creation +############################################################################### +# First, we define the scene and a few colorbar properties (like font size, +# colorbar width...) -"""Create a 2-D image -""" +CBAR_STATE = dict(cbtxtsz=12, txtsz=10., width=.1, rect=(-0.2, -2., 1., 4.), + cbtxtsh=4.) +sc = SceneObj(size=(1000, 600)) + +############################################################################### +# Create sample data +############################################################################### +# Then we create some data for 1) images (a basic diagonale image) and 2) a +# sine signal with a main frequency at 25hz + +# Define a (10, 10) image n = 10 -time = np.r_[np.arange(n - 1), np.arange(n)[::-1]] -time = time.reshape(-1, 1) + time.reshape(1, -1) -time[np.diag_indices_from(time)] = 30. 
- -print(""" -# ============================================================================= -# Basic image -# ============================================================================= -""") -im_basic = ImageObj('im', time) -sc.add_to_subplot(im_basic, row=0, col=0, title='Basic image') - -print(""" -# ============================================================================= -# Interpolated image -# ============================================================================= -""") -im_interp = ImageObj('im', time, interpolation='bicubic') -sc.add_to_subplot(im_interp, row=0, col=1, title='Interpolated image') - -print(""" -# ============================================================================= -# Custom color properties -# ============================================================================= -""") -im_color = ImageObj('im', time, interpolation='bicubic', cmap='Spectral_r', - vmin=5., vmax=20., under='gray', over='darkred') -sc.add_to_subplot(im_color, row=0, col=2, title='Custom colors') -cb_im_color = ColorbarObj(im_color, cblabel='Image data', **CBAR_STATE) -sc.add_to_subplot(cb_im_color, row=0, col=3, width_max=150) +image = np.r_[np.arange(n - 1), np.arange(n)[::-1]] +image = image.reshape(-1, 1) + image.reshape(1, -1) +image[np.diag_indices_from(image)] = 30. -"""Define a 25hz sine -""" +# Define a 25hz sine n, sf = 512, 256 time = np.arange(n) / sf # time vector data = np.sin(2 * np.pi * 25. 
* time) + np.random.rand(n) -print(""" -# ============================================================================= -# Spectrogram -# ============================================================================= -""") -spec = TimeFrequencyObj('spec', data, sf, cmap='RdBu_r') -sc.add_to_subplot(spec, row=1, col=0, title='Spectrogram') - -print(""" -# ============================================================================= -# Time-frequency map -# ============================================================================= -""") +############################################################################### +# Plot an image +############################################################################### +# Most basic plot of the image without further customization + +im_basic = ImageObj('ex1', image) +sc.add_to_subplot(im_basic, row=0, col=0, title='Basic image', zoom=.9) + +############################################################################### +# Interpolated image +############################################################################### +# The image can also be interpolated. Checkout the complete list on the +# VisPy website (vispy.visuals.ImageVisual) + +im_interp = ImageObj('ex2', image, interpolation='bicubic') +sc.add_to_subplot(im_interp, row=0, col=1, title='Interpolated image', zoom=.9) + +############################################################################### +# Color properties +############################################################################### +# The ImageObj allow several custom color properties (such as color +# thresholding, colormap control...) 
+ +# Create the image object +im_color = ImageObj('ex3', image, interpolation='bilinear', cmap='Spectral_r', + vmin=5., vmax=20., under='gray', over='darkred') +sc.add_to_subplot(im_color, row=0, col=2, title='Custom colors', zoom=.9) +# Get the colorbar of the image +cb_im_color = ColorbarObj(im_color, cblabel='Image data', **CBAR_STATE) +sc.add_to_subplot(cb_im_color, row=0, col=3, width_max=150, zoom=.9) + +############################################################################### +# Spectrogram +############################################################################### +# Extract time-frequency properties using the Fourier transform + +spec = TimeFrequencyObj('spec', data, sf, method='fourier', cmap='RdBu_r') +sc.add_to_subplot(spec, row=1, col=0, title='Spectrogram', zoom=.9) + +############################################################################### +# Time-frequency map +############################################################################### +# Extract time-frequency properties using the wavelet convolution + tf = TimeFrequencyObj('tf', data, sf, method='wavelet') -sc.add_to_subplot(tf, row=1, col=1, title='Time-frequency map') - -print('\n-> Compute time-frequency map with windows') -print(""" -# ============================================================================= -# Multi-taper -# ============================================================================= -""") +sc.add_to_subplot(tf, row=1, col=1, title='Time-frequency map', zoom=.9) + +############################################################################### +# Multi-taper +############################################################################### +# Extract time-frequency properties using multi-taper (need installation of +# lspopt package) + tf_mt = TimeFrequencyObj('mt', data, sf, method='multitaper', overlap=.7, interpolation='bicubic', cmap='Spectral_r') -sc.add_to_subplot(tf_mt, row=1, col=2, title='Multi-taper') +sc.add_to_subplot(tf_mt, row=1, col=2, 
title='Multi-taper', zoom=.9) cb_tf_win = ColorbarObj(tf_mt, cblabel='Power', **CBAR_STATE) -sc.add_to_subplot(cb_tf_win, row=1, col=3, width_max=150) +sc.add_to_subplot(cb_tf_win, row=1, col=3, width_max=150, zoom=.9) +# Display the scene sc.preview() diff --git a/examples/objects/ex_roi_object.py b/examples/objects/ex_roi_object.py index 4f07c089f..49b282c8c 100644 --- a/examples/objects/ex_roi_object.py +++ b/examples/objects/ex_roi_object.py @@ -1,14 +1,29 @@ """ -Region Of Interest (ROI) object -=============================== +Region Of Interest object (RoiObj) : complete tutorial +====================================================== This example illustrate the main functionalities and inputs of the roi object i.e : - * Use either the Brodmann, AAL or Talairach atlases and select ROI from it + * Use either the Brodmann, AAL, Talairach or MIST atlases and select ROI * Color control of ROI * Analyse source's anatomical location using an RoiObj * Project source's activity onto ROI + * Define a custom ROI object + +List of supported ROI atlases : + + * Brodmann areas + * AAL (Automated Anatomical Labeling) + * Talairach + * `MIST `_ includes multiple + resolutions that can be explored + `here `_. + Inside visbrain, supported levels are 7, 12, 20, 36, 64, 122 and ROI. + +.. warning:: + ROI atlases are stored inside NumPy files that are downloaded when needed. + Every ROI files is downloaded to the ~/visbrain_data/roi folder .. 
image:: ../../picture/picobjects/ex_roi_obj.png """ @@ -17,32 +32,51 @@ from visbrain.objects import RoiObj, ColorbarObj, SceneObj, SourceObj, BrainObj from visbrain.io import download_file, path_to_visbrain_data, read_nifti -"""Get the path to Visbrain data and download deep sources -""" -vb_path = path_to_visbrain_data() -mat = np.load(download_file('xyz_sample.npz')) +############################################################################### +# Download data +############################################################################### +# In order to work, this example need to download some data i.e coordinates of +# intracranial sources and a parcellates atlas (MIST) to illustrate how to +# define your own RoiObj + +# Get the path to the ~/visbrain_data/example_data folder +vb_path = path_to_visbrain_data(folder='example_data') +# Download (x, y, z) coordinates of intracranial sources +mat = np.load(download_file('xyz_sample.npz', astype='example_data')) xyz, subjects = mat['xyz'], mat['subjects'] data = np.random.uniform(low=-1., high=1., size=(xyz.shape[0],)) +# Download the MIST parcellates +download_file('MIST_ROI.zip', unzip=True, astype='example_data') + +############################################################################### +# Scene creation +############################################################################### +# First, we need to create the scene that will host objects -# ============================================================================= -# MAIN SCENE -# ============================================================================= -print("-> Create a scene. By default, we fix the top view of the camera") -CAM_STATE = dict(azimuth=0, # azimuth angle - elevation=90, # elevation angle - scale_factor=200 * 100, - distance=800 * 100, - ) +# Scene creation with a dark background and a custom size +sc = SceneObj(size=(1400, 1000)) +# In this example, we also illustrate the use of the colorbar object. 
Hence, we +# centralize colorbar properties inside a dictionary CBAR_STATE = dict(cbtxtsz=12, txtsz=10., width=.1, cbtxtsh=3., rect=(-.3, -2., 1., 4.)) -sc = SceneObj(camera_state=CAM_STATE, bgcolor=(.1, .1, .1), size=(1400, 1000)) -# ============================================================================= -# FIND INDEX OF AN ROI -# ============================================================================= -"""Here, we illustrate how to find the integer index of the ROI to plot -""" -# Method 1 : save all ROI in an excel file and search manually the ROI +############################################################################### +# Find the index of a region of interest +############################################################################### +# ROIs are defined with two variables : 1) a volume which contains integers +# and 2) a vector of labels which link every integer inside the volume with a +# label (for example, with the brodmann atlas, the index 4 refers to the label +# brodmann 4). Here, we illustrate how to find the index of a region of +# interest + +##################################### +# **Method 1 :** export all ROI labels and indices in an excel file +# +# This first method load a ROI atlas then, we use the +# :class:`visbrain.objects.RoiObj.get_labels` method to save every related ROI +# informations in an excel file. This first method implies that you manually +# inspect in this file the index of the ROI that you're looking for. 
+ roi_to_find1 = RoiObj('brodmann') # Use Brodmann areas ref_brod = roi_to_find1.get_labels(vb_path) # Save Brodmann roi_to_find1('aal') # Switch to AAL @@ -50,44 +84,147 @@ roi_to_find1('talairach') # Switch to Talairach ref_tal = roi_to_find1.get_labels(vb_path) # Save Talairach +##################################### +# **Method 2 :** explicitly search where is the ROI that you're looking for +# +# Here, we use the :class:`visbrain.objects.RoiObj.where_is` method of the ROI +# object to explicitly search string patterns + # Method 2 : use the `where_is` method roi_to_find1('brodmann') # Switch to Brodmann idx_ba6 = roi_to_find1.where_is('BA6') # Find only BA6 print(ref_brod.loc[idx_ba6]) roi_to_find1('aal') # Switch to AAL -idx_sma = roi_to_find1.where_is(['Supp Motor Area', '(L)'], union=False) +idx_sma = roi_to_find1.where_is('Supp Motor Area') -# ============================================================================= -# BRAIN + BA6 -# ============================================================================= -print('\n-> Plot brodmann area 6') -b_obj = BrainObj('B1') +############################################################################### +# Extract the mesh of an ROI +############################################################################### +# Once you have the index of the ROI that you want to plot, use the +# :class:`visbrain.objects.RoiObj.select_roi` method to extract the mesh (i.e +# vertices and faces) of the ROI. 
Here, we illustrate this question with the +# brodmann 6 ROI + +# Load the brodmann 6 atlas, get the index of BA6 and extract the mesh roi_brod = RoiObj('brodmann') idx_ba6 = roi_brod.where_is('BA6') roi_brod.select_roi(select=idx_ba6) -roi_brod.get_labels(save_to_path=vb_path) # print available brodmann labels -sc.add_to_subplot(roi_brod, row=0, col=0, title='Brodmann area 6') -sc.add_to_subplot(b_obj, row=0, col=0, use_this_cam=True) - -# ============================================================================= -# MULTIPLE ROI + UNIQUE COLORS -# ============================================================================= -print('\n-> Select and plot multiple ROI with random unique colors') +# Define a brain object and add this brain and ROI objects to the scene +b_obj = BrainObj('B1') +sc.add_to_subplot(b_obj, row=0, col=0, use_this_cam=True, + title='Brodmann area 6 mesh') +sc.add_to_subplot(roi_brod, row=0, col=0) + +############################################################################### +# Set a unique color per ROI mesh +############################################################################### +# If you need, you can set a unique color per plotted ROI mesh. 
Here, we plot +# the left and right insula and thalamus and set a unique color to each + +# Load the AAL atlas roi_aal = RoiObj('aal') +# Select indicies 29, 30, 77 and 78 (respectively insula left, right and +# thalamus left and right) roi_aal.select_roi(select=[29, 30, 77, 78], unique_color=True, smooth=11) -roi_aal.get_labels(save_to_path=vb_path) # save available AAL labels -sc.add_to_subplot(roi_aal, row=0, col=1, +# Add the ROI to the scene +sc.add_to_subplot(roi_aal, row=0, col=1, rotate='top', zoom=.4, title='Select and plot multiple ROI with unique colors') -# ============================================================================= -# CUSTOM ROI + FIXED COLORS -# ============================================================================= -print("\n-> Use a custom roi_object and plot dorsal and ventral thalamus with " - "fixed colors") +############################################################################### +# Project source's data onto the surface of ROI mesh +############################################################################### +# Once you've extract the mesh of the ROI, you can explicitly specify to the +# :class:`visbrain.object.SourceObj.project_sources` to project the activity +# onto the surface of the ROI. 
Here, we extract the mesh of the default mode +# network (DMN) and project source's activity on it + +# Define the roi object using the MIST at resolution 7 +roi_dmn = RoiObj('mist_7') +roi_dmn.get_labels(save_to_path=vb_path) # save the labels +dmn_idx = roi_dmn.where_is('Default mode network') +roi_dmn.select_roi(select=dmn_idx) +# Define the source object and project source's data on the DMN +s_dmn = SourceObj('SecondSources', xyz, data=data) +s_dmn.project_sources(roi_dmn, cmap='plasma', clim=(-1., 1.), vmin=-.5, + vmax=.7, under='gray', over='red') +# Get the colorbar of the projection +cb_dmn = ColorbarObj(s_dmn, cblabel='Source activity', **CBAR_STATE) +# Add those objects to the scene +sc.add_to_subplot(roi_dmn, row=0, col=2, rotate='top', zoom=.4, + title="Project source's activity onto the DMN") +sc.add_to_subplot(cb_dmn, row=0, col=3, width_max=200) + + +############################################################################### +# Get anatomical informations of sources +############################################################################### +# If you defined sources (like intracranial recording sites, MEG source +# reconstruction...) you can use the SourceObj to defined those sources and +# then, the RoiObj to identify where are those sources located using the ROI +# volume. 
Here, we use the MIST at the `ROI` resolution to identify where are +# located those sources + +# Define the MIST object at the ROI level +roi_mist = RoiObj('mist_ROI') +# roi_mist.get_labels(save_to_path=vb_path) # save the labels +# Define the source object and analyse those sources using the MIST +s_obj = SourceObj('anat', xyz, data=data) +analysis = s_obj.analyse_sources(roi_mist) +# print(analysis) # anatomical informations are included in a dataframe +# Color those sources according to the anatomical informations +s_obj.color_sources(analysis=analysis, color_by='name_ROI') +# Add the source object to the scene +sc.add_to_subplot(s_obj, row=1, col=0, rotate='top', zoom=.6, + title='Get anatomical informations of sources') + +############################################################################### +# .. note:: +# In the example above, we analyse sources using only one ROI object. But +# you can also combine anatomical informations that come from several +# ROI. For example, if you want to analyse your sources using brodmann +# areas, AAL and MIST at level 7 : +# +# brod_roi = RoiObj('brodmann') +# +# brod_aal = RoiObj('aal') +# +# brod_mist = RoiObj('mist_7') +# +# s_obj.analyse_sources([brod_roi, brod_aal, brod_mist]) + +############################################################################### +# Select sources that are inside an ROI +############################################################################### +# Here, we illustrate how to only select sources that are inside the +# somatomotor network. 
+ +# Define the roi MIST object at level 7 +somato_str = 'Somatomotor network' +roi_somato = RoiObj('mist_7') +idx_somato = roi_somato.where_is(somato_str) +roi_somato.select_roi(idx_somato, translucent=True) +# Define the source object and analyse anatomical informations +s_somato = SourceObj('somato', xyz, data=data) +analysis = s_somato.analyse_sources(roi_somato, keep_only=somato_str) +s_somato.color_sources(data=data, cmap='bwr') +# Add those objects to the scene +sc.add_to_subplot(roi_somato, row=1, col=1, use_this_cam=True, rotate='top', + title='Display only sources inside the\nsomatomotor network', + zoom=.6) +sc.add_to_subplot(s_somato, row=1, col=1) + +############################################################################### +# Define and use your own region of interest +############################################################################### +# Visbrain comes with several ROI volumes, but you can define your own ROI +# object. To do this, you need a volume (i.e an array with three dimensions) +# and an array of labels. Here, for the sake of illustration, we explain how +# to rebuild the MIST at the ROI resolution. + # Download the MIST_ROI.zip archive. 
See the README inside the archive -download_file('MIST_ROI.zip', unzip=True) -nifti_file = path_to_visbrain_data('MIST_ROI.nii.gz') -csv_file = path_to_visbrain_data('MIST_ROI.csv') +nifti_file = path_to_visbrain_data(file='MIST_ROI.nii.gz', + folder='example_data') +csv_file = path_to_visbrain_data(file='MIST_ROI.csv', folder='example_data') # Read the .csv file : arr = np.genfromtxt(csv_file, delimiter=';', dtype=str) # Get column names, labels and index : @@ -103,63 +240,21 @@ # Get the volume and the hdr transformation : vol, _, hdr = read_nifti(nifti_file, hdr_as_array=True) # Define the ROI object and save it : -roi_custom = RoiObj('mist_roi', vol=vol, labels=label, index=roi_index, +roi_custom = RoiObj('custom_roi', vol=vol, labels=label, index=roi_index, hdr=hdr) # Find thalamus entries : idx_thalamus = roi_custom.where_is('THALAMUS') colors = {55: 'slateblue', 56: 'olive', 63: 'darkred', 64: '#ab4642'} -roi_custom.select_roi(idx_thalamus, smooth=11, roi_to_color=colors) -sc.add_to_subplot(roi_custom, row=0, col=2, +roi_custom.select_roi(idx_thalamus, roi_to_color=colors) +sc.add_to_subplot(roi_custom, row=1, col=2, zoom=.5, title='Plot dorsal and ventral thalamus with fixed colors') -# ============================================================================= -# ANATOMICAL LOCATION OF SOURCES -# ============================================================================= -print('\n-> Anatomical location of sources using an ROI object') -# Define the ROI object : -roi_tal = RoiObj('talairach') -roi_tal.select_roi(select=[681, 682, 808, 809]) -roi_tal.translucent = True -roi_tal.get_labels(save_to_path=vb_path) # save available Talairach labels -# Define a source object : -s_obj = SourceObj('FirstSources', xyz, data=data) -analysis = s_obj.analyse_sources(roi_tal) -s_obj.color_sources(analysis=analysis, color_by='gyrus') -sc.add_to_subplot(s_obj, row=1, col=0, - title='Anatomical location of sources') -sc.add_to_subplot(roi_tal, row=1, col=0, 
use_this_cam=True) - -# ============================================================================= -# SELECT SOURCES INSIDE ROI'S -# ============================================================================= -print('\n-> Select only sources inside BA 4, 6 and 8') -# Define the ROI object : -roi_brod_2 = RoiObj('brodmann') -roi_brod_2.select_roi(select=[4, 6, 8]) -roi_brod_2.translucent = True -# Define a source object : -s_obj_2 = SourceObj('SecondSources', xyz, data=data) -analysis = s_obj_2.analyse_sources(roi_brod_2, distance=20., - keep_only=['BA4', 'BA6', 'BA8']) -s_obj_2.color_sources(data=data) -sc.add_to_subplot(s_obj_2, row=1, col=1, - title='Plot only sources in BA4, 6 and 8') -sc.add_to_subplot(roi_brod_2, row=1, col=1, use_this_cam=True) - -# ============================================================================= -# CORTICAL PROJECTION OF SOURCE'S ACTIVITY -# ============================================================================= -print("\n-> Project source's activity onto ROI") -# Define the ROI object : -roi_brod_3 = RoiObj('aal') -roi_brod_3.select_roi(select=[29, 30, 77, 78], smooth=11) -# Define a source object : -s_obj_3 = SourceObj('SecondSources', xyz, data=data) -roi_brod_3.project_sources(s_obj_3, cmap='plasma', clim=(-1., 1.), vmin=-.5, - vmax=.7, under='gray', over='red') -cb_brod_3 = ColorbarObj(roi_brod_3, cblabel='Source activity', **CBAR_STATE) -sc.add_to_subplot(roi_brod_3, row=1, col=2, - title="Project source activity onto ROI") -sc.add_to_subplot(cb_brod_3, row=1, col=3, width_max=200) +############################################################################### +# .. note:: +# Once your RoiObj is defined, you can save it using +# :class:`visbrain.objects.RoiObj.save`. 
Once the object is saved, you can +# reload it using the name you've used (here we've used the `custom_roi` +# name which means that you can reload it later using RoiObj('custom_roi')) +# Finally, display the scene sc.preview() diff --git a/examples/objects/ex_source_obj.py b/examples/objects/ex_source_obj.py index 208e86fe4..6c8472786 100644 --- a/examples/objects/ex_source_obj.py +++ b/examples/objects/ex_source_obj.py @@ -1,6 +1,6 @@ """ -Source object -============= +Source object (SourceObj) : complete tutorial +============================================= This example illustrate the main functionalities and inputs of the source object i.e : @@ -8,136 +8,326 @@ * Add sources with text * Control the marker symbol and color * Mask sources - * Analyse anatomical locations of sources using either the Brodmann, AAL or - Talairach atlas + * Analyze anatomical locations of sources using region of interest * Color sources according to a data vector or to an anatomical location * Display only sources in the left // right hemisphere * Force source to fit to a mesh * Display only sources inside // outside a mesh +The source objects can interact with several other objects : + + * BrainObj : source's activity and repartition can be projected on the + surface of the brain + * RoiObj : source's activity and repartition can be projected on the + surface of region of interest. In addition, ROI objects can also be used + to get anatomical informations of sources + .. image:: ../../picture/picobjects/ex_source_obj.png """ import numpy as np -from vispy.geometry import create_sphere - -from visbrain.objects import SourceObj, SceneObj, ColorbarObj +from visbrain.objects import SourceObj, SceneObj, ColorbarObj, BrainObj, RoiObj from visbrain.io import download_file -""" -Load the xyz coordinates and corresponding subject name -""" -mat = np.load(download_file('xyz_sample.npz')) +############################################################################### +# .. 
warning:: +# To be clear with the vocabulary used, the SourceObj has a different +# meaning depending on the recording type. For scalp or intracranial EEG, +# sources reflect electrode, in MEG it could be sensors or source +# reconstruction. + +############################################################################### +# Download data +############################################################################### +# To illustrate the functionalities of the source object, here, we download an +# intracranial dataset consisting of 583 deep recording sites. + +# Download the file and get the (x, y, z) MNI coordinates of the 583 recording +# sites +mat = np.load(download_file('xyz_sample.npz', astype='example_data')) xyz = mat['xyz'] n_sources = xyz.shape[0] text = ['S' + str(k) for k in range(n_sources)] -"""Create a scene. By default, we fix the top view of the camera -""" +############################################################################### +# Scene creation +############################################################################### +# As said in other tutorials, the scene is equivalent with Matplotlib subplots. 
+# So here, we define a scene that is going to centralize objects in subplots + +# Define the default camera state used for each subplot CAM_STATE = dict(azimuth=0, # azimuth angle elevation=90, # elevation angle - scale_factor=180 # ~distance to the camera + scale_factor=180 # distance to the camera ) -CBAR_STATE = dict(cbtxtsz=12, txtsz=10., width=.5, rect=(1., -2., 1., 4.)) -sc = SceneObj(camera_state=CAM_STATE, size=(1200, 1000)) +S_KW = dict(camera_state=CAM_STATE) +# Create the scene +sc = SceneObj(size=(1600, 1000)) +CBAR_STATE = dict(cbtxtsz=12, txtsz=10., width=.5, cbtxtsh=3., + rect=(1., -2., 1., 4.)) -"""Create the most basic source object -""" -s_obj_basic = SourceObj('Basic', xyz, text=text, text_bold=True, - text_color='yellow') -sc.add_to_subplot(s_obj_basic, row=0, col=0, - title='Default configuration with text') +############################################################################### +# Basic source object +############################################################################### +# The first example consist of only plotting the source, without any +# modifications of the inputs -"""Control the color and the symbol -""" -s_obj_col = SourceObj('S2', xyz, color='slategray', symbol='square') -sc.add_to_subplot(s_obj_col, row=0, col=1, title='Change color and symbol') +# Create the source objects and add this object to the scene +s_obj_basic = SourceObj('Basic', xyz) +sc.add_to_subplot(s_obj_basic, row=0, col=0, title='Default configuration', + **S_KW) -"""Mask sources that have a x coordinate between [-20, 20] and color it into -orange -""" +############################################################################### +# Text, symbol and color control +############################################################################### +# Now, we attach text to each source (bold and yellow) and use a gray squares +# symbol + +# The color definition could either be uniform (e.g 'green', 'blue'...), a list +# of colors or an array of RGB(A) 
colors +# s_color = 'blue' # uniform definition +s_color = ["#D72638"] * 100 + ["#3772FF"] * 100 + ["#008148"] * 200 + \ + ["#C17D11"] * 183 # list definition +# Define the source object and add this object to the scene +s_obj_col = SourceObj('S2', xyz, text=text, text_size=4., text_color='yellow', + text_bold=True, color=s_color, symbol='square') +sc.add_to_subplot(s_obj_col, row=0, row_span=2, col=1, + title='Text, color and symbol', **S_KW) + +############################################################################### +# Assigning data to sources and radius control +############################################################################### +# This example illustrate how to assign some data to sources and how to control +# the dynamic of radius sources + +# Create some random data of shape (n_sources,) +rnd_data = np.random.uniform(low=-100, high=100, size=(n_sources,)) +# Control the radius range of sources +radius_min = 7. +radius_max = 25. +s_color = np.random.uniform(0., 1., (n_sources, 3)) # array definition +# Define the source object and add this object to the scene +s_rad = SourceObj('rad', xyz, color=s_color, data=rnd_data, + radius_min=radius_min, radius_max=radius_max) +sc.add_to_subplot(s_rad, row=0, col=2, title='Assigning data to sources ', + **S_KW) + +############################################################################### +# Mask sources +############################################################################### +# Sometimes, it could be usefull to mask some sources and display those sources +# with a different color (using `mask_color`). + +# Define the mask for sources that have a x coordinate between [-20, 20] and +# set the color of those masked sources to orange mask = np.logical_and(xyz[:, 0] >= -20., xyz[:, 0] <= 20.) -data = np.random.rand(n_sources) -s_obj_mask = SourceObj('S3', xyz, mask=mask, mask_color='orange', - color='slateblue', data=data, radius_min=2., - radius_max=20.) 
-sc.add_to_subplot(s_obj_mask, row=0, col=2, - title='Mask sources between [-20., 20.]') - -"""Analyse where sources are located using the Brodmann ROI template and color -sources according to the Brodmann area -""" +mask_color = 'orange' +s_obj_mask = SourceObj('S3', xyz, mask=mask, mask_color=mask_color, + color=s_color, data=rnd_data, radius_min=radius_min, + radius_max=radius_max) +sc.add_to_subplot(s_obj_mask, row=0, col=3, + title='Masked sources between [-20., 20.]\nare orange', + **S_KW) + +############################################################################### +# Get anatomical informations of sources +############################################################################### +# The region of interest object (RoiObj) is basically a volume where each voxel +# is known to be part of an anatomical region. Hence, you can define the RoiObj +# and use it to get the anatomical informations of each source + +# First, create a basic source object s_obj_ba = SourceObj('S4', xyz) -df_brod = s_obj_ba.analyse_sources(roi_obj='brodmann') +# Then, we define a region of interest object (RoiObj). We use brodmann areas +# but you should take a look to the complete tutorial on ROIs because visbrain +# povides several templates (Brodmann, AAL, Talairach and MIST) +roi_obj = RoiObj('brodmann') +# If you want to see labels associated with the brodmann areas, uncomment the +# following line +# print(roi_obj.get_labels()) +# Now, analyse sources using the RoiObj. The argument returned by the +# `SourceObj.analyse_sources` method is a Pandas dataframe +df_brod = s_obj_ba.analyse_sources(roi_obj=roi_obj) +# The dataframe contains a column `brodmann` which is the name of the +# associated brodmann area. 
Hence, we use it to color sources according to the +# name of brodmann area s_obj_ba.color_sources(analysis=df_brod, color_by='brodmann') +# Finally, add the object to the scene sc.add_to_subplot(s_obj_ba, row=1, col=0, - title='Color sources according to Brodmann area') + title='Color sources according to\n Brodmann area', **S_KW) + +############################################################################### +# Color sources, using predefined colors, according to the AAL location +############################################################################### +# Similarly to the example above, here, we color sources according to the +# Automated Anatomical Labeling (AAL) """Analyse where sources are located using the AAL ROI template and color only the precentral left (green), right (orange), insula right (blue). Others ROI are turn into white. """ +# Create a basic source object s_obj_aal = SourceObj('S5', xyz) +# Define the RoiObj using AAL and analyse sources locations +roi_obj = RoiObj('aal') +# print(roi_obj.get_labels()) df_aal = s_obj_aal.analyse_sources(roi_obj='aal') +# Then, define one color per ROI and color others in gray aal_col = {'Precentral (R)': 'green', 'Precentral (L)': 'orange', 'Insula (R)': 'blue'} +color_others = 'gray' +# Color sources and add the object to the scene s_obj_aal.color_sources(analysis=df_aal, color_by='aal', roi_to_color=aal_col, - color_others='white') -sc.add_to_subplot(s_obj_aal, row=1, col=1, - title='Color only sources in precentral and insula') + color_others=color_others) +sc.add_to_subplot(s_obj_aal, row=1, col=2, + title='Color only sources in\n precentral and insula', + **S_KW) -"""Use a random data vector to color sources -""" -data = np.random.uniform(low=-10., high=10., size=(n_sources,)) -s_obj_data = SourceObj('S3', xyz, data=data) -s_obj_data.color_sources(data=data, cmap='plasma', clim=(-10, 10), vmin=-8., - vmax=8., under='gray', over='red') -sc.add_to_subplot(s_obj_data, row=1, col=2, title='Color 
sources using data') +############################################################################### +# Color sources according to data +############################################################################### +# A more simple example, but it's also possible to color your sources +# according to a data vector + +# Define the source object +s_obj_data = SourceObj('S3', xyz, data=rnd_data, radius_min=radius_min, + radius_max=radius_max) +# Color sources according to a data vector +s_obj_data.color_sources(data=rnd_data, cmap='viridis', clim=(-100, 100),) +# Get the colorbar of the source object cb_data = ColorbarObj(s_obj_data, cblabel='Random data', border=False, **CBAR_STATE) -sc.add_to_subplot(cb_data, row=1, col=3, width_max=60) +# Add the source and colorbar objects to the scene +sc.add_to_subplot(s_obj_data, row=1, col=3, title='Color sources using data', + **S_KW) +sc.add_to_subplot(cb_data, row=1, col=4, width_max=60) -"""Display only sources in the left hemisphere -""" +############################################################################### +# Project source's activity on the surface of the brain +############################################################################### +# As explained in the BrainObj tutorial, source's activity can be projected on +# the surface of the brain which can be particularly convenient for represent +# source's activity across several intracranially implanted subjects + +# Define the source and brain objects +s_proj = SourceObj('proj', xyz, data=rnd_data) +b_proj = BrainObj('B3', translucent=False) +# Project source's activity on the surface of the brain +s_proj.project_sources(b_proj, cmap='inferno') +sc.add_to_subplot(b_proj, row=2, col=0, title="Project source's activity") + +############################################################################### +# Project masked source's activity on the surface of the brain +############################################################################### +# This is 
the exact same example as above, except that we also project masked +# sources + +# Define the source and brain objects +s_mask = SourceObj('mask', xyz, data=rnd_data, mask=mask, mask_color='gray') +b_mask = BrainObj('B3', translucent=False) +# Project source's activity on the surface of the brain +s_mask.project_sources(b_mask, cmap='viridis', radius=15.) +sc.add_to_subplot(b_mask, row=2, col=1, + title="Project masked source's activity") + +############################################################################### +# Project source's activity on the surface of the DMN +############################################################################### +# Here, we first use the MIST ROI to get represent the default mode network and +# then, we project source's activity onto the surface of the DMN + +# Define the source and brain objects +s_dmn = SourceObj('dmn', xyz, data=rnd_data, mask=mask, mask_color='gray') +b_mask = BrainObj('B3') +# Define the MIST roi object +roi_dmn = RoiObj('mist_7') +# print(roi_dmn.get_labels()) +# Get the index of the DMN and get the mesh +dmn_idx = roi_dmn.where_is('Default mode network') +roi_dmn.select_roi(dmn_idx) +# Project source's activity on the surface of the DMN +s_dmn.project_sources(roi_dmn, cmap='viridis', radius=15.) 
+sc.add_to_subplot(b_mask, row=2, col=2, use_this_cam=True, row_span=2, + title="Project source's activity\non the DMN") +sc.add_to_subplot(roi_dmn, row=2, col=2, row_span=2) + +############################################################################### +# Project source's repartition on the surface of the brain +############################################################################### +# Similarly to the example above, we project here the repartition of sources +# which mean the number of contributing sources per vertex + +# Define the source and brain objects +s_rep = SourceObj('proj', xyz, data=rnd_data) +b_rep = BrainObj('B3', translucent=False) +# Project source's activity on the surface of the brain +s_rep.project_sources(b_rep, cmap='viridis', project='repartition') +# Get the colorbar of the brain object +cb_rep = ColorbarObj(b_rep, cblabel='Number of sources\nper vertex', + border=False, **CBAR_STATE) +sc.add_to_subplot(b_rep, row=2, col=3, title="Project source's repartition") +sc.add_to_subplot(cb_rep, row=2, col=4) + +############################################################################### +# Display only sources in the left hemisphere +############################################################################### +# In this little example, we illustrate how to only display sources in the left +# hemisphere + +# Define the source object s_obj_left = SourceObj('S_left', xyz, color='#ab4642') +# Select only sources that belong to the left hemisphere and add the object to +# the scene s_obj_left.set_visible_sources('left') -sc.add_to_subplot(s_obj_left, row=2, col=0, - title='Display sources in left hemisphere') +sc.add_to_subplot(s_obj_left, row=3, col=0, + title='Display sources in left hemisphere', **S_KW) -"""Create a sphere using VisPy -""" -sphere = create_sphere(rows=100, cols=100, radius=50) -sphere_vertices = sphere.get_vertices() +############################################################################### +# Force sources to 
fit to the surface of the brain +############################################################################### +# First, we force sources to fit to the white matter of the brain. Then, we use +# the Talairach ROI to identify which sources belong to the left or right +# hemisphere and color them accordingly -"""Force sources to fit on the vertices of the sphere. Then, we color sources -according to the hemisphere (left=purple, right=yellow) -""" +# Define the Brain and Source objects s_obj_fit = SourceObj('Fit', xyz, symbol='diamond') -s_obj_fit.fit_to_vertices(sphere_vertices) +b_obj_fit = BrainObj('white', translucent=True) +# Get the vertices of the brain object and force sources to fit to those +# vertices +b_obj_vert = b_obj_fit.vertices +s_obj_fit.fit_to_vertices(b_obj_vert) +# Analyse source's anatomical location using the Talairach atlas df_tal = s_obj_fit.analyse_sources(roi_obj='talairach') +# Color sources according to the hemisphere (left='purple', right='yellow') s_obj_fit.color_sources(analysis=df_tal, color_by='hemisphere', roi_to_color={'Left': 'purple', 'Right': 'yellow'}) -sc.add_to_subplot(s_obj_fit, row=2, col=1, - title="Force sources to fit on a sphere") +# Finally, add those objects to the scene +sc.add_to_subplot(s_obj_fit, row=3, col=1, + title="Force sources to fit to the\nsurface of the brain") +sc.add_to_subplot(b_obj_fit, row=3, col=1, use_this_cam=True) + +############################################################################### +# Display only sources inside the brain +############################################################################### +# In this little example, we illustrate how to only display sources inside the +# brain -"""Use the same sphere to display only sources that are inside -""" s_obj_inside = SourceObj('In', xyz, symbol='cross', color='firebrick') -s_obj_inside.set_visible_sources('inside', sphere_vertices) -sc.add_to_subplot(s_obj_inside, row=2, col=2, - title='Display only sources inside a sphere') 
+s_obj_inside.set_visible_sources('inside', b_obj_vert) +sc.add_to_subplot(s_obj_inside, row=3, col=3, + title='Display only sources inside the brain', **S_KW) -"""If you need, you can link the rotation off all cameras but this can -considerably slow down visualization updates -""" -# sc.link(-1) +############################################################################### +# Take a screenshot of the scene +############################################################################### -"""Screenshot of the scene -""" +# Screenshot of the scene # sc.screenshot('ex_source_obj.png', transparent=True) +# If you need, you can link the rotation off all cameras but this can +# considerably slow down visualization updates +# sc.link(-1) -"""Display the scene -""" +# Display the scene sc.preview() diff --git a/examples/objects/ex_topo_obj.py b/examples/objects/ex_topo_obj.py new file mode 100644 index 000000000..000bc0bce --- /dev/null +++ b/examples/objects/ex_topo_obj.py @@ -0,0 +1,155 @@ +""" +Topoplot object (TopoObj) : complete tutorial +============================================= + +This example illustrate the main functionalities and inputs of the topoplot +object i.e : + + * Use channel names or position to identify which channels are used + * Main color and appearance properties + * Display levels (either regulary spaced and based on a colormap or custom + levels with custom colors) + * Display connectivity + +.. image:: ../../picture/picobjects/ex_topo_obj.png +""" +import numpy as np + +from visbrain.objects import TopoObj, ColorbarObj, SceneObj +from visbrain.io import download_file + +############################################################################### +# Download data +############################################################################### +# First, we download the data. A directory should be created to +# ~/visbrain_data/example_data. This example contains beta power for several +# channels defined by there xy coordinates. 
+ +path = download_file('topoplot_data.npz', astype='example_data') +mat = np.load(path) +xy, data = mat['xyz'], mat['data'] +channels = [str(k) for k in range(len(data))] +# Plotting properties shared across topoplots and colorbar : +kw_top = dict(margin=15 / 100, chan_offset=(0., 0.1, 0.), chan_size=10) +kw_cbar = dict(cbtxtsz=12, txtsz=10., width=.3, txtcolor='black', cbtxtsh=1.8, + rect=(0., -2., 1., 4.), border=False) + +############################################################################### +# Creation of the scene +############################################################################### +# Create a scene with a white background + +sc = SceneObj(bgcolor='white', size=(1600, 900)) + +############################################################################### +# Topoplot based on channel names +############################################################################### +# First definition using channel names only + +# Define some EEG channels and set one data value per channel +ch_names = ['C3', 'C4', 'Cz', 'Fz', 'Pz'] +data_names = [10, 20, 30, 10, 10] +# Create the topoplot and the associated colorbar +t_obj = TopoObj('topo', data_names, channels=ch_names, **kw_top) +cb_obj = ColorbarObj(t_obj, cblabel='Colorbar label', **kw_cbar) +# Add both objects to the scene +# Add the topoplot and the colorbar to the scene : +sc.add_to_subplot(t_obj, row=0, col=0, title='Definition using channel names', + title_color='black', width_max=400) +sc.add_to_subplot(cb_obj, row=0, col=1, width_max=100) + +############################################################################### +# Topoplot based on channel (x, y) coordinates +############################################################################### +# Second definition using channel (x, y) coordinates + +# Create the topoplot and the object : +t_obj_1 = TopoObj('topo', data, channels=channels, xyz=xy, cmap='bwr', + clim=(2., 3.), **kw_top) +cb_obj_1 = ColorbarObj(t_obj_1, cblabel='Beta 
power', **kw_cbar) +# Add the topoplot and the colorbar to the scene : +sc.add_to_subplot(t_obj_1, row=0, col=2, title_color='black', width_max=400, + title='Definition using channel coordinates') +sc.add_to_subplot(cb_obj_1, row=0, col=3, width_max=100) + +############################################################################### +# Custom extrema colors +############################################################################### +# Here, we use custom colors for extrema i.e every values under `vmin=2.` is +# going to be set to the color 'slateblue' and every values over `vmax=2.8` to +# 'green' + +# Define the topoplot object +t_obj_2 = TopoObj('topo', data, xyz=xy, cmap='inferno', vmin=2., + under='slateblue', vmax=2.8, over='olive', + **kw_top) +# Get the colorbar based on the color properties of the topoplot : +cb_obj_2 = ColorbarObj(t_obj_2, cblabel='Beta power', **kw_cbar) +# Add the topoplot and the colorbar to the scene : +sc.add_to_subplot(t_obj_2, row=0, col=4, title='Custom extrema colors', + title_color='black', width_max=400) +sc.add_to_subplot(cb_obj_2, row=0, col=5, width_max=100) + +############################################################################### +# Connect channels +############################################################################### +# To connect channels together, we need a 2D array of shape +# (n_channels, n_channels) describing connectivity strength between channels. +# Note that the `visbrain.objects.TopoObj.connect` method basically use the +# `visbrain.objects.ConnectObj` object. + +# Create the topoplot and colorbar objects +t_obj_3 = TopoObj('topo', data, xyz=xy, cmap='Spectral_r', **kw_top) +cb_obj_3 = ColorbarObj(t_obj_3, cblabel='Beta power', **kw_cbar) +# Create the 2D array of connectivity links : +connect = (data.reshape(-1, 1) + data.reshape(1, -1)) / 2. 
+# Select only connectivity links with a connectivity strength under 1.97 +select = connect < 1.97 +# Connect the selected channels : +t_obj_3.connect(connect, select=select, cmap='inferno', antialias=True, + line_width=4.) +# Add the topoplot and the colorbar to the scene : +sc.add_to_subplot(t_obj_3, row=1, col=4, title='Display connectivity', + title_color='black', width_max=400) +sc.add_to_subplot(cb_obj_3, row=1, col=5, width_max=100) + +############################################################################### +# Topoplot with regularly spaced levels +############################################################################### +# Here, we create a topoplot with 10 regularly spaced levels. The color of each +# level is based on the 'bwr' colormap. Note that in order to work properly, +# you need to install `scikit-image <https://scikit-image.org/>`_ + +# Create the topoplot object : +t_obj_4 = TopoObj('topo', data, xyz=xy, levels=10, level_colors='bwr', + **kw_top) +# Get the colorbar based on the color properties of the topoplot : +cb_obj_4 = ColorbarObj(t_obj_4, cblabel='Beta power', **kw_cbar) +# Add the topoplot and the colorbar to the scene : +sc.add_to_subplot(t_obj_4, row=1, col=0, title='Regulary spaced levels', + title_color='black', width_max=400) +sc.add_to_subplot(cb_obj_4, row=1, col=1, width_max=100) + +############################################################################### +# Topoplot with custom levels +############################################################################### +# The only difference with the previous plot is that levels are not regularly +# spaced anymore but they are manually defined, just as color. 
+ +# First level is going to be red, the second one green and the last one blue +level_colors = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) +levels = [2., 2.2, 2.5] +# Create the topoplot object : +t_obj_5 = TopoObj('topo', data, xyz=xy, levels=levels, + level_colors=level_colors, chan_mark_symbol='cross', + line_width=7., chan_mark_color='gray', cmap='plasma', + line_color='#3498db', **kw_top) +# Get the colorbar based on the color properties of the topoplot : +cb_obj_5 = ColorbarObj(t_obj_5, cblabel='Beta power', **kw_cbar) +# Add the topoplot and the colorbar to the scene : +sc.add_to_subplot(t_obj_5, row=1, col=2, title='Custom levels', + title_color='black') +sc.add_to_subplot(cb_obj_5, row=1, col=3, width_max=100) + +# Finally, display the scene +sc.preview() diff --git a/examples/objects/ex_tspic_obj.py b/examples/objects/ex_tspic_obj.py new file mode 100644 index 000000000..39ce937f2 --- /dev/null +++ b/examples/objects/ex_tspic_obj.py @@ -0,0 +1,89 @@ +""" +Time-series and pictures 3D (TimeSeries3DObj & Picture3DObj): complete tutorial +=============================================================================== + +Illustration of the main features of 3d time-series and 3d pictures. This +include : + + * Define and plot of spatially distributed time-series (TS) and pictures + * Masking TS and pictures + * Custom properties (i.e line width, colors, size...) + +.. image:: ../../picture/picobjects/ex_tspic_obj.png +""" +import numpy as np + +from scipy.signal import spectrogram + +from visbrain.objects import TimeSeries3DObj, Picture3DObj, SceneObj +from visbrain.utils import generate_eeg + + +############################################################################### +# Define sample data and scene +############################################################################### +# 3D time-series and pictures need to be attached to sources define in a 3D +# space. 
+ +# Define 5 sources +xyz = np.array([[0, .5, 0], [1, -4, 3], [10, 2, 8], [1, 7, 12], [-4, 5, 6]]) +n_sources = xyz.shape[0] +# Define the time-series of those 5 sources +ts_data = generate_eeg(sf=512., n_channels=n_sources, noise=5., smooth=5)[0] +# Compute the spectrogram of the 5 time-series +pic_data = [] +for k in range(n_sources): + pic_data += [spectrogram(ts_data[k, :], 512., nperseg=128, noverlap=10)[2]] +pic_data = np.asarray(pic_data) +clim = (.01 * pic_data.min(), .01 * pic_data.max()) + +# Scene definition +sc = SceneObj() + +############################################################################### +# Basic plot +############################################################################### +# Basic plot without further customizations + +# Define time-series and picture objects +ts_0 = TimeSeries3DObj('t0', ts_data, xyz, antialias=True) +pic_0 = Picture3DObj('p0', pic_data, xyz, clim=clim) +# Add those objects to the scene +sc.add_to_subplot(ts_0, row=0, col=0, zoom=.2, title='Basic 3D TS plot') +sc.add_to_subplot(pic_0, row=0, col=1, zoom=.5, title='Basic 3D pictures plot') + +############################################################################### +# Subset selection +############################################################################### +# Select a subset of time-series and pictures using either a list of integers +# or booleans + +# Define select variables using either integers or boolean values +s_ts = [0, 2, 4] +s_pic = [True, False, True, False, True] +# Define time-series and picture objects +ts_1 = TimeSeries3DObj('t1', ts_data, xyz, antialias=True, select=s_ts) +pic_1 = Picture3DObj('p1', pic_data, xyz, clim=clim, select=s_pic, cmap='bwr') +# Add those objects to the scene +sc.add_to_subplot(ts_1, row=1, col=0, zoom=.2, title='Select a TS subset') +sc.add_to_subplot(pic_1, row=1, col=1, zoom=.5, + title='Select a subject of pictures') + 
+############################################################################### +# Shape and color properties +############################################################################### +# Customize colors, time-series amplitude and width, pictures height and width + +# Define time-series and picture objects +ts_2 = TimeSeries3DObj('t2', ts_data, xyz, antialias=True, color='slateblue', + line_width=2., ts_amp=4, ts_width=10) +pic_2 = Picture3DObj('p2', pic_data, xyz, clim=clim, cmap='Spectral_r', + pic_width=10, pic_height=15) +# Add those objects to the scene +sc.add_to_subplot(ts_2, row=2, col=0, zoom=.2, + title='Custom TS color and shape') +sc.add_to_subplot(pic_2, row=2, col=1, zoom=.5, + title='Custom picture color and shape') + +# Finally, display the scene +sc.preview() diff --git a/examples/objects/ex_volume_obj.py b/examples/objects/ex_volume_obj.py index 6844de1de..45e80ee03 100644 --- a/examples/objects/ex_volume_obj.py +++ b/examples/objects/ex_volume_obj.py @@ -1,8 +1,13 @@ """ -Volume object -============= +Volume object (VolumeObj) : complete tutorial +============================================= -Illustration of the main functionalities and inputs of the volume object. +Illustration of the main functionalities and inputs of the volume object : + + * Volume rendering methods (MIP, translucent, additive, Iso) + * Colormap choice + * Select volume levels + * Load an MRI (nii.gz) file .. 
image:: ../../picture/picobjects/ex_vol_obj.png """ @@ -10,78 +15,80 @@ from visbrain.io import download_file -sc = SceneObj() +# Define the scene +sc = SceneObj(size=(1000, 600)) + +############################################################################### +# MIP rendering +############################################################################### +# MIP rendering with an opaque fire colormap -print(""" -# ============================================================================= -# MIP rendering -# ============================================================================= -""") v_obj_mip = VolumeObj('brodmann', method='mip', cmap='OpaqueFire') -sc.add_to_subplot(v_obj_mip, row=0, col=0, title='MIP rendering') +sc.add_to_subplot(v_obj_mip, row=0, col=0, title='MIP rendering', zoom=.7) + +############################################################################### +# Translucent rendering +############################################################################### +# Translucent rendering with a translucent fire colormap -print(""" -# ============================================================================= -# Translucent rendering -# ============================================================================= -""") v_obj_trans = VolumeObj('aal', method='translucent', cmap='TransFire') -sc.add_to_subplot(v_obj_trans, row=0, col=1, title='Translucent rendering') +sc.add_to_subplot(v_obj_trans, row=0, col=1, title='Translucent rendering', + zoom=.7) + +############################################################################### +# Additive rendering +############################################################################### +# Additive rendering with a translucent grays colormap -print(""" -# ============================================================================= -# Additive rendering -# ============================================================================= -""") v_obj_add = VolumeObj('talairach', method='additive', 
cmap='TransGrays') -sc.add_to_subplot(v_obj_add, row=0, col=2, title='Additive rendering') - -print(""" -# ============================================================================= -# Iso rendering -# ============================================================================= -""") -v_obj_iso = VolumeObj('brodmann', method='iso', cmap='OpaqueFire') -sc.add_to_subplot(v_obj_iso, row=0, col=3, title='Iso rendering') - -print(""" -# ============================================================================= -# Select ROI -# ============================================================================= -""") +sc.add_to_subplot(v_obj_add, row=0, col=2, title='Additive rendering', zoom=.6) + +############################################################################### +# Iso rendering +############################################################################### +# Iso rendering. Note that here, no threshold are used + +v_obj_iso = VolumeObj('brodmann', method='iso') +sc.add_to_subplot(v_obj_iso, row=0, col=3, title='Iso rendering', zoom=.7) + +############################################################################### +# Volume thresholding +############################################################################### +# Similarly to the example above, here, we use a threshold to cut the volume + +path = download_file('GG-853-WM-0.7mm.nii.gz', astype='example_data') +vol_obj_th = VolumeObj(path, method='iso', threshold=.1) +sc.add_to_subplot(vol_obj_th, row=1, col=0, title='Threshold selection', + zoom=.7) + +############################################################################### +# Select volume levels +############################################################################### +# The volume contains certain levels and you can use the `select` input to +# select only certain levels + v_obj_select = VolumeObj('brodmann', method='iso', cmap='OpaqueFire', select=[4, 6]) -sc.add_to_subplot(v_obj_select, row=1, col=0, 
+sc.add_to_subplot(v_obj_select, row=1, col=1, zoom=.7, title='Select Brodmann area 4 and 6') +############################################################################### +# MRI file : mip rendering +############################################################################### -print(""" -# ============================================================================= -# Custom nii.gz file -# ============================================================================= -""") -path = download_file('GG-853-GM-0.7mm.nii.gz') +path = download_file('GG-853-GM-0.7mm.nii.gz', astype='example_data') v_obj_nii = VolumeObj(path, method='mip', cmap='OpaqueGrays', threshold=.7) -sc.add_to_subplot(v_obj_nii, row=1, col=1, title='Custom nii.gz file') +sc.add_to_subplot(v_obj_nii, row=1, col=2, title='MRI file (MIP rendering)', + zoom=.7) +############################################################################### +# MRI file : translucent rendering +############################################################################### -print(""" -# ============================================================================= -# Second nii.gz file -# ============================================================================= -""") -path = download_file('GG-853-WM-0.7mm.nii.gz') +path = download_file('GG-853-WM-0.7mm.nii.gz', astype='example_data') vol_obj_sec = VolumeObj(path, method='translucent', cmap='TransGrays') -sc.add_to_subplot(vol_obj_sec, row=1, col=2, title='Second nii.gz file') - -print(""" -# ============================================================================= -# Threshold selection -# ============================================================================= -""") -path = download_file('GG-853-WM-0.7mm.nii.gz') -vol_obj_th = VolumeObj(path, method='iso', threshold=.1) -sc.add_to_subplot(vol_obj_th, row=1, col=3, title='Threshold selection') - +sc.add_to_subplot(vol_obj_sec, row=1, col=3, zoom=.7, + title='MRI file (translucent rendering)') +# 
Finally, display the scene sc.preview() diff --git a/examples/sleep/README.txt b/examples/sleep/README.txt deleted file mode 100644 index 3036eff30..000000000 --- a/examples/sleep/README.txt +++ /dev/null @@ -1,4 +0,0 @@ -Sleep Examples --------------- - -Examples demonstrating how to use Sleep and how to load files. \ No newline at end of file diff --git a/examples/topo/00_basic_topoplot.py b/examples/topo/00_basic_topoplot.py deleted file mode 100644 index 2cf8bdfde..000000000 --- a/examples/topo/00_basic_topoplot.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -Basic topoplot -============== - -Basic topographic plot based on channel names. - -.. image:: ../../picture/pictopo/ex_basic_topoplot.png -""" -from visbrain import Topo - -# Create a topoplot instance : -t = Topo() - -# Create a list of channels, data, title and colorbar label : -name = 'Topo_1' -channels = ['C3', 'C4', 'Cz', 'Fz', 'Pz'] -data = [10, 20, 30, 10, 10] -title = 'Basic topoplot illustration' -cblabel = 'Colorbar label' - -# Add a central topoplot : -t.add_topoplot(name, data, channels=channels, title=title, cblabel=cblabel) - -# Show the window : -t.show() diff --git a/examples/topo/01_use_custom_coordinates.py b/examples/topo/01_use_custom_coordinates.py deleted file mode 100644 index 565331cba..000000000 --- a/examples/topo/01_use_custom_coordinates.py +++ /dev/null @@ -1,31 +0,0 @@ -""" -Use custom coordinates -====================== - -Display topographic plots using custom coordinates. - -Download topoplot data (topoplot_data.npz) : -https://www.dropbox.com/s/m76y3p0fyj6lxht/topoplot_data.npz?dl=1 - -.. 
image:: ../../picture/pictopo/ex_custom_coordinates.png -""" -import numpy as np - -from visbrain import Topo -from visbrain.io import download_file, path_to_visbrain_data - -# Load the data : -download_file('topoplot_data.npz') -mat = np.load(path_to_visbrain_data('topoplot_data.npz')) -xyz, data = mat['xyz'], mat['data'] -channels = [str(k) for k in range(len(data))] - -# Create a topoplot instance : -t = Topo() - -# Add the topoplot defined using xyz coordinates : -t.add_topoplot('Topo_1', data, xyz=xyz, channels=channels, title='Custom data', - cmap='viridis', cblabel='Beta power') - -# Show the window : -t.show() diff --git a/examples/topo/02_grid_topoplot.py b/examples/topo/02_grid_topoplot.py deleted file mode 100644 index d0f2b7dbe..000000000 --- a/examples/topo/02_grid_topoplot.py +++ /dev/null @@ -1,45 +0,0 @@ -""" -Grid topoplot -============= - -Display topographic plots into a grid. - -.. image:: ../../picture/pictopo/ex_grid_topoplot.png -""" -from visbrain import Topo - -# Create a topoplot instance : -t = Topo() - -# Create a list of channels, data, title and colorbar label : -channels = ['C3', 'C4', 'Cz', 'Fz', 'Pz'] -data = [10, 20, 30, 10, 10] -kwargs = {'title_size': 2., 'cb_txt_size': 2, 'margin': 20 / 100, - 'chan_offset': (0., 0.08, 0.)} - -# Position (0, 0) -t.add_topoplot('Topo_1', data, channels=channels, title='Topo_1', - cmap='viridis', row=0, col=0, **kwargs) - -# Position (0, 1) -t.add_topoplot('Topo_2', data, channels=channels, title='Topo_2', - cmap='plasma', row=0, col=1, **kwargs) - -# Position (1, 0) -t.add_topoplot('Topo_3', data, channels=channels, title='Topo_3', - cmap='Spectral_r', row=1, col=0, **kwargs) - -# Position (1, 1) -t.add_topoplot('Topo_4', data, channels=channels, title='Topo_4', - cmap='gist_stern', row=1, col=1, **kwargs) - -# Position (2, 0:1) -t.add_topoplot('Topo_5', data, channels=channels, title='Topo_5', - cmap='Blues', row=2, col=0, col_span=2, **kwargs) - -# Position (0:3, 2) -t.add_topoplot('Topo_6', 
data, channels=channels, title='Topo_6', - cmap='Reds', row=0, col=2, row_span=3, **kwargs) - -# Show the window : -t.show() diff --git a/examples/topo/03_shared_colorbar.py b/examples/topo/03_shared_colorbar.py deleted file mode 100644 index 9b157e5ad..000000000 --- a/examples/topo/03_shared_colorbar.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -Shared colorbar -=============== - -Add a shared colorbar across topographic plot. - -If you have several topographic plot that shared the same color properties -(such as limits, colormap...) it might be redundant to have one colorbar per -topoplot. In that case, use the add_shared_colorbar() to have one shared -colorbar for all subplots. - -.. image:: ../../picture/pictopo/ex_shared_colorbar.png -""" -import numpy as np -from visbrain import Topo - -# Create a topoplot instance : -t = Topo() - -# Define the list of channels : -channels = ['C1', 'C2', 'C3', 'C4', 'Cz', 'FCz', 'CPz', 'Fz', 'Pz', 'FC1', - 'FC2', 'FC3', 'FC4', 'CP3', 'CP4', 'CP1', 'CP2', 'F3', 'F4', 'P3', - 'P4', 'C5', 'C6'] -n_channels = len(channels) - -# Generate 4 random datasets : -data_1 = np.random.randn(n_channels) -data_2 = np.random.randn(n_channels) -data_3 = np.random.randn(n_channels) -data_4 = np.random.randn(n_channels) - -"""It is more save to define one dictionnary with all of the color properties -and to use the same dictionnary across all topoplots. - -""" -kwargs = {'cmap': 'viridis', 'clim': (-1.02, 1.01), 'vmin': -.81, - 'under': 'gray', 'vmax': .85, 'over': 'red'} - -""" -Create the (2, 2) topoplot grid with the random datasets. The colorbar is -disabled for each one of those subplots. 
-""" -t.add_topoplot('Topo_1', data_1, channels=channels, cbar=False, row=0, col=0, - **kwargs) -t.add_topoplot('Topo_2', data_2, channels=channels, cbar=False, row=0, col=1, - **kwargs) -t.add_topoplot('Topo_3', data_3, channels=channels, cbar=False, row=1, col=0, - **kwargs) -t.add_topoplot('Topo_4', data_4, channels=channels, cbar=False, row=1, col=1, - **kwargs) - -"""Finally, add the shared colorbar. This colorbar is on the last column -(col=2) and take all rows (row_span=2). - -The rect input is a tuple defined by (x_start, y_start, width, heigth). This -variable can be used to : - * Translate the colorbar (using x_start and y_start) to be closer to the - subplots. - * Scale the colorbar to have a nice proportion between width and heigth. -""" -t.add_shared_colorbar('Shared', col=2, row_span=2, rect=(0.1, -2, 1.6, 4), - cblabel='Shared colorbar', **kwargs) - -# Show the window : -t.show() diff --git a/examples/topo/04_topoplot_plotting_properties.py b/examples/topo/04_topoplot_plotting_properties.py deleted file mode 100644 index 775394151..000000000 --- a/examples/topo/04_topoplot_plotting_properties.py +++ /dev/null @@ -1,63 +0,0 @@ -""" -Plotting properties -=================== - -Display topographic plots in a grid using several plotting properties. - -Download topoplot data (topoplot_data.npz) : -https://www.dropbox.com/s/m76y3p0fyj6lxht/topoplot_data.npz?dl=1 - -.. 
image:: ../../picture/pictopo/ex_topoplot_plotting_properties.png -""" -import numpy as np - -from visbrain import Topo -from visbrain.io import download_file, path_to_visbrain_data - -# Load the data : -download_file('topoplot_data.npz') -mat = np.load(path_to_visbrain_data('topoplot_data.npz')) -xyz, data = mat['xyz'], mat['data'] -channels = [str(k) for k in range(len(data))] - -kwargs = {'title_size': 3., 'cb_txt_size': 2, 'margin': 15 / 100, - 'chan_offset': (0., 1.1, 0.), 'chan_size': 1.5} - -# Create a topoplot instance : -t = Topo() - -# Topoplot with 10 regulary spaced levels : -t.add_topoplot('Topo_1', data, xyz=xyz, channels=channels, - title='Regulary spaced levels', cmap='viridis', levels=10, - level_colors='Spectral_r', cblabel='Beta power', - title_color='#ab4642', **kwargs) - -""" -Topoplot with custom levels : - * red : 2. - * green : 2.2 - * blue : 2.5 -""" -level_colors = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) -t.add_topoplot('Topo_2', data, xyz=xyz, title='Custom levels', cmap='plasma', - levels=[2., 2.2, 2.5], level_colors=level_colors, - cblabel='Beta power', line_color='#3498db', line_width=7., - chan_mark_color='gray', bgcolor='#34495e', col=1, - title_color='white', chan_mark_symbol='cross', cbar=False, - **kwargs) - -# Fixed colorbar limits : -t.add_topoplot('Topo_3', data, xyz=xyz, title='Fixed colorbar limits', - cmap='Spectral_r', clim=(2., 2.8), channels=channels, - chan_txt_color='white', chan_mark_symbol='x', - bgcolor=(.1, .1, .1), title_color='white', - line_color='white', row=1, **kwargs) - -# Colorbar properties : -t.add_topoplot('Topo_4', data, xyz=xyz, title='Colorbar properties', - cmap='gist_gray', vmin=2., under='slateblue', - vmax=2.8, over='darkred', title_color='#34495e', col=1, row=1, - cblabel='Custom color for extrema', **kwargs) - -# Show the window : -t.show() diff --git a/examples/topo/05_topoplot_and_connectivity.py b/examples/topo/05_topoplot_and_connectivity.py deleted file mode 100644 index 
093924e7f..000000000 --- a/examples/topo/05_topoplot_and_connectivity.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Connectivity -============ - -Add connectivity to the topoplot. - -Download topoplot data (topoplot_data.npz) : -https://www.dropbox.com/s/m76y3p0fyj6lxht/topoplot_data.npz?dl=1 - -.. image:: ../../picture/pictopo/ex_connectivity.png -""" -import numpy as np - -from visbrain import Topo -from visbrain.io import download_file, path_to_visbrain_data - -# Load the data : -download_file('topoplot_data.npz') -mat = np.load(path_to_visbrain_data('topoplot_data.npz')) -xyz, data = mat['xyz'], mat['data'] -channels = [str(k) for k in range(len(data))] -n_channels = len(channels) - -"""The connectivity is defined by an upper triangle array of shape -(n_channels, n_channels). -In addition, to improve the selection of edges to display, use the boolean -array `c_select` input parameter to select edges that need to be drawn. -""" -connect = (data.reshape(-1, 1) + data.reshape(1, -1)) / 2. -select = connect < 1.97 - -# Create a topoplot instance : -t = Topo() - -# Add the topoplot defined using xyz coordinates : -t.add_topoplot('Topo_1', data, xyz=xyz, title='Connectivity example', - cblabel='Beta power', c_connect=connect, c_select=select, - cmap='viridis', c_linewidth=4., c_cmap='plasma') - -# Show the window : -t.show() diff --git a/examples/topo/README.txt b/examples/topo/README.txt deleted file mode 100644 index d97e8128b..000000000 --- a/examples/topo/README.txt +++ /dev/null @@ -1,4 +0,0 @@ -Topoplot Examples ------------------ - -Examples demonstrating how to display and configure topographic plot. 
\ No newline at end of file diff --git a/setup.py b/setup.py index 173977fdb..0b698e47e 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ import os from setuptools import setup, find_packages -__version__ = "0.4.2" +__version__ = "0.4.3" NAME = 'visbrain' AUTHOR = "Visbrain developpers" MAINTAINER = "Etienne Combrisson" @@ -17,12 +17,7 @@ "v" + __version__ + ".tar.gz" # Data path : HERE = os.path.abspath(os.path.dirname(__file__)) -PACKAGE_DATA = {'visbrain.data.templates': ['B1.npz', 'B2.npz', 'B3.npz'], - 'visbrain.data.roi': ['aal.npz', 'brodmann.npz', - 'talairach.npz'], - 'visbrain.data.topo': ['eegref.npz'], - 'visbrain.data.icons': ['*.svg'], - } +PACKAGE_DATA = {} def read(fname): @@ -57,24 +52,26 @@ def read(fname): "matplotlib>=1.5.5", "pyqt5", "pillow", - "Click" + "PyOpenGL" ], + extras_require={ + 'full': ["mne", "tensorpac", "pandas", "xlrd", "scikit-image", + "nibabel"], + 'sleep': ["mne", "tensorpac"], + 'roi': ["pandas", "xlrd"], + 'topo': ["scikit-image"] + }, dependency_links=[], classifiers=["Development Status :: 3 - Alpha", 'Intended Audience :: Science/Research', 'Intended Audience :: Education', 'Intended Audience :: Developers', 'Topic :: Scientific/Engineering :: Visualization', - "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", "Operating System :: MacOS", "Operating System :: POSIX :: Linux", "Operating System :: Microsoft :: Windows", "Natural Language :: English" - ], - entry_points=''' - [console_scripts] - visbrain_sleep=visbrain.cli:cli_sleep - visbrain_fig_hyp=visbrain.cli:cli_fig_hyp - visbrain_sleep_stats=visbrain.cli:cli_sleep_stats - ''') + ]) diff --git a/visbrain/__init__.py b/visbrain/__init__.py index 105443e08..25dd5ffe2 100644 --- a/visbrain/__init__.py +++ b/visbrain/__init__.py @@ -12,32 +12,22 @@ * Sleep : visualize polysomnographic data and hypnogram edition. 
* Signal : data mining module for signal inspection. * Figure : figure-layout for high-quality publication-like figures. -* Colorbar : a colorbar editor * Topo : topographic representations See http://visbrain.org/ for a complete and step-by step documentation """ -import sys +import sys as _sys -# Import modules : -from .brain import Brain -from .colorbar import Colorbar -from .figure import Figure -from .sleep import Sleep -from .topo import Topo -from .signal import Signal - -__all__ = ['Brain', 'Colorbar', 'Figure', 'Signal', 'Sleep', 'Topo'] -__version__ = "0.4.2" +__version__ = "0.4.3" # PyQt5 crash if an error occured. This small function fix it for all modules # to retrieve the PyQt4 behavior : -def pyqt4_behavior(type, value, tback): +def _pyqt4_behavior(type, value, tback): """Retrieve PyQt4 behavior if an error occured.""" - sys.__excepthook__(type, value, tback) + _sys.__excepthook__(type, value, tback) -sys.excepthook = pyqt4_behavior +_sys.excepthook = _pyqt4_behavior diff --git a/visbrain/pyqt_module.py b/visbrain/_pyqt_module.py similarity index 91% rename from visbrain/pyqt_module.py rename to visbrain/_pyqt_module.py index b05d178da..8813b534f 100644 --- a/visbrain/pyqt_module.py +++ b/visbrain/_pyqt_module.py @@ -1,19 +1,19 @@ -"""All visbrain modules based on PyQt5 should inherit from PyQtModule.""" +"""All visbrain modules based on PyQt5 should inherit from _PyQtModule.""" import os import sip + from PyQt5 import QtGui import logging from .utils import set_widget_size, set_log_level from .config import PROFILER, CONFIG -from .io import (get_data_path, path_to_tmp, - clean_tmp, path_to_visbrain_data) +from .io import (path_to_tmp, download_file, clean_tmp, path_to_visbrain_data) sip.setdestroyonexit(False) logger = logging.getLogger('visbrain') -class PyQtModule(object): +class _PyQtModule(object): """Shared methods across PyQt based Visbrain modules. 
Parameters @@ -83,7 +83,9 @@ def show(self): self.QuickSettings.setCurrentIndex(0) # Set icon (if possible) : if isinstance(self._module_icon, str): - path_ico = get_data_path(folder='icons', file=self._module_icon) + path_ico = path_to_visbrain_data(self._module_icon, 'icons') + if not os.path.isfile(path_ico): + download_file(self._module_icon, astype='icons') if os.path.isfile(path_ico): app_icon = QtGui.QIcon() app_icon.addFile(path_ico) diff --git a/visbrain/brain/interface/gui/vbicon.png b/visbrain/brain/interface/gui/vbicon.png deleted file mode 100644 index 544dc9043..000000000 Binary files a/visbrain/brain/interface/gui/vbicon.png and /dev/null differ diff --git a/visbrain/cli.py b/visbrain/cli.py deleted file mode 100644 index e7fd14a28..000000000 --- a/visbrain/cli.py +++ /dev/null @@ -1,161 +0,0 @@ -"""Command-line control of visbrain.""" -from __future__ import print_function -import click -import os.path -import numpy as np - -from visbrain import Sleep -from visbrain.io import (write_fig_hyp, read_hypno, oversample_hypno, - write_csv) -from visbrain.utils import sleepstats - -############################################################################### -# SLEEP -############################################################################### - -# -------------------- SLEEP GUI -------------------- - - -@click.command() -@click.option('-d', '--data', default=None, - help='Name of the polysomnographic file to load.', - type=click.Path(exists=True)) -@click.option('-h', '--hypno', default=None, - help='Name of the hypnogram file to load.', - type=click.Path(exists=True)) -@click.option('-c', '--config_file', default=None, - help='Path to a configuration file.', - type=click.Path(exists=True)) -@click.option('-a', '--annotations', default=None, - help='Path to an annotation file.', - type=click.Path(exists=True)) -@click.option('--downsample', default=100., - help='Down-sampling frequency. 
Default is 100.') -@click.option('--use_mne', default=False, - help='Load your file using MNE-python. Default is False.', - type=bool) -@click.option('--preload', default=True, - help='Preload data in memory. Default is True', type=bool) -@click.option('--show', default=True, - help='Display GUI. Default is True', type=bool) -def cli_sleep(data, hypno, config_file, annotations, downsample, use_mne, - preload, show): - """Open the graphical user interface of Sleep.""" - # File conversion : - if data is not None: - data = click.format_filename(data) - if hypno is not None: - hypno = click.format_filename(hypno) - if config_file is not None: - config_file = click.format_filename(config_file) - if annotations is not None: - annotations = click.format_filename(annotations) - s = Sleep(data=data, hypno=hypno, downsample=downsample, - use_mne=use_mne, preload=preload, config_file=config_file, - annotations=annotations) - if show: - s.show() - -# -------------------- HYPNOGRAM TO FIGURE -------------------- - - -@click.command() -@click.option('-h', '--hypno', default=None, - help='Name of the hypnogram file to load (with extension).', - type=click.Path(exists=True)) -@click.option('-g', '--grid', default=False, - help='Add X and Y grids to figure. Default is False.', - type=bool) -@click.option('-c', '--color', default=False, - help='Get colored figure. Default is False (black and white).', - type=bool) -@click.option('-o', '--outfile', default=None, - help='Output filename (with extension).', - type=click.Path(exists=False)) -@click.option('--dpi', default=300, - help='Dots per inches (resolution). 
Default is 300.', - type=int) -def cli_fig_hyp(hypno, grid, color, outfile, dpi): - """Create hypnogram figure from hypnogram file.""" - # File conversion : - if hypno is not None: - hypno = click.format_filename(hypno) - if outfile is not None: - outfile = click.format_filename(outfile) - ext = os.path.splitext(outfile)[1][1:].strip().lower() - if ext == '': - outfile = outfile + '.png' - else: - outfile = hypno + '.png' - # Load hypnogram - hypno, sf_hyp = read_hypno(hypno) - # Bad cases (e.g. EDF files from DreamBank.net) - if sf_hyp < 1: - mult = int(np.round(len(hypno) / sf_hyp)) - hypno = oversample_hypno(hypno, mult) - sf_hyp = 1 - # Create figure - write_fig_hyp(hypno, sf=sf_hyp, file=outfile, start_s=0., grid=grid, - ascolor=color, dpi=dpi) - print('Hypnogram figure saved to:', outfile) - - -# -------------------- SLEEP STATS -------------------- - -@click.command() -@click.option('-h', '--hypno', default=None, - help='Name of the hypnogram file to load (with extension).', - type=click.Path(exists=True)) -@click.option('-o', '--outfile', default=None, - help='Output filename (with extension - *.csv). If None, sleep \ - statistics will only be displayed and not saved into a file', - type=click.Path(exists=False)) -def cli_sleep_stats(hypno, outfile): - """Compute sleep statistics from hypnogram file and export them in csv. - - Sleep statistics specifications: - - * Time in Bed (TIB) : total duration of the hypnogram. - * Total Dark Time (TDT) : duration of the hypnogram from beginning - to last period of sleep. - * Sleep Period Time (SPT) : duration from first to last period of sleep. - * Wake After Sleep Onset (WASO) : duration of wake periods within SPT - * Sleep Efficiency (SE) : TST / TDT * 100 (%). - * Total Sleep Time (TST) : SPT - WASO. - * W, N1, N2, N3 and REM: sleep stages duration. - * % (W, ... REM) : sleep stages duration expressed in percentages of TDT. - * Latencies: latencies of sleep stages from the beginning of the record. 
- - (All values except SE and percentages are expressed in minutes) - """ - # File conversion : - if hypno is not None: - hypno_path = click.format_filename(hypno) - if outfile is not None: - outfile = click.format_filename(outfile) - # Check extension - ext = os.path.splitext(outfile)[1][1:].strip().lower() - if ext == '': - outfile = outfile + '.csv' - - # Load hypnogram - hypno, sf_hyp = read_hypno(hypno_path) - if sf_hyp < 1: - mult = int(np.round(len(hypno) / sf_hyp)) - hypno = oversample_hypno(hypno, mult) - sf_hyp = 1 - - # Get sleep stats - stats = sleepstats(hypno, sf_hyp=sf_hyp) - stats['File'] = hypno_path - print('\nSLEEP STATS\n===========') - keys, val = [''] * len(stats), [''] * len(stats) - # Fill table : - for num, (k, v) in enumerate(stats.items()): - print(k, '\t', str(v)) - # Remember variables : - keys[int(num)] = k - val[int(num)] = str(v) - if outfile is not None: - write_csv(outfile, zip(keys, val)) - print('===========\nCSV file saved to:', outfile) diff --git a/visbrain/colorbar/__init__.py b/visbrain/colorbar/__init__.py deleted file mode 100644 index aaca841aa..000000000 --- a/visbrain/colorbar/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .colorbar import Colorbar diff --git a/visbrain/colorbar/colorbar.py b/visbrain/colorbar/colorbar.py deleted file mode 100644 index 26ee2bc4b..000000000 --- a/visbrain/colorbar/colorbar.py +++ /dev/null @@ -1,89 +0,0 @@ -"""Colorbar module.""" -import sys -from PyQt5 import QtWidgets - -import vispy.app as visapp - -from .ui_init import UiInit -from ..visuals import CbarQt, CbarBase, CbarObjetcs - - -class Colorbar(UiInit): - """Display a colorbar editor. - - Parameters - ---------- - config : string | None - Path to a configuration file. - cmap : string | inferno - Matplotlib colormap - clim : tuple/list | None - Limit of the colormap. The clim parameter must be a tuple / list - of two float number each one describing respectively the (min, max) - of the colormap. 
Every values under clim[0] or over clim[1] will - peaked. - vmin : float | None - Threshold from which every color will have the color defined using - the under parameter bellow. - under : tuple/string | 'gray' - Matplotlib color for values under vmin. - vmax : float | None - Threshold from which every color will have the color defined using - the over parameter bellow. - over : tuple/string | 'red' - Matplotlib color for values over vmax. - cblabel : string | '' - Colorbar label. - cbtxtsz : float | 26. - Text size of the colorbar label. - cbtxtsh : float | 2.3 - Shift for the colorbar label. - txtcolor : string | 'white' - Text color. - txtsz : float | 20. - Text size for clim/vmin/vmax text. - txtsh : float | 1.2 - Shift for clim/vmin/vmax text. - border : bool | True - Display colorbar borders. - limtxt : bool | True - Display vmin/vmax text. - bgcolor : tuple/string | (.1, .1, .1) - Background color of the colorbar canvas. - ndigits : int | 2 - Number of digits for the text. - """ - - def __init__(self, config=None, **kwargs): - """Init.""" - # Manage isvmin / isvmax : - if 'vmin' in list(kwargs.keys()) and (kwargs['vmin'] is not None): - kwargs['isvmin'] = True - if 'vmax' in list(kwargs.keys()) and (kwargs['vmax'] is not None): - kwargs['isvmax'] = True - # Create the app and initialize all graphical elements : - self._app = QtWidgets.QApplication(sys.argv) - # Initialise GUI : - UiInit.__init__(self) - - cbobjs = CbarObjetcs() - if isinstance(config, str): - cbobjs.load(config) - elif isinstance(config, dict): - for k, i in config.items(): - cbobjs.add_object(k, CbarBase(**i), overwrite=False) - else: - if 'name' in kwargs.keys(): - name = kwargs['name'] - del kwargs['name'] - else: - name = 'Colorbar' - obj1 = CbarBase(**kwargs) - cbobjs.add_object(name, obj1, overwrite=False) - self.cbqt = CbarQt(self.guiW, self.vizW, cbobjs) - self.cbqt._fcn_change_object(clean=True) - - def show(self): - """Display the graphical user interface.""" - self.showMaximized() 
- visapp.run() diff --git a/visbrain/colorbar/gui/__init__.py b/visbrain/colorbar/gui/__init__.py deleted file mode 100644 index 26725acdb..000000000 --- a/visbrain/colorbar/gui/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .cbar_gui import Ui_MainWindow diff --git a/visbrain/colorbar/gui/cbar_gui.py b/visbrain/colorbar/gui/cbar_gui.py deleted file mode 100644 index 536940260..000000000 --- a/visbrain/colorbar/gui/cbar_gui.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- - -# Form implementation generated from reading ui file '/home/etienne/Toolbox/visbrain/visbrain/colorbar/gui/cbar_gui.ui' -# -# Created by: PyQt5 UI code generator 5.8.2 -# -# WARNING! All changes made in this file will be lost! - -from PyQt5 import QtCore, QtGui, QtWidgets - -class Ui_MainWindow(object): - def setupUi(self, MainWindow): - MainWindow.setObjectName("MainWindow") - MainWindow.resize(1198, 908) - self.centralwidget = QtWidgets.QWidget(MainWindow) - self.centralwidget.setObjectName("centralwidget") - self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.centralwidget) - self.horizontalLayout_2.setObjectName("horizontalLayout_2") - self.guiW = QtWidgets.QWidget(self.centralwidget) - self.guiW.setMinimumSize(QtCore.QSize(300, 0)) - self.guiW.setMaximumSize(QtCore.QSize(400, 16777215)) - self.guiW.setObjectName("guiW") - self.horizontalLayout_2.addWidget(self.guiW) - self.vizW = QtWidgets.QHBoxLayout() - self.vizW.setObjectName("vizW") - self.horizontalLayout_2.addLayout(self.vizW) - MainWindow.setCentralWidget(self.centralwidget) - self.menubar = QtWidgets.QMenuBar(MainWindow) - self.menubar.setGeometry(QtCore.QRect(0, 0, 1198, 25)) - self.menubar.setObjectName("menubar") - self.menuFile = QtWidgets.QMenu(self.menubar) - self.menuFile.setObjectName("menuFile") - self.menuConfiguration = QtWidgets.QMenu(self.menuFile) - self.menuConfiguration.setObjectName("menuConfiguration") - MainWindow.setMenuBar(self.menubar) - self.statusbar = QtWidgets.QStatusBar(MainWindow) - 
self.statusbar.setObjectName("statusbar") - MainWindow.setStatusBar(self.statusbar) - self.menuCbarScreenshot = QtWidgets.QAction(MainWindow) - self.menuCbarScreenshot.setObjectName("menuCbarScreenshot") - self.menuCbarSaveConfig = QtWidgets.QAction(MainWindow) - self.menuCbarSaveConfig.setObjectName("menuCbarSaveConfig") - self.menuCbarLoadConfig = QtWidgets.QAction(MainWindow) - self.menuCbarLoadConfig.setObjectName("menuCbarLoadConfig") - self.menuConfiguration.addAction(self.menuCbarSaveConfig) - self.menuConfiguration.addAction(self.menuCbarLoadConfig) - self.menuFile.addAction(self.menuConfiguration.menuAction()) - self.menuFile.addAction(self.menuCbarScreenshot) - self.menubar.addAction(self.menuFile.menuAction()) - - self.retranslateUi(MainWindow) - QtCore.QMetaObject.connectSlotsByName(MainWindow) - - def retranslateUi(self, MainWindow): - _translate = QtCore.QCoreApplication.translate - MainWindow.setWindowTitle(_translate("MainWindow", "Colorbar editor")) - self.menuFile.setTitle(_translate("MainWindow", "File")) - self.menuConfiguration.setTitle(_translate("MainWindow", "Configuration")) - self.menuCbarScreenshot.setText(_translate("MainWindow", "Screenshot")) - self.menuCbarSaveConfig.setText(_translate("MainWindow", "Save")) - self.menuCbarLoadConfig.setText(_translate("MainWindow", "Load")) - - -if __name__ == "__main__": - import sys - app = QtWidgets.QApplication(sys.argv) - MainWindow = QtWidgets.QMainWindow() - ui = Ui_MainWindow() - ui.setupUi(MainWindow) - MainWindow.show() - sys.exit(app.exec_()) - diff --git a/visbrain/colorbar/gui/cbar_gui.ui b/visbrain/colorbar/gui/cbar_gui.ui deleted file mode 100644 index 3346276d7..000000000 --- a/visbrain/colorbar/gui/cbar_gui.ui +++ /dev/null @@ -1,83 +0,0 @@ - - - MainWindow - - - - 0 - 0 - 1198 - 908 - - - - Colorbar editor - - - - - - - - 300 - 0 - - - - - 400 - 16777215 - - - - - - - - - - - - - 0 - 0 - 1198 - 25 - - - - - File - - - - Configuration - - - - - - - - - - - - - Screenshot - - - - - 
Save - - - - - Load - - - - - - diff --git a/visbrain/colorbar/tests/test_colorbar.py b/visbrain/colorbar/tests/test_colorbar.py deleted file mode 100644 index 7bff00ada..000000000 --- a/visbrain/colorbar/tests/test_colorbar.py +++ /dev/null @@ -1,50 +0,0 @@ -"""Test Colorbar module and related methods.""" -import pytest - -from visbrain import Colorbar -from visbrain.tests._tests_visbrain import _TestVisbrain - - -# ---------------- Variables ---------------- -kw = {} -kw['cmap'] = 'Spectral_r' -kw['clim'] = (-10., 10.) -kw['vmin'] = -1.2 -kw['under'] = (.1, .1, .1) -kw['vmax'] = 7.52 -kw['over'] = 'darkred' -kw['cblabel'] = 'Test Colorbar module' -kw['cbtxtsz'] = 2. -kw['cbtxtsh'] = 1.2 -kw['txtcolor'] = 'orange' -kw['txtsz'] = 4. -kw['txtsh'] = 0.5 -kw['border'] = False -kw['limtxt'] = True -kw['bgcolor'] = '#ab4642' -kw['ndigits'] = 4 - -# ---------------- Application ---------------- -cb = Colorbar(**kw) - - -class TestColorbar(_TestVisbrain): - """Test brain.py.""" - - ########################################################################### - # GUI - ########################################################################### - @pytest.mark.skip('Not configured') - def test_save_config(self): - """Test function save_config.""" - cb._fcn_save_cbar_config(filename=self.to_tmp_dir('cb_config.txt')) - - @pytest.mark.skip('Not configured') - def test_load_config(self): - """Test function load_config.""" - cb._fcn_load_cbar_config(filename=self.to_tmp_dir('cb_config.txt')) - - @pytest.mark.skip('Not configured') - def test_screenshot(self): - """Test function screenshot.""" - pass diff --git a/visbrain/colorbar/ui_init.py b/visbrain/colorbar/ui_init.py deleted file mode 100644 index 0ed432d49..000000000 --- a/visbrain/colorbar/ui_init.py +++ /dev/null @@ -1,19 +0,0 @@ -"""GUI initialization.""" -from PyQt5 import QtWidgets -from vispy import app - -from .gui import Ui_MainWindow -from .ui_menu import UiMenu, UiScreenshot - - -class UiInit(QtWidgets.QMainWindow, 
Ui_MainWindow, app.Canvas, UiMenu, - UiScreenshot): - """Group and initialize the graphical elements and interactions.""" - - def __init__(self): - """Init.""" - # Create the main window : - super(UiInit, self).__init__(None) - self.setupUi(self) - UiMenu.__init__(self) - UiScreenshot.__init__(self) diff --git a/visbrain/colorbar/ui_menu.py b/visbrain/colorbar/ui_menu.py deleted file mode 100644 index b9064d8e7..000000000 --- a/visbrain/colorbar/ui_menu.py +++ /dev/null @@ -1,74 +0,0 @@ -"""Main class for sleep menus managment.""" - -from ..io import write_fig_pyqt, write_fig_canvas, dialog_load, dialog_save -from ..utils import ScreenshotPopup, HelpMenu - - -class UiMenu(HelpMenu): - """Main class for sleep menus managment.""" - - def __init__(self): - """Init.""" - base = 'http://visbrain.org/colorbar.html' - sections = {'Colorbar': base} - HelpMenu.__init__(self, sections, False) - # __________ CONFIG __________ - self.menuCbarSaveConfig.triggered.connect(self._fcn_save_cbar_config) - self.menuCbarLoadConfig.triggered.connect(self._fcn_load_cbar_config) - self.menuCbarScreenshot.triggered.connect(self._fcn_cbar_screenshot) - - def _fcn_save_cbar_config(self, *args, filename=None): - """Save colorbar config.""" - if filename is None: - filename = dialog_save(self, 'Save config File', 'config', - "Text file (*.txt);;All files (*.*)") - - if filename: - self.cbqt.save(filename) - - def _fcn_load_cbar_config(self, *args, filename=None): - """Load colorbar conf.""" - if filename is None: - # Open dialog box : - filename = dialog_load(self, 'Load config File', 'config', - "Text file (*.txt);;All files (*.*)") - - if filename: - self.cbqt.load(filename) - self.cbqt._fcn_ChangeObj(clean=True) - - def _fcn_cbar_screenshot(self): - """Colorbar screenshot.""" - self.show_gui_screenshot() - - -class UiScreenshot(object): - """Initialize the screenshot GUI and functions to apply it.""" - - def __init__(self): - """Init.""" - canvas_names = ['main'] - self._ssGui = 
ScreenshotPopup(self._fcn_run_screenshot, - canvas_names=canvas_names) - - def show_gui_screenshot(self): - """Display the GUI screenhot.""" - self._ssGui.show() - - def _fcn_run_screenshot(self): - """Run the screenshot.""" - # Get filename : - filename = dialog_save(self, 'Screenshot', 'screenshot', "PNG (*.PNG)" - ";;TIFF (*.tiff);;JPG (*.jpg);;" - "All files (*.*)") - # Get screenshot arguments : - kwargs = self._ssGui.to_kwargs() - - if kwargs['entire']: # Screenshot of the entire window - self._ssGui._ss.close() - write_fig_pyqt(self, filename) - else: # Screenshot of selected canvas - # Remove unsed entries : - del kwargs['entire'], kwargs['canvas'] - write_fig_canvas(filename, self.cbqt.cbviz._canvas, - widget=self.cbqt.cbviz._wc, **kwargs) diff --git a/visbrain/data/data_url.txt b/visbrain/data/data_url.txt deleted file mode 100644 index d58aaed4e..000000000 --- a/visbrain/data/data_url.txt +++ /dev/null @@ -1,33 +0,0 @@ -{ -"xyz_sample.npz": "https://www.dropbox.com/s/whogfxutyxoir1t/xyz_sample.npz?dl=1", -"phase_sync_delta.npz": "https://www.dropbox.com/s/08xaq1lkj7rnuf6/phase_sync_delta.npz?dl=1", -"thalamus.txt": "https://www.dropbox.com/s/sw7vkplwd9zbns1/thalamus.txt?dl=1", -"Px.npy": "https://www.dropbox.com/s/qzwjavhz5lg13km/Px.npy?dl=1", -"meg_source_estimate-lh.stc": "https://www.dropbox.com/s/q6cw5gf3edjqqcc/meg_source_estimate-lh.stc?dl=1", -"meg_source_estimate-rh.stc": "https://www.dropbox.com/s/uyr6291q2z87uvo/meg_source_estimate-rh.stc?dl=1", -"lh.sig.nii.gz": "https://www.dropbox.com/s/soq2l4pqxerhzyo/lh.sig.nii.gz?dl=1", -"lh.alt_sig.nii.gz": "https://www.dropbox.com/s/q7w4mw8se4vknlp/lh.alt_sig.nii.gz?dl=1", -"figure.zip": "https://www.dropbox.com/s/jsjct54ynvdjzfq/figure.zip?dl=1", -"lh.aparc.a2009s.annot": "https://www.dropbox.com/s/zdtgwqal7m9nr8g/lh.aparc.a2009s.annot?dl=1", -"rh.aparc.a2009s.annot": "https://www.dropbox.com/s/zt1o27zgna6id05/rh.aparc.a2009s.annot?dl=1", -"lh.PALS_B12_Brodmann.annot": 
"https://www.dropbox.com/s/zn3zu9hnym9yny2/lh.PALS_B12_Brodmann.annot?dl=1", -"rh.PALS_B12_Brodmann.annot": "https://www.dropbox.com/s/8ddj6pscs6maau3/rh.PALS_B12_Brodmann.annot?dl=1", -"lh.aparc.annot": "https://www.dropbox.com/s/31voof5ijgyz9zw/lh.aparc.annot?dl=1", -"rh.aparc.annot": "https://www.dropbox.com/s/a06wl7kw1orwiia/rh.aparc.annot?dl=1", -"brain_config.txt": "https://www.dropbox.com/s/o0ljy16mpz7mmxu/brain_config.txt?dl=1", -"cbar_config.zip": "https://www.dropbox.com/s/5o1ph08rmpft200/cbar_config.zip?dl=1", -"GG-853-GM-0.7mm.nii.gz": "https://www.dropbox.com/s/escinae2yprcq52/GG-853-GM-0.7mm.nii.gz?dl=1", -"GG-853-WM-0.7mm.nii.gz": "https://www.dropbox.com/s/26l6ky0bj981yux/GG-853-WM-0.7mm.nii.gz?dl=1", -"Custom.npz": "https://www.dropbox.com/s/twrn2e7hjwsea6b/Custom.npz?dl=1", -"inflated.npz": "https://www.dropbox.com/s/nl2hh0thoy7xbnd/inflated.npz?dl=1", -"sphere.npz": "https://www.dropbox.com/s/os90qcr5rwigqmh/sphere.npz?dl=1", -"white.npz": "https://www.dropbox.com/s/avpq84gq06d5mbc/white.npz?dl=1", -"sulcus.npy": "https://www.dropbox.com/s/jfihlb7pna7ws2i/sulcus.npy?dl=1", -"sleep_rec.zip": "https://www.dropbox.com/s/oer3eh8s6i8wcf5/sleep_rec.zip?dl=1", -"sleep_edf.zip": "https://www.dropbox.com/s/bj1ra95rbksukro/sleep_edf.zip?dl=1", -"sleep_brainvision.zip": "https://www.dropbox.com/s/t2bo9ufvc3f8mbj/sleep_brainvision.zip?dl=1", -"sleep_elan.zip": "https://www.dropbox.com/s/95xvdqivpgk90hg/sleep_elan.zip?dl=1", -"sleep_matlab.zip": "https://www.dropbox.com/s/bmfc2u55xsejbaf/sleep_matlab.zip?dl=1", -"topoplot_data.npz": "https://www.dropbox.com/s/m76y3p0fyj6lxht/topoplot_data.npz?dl=1", -"MIST_ROI.zip": "https://www.dropbox.com/s/3v24xqrc715e2w5/MIST_ROI.zip?dl=1" -} diff --git a/visbrain/data/icons/brain_icon.svg b/visbrain/data/icons/brain_icon.svg deleted file mode 100644 index 7a6ba32f8..000000000 --- a/visbrain/data/icons/brain_icon.svg +++ /dev/null @@ -1,148 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - 
- - - - - - diff --git a/visbrain/data/icons/colorbar_icon.svg b/visbrain/data/icons/colorbar_icon.svg deleted file mode 100644 index 924c89e05..000000000 --- a/visbrain/data/icons/colorbar_icon.svg +++ /dev/null @@ -1,3829 +0,0 @@ - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - - - diff --git a/visbrain/data/icons/figure_icon.svg b/visbrain/data/icons/figure_icon.svg deleted file mode 100644 index af0c0c7ba..000000000 --- a/visbrain/data/icons/figure_icon.svg +++ /dev/null @@ -1,3425 +0,0 @@ - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/visbrain/data/icons/sleep_icon.svg b/visbrain/data/icons/sleep_icon.svg deleted file mode 100644 index 4b5251d84..000000000 --- a/visbrain/data/icons/sleep_icon.svg +++ /dev/null @@ -1,225 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - diff --git a/visbrain/data/icons/topo_icon.svg b/visbrain/data/icons/topo_icon.svg deleted file mode 100644 index 762772056..000000000 --- a/visbrain/data/icons/topo_icon.svg +++ /dev/null @@ -1,8226 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - diff --git a/visbrain/data/roi/aal.npz b/visbrain/data/roi/aal.npz deleted file mode 100644 index 8aa8f6a3e..000000000 Binary files a/visbrain/data/roi/aal.npz and /dev/null differ diff --git a/visbrain/data/roi/brodmann.npz b/visbrain/data/roi/brodmann.npz deleted file mode 100644 index 381eac5b9..000000000 Binary files a/visbrain/data/roi/brodmann.npz and /dev/null differ diff --git a/visbrain/data/roi/talairach.npz b/visbrain/data/roi/talairach.npz deleted file mode 100644 index 581c5f0ef..000000000 Binary files a/visbrain/data/roi/talairach.npz and /dev/null differ diff --git a/visbrain/data/templates/B1.npz b/visbrain/data/templates/B1.npz deleted file mode 100644 index 4bcd4b4bb..000000000 Binary files 
a/visbrain/data/templates/B1.npz and /dev/null differ diff --git a/visbrain/data/templates/B2.npz b/visbrain/data/templates/B2.npz deleted file mode 100644 index 2ca56bfc9..000000000 Binary files a/visbrain/data/templates/B2.npz and /dev/null differ diff --git a/visbrain/data/templates/B3.npz b/visbrain/data/templates/B3.npz deleted file mode 100644 index 676482501..000000000 Binary files a/visbrain/data/templates/B3.npz and /dev/null differ diff --git a/visbrain/data/topo/eegref.npz b/visbrain/data/topo/eegref.npz deleted file mode 100644 index 2713feffa..000000000 Binary files a/visbrain/data/topo/eegref.npz and /dev/null differ diff --git a/visbrain/data_url.json b/visbrain/data_url.json new file mode 100644 index 000000000..c3f9a7a1a --- /dev/null +++ b/visbrain/data_url.json @@ -0,0 +1,57 @@ +{ + "templates": { + "B1.npz": "https://www.dropbox.com/s/7oisd56fm9fkwe1/B1.npz?dl=1", + "B2.npz": "https://www.dropbox.com/s/k6qev2k7ac8fe60/B2.npz?dl=1", + "B3.npz": "https://www.dropbox.com/s/t3huxif2xplned7/B3.npz?dl=1", + "inflated.npz": "https://www.dropbox.com/s/nl2hh0thoy7xbnd/inflated.npz?dl=1", + "sphere.npz": "https://www.dropbox.com/s/os90qcr5rwigqmh/sphere.npz?dl=1", + "white.npz": "https://www.dropbox.com/s/avpq84gq06d5mbc/white.npz?dl=1", + "sulcus.npy": "https://www.dropbox.com/s/jfihlb7pna7ws2i/sulcus.npy?dl=1" + }, + "roi": { + "aal.npz": "https://www.dropbox.com/s/feqaoj58va5anbr/aal.npz?dl=1", + "brodmann.npz": "https://www.dropbox.com/s/qg2w9b191r7f4ox/brodmann.npz?dl=1", + "talairach.npz": "https://www.dropbox.com/s/g9q9al7t481x180/talairach.npz?dl=1", + "mist.zip": "https://www.dropbox.com/s/aefmw90uvx09gqw/mist.zip?dl=1" + }, + "topo": { + "eegref.npz": "https://www.dropbox.com/s/doe3fnxbct5rsss/eegref.npz?dl=1" + }, + "icons": { + "brain_icon.svg": "https://www.dropbox.com/s/47eb6a1jlgzk9vc/brain_icon.svg?dl=1", + "colorbar_icon.svg": "https://www.dropbox.com/s/botcrkxkor1gtzh/colorbar_icon.svg?dl=1", + "figure_icon.svg": 
"https://www.dropbox.com/s/dpjtsenf9n9mvxt/figure_icon.svg?dl=1", + "sleep_icon.svg": "https://www.dropbox.com/s/bj0a88krqtq5q00/sleep_icon.svg?dl=1", + "topo_icon.svg": "https://www.dropbox.com/s/yvmsuerie5bn075/topo_icon.svg?dl=1" + }, + "example_data": { + "xyz_sample.npz": "https://www.dropbox.com/s/whogfxutyxoir1t/xyz_sample.npz?dl=1", + "phase_sync_delta.npz": "https://www.dropbox.com/s/08xaq1lkj7rnuf6/phase_sync_delta.npz?dl=1", + "thalamus.txt": "https://www.dropbox.com/s/sw7vkplwd9zbns1/thalamus.txt?dl=1", + "Px.npy": "https://www.dropbox.com/s/qzwjavhz5lg13km/Px.npy?dl=1", + "meg_source_estimate-lh.stc": "https://www.dropbox.com/s/q6cw5gf3edjqqcc/meg_source_estimate-lh.stc?dl=1", + "meg_source_estimate-rh.stc": "https://www.dropbox.com/s/uyr6291q2z87uvo/meg_source_estimate-rh.stc?dl=1", + "lh.sig.nii.gz": "https://www.dropbox.com/s/soq2l4pqxerhzyo/lh.sig.nii.gz?dl=1", + "lh.alt_sig.nii.gz": "https://www.dropbox.com/s/q7w4mw8se4vknlp/lh.alt_sig.nii.gz?dl=1", + "figure.zip": "https://www.dropbox.com/s/jsjct54ynvdjzfq/figure.zip?dl=1", + "lh.aparc.a2009s.annot": "https://www.dropbox.com/s/zdtgwqal7m9nr8g/lh.aparc.a2009s.annot?dl=1", + "rh.aparc.a2009s.annot": "https://www.dropbox.com/s/zt1o27zgna6id05/rh.aparc.a2009s.annot?dl=1", + "lh.PALS_B12_Brodmann.annot": "https://www.dropbox.com/s/zn3zu9hnym9yny2/lh.PALS_B12_Brodmann.annot?dl=1", + "rh.PALS_B12_Brodmann.annot": "https://www.dropbox.com/s/8ddj6pscs6maau3/rh.PALS_B12_Brodmann.annot?dl=1", + "lh.aparc.annot": "https://www.dropbox.com/s/31voof5ijgyz9zw/lh.aparc.annot?dl=1", + "rh.aparc.annot": "https://www.dropbox.com/s/a06wl7kw1orwiia/rh.aparc.annot?dl=1", + "brain_config.txt": "https://www.dropbox.com/s/o0ljy16mpz7mmxu/brain_config.txt?dl=1", + "cbar_config.zip": "https://www.dropbox.com/s/5o1ph08rmpft200/cbar_config.zip?dl=1", + "GG-853-GM-0.7mm.nii.gz": "https://www.dropbox.com/s/escinae2yprcq52/GG-853-GM-0.7mm.nii.gz?dl=1", + "GG-853-WM-0.7mm.nii.gz": 
"https://www.dropbox.com/s/26l6ky0bj981yux/GG-853-WM-0.7mm.nii.gz?dl=1", + "Custom.npz": "https://www.dropbox.com/s/twrn2e7hjwsea6b/Custom.npz?dl=1", + "sleep_rec.zip": "https://www.dropbox.com/s/oer3eh8s6i8wcf5/sleep_rec.zip?dl=1", + "sleep_edf.zip": "https://www.dropbox.com/s/bj1ra95rbksukro/sleep_edf.zip?dl=1", + "sleep_brainvision.zip": "https://www.dropbox.com/s/t2bo9ufvc3f8mbj/sleep_brainvision.zip?dl=1", + "sleep_elan.zip": "https://www.dropbox.com/s/95xvdqivpgk90hg/sleep_elan.zip?dl=1", + "sleep_matlab.zip": "https://www.dropbox.com/s/bmfc2u55xsejbaf/sleep_matlab.zip?dl=1", + "topoplot_data.npz": "https://www.dropbox.com/s/m76y3p0fyj6lxht/topoplot_data.npz?dl=1", + "MIST_ROI.zip": "https://www.dropbox.com/s/3v24xqrc715e2w5/MIST_ROI.zip?dl=1", + "s101_jbe.hyp": "https://www.dropbox.com/s/69c5cke4tuehyqb/s101_jbe.hyp?dl=1" + } +} diff --git a/visbrain/gui/__init__.py b/visbrain/gui/__init__.py new file mode 100644 index 000000000..d55d7741c --- /dev/null +++ b/visbrain/gui/__init__.py @@ -0,0 +1,4 @@ +from .brain import Brain # noqa +from .figure import Figure # noqa +from .signal import Signal # noqa +from .sleep import Sleep # noqa diff --git a/visbrain/brain/__init__.py b/visbrain/gui/brain/__init__.py similarity index 100% rename from visbrain/brain/__init__.py rename to visbrain/gui/brain/__init__.py diff --git a/visbrain/brain/brain.py b/visbrain/gui/brain/brain.py similarity index 95% rename from visbrain/brain/brain.py rename to visbrain/gui/brain/brain.py index 559ef984e..06f961c63 100644 --- a/visbrain/brain/brain.py +++ b/visbrain/gui/brain/brain.py @@ -14,13 +14,13 @@ from .visuals import Visuals from .cbar import BrainCbar from .user import BrainUserMethods -from ..pyqt_module import PyQtModule -from ..config import PROFILER +from visbrain._pyqt_module import _PyQtModule +from visbrain.config import PROFILER logger = logging.getLogger('visbrain') -class Brain(PyQtModule, UiInit, UiElements, Visuals, BrainCbar, +class Brain(_PyQtModule, UiInit, 
UiElements, Visuals, BrainCbar, BrainUserMethods): """Visualization of brain-data on a standard MNI brain. @@ -91,8 +91,8 @@ class Brain(PyQtModule, UiInit, UiElements, Visuals, BrainCbar, def __init__(self, bgcolor='black', verbose=None, **kwargs): """Init.""" # ====================== PyQt creation ====================== - PyQtModule.__init__(self, verbose=verbose, to_describe='view.wc', - icon='brain_icon.svg') + _PyQtModule.__init__(self, verbose=verbose, to_describe='view.wc', + icon='brain_icon.svg') self._userobj = {} self._gl_scale = 100. # fix appearance for small meshes self._camera = viscam.TurntableCamera(name='MainBrainCamera') @@ -125,6 +125,7 @@ def __init__(self, bgcolor='black', verbose=None, **kwargs): camera = viscam.PanZoomCamera(rect=(-.2, -2.5, 1, 5)) BrainCbar.__init__(self, camera) PROFILER("Colorbar and panzoom creation") + self.background_color(bgcolor) # ====================== Shortcuts ====================== BrainShortcuts.__init__(self, self.cbqt.cbviz._canvas) diff --git a/visbrain/brain/cbar.py b/visbrain/gui/brain/cbar.py similarity index 90% rename from visbrain/brain/cbar.py rename to visbrain/gui/brain/cbar.py index 9dcf3bb67..287c7351a 100644 --- a/visbrain/brain/cbar.py +++ b/visbrain/gui/brain/cbar.py @@ -1,7 +1,7 @@ """Colorbar management for the Brain module.""" import logging -from ..visuals import CbarQt, CbarObjetcs, CbarBase +from visbrain.visuals import CbarQt, CbarObjetcs, CbarBase logger = logging.getLogger('visbrain') @@ -64,6 +64,7 @@ def __init__(self, camera): self.cbqt.select(0) self.cbqt._fcn_change_object() self.menuDispCbar.setEnabled(is_cbqt) + self.cbqt.setEnabled('roi', hasattr(self.roi, 'mesh')) # Add the camera to the colorbar : self.cbqt.add_camera(camera) @@ -86,8 +87,10 @@ def _fcn_link_brain(self): def _fcn_minmax_brain(self): """Executed function for autoscale projections.""" - self.cbqt.cbobjs._objs['brain']._clim = self.atlas._minmax - self.atlas._clim = self.atlas._minmax + 
self.atlas._update_cbar_minmax() + self.cbqt.cbobjs._objs['brain']['clim'] = self.atlas._clim + kwargs = self.cbqt.cbobjs._objs['brain'].to_kwargs(True) + self.atlas.update_from_dict(kwargs) self.atlas._update_cbar() ########################################################################### @@ -97,13 +100,15 @@ def _fcn_link_roi(self): """Executed function when projection need updates.""" kwargs = self.cbqt.cbobjs._objs['roi'].to_kwargs(True) self.roi.update_from_dict(kwargs) - # self.roi._update_cbar() + self.roi._update_cbar() def _fcn_minmax_roi(self): """Executed function for autoscale projections.""" - self.cbqt.cbobjs._objs['roi']._clim = self.roi._minmax - self.roi._clim = self.roi._minmax - # self.roi._update_cbar() + self.roi._update_cbar_minmax() + self.cbqt.cbobjs._objs['roi']['clim'] = self.roi._clim + kwargs = self.cbqt.cbobjs._objs['roi'].to_kwargs(True) + self.roi.update_from_dict(kwargs) + self.roi._update_cbar() ########################################################################### # CONNECTIVITY diff --git a/visbrain/brain/interface/__init__.py b/visbrain/gui/brain/interface/__init__.py similarity index 100% rename from visbrain/brain/interface/__init__.py rename to visbrain/gui/brain/interface/__init__.py diff --git a/visbrain/brain/interface/gui/__init__.py b/visbrain/gui/brain/interface/gui/__init__.py similarity index 100% rename from visbrain/brain/interface/gui/__init__.py rename to visbrain/gui/brain/interface/gui/__init__.py diff --git a/visbrain/brain/interface/gui/brain_gui.py b/visbrain/gui/brain/interface/gui/brain_gui.py similarity index 89% rename from visbrain/brain/interface/gui/brain_gui.py rename to visbrain/gui/brain/interface/gui/brain_gui.py index d53e3d365..3eee94420 100644 --- a/visbrain/brain/interface/gui/brain_gui.py +++ b/visbrain/gui/brain/interface/gui/brain_gui.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- -# Form implementation generated from reading ui file 
'/home/etienne/Toolbox/visbrain/visbrain/brain/interface/gui/brain_gui.ui' +# Form implementation generated from reading ui file '/home/etienne/Toolbox/visbrain/visbrain/gui/brain/interface/gui/brain_gui.ui' # -# Created by: PyQt5 UI code generator 5.9 +# Created by: PyQt5 UI code generator 5.11.2 # # WARNING! All changes made in this file will be lost! @@ -111,7 +111,6 @@ def setupUi(self, MainWindow): self._brain_page = QtWidgets.QWidget() self._brain_page.setObjectName("_brain_page") self.verticalLayout_3 = QtWidgets.QVBoxLayout(self._brain_page) - self.verticalLayout_3.setContentsMargins(0, 0, 0, 0) self.verticalLayout_3.setObjectName("verticalLayout_3") self._brain_grp = QtWidgets.QGroupBox(self._brain_page) self._brain_grp.setCheckable(True) @@ -127,6 +126,90 @@ def setupUi(self, MainWindow): self.verticalLayout_49.addWidget(self.line_43) self.gridLayout_5 = QtWidgets.QGridLayout() self.gridLayout_5.setObjectName("gridLayout_5") + self.gridLayout_2 = QtWidgets.QGridLayout() + self.gridLayout_2.setObjectName("gridLayout_2") + self.label_59 = QtWidgets.QLabel(self._brain_grp) + font = QtGui.QFont() + font.setItalic(True) + self.label_59.setFont(font) + self.label_59.setObjectName("label_59") + self.gridLayout_2.addWidget(self.label_59, 5, 0, 1, 1) + self._brain_ymin = QtWidgets.QSlider(self._brain_grp) + self._brain_ymin.setMaximum(10) + self._brain_ymin.setSliderPosition(10) + self._brain_ymin.setOrientation(QtCore.Qt.Horizontal) + self._brain_ymin.setInvertedAppearance(False) + self._brain_ymin.setObjectName("_brain_ymin") + self.gridLayout_2.addWidget(self._brain_ymin, 2, 1, 1, 1) + self.label_36 = QtWidgets.QLabel(self._brain_grp) + font = QtGui.QFont() + font.setItalic(True) + self.label_36.setFont(font) + self.label_36.setObjectName("label_36") + self.gridLayout_2.addWidget(self.label_36, 2, 0, 1, 1) + self.label_35 = QtWidgets.QLabel(self._brain_grp) + font = QtGui.QFont() + font.setItalic(True) + self.label_35.setFont(font) + 
self.label_35.setObjectName("label_35") + self.gridLayout_2.addWidget(self.label_35, 1, 0, 1, 1) + self.label_56 = QtWidgets.QLabel(self._brain_grp) + font = QtGui.QFont() + font.setItalic(True) + self.label_56.setFont(font) + self.label_56.setObjectName("label_56") + self.gridLayout_2.addWidget(self.label_56, 4, 0, 1, 1) + self._brain_xmax = QtWidgets.QSlider(self._brain_grp) + self._brain_xmax.setMaximum(10) + self._brain_xmax.setSliderPosition(10) + self._brain_xmax.setOrientation(QtCore.Qt.Horizontal) + self._brain_xmax.setInvertedAppearance(False) + self._brain_xmax.setInvertedControls(True) + self._brain_xmax.setObjectName("_brain_xmax") + self.gridLayout_2.addWidget(self._brain_xmax, 1, 1, 1, 1) + self._brain_xmin = QtWidgets.QSlider(self._brain_grp) + self._brain_xmin.setMaximum(10) + self._brain_xmin.setSliderPosition(10) + self._brain_xmin.setOrientation(QtCore.Qt.Horizontal) + self._brain_xmin.setInvertedAppearance(False) + self._brain_xmin.setInvertedControls(True) + self._brain_xmin.setObjectName("_brain_xmin") + self.gridLayout_2.addWidget(self._brain_xmin, 0, 1, 1, 1) + self.label_34 = QtWidgets.QLabel(self._brain_grp) + font = QtGui.QFont() + font.setItalic(True) + self.label_34.setFont(font) + self.label_34.setObjectName("label_34") + self.gridLayout_2.addWidget(self.label_34, 0, 0, 1, 1) + self.label_40 = QtWidgets.QLabel(self._brain_grp) + font = QtGui.QFont() + font.setItalic(True) + self.label_40.setFont(font) + self.label_40.setObjectName("label_40") + self.gridLayout_2.addWidget(self.label_40, 3, 0, 1, 1) + self._brain_ymax = QtWidgets.QSlider(self._brain_grp) + self._brain_ymax.setMaximum(10) + self._brain_ymax.setSliderPosition(10) + self._brain_ymax.setOrientation(QtCore.Qt.Horizontal) + self._brain_ymax.setInvertedControls(True) + self._brain_ymax.setObjectName("_brain_ymax") + self.gridLayout_2.addWidget(self._brain_ymax, 3, 1, 1, 1) + self._brain_zmin = QtWidgets.QSlider(self._brain_grp) + self._brain_zmin.setMaximum(10) + 
self._brain_zmin.setSliderPosition(10) + self._brain_zmin.setOrientation(QtCore.Qt.Horizontal) + self._brain_zmin.setInvertedAppearance(False) + self._brain_zmin.setObjectName("_brain_zmin") + self.gridLayout_2.addWidget(self._brain_zmin, 4, 1, 1, 1) + self._brain_zmax = QtWidgets.QSlider(self._brain_grp) + self._brain_zmax.setMaximum(10) + self._brain_zmax.setSliderPosition(10) + self._brain_zmax.setOrientation(QtCore.Qt.Horizontal) + self._brain_zmax.setInvertedAppearance(False) + self._brain_zmax.setInvertedControls(True) + self._brain_zmax.setObjectName("_brain_zmax") + self.gridLayout_2.addWidget(self._brain_zmax, 5, 1, 1, 1) + self.gridLayout_5.addLayout(self.gridLayout_2, 3, 2, 1, 1) self.label_19 = QtWidgets.QLabel(self._brain_grp) font = QtGui.QFont() font.setItalic(True) @@ -161,7 +244,7 @@ def setupUi(self, MainWindow): self._brain_hemi.addItem("") self.gridLayout_5.addWidget(self._brain_hemi, 1, 2, 1, 1) spacerItem1 = QtWidgets.QSpacerItem(10, 1, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) - self.gridLayout_5.addItem(spacerItem1, 3, 2, 1, 1) + self.gridLayout_5.addItem(spacerItem1, 5, 2, 1, 1) self.line_55 = QtWidgets.QFrame(self._brain_grp) self.line_55.setMinimumSize(QtCore.QSize(20, 0)) self.line_55.setFrameShape(QtWidgets.QFrame.VLine) @@ -181,6 +264,33 @@ def setupUi(self, MainWindow): self._brain_alpha.setOrientation(QtCore.Qt.Horizontal) self._brain_alpha.setObjectName("_brain_alpha") self.gridLayout_5.addWidget(self._brain_alpha, 0, 2, 1, 1) + self.line_30 = QtWidgets.QFrame(self._brain_grp) + self.line_30.setMinimumSize(QtCore.QSize(20, 0)) + self.line_30.setFrameShape(QtWidgets.QFrame.VLine) + self.line_30.setFrameShadow(QtWidgets.QFrame.Sunken) + self.line_30.setObjectName("line_30") + self.gridLayout_5.addWidget(self.line_30, 3, 1, 1, 1) + self.label_33 = QtWidgets.QLabel(self._brain_grp) + font = QtGui.QFont() + font.setItalic(True) + self.label_33.setFont(font) + self.label_33.setObjectName("label_33") + 
self.gridLayout_5.addWidget(self.label_33, 3, 0, 1, 1) + self.label_62 = QtWidgets.QLabel(self._brain_grp) + font = QtGui.QFont() + font.setItalic(True) + self.label_62.setFont(font) + self.label_62.setObjectName("label_62") + self.gridLayout_5.addWidget(self.label_62, 4, 0, 1, 1) + self.line_31 = QtWidgets.QFrame(self._brain_grp) + self.line_31.setMinimumSize(QtCore.QSize(20, 0)) + self.line_31.setFrameShape(QtWidgets.QFrame.VLine) + self.line_31.setFrameShadow(QtWidgets.QFrame.Sunken) + self.line_31.setObjectName("line_31") + self.gridLayout_5.addWidget(self.line_31, 4, 1, 1, 1) + self._brain_inlight = QtWidgets.QCheckBox(self._brain_grp) + self._brain_inlight.setObjectName("_brain_inlight") + self.gridLayout_5.addWidget(self._brain_inlight, 4, 2, 1, 1) self.verticalLayout_49.addLayout(self.gridLayout_5) spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout_49.addItem(spacerItem2) @@ -189,7 +299,6 @@ def setupUi(self, MainWindow): self._roi_page = QtWidgets.QWidget() self._roi_page.setObjectName("_roi_page") self.verticalLayout_11 = QtWidgets.QVBoxLayout(self._roi_page) - self.verticalLayout_11.setContentsMargins(0, 0, 0, 0) self.verticalLayout_11.setObjectName("verticalLayout_11") self._roi_grp = QtWidgets.QGroupBox(self._roi_page) self._roi_grp.setEnabled(True) @@ -208,11 +317,18 @@ def setupUi(self, MainWindow): self.gridLayout_39 = QtWidgets.QGridLayout() self.gridLayout_39.setContentsMargins(-1, 0, -1, -1) self.gridLayout_39.setObjectName("gridLayout_39") + self.label_114 = QtWidgets.QLabel(self._roi_grp) + font = QtGui.QFont() + font.setItalic(True) + self.label_114.setFont(font) + self.label_114.setObjectName("label_114") + self.gridLayout_39.addWidget(self.label_114, 2, 0, 1, 1) self._roiSmooth = QtWidgets.QSpinBox(self._roi_grp) self._roiSmooth.setMinimum(3) self._roiSmooth.setSingleStep(2) + self._roiSmooth.setProperty("value", 3) self._roiSmooth.setObjectName("_roiSmooth") - 
self.gridLayout_39.addWidget(self._roiSmooth, 2, 2, 1, 1) + self.gridLayout_39.addWidget(self._roiSmooth, 3, 2, 1, 1) self.label_113 = QtWidgets.QLabel(self._roi_grp) font = QtGui.QFont() font.setItalic(True) @@ -230,15 +346,9 @@ def setupUi(self, MainWindow): self.line_4.setFrameShape(QtWidgets.QFrame.VLine) self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_4.setObjectName("line_4") - self.gridLayout_39.addWidget(self.line_4, 2, 1, 1, 1) + self.gridLayout_39.addWidget(self.line_4, 3, 1, 1, 1) spacerItem3 = QtWidgets.QSpacerItem(40, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) - self.gridLayout_39.addItem(spacerItem3, 3, 2, 1, 1) - self.label_2 = QtWidgets.QLabel(self._roi_grp) - font = QtGui.QFont() - font.setItalic(True) - self.label_2.setFont(font) - self.label_2.setObjectName("label_2") - self.gridLayout_39.addWidget(self.label_2, 2, 0, 1, 1) + self.gridLayout_39.addItem(spacerItem3, 5, 2, 1, 1) self._roiTransp = QtWidgets.QCheckBox(self._roi_grp) self._roiTransp.setEnabled(False) self._roiTransp.setObjectName("_roiTransp") @@ -246,6 +356,29 @@ def setupUi(self, MainWindow): self._roiDiv = QtWidgets.QComboBox(self._roi_grp) self._roiDiv.setObjectName("_roiDiv") self.gridLayout_39.addWidget(self._roiDiv, 1, 2, 1, 1) + self.line_7 = QtWidgets.QFrame(self._roi_grp) + self.line_7.setMinimumSize(QtCore.QSize(10, 0)) + self.line_7.setFrameShape(QtWidgets.QFrame.VLine) + self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken) + self.line_7.setObjectName("line_7") + self.gridLayout_39.addWidget(self.line_7, 2, 1, 1, 1) + self._roiLevel = QtWidgets.QComboBox(self._roi_grp) + self._roiLevel.setEnabled(False) + self._roiLevel.setObjectName("_roiLevel") + self._roiLevel.addItem("") + self._roiLevel.addItem("") + self._roiLevel.addItem("") + self._roiLevel.addItem("") + self._roiLevel.addItem("") + self._roiLevel.addItem("") + self._roiLevel.addItem("") + self.gridLayout_39.addWidget(self._roiLevel, 2, 2, 1, 1) + self._roiIsSmooth = 
QtWidgets.QCheckBox(self._roi_grp) + self._roiIsSmooth.setObjectName("_roiIsSmooth") + self.gridLayout_39.addWidget(self._roiIsSmooth, 3, 0, 1, 1) + self._roiUniColor = QtWidgets.QCheckBox(self._roi_grp) + self._roiUniColor.setObjectName("_roiUniColor") + self.gridLayout_39.addWidget(self._roiUniColor, 4, 0, 1, 3) self.verticalLayout_23.addLayout(self.gridLayout_39) self.gridLayout_20 = QtWidgets.QGridLayout() self.gridLayout_20.setContentsMargins(-1, 0, -1, -1) @@ -265,13 +398,14 @@ def setupUi(self, MainWindow): self.horizontalLayout_13.addWidget(self._roiButRst) spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_13.addItem(spacerItem6) - self.gridLayout_20.addLayout(self.horizontalLayout_13, 1, 0, 1, 1) + self.gridLayout_20.addLayout(self.horizontalLayout_13, 2, 0, 1, 1) spacerItem7 = QtWidgets.QSpacerItem(0, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_20.addItem(spacerItem7, 0, 1, 1, 1) - self._roiToAdd = QtWidgets.QTableWidget(self._roi_grp) + self._roiFilter = QtWidgets.QLineEdit(self._roi_grp) + self._roiFilter.setObjectName("_roiFilter") + self.gridLayout_20.addWidget(self._roiFilter, 1, 0, 1, 1) + self._roiToAdd = QtWidgets.QTableView(self._roi_grp) self._roiToAdd.setObjectName("_roiToAdd") - self._roiToAdd.setColumnCount(0) - self._roiToAdd.setRowCount(0) self.gridLayout_20.addWidget(self._roiToAdd, 0, 0, 1, 1) self.verticalLayout_23.addLayout(self.gridLayout_20) self.verticalLayout_11.addWidget(self._roi_grp) @@ -279,7 +413,6 @@ def setupUi(self, MainWindow): self._volume_page = QtWidgets.QWidget() self._volume_page.setObjectName("_volume_page") self.verticalLayout_14 = QtWidgets.QVBoxLayout(self._volume_page) - self.verticalLayout_14.setContentsMargins(0, 0, 0, 0) self.verticalLayout_14.setObjectName("verticalLayout_14") self._vol_grp = QtWidgets.QGroupBox(self._volume_page) self._vol_grp.setCheckable(True) @@ -374,7 +507,6 @@ def 
setupUi(self, MainWindow): self._crossec_page = QtWidgets.QWidget() self._crossec_page.setObjectName("_crossec_page") self.verticalLayout_15 = QtWidgets.QVBoxLayout(self._crossec_page) - self.verticalLayout_15.setContentsMargins(0, 0, 0, 0) self.verticalLayout_15.setObjectName("verticalLayout_15") self._sec_grp = QtWidgets.QGroupBox(self._crossec_page) self._sec_grp.setCheckable(True) @@ -391,18 +523,35 @@ def setupUi(self, MainWindow): self.verticalLayout_19.addWidget(self.line_47) self.gridLayout_10 = QtWidgets.QGridLayout() self.gridLayout_10.setObjectName("gridLayout_10") + self.line_75 = QtWidgets.QFrame(self._sec_grp) + self.line_75.setMinimumSize(QtCore.QSize(20, 0)) + self.line_75.setFrameShape(QtWidgets.QFrame.VLine) + self.line_75.setFrameShadow(QtWidgets.QFrame.Sunken) + self.line_75.setObjectName("line_75") + self.gridLayout_10.addWidget(self.line_75, 1, 1, 1, 1) + self._csAxial = QtWidgets.QSlider(self._sec_grp) + self._csAxial.setMinimum(-5) + self._csAxial.setOrientation(QtCore.Qt.Horizontal) + self._csAxial.setObjectName("_csAxial") + self.gridLayout_10.addWidget(self._csAxial, 5, 2, 1, 1) self.label_46 = QtWidgets.QLabel(self._sec_grp) font = QtGui.QFont() font.setItalic(True) self.label_46.setFont(font) self.label_46.setObjectName("label_46") - self.gridLayout_10.addWidget(self.label_46, 2, 0, 1, 1) + self.gridLayout_10.addWidget(self.label_46, 4, 0, 1, 1) self.line_71 = QtWidgets.QFrame(self._sec_grp) self.line_71.setMinimumSize(QtCore.QSize(20, 0)) self.line_71.setFrameShape(QtWidgets.QFrame.VLine) self.line_71.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_71.setObjectName("line_71") - self.gridLayout_10.addWidget(self.line_71, 3, 1, 1, 1) + self.gridLayout_10.addWidget(self.line_71, 5, 1, 1, 1) + self.label_50 = QtWidgets.QLabel(self._sec_grp) + font = QtGui.QFont() + font.setItalic(True) + self.label_50.setFont(font) + self.label_50.setObjectName("label_50") + self.gridLayout_10.addWidget(self.label_50, 1, 0, 1, 1) self.line_73 = 
QtWidgets.QFrame(self._sec_grp) self.line_73.setMinimumSize(QtCore.QSize(20, 0)) self.line_73.setFrameShape(QtWidgets.QFrame.VLine) @@ -413,13 +562,13 @@ def setupUi(self, MainWindow): self._csCoron.setMinimum(-5) self._csCoron.setOrientation(QtCore.Qt.Horizontal) self._csCoron.setObjectName("_csCoron") - self.gridLayout_10.addWidget(self._csCoron, 2, 2, 1, 1) + self.gridLayout_10.addWidget(self._csCoron, 4, 2, 1, 1) self.label_47 = QtWidgets.QLabel(self._sec_grp) font = QtGui.QFont() font.setItalic(True) self.label_47.setFont(font) self.label_47.setObjectName("label_47") - self.gridLayout_10.addWidget(self.label_47, 3, 0, 1, 1) + self.gridLayout_10.addWidget(self.label_47, 5, 0, 1, 1) self._csDiv = QtWidgets.QComboBox(self._sec_grp) self._csDiv.setObjectName("_csDiv") self.gridLayout_10.addWidget(self._csDiv, 0, 2, 1, 1) @@ -428,46 +577,26 @@ def setupUi(self, MainWindow): self.line_69.setFrameShape(QtWidgets.QFrame.VLine) self.line_69.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_69.setObjectName("line_69") - self.gridLayout_10.addWidget(self.line_69, 2, 1, 1, 1) + self.gridLayout_10.addWidget(self.line_69, 4, 1, 1, 1) self.line_48 = QtWidgets.QFrame(self._sec_grp) self.line_48.setMinimumSize(QtCore.QSize(20, 0)) self.line_48.setFrameShape(QtWidgets.QFrame.VLine) self.line_48.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_48.setObjectName("line_48") - self.gridLayout_10.addWidget(self.line_48, 1, 1, 1, 1) + self.gridLayout_10.addWidget(self.line_48, 3, 1, 1, 1) self._csSagit = QtWidgets.QSlider(self._sec_grp) self._csSagit.setMinimum(-5) self._csSagit.setOrientation(QtCore.Qt.Horizontal) self._csSagit.setInvertedAppearance(False) self._csSagit.setInvertedControls(False) self._csSagit.setObjectName("_csSagit") - self.gridLayout_10.addWidget(self._csSagit, 1, 2, 1, 1) - self.line_87 = QtWidgets.QFrame(self._sec_grp) - self.line_87.setMinimumSize(QtCore.QSize(20, 0)) - self.line_87.setFrameShape(QtWidgets.QFrame.VLine) - 
self.line_87.setFrameShadow(QtWidgets.QFrame.Sunken) - self.line_87.setObjectName("line_87") - self.gridLayout_10.addWidget(self.line_87, 4, 1, 1, 1) - self._csCmap = QtWidgets.QComboBox(self._sec_grp) - self._csCmap.setObjectName("_csCmap") - self.gridLayout_10.addWidget(self._csCmap, 4, 2, 1, 1) - self._csAxial = QtWidgets.QSlider(self._sec_grp) - self._csAxial.setMinimum(-5) - self._csAxial.setOrientation(QtCore.Qt.Horizontal) - self._csAxial.setObjectName("_csAxial") - self.gridLayout_10.addWidget(self._csAxial, 3, 2, 1, 1) - self.label_59 = QtWidgets.QLabel(self._sec_grp) - font = QtGui.QFont() - font.setItalic(True) - self.label_59.setFont(font) - self.label_59.setObjectName("label_59") - self.gridLayout_10.addWidget(self.label_59, 4, 0, 1, 1) + self.gridLayout_10.addWidget(self._csSagit, 3, 2, 1, 1) self.label_44 = QtWidgets.QLabel(self._sec_grp) font = QtGui.QFont() font.setItalic(True) self.label_44.setFont(font) self.label_44.setObjectName("label_44") - self.gridLayout_10.addWidget(self.label_44, 1, 0, 1, 1) + self.gridLayout_10.addWidget(self.label_44, 3, 0, 1, 1) self.label_49 = QtWidgets.QLabel(self._sec_grp) font = QtGui.QFont() font.setItalic(True) @@ -479,7 +608,41 @@ def setupUi(self, MainWindow): self.horizontalLayout_15 = QtWidgets.QHBoxLayout(self.widget_5) self.horizontalLayout_15.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_15.setObjectName("horizontalLayout_15") - self.gridLayout_10.addWidget(self.widget_5, 5, 0, 1, 3) + self.gridLayout_10.addWidget(self.widget_5, 6, 0, 1, 3) + self.line_80 = QtWidgets.QFrame(self._sec_grp) + self.line_80.setMinimumSize(QtCore.QSize(20, 0)) + self.line_80.setFrameShape(QtWidgets.QFrame.VLine) + self.line_80.setFrameShadow(QtWidgets.QFrame.Sunken) + self.line_80.setObjectName("line_80") + self.gridLayout_10.addWidget(self.line_80, 2, 1, 1, 1) + self.label_55 = QtWidgets.QLabel(self._sec_grp) + font = QtGui.QFont() + font.setItalic(True) + self.label_55.setFont(font) + 
self.label_55.setObjectName("label_55") + self.gridLayout_10.addWidget(self.label_55, 2, 0, 1, 1) + self._csLevel = QtWidgets.QComboBox(self._sec_grp) + self._csLevel.setEnabled(False) + self._csLevel.setObjectName("_csLevel") + self._csLevel.addItem("") + self._csLevel.addItem("") + self._csLevel.addItem("") + self._csLevel.addItem("") + self._csLevel.addItem("") + self._csLevel.addItem("") + self._csLevel.addItem("") + self.gridLayout_10.addWidget(self._csLevel, 1, 2, 1, 1) + self._csInterp = QtWidgets.QComboBox(self._sec_grp) + self._csInterp.setObjectName("_csInterp") + self._csInterp.addItem("") + self._csInterp.addItem("") + self._csInterp.addItem("") + self._csInterp.addItem("") + self._csInterp.addItem("") + self._csInterp.addItem("") + self._csInterp.addItem("") + self._csInterp.addItem("") + self.gridLayout_10.addWidget(self._csInterp, 2, 2, 1, 1) self.verticalLayout_19.addLayout(self.gridLayout_10) spacerItem10 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout_19.addItem(spacerItem10) @@ -488,7 +651,6 @@ def setupUi(self, MainWindow): self._sources_page = QtWidgets.QWidget() self._sources_page.setObjectName("_sources_page") self.verticalLayout_9 = QtWidgets.QVBoxLayout(self._sources_page) - self.verticalLayout_9.setContentsMargins(0, 0, 0, 0) self.verticalLayout_9.setObjectName("verticalLayout_9") self._source_tab = QtWidgets.QTabWidget(self._sources_page) self._source_tab.setObjectName("_source_tab") @@ -501,10 +663,9 @@ def setupUi(self, MainWindow): self.scrollArea_2.setWidgetResizable(True) self.scrollArea_2.setObjectName("scrollArea_2") self.scrollAreaWidgetContents_2 = QtWidgets.QWidget() - self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 316, 569)) + self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 312, 392)) self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2") self.verticalLayout_12 = 
QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_2) - self.verticalLayout_12.setContentsMargins(0, 0, 0, 0) self.verticalLayout_12.setObjectName("verticalLayout_12") self._s_grp = QtWidgets.QGroupBox(self.scrollAreaWidgetContents_2) self._s_grp.setCheckable(True) @@ -922,7 +1083,6 @@ def setupUi(self, MainWindow): self._connect_page = QtWidgets.QWidget() self._connect_page.setObjectName("_connect_page") self.verticalLayout_20 = QtWidgets.QVBoxLayout(self._connect_page) - self.verticalLayout_20.setContentsMargins(0, 0, 0, 0) self.verticalLayout_20.setObjectName("verticalLayout_20") self._c_grp = QtWidgets.QGroupBox(self._connect_page) font = QtGui.QFont() @@ -966,7 +1126,6 @@ def setupUi(self, MainWindow): self.page_4 = QtWidgets.QWidget() self.page_4.setObjectName("page_4") self.horizontalLayout = QtWidgets.QHBoxLayout(self.page_4) - self.horizontalLayout.setContentsMargins(0, 0, 0, 0) self.horizontalLayout.setObjectName("horizontalLayout") self.label_38 = QtWidgets.QLabel(self.page_4) font = QtGui.QFont() @@ -984,7 +1143,6 @@ def setupUi(self, MainWindow): self.page_5 = QtWidgets.QWidget() self.page_5.setObjectName("page_5") self.gridLayout = QtWidgets.QGridLayout(self.page_5) - self.gridLayout.setContentsMargins(0, 0, 0, 0) self.gridLayout.setObjectName("gridLayout") self.label_23 = QtWidgets.QLabel(self.page_5) font = QtGui.QFont() @@ -1018,6 +1176,7 @@ def setupUi(self, MainWindow): self._c_colorby.setObjectName("_c_colorby") self._c_colorby.addItem("") self._c_colorby.addItem("") + self._c_colorby.addItem("") self.gridLayout_14.addWidget(self._c_colorby, 0, 1, 1, 1) self.label_37 = QtWidgets.QLabel(self._c_grp) font = QtGui.QFont() @@ -1070,7 +1229,6 @@ def setupUi(self, MainWindow): self._ts_page = QtWidgets.QWidget() self._ts_page.setObjectName("_ts_page") self.verticalLayout_21 = QtWidgets.QVBoxLayout(self._ts_page) - self.verticalLayout_21.setContentsMargins(0, 0, 0, 0) self.verticalLayout_21.setObjectName("verticalLayout_21") self._ts_grp = 
QtWidgets.QGroupBox(self._ts_page) self._ts_grp.setMinimumSize(QtCore.QSize(0, 0)) @@ -1221,7 +1379,6 @@ def setupUi(self, MainWindow): self._pic_page = QtWidgets.QWidget() self._pic_page.setObjectName("_pic_page") self.verticalLayout_28 = QtWidgets.QVBoxLayout(self._pic_page) - self.verticalLayout_28.setContentsMargins(0, 0, 0, 0) self.verticalLayout_28.setObjectName("verticalLayout_28") self._pic_grp = QtWidgets.QGroupBox(self._pic_page) self._pic_grp.setMinimumSize(QtCore.QSize(0, 0)) @@ -1340,7 +1497,6 @@ def setupUi(self, MainWindow): self._vec_page = QtWidgets.QWidget() self._vec_page.setObjectName("_vec_page") self.verticalLayout = QtWidgets.QVBoxLayout(self._vec_page) - self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.setObjectName("verticalLayout") self._vec_grp = QtWidgets.QGroupBox(self._vec_page) self._vec_grp.setMinimumSize(QtCore.QSize(0, 0)) @@ -1454,7 +1610,6 @@ def setupUi(self, MainWindow): self.UserMsgBox.setMinimumSize(QtCore.QSize(0, 0)) self.UserMsgBox.setObjectName("UserMsgBox") self.horizontalLayout_52 = QtWidgets.QHBoxLayout(self.UserMsgBox) - self.horizontalLayout_52.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_52.setObjectName("horizontalLayout_52") self.verticalLayout_2.addWidget(self.UserMsgBox) self.QuickSettings.addTab(self.tab, "") @@ -1489,7 +1644,6 @@ def setupUi(self, MainWindow): self._BrainPage = QtWidgets.QWidget() self._BrainPage.setObjectName("_BrainPage") self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self._BrainPage) - self.horizontalLayout_7.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_7.setObjectName("horizontalLayout_7") self.horizontalLayout_11 = QtWidgets.QHBoxLayout() self.horizontalLayout_11.setSpacing(0) @@ -1515,7 +1669,6 @@ def setupUi(self, MainWindow): self._crossecPage = QtWidgets.QWidget() self._crossecPage.setObjectName("_crossecPage") self.verticalLayout_26 = QtWidgets.QVBoxLayout(self._crossecPage) - self.verticalLayout_26.setContentsMargins(0, 0, 0, 0) 
self.verticalLayout_26.setObjectName("verticalLayout_26") self._axialLayout = QtWidgets.QHBoxLayout() self._axialLayout.setObjectName("_axialLayout") @@ -1720,7 +1873,7 @@ def setupUi(self, MainWindow): self.retranslateUi(MainWindow) self.QuickSettings.setCurrentIndex(0) - self._obj_stack.setCurrentIndex(4) + self._obj_stack.setCurrentIndex(0) self._brain_hemi.setCurrentIndex(0) self._source_tab.setCurrentIndex(2) self._c_alpha_stack.setCurrentIndex(0) @@ -1746,6 +1899,12 @@ def retranslateUi(self, MainWindow): "name")) self._obj_name_lst.setToolTip(_translate("MainWindow", "

Color connectivity line according to :

- Their connectivity strength

- The number of connections per node

- The line density (use the radius to control the density)

Input parameter : c_colorby

")) self._brain_grp.setTitle(_translate("MainWindow", "Display")) + self.label_59.setText(_translate("MainWindow", "z-max")) + self.label_36.setText(_translate("MainWindow", "y-min")) + self.label_35.setText(_translate("MainWindow", "x-max")) + self.label_56.setText(_translate("MainWindow", "z-min")) + self.label_34.setText(_translate("MainWindow", "x-min")) + self.label_40.setText(_translate("MainWindow", "y-max")) self.label_19.setText(_translate("MainWindow", "Hemisphere")) self.label_26.setText(_translate("MainWindow", "Template")) self._brain_template.setToolTip(_translate("MainWindow", "

Switch brain template.

Input parameter : a_template

")) @@ -1754,13 +1913,25 @@ def retranslateUi(self, MainWindow): self._brain_hemi.setItemText(2, _translate("MainWindow", "right")) self._brain_translucent.setToolTip(_translate("MainWindow", "

Use transparent or opaque brain.

Input parameter : a_proj and a_opacity

")) self._brain_translucent.setText(_translate("MainWindow", "Translucent")) + self.label_33.setText(_translate("MainWindow", "Slice")) + self.label_62.setText(_translate("MainWindow", "Light")) + self._brain_inlight.setText(_translate("MainWindow", "Inside")) self._roi_grp.setTitle(_translate("MainWindow", "Display")) - self.label_113.setText(_translate("MainWindow", "ROI\n" -"type")) - self.label_2.setText(_translate("MainWindow", "Smooth")) + self.label_114.setText(_translate("MainWindow", "Level")) + self.label_113.setText(_translate("MainWindow", "ROI")) self._roiTransp.setText(_translate("MainWindow", "Translucent")) + self._roiLevel.setItemText(0, _translate("MainWindow", "7")) + self._roiLevel.setItemText(1, _translate("MainWindow", "12")) + self._roiLevel.setItemText(2, _translate("MainWindow", "20")) + self._roiLevel.setItemText(3, _translate("MainWindow", "36")) + self._roiLevel.setItemText(4, _translate("MainWindow", "64")) + self._roiLevel.setItemText(5, _translate("MainWindow", "122")) + self._roiLevel.setItemText(6, _translate("MainWindow", "ROI")) + self._roiIsSmooth.setText(_translate("MainWindow", "Smooth")) + self._roiUniColor.setText(_translate("MainWindow", "Unique color")) self._roiButApply.setText(_translate("MainWindow", "Apply")) self._roiButRst.setText(_translate("MainWindow", "Reset")) + self._roiFilter.setPlaceholderText(_translate("MainWindow", "Filter (regular expression)")) self._vol_grp.setTitle(_translate("MainWindow", "Display")) self._volRendering.setItemText(0, _translate("MainWindow", "mip")) self._volRendering.setItemText(1, _translate("MainWindow", "translucent")) @@ -1772,10 +1943,26 @@ def retranslateUi(self, MainWindow): self.label_57.setText(_translate("MainWindow", "Volume")) self._sec_grp.setTitle(_translate("MainWindow", "Display")) self.label_46.setText(_translate("MainWindow", "Coronal")) + self.label_50.setText(_translate("MainWindow", "Level")) self.label_47.setText(_translate("MainWindow", "Axial")) - 
self.label_59.setText(_translate("MainWindow", "Cmap")) self.label_44.setText(_translate("MainWindow", "Sagittal")) self.label_49.setText(_translate("MainWindow", "Volume")) + self.label_55.setText(_translate("MainWindow", "Interpolation")) + self._csLevel.setItemText(0, _translate("MainWindow", "7")) + self._csLevel.setItemText(1, _translate("MainWindow", "12")) + self._csLevel.setItemText(2, _translate("MainWindow", "20")) + self._csLevel.setItemText(3, _translate("MainWindow", "36")) + self._csLevel.setItemText(4, _translate("MainWindow", "64")) + self._csLevel.setItemText(5, _translate("MainWindow", "122")) + self._csLevel.setItemText(6, _translate("MainWindow", "ROI")) + self._csInterp.setItemText(0, _translate("MainWindow", "nearest")) + self._csInterp.setItemText(1, _translate("MainWindow", "bilinear")) + self._csInterp.setItemText(2, _translate("MainWindow", "hanning")) + self._csInterp.setItemText(3, _translate("MainWindow", "hamming")) + self._csInterp.setItemText(4, _translate("MainWindow", "hermite")) + self._csInterp.setItemText(5, _translate("MainWindow", "kaiser")) + self._csInterp.setItemText(6, _translate("MainWindow", "quadric")) + self._csInterp.setItemText(7, _translate("MainWindow", "bicubic")) self._s_grp.setTitle(_translate("MainWindow", "Display sources")) self._s_select.setToolTip(_translate("MainWindow", "

Hide or display sources. Specify if you want to keep only sources in the left or right hemisphere or sources that are either inside or outside the brain volume.

")) self._s_select.setItemText(0, _translate("MainWindow", "All")) @@ -1864,6 +2051,7 @@ def retranslateUi(self, MainWindow): self._c_colorby.setToolTip(_translate("MainWindow", "

Color connectivity line according to :

- Their connectivity strength

- The number of connections per node

- The line density (use the radius to control the density)

Input parameter : c_colorby

")) self._c_colorby.setItemText(0, _translate("MainWindow", "strength")) self._c_colorby.setItemText(1, _translate("MainWindow", "count")) + self._c_colorby.setItemText(2, _translate("MainWindow", "causal")) self.label_37.setText(_translate("MainWindow", "Transparency")) self.label_109.setText(_translate("MainWindow", "Color")) self.label_22.setText(_translate("MainWindow", "Line\n" diff --git a/visbrain/brain/interface/gui/brain_gui.ui b/visbrain/gui/brain/interface/gui/brain_gui.ui similarity index 90% rename from visbrain/brain/interface/gui/brain_gui.ui rename to visbrain/gui/brain/interface/gui/brain_gui.ui index 4e67e2e86..cbc21be1e 100644 --- a/visbrain/brain/interface/gui/brain_gui.ui +++ b/visbrain/gui/brain/interface/gui/brain_gui.ui @@ -238,7 +238,7 @@ name - 4 + 0 @@ -272,6 +272,187 @@ name + + + + + + + true + + + + z-max + + + + + + + 10 + + + 10 + + + Qt::Horizontal + + + false + + + + + + + + true + + + + y-min + + + + + + + + true + + + + x-max + + + + + + + + true + + + + z-min + + + + + + + 10 + + + 10 + + + Qt::Horizontal + + + false + + + true + + + + + + + 10 + + + 10 + + + Qt::Horizontal + + + false + + + true + + + + + + + + true + + + + x-min + + + + + + + + true + + + + y-max + + + + + + + 10 + + + 10 + + + Qt::Horizontal + + + true + + + + + + + 10 + + + 10 + + + Qt::Horizontal + + + false + + + + + + + 10 + + + 10 + + + Qt::Horizontal + + + false + + + true + + + + + @@ -351,7 +532,7 @@ name - + Qt::Horizontal @@ -411,6 +592,63 @@ name + + + + + 20 + 0 + + + + Qt::Vertical + + + + + + + + true + + + + Slice + + + + + + + + true + + + + Light + + + + + + + + 20 + 0 + + + + Qt::Vertical + + + + + + + Inside + + + @@ -472,7 +710,19 @@ name 0 - + + + + + true + + + + Level + + + + 3 @@ -480,6 +730,9 @@ name 2 + + 3 + @@ -490,8 +743,7 @@ name - ROI -type + ROI @@ -508,7 +760,7 @@ type - + @@ -521,7 +773,7 @@ type - + Qt::Horizontal @@ -534,18 +786,6 @@ type - - - - - true - - - - Smooth - - - @@ -559,6 +799,75 @@ type + + + + + 10 + 0 + + + + 
Qt::Vertical + + + + + + + false + + + + 7 + + + + + 12 + + + + + 20 + + + + + 36 + + + + + 64 + + + + + 122 + + + + + ROI + + + + + + + + Smooth + + + + + + + Unique color + + + @@ -566,7 +875,7 @@ type 0 - + 0 @@ -642,8 +951,15 @@ type + + + + Filter (regular expression) + + + - + @@ -907,7 +1223,30 @@ type - + + + + + 20 + 0 + + + + Qt::Vertical + + + + + + + -5 + + + Qt::Horizontal + + + + @@ -919,7 +1258,7 @@ type - + @@ -932,6 +1271,18 @@ type + + + + + true + + + + Level + + + @@ -945,7 +1296,7 @@ type - + -5 @@ -955,7 +1306,7 @@ type - + @@ -970,7 +1321,7 @@ type - + @@ -983,7 +1334,7 @@ type - + @@ -996,7 +1347,7 @@ type - + -5 @@ -1012,45 +1363,7 @@ type - - - - - 20 - 0 - - - - Qt::Vertical - - - - - - - - - - -5 - - - Qt::Horizontal - - - - - - - - true - - - - Cmap - - - - + @@ -1074,7 +1387,7 @@ type - + @@ -1083,6 +1396,117 @@ type + + + + + 20 + 0 + + + + Qt::Vertical + + + + + + + + true + + + + Interpolation + + + + + + + false + + + + 7 + + + + + 12 + + + + + 20 + + + + + 36 + + + + + 64 + + + + + 122 + + + + + ROI + + + + + + + + + nearest + + + + + bilinear + + + + + hanning + + + + + hamming + + + + + hermite + + + + + kaiser + + + + + quadric + + + + + bicubic + + + + @@ -1128,8 +1552,8 @@ type 0 0 - 316 - 569 + 312 + 392 @@ -2351,6 +2775,11 @@ ROI count + + + causal + + diff --git a/visbrain/brain/interface/ui_elements/__init__.py b/visbrain/gui/brain/interface/ui_elements/__init__.py similarity index 100% rename from visbrain/brain/interface/ui_elements/__init__.py rename to visbrain/gui/brain/interface/ui_elements/__init__.py diff --git a/visbrain/brain/interface/ui_elements/ui_atlas.py b/visbrain/gui/brain/interface/ui_elements/ui_atlas.py similarity index 69% rename from visbrain/brain/interface/ui_elements/ui_atlas.py rename to visbrain/gui/brain/interface/ui_elements/ui_atlas.py index 437237d39..dbd18c79f 100644 --- a/visbrain/brain/interface/ui_elements/ui_atlas.py +++ b/visbrain/gui/brain/interface/ui_elements/ui_atlas.py @@ -5,10 
+5,10 @@ """ import numpy as np import logging -from PyQt5 import QtCore, QtWidgets +from PyQt5 import QtCore -from ....objects.volume_obj import VOLUME_CMAPS -from ....utils import mpl_cmap, mpl_cmap_index, fill_pyqt_table +from visbrain.objects.volume_obj import VOLUME_CMAPS +from visbrain.utils import fill_pyqt_table logger = logging.getLogger('visbrain') @@ -48,6 +48,16 @@ def __init__(self): self._brain_translucent.setChecked(self.atlas.translucent) self._brain_translucent.clicked.connect(self._fcn_brain_translucent) self._brain_alpha.valueChanged.connect(self._fcn_brain_alpha) + # Slices : + self._fcn_brain_reset_slider() + self._brain_xmin.valueChanged.connect(self._fcn_brain_slices) + self._brain_xmax.valueChanged.connect(self._fcn_brain_slices) + self._brain_ymin.valueChanged.connect(self._fcn_brain_slices) + self._brain_ymax.valueChanged.connect(self._fcn_brain_slices) + self._brain_zmin.valueChanged.connect(self._fcn_brain_slices) + self._brain_zmax.valueChanged.connect(self._fcn_brain_slices) + # Light : + self._brain_inlight.clicked.connect(self._fcn_brain_inlight) ####################################################################### # REGION OF INTEREST @@ -60,6 +70,12 @@ def __init__(self): self._roiDiv.addItems(vol_list) self._roiDiv.setCurrentIndex(vol_list.index(self.roi.name)) self._roiDiv.currentIndexChanged.connect(self._fcn_build_roi_list) + # Roi smooth : + self._roiIsSmooth.clicked.connect(self._fcn_roi_smooth) + self._roiSmooth.setEnabled(False) + # MIST level : + self._roiLevel.setEnabled('mist' in self.roi.name.lower()) + self._roiLevel.currentIndexChanged.connect(self._fcn_build_roi_list) # Apply and reset : self._roiButRst.clicked.connect(self._fcn_reset_roi_list) self._roiButApply.clicked.connect(self._fcn_apply_roi_selection) @@ -82,11 +98,8 @@ def __init__(self): # Subdivision : self._csDiv.addItems(vol_list) self._csDiv.currentIndexChanged.connect(self._fcn_crossec_change) - # Cmap : - self._csCmap.addItems(mpl_cmap()) - idx = 
mpl_cmap_index(self.cross_sec.to_kwargs()['cmap']) - self._csCmap.setCurrentIndex(idx[0]) - self._csCmap.currentIndexChanged.connect(self._fcn_crossec_cmap) + self._csLevel.currentIndexChanged.connect(self._fcn_crossec_change) + self._csInterp.currentIndexChanged.connect(self._fcn_crossec_interp) # Visibility : self._sec_grp.setChecked(self.cross_sec.visible_obj) self._sec_grp.clicked.connect(self._fcn_crossec_viz) @@ -149,8 +162,10 @@ def _fcn_brain_template(self): self.atlas.scale = self._gl_scale self.atlas.reset_camera() self.atlas.rotate('top') + self.atlas._name = template if self.atlas.hemisphere != hemisphere: self.atlas.hemisphere = hemisphere + self._fcn_brain_reset_slider() def _fcn_brain_hemisphere(self): """Change the hemisphere.""" @@ -169,6 +184,59 @@ def _fcn_brain_alpha(self): alpha = self._brain_alpha.value() / 100. self.atlas.alpha = alpha + def _fcn_brain_reset_slider(self): + """Reset min/max slice sliders.""" + v = self.atlas.vertices + n_cut = 1000 + xmin, xmax = v[:, 0].min() - 1., v[:, 0].max() + 1. + ymin, ymax = v[:, 1].min() - 1., v[:, 1].max() + 1. + zmin, zmax = v[:, 2].min() - 1., v[:, 2].max() + 1. 
+ # xmin + self._brain_xmin.setMinimum(xmin) + self._brain_xmin.setMaximum(xmax) + self._brain_xmin.setSingleStep((xmin - xmax) / n_cut) + self._brain_xmin.setValue(xmin) + # xmax + self._brain_xmax.setMinimum(xmin) + self._brain_xmax.setMaximum(xmax) + self._brain_xmax.setSingleStep((xmin - xmax) / n_cut) + self._brain_xmax.setValue(xmax) + # ymin + self._brain_ymin.setMinimum(ymin) + self._brain_ymin.setMaximum(ymax) + self._brain_ymin.setSingleStep((ymin - ymax) / n_cut) + self._brain_ymin.setValue(ymin) + # ymax + self._brain_ymax.setMinimum(ymin) + self._brain_ymax.setMaximum(ymax) + self._brain_ymax.setSingleStep((ymin - ymax) / n_cut) + self._brain_ymax.setValue(ymax) + # zmin + self._brain_zmin.setMinimum(zmin) + self._brain_zmin.setMaximum(zmax) + self._brain_zmin.setSingleStep((zmin - zmax) / n_cut) + self._brain_zmin.setValue(zmin) + # zmax + self._brain_zmax.setMinimum(zmin) + self._brain_zmax.setMaximum(zmax) + self._brain_zmax.setSingleStep((zmin - zmax) / n_cut) + self._brain_zmax.setValue(zmax) + + def _fcn_brain_slices(self): + """Slice the brain.""" + self.atlas.mesh.xmin = float(self._brain_xmin.value()) + self.atlas.mesh.xmax = float(self._brain_xmax.value()) + self.atlas.mesh.ymin = float(self._brain_ymin.value()) + self.atlas.mesh.ymax = float(self._brain_ymax.value()) + self.atlas.mesh.zmin = float(self._brain_zmin.value()) + self.atlas.mesh.zmax = float(self._brain_zmax.value()) + self.atlas.mesh.update() + + def _fcn_brain_inlight(self): + """Set light to be inside the brain.""" + self.atlas.mesh.inv_light = self._brain_inlight.isChecked() + self.atlas.mesh.update() + ########################################################################### ########################################################################### # REGION OF INTEREST @@ -179,56 +247,61 @@ def _fcn_roi_visible(self): self.menuDispROI.setChecked(self._roi_grp.isChecked()) self._fcn_menu_disp_roi() + def _fcn_roi_smooth(self): + """Enable ROI smoothing.""" + 
self._roiSmooth.setEnabled(self._roiIsSmooth.isChecked()) + def _fcn_build_roi_list(self): """Build a list of checkable ROIs.""" # Select volume : selected_roi = str(self._roiDiv.currentText()) + # Mist : + if 'mist' in selected_roi.lower(): + self._roiLevel.setEnabled('mist' in selected_roi) + level = str(self._roiLevel.currentText()) + selected_roi += '_%s' % level if self.roi.name != selected_roi: self.roi(selected_roi) # Clear widget list and add ROIs : - self._roiToAdd.clear() + self._roiToAdd.reset() df = self.roi.get_labels() - col_names = [''] + list(df.keys()) + if 'mist' in selected_roi.lower(): + df = df[['index', 'name_%s' % level]] + col_names = list(df.keys()) col_names.pop(col_names.index('index')) - cols = [[''] * len(df)] - cols += [list(df[k]) for k in col_names if k not in ['', 'index']] + cols = [list(df[k]) for k in col_names if k not in ['', 'index']] + # Build the table with the filter : + self._roiModel = fill_pyqt_table(self._roiToAdd, col_names, cols, + filter=self._roiFilter, check=0, + filter_col=0) # By default, uncheck items : - fill_pyqt_table(self._roiToAdd, col_names, cols) self._fcn_reset_roi_list() def _fcn_reset_roi_list(self): """Reset ROIs selection.""" # Unchecked all ROIs : - for num in range(self._roiToAdd.rowCount()): - c = QtWidgets.QTableWidgetItem() - c.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled) - c.setCheckState(QtCore.Qt.Unchecked) - self._roiToAdd.setItem(num, 0, c) + for num in range(self._roiModel.rowCount()): + self._roiModel.item(num, 0).setCheckState(QtCore.Qt.Unchecked) def _fcn_get_selected_rois(self): """Get the list of selected ROIs.""" _roiToAdd = [] all_idx = list(self.roi.get_labels()['index']) - for num in range(self._roiToAdd.rowCount()): - item = self._roiToAdd.item(num, 0) + for num in range(self._roiModel.rowCount()): + item = self._roiModel.item(num, 0) if item.checkState(): _roiToAdd.append(all_idx[num]) return _roiToAdd - def _fcn_set_selected_rois(self, selection): - """Set 
a list of selected rois.""" - for k in selection: - item = self._roiToAdd.item(k) - item.setCheckState(QtCore.Qt.Checked) - def _fcn_apply_roi_selection(self, _, roi_name='roi'): """Apply ROI selection.""" # Get the list of selected ROIs : _roiToAdd = self._fcn_get_selected_rois() - smooth = self._roiSmooth.value() + smooth = self._roiSmooth.value() * int(self._roiIsSmooth.isChecked()) + uni_col = bool(self._roiUniColor.isChecked()) if _roiToAdd: - self.roi.select_roi(_roiToAdd, smooth=smooth) + self.roi.select_roi(_roiToAdd, smooth=smooth, unique_color=uni_col) self.roi.camera = self._camera # Enable projection on ROI and related buttons : self._s_proj_on.model().item(1).setEnabled(True) @@ -265,10 +338,6 @@ def _fcn_crossec_move(self, *args, update=False): sl = self.cross_sec.slice_to_pos((dx, dy, dz)) self.cross_sec.cut_coords(sl) - def _fcn_crossec_cmap(self): - """Change cross-sections colormap.""" - self._fcn_crossec_move(update=True) - def _fcn_crossec_viz(self): """Control cross-sections visibility.""" self.menuDispCrossec.setChecked(self._sec_grp.isChecked()) @@ -278,11 +347,27 @@ def _fcn_crossec_change(self): """Change the cross-sections subdivision type.""" # Get selected volume : name = str(self._csDiv.currentText()) + level = str(self._csLevel.currentText()) + is_mist = name == 'mist' + self._csLevel.setEnabled(is_mist) + name = name + '_' + level if is_mist else name # Select the volume : self.cross_sec(name) + self.cross_sec.contrast = 0. 
# Update clim and minmax : self._fcn_crossec_sl_limits() - self._fcn_crossec_move(update=True) + self._fcn_crossec_interp() + # self.cross_sec.cut_coords(None) + self._fcn_crossec_move() + self.cross_sec._set_text(0, 'File = ' + name) + # Reset sliders : + self._csSagit.setValue(self.cross_sec._sagittal) + self._csCoron.setValue(self.cross_sec._coronal) + self._csAxial.setValue(self.cross_sec._axial) + + def _fcn_crossec_interp(self): + """Interpolation method.""" + self.cross_sec.interpolation = str(self._csInterp.currentText()) ########################################################################### ########################################################################### diff --git a/visbrain/brain/interface/ui_elements/ui_config.py b/visbrain/gui/brain/interface/ui_elements/ui_config.py similarity index 98% rename from visbrain/brain/interface/ui_elements/ui_config.py rename to visbrain/gui/brain/interface/ui_elements/ui_config.py index b8655fa9a..de44cf3af 100644 --- a/visbrain/brain/interface/ui_elements/ui_config.py +++ b/visbrain/gui/brain/interface/ui_elements/ui_config.py @@ -1,7 +1,9 @@ """Enable the user to save/load the Brain config in a json file.""" from warnings import warn -from ....io import dialog_load, dialog_save, save_config_json, load_config_json -from ....utils import color2json + +from visbrain.io import (dialog_load, dialog_save, save_config_json, + load_config_json) +from visbrain.utils import color2json class UiConfig(object): diff --git a/visbrain/brain/interface/ui_elements/ui_connectivity.py b/visbrain/gui/brain/interface/ui_elements/ui_connectivity.py similarity index 100% rename from visbrain/brain/interface/ui_elements/ui_connectivity.py rename to visbrain/gui/brain/interface/ui_elements/ui_connectivity.py diff --git a/visbrain/brain/interface/ui_elements/ui_elements.py b/visbrain/gui/brain/interface/ui_elements/ui_elements.py similarity index 100% rename from visbrain/brain/interface/ui_elements/ui_elements.py rename to 
visbrain/gui/brain/interface/ui_elements/ui_elements.py diff --git a/visbrain/brain/interface/ui_elements/ui_menu.py b/visbrain/gui/brain/interface/ui_elements/ui_menu.py similarity index 99% rename from visbrain/brain/interface/ui_elements/ui_menu.py rename to visbrain/gui/brain/interface/ui_elements/ui_menu.py index e292a34d1..e98918586 100644 --- a/visbrain/brain/interface/ui_elements/ui_menu.py +++ b/visbrain/gui/brain/interface/ui_elements/ui_menu.py @@ -1,7 +1,7 @@ """GUI interactions with the contextual menu.""" import vispy.scene.cameras as viscam -from ....utils import HelpMenu +from visbrain.utils import HelpMenu class UiMenu(HelpMenu): diff --git a/visbrain/brain/interface/ui_elements/ui_objects.py b/visbrain/gui/brain/interface/ui_elements/ui_objects.py similarity index 98% rename from visbrain/brain/interface/ui_elements/ui_objects.py rename to visbrain/gui/brain/interface/ui_elements/ui_objects.py index 2ee27b440..c456afbca 100644 --- a/visbrain/brain/interface/ui_elements/ui_objects.py +++ b/visbrain/gui/brain/interface/ui_elements/ui_objects.py @@ -1,7 +1,7 @@ """Main class for the objects panel.""" from functools import wraps -from ....utils import disconnect_all +from visbrain.utils import disconnect_all def _run_method_if_needed(fn): diff --git a/visbrain/brain/interface/ui_elements/ui_pictures.py b/visbrain/gui/brain/interface/ui_elements/ui_pictures.py similarity index 100% rename from visbrain/brain/interface/ui_elements/ui_pictures.py rename to visbrain/gui/brain/interface/ui_elements/ui_pictures.py diff --git a/visbrain/brain/interface/ui_elements/ui_screenshot.py b/visbrain/gui/brain/interface/ui_elements/ui_screenshot.py similarity index 92% rename from visbrain/brain/interface/ui_elements/ui_screenshot.py rename to visbrain/gui/brain/interface/ui_elements/ui_screenshot.py index f8d840ac3..47d492074 100644 --- a/visbrain/brain/interface/ui_elements/ui_screenshot.py +++ b/visbrain/gui/brain/interface/ui_elements/ui_screenshot.py @@ -1,6 
+1,6 @@ """Screenshot window and related functions.""" -from ....io import write_fig_pyqt, dialog_save -from ....utils import ScreenshotPopup +from visbrain.io import write_fig_pyqt, dialog_save +from visbrain.utils import ScreenshotPopup class UiScreenshot(object): diff --git a/visbrain/brain/interface/ui_elements/ui_sources.py b/visbrain/gui/brain/interface/ui_elements/ui_sources.py similarity index 97% rename from visbrain/brain/interface/ui_elements/ui_sources.py rename to visbrain/gui/brain/interface/ui_elements/ui_sources.py index 93518b002..e160b9b56 100644 --- a/visbrain/brain/interface/ui_elements/ui_sources.py +++ b/visbrain/gui/brain/interface/ui_elements/ui_sources.py @@ -3,8 +3,8 @@ import numpy as np from .ui_objects import _run_method_if_needed -from ....utils import (textline2color, safely_set_cbox, fill_pyqt_table) -from ....io import dialog_color +from visbrain.utils import (textline2color, safely_set_cbox, fill_pyqt_table) +from visbrain.io import dialog_color logger = logging.getLogger('visbrain') @@ -196,3 +196,4 @@ def _fcn_source_proj(self, _, **kwargs): self.sources.project_sources(b_obj, project=project, radius=radius, contribute=contribute, mask_color=mask_color, **kwargs) + self.cbqt.setEnabled('roi', hasattr(self.roi, 'mesh')) diff --git a/visbrain/brain/interface/ui_elements/ui_timeseries.py b/visbrain/gui/brain/interface/ui_elements/ui_timeseries.py similarity index 97% rename from visbrain/brain/interface/ui_elements/ui_timeseries.py rename to visbrain/gui/brain/interface/ui_elements/ui_timeseries.py index 54b4e9b29..ba727d6e8 100644 --- a/visbrain/brain/interface/ui_elements/ui_timeseries.py +++ b/visbrain/gui/brain/interface/ui_elements/ui_timeseries.py @@ -1,8 +1,8 @@ """GUI interactions with time-series.""" from .ui_objects import _run_method_if_needed -from ....io import dialog_color -from ....utils import textline2color +from visbrain.io import dialog_color +from visbrain.utils import textline2color class UiTimeSeries(object): 
diff --git a/visbrain/brain/interface/ui_elements/ui_vectors.py b/visbrain/gui/brain/interface/ui_elements/ui_vectors.py similarity index 100% rename from visbrain/brain/interface/ui_elements/ui_vectors.py rename to visbrain/gui/brain/interface/ui_elements/ui_vectors.py diff --git a/visbrain/brain/interface/ui_init.py b/visbrain/gui/brain/interface/ui_init.py similarity index 99% rename from visbrain/brain/interface/ui_init.py rename to visbrain/gui/brain/interface/ui_init.py index aa7ff0f17..639fe73be 100644 --- a/visbrain/brain/interface/ui_init.py +++ b/visbrain/gui/brain/interface/ui_init.py @@ -12,7 +12,7 @@ from vispy.scene.cameras import TurntableCamera from .gui import Ui_MainWindow -from ...objects import VisbrainCanvas +from visbrain.objects import VisbrainCanvas class BrainShortcuts(object): diff --git a/visbrain/brain/tests/test_brain.py b/visbrain/gui/brain/tests/test_brain.py similarity index 97% rename from visbrain/brain/tests/test_brain.py rename to visbrain/gui/brain/tests/test_brain.py index 7d4fa59ca..8c57a7fcb 100644 --- a/visbrain/brain/tests/test_brain.py +++ b/visbrain/gui/brain/tests/test_brain.py @@ -7,20 +7,28 @@ from vispy.app.canvas import MouseEvent, KeyEvent # from vispy.util.keys import Key -from visbrain import Brain +from visbrain.gui import Brain from visbrain.objects import (SourceObj, ConnectObj, TimeSeries3DObj, - Picture3DObj, RoiObj, VolumeObj, CrossSecObj) + Picture3DObj, RoiObj, VolumeObj, CrossSecObj, + BrainObj) from visbrain.io import download_file from visbrain.tests._tests_visbrain import _TestVisbrain # Download intrcranial xyz : -mat = np.load(download_file('xyz_sample.npz')) +mat = np.load(download_file('xyz_sample.npz', astype='example_data')) xyz_full = mat['xyz'] mat.close() xyz_1, xyz_2 = xyz_full[20:30, :], xyz_full[10:20, :] +# ---------------- Brain ---------------- +# Just to be sure to have them on server : +BrainObj('B1') +BrainObj('B2') +BrainObj('B3') +BrainObj('white') + # ---------------- Sources 
---------------- # Define some random sources : s_data = 100 * np.random.rand(10) @@ -94,7 +102,7 @@ def test_scene_rotation(self): def test_brain_control(self): """Test method brain_control.""" - template = ['B1', 'B2', 'B3', 'white'] + template = vb.brain_list() hemi = ['left', 'right', 'both'] translucent = [False, True] alpha = [.1, 1.] @@ -108,7 +116,7 @@ def test_brain_control(self): def test_brain_list(self): """Test method brain_list.""" - vb.brain_list() == ['B1', 'B2', 'B3'] + assert len(vb.brain_list()) > 1 ########################################################################### # SOURCES diff --git a/visbrain/brain/user.py b/visbrain/gui/brain/user.py similarity index 99% rename from visbrain/brain/user.py rename to visbrain/gui/brain/user.py index 000192233..1bcbcd97d 100644 --- a/visbrain/brain/user.py +++ b/visbrain/gui/brain/user.py @@ -9,9 +9,9 @@ from scipy.spatial import ConvexHull -from ..visuals import BrainMesh -from ..utils import (color2vb, safely_set_cbox) -from ..io import save_config_json, write_fig_canvas +from visbrain.visuals import BrainMesh +from visbrain.utils import (color2vb, safely_set_cbox) +from visbrain.io import save_config_json, write_fig_canvas logger = logging.getLogger('visbrain') @@ -96,6 +96,7 @@ def background_color(self, color=(.1, .1, .1)): """ bckcolor = color2vb(color).ravel()[0:-1] self.view.canvas.bgcolor = bckcolor + self.cbqt.cbviz.bgcolor = bckcolor def screenshot(self, saveas, canvas='main', print_size=None, dpi=300., unit='centimeter', factor=None, region=None, autocrop=False, diff --git a/visbrain/brain/visuals.py b/visbrain/gui/brain/visuals.py similarity index 94% rename from visbrain/brain/visuals.py rename to visbrain/gui/brain/visuals.py index 7a6f57bc2..601b73799 100644 --- a/visbrain/brain/visuals.py +++ b/visbrain/gui/brain/visuals.py @@ -4,10 +4,11 @@ from vispy import scene import vispy.visuals.transforms as vist -from ..objects import (CombineSources, CombineConnect, CombineTimeSeries, - 
CombinePictures, CombineVectors, BrainObj, VolumeObj, - RoiObj, CrossSecObj) -from ..config import PROFILER +from visbrain.objects import (CombineSources, CombineConnect, + CombineTimeSeries, CombinePictures, + CombineVectors, BrainObj, VolumeObj, RoiObj, + CrossSecObj) +from visbrain.config import PROFILER logger = logging.getLogger('visbrain') diff --git a/visbrain/figure/__init__.py b/visbrain/gui/figure/__init__.py similarity index 100% rename from visbrain/figure/__init__.py rename to visbrain/gui/figure/__init__.py diff --git a/visbrain/figure/figure.py b/visbrain/gui/figure/figure.py similarity index 99% rename from visbrain/figure/figure.py rename to visbrain/gui/figure/figure.py index f5b5de6cc..45afe3608 100644 --- a/visbrain/figure/figure.py +++ b/visbrain/gui/figure/figure.py @@ -7,7 +7,7 @@ import matplotlib.pyplot as plt import matplotlib as mpl -from ..utils import color2tuple, piccrop, picresize +from visbrain.utils import color2tuple, piccrop, picresize __all__ = ['Figure'] @@ -419,7 +419,7 @@ def _make(self): for loc, spine in ax.spines.items(): if loc in ['left', 'right', 'top', 'bottom']: spine.set_color('none') - ax.tick_params(**{loc: 'off'}) + ax.tick_params(**{loc: False}) @staticmethod def _cbar(cb, cmap, clim, vmin, under, vmax, over, title, ycb, fz_title, diff --git a/visbrain/figure/tests/test_figure.py b/visbrain/gui/figure/tests/test_figure.py similarity index 93% rename from visbrain/figure/tests/test_figure.py rename to visbrain/gui/figure/tests/test_figure.py index 3fb676847..349a9c95e 100644 --- a/visbrain/figure/tests/test_figure.py +++ b/visbrain/gui/figure/tests/test_figure.py @@ -1,16 +1,14 @@ """Test command lines.""" import os -from visbrain import Figure +from visbrain.gui import Figure from visbrain.io import download_file, path_to_visbrain_data from visbrain.tests._tests_visbrain import _TestVisbrain # List of image files to test with : _FILES = ['default.png', 'inside.png', 'count.png', 'density.png', 'repartition.jpg', 
'roi.jpg'] -all_downloaded = [os.path.isfile(path_to_visbrain_data(k)) for k in _FILES] -if not all(all_downloaded): - download_file('figure.zip', unzip=True) +download_file('figure.zip', unzip=True, astype='example_data') # Create a tmp/ directory : dir_path = os.path.dirname(os.path.realpath(__file__)) @@ -26,7 +24,7 @@ class TestFigure(_TestVisbrain): def test_figure(self): """Test function figure.""" # Get files : - files = [path_to_visbrain_data(k) for k in _FILES] + files = [path_to_visbrain_data(k, 'example_data') for k in _FILES] # Titles : titles = ['Default', 'Sources inside', 'Connectivity', diff --git a/visbrain/signal/__init__.py b/visbrain/gui/signal/__init__.py similarity index 100% rename from visbrain/signal/__init__.py rename to visbrain/gui/signal/__init__.py diff --git a/visbrain/signal/gui/__init__.py b/visbrain/gui/signal/gui/__init__.py similarity index 100% rename from visbrain/signal/gui/__init__.py rename to visbrain/gui/signal/gui/__init__.py diff --git a/visbrain/signal/gui/signal_gui.py b/visbrain/gui/signal/gui/signal_gui.py similarity index 100% rename from visbrain/signal/gui/signal_gui.py rename to visbrain/gui/signal/gui/signal_gui.py diff --git a/visbrain/signal/gui/signal_gui.ui b/visbrain/gui/signal/gui/signal_gui.ui similarity index 100% rename from visbrain/signal/gui/signal_gui.ui rename to visbrain/gui/signal/gui/signal_gui.ui diff --git a/visbrain/signal/signal.py b/visbrain/gui/signal/signal.py similarity index 98% rename from visbrain/signal/signal.py rename to visbrain/gui/signal/signal.py index bae5eb770..5bbeb6d65 100644 --- a/visbrain/signal/signal.py +++ b/visbrain/gui/signal/signal.py @@ -4,17 +4,17 @@ from .ui_elements import UiElements, UiInit from .visuals import Visuals -from ..utils import (safely_set_cbox, color2tuple, color2vb, mpl_cmap, - toggle_enable_tab) -from ..io import write_fig_canvas -from ..pyqt_module import PyQtModule +from visbrain.utils import (safely_set_cbox, color2tuple, color2vb, mpl_cmap, + 
toggle_enable_tab) +from visbrain.io import write_fig_canvas +from visbrain._pyqt_module import _PyQtModule # get_screen_size __all__ = ('Signal') -class Signal(PyQtModule, UiInit, UiElements, Visuals): +class Signal(_PyQtModule, UiInit, UiElements, Visuals): """Signal inspection module (data mining). The Signal module can be used to relatively large datasets of @@ -134,7 +134,7 @@ def __init__(self, data, axis=-1, time=None, sf=1., enable_grid=True, grid_titles_color='black', verbose=None, **kwargs): """Init.""" dscb = ['_grid_canvas.canvas.scene', '_signal_canvas.canvas.scene'] - PyQtModule.__init__(self, verbose=verbose, to_describe=dscb) + _PyQtModule.__init__(self, verbose=verbose, to_describe=dscb) self._enable_grid = enable_grid self._previous_form = form display_grid = bool(display_grid * self._enable_grid) diff --git a/visbrain/signal/tests/test_signal.py b/visbrain/gui/signal/tests/test_signal.py similarity index 99% rename from visbrain/signal/tests/test_signal.py rename to visbrain/gui/signal/tests/test_signal.py index 510bc7cb8..7b9cc2527 100644 --- a/visbrain/signal/tests/test_signal.py +++ b/visbrain/gui/signal/tests/test_signal.py @@ -1,7 +1,7 @@ """Test Signal module and related methods.""" from vispy.app.canvas import MouseEvent, KeyEvent -from visbrain import Signal +from visbrain.gui import Signal from visbrain.utils import generate_eeg from visbrain.tests._tests_visbrain import _TestVisbrain diff --git a/visbrain/signal/ui_elements/__init__.py b/visbrain/gui/signal/ui_elements/__init__.py similarity index 100% rename from visbrain/signal/ui_elements/__init__.py rename to visbrain/gui/signal/ui_elements/__init__.py diff --git a/visbrain/signal/ui_elements/ui_annotations.py b/visbrain/gui/signal/ui_elements/ui_annotations.py similarity index 98% rename from visbrain/signal/ui_elements/ui_annotations.py rename to visbrain/gui/signal/ui_elements/ui_annotations.py index bf69e696e..f799559ff 100644 --- a/visbrain/signal/ui_elements/ui_annotations.py 
+++ b/visbrain/gui/signal/ui_elements/ui_annotations.py @@ -2,8 +2,8 @@ from PyQt5 import QtWidgets import numpy as np -from ...utils import textline2color -from ...io import dialog_color +from visbrain.utils import textline2color +from visbrain.io import dialog_color class UiAnnotations(object): diff --git a/visbrain/signal/ui_elements/ui_elements.py b/visbrain/gui/signal/ui_elements/ui_elements.py similarity index 100% rename from visbrain/signal/ui_elements/ui_elements.py rename to visbrain/gui/signal/ui_elements/ui_elements.py diff --git a/visbrain/signal/ui_elements/ui_grid.py b/visbrain/gui/signal/ui_elements/ui_grid.py similarity index 96% rename from visbrain/signal/ui_elements/ui_grid.py rename to visbrain/gui/signal/ui_elements/ui_grid.py index a9d037ff8..d3a1eb24c 100644 --- a/visbrain/signal/ui_elements/ui_grid.py +++ b/visbrain/gui/signal/ui_elements/ui_grid.py @@ -1,8 +1,8 @@ """Interactions with the grid panel.""" import numpy as np -from ...utils import textline2color, safely_set_spin -from ...io import dialog_color, is_opengl_installed +from visbrain.utils import textline2color, safely_set_spin +from visbrain.io import dialog_color, is_opengl_installed class UiGrid(object): diff --git a/visbrain/signal/ui_elements/ui_init.py b/visbrain/gui/signal/ui_elements/ui_init.py similarity index 98% rename from visbrain/signal/ui_elements/ui_init.py rename to visbrain/gui/signal/ui_elements/ui_init.py index 37198dc3a..b6dd043dc 100644 --- a/visbrain/signal/ui_elements/ui_init.py +++ b/visbrain/gui/signal/ui_elements/ui_init.py @@ -7,7 +7,7 @@ from vispy import app from ..gui import Ui_MainWindow -from ...objects import VisbrainCanvas +from visbrain.objects import VisbrainCanvas class GridShortcuts(object): @@ -102,7 +102,7 @@ def on_mouse_double_click(event): """Executed function when double click mouse over canvas.""" # Get event position and camera rectangle: x_pos, y_pos = event.pos - rect = self._signal_canvas._camera.rect + rect = 
self._signal_canvas.camera.rect # Get right padding, canvas, title and wc size : cs = canvas.size ws = self._signal_canvas.wc.size diff --git a/visbrain/signal/ui_elements/ui_menu.py b/visbrain/gui/signal/ui_elements/ui_menu.py similarity index 96% rename from visbrain/signal/ui_elements/ui_menu.py rename to visbrain/gui/signal/ui_elements/ui_menu.py index 325571d5a..2c1a6d9e5 100644 --- a/visbrain/signal/ui_elements/ui_menu.py +++ b/visbrain/gui/signal/ui_elements/ui_menu.py @@ -2,9 +2,9 @@ import os import numpy as np -from ...utils import ScreenshotPopup, HelpMenu -from ...io import (dialog_save, dialog_load, write_fig_pyqt, write_fig_canvas, - write_csv, write_txt) +from visbrain.utils import ScreenshotPopup, HelpMenu +from visbrain.io import (dialog_save, dialog_load, write_fig_pyqt, + write_fig_canvas, write_csv, write_txt) class UiMenu(HelpMenu): diff --git a/visbrain/signal/ui_elements/ui_settings.py b/visbrain/gui/signal/ui_elements/ui_settings.py similarity index 97% rename from visbrain/signal/ui_elements/ui_settings.py rename to visbrain/gui/signal/ui_elements/ui_settings.py index 35d496aaa..79e1d0fe1 100644 --- a/visbrain/signal/ui_elements/ui_settings.py +++ b/visbrain/gui/signal/ui_elements/ui_settings.py @@ -1,6 +1,6 @@ """Interactions between user and Settings tab of QuickSettings.""" -from ...utils import textline2color -from ...io import dialog_color +from visbrain.utils import textline2color +from visbrain.io import dialog_color class UiSettings(object): diff --git a/visbrain/signal/ui_elements/ui_signals.py b/visbrain/gui/signal/ui_elements/ui_signals.py similarity index 98% rename from visbrain/signal/ui_elements/ui_signals.py rename to visbrain/gui/signal/ui_elements/ui_signals.py index d81886b73..cbf60ae9f 100644 --- a/visbrain/signal/ui_elements/ui_signals.py +++ b/visbrain/gui/signal/ui_elements/ui_signals.py @@ -1,8 +1,8 @@ """Interactions between user and Signal tab of QuickSettings.""" import numpy as np -from ...utils import 
textline2color, safely_set_spin -from ...io import dialog_color, is_opengl_installed +from visbrain.utils import textline2color, safely_set_spin +from visbrain.io import dialog_color, is_opengl_installed class UiSignals(object): diff --git a/visbrain/signal/visuals.py b/visbrain/gui/signal/visuals.py similarity index 98% rename from visbrain/signal/visuals.py rename to visbrain/gui/signal/visuals.py index e32f5bc2d..cb8794b32 100644 --- a/visbrain/signal/visuals.py +++ b/visbrain/gui/signal/visuals.py @@ -6,8 +6,8 @@ from vispy import scene import vispy.visuals.transforms as vist -from ..visuals import GridSignal, TFmapsMesh -from ..utils import color2vb, vispy_array, PrepareData +from visbrain.visuals import GridSignal, TFmapsMesh +from visbrain.utils import color2vb, vispy_array, PrepareData __all__ = ('Visuals') @@ -223,7 +223,7 @@ def set_data(self, data, index, color='black', lw=2., nbins=10, idx.insert(self._axis, slice(None)) # Convert data to be compatible with VisPy and prepare data : - data_c = vispy_array(data[idx]).copy() + data_c = vispy_array(data[tuple(idx)]).copy() _data = self._prep._prepare_data(self._sf, data_c, self._time) # Set data : diff --git a/visbrain/sleep/__init__.py b/visbrain/gui/sleep/__init__.py similarity index 100% rename from visbrain/sleep/__init__.py rename to visbrain/gui/sleep/__init__.py diff --git a/visbrain/sleep/interface/__init__.py b/visbrain/gui/sleep/interface/__init__.py similarity index 100% rename from visbrain/sleep/interface/__init__.py rename to visbrain/gui/sleep/interface/__init__.py diff --git a/visbrain/sleep/interface/gui/__init__.py b/visbrain/gui/sleep/interface/gui/__init__.py similarity index 100% rename from visbrain/sleep/interface/gui/__init__.py rename to visbrain/gui/sleep/interface/gui/__init__.py diff --git a/visbrain/sleep/interface/gui/sleep_gui.py b/visbrain/gui/sleep/interface/gui/sleep_gui.py similarity index 100% rename from visbrain/sleep/interface/gui/sleep_gui.py rename to 
visbrain/gui/sleep/interface/gui/sleep_gui.py diff --git a/visbrain/sleep/interface/gui/sleep_gui.ui b/visbrain/gui/sleep/interface/gui/sleep_gui.ui similarity index 100% rename from visbrain/sleep/interface/gui/sleep_gui.ui rename to visbrain/gui/sleep/interface/gui/sleep_gui.ui diff --git a/visbrain/sleep/interface/ui_elements/__init__.py b/visbrain/gui/sleep/interface/ui_elements/__init__.py similarity index 100% rename from visbrain/sleep/interface/ui_elements/__init__.py rename to visbrain/gui/sleep/interface/ui_elements/__init__.py diff --git a/visbrain/sleep/interface/ui_elements/ui_annotate.py b/visbrain/gui/sleep/interface/ui_elements/ui_annotate.py similarity index 100% rename from visbrain/sleep/interface/ui_elements/ui_annotate.py rename to visbrain/gui/sleep/interface/ui_elements/ui_annotate.py diff --git a/visbrain/sleep/interface/ui_elements/ui_detection.py b/visbrain/gui/sleep/interface/ui_elements/ui_detection.py similarity index 99% rename from visbrain/sleep/interface/ui_elements/ui_detection.py rename to visbrain/gui/sleep/interface/ui_elements/ui_detection.py index 9e3d1e365..428bf6b6d 100644 --- a/visbrain/sleep/interface/ui_elements/ui_detection.py +++ b/visbrain/gui/sleep/interface/ui_elements/ui_detection.py @@ -3,9 +3,9 @@ from PyQt5 import QtWidgets, QtCore import logging -from ....utils import (remdetect, spindlesdetect, slowwavedetect, kcdetect, - peakdetect, mtdetect) -from ....utils.sleep.event import _events_to_index +from visbrain.utils import (remdetect, spindlesdetect, slowwavedetect, + kcdetect, peakdetect, mtdetect) +from visbrain.utils.sleep.event import _events_to_index logger = logging.getLogger('visbrain') diff --git a/visbrain/sleep/interface/ui_elements/ui_elements.py b/visbrain/gui/sleep/interface/ui_elements/ui_elements.py similarity index 95% rename from visbrain/sleep/interface/ui_elements/ui_elements.py rename to visbrain/gui/sleep/interface/ui_elements/ui_elements.py index cf3c8fca9..71fe104d6 100644 --- 
a/visbrain/sleep/interface/ui_elements/ui_elements.py +++ b/visbrain/gui/sleep/interface/ui_elements/ui_elements.py @@ -9,7 +9,7 @@ from .ui_menu import UiMenu from .ui_annotate import UiAnnotate from .ui_screenshot import UiScreenshot -from ....config import PROFILER +from visbrain.config import PROFILER class UiElements(UiSettings, UiPanels, UiInfo, UiTools, UiScoring, diff --git a/visbrain/sleep/interface/ui_elements/ui_info.py b/visbrain/gui/sleep/interface/ui_elements/ui_info.py similarity index 97% rename from visbrain/sleep/interface/ui_elements/ui_info.py rename to visbrain/gui/sleep/interface/ui_elements/ui_info.py index 269985f66..e00efc9ca 100644 --- a/visbrain/sleep/interface/ui_elements/ui_info.py +++ b/visbrain/gui/sleep/interface/ui_elements/ui_info.py @@ -1,9 +1,9 @@ """Main class for info managment.""" +from os import path from PyQt5 import QtWidgets -from ....utils import sleepstats -from os import path +from visbrain.utils import sleepstats class UiInfo(object): diff --git a/visbrain/sleep/interface/ui_elements/ui_menu.py b/visbrain/gui/sleep/interface/ui_elements/ui_menu.py similarity index 98% rename from visbrain/sleep/interface/ui_elements/ui_menu.py rename to visbrain/gui/sleep/interface/ui_elements/ui_menu.py index d8ef1e4f5..f938e2296 100644 --- a/visbrain/sleep/interface/ui_elements/ui_menu.py +++ b/visbrain/gui/sleep/interface/ui_elements/ui_menu.py @@ -1,14 +1,14 @@ """Main class for sleep menus managment.""" - +import os import numpy as np -import os from PyQt5 import QtWidgets -from ....utils import HelpMenu -from ....io import (dialog_save, dialog_load, write_fig_hyp, write_csv, - write_txt, write_hypno, read_hypno, annotations_to_array, - oversample_hypno, save_config_json) +from visbrain.utils import HelpMenu +from visbrain.io import (dialog_save, dialog_load, write_fig_hyp, write_csv, + write_txt, write_hypno, read_hypno, + annotations_to_array, oversample_hypno, + save_config_json) class UiMenu(HelpMenu): diff --git 
a/visbrain/sleep/interface/ui_elements/ui_panels.py b/visbrain/gui/sleep/interface/ui_elements/ui_panels.py similarity index 99% rename from visbrain/sleep/interface/ui_elements/ui_panels.py rename to visbrain/gui/sleep/interface/ui_elements/ui_panels.py index 85a29af02..aa3e1dc20 100644 --- a/visbrain/sleep/interface/ui_elements/ui_panels.py +++ b/visbrain/gui/sleep/interface/ui_elements/ui_panels.py @@ -3,11 +3,10 @@ import numpy as np -from visbrain.io.dependencies import is_lspopt_installed - from ..ui_init import AxisCanvas, TimeAxis -from ....utils import mpl_cmap, color2vb -from ....config import PROFILER +from visbrain.utils import mpl_cmap, color2vb +from visbrain.config import PROFILER +from visbrain.io.dependencies import is_lspopt_installed try: _fromUtf8 = QtCore.QString.fromUtf8 @@ -435,6 +434,7 @@ def _fcn_spec_set_data(self): fstart, fend = self._PanSpecFstart.value(), self._PanSpecFend.value() # Get contrast : contrast = self._PanSpecCon.value() + contrast = 1. if contrast < .1 else contrast # Get colormap : cmap = str(self._PanSpecCmap.currentText()) # Get channel to get spectrogram : diff --git a/visbrain/sleep/interface/ui_elements/ui_scoring.py b/visbrain/gui/sleep/interface/ui_elements/ui_scoring.py similarity index 99% rename from visbrain/sleep/interface/ui_elements/ui_scoring.py rename to visbrain/gui/sleep/interface/ui_elements/ui_scoring.py index 9a66b0401..d2afda2b2 100644 --- a/visbrain/sleep/interface/ui_elements/ui_scoring.py +++ b/visbrain/gui/sleep/interface/ui_elements/ui_scoring.py @@ -2,7 +2,7 @@ import numpy as np from PyQt5 import QtWidgets -from ....utils import transient +from visbrain.utils import transient class UiScoring(object): diff --git a/visbrain/sleep/interface/ui_elements/ui_screenshot.py b/visbrain/gui/sleep/interface/ui_elements/ui_screenshot.py similarity index 95% rename from visbrain/sleep/interface/ui_elements/ui_screenshot.py rename to visbrain/gui/sleep/interface/ui_elements/ui_screenshot.py index 
c397e8e06..f6473ee91 100644 --- a/visbrain/sleep/interface/ui_elements/ui_screenshot.py +++ b/visbrain/gui/sleep/interface/ui_elements/ui_screenshot.py @@ -1,6 +1,6 @@ """Screenshot window and related functions.""" -from ....io import write_fig_pyqt, write_fig_canvas, dialog_save -from ....utils import ScreenshotPopup +from visbrain.io import write_fig_pyqt, write_fig_canvas, dialog_save +from visbrain.utils import ScreenshotPopup class UiScreenshot(object): diff --git a/visbrain/sleep/interface/ui_elements/ui_settings.py b/visbrain/gui/sleep/interface/ui_elements/ui_settings.py similarity index 100% rename from visbrain/sleep/interface/ui_elements/ui_settings.py rename to visbrain/gui/sleep/interface/ui_elements/ui_settings.py diff --git a/visbrain/sleep/interface/ui_elements/ui_tools.py b/visbrain/gui/sleep/interface/ui_elements/ui_tools.py similarity index 98% rename from visbrain/sleep/interface/ui_elements/ui_tools.py rename to visbrain/gui/sleep/interface/ui_elements/ui_tools.py index 88f69c587..695ac20b7 100644 --- a/visbrain/sleep/interface/ui_elements/ui_tools.py +++ b/visbrain/gui/sleep/interface/ui_elements/ui_tools.py @@ -2,8 +2,8 @@ import numpy as np from PyQt5 import QtWidgets -from ....utils import (rereferencing, bipolarization, find_non_eeg, - commonaverage) +from visbrain.utils import (rereferencing, bipolarization, find_non_eeg, + commonaverage) class UiTools(object): diff --git a/visbrain/sleep/interface/ui_init.py b/visbrain/gui/sleep/interface/ui_init.py similarity index 99% rename from visbrain/sleep/interface/ui_init.py rename to visbrain/gui/sleep/interface/ui_init.py index 298a4532f..a8af2fb2c 100644 --- a/visbrain/sleep/interface/ui_init.py +++ b/visbrain/gui/sleep/interface/ui_init.py @@ -11,9 +11,9 @@ from vispy import app, scene import vispy.visuals.transforms as vist -from .gui import Ui_MainWindow from ..visuals.marker import Markers -from ...utils import color2vb +from .gui import Ui_MainWindow +from visbrain.utils import color2vb 
class UiInit(QtWidgets.QMainWindow, Ui_MainWindow, app.Canvas): diff --git a/visbrain/sleep/sleep.py b/visbrain/gui/sleep/sleep.py similarity index 97% rename from visbrain/sleep/sleep.py rename to visbrain/gui/sleep/sleep.py index b429df4fc..4c8fb38b4 100644 --- a/visbrain/sleep/sleep.py +++ b/visbrain/gui/sleep/sleep.py @@ -7,15 +7,15 @@ from .interface import UiInit, UiElements from .visuals import Visuals -from ..pyqt_module import PyQtModule -from ..utils import (FixedCam, color2vb, MouseEventControl) -from ..io import ReadSleepData -from ..config import PROFILER +from visbrain._pyqt_module import _PyQtModule +from visbrain.utils import (FixedCam, color2vb, MouseEventControl) +from visbrain.io import ReadSleepData +from visbrain.config import PROFILER logger = logging.getLogger('visbrain') -class Sleep(PyQtModule, ReadSleepData, UiInit, Visuals, UiElements, +class Sleep(_PyQtModule, ReadSleepData, UiInit, Visuals, UiElements, MouseEventControl): """Visualize and edit sleep data. @@ -83,7 +83,7 @@ def __init__(self, data=None, hypno=None, config_file=None, axis=True, href=['art', 'wake', 'rem', 'n1', 'n2', 'n3'], preload=True, use_mne=False, kwargs_mne={}, verbose=None): """Init.""" - PyQtModule.__init__(self, verbose=verbose, icon='sleep_icon.svg') + _PyQtModule.__init__(self, verbose=verbose, icon='sleep_icon.svg') # ====================== APP CREATION ====================== UiInit.__init__(self) diff --git a/visbrain/sleep/tests/test_sleep.py b/visbrain/gui/sleep/tests/test_sleep.py similarity index 97% rename from visbrain/sleep/tests/test_sleep.py rename to visbrain/gui/sleep/tests/test_sleep.py index 6eb019fe9..78f8af4f4 100644 --- a/visbrain/sleep/tests/test_sleep.py +++ b/visbrain/gui/sleep/tests/test_sleep.py @@ -5,18 +5,18 @@ from vispy.app.canvas import MouseEvent, KeyEvent from vispy.util.keys import Key -from visbrain import Sleep +from visbrain.gui import Sleep from visbrain.io import download_file, path_to_visbrain_data from 
visbrain.tests._tests_visbrain import _TestVisbrain # File to load : -sleep_file = path_to_visbrain_data('excerpt2.edf') -hypno_file = path_to_visbrain_data('Hypnogram_excerpt2.txt') +sleep_file = path_to_visbrain_data('excerpt2.edf', 'example_data') +hypno_file = path_to_visbrain_data('Hypnogram_excerpt2.txt', 'example_data') # Download sleep file : if not os.path.isfile(sleep_file): - download_file('sleep_edf.zip', unzip=True) + download_file('sleep_edf.zip', unzip=True, astype='example_data') onset = np.array([100, 2000, 5000]) # Create Sleep application : diff --git a/visbrain/sleep/visuals/__init__.py b/visbrain/gui/sleep/visuals/__init__.py similarity index 100% rename from visbrain/sleep/visuals/__init__.py rename to visbrain/gui/sleep/visuals/__init__.py diff --git a/visbrain/sleep/visuals/marker.py b/visbrain/gui/sleep/visuals/marker.py similarity index 100% rename from visbrain/sleep/visuals/marker.py rename to visbrain/gui/sleep/visuals/marker.py diff --git a/visbrain/sleep/visuals/visuals.py b/visbrain/gui/sleep/visuals/visuals.py similarity index 98% rename from visbrain/sleep/visuals/visuals.py rename to visbrain/gui/sleep/visuals/visuals.py index 24915e681..674204690 100644 --- a/visbrain/sleep/visuals/visuals.py +++ b/visbrain/gui/sleep/visuals/visuals.py @@ -12,10 +12,10 @@ import vispy.visuals.transforms as vist from .marker import Markers -from ...utils import (array2colormap, color2vb, PrepareData) -from ...utils.sleep.event import _index_to_events -from ...visuals import TopoMesh, TFmapsMesh -from ...config import PROFILER +from visbrain.utils import (color2vb, PrepareData, cmap_to_glsl) +from visbrain.utils.sleep.event import _index_to_events +from visbrain.visuals import TopoMesh, TFmapsMesh +from visbrain.config import PROFILER logger = logging.getLogger('visbrain') @@ -510,10 +510,15 @@ def set_data(self, sf, data, time, method='Fourier transform', # =================== COLOR =================== # Get clim : - clim = (contrast * mesh.min(), 
contrast * mesh.max()) + _mesh = mesh[sls, :] + contrast = 1. if contrast is None else contrast + clim = (contrast * _mesh.min(), contrast * _mesh.max()) # Turn mesh into color array for selected frequencies: - self.mesh.set_data(array2colormap(mesh[sls, :], cmap=cmap, - clim=clim)) + self.mesh.set_data(_mesh) + _min, _max = _mesh.min(), _mesh.max() + _cmap = cmap_to_glsl(limits=(_min, _max), clim=clim, cmap=cmap) + self.mesh.cmap = _cmap + self.mesh.clim = 'auto' self.mesh.interpolation = interp # =================== TRANSFORM =================== diff --git a/visbrain/io/__init__.py b/visbrain/io/__init__.py index cd77ebbd9..7ac5c56ef 100644 --- a/visbrain/io/__init__.py +++ b/visbrain/io/__init__.py @@ -6,6 +6,7 @@ from .path import * # noqa from .read_annotations import * # noqa from .read_data import * # noqa +from .rw_nifti import (read_nifti, read_mist, niimg_to_transform) # noqa from .read_sleep import * # noqa from .rw_config import * # noqa from .rw_hypno import * # noqa diff --git a/visbrain/io/dependencies.py b/visbrain/io/dependencies.py index d4898b32e..2baef75c0 100644 --- a/visbrain/io/dependencies.py +++ b/visbrain/io/dependencies.py @@ -1,8 +1,8 @@ """Test if dependencies are installed.""" __all__ = ('is_mne_installed', 'is_nibabel_installed', 'is_opengl_installed', - 'is_pandas_installed', 'is_lspopt_installed', - 'is_tensorpac_installed', 'is_xlrd_installed') + 'is_pandas_installed', 'is_lspopt_installed', 'is_xlrd_installed', + 'is_tensorpac_installed', 'is_sc_image_installed') def _check_version(v_user, v_compare): @@ -117,3 +117,17 @@ def is_xlrd_installed(raise_error=False): raise IOError("xlrd not installed. 
In a terminal, run : pip install " "xlrd") return is_installed + + +def is_sc_image_installed(raise_error=False): + """Test if scikit-image is installed.""" + try: + import skimage # noqa + is_installed = True + except: + is_installed = False + # Raise error (if needed) : + if raise_error and not is_installed: + raise IOError("scikit-image not installed. In a terminal, run : pip" + " install scikit-image") + return is_installed diff --git a/visbrain/io/download.py b/visbrain/io/download.py index 31679ccd5..863652e44 100644 --- a/visbrain/io/download.py +++ b/visbrain/io/download.py @@ -7,27 +7,24 @@ from warnings import warn from .rw_config import load_config_json -from .path import get_data_path +from .path import get_data_url_path logger = logging.getLogger('visbrain') -__all__ = ["get_data_url_file", "download_file"] +__all__ = ["download_file"] -def get_data_url_file(): - """Get path to the data_url.txt file.""" - return load_config_json(get_data_path(file='data_url.txt')) - - -def get_data_url(name): +def get_data_url(name, astype): """Get filename and url to a file. Parameters ---------- name : string Name of the file. + astype : string + Type of the file to download. Returns ------- @@ -35,7 +32,7 @@ def get_data_url(name): Url to the file to download. """ # Get path to data_url.txt : - urls = get_data_url_file() + urls = load_config_json(get_data_url_path())[astype] # Try to get the file : try: url_to_download = urls[name] @@ -60,7 +57,7 @@ def reporthook(blocknum, blocksize, totalsize): sys.stderr.write("\rread %d" % (readsofar,)) -def download_file(name, filename=None, to_path=None, unzip=False, +def download_file(name, astype=None, filename=None, to_path=None, unzip=False, remove_archive=False, use_pwd=False): """Download a file. @@ -70,6 +67,9 @@ def download_file(name, filename=None, to_path=None, unzip=False, ---------- name : string Name of the file to download or url. 
+ astype : str | None + If name is a name of a file that can be downloaded, astype refer to the + type of the file. filename : string | None Name of the file to be saved in case of url. to_path : string | None @@ -95,7 +95,9 @@ def download_file(name, filename=None, to_path=None, unzip=False, assert isinstance(filename, str) url = name else: - filename, url = name, get_data_url(name) + assert isinstance(name, str) and isinstance(astype, str) + filename, url = name, get_data_url(name, astype) + to_path = os.path.join(vb_path, astype) to_path = vb_path if not isinstance(to_path, str) else to_path path_to_file = os.path.join(to_path, filename) to_download = not os.path.isfile(path_to_file) diff --git a/visbrain/io/path.py b/visbrain/io/path.py index 7169d952a..72288d83b 100644 --- a/visbrain/io/path.py +++ b/visbrain/io/path.py @@ -6,8 +6,8 @@ logger = logging.getLogger('visbrain') -__all__ = ['path_to_visbrain_data', 'get_data_path', 'get_files_in_data', - 'get_files_in_folders', 'path_to_tmp', 'clean_tmp'] +__all__ = ['path_to_visbrain_data', 'get_files_in_folders', 'path_to_tmp', + 'clean_tmp', 'get_data_url_path'] def path_to_visbrain_data(file=None, folder=None): @@ -36,30 +36,14 @@ def path_to_visbrain_data(file=None, folder=None): return os.path.join(vb_path, file) -def get_data_path(folder=None, file=None): - """Get the path to the visbrain data folder. +def get_data_url_path(): + """Get the path to the data_url JSON file.""" + url_path = sys.modules[__name__].__file__.split('io')[0] + return os.path.join(url_path, 'data_url.json') - This function can find a file in visbrain/data or visbrain/data/folder. - Parameters - ---------- - folder : string | None - Sub-folder of visbrain/data. - file : string | None - File name. - - Returns - ------- - path : string - Path to the data folder or to the file if file is not None. 
- """ - cur_path = sys.modules[__name__].__file__.split('io')[0] - folder = '' if not isinstance(folder, str) else folder - file = '' if not isinstance(file, str) else file - return os.path.join(*(cur_path, 'data', folder, file)) - - -def get_files_in_folders(*args, with_ext=False, with_path=False, file=None): +def get_files_in_folders(*args, with_ext=False, with_path=False, file=None, + exclude=None, sort=True, unique=True): """Get all files in several folders. Parameters @@ -72,6 +56,12 @@ def get_files_in_folders(*args, with_ext=False, with_path=False, file=None): Specify if returned files should contains full path to it. file : string | None Specify if a specific file name is needed. + exclude : list | None + List of patterns to exclude + sort : bool | True + Sort the resulting list of files. + unique : bool | True + Get a unique list of files. Returns ------- @@ -98,40 +88,24 @@ def get_files_in_folders(*args, with_ext=False, with_path=False, file=None): if isinstance(file, str) and (file in files): files = [files[files.index(file)]] # Return either files with full path or only file name : - if with_ext: - return files - else: - return [os.path.splitext(k)[0] for k in files] - - -def get_files_in_data(folder, with_ext=False): - """Get the list of files in a folder of visbrain/data. - - Parameters - ---------- - folder : string - Sub-folder of visbrain/data. - with_ext : bool | False - Return the list of files with or without extensions. - - Returns - ------- - all_files : list - List of files in visbrain/data/folder. 
- """ - if not os.path.isdir(get_data_path(folder=folder)): - return [] - all_files = os.listdir(get_data_path(folder=folder)) - if with_ext: - return all_files - else: - return [os.path.splitext(k)[0] for k in all_files] + if not with_ext: + files = [os.path.splitext(k)[0] for k in files] + # Patterns to exclude : + if isinstance(exclude, (list, tuple)): + from itertools import product + files = [k for k, i in product(files, exclude) if i not in k] + # Unique : + if unique: + files = list(set(files)) + # Sort list : + if sort: + files.sort() + return files def path_to_tmp(file=None, folder=None): """Get the path to the tmp folder.""" - vb_path = os.path.join(os.path.expanduser('~'), 'visbrain_data') - tmp_path = os.path.join(vb_path, 'tmp') + tmp_path = os.path.join(path_to_visbrain_data(), 'tmp') if not os.path.exists(tmp_path): os.mkdir(tmp_path) folder = '' if not isinstance(folder, str) else folder @@ -144,8 +118,7 @@ def path_to_tmp(file=None, folder=None): def clean_tmp(): """Clean the tmp folder.""" - vb_path = os.path.join(os.path.expanduser('~'), 'visbrain_data') - tmp_path = os.path.join(vb_path, 'tmp') + tmp_path = os.path.join(path_to_visbrain_data(), 'tmp') if os.path.exists(tmp_path): import shutil shutil.rmtree(tmp_path) diff --git a/visbrain/io/read_data.py b/visbrain/io/read_data.py index 5fd4f81ae..9a597cf18 100644 --- a/visbrain/io/read_data.py +++ b/visbrain/io/read_data.py @@ -7,16 +7,12 @@ - Text (*.txt) - CSV (*.csv) - JSON (*.json) -- NIFTI """ import numpy as np -# import os -from ..utils.transform import array_to_stt -from .dependencies import is_nibabel_installed -__all__ = ('read_mat', 'read_pickle', 'read_npy', 'read_npz', - 'read_txt', 'read_csv', 'read_json', 'read_nifti', 'read_stc') +__all__ = ('read_mat', 'read_pickle', 'read_npy', 'read_npz', 'read_txt', + 'read_csv', 'read_json', 'read_stc') def read_mat(path, vars=None): @@ -56,41 +52,6 @@ def read_json(path): pass -def read_nifti(path, hdr_as_array=False): - """Read data from 
a NIFTI file using Nibabel. - - Parameters - ---------- - path : string - Path to the nifti file. - - Returns - ------- - vol : array_like - The 3-D volume data. - header : Nifti1Header - Nifti header. - transform : VisPy.transform - The transformation - """ - is_nibabel_installed(raise_error=True) - import nibabel as nib - # Load the file : - img = nib.load(path) - # Get the data and affine transformation :: - vol = img.get_data() - affine = img.affine - # Replace NaNs with 0. : - vol[np.isnan(vol)] = 0. - # Define the transformation : - if hdr_as_array: - transform = affine - else: - transform = array_to_stt(affine) - - return vol, img.header, transform - - def read_stc(path): """Read an STC file from the MNE package. diff --git a/visbrain/io/read_sleep.py b/visbrain/io/read_sleep.py index 8ee4db895..f6d60d7ba 100644 --- a/visbrain/io/read_sleep.py +++ b/visbrain/io/read_sleep.py @@ -14,18 +14,19 @@ from warnings import warn import logging -from .rw_utils import get_file_ext -from .rw_hypno import (read_hypno, oversample_hypno) +from .dependencies import is_mne_installed from .dialog import dialog_load from .mneio import mne_switch -from .dependencies import is_mne_installed -from ..utils import get_dsf, vispy_array -from ..io import merge_annotations +from .rw_hypno import (read_hypno, oversample_hypno) +from .rw_utils import get_file_ext +from .write_data import write_csv from ..config import PROFILER +from ..io import merge_annotations +from ..utils import get_dsf, vispy_array, sleepstats logger = logging.getLogger('visbrain') -__all__ = ['ReadSleepData'] +__all__ = ['ReadSleepData', 'get_sleep_stats'] class ReadSleepData(object): @@ -659,3 +660,61 @@ def read_elan(path, downsample): gain[chan_list][..., np.newaxis] return sf, downsample, dsf, data, chan, n, start_time, None + + +def get_sleep_stats(hypno_file, output_file=None): + """Compute sleep statistics from hypnogram file and export them in csv. 
+ + Sleep statistics specifications: + + * Time in Bed (TIB) : total duration of the hypnogram. + * Total Dark Time (TDT) : duration of the hypnogram from beginning + to last period of sleep. + * Sleep Period Time (SPT) : duration from first to last period of + sleep. + * Wake After Sleep Onset (WASO) : duration of wake periods within SPT + * Sleep Efficiency (SE) : TST / TDT * 100 (%). + * Total Sleep Time (TST) : SPT - WASO. + * W, N1, N2, N3 and REM: sleep stages duration. + * % (W, ... REM) : sleep stages duration expressed in percentages of + TDT. + * Latencies: latencies of sleep stages from the beginning of the + record. + + (All values except SE and percentages are expressed in minutes) + + Parameters + ---------- + hypno_file : string + Full path to the hypnogram file. + output_file : string | None + Full path to the output file. If no file is provided, sleep statistics + are printed out to the terminal. + """ + # File conversion : + if output_file is not None: # Check extension + ext = os.path.splitext(output_file)[1][1:].strip().lower() + if ext == '': + output_file = output_file + '.csv' + + # Load hypnogram + hypno, sf_hyp = read_hypno(hypno_file) + if sf_hyp < 1: + mult = int(np.round(len(hypno) / sf_hyp)) + hypno = oversample_hypno(hypno, mult) + sf_hyp = 1 + + # Get sleep stats + stats = sleepstats(hypno, sf_hyp=sf_hyp) + stats['File'] = hypno_file + print('\nSLEEP STATS\n===========') + keys, val = [''] * len(stats), [''] * len(stats) + # Fill table : + for num, (k, v) in enumerate(stats.items()): + print(k, '\t', str(v)) + # Remember variables : + keys[int(num)] = k + val[int(num)] = str(v) + if output_file is not None: + write_csv(output_file, zip(keys, val)) + print('===========\nCSV file saved to:', output_file) diff --git a/visbrain/io/rw_nifti.py b/visbrain/io/rw_nifti.py new file mode 100644 index 000000000..dd051bc63 --- /dev/null +++ b/visbrain/io/rw_nifti.py @@ -0,0 +1,235 @@ +"""Read nifti (nii.gz) files.""" +import os + +import numpy as
np + +from .dependencies import is_nibabel_installed +from .path import path_to_visbrain_data +from ..utils.transform import array_to_stt + +from vispy.visuals.transforms import (MatrixTransform, ChainTransform, + STTransform) + + +def read_nifti(path, hdr_as_array=False): + """Read data from a NIFTI file using Nibabel. + + Parameters + ---------- + path : string + Path to the nifti file. + + Returns + ------- + vol : array_like + The 3-D volume data. + header : Nifti1Header + Nifti header. + transform : VisPy.transform + The transformation + """ + is_nibabel_installed(raise_error=True) + import nibabel as nib + # Load the file : + img = nib.load(path) + # Get the data and affine transformation :: + vol = img.get_data() + affine = img.affine + # Replace NaNs with 0. : + vol[np.isnan(vol)] = 0. + # Define the transformation : + if hdr_as_array: + transform = affine + else: + transform = array_to_stt(affine) + + return vol, img.header, transform + + +def read_mist(name): + """Load MIST parcellation. + + See : MIST: A multi-resolution parcellation of functional networks + Authors : Sebastian Urchs, Jonathan Armoza, Yassine Benhajali, + Jolène St-Aubin, Pierre Orban, Pierre Bellec + + Parameters + ---------- + name : string + Name of the level. Use MIST_x with x 7, 12, 20, 36, 64, 122 or ROI. + + Returns + ------- + vol : array_like | None + ROI volume. + labels : array_like | None + Array of labels. + index : array_like | None + Array of index that make the correspondence between the volume values + and labels. + hdr : array_like | None + Array of transform source's coordinates into the volume space. 
+ """ + name = name.upper() + assert ('MIST' in name) and ('_' in name) + level = name.split('_')[-1] + assert level in ['7', '12', '20', '36', '64', '122', 'ROI'] + # Define path : + parc, parc_info = '%s.nii.gz', '%s.csv' + folder, folder_info = 'Parcellations', 'Parcel_Information' + mist_path = path_to_visbrain_data('mist', 'roi') + parc_path = os.path.join(*(mist_path, folder, parc % name)) + parc_info_path = os.path.join(*(mist_path, folder_info, parc_info % name)) + # Load info : + m = np.genfromtxt(parc_info_path, delimiter=';', dtype=str, skip_header=1, + usecols=[0, 1, 2]) + n_roi = m.shape[0] + index = m[:, 0].astype(int) + lab_, name_ = 'label_%s' % level, 'name_%s' % level + labels = np.zeros(n_roi, dtype=[(lab_, object), (name_, object)]) + labels[lab_] = m[:, 1] + labels[name_] = np.char.replace(np.char.capitalize(m[:, 2]), '_', ' ') + # Load parc : + vol, _, hdr = read_nifti(parc_path, hdr_as_array=True) + return vol, labels, index, hdr + + +def _niimg_var(vol, hdr): + """Get transformation variables. + + Parameters + ---------- + vol : array_like + The 3D array of the volume. + hdr : array_like + The (4, 4) transformation array. + + Returns + ------- + sh : array_like + Shape of the volume + diag : array_like + Diagonal of the transformation + tr : array_like + Translation of the transformation + """ + assert vol.ndim == 3, "Volume should be an (n_x, n_y, n_z) array" + n_x, n_y, n_z = vol.shape + assert isinstance(hdr, (MatrixTransform, np.ndarray)) + if isinstance(hdr, MatrixTransform): + affine = np.array(hdr.matrix).copy() + # Get diagonal and translation + d_x, d_y, d_z = np.diag(affine)[0:-1] + t_x, t_y, t_z = affine[-1, 0:-1] + return np.array(vol.shape), np.diag(affine)[0:-1], affine[-1, 0:-1] + + +def _niimg_norm(sh, diag, translate): + """Normalize the volume between (0., 1.).""" + # Compute normalization ratio + ratio = np.abs(diag) * sh + sgn = np.sign(diag) + # Get scale and translate + sc = 1. 
/ ratio + tr = -(translate + np.array([0., 0, 0])) / ratio + # Define transformations of each slice + sg_norm = STTransform(scale=(sc[1], sc[2], 1.), + translate=(tr[1], tr[2], 1.)) + cr_norm = STTransform(scale=(sc[0], sc[2], 1.), + translate=(sgn[0] * tr[0], tr[2], 1.)) + ax_norm = STTransform(scale=(sc[1], sc[0], 1.), + translate=(tr[1], sgn[0] * tr[0], 1.)) + return sg_norm, cr_norm, ax_norm + + +def _niimg_rot(): + """Get rotation trnasformations of each slice.""" + # Sagittal + sg_rot = MatrixTransform() + sg_rot.rotate(90., (0, 0, 1)) + sg_rot.rotate(180., (0, 1, 0)) + # Coronal + cr_rot = MatrixTransform() + cr_rot.rotate(90., (0, 0, 1)) + cr_rot.rotate(180., (0, 1, 0)) + # Axial + ax_rot = MatrixTransform() + ax_rot.rotate(180., (1, 0, 0)) + return sg_rot, cr_rot, ax_rot + + +def _niimg_mat(hdr, idx): + """Get the transformation of a single slice. + + Parameters + ---------- + hdr : array_like + The (4, 4) transformation array. + idx : tuple + Slices indicies. + + Returns + ------- + tf : MatrixTransform + Image transformation. + """ + hdr_mat = np.array(hdr.matrix).copy().T + mat = np.identity(4, dtype=np.float32) + + to_idx = [[idx[0]], [idx[1]]], [idx[0], idx[1]] + mat[[[0], [1]], [0, 1]] = hdr_mat[to_idx] + mat[[0, 1], -1] = hdr_mat[[idx[0], idx[1]], -1] + return MatrixTransform(mat.T) + + +def _niimg_mni(hdr): + """Transformation for MNI conversions of each slice.""" + sg_mni = _niimg_mat(hdr, (2, 1)) + cr_mni = _niimg_mat(hdr, (2, 0)) + ax_mni = _niimg_mat(hdr, (1, 0)) + return sg_mni, cr_mni, ax_mni + + +def niimg_to_transform(vol, hdr, as_bgd=True, vol_bgd=None, hdr_bgd=None): + """Get transformations of nii.gz files for cross-sections. + + Parameters + ---------- + vol : array_like + 3D volume data. + hdr : array_like + Array of transformation of shape (4, 4). + as_bgd : bool | True + Specify if the volume is a background image or have to be considered as + an activation image. + vol_bgd : array_like | None + Volume data if `as_bgd` is True. 
+ hdr_bgd : array_like | None + Transformation array if `as_bgd` is True. + + Returns + ------- + sg_tf : ChainTransform + Transformation of sagittal view + cr_tf : ChainTransform + Transformation of coronal view + ax_tf : ChainTransform + Transformation of axial view + """ + # Get transformation variables + sh_img, diag_img, tr_img = _niimg_var(vol, hdr) + # Get the normalization transformation depending if the volume is an image + # background or an activation image + if as_bgd: # Background image + sg_norm, cr_norm, ax_norm = _niimg_norm(sh_img - 1, diag_img, tr_img) + else: # Activation image + sh_bgd, diag_bgd, tr_bgd = _niimg_var(vol_bgd, hdr_bgd) + sg_norm, cr_norm, ax_norm = _niimg_norm(sh_bgd - 1, diag_bgd, tr_bgd) + # Get MNI and rotation transformations + sg_mni, cr_mni, ax_mni = _niimg_mni(hdr) + sg_rot, cr_rot, ax_rot = _niimg_rot() + # Build the chain of transformation + sg_tf = ChainTransform([sg_norm, sg_rot, sg_mni]) + cr_tf = ChainTransform([cr_norm, cr_rot, cr_mni]) + ax_tf = ChainTransform([ax_norm, ax_rot, ax_mni]) + return sg_tf, cr_tf, ax_tf diff --git a/visbrain/io/tests/test_download.py b/visbrain/io/tests/test_download.py index 6c7461ca5..acf8f0276 100644 --- a/visbrain/io/tests/test_download.py +++ b/visbrain/io/tests/test_download.py @@ -1,27 +1,23 @@ """Test functions in download.py.""" -import pytest -from visbrain.io.download import get_data_url_file, download_file +# import pytest +from visbrain.io.download import download_file class TestDownload(object): """Test functions in download.py.""" - def test_get_data_url_file(self): - """Test function get_data_url_file.""" - get_data_url_file() - def test_download_file(self): """Test function download_file.""" - download_file('Px.npy') + download_file('Px.npy', astype='example_data') def test_download_custom_url(self): """Test function download_custom_url.""" name = "https://www.dropbox.com/s/whogfxutyxoir1t/xyz_sample.npz?dl=1" - download_file(name, filename="text.npz") + 
download_file(name, filename="text.npz", astype='example_data') - @pytest.mark.skip("Test downloading all files is too slow") - def test_download_files_from_dropbox(self): - """Test function download_file from dropbox.""" - urls = get_data_url_file() - for name in list(urls.keys()): - download_file(name) + # @pytest.mark.skip("Test downloading all files is too slow") + # def test_download_files_from_dropbox(self): + # """Test function download_file from dropbox.""" + # urls = get_data_url_file() + # for name in list(urls.keys()): + # download_file(name, astype='example_data') diff --git a/visbrain/io/tests/test_mneio.py b/visbrain/io/tests/test_mneio.py index f1a126b34..3862119cf 100644 --- a/visbrain/io/tests/test_mneio.py +++ b/visbrain/io/tests/test_mneio.py @@ -11,11 +11,11 @@ class TestMneIO(object): def test_mne_switch(self): """Test function mne_switch.""" # Download sleep file : - sleep_file = path_to_visbrain_data('excerpt2.edf') + sleep_file = path_to_visbrain_data('excerpt2.edf', 'example_data') + file, ext = os.path.splitext(sleep_file) if not os.path.isfile(sleep_file): - download_file('sleep_edf.zip', unzip=True) + download_file('sleep_edf.zip', unzip=True, astype='example_data') to_exclude = ['VAB', 'NAF2P-A1', 'PCPAP', 'POS', 'FP2-A1', 'O2-A1', 'CZ2-A1', 'event_pneumo', 'event_pneumo_aut'] kwargs = dict(exclude=to_exclude, stim_channel=False) - mne_switch(path_to_visbrain_data('excerpt2'), '.edf', 100., - preload=True, **kwargs) + mne_switch(file, ext, 100., preload=True, **kwargs) diff --git a/visbrain/io/tests/test_path.py b/visbrain/io/tests/test_path.py new file mode 100644 index 000000000..1400b69af --- /dev/null +++ b/visbrain/io/tests/test_path.py @@ -0,0 +1,47 @@ +"""Test methods in path.""" +import os + +from visbrain.io.path import (path_to_visbrain_data, get_data_url_path, + path_to_tmp, clean_tmp, get_files_in_folders) + + +class TestPath(object): + """Test path.""" + + def test_get_data_url_path(self): + """Test function 
get_data_url_path.""" + assert os.path.isfile(get_data_url_path()) + + def test_path_to_visbrain_data(self): + """Test function path_to_visbrain_data.""" + path = path_to_visbrain_data() + assert os.path.isdir(path) + + def test_path_to_tmp(self): + """Test function path_to_tmp.""" + assert os.path.isdir(path_to_tmp()) + + def test_clean_tmp(self): + """Test function clean_tmp.""" + path = path_to_tmp() + assert os.path.isdir(path) + clean_tmp() + assert not os.path.isdir(path) + + def test_get_files_in_folders(self): + """Test function get_files_in_folders.""" + vb_path = path_to_visbrain_data(folder='templates') + # Test with / without extension : + lst = get_files_in_folders(vb_path, with_ext=False) + lst_ext = get_files_in_folders(vb_path, with_ext=True, exclude=['npy']) + assert (len(lst) > 1) and all(['.npz' not in k for k in lst]) + assert all(['.npz' in k for k in lst_ext]) + # Test exclude : + lst_exc = get_files_in_folders(vb_path, exclude=['B1']) + assert all(['B1' not in k for k in lst_exc]) + # Test with / without path : + lst_no_path = get_files_in_folders(vb_path, with_path=False)[0] + lst_path = get_files_in_folders(vb_path, with_path=True)[0] + assert os.path.split(lst_path)[1] == lst_no_path + # Sort : + get_files_in_folders(vb_path, sort=True) diff --git a/visbrain/io/tests/test_read_data.py b/visbrain/io/tests/test_read_data.py index 776107a25..91193c13c 100644 --- a/visbrain/io/tests/test_read_data.py +++ b/visbrain/io/tests/test_read_data.py @@ -1,8 +1,8 @@ """Test function in read_data.py.""" -import pytest +# import pytest from visbrain.io.read_data import (read_mat, read_pickle, read_npy, read_npz, # noqa - read_txt, read_csv, read_json, read_nifti, + read_txt, read_csv, read_json, read_stc) from visbrain.io.download import download_file @@ -12,9 +12,11 @@ class TestReadData(object): def test_read_stc(self): """Test function read_stc.""" - read_stc(download_file("meg_source_estimate-lh.stc")) + 
read_stc(download_file("meg_source_estimate-lh.stc", + astype='example_data')) - @pytest.mark.slow - def test_read_nifti(self): - """Test function read_nifti.""" - read_nifti(download_file("GG-853-GM-0.7mm.nii.gz")) + # @pytest.mark.slow + # def test_read_nifti(self): + # """Test function read_nifti.""" + # read_nifti(download_file("GG-853-GM-0.7mm.nii.gz", + # astype='example_data')) diff --git a/visbrain/io/write_image.py b/visbrain/io/write_image.py index b25414c2f..6e6060008 100644 --- a/visbrain/io/write_image.py +++ b/visbrain/io/write_image.py @@ -98,7 +98,8 @@ def write_fig_hyp(data, sf, file=None, start_s=0, grid=False, ascolor=False, idxm = np.where(hypno == q)[0] + 1 idxm[idxm >= len(hypno)] = len(hypno) - 1 mask[idxm] = False - plt.plot(np.ma.masked_array(hypno, mask=mask), i, linewidth=lw) + plt.plot(np.ma.masked_array(hypno, mask=mask), i, ls='steps', + linewidth=lw) # Plot REM epochs remcol = 'k' if not ascolor else colors[4] @@ -123,8 +124,8 @@ def write_fig_hyp(data, sf, file=None, start_s=0, grid=False, ascolor=False, if grid: plt.grid(True, 'major', ls=':', lw=.2, c='k', alpha=.3) - plt.tick_params(axis='both', which='both', bottom='on', top='off', - labelbottom='on', left='on', right='off', labelleft='on', + plt.tick_params(axis='both', which='both', bottom=True, top=False, + labelbottom=True, left=True, right=False, labelleft=True, labelcolor='k', direction='out') # Invert Y axis and despine diff --git a/visbrain/mne/plot_mne.py b/visbrain/mne/plot_mne.py index 6a6a4450c..e97c7e8b2 100644 --- a/visbrain/mne/plot_mne.py +++ b/visbrain/mne/plot_mne.py @@ -13,13 +13,12 @@ __all__ = ['mne_plot_source_estimation', 'mne_plot_source_space'] -def _extract_arrays_from_src(src, hemisphere='both'): +def _extract_arrays_from_src(src, hemisphere='both', fact=1.): """Get vertices, faces, active vertices and sources from SourceSpace.""" logger.debug("Loading vert, faces, activ and sources from src structure.") # Define usefull variables : _l_nb, _r_nb = 
src[0]['rr'].shape[0], src[1]['rr'].shape[0] - _f_off = [0, src[0]['tris'].max() + 1] - _act_off, _fact = [0, _l_nb], 1000. + _f_off, _act_off = [0, src[0]['tris'].max() + 1], [0, _l_nb] _hemi = {'left': [0], 'right': [1], 'both': [0, 1]}[hemisphere] # Get vertices and faces : vertices = np.vstack([src[k]['rr'] for k in [0, 1]]) @@ -28,13 +27,13 @@ def _extract_arrays_from_src(src, hemisphere='both'): # Get active vertex and sources : activ = np.hstack([src[k]['vertno'] + _act_off[k] for k in [0, 1]]) sources = np.vstack([src[k]['rr'][src[k]['vertno']] for k in _hemi]) - return _fact * vertices, faces, lr_index, activ, _fact * sources + return fact * vertices, faces, lr_index, activ, fact * sources def _plt_src(name, kw_brain_obj, active_data, active_vert, sources, kw_source_obj, kw_activation, show): # Define a brain object and a source object : - logger.info('Define a Brain and Source objects') + logger.info(' Define a Brain and Source objects') from visbrain.objects import BrainObj, SourceObj, SceneObj brain_obj, source_obj = name + '_brain', name + '_sources' b_obj = BrainObj(brain_obj, **kw_brain_obj) @@ -42,22 +41,24 @@ def _plt_src(name, kw_brain_obj, active_data, active_vert, sources, s_obj.visible_obj = False # Add data to the BrainObj if needed : if isinstance(active_data, np.ndarray): - logger.info("Add active data between " + logger.info(" Add active data between " "[%2f, %2f]" % (active_data.min(), active_data.max())) b_obj.add_activation(data=active_data, vertices=active_vert, **kw_activation) # Return either a scene or a BrainObj and SourceObj : if show is True: # Display inside the Brain GUI # Define a Brain instance : - from visbrain import Brain + from visbrain.gui import Brain brain = Brain(brain_obj=b_obj, source_obj=s_obj) + brain._brain_template.setEnabled(False) # By default, display colorbar if activation : if isinstance(active_data, np.ndarray): brain.menuDispCbar.setChecked(True) brain._fcn_menu_disp_cbar() brain.show() elif show is 
'scene': # return a SceneObj - logger.info('Define a unique scene for the Brain and Source objects') + logger.info(" Define a unique scene for the Brain and Source " + "objects") sc = SceneObj() sc.add_to_subplot(s_obj) sc.add_to_subplot(b_obj, use_this_cam=True) @@ -131,7 +132,7 @@ def mne_plot_source_estimation(sbj, sbj_dir, fwd_file, stc_file=None, (vertices, faces, lr_index, active_vert, sources) = _extract_arrays_from_src(fwd_src, hemisphere) # Head to MNI conversion - logger.info("Head to MNI conversion") + logger.info(" Head to MNI conversion") vertices = head_to_mni(vertices, sbj, mri_head_t, subjects_dir=sbj_dir) sources = head_to_mni(sources, sbj, mri_head_t, subjects_dir=sbj_dir) # Add data to the mesh : @@ -142,16 +143,16 @@ def mne_plot_source_estimation(sbj, sbj_dir, fwd_file, stc_file=None, "(%i)" % (len(active_data), len(active_vert))) active_data = active_vert = None else: - logger.info("Array of active data used.") + logger.info(" Array of active data used.") elif isinstance(stc_file, str) and isinstance(active_data, int): # Get active data : assert os.path.isfile(stc_file) n_tp = active_data data = mne.read_source_estimate(stc_file).data active_data = np.abs(data[:, n_tp] / data[:, n_tp].max()) - logger.info("Time instant %i used for activation" % n_tp) + logger.info(" Time instant %i used for activation" % n_tp) else: - logger.info("No active data detected.") + logger.info(" No active data detected.") active_data = active_vert = None # Complete dicts : kw_brain_obj['vertices'], kw_brain_obj['faces'] = vertices, faces @@ -198,7 +199,7 @@ def mne_plot_source_space(fif_file, active_data=None, hemisphere='both', src = read_source_spaces(fif_file) # Build vertices / faces : (vertices, faces, lr_index, active_vert, - sources) = _extract_arrays_from_src(src, hemisphere) + sources) = _extract_arrays_from_src(src, hemisphere, fact=1000.) 
# Complete dicts : kw_brain_obj['vertices'], kw_brain_obj['faces'] = vertices, faces kw_brain_obj['lr_index'], kw_brain_obj['hemisphere'] = lr_index, hemisphere diff --git a/visbrain/objects/__init__.py b/visbrain/objects/__init__.py index cd1ed0862..2d53dffa7 100644 --- a/visbrain/objects/__init__.py +++ b/visbrain/objects/__init__.py @@ -11,6 +11,7 @@ from .scene_obj import SceneObj, VisbrainCanvas # noqa from .source_obj import SourceObj, CombineSources # noqa from .tf_obj import TimeFrequencyObj # noqa +from .topo_obj import TopoObj # noqa from .ts3d_obj import TimeSeries3DObj, CombineTimeSeries # noqa from .vector_obj import VectorObj, CombineVectors # noqa from .visbrain_obj import VisbrainObject, CombineObjects # noqa diff --git a/visbrain/objects/_projection.py b/visbrain/objects/_projection.py index e2959c3b4..cb87b645f 100644 --- a/visbrain/objects/_projection.py +++ b/visbrain/objects/_projection.py @@ -2,11 +2,11 @@ import numpy as np from scipy.spatial.distance import cdist -from ..utils import (normalize, array2colormap, color2vb) +from ..utils import (normalize, color2vb) import logging logger = logging.getLogger('visbrain') -PROJ_STR = "%i sources visibles and not masked used for the %s" +PROJ_STR = " %i sources visibles and not masked used for the %s" def _get_eucl_mask(v, xyz, radius, contribute, xsign): @@ -176,7 +176,7 @@ def _get_masked_index(s_obj, v, radius, contribute=False): # Check inputs and get masked xyz / data : xyz, data, v, xsign = _check_projection(s_obj, v, radius, contribute, False) - logger.info("%i sources visibles and masked found" % len(data)) + logger.info(" %i sources visibles and masked found" % len(data)) # Predefined masked euclidian distance : nv, index_faced = v.shape[0], v.shape[1] fmask = np.ones((v.shape[0], index_faced, len(data)), dtype=bool) @@ -196,7 +196,7 @@ def _get_masked_index(s_obj, v, radius, contribute=False): def _project_sources_data(s_obj, b_obj, project='modulation', radius=10., contribute=False, 
cmap='viridis', clim=None, vmin=None, under='black', vmax=None, over='red', - mask_color=None): + mask_color=None, to_overlay=0): """Project source's data.""" # _____________________ CHECKING _____________________ assert type(s_obj).__name__ in ['SourceObj', 'CombineSources'] @@ -210,37 +210,31 @@ def _project_sources_data(s_obj, b_obj, project='modulation', radius=10., raise ValueError("`project` must either be 'modulation' or " "'repartition'") if mask_color is None: - logger.warning("mask_color use %s.mask_color variable" % s_obj.name) + logger.debug("mask_color use %s.mask_color variable" % s_obj.name) mask_color = s_obj.mask_color mask_color = color2vb(mask_color) - logger.info("Project the source's %s (radius=%r, " + logger.info(" Project the source's %s (radius=%r, " "contribute=%r)" % (project, radius, contribute)) # Get mesh and vertices : mesh = b_obj.mesh vertices = mesh._vertices - mask = np.zeros((vertices.shape[0]), dtype=np.float32) # _____________________ GET MODULATION _____________________ mod = project_fcn(s_obj, vertices, radius, contribute) # Update mesh color informations : - b_obj._cbar_data = mod b_obj._minmax = (float(mod.min()), float(mod.max())) if clim is None: clim = b_obj._minmax b_obj._clim = b_obj._minmax # Get where there's masked sources : + mask_idx = np.zeros((len(mod),), dtype=bool) if s_obj.is_masked: mask_idx = _get_masked_index(s_obj, vertices, radius, contribute) - mask[mask_idx] = 2. mesh.mask_color = mask_color - logger.info("Set masked sources cortical activity to the " + logger.info(" Set masked sources cortical activity to the " "color %s" % str(list(mesh.mask_color.ravel())[0:-1])) - # Enable to set color to active vertices : - if mod.mask.sum(): - mask[~mod.mask] = 1. 
# _____________________ MODULATION TO COLOR _____________________ - mesh.mask = mask - mod_color = array2colormap(mod, cmap=cmap, clim=clim, vmin=vmin, vmax=vmax, - under=under, over=over) - mesh.color = mod_color + mesh.add_overlay(mod[~mod.mask], np.where(~mod.mask)[0], cmap=cmap, + to_overlay=to_overlay, mask_data=mask_idx, clim=clim, + vmin=vmin, vmax=vmax, under=under, over=over) diff --git a/visbrain/objects/brain_obj.py b/visbrain/objects/brain_obj.py index d5bacc375..0a55336ae 100644 --- a/visbrain/objects/brain_obj.py +++ b/visbrain/objects/brain_obj.py @@ -8,12 +8,9 @@ from .visbrain_obj import VisbrainObject from ._projection import _project_sources_data from ..visuals import BrainMesh -from ..utils import (mesh_edges, smoothing_matrix, array2colormap, - rotate_turntable) -from ..io import (download_file, is_nibabel_installed, is_pandas_installed, - get_data_path, get_files_in_data, add_brain_template, - remove_brain_template, path_to_tmp, get_files_in_folders, - path_to_visbrain_data) +from ..utils import (mesh_edges, smoothing_matrix, rotate_turntable) +from ..io import (is_nibabel_installed, is_pandas_installed, + add_brain_template, remove_brain_template) logger = logging.getLogger('visbrain') @@ -84,11 +81,10 @@ def __init__(self, name, vertices=None, faces=None, normals=None, VisbrainObject.__init__(self, name, parent, transform, verbose, **kw) # Load brain template : self._scale = _scale + self.data_folder = 'templates' self.set_data(name, vertices, faces, normals, lr_index, hemisphere, invert_normals, sulcus) self.translucent = translucent - self._data_color = [] - self._data_mask = [] def __len__(self): """Get the number of vertices.""" @@ -98,35 +94,41 @@ def set_data(self, name=None, vertices=None, faces=None, normals=None, lr_index=None, hemisphere='both', invert_normals=False, sulcus=False): """Load a brain template.""" - # _______________________ DEFAULT _______________________ - b_download = self._get_downloadable_templates() - b_installed 
= get_files_in_data('templates') - # Need to download the brain template : - if (name in b_download) and (name not in b_installed): - self._add_downloadable_templates(name) - if not isinstance(vertices, np.ndarray): # predefined - (vertices, faces, normals, - lr_index) = self._load_brain_template(name) + # _______________________ TEMPLATE _______________________ + if not all([isinstance(k, np.ndarray) for k in [vertices, faces]]): + to_load = None + name_npz = name + '.npz' + # Identify if the template is already downloaded or not : + if name in self._df_get_downloaded(): + to_load = self._df_get_file(name_npz, download=False) + elif name_npz in self._df_get_downloadable(): # need download + to_load = self._df_download_file(name_npz) + assert isinstance(to_load, str) + # Load the template : + arch = np.load(to_load) + vertices, faces = arch['vertices'], arch['faces'] + normals = arch['normals'] + lr_index = arch['lr_index'] if 'lr_index' in arch.keys() else None + # Sulcus : if sulcus is True: - if name not in b_download: - logger.error("Sulcus only available for inflated, white and " - "sphere templates") - sulcus = None + if not self._df_is_downloaded('sulcus.npy'): + sulcus_file = self._df_download_file('sulcus.npy') else: - to_path = self._get_template_path() - sulcus = np.load(download_file('sulcus.npy', to_path=to_path)) - elif isinstance(sulcus, np.ndarray): - assert len(sulcus) == vertices.shape[0] + sulcus_file = self._df_get_file('sulcus.npy') + sulcus = np.load(sulcus_file) else: sulcus = None - # _______________________ CHECKING _______________________ assert all([isinstance(k, np.ndarray) for k in (vertices, faces)]) if normals is not None: # vertex normals assert isinstance(normals, np.ndarray) assert (lr_index is None) or isinstance(lr_index, np.ndarray) assert hemisphere in ['both', 'left', 'right'] + if isinstance(sulcus, np.ndarray) and len(sulcus) != vertices.shape[0]: + logger.error("Sulcus ignored. 
Use it only for the inflated, white " + "and sphere brain templates") + sulcus = None self._define_mesh(vertices, faces, normals, lr_index, hemisphere, invert_normals, sulcus) @@ -134,11 +136,8 @@ def set_data(self, name=None, vertices=None, faces=None, normals=None, def clean(self): """Clean brain object.""" self.hemisphere = 'both' - self.mask = 0. self.rotate('top') - self._data_color = [] - self._data_mask = [] - logger.info("Brain object %s cleaned." % self.name) + logger.info(" Brain object %s cleaned." % self.name) def save(self, tmpfile=False): """Save the brain template (if not already saved).""" @@ -156,11 +155,7 @@ def remove(self): def list(self, file=None): """Get the list of all installed templates.""" - path = self._search_in_path() - files = get_files_in_folders(*path, file=file) - download = self._get_downloadable_templates() - all_ = set(files + download) - return list(all_) + return self._df_get_downloaded(with_ext=False, exclude=['sulcus']) def _define_mesh(self, vertices, faces, normals, lr_index, hemisphere, invert_normals, sulcus): @@ -176,49 +171,6 @@ def _define_mesh(self, vertices, faces, normals, lr_index, hemisphere, self.mesh.set_data(vertices=vertices, faces=faces, normals=normals, lr_index=lr_index, hemisphere=hemisphere) - def _search_in_path(self): - """Specify where to find brain templates.""" - _vb_path_tmp = path_to_visbrain_data(folder='templates') - _data_path = get_data_path(folder='templates') - _tmp_path = path_to_tmp(folder='templates') - return _vb_path_tmp, _data_path, _tmp_path - - def _load_brain_template(self, name): - """Load the brain template.""" - path = self._search_in_path() - name = get_files_in_folders(*path, file=name + '.npz')[0] - arch = np.load(name) - vertices, faces = arch['vertices'], arch['faces'] - normals = arch['normals'] - lr_index = arch['lr_index'] if 'lr_index' in arch.keys() else None - return vertices, faces, normals, lr_index - - 
########################################################################### - ########################################################################### - # PATH METHODS - ########################################################################### - ########################################################################### - - def _get_template_path(self): - """Get the path where datasets are stored.""" - return get_data_path(folder='templates') - - def _get_default_templates(self): - """Get the default list of brain templates.""" - return ['B1', 'B2', 'B3'] - - def _get_downloadable_templates(self): - """Get the list of brain that can be downloaded.""" - logger.debug("hdr transformation missing for downloadable templates") - return ['white', 'inflated', 'sphere'] - - def _add_downloadable_templates(self, name, ext='.npz'): - """Download then install a brain template.""" - assert name in self._get_downloadable_templates() - to_path = self._get_template_path() - # Download the file : - download_file(name + ext, to_path=to_path) - ########################################################################### ########################################################################### # CAMERA // ROTATION @@ -292,7 +244,7 @@ def rotate(self, fixed=None, scale_factor=None, custom=None, margin=1.08): def project_sources(self, s_obj, project='modulation', radius=10., contribute=False, cmap='viridis', clim=None, vmin=None, under='black', vmax=None, over='red', - mask_color=None): + mask_color=None, to_overlay=0): """Project source's activity or repartition onto the brain object. 
Parameters @@ -326,9 +278,10 @@ def project_sources(self, s_obj, project='modulation', radius=10., kw = self._update_cbar_args(cmap, clim, vmin, vmax, under, over) self._default_cblabel = "Source %s" % project _project_sources_data(s_obj, self, project, radius, contribute, - mask_color=mask_color, **kw) + mask_color=mask_color, to_overlay=to_overlay, + **kw) - def add_activation(self, data=None, vertices=None, smoothing_steps=20, + def add_activation(self, data=None, vertices=None, smoothing_steps=5, file=None, hemisphere=None, hide_under=None, n_contours=None, cmap='viridis', clim=None, vmin=None, vmax=None, under='gray', over='red'): @@ -344,15 +297,17 @@ def add_activation(self, data=None, vertices=None, smoothing_steps=20, data : array_like | None Vector array of data of shape (n_data,). vertices : array_like | None - Vector array of vertices of shape (n_vtx). Must be an array of - integers. + Vector array of vertex indices of shape (n_vtx). + Must be an array of integers. If hemisphere is 'left' or 'right' + indexation is done with respect to the specified hemisphere. smoothing_steps : int | 20 - Number of smoothing steps (smoothing is used if n_data < n_vtx) + Number of smoothing steps (smoothing is used if n_data < n_vtx). + If None or 0, no smoothing is performed. file : string | None Full path to the overlay file. hemisphrere : {None, 'both', 'left', 'right'} - The hemisphere to use to add the overlay. If None, the method try - to inferred the hemisphere from the file name. + The hemisphere to use to add the overlay. If None, the method tries + to infer the hemisphere from the file name. hide_under : float | None Hide activations under a certain threshold. n_contours : int | None @@ -371,47 +326,55 @@ def add_activation(self, data=None, vertices=None, smoothing_steps=20, over : string/tuple/array_like | 'red' The color to use for values over vmax. 
""" - col_kw = self._update_cbar_args(cmap, clim, vmin, vmax, under, over) + kw = self._update_cbar_args(cmap, clim, vmin, vmax, under, over) is_under = isinstance(hide_under, (int, float)) - color = np.zeros((len(self.mesh), 4), dtype=np.float32) - mask = np.zeros((len(self.mesh),), dtype=float) + mask = np.zeros((len(self.mesh),), dtype=bool) + data_vec = np.zeros((len(self.mesh),), dtype=np.float32) + sm_data = np.zeros((len(self.mesh),), dtype=np.float32) self._default_cblabel = "Activation" # ============================= METHOD ============================= if isinstance(data, np.ndarray): - if not isinstance(vertices, np.ndarray): - vertices = np.arange(len(data)) - logger.info("Add data to specific vertices.") - assert (data.ndim == 1) and (vertices.ndim == 1) + # Hemisphere : + hemisphere, activ_vert = self._hemisphere_from_file(hemisphere, + file) + activ_vert_idx = np.where(activ_vert)[0] + + is_do_smoothing = True + + if vertices is None: + # Data are defined on a dense grid + assert len(activ_vert_idx) == len(data) + vertices = np.arange(len(activ_vert_idx)) + is_do_smoothing = False + if smoothing_steps: + logger.warning( + 'Data defined on a dense grid; ignore smoothing.') + else: + assert len(vertices) == len(data) + + logger.info(" Add data to specific vertices.") + assert (data.ndim == 1) and (np.asarray(vertices).ndim == 1) assert smoothing_steps is None or isinstance(smoothing_steps, int) + # Get smoothed vertices // data : - if isinstance(smoothing_steps, int): - edges = mesh_edges(self.mesh._faces) - sm_mat = smoothing_matrix(vertices, edges, smoothing_steps) - sm_data = data[sm_mat.col] - rows = sm_mat.row + if hemisphere != 'both': + # Transform to indexing with respect to the whole brain + vert_whole = activ_vert_idx[vertices] else: - sm_data = data - rows = vertices - # Clim : - clim = (sm_data.min(), sm_data.max()) if clim is None else clim - assert len(clim) == 2 - col_kw['clim'] = clim - # Contours : - sm_data = 
self._data_to_contour(sm_data, clim, n_contours) - _, idx = self._hemisphere_from_file(hemisphere, None) - hemi_idx = np.where(idx)[0] - # Convert into colormap : - smooth_map = array2colormap(sm_data, **col_kw) - color = np.ones((len(self.mesh), 4), dtype=np.float32) - color[hemi_idx[rows], :] = smooth_map - # Mask : - if is_under: - mask[hemi_idx[rows[sm_data >= hide_under]]] = 1. + vert_whole = vertices + + if smoothing_steps and is_do_smoothing: + edges = mesh_edges(self.mesh._faces) + sm_mat = smoothing_matrix(vert_whole, edges, smoothing_steps) + sc = sm_mat * data # actual data smoothing + if hemisphere != 'both': + sc = sc[activ_vert] else: - mask[:] = 1. + sc = np.zeros_like(sm_data[activ_vert]) + sc[vertices] = data elif isinstance(file, str): assert os.path.isfile(file) - logger.info("Add overlay to the {} brain template " + logger.info(" Add overlay to the {} brain template " "({})".format(self._name, file)) from visbrain.io import read_nifti # Load data using Nibabel : @@ -419,31 +382,22 @@ def add_activation(self, data=None, vertices=None, smoothing_steps=20, sc = sc.ravel(order="F") hemisphere = 'both' if len(sc) == len(self.mesh) else hemisphere # Hemisphere : - hemisphere, idx = self._hemisphere_from_file(hemisphere, file) - assert len(sc) == idx.sum() - # Clim : - clim = (sc.min(), sc.max()) if clim is None else clim - assert len(clim) == 2 - col_kw['clim'] = clim - # Contour : - sc = self._data_to_contour(sc, clim, n_contours) - # Convert into colormap : - color[idx, :] = array2colormap(sc, **col_kw) - # Mask : - mask[idx] = 1. - if is_under: - sub_idx = np.where(idx)[0][sc < hide_under] - mask[sub_idx] = 0. 
+ _, activ_vert = self._hemisphere_from_file(hemisphere, file) else: raise ValueError("Unknown activation type.") - # Build mask color : - col_mask = ~np.tile(mask.reshape(-1, 1).astype(bool), (1, 4)) - # Keep a copy of each overlay color and mask : - self._data_color.append(np.ma.masked_array(color, mask=col_mask)) - self._data_mask.append(mask) - # Set color and mask to the mesh : - self.mesh.color = np.ma.array(self._data_color).mean(0) - self.mesh.mask = np.array(self._data_mask).max(0) + # Define the data to send to the vertices : + sm_data[activ_vert] = sc + data_vec[activ_vert] = self._data_to_contour(sc, clim, n_contours) + mask[activ_vert] = True + # Hide under : + if is_under: + mask[sm_data < hide_under] = False + # Clim : + clim = (sc.min(), sc.max()) if clim is None else clim + assert len(clim) == 2 + kw['clim'] = clim + # Add overlay : + self.mesh.add_overlay(data_vec[mask], vertices=np.where(mask)[0], **kw) def parcellize(self, file, select=None, hemisphere=None, data=None, cmap='viridis', clim=None, vmin=None, under='gray', @@ -462,6 +416,8 @@ def parcellize(self, file, select=None, hemisphere=None, data=None, hemisphere : string | None The hemisphere for the parcellation. If None, the hemisphere will be inferred from file name. + data : array_like | None + Use data to be transformed into color for each parcellate. cmap : string | 'viridis' The colormap to use. 
clim : tuple | None @@ -478,11 +434,13 @@ def parcellize(self, file, select=None, hemisphere=None, data=None, """ idx, u_colors, labels, u_idx = self._load_annot_file(file) roi_labs = [] + kw = self._update_cbar_args(cmap, clim, vmin, vmax, under, over) + data_vec = np.zeros((len(self.mesh),), dtype=np.float32) # Get the hemisphere and (left // right) boolean index : hemisphere, h_idx = self._hemisphere_from_file(hemisphere, file) # Select conversion : if select is None: - logger.info("Select all parcellates") + logger.info(" Select all parcellates") select = labels.tolist() if 'Unknown' in select: select.pop(select.index('Unknown')) @@ -491,20 +449,18 @@ def parcellize(self, file, select=None, hemisphere=None, data=None, data = np.asarray(data) assert data.ndim == 1 and len(data) == len(select) clim = (data.min(), data.max()) if clim is None else clim - logger.info("Color inferred from data") - u_colors = np.zeros((len(u_idx), 4), dtype=float) kw = self._update_cbar_args(cmap, clim, vmin, vmax, under, over) - data_color = array2colormap(data, **kw) + logger.info(" Color inferred from data") + u_colors = np.zeros((len(u_idx), 4), dtype=float) self._default_cblabel = "Parcellates data" else: - logger.info("Use default color included in the file") + logger.info(" Use default color included in the file") u_colors = u_colors.astype(float) / 255. 
- data_color = None # Build the select variable : if isinstance(select, (np.ndarray, list)): select = np.asarray(select) if select.dtype != int: - logger.info('Search parcellates using labels') + logger.info(' Search parcellates using labels') select_str = select.copy() select, bad_select = [], [] for k in select_str: @@ -521,34 +477,33 @@ def parcellize(self, file, select=None, hemisphere=None, data=None, select = np.array(select).ravel() if not select.size: raise ValueError("No parcellates found") + # Get corresponding hemisphere indices (left, right or both) : + hemi_idx = np.where(h_idx)[0] # Prepare color variables : - color = np.zeros((len(self.mesh), 4)) - mask = np.zeros((len(self.mesh),)) + color = [] + mask = np.zeros((len(self.mesh),), dtype=bool) # Set roi color to the mesh : - sub_select = np.where(h_idx)[0] # sub-hemisphere selection no_parcellates = [] for i, k in enumerate(select): sub_idx = np.where(u_idx == k)[0][0] # index location in u_idx if sub_idx: - color_index = sub_select[u_idx[idx] == k] - if data_color is None: - color[color_index, :] = u_colors[sub_idx, :] - else: - color[color_index, :] = data_color[i, :] + vert_index = hemi_idx[u_idx[idx] == k] + color.append(u_colors[sub_idx, :]) roi_labs.append(labels[sub_idx]) - mask[color_index] = 1. 
+ mask[vert_index] = True + data_vec[vert_index] = data[i] if data is not None else i else: no_parcellates.append(str(k)) if no_parcellates: logger.warning("No corresponding parcellates for index " "%s" % ', '.join(np.unique(no_parcellates))) - logger.info("Selected parcellates : %s" % ", ".join(roi_labs)) - # Keep an eye on data color and mask : - self._data_color.append(color) - self._data_mask.append(mask) - # Set color and mask to the mesh : - self.mesh.color = np.array(self._data_color).sum(0) - self.mesh.mask = np.array(self._data_mask).sum(0) + if data is None: + color = np.asarray(color, dtype=np.float32) + kw['cmap'] = color[:, 0:-1] + kw['interpolation'] = 'linear' + logger.info(" Selected parcellates : %s" % ", ".join(roi_labs)) + # Finally, add the overlay to the brain : + self.mesh.add_overlay(data_vec[mask], vertices=np.where(mask)[0], **kw) def get_parcellates(self, file): """Get the list of supported parcellates names and index. @@ -566,8 +521,29 @@ def get_parcellates(self, file): dico = dict(Index=u_idx, Labels=labels, Color=color.tolist()) return pd.DataFrame(dico, columns=['Index', 'Labels', 'Color']) + def slice(self, xmin=None, xmax=None, ymin=None, ymax=None, zmin=None, + zmax=None): + """Take a slice of the brain. + + Parameters + ---------- + xmin, xmax : float | None + Cut the mesh along the x-dimension. + ymin, ymax : float | None + Cut the mesh along the y-dimension. + zmin, zmax : float | None + Cut the mesh along the z-dimension. 
+ """ + self.mesh.xmin = xmin + self.mesh.xmax = xmax + self.mesh.ymin = ymin + self.mesh.ymax = ymax + self.mesh.zmin = zmin + self.mesh.zmax = zmax + @staticmethod def _data_to_contour(data, clim, n_contours): + clim = (data.min(), data.max()) if clim is None else clim if isinstance(n_contours, int): _range = np.linspace(clim[0], clim[1], n_contours) for k in range(len(_range) - 1): @@ -606,7 +582,7 @@ def _load_annot_file(file): id_vert, ctab, names = nibabel.freesurfer.read_annot(file) names = np.array(names).astype(str) color, u_idx = ctab[:, 0:4], ctab[..., -1] - logger.info("Annot file loaded (%s)" % file) + logger.info(" Annot file loaded (%s)" % file) # Test if variables have the same size : if len(u_idx) != len(names): min_len = min(len(u_idx), color.shape[0], len(names)) @@ -626,14 +602,10 @@ def _load_annot_file(file): ########################################################################### def _update_cbar(self): - if isinstance(self._cbar_data, np.ndarray): - color = array2colormap(self._cbar_data, **self.to_kwargs()) - self.mesh.color = color - # else: - # logger.error("No data to update for %s" % self.name) + self.mesh.update_colormap(**self.to_kwargs()) def _update_cbar_minmax(self): - pass + self._clim = self.mesh.minmax ########################################################################### ########################################################################### diff --git a/visbrain/objects/connect_obj.py b/visbrain/objects/connect_obj.py index d4b1f907c..968b3dbf0 100644 --- a/visbrain/objects/connect_obj.py +++ b/visbrain/objects/connect_obj.py @@ -1,4 +1,6 @@ """Base class for objects of type connectivity.""" +import logging + import numpy as np from collections import Counter @@ -6,9 +8,13 @@ from vispy.scene import visuals from .visbrain_obj import VisbrainObject, CombineObjects +from .source_obj import SourceObj from ..utils import array2colormap, normalize, color2vb, wrap_properties +logger = logging.getLogger('visbrain') + + 
class ConnectObj(VisbrainObject): """Create a connectivity object. @@ -25,10 +31,18 @@ class ConnectObj(VisbrainObject): values of shape (n_nodes, n_nodes). line_width : float | 3. Connectivity line width. - color_by : {'strength', 'count'} - Coloring method. Use 'strength' to color edges according to their - connection strength define by the edges input. Use 'count' to color - edges according to the number of connections per node. + color_by : {'strength', 'count', 'causal'} + Coloring method: + + * 'strength' : color edges according to their connection strength + defined by the `edges` input. Only the upper triangle of the + connectivity array is considered. + * 'count' : color edges according to the number of connections per + node. Only the upper triangle of the connectivity array is + considered. + * 'causal' : color edges according to the connectivity strength but + this time, the upper and lower triangles of the connectivity + array in `edges` are considered. custom_colors : dict | None Use a dictionary to colorize edges.
For example, {1.2: 'red', 2.8: 'green', None: 'black'} turn connections that have a 1.2 and 2.8 @@ -106,6 +120,7 @@ def __init__(self, name, nodes, edges, select=None, line_width=3., assert sh[1] >= 2 pos = nodes if sh[1] == 3 else np.c_[nodes, np.full((len(self),), _z)] self._pos = pos.astype(np.float32) + logger.info(" %i nodes detected" % self._pos.shape[0]) # Edges : assert edges.shape == (len(self), len(self)) if not np.ma.isMA(edges): @@ -115,10 +130,12 @@ def __init__(self, name, nodes, edges, select=None, line_width=3., if isinstance(select, np.ndarray): assert select.shape == edges.shape and select.dtype == bool edges.mask = np.invert(select) - edges.mask[np.tril_indices(len(self), 0)] = True + if color_by != 'causal': + edges.mask[np.tril_indices(len(self), 0)] = True + edges.mask[np.diag_indices(len(self))] = True self._edges = edges # Colorby : - assert color_by in ['strength', 'count'] + assert color_by in ['strength', 'count', 'causal'] self._color_by = color_by # Dynamic : if dynamic is not None: @@ -149,18 +166,38 @@ def update(self): def _build_line(self): """Build the connectivity line.""" - # Build the line position (consecutive segments): - nnz_x, nnz_y = np.where(~self._edges.mask) - indices = np.c_[nnz_x, nnz_y].flatten() - line_pos = self._pos[indices, :] - + pos, edges = self._pos, self._edges # Color either edges or nodes : - if self._color_by == 'strength': - nnz_values = self._edges.compressed() - values = np.c_[nnz_values, nnz_values].flatten() - elif self._color_by == 'count': - node_count = Counter(np.ravel([nnz_x, nnz_y])) - values = np.array([node_count[k] for k in indices]) + logger.info(" %s coloring method for connectivity" % self._color_by) + # Switch between coloring method : + if self._color_by in ['strength', 'count']: + # Build line position + nnz_x, nnz_y = np.where(~edges.mask) + indices = np.c_[nnz_x, nnz_y].flatten() + line_pos = pos[indices, :] + if self._color_by == 'strength': + nnz_values = edges.compressed() +
values = np.c_[nnz_values, nnz_values].flatten() + elif self._color_by == 'count': + node_count = Counter(np.ravel([nnz_x, nnz_y])) + values = np.array([node_count[k] for k in indices]) + elif self._color_by == 'causal': + idx = np.array(np.where(~edges.mask)).T + # If the array is not symmetric, the line needs to be drawn between + # points. If it's symmetric, line should stop at the middle point. + # Here, we get the mask value of the symmetric entry and use it to + # weight the middle point calculation : + pond = (~np.array(edges.mask))[idx[:, 1], idx[:, 0]] + pond = pond.astype(float).reshape(-1, 1) + div = pond + 1. + # Build line pos : + line_pos = np.zeros((2 * idx.shape[0], 3), dtype=float) + line_pos[0::2, :] = pos[idx[:, 0], :] + line_pos[1::2, :] = (pos[idx[:, 1]] + pond * pos[idx[:, 0]]) / div + # Build values : + values = np.full((line_pos.shape[0],), edges.min(), dtype=float) + values[1::2] = edges.compressed() + logger.info(" %i connectivity links displayed" % line_pos.shape[0]) self._minmax = (values.min(), values.max()) if self._clim is None: self._clim = self._minmax @@ -179,12 +216,142 @@ def _build_line(self): # Dynamic color : if self._dynamic is not None: - color[:, 3] = normalize(values.copy(), tomin=self._dynamic[0], - tomax=self._dynamic[1]) + if self._color_by == 'causal': + color[0::2, :] = self._dynamic[0] + else: + color[:, 3] = normalize(values.copy(), tomin=self._dynamic[0], + tomax=self._dynamic[1]) # Send data to the connectivity object : self._connect.set_data(pos=line_pos, color=color) + def get_nb_connections_per_node(self, sort='index', order='ascending'): + """Get the number of connections per node. + + Parameters + ---------- + sort : {'index', 'count'} + Sort either by node index ('index') or according to the number of + connections per node ('count').
+ order : {'ascending', 'descending'} + Get the number of connections per node + """ + return self._get_nb_connect(self._edges.mask, sort, order) + + def analyse_connections(self, roi_obj='talairach', group_by=None, + get_centroids=False, replace_bad=True, + bad_patterns=[-1, 'undefined', 'None'], + distance=None, replace_with='Not found', + keep_only=None): + """Analyse connections. + + Parameters + ---------- + roi_obj : string/list | 'talairach' + The ROI object to use. Use either 'talairach', 'brodmann' or 'aal' + to use a predefined ROI template. Otherwise, use a RoiObj object or + a list of RoiObj. + group_by : str | None + Name of the column inside the dataframe for grouping connectivity + results. + replace_bad : bool | True + Replace bad values (True) or not (False). + bad_patterns : list | [-1, 'undefined', 'None'] + Bad patterns to replace if replace_bad is True. + replace_with : string | 'Not found' + Replace bad patterns with this string. + keep_only : list | None + List of string patterns to keep only sources that match. + + Returns + ------- + df : pandas.DataFrames + A Pandas DataFrame or a list of DataFrames if roi_obj is a list.
+ """ + # Get anatomical info of sources : + s_obj = SourceObj('analyse', self._pos) + df = s_obj.analyse_sources(roi_obj=roi_obj, replace_bad=replace_bad, + bad_patterns=bad_patterns, + distance=distance, + replace_with=replace_with, + keep_only=keep_only) + # If no column, return the full dataframe : + if group_by is None: + return df + # Group DataFrame column : + grp = df.groupby(group_by).groups + labels, index = list(grp.keys()), list(grp.values()) + # Prepare the new connectivity array : + n_labels = len(labels) + x_r = np.zeros((n_labels, n_labels), dtype=float) + mask_r = np.ones((n_labels, n_labels), dtype=bool) + # Loop over the upper triangle : + row, col = np.triu_indices(n_labels) + data, mask = self._edges.data, self._edges.mask + for r, c in zip(row, col): + m = tuple(np.meshgrid(index[r], index[c])) + x_r[r, c], mask_r[r, c] = data[m].mean(), mask[m].all() + # Define a ROI dataframe : + import pandas as pd + columns = [group_by, "Mean connectivity strength inside ROI", + "Number of connections per node"] + df_roi = pd.DataFrame({}, columns=columns) + df_roi[group_by] = labels + df_roi[columns[1]] = np.diag(x_r) + df_roi[columns[2]] = [len(k) for k in index] + # Get (x, y, z) ROI centroids : + if get_centroids: + # Define the RoiObj : + from .roi_obj import RoiObj + if isinstance(roi_obj, str): + r_obj = RoiObj(roi_obj) + assert isinstance(r_obj, RoiObj) + # Search where is the label : + idx, roi_labels, rm_rows = [], [], [] + for k, l in enumerate(labels): + _idx = r_obj.where_is(l, exact=True) + if not len(_idx): + rm_rows += [k] + else: + idx += [_idx[0]] + roi_labels += [l] + xyz = r_obj.get_centroids(idx) + x_r = np.delete(x_r, rm_rows, axis=0) + x_r = np.delete(x_r, rm_rows, axis=1) + mask_r = np.delete(mask_r, rm_rows, axis=0) + mask_r = np.delete(mask_r, rm_rows, axis=1) + df_roi.drop(rm_rows, inplace=True) + df_roi.index = pd.RangeIndex(len(df_roi.index)) + df_roi['X'] = xyz[:, 0] + df_roi['Y'] = xyz[:, 1] + df_roi['Z'] = xyz[:, 2] + x_r = 
np.ma.masked_array(x_r, mask=mask_r) + return x_r, labels, df_roi + + @staticmethod + def _get_nb_connect(mask, sort, order): + """Sub-function to get the number of connections per node.""" + assert sort in ['index', 'count'], \ + ("`sort` should either be 'index' or 'count'") + assert order in ['ascending', 'descending'], \ + ("`order` should either be 'ascending' or 'descending'") + logger.info(" Get the number of connections per node") + n_nodes = mask.shape[0] + # Get the number of connections per nodes : + nnz_x, nnz_y = np.where(~mask) + dict_ord = dict(Counter(np.ravel([nnz_x, nnz_y]))) + # Full number of connections : + nb_connect = np.zeros((n_nodes, 2), dtype=int) + nb_connect[:, 0] = np.arange(n_nodes) + nb_connect[list(dict_ord.keys()), 1] = list(dict_ord.values()) + # Sort according to node index or number of connections per node : + idx = 0 if sort == 'index' else 1 + args = np.argsort(nb_connect[:, idx]) + # Ascending or descending sorting : + if order == 'descending': + args = np.flip(args) + return nb_connect[args, :] + def _get_camera(self): """Get the most adapted camera.""" d_mean = self._pos.mean(0) @@ -221,7 +388,7 @@ def color_by(self): @wrap_properties def color_by(self, value): """Set color_by value.""" - assert value in ['strength', 'count'] + assert value in ['strength', 'count', 'causal'] self._color_by = value self._build_line() diff --git a/visbrain/objects/crossec_obj.py b/visbrain/objects/crossec_obj.py index 1c11728e2..157412108 100644 --- a/visbrain/objects/crossec_obj.py +++ b/visbrain/objects/crossec_obj.py @@ -8,7 +8,7 @@ import vispy.visuals.transforms as vist from ..utils import cmap_to_glsl, wrap_properties, color2vb, FixedCam -from ..io import read_nifti +from ..io import read_nifti, niimg_to_transform from .volume_obj import _Volume logger = logging.getLogger('visbrain') @@ -68,7 +68,6 @@ def set_slice(self, xyz): self._sagittal = int(sl[0]) self._coronal = int(sl[1]) self._axial = int(sl[2]) - logger.info("Cut coords at
position %s" % str(xyz)) def update(self): self._im_sagit.update() @@ -88,6 +87,20 @@ def cmap(self, value): self._im_coron.cmap = value self._im_axial.cmap = value + # ----------- CLIM ----------- + @property + def clim(self): + """Get the clim value.""" + return self._clim + + @clim.setter + def clim(self, value): + """Set clim value.""" + self._im_sagit.clim = value + self._im_coron.clim = value + self._im_axial.clim = value + self._clim = value + # ----------- INTERPOLATION ----------- @property def interpolation(self): @@ -161,7 +174,7 @@ def __init__(self, name, vol=None, hdr=None, coords=None, contrast=0., """Init.""" # __________________________ VOLUME __________________________ _Volume.__init__(self, name, parent, transform, verbose, **kw) - self._rect = (-1.5, -1., 3., 2.) + self._rect = (0., -1., 2., 2.) self._sagittal = 0 self._coronal = 0 self._axial = 0 @@ -198,10 +211,10 @@ def __init__(self, name, vol=None, hdr=None, coords=None, contrast=0., # __________________________ TEXT __________________________ self._txt_format = '%s = %.2f' # Add text (sagit, coron, axial, left, right) : - txt_pos = np.array([[.05, -.1, 0.], [.05, -.2, 0.], [.05, -.3, 0.], - [.05, -.4, 0.], [.05, -.5, 0.], - [-.1, -.1, 0.], [0.1, .9, 0.], # L - [-.1, -.9, 0.], [0.9, .9, 0.]]) # R + txt_pos = np.array([[1.05, -.1, 0.], [1.05, -.2, 0.], [1.05, -.3, 0.], + [1.05, -.4, 0.], [1.05, -.5, 0.], + [0.05, -.1, 0.], [1.1, .9, 0.], # L + [0.05, -.9, 0.], [1.9, .9, 0.]]) # R txt = [''] * 5 + ['L'] * 2 + ['R'] * 2 self._txt = scene.visuals.Text(text=txt, pos=txt_pos, anchor_x='left', color=color2vb(text_color), @@ -269,13 +282,13 @@ def set_activation(self, data, xyz=None, translucent=(None, .5), # Load the nifti volume : vol, _, hdr = read_nifti(data) vol, hdr = self._check_volume(vol, hdr) - fact = [k / i for k, i in zip(self._bgd._sh, vol.shape)] + tf_sagit, tf_coron, tf_axial = niimg_to_transform(vol, hdr, False, + self._vol, self._hdr) # Set transform : - tf_sagit = 
vist.STTransform(scale=(fact[2], fact[1], 1.)) self._act._im_sagit.transform = tf_sagit - tf_coron = vist.STTransform(scale=(fact[2], fact[0], 1.)) + tf_coron.prepend(vist.STTransform(translate=(1., 0., 0.))) self._act._im_coron.transform = tf_coron - tf_axial = vist.STTransform(scale=(fact[1], fact[0], 1.)) + tf_axial.prepend(vist.STTransform(translate=(0., -1., 0.))) self._act._im_axial.transform = tf_axial # Set the volume and colormap : self._act.set_volume(vol, hdr) @@ -295,7 +308,7 @@ def set_activation(self, data, xyz=None, translucent=(None, .5), if xyz is None: xyz = self._latest_xyz self.cut_coords(xyz) - logger.info("Activation set using the %s file" % name) + logger.info(" Activation set using the %s file" % name) def localize_source(self, coords): """Cut at a specific MNI coordinate and display the cross. @@ -328,19 +341,22 @@ def highlight_sources(self, xyz, radius=1, color='green'): sh = self._bgd._sh vol = np.zeros(sh, dtype=np.float32) _val = 10. + self._sources._im_sagit.transform = self._bgd._im_sagit.transform + self._sources._im_coron.transform = self._bgd._im_coron.transform + self._sources._im_axial.transform = self._bgd._im_axial.transform def f(x, sh): return slice(max(x, int(x - radius)), min(sh - 1, int(x + radius))) for k in range(xyz.shape[0]): sl = self.pos_to_slice(xyz[k, :]) idx = [f(sl[0], sh[0]), f(sl[1], sh[1]), f(sl[2], sh[2])] - vol[idx] = _val + vol[tuple(idx)] = _val self._sources.set_volume(vol, self._hdr) cmap = cmap_to_glsl(limits=(0., _val), translucent=(None, .5), color=color) self._sources.cmap = cmap self.cut_coords(xyz[0, :]) - logger.info("%i sources highlighted" % xyz.shape[0]) + logger.info(" %i sources highlighted" % xyz.shape[0]) ########################################################################### ########################################################################### @@ -359,6 +375,7 @@ def _get_camera(self): """Get the camera.""" # cam = scene.cameras.PanZoomCamera(rect=self._rect) cam = 
FixedCam(rect=self._rect) + cam.aspect = 1. return cam def _update(self): @@ -370,26 +387,21 @@ def _update(self): self._txt.update() def _grid_transform(self): - sh = self._sh - rz90 = vist.MatrixTransform() - rz90.rotate(90, (0, 0, 1)) - rx180 = vist.MatrixTransform() - rx180.rotate(180, (1, 0, 0)) + tf_sagit, tf_coron, tf_axial = niimg_to_transform(self._vol, self._hdr) # Sagittal transformation : - norm_sagit = vist.STTransform(scale=(1. / sh[1], 1. / sh[2], 1.), - translate=(-1., 0., 0.)) - tf_sagit = vist.ChainTransform([norm_sagit, rz90, rx180]) - self._sagit_node.transform = tf_sagit + self._bgd._im_sagit.transform = tf_sagit + self._cross[0].transform = tf_sagit + self._center[0].transform = tf_sagit # Coronal transformation : - norm_coron = vist.STTransform(scale=(1. / sh[0], 1. / sh[2], 1.), - translate=(0., 0., 0.)) - tf_coron = vist.ChainTransform([norm_coron, rz90, rx180]) - self._coron_node.transform = tf_coron + tf_coron.prepend(vist.STTransform(translate=(1., 0., 0.))) + self._bgd._im_coron.transform = tf_coron + self._cross[1].transform = tf_coron + self._center[1].transform = tf_coron # Axial transformation : - norm_axis = vist.STTransform(scale=(1. / sh[1], 1. / sh[0], 1.), - translate=(-1., 0., 0.)) - tf_axial = vist.ChainTransform([norm_axis, rx180]) - self._axial_node.transform = tf_axial + tf_axial.prepend(vist.STTransform(translate=(0., -1., 0.))) + self._bgd._im_axial.transform = tf_axial + self._cross[2].transform = tf_axial + self._center[2].transform = tf_axial def _set_image(self, xyz, display_cross=False): # xyz = None -> volume center : @@ -445,29 +457,32 @@ def _set_location(self, sl): def _mouse_to_pos(self, pos): """Convert mouse position to pos.""" sh = np.array(self._bgd._sh) - csize = self.canvas.canvas.size - rect = (-1.5, -1., 3., 1.) - # Canvas -> camera conversion : - x = +(pos[0] * rect[2] / csize[0]) + rect[0] - y = -(pos[1] * rect[3] / csize[1]) - rect[1] - if (-1. <= x <= 0.) 
and (.5 <= y <= 1.): - idx_xy, sl_z = [1, 2], self._bgd._sagittal - x_off, y_off, y_lim, y_inv = 1., -1., 0., 2. - elif (0. <= x <= 1.) and (.5 <= y <= 1.): - idx_xy, sl_z = [0, 2], self._bgd._coronal - x_off, y_off, y_lim, y_inv = 0., -1., 0., 2. - elif (-1. <= x <= 0.) and (0 <= y <= .5): - idx_xy, sl_z = [1, 0], self._bgd._axial - x_off, y_off, y_lim, y_inv = 1., .5, -.5, -1. + if hasattr(self._node.parent.parent.camera, 'rect'): + rect = self._node.parent.parent.camera._real_rect else: return None - # Camera -> pos conversion : - pic = sh[idx_xy] - sl_x = (rect[2] * (x + x_off) * pic[0]) / rect[2] - sl_y = (rect[3] * (y_inv * y + y_off) * pic[1]) / \ - ((1. + y_lim) * rect[3]) + csize = self.canvas.canvas.size + left, bottom, width, height = rect.left, rect.bottom, rect.width, \ + rect.height + sgn = np.sign(np.diag(self._hdr.matrix))[0:-1] + # Canvas -> [0, 1] + x = +(pos[0] * width / csize[0]) + left + y = -(pos[1] * height / csize[1]) - bottom + if (0. <= x <= 1.) and (0. <= y <= 1.): + use_idx, sl_z = [1, 2], self._bgd._sagittal + elif (1. <= x <= 2.) and (0. <= y <= 1.): + use_idx, sl_z = [0, 2], self._bgd._coronal + x -= 1. + x = x if sgn[0] == 1 else 1 - x + elif (0. <= x <= 1.) and (-1. <= y <= 0.): + use_idx, sl_z = [1, 0], self._bgd._axial + y += 1. + y = y if sgn[0] == -1 else 1 - y + # Pixel conversion + x_sh, y_sh = sh[use_idx] + sl_x, sl_y = x * sh[use_idx[0]], y * sh[use_idx[1]] sl_xyz = np.array([sl_z] * 3) - sl_xyz[idx_xy] = [sl_x, sl_y] + sl_xyz[use_idx] = [sl_x, sl_y] return self.slice_to_pos(sl_xyz) def _on_mouse_press(self): @@ -581,6 +596,7 @@ def contrast(self, value): clim = (self._vol.min() * (1. + value), self._vol.max() * (1. 
- value)) limits = (self._vol.min(), self._vol.max()) self._bgd.cmap = cmap_to_glsl(limits=limits, clim=clim, cmap='Greys_r') + self._bgd.clim = 'auto' self._contrast = value # ----------- TEXT_SIZE ----------- diff --git a/visbrain/objects/image_obj.py b/visbrain/objects/image_obj.py index c5a3f3ce2..7d8351236 100644 --- a/visbrain/objects/image_obj.py +++ b/visbrain/objects/image_obj.py @@ -5,7 +5,7 @@ from vispy import scene from .visbrain_obj import VisbrainObject -from ..utils import array2colormap, wrap_properties, color2vb +from ..utils import vispy_array, wrap_properties, color2vb logger = logging.getLogger('visbrain') @@ -116,14 +116,15 @@ def set_data(self, data, xaxis=None, yaxis=None, clim=None, cmap=None, data = data[::dsf_x, ::dsf_y] xaxis, yaxis = xaxis[::dsf_x], yaxis[::dsf_y] # Set properties : - kw = self._update_cbar_args(cmap, clim, vmin, vmax, under, over) - # Get color : - color = array2colormap(data, **kw) + self._update_cbar_args(cmap, clim, vmin, vmax, under, over) + # Get colormap : + cmap = self._get_glsl_colormap(limits=(data.min(), data.max())) + self._image.cmap = cmap + self._image.clim = 'auto' else: # data is already a compatible color assert data.shape[-1] in [3, 4] - color = data # Set color to the image : - self._image.set_data(color) + self._image.set_data(vispy_array(data)) fact_x = (self._dim[1] - self._dim[0]) / len(xaxis) fact_y = (self._dim[3] - self._dim[2]) / len(yaxis) sc = (fact_x, fact_y, 1.) diff --git a/visbrain/objects/roi_obj.py b/visbrain/objects/roi_obj.py index c79cff514..d1d49d94c 100644 --- a/visbrain/objects/roi_obj.py +++ b/visbrain/objects/roi_obj.py @@ -4,6 +4,7 @@ from functools import wraps import numpy as np +import numpy.core.defchararray as npchar from scipy.spatial.distance import cdist from vispy import scene @@ -42,24 +43,39 @@ def wrapper(self): class RoiObj(_Volume): """Create a Region Of Interest (ROI) object. 
+ **Main functionalities** + + * Display a mesh of selected ROIs + * Localize sources + + **Supported ROI** + + * Brodmann Areas + * Talairach atlas + * Automated Anatomical Labeling (AAL) + * MIST, including levels 7, 12, 20, 36, 64, 122 and ROI [1]_ + Parameters ---------- name : string Name of the ROI object. If name is 'brodmann', 'aal' or 'talairach' a predefined ROI object is used and vol, index and label are ignored. + MIST [1]_ is also supported. To specify a resolution use `mist_%s` with + %s in ['7', '12', '20', '36', '64', '122', 'ROI'] (e.g 'mist_7', + `mist_ROI`) vol : array_like | None ROI volume. Sould be an array with three dimensions. labels : array_like | None Array of labels. A structured array can be used (i.e label=np.zeros(n_sources, dtype=[('brodmann', int), ('aal', object)])). index : array_like | None - Array of index that make the correspondance between the volumne values + Array of index that make the correspondance between the volume values and labels. The length of index must be the same as label. hdr : array_like | None Array of transform source's coordinates into the volume space. Must be a (4, 4) array. system : {'mni', 'tal'} - The system of the volumne. Can either be MNI ('mni') or Talairach + The system of the volume. Can either be MNI ('mni') or Talairach ('tal'). transform : VisPy.visuals.transforms | None VisPy transformation to set to the parent node. @@ -85,6 +101,12 @@ class RoiObj(_Volume): >>> r = RoiObj('brodmann') >>> r.select_roi(select=[4, 6, 38], unique_color=True, smooth=7) >>> r.preview(axis=True) + + References + ---------- + .. [1] Urchs, S., Armoza, J., Benhajali, Y., St-Aubin, J., Orban, P., & + Bellec, P. (2017). MIST: A multi-resolution parcellation of + functional brain networks. MNI Open Research, 1. 
""" ########################################################################### @@ -176,13 +198,13 @@ def set_data(self, name, vol=None, labels=None, index=None, hdr=None, labels=np.zeros(n_sources, dtype=[('brodmann', int), ('aal', object)])). index : array_like | None - Array of index that make the correspondance between the volumne + Array of index that make the correspondance between the volume values and labels. The length of index must be the same as labels. hdr : array_like | None Array of transform source's coordinates into the volume space. Must be a (4, 4) array. system : {'mni', 'tal'} - The system of the volumne. Can either be MNI ('mni') or Talairach + The system of the volume. Can either be MNI ('mni') or Talairach ('tal'). """ # Test if pandas is installed : @@ -211,8 +233,13 @@ def set_data(self, name, vol=None, labels=None, index=None, hdr=None, self.ref = pd.DataFrame(label_dict, columns=cols) self.ref = self.ref.set_index(index) self.analysis = pd.DataFrame({}, columns=cols) + self._analysis_backup = self.analysis.copy() - logger.info("%s ROI loaded." % name) + logger.info(" %s ROI loaded." % name) + + def reset(self): + """Reset the RoiObject.""" + self.analysis = self._analysis_backup def get_labels(self, save_to_path=None): """Get the labels associated with the loaded ROI. @@ -230,11 +257,11 @@ def get_labels(self, save_to_path=None): writer = pd.ExcelWriter(save_as) self.ref.to_excel(writer) writer.save() - logger.info("Saved as %s" % save_as) + logger.info(" Saved as %s" % save_as) return self.ref def where_is(self, patterns, df=None, union=True, columns=None, - exact=False): + exact=False, case_sensitive=False): """Find a list of string patterns in a DataFrame. Parameters @@ -249,9 +276,12 @@ def where_is(self, patterns, df=None, union=True, columns=None, intersection (False). columns : list | None List of specific column names to search in. If None, this method - inspect every columns in the DataFrame. 
+ searches through the entire DataFrame. exact : bool | False - Specify if the pattern to search have to be exact matching. + Specify if the pattern to search has to be exact matching (True) + or if the pattern is only a part of the result. + case_sensitive : bool | False + Specify if the search has to be case sensitive. Returns ------- @@ -261,32 +291,31 @@ # Check inputs : assert isinstance(patterns, (str, list, tuple)) df_to_use = self.ref if df is None else df - n_rows, _ = df_to_use.shape is_pandas_installed(raise_error=True) import pandas as pd assert isinstance(df_to_use, pd.DataFrame) patterns = [patterns] if isinstance(patterns, str) else patterns - if columns is None: - columns = list(df_to_use.keys()) - if isinstance(columns, str): - columns = [columns] - assert all([k in df_to_use.keys() for k in columns]) - n_cols = len(columns) + patterns = list(patterns) + if columns is not None: + df_to_use = df_to_use[columns] + dfarr = np.array(df_to_use).astype(str) + # Case sensitive : + if not case_sensitive: + dfarr = npchar.lower(dfarr) + patterns = npchar.lower(np.array(patterns).astype(str)) + # Define the matching function : + if exact: + def match(x, pat): return np.any(x == pat, axis=1) # noqa + else: + def match(x, pat): + return np.any((npchar.find(x, pat) + 1).astype(bool), axis=1) # Locate patterns : - idx_to_keep = np.zeros((n_rows, len(patterns)), dtype=bool) - for p, k in enumerate(patterns): - pat_in_col = np.zeros((n_rows, n_cols), dtype=bool) - for c, i in enumerate(columns): - if exact: - pat_in_col[:, c] = df_to_use[i].astype(str) == k - else: - pat_in_col[:, c] = df_to_use[i].astype(str).str.match(k) - idx_to_keep[:, p] = np.any(pat_in_col, 1) + idx_to_keep = np.zeros((dfarr.shape[0], len(patterns)), dtype=bool) + for k, p in enumerate(patterns): + idx_to_keep[:, k] = match(dfarr, str(p)) # Return either the union or intersection across research : - if union: - idx_to_keep =
np.any(idx_to_keep, 1) - else: - idx_to_keep = np.all(idx_to_keep, 1) + fcn = np.any if union else np.all + idx_to_keep = fcn(idx_to_keep, 1) if not np.any(idx_to_keep): logger.error("No corresponding entries in the %s ROI for " "%s" % (self.name, ', '.join(patterns))) @@ -329,6 +358,10 @@ def localize_sources(self, xyz, source_name=None, replace_bad=True, if source_name is None: source_name = ['s' + str(k) for k in range(n_sources)] assert len(source_name) == n_sources + # Check analysis : + if len(self.analysis): + logger.debug('Reset analysis because already exist') + self.reset() # Loop over sources : for k in range(n_sources): # Apply HDR transformation : @@ -359,7 +392,7 @@ def localize_sources(self, xyz, source_name=None, replace_bad=True, bad_rows = np.where(np.array(bad_rows).sum(0))[0] good_rows = np.arange(n_sources) good_rows = np.delete(good_rows, bad_rows) - logger.info("%i rows containing the %r pattern " + logger.info(" %i rows containing the %r pattern " "found" % (len(bad_rows), replace_with)) # Get good and bad xyz and compute euclidian distance : xyz_good = xyz_untouched[good_rows, :] @@ -377,7 +410,7 @@ def localize_sources(self, xyz, source_name=None, replace_bad=True, n_replaced += 1 close_str[good_rows] = -1 self.analysis["Replaced with"] = close_str - logger.info("Anatomical informations of %i sources have been " + logger.info(" Anatomical informations of %i sources have been " "replaced using a distance of " "%1.f" % (n_replaced, distance)) # Add Text and (X, Y, Z) to the table : @@ -435,7 +468,7 @@ def _df_to_struct_array(df): ########################################################################### def select_roi(self, select=.5, unique_color=False, roi_to_color=None, - smooth=3): + smooth=3, translucent=False): """Select several Region Of Interest (ROI). Parameters @@ -449,6 +482,8 @@ def select_roi(self, select=.5, unique_color=False, roi_to_color=None, {1: 'red', 2: 'orange'}. smooth : int | 3 Smoothing level. 
Must be an odd integer (smooth % 2 = 1). + translucent : bool | False + Set if the mesh should be translucent or opaque. """ # Get vertices / faces : vert = np.array([]) @@ -458,21 +493,21 @@ def select_roi(self, select=.5, unique_color=False, roi_to_color=None, unique_color = True if not unique_color: vert, faces = self._select_roi(self._vol.copy(), select, smooth) - logger.info("Same white color used across ROI(s)") + logger.info(" Same white color used across ROI(s)") else: assert not isinstance(select, float) select = [select] if isinstance(select, int) else select - vert, faces, color = np.array([]), np.array([]), np.array([]) + vert, faces, data = np.array([]), np.array([]), np.array([]) # Generate a (n_levels, 4) array of unique colors : if isinstance(roi_to_color, dict): assert len(roi_to_color) == len(select) col_unique = [color2vb(k) for k in roi_to_color.values()] col_unique = np.array(col_unique).reshape(len(select), 4) - logger.info("Specific colors has been defined") + logger.info(" Specific colors has been defined") else: col_unique = np.random.uniform(.1, .9, (len(select), 4)) col_unique[..., -1] = 1. 
- logger.info("Random color are going to be used.") + logger.info(" Random color are going to be used.") # Get vertices and faces of each ROI : for i, k in enumerate(select): v, f = self._select_roi(self._vol.copy(), int(k), smooth) @@ -480,8 +515,8 @@ def select_roi(self, select=.5, unique_color=False, roi_to_color=None, faces = np.r_[faces, f + faces.max() + 1] if faces.size else f vert = np.r_[vert, v] if vert.size else v # Concatenate color : - col = np.tile(col_unique[[i], ...], (v.shape[0], 1)) - color = np.r_[color, col] if color.size else col + col = np.full((v.shape[0],), i) + data = np.r_[data, col] if data.size else col if vert.size: # Apply hdr transformation to vertices : vert_hdr = self._hdr.map(vert)[:, 0:-1] @@ -490,15 +525,44 @@ def select_roi(self, select=.5, unique_color=False, roi_to_color=None, logger.debug("ROI mesh defined") self.mesh = BrainMesh(vertices=vert_hdr, faces=faces, parent=self._node) + self.mesh.translucent = translucent else: logger.debug("ROI mesh already exist") self.mesh.set_data(vertices=vert_hdr, faces=faces) if unique_color: - self.mask = 1. - self.color = color + self.mesh.add_overlay(data, cmap=col_unique, + interpolation='linear', to_overlay=0) else: raise ValueError("No vertices found for this ROI") + def get_centroids(self, select): + """Get the (x, y, z) coordinates of the center of a ROI. + + Parameters + ---------- + select : list + List of indices of ROIs. Must be a list or tuple of integers. + + Returns + ------- + xyz : array_like + Array of shape (n_indices, 3) which contains the (x, y, z) + coordinates of each ROI center. 
+ """ + if isinstance(select, int): + select = [select] + is_list = isinstance(select, (list, tuple)) + is_ints = np.all([isinstance(k, int) for k in select]) + if (not is_list) or (not is_ints): + raise ValueError("`select` must be a list of integers.") + xyz = np.zeros((len(select), 3), dtype=np.float32) + for i, k in enumerate(select): + logger.info(" Get centroid of ROI %i" % k) + v = self._select_roi(self._vol.copy(), int(k), None)[0] + vert_hdr = self._hdr.map(v)[:, 0:-1] + xyz[i, :] = vert_hdr.mean(0) + return xyz + def _select_roi(self, vol, level, smooth): if isinstance(level, (int, np.int)): condition = vol != level @@ -510,8 +574,14 @@ def _select_roi(self, vol, level, smooth): vol[condition] = 0 # Get the list of remaining ROIs : unique_vol = np.unique(vol[vol != 0]) - logger.info("Selected ROI(s) : \n%r" % self.ref.loc[unique_vol]) - return isosurface(smooth_3d(vol, smooth), level=.5) + logger.info(" Selected ROI(s) : \n%r" % self.ref.loc[unique_vol]) + # Smooth the volume : + vol_sm, tf = smooth_3d(vol, smooth, correct=True) + # Get the isosurface : + vert, faces = isosurface(vol_sm, level=.5) + # Mesh correction after smoothing : + vert = tf.map(vert)[:, 0:-1] + return vert, faces def _get_camera(self): """Get the most adapted camera.""" @@ -536,7 +606,7 @@ def _get_camera(self): def project_sources(self, s_obj, project='modulation', radius=10., contribute=False, cmap='viridis', clim=None, vmin=None, under='black', vmax=None, over='red', - mask_color=None): + mask_color=None, to_overlay=0): """Project source's activity or repartition onto ROI. Parameters @@ -566,16 +636,31 @@ def project_sources(self, s_obj, project='modulation', radius=10., mask_color : string/tuple/array_like | 'gray' The color to use for the projection of masked sources. If None, the color of the masked sources is going to be used. + to_overlay : int | 0 + The overlay number used for the projection. 
""" if self: kw = self._update_cbar_args(cmap, clim, vmin, vmax, under, over) self._default_cblabel = "Source's %s" % project _project_sources_data(s_obj, self, project, radius, contribute, - mask_color=mask_color, **kw) + mask_color=mask_color, to_overlay=to_overlay, + **kw) else: raise ValueError("Cannot project sources because no ROI selected. " "Use the `select_roi` method before.") + ########################################################################### + ########################################################################### + # CBAR + ########################################################################### + ########################################################################### + + def _update_cbar(self): + self.mesh.update_colormap(**self.to_kwargs()) + + def _update_cbar_minmax(self): + self._clim = self.mesh.minmax + ########################################################################### ########################################################################### # PROPERTIES @@ -629,19 +714,6 @@ def normals(self): """Get the normals value.""" return self.mesh._normals - # ----------- MASK ----------- - @property - @wrap_getter_properties - def mask(self): - """Get the mask value.""" - return self.mesh.mask - - @mask.setter - @wrap_setter_properties - def mask(self, value): - """Set mask value.""" - self.mesh.mask = value - # ----------- COLOR ----------- @property @wrap_getter_properties diff --git a/visbrain/objects/scene_obj.py b/visbrain/objects/scene_obj.py index 896e445b6..2ff51824b 100644 --- a/visbrain/objects/scene_obj.py +++ b/visbrain/objects/scene_obj.py @@ -5,8 +5,8 @@ import numpy as np from vispy import scene -from ..io import write_fig_canvas -from ..utils import color2vb, set_log_level, rotate_turntable +from ..io import write_fig_canvas, dialog_save +from ..utils import color2vb, set_log_level, rotate_turntable, FixedCam from ..visuals import CbarVisual from ..config import CONFIG, PROFILER @@ -349,13 +349,20 @@ class 
SceneObj(object): The default camera state to use. verbose : string Verbosity level. + + Notes + ----- + List of supported shortcuts : + + * **s** : save the figure + * **delete** : reset all views """ def __init__(self, bgcolor='black', camera_state={}, verbose=None, **kwargs): """Init.""" set_log_level(verbose) - logger.info("Scene creation") + logger.info("Creation of a scene") PROFILER('Scene creation') # Create the canvas and the grid : self.canvas = scene.SceneCanvas(keys='interactive', show=False, @@ -412,18 +419,20 @@ def _gl_transform(self, obj): logger.debug("Object rescaled %s" % str([self._fix_gl] * 3)) obj._scale = self._fix_gl sc = [self._fix_gl] * 3 + tf = scene.transforms.STTransform(scale=sc) else: - sc = [1.] * 3 + tf = obj._node.transform # Add transformation to the node : if hasattr(obj, '_node'): # VisbrainObject - obj._node.transform = scene.transforms.STTransform(scale=sc) + obj._node.transform = tf elif hasattr(obj, '_cnode'): # combineObjects - obj._cnode.transform = scene.transforms.STTransform(scale=sc) + obj._cnode.transform = tf def add_to_subplot(self, obj, row=0, col=0, row_span=1, col_span=1, title=None, title_size=12., title_color='white', title_bold=True, use_this_cam=False, rotate=None, - camera_state={}, width_max=None, height_max=None): + zoom=None, camera_state={}, width_max=None, + height_max=None): """Add object to subplot. Parameters @@ -453,6 +462,11 @@ def add_to_subplot(self, obj, row=0, col=0, row_span=1, col_span=1, rotate : string | None Rotate the scene. Use 'top', 'bottom', 'left', 'right', 'front' or 'back'. Only available for 3-D objects. + zoom : float | None + Zoom level. If zoom is in ]0, 1[, the size of the object decrease. + If `zoom=1`, no zoom is applied. If zoom > 1., the size of the + object increase. For example, `zoom=2` means that the displayed + object will appear twice as large. camera_state : dict | {} Arguments to pass to the camera. 
width_max : float | None @@ -473,7 +487,8 @@ def add_to_subplot(self, obj, row=0, col=0, row_span=1, col_span=1, self._grid_desc[(row + 1, col + 1)] = len(self._grid.children) title_color = color2vb(title_color) tit = scene.visuals.Text(title, color=title_color, anchor_x='left', - bold=title_bold, font_size=title_size) + bold=title_bold, font_size=title_size, + anchor_y='bottom') sub.add_subvisual(tit) else: sub = self[(row, col)] @@ -489,14 +504,27 @@ def add_to_subplot(self, obj, row=0, col=0, row_span=1, col_span=1, sub.height_max = height_max sub.width_max = width_max sub.add(obj.parent) + # Zoom : + if isinstance(zoom, (int, float)): + assert zoom > 0, "`zoom` should be > 0" + if isinstance(sub.camera, scene.cameras.TurntableCamera): + sub.camera.scale_factor /= zoom + elif isinstance(sub.camera, scene.cameras.PanZoomCamera) or \ + isinstance(sub.camera, FixedCam): + r = sub.camera.rect + prop = np.array((r.width, r.height)) / zoom + left = r.center[0] - (prop[0] / 2.) + bottom = r.center[1] - (prop[1] / 2.) + sub.camera.rect = (left, bottom, prop[0], prop[1]) # Camera : if camera_state == {}: camera_state = self._camera_state if isinstance(sub.camera, scene.cameras.TurntableCamera): rotate_turntable(fixed=rotate, camera_state=camera_state, camera=sub.camera) + sub.camera.set_default_state() PROFILER('%s added to the scene' % repr(obj)) - logger.info('%s added to the scene' % repr(obj)) + logger.info(' %s added to the scene' % repr(obj)) def link(self, *args): """Link the camera of several objects of the scene. 
@@ -512,7 +540,7 @@ def link(self, *args): >>> # Link cameras of subplots (0, 0), (0, 1) and (1, 0) >>> sc.link((0, 0), (0, 1), (1, 0)) """ - logger.info('Link cameras') + logger.info(' Link cameras') if args[0] == -1: args = [(k[0] - 1, k[1] - 1) for k in self._grid_desc.keys()] assert len(args) > 1 @@ -577,13 +605,32 @@ def screenshot(self, saveas, print_size=None, dpi=300., write_fig_canvas(saveas, self.canvas, widget=self.canvas.central_widget, **kwargs) + def _scene_shortcuts(self): + """Add shortcuts to the scene.""" + # On key pressed : + def key_pressed(event): # noqa + if event.text == 's': + from PyQt5.QtWidgets import QWidget + ext = ['png', 'tiff', 'jpg'] + _ext = ['%s file (*.%s)' % (k.upper(), k) for k in ext] + _ext += ['All files (*.*)'] + saveas = dialog_save(QWidget(), name='Export the scene', + default='canvas.png', allext=_ext) + if saveas: + write_fig_canvas(saveas, self.canvas, + widget=self.canvas.central_widget) + self.canvas.events.key_press.connect(key_pressed) + def preview(self): """Previsualize the result.""" self._gl_uniform_transforms() self.canvas.show(visible=True) + # Shortcuts : + self._scene_shortcuts() + # Profiler : if PROFILER and logger.level == 1: logger.profiler("PARENT TREE \n%s" % self._grid.describe_tree()) logger.profiler(" ") PROFILER.finish() - if sys.flags.interactive != 1: + if sys.flags.interactive != 1 and CONFIG['SHOW_PYQT_APP']: CONFIG['VISPY_APP'].run() diff --git a/visbrain/objects/source_obj.py b/visbrain/objects/source_obj.py index ebab73d93..621264ded 100644 --- a/visbrain/objects/source_obj.py +++ b/visbrain/objects/source_obj.py @@ -59,9 +59,9 @@ class SourceObj(VisbrainObject): text : list | None Text to attach to each source. For example, text could be the name of each source. - text_size : float | 3. + text_size : float | 2. Text size attached to sources. - text_color : array_like/string/tuple | 'black' + text_color : array_like/string/tuple | 'white' Text color attached to sources. 
text_bold : bool | False Specify if the text attached to sources should be bold. @@ -114,8 +114,8 @@ class SourceObj(VisbrainObject): def __init__(self, name, xyz, data=None, color='red', alpha=1., symbol='disc', radius_min=5., radius_max=10., edge_width=0., edge_color='black', system='mni', mask=None, - mask_color='gray', text=None, text_size=3., - text_color='black', text_bold=False, + mask_color='gray', text=None, text_size=2., + text_color='white', text_bold=False, text_translate=(0., 2., 0.), visible=True, transform=None, parent=None, verbose=None, _z=-10., **kw): """Init.""" @@ -126,6 +126,7 @@ def __init__(self, name, xyz, data=None, color='red', alpha=1., assert sh[1] in [2, 3] self._n_sources = sh[0] pos = xyz if sh[1] == 3 else np.c_[xyz, np.full((len(self),), _z)] + logger.info(' %i sources detected' % self._n_sources) # Radius min and max : assert all([isinstance(k, (int, float)) for k in ( radius_min, radius_max)]) @@ -180,8 +181,8 @@ def __init__(self, name, xyz, data=None, color='red', alpha=1., # Radius / color : self.visible = visible self._update_radius() - self._update_color() self.alpha = alpha + self._update_color() def __len__(self): """Get the number of sources.""" @@ -279,7 +280,7 @@ def _get_camera(self): def project_sources(self, b_obj, project='modulation', radius=10., contribute=False, cmap='viridis', clim=None, vmin=None, under='black', vmax=None, over='red', - mask_color=None): + mask_color=None, to_overlay=0): """Project source's activity or repartition onto the brain object. Parameters @@ -309,11 +310,14 @@ def project_sources(self, b_obj, project='modulation', radius=10., mask_color : string/tuple/array_like | 'gray' The color to use for the projection of masked sources. If None, the color of the masked sources is going to be used. + to_overlay : int | 0 + The overlay number used for the projection. 
""" kw = self._update_cbar_args(cmap, clim, vmin, vmax, under, over) self._default_cblabel = "Source's %s" % project _project_sources_data(self, b_obj, project, radius, contribute, - mask_color=mask_color, **kw) + mask_color=mask_color, to_overlay=to_overlay, + **kw) ########################################################################### ########################################################################### @@ -350,7 +354,8 @@ def analyse_sources(self, roi_obj='talairach', replace_bad=True, A Pandas DataFrame or a list of DataFrames if roi_obj is a list. """ # List of predefined ROI objects : - proi = ['brodmann', 'aal', 'talairach'] + proi = ['brodmann', 'aal', 'talairach', 'mist_7', 'mist_12', 'mist_20', + 'mist_36', 'mist_64', 'mist_122', 'mist_ROI'] # Define the ROI object if needed : if isinstance(roi_obj, (str, list, tuple)): if isinstance(roi_obj, str): @@ -361,7 +366,7 @@ def analyse_sources(self, roi_obj='talairach', replace_bad=True, roi_obj = [roi_obj] assert all([isinstance(k, RoiObj) for k in roi_obj]) # Convert predefined ROI into RoiObj objects : - logger.info("Analyse source's locations using the %s " + logger.info(" Analyse source's locations using the %s " "atlas" % ', '.join([k.name for k in roi_obj])) if isinstance(roi_obj, (list, tuple)): test_r = all([k in proi or isinstance(k, RoiObj) for k in roi_obj]) @@ -374,7 +379,7 @@ def analyse_sources(self, roi_obj='talairach', replace_bad=True, distance) for k in roi_obj] # Merge multiple DataFrames : if len(df) > 1: - logger.info('Merging DataFrames') + logger.info(' Merging DataFrames') import pandas as pd df_full = df.copy() df = df_full[0] @@ -385,15 +390,17 @@ def analyse_sources(self, roi_obj='talairach', replace_bad=True, else: df = df[0] # Keep only sources that match with patterns : - if isinstance(keep_only, (list, tuple)): + if isinstance(keep_only, (str, list, tuple)): + if isinstance(keep_only, str): + keep_only = [keep_only] idx_to_keep = [] for k, i in product(df.keys(), 
keep_only): idx_to_keep.append(np.array(df[k], dtype=object) == i) idx_to_keep = np.vstack(idx_to_keep).sum(0).astype(bool) df = df.loc[idx_to_keep] self.visible = idx_to_keep - logger.info("%i sources found in %s" % (len(df), - ', '.join(keep_only))) + logger.info(" %i sources found in %s" % (len(df), + ', '.join(keep_only))) return df @@ -435,13 +442,14 @@ def color_sources(self, analysis=None, color_by=None, data=None, """ if isinstance(data, np.ndarray): assert len(data) == len(self) and (data.ndim == 1) - logger.info("Color %s using a data vector" % self.name) + logger.info(" Color %s using a data vector" % self.name) kw = self._update_cbar_args(cmap, clim, vmin, vmax, under, over) colors = array2colormap(data, **kw) elif (analysis is not None) and (color_by is not None): # Group analysis : assert color_by in list(analysis.columns) - logger.info("Color %s according to the %s" % (self.name, color_by)) + logger.info(" Color %s according to the %s" % (self.name, + color_by)) gp = analysis.groupby(color_by).groups # Compute color : if roi_to_color is None: # random color @@ -491,7 +499,7 @@ def set_visible_sources(self, select='all', v=None, distance=5.): assert isinstance(distance, (int, float)) xyz = self._xyz if select in ['inside', 'outside', 'close']: - logger.info("Select sources %s vertices" % select) + logger.info(" Select sources %s vertices" % select) if v.ndim == 2: # index faced vertices v = v[:, np.newaxis, :] # Predifined inside : @@ -518,9 +526,9 @@ def set_visible_sources(self, select='all', v=None, distance=5.): self.visible = cond self.visible_obj = cond msg = 'Display' if cond else 'Hide' - logger.info("%s all sources" % msg) + logger.info(" %s all sources" % msg) elif select in ['left', 'right']: - logger.info('Select sources in the %s hemisphere' % select) + logger.info(' Select sources in the %s hemisphere' % select) vec = xyz[:, 0] self.visible = vec <= 0 if select == 'left' else vec >= 0 diff --git 
a/visbrain/objects/tests/_testing_objects.py b/visbrain/objects/tests/_testing_objects.py index 0dea5782e..50fb94e19 100644 --- a/visbrain/objects/tests/_testing_objects.py +++ b/visbrain/objects/tests/_testing_objects.py @@ -1,5 +1,5 @@ """Base class for testing visbrain objects.""" -import pytest +# import pytest import vispy from visbrain.tests._tests_visbrain import _TestVisbrain @@ -45,12 +45,12 @@ def test_str(self): def test_describe_tree(self): assert isinstance(self.OBJ.describe_tree(), str) - @pytest.mark.xfail(reason="Failed if display not correctly configured", - run=True, strict=False) - def test_screenshot(self): - """Test screenshot rendering.""" - basename = self.to_tmp_dir(repr(self.OBJ)) - self.OBJ.screenshot(basename + '.png') + # @pytest.mark.xfail(reason="Failed if display not correctly configured", + # run=True, strict=False) + # def test_screenshot(self): + # """Test screenshot rendering.""" + # basename = self.to_tmp_dir(repr(self.OBJ)) + # self.OBJ.screenshot(basename + '.png') def test_parent(self): """Test setting parent.""" diff --git a/visbrain/objects/tests/test_brain_obj.py b/visbrain/objects/tests/test_brain_obj.py index 14a5e1042..937a4a7b3 100644 --- a/visbrain/objects/tests/test_brain_obj.py +++ b/visbrain/objects/tests/test_brain_obj.py @@ -7,7 +7,7 @@ NEEDED_FILES = dict(ANNOT_FILE_1='lh.aparc.annot', - ANNOT_FILE_2='rh.PALS_B12_Brodmann.annot', + ANNOT_FILE_2='rh.aparc.annot', MEG_INVERSE='meg_source_estimate-lh.stc', OVERLAY_1='lh.sig.nii.gz', OVERLAY_2='lh.alt_sig.nii.gz', @@ -34,17 +34,6 @@ class TestBrainObj(_TestObjects): OBJ = b_obj - def _prepare_brain(self, name='inflated'): - b_obj.set_data(name) - b_obj.clean() - - def test_get_template_list(self): - """Test function get_template_list.""" - b_obj._get_template_path() - b_obj._get_default_templates() - b_obj._get_downloadable_templates() - b_obj._add_downloadable_templates('white') - def test_rotation(self): """Test function rotation.""" # Test fixed rotations : @@ 
-71,6 +60,8 @@ def test_custom_templates(self): def test_get_parcellates(self): """Test function get_parcellates.""" + # Prepare the brain : + b_obj = BrainObj('inflated') import pandas as pd file_1 = self.need_file(NEEDED_FILES['ANNOT_FILE_1']) file_2 = self.need_file(NEEDED_FILES['ANNOT_FILE_2']) @@ -81,7 +72,7 @@ def test_get_parcellates(self): def test_overlay_from_file(self): """Test add_activation method.""" # Prepare the brain : - self._prepare_brain() + b_obj = BrainObj('inflated') file_1 = self.need_file(NEEDED_FILES['OVERLAY_1']) file_2 = self.need_file(NEEDED_FILES['OVERLAY_2']) # Overlay : @@ -99,6 +90,7 @@ def test_overlay_from_file(self): def test_parcellize(self): """Test function parcellize.""" + b_obj = BrainObj('inflated') file_1 = self.need_file(NEEDED_FILES['PARCELLATES_1']) file_2 = self.need_file(NEEDED_FILES['PARCELLATES_2']) b_obj.parcellize(file_1, hemisphere='left') @@ -153,6 +145,5 @@ def test_reload_saved_template(self): def test_remove(self): """Test function remove.""" - b_cust = BrainObj('Custom') - b_cust.remove() + BrainObj('Custom').remove() clean_tmp() diff --git a/visbrain/objects/tests/test_connect_obj.py b/visbrain/objects/tests/test_connect_obj.py index da4245d80..c9f8d7d03 100644 --- a/visbrain/objects/tests/test_connect_obj.py +++ b/visbrain/objects/tests/test_connect_obj.py @@ -36,6 +36,18 @@ def test_definition(self): ConnectObj('C1', nodes, edges, dynamic=(.1, .4)) ConnectObj('C2', nodes, edges, custom_colors=custom_colors) + def test_get_nb_connections_per_node(self): + """Test function get_nb_connections_per_node.""" + sort = ['index', 'count'] + order = ['ascending', 'descending'] + for s in sort: + for o in order: + c_obj.get_nb_connections_per_node(s, o) + + def test_analyse_connections(self): + """Test function analyse_connections.""" + c_obj.analyse_connections(get_centroids=True) + def test_builtin_methods(self): """Test function connect_builtin_methods.""" custom_colors[None] = 'blue' @@ -50,6 +62,8 @@ def 
test_attributes(self): """Test function connect_attributes.""" self.assert_and_test('line_width', 4.4) self.assert_and_test('color_by', 'strength') + self.assert_and_test('color_by', 'count') + self.assert_and_test('color_by', 'causal') self.assert_and_test('dynamic', (.2, .4)) self.assert_and_test('alpha', 0.7) diff --git a/visbrain/objects/tests/test_crossec_obj.py b/visbrain/objects/tests/test_crossec_obj.py index 82a42eedd..f70464027 100644 --- a/visbrain/objects/tests/test_crossec_obj.py +++ b/visbrain/objects/tests/test_crossec_obj.py @@ -30,11 +30,13 @@ def test_localize_source(self): def test_nii_definition(self): """Test function nii_definition.""" - CrossSecObj(download_file('GG-853-GM-0.7mm.nii.gz')) + CrossSecObj(download_file('GG-853-GM-0.7mm.nii.gz', + astype='example_data')) def test_set_activation(self): """Test function set_activation.""" - cs_obj.set_activation(download_file('GG-853-GM-0.7mm.nii.gz')) + cs_obj.set_activation(download_file('GG-853-GM-0.7mm.nii.gz', + astype='example_data')) def test_highlight_sources(self): """Test function highlight_sources.""" @@ -42,7 +44,8 @@ def test_highlight_sources(self): def test_save(self): """Test function save.""" - v_obj = CrossSecObj(download_file('GG-853-GM-0.7mm.nii.gz')) + v_obj = CrossSecObj(download_file('GG-853-GM-0.7mm.nii.gz', + astype='example_data')) v_obj.save() v_obj.save(tmpfile=True) diff --git a/visbrain/objects/tests/test_hypno_obj.py b/visbrain/objects/tests/test_hypno_obj.py index d1b55c8e5..92315c4ba 100644 --- a/visbrain/objects/tests/test_hypno_obj.py +++ b/visbrain/objects/tests/test_hypno_obj.py @@ -8,7 +8,7 @@ data = np.repeat(np.arange(6), 100) - 1. 
h_obj = HypnogramObj('hypno', data) -hypno_file = path_to_visbrain_data('Hypnogram_excerpt2.txt') +hypno_file = path_to_visbrain_data('Hypnogram_excerpt2.txt', 'example_data') class TestHypnogramObj(_TestObjects): diff --git a/visbrain/objects/tests/test_roi_obj.py b/visbrain/objects/tests/test_roi_obj.py index 6aec3e8cf..7ce8e6e02 100644 --- a/visbrain/objects/tests/test_roi_obj.py +++ b/visbrain/objects/tests/test_roi_obj.py @@ -15,9 +15,9 @@ xyz[:, 1] -= 50. s_obj = SourceObj('S1', xyz) -download_file('MIST_ROI.zip', unzip=True) -nifti_file = path_to_visbrain_data('MIST_ROI.nii.gz') -csv_file = path_to_visbrain_data('MIST_ROI.csv') +download_file('MIST_ROI.zip', unzip=True, astype='example_data') +nifti_file = path_to_visbrain_data('MIST_ROI.nii.gz', 'example_data') +csv_file = path_to_visbrain_data('MIST_ROI.csv', 'example_data') # Read the .csv file : arr = np.genfromtxt(csv_file, delimiter=';', dtype=str) # Get column names, labels and index : @@ -41,8 +41,11 @@ class TestRoiObj(_TestVolumeObject): def test_definition(self): """Test function definition.""" - for k in ['aal', 'talairach', 'brodmann']: - RoiObj(k) + # Default : + _ = [RoiObj(k) for k in ['aal', 'talairach', 'brodmann']] # noqa + # MIST : + levels = [7, 12, 20, 36, 64, 122, 'ROI'] + _ = [RoiObj('mist_%s' % str(k)) for k in levels] # noqa def test_get_labels(self): """Test function get_labels.""" @@ -62,6 +65,10 @@ def test_localize_sources(self): roi_obj.localize_sources(s_obj.xyz, source_name=s_obj.text) roi_obj.localize_sources(s_obj.xyz, distance=1000.) 
+ def test_get_centroids(self): + """Test function get_centroids.""" + roi_obj.get_centroids([2, 4, 6]) + def test_project_sources(self): """Test function project_sources.""" roi_obj.project_sources(s_obj, 'modulation') @@ -74,8 +81,6 @@ def test_properties(self): assert isinstance(roi_obj.vertices, np.ndarray) assert isinstance(roi_obj.faces, np.ndarray) assert isinstance(roi_obj.normals, np.ndarray) - assert roi_obj.mask is None - assert roi_obj.color is None assert isinstance(roi_obj.mask_color, np.ndarray) def test_select_roi(self): diff --git a/visbrain/objects/tests/test_scene_obj.py b/visbrain/objects/tests/test_scene_obj.py index 22abd22d9..e3163915f 100644 --- a/visbrain/objects/tests/test_scene_obj.py +++ b/visbrain/objects/tests/test_scene_obj.py @@ -1,6 +1,6 @@ """Test SceneObj and VisbrainCanvas.""" import numpy as np -import pytest +# import pytest import vispy from visbrain.tests._tests_visbrain import _TestVisbrain @@ -98,10 +98,10 @@ def test_link(self): sc_obj_3d_2.add_to_subplot(c_obj_2, row=0, col=2) sc_obj_3d_2.link(-1) - @pytest.mark.xfail(reason="Failed if display not correctly configured", - run=True, strict=False) - def test_screenshot(self): - """Test function screenshot.""" - sc_obj_3d_1.screenshot(self.to_tmp_dir('SceneObj_3d1.png')) - sc_obj_3d_2.screenshot(self.to_tmp_dir('SceneObj_3d2.png')) - sc_obj_2d_1.screenshot(self.to_tmp_dir('SceneObj_2d2.png')) + # @pytest.mark.xfail(reason="Failed if display not correctly configured", + # run=True, strict=False) + # def test_screenshot(self): + # """Test function screenshot.""" + # sc_obj_3d_1.screenshot(self.to_tmp_dir('SceneObj_3d1.png')) + # sc_obj_3d_2.screenshot(self.to_tmp_dir('SceneObj_3d2.png')) + # sc_obj_2d_1.screenshot(self.to_tmp_dir('SceneObj_2d2.png')) diff --git a/visbrain/objects/tests/test_topo_obj.py b/visbrain/objects/tests/test_topo_obj.py new file mode 100644 index 000000000..3b2727268 --- /dev/null +++ b/visbrain/objects/tests/test_topo_obj.py @@ -0,0 +1,54 @@ +"""Test 
BrainObj.""" +import numpy as np + +from visbrain.objects import TopoObj +from visbrain.objects.tests._testing_objects import _TestObjects + + +# Topoplot : +channels = ['C3', 'C4', 'Cz', 'Fz', 'Pz'] +data = [10, 20, 30, 10, 10] +t_obj = TopoObj('topo', data, channels=channels) + + +class TestTopoObj(_TestObjects): + """Test TopoObj.""" + + OBJ = t_obj + + def _get_coordinates(self): + file = self.need_file('topoplot_data.npz') + mat = np.load(file) + xyz, data = mat['xyz'], mat['data'] + channels = [str(k) for k in range(len(data))] + return xyz, data, channels + + def test_channel_definition(self): + """Test the definition of TopoObj using channel names.""" + TopoObj('topo', data, channels=channels) + + def test_xyz_definition(self): + """Test the definition of TopoObj using xyz coordinates.""" + xyz, data, channels = self._get_coordinates() + TopoObj('topo', data, channels=channels, xyz=xyz) + + def test_levels(self): + """Test levels definition.""" + xyz, data, channels = self._get_coordinates() + # Regulary spaced levels : + TopoObj('topo', data, channels=channels, xyz=xyz, levels=10, + level_colors='bwr') + # Manual levels : + level_colors = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) + levels = [2., 2.2, 2.5] + TopoObj('topo', data, channels=channels, xyz=xyz, levels=levels, + level_colors=level_colors) + + def test_connect(self): + """Test connect channels.""" + xyz, data, channels = self._get_coordinates() + connect = (data.reshape(-1, 1) + data.reshape(1, -1)) / 2. + select = connect < 1.97 + t_obj = TopoObj('topo', data, channels=channels, xyz=xyz) + t_obj.connect(connect, select=select, cmap='inferno', antialias=True, + line_width=4.) 
diff --git a/visbrain/objects/tests/test_volume_obj.py b/visbrain/objects/tests/test_volume_obj.py index c4b8f6c71..cad38dbc3 100644 --- a/visbrain/objects/tests/test_volume_obj.py +++ b/visbrain/objects/tests/test_volume_obj.py @@ -28,13 +28,20 @@ def test_properties(self): self.assert_and_test('cmap', k) self.assert_and_test('threshlod', 5) + def test_extract_activity(self): + """Test function extract_activity.""" + xyz = np.random.uniform(-20, 20, (100, 3)) + v_obj.extract_activity(xyz, radius=10.) + def test_nii_definition(self): """Test function nii_definition.""" - VolumeObj(download_file('GG-853-GM-0.7mm.nii.gz')) + VolumeObj(download_file('GG-853-GM-0.7mm.nii.gz', + astype='example_data')) def test_save(self): """Test function save.""" - v_obj = VolumeObj(download_file('GG-853-GM-0.7mm.nii.gz')) + v_obj = VolumeObj(download_file('GG-853-GM-0.7mm.nii.gz', + astype='example_data')) v_obj.save() v_obj.save(tmpfile=True) diff --git a/visbrain/objects/tf_obj.py b/visbrain/objects/tf_obj.py index bf16bb77d..21f4ef25d 100644 --- a/visbrain/objects/tf_obj.py +++ b/visbrain/objects/tf_obj.py @@ -144,7 +144,7 @@ def set_data(self, data, sf=1., method='fourier', nperseg=256, f_min=1., # Update color arguments : self._update_cbar_args(cmap, clim, vmin, vmax, under, over) - logger.info("Compute time-frequency decomposition using the" + logger.info(" Compute time-frequency decomposition using the" " %s method" % method) if method == 'fourier': @@ -156,7 +156,7 @@ def set_data(self, data, sf=1., method='fourier', nperseg=256, f_min=1., time = np.arange(n_pts) / sf tf = np.zeros((len(freqs), n_pts), dtype=data.dtype) # Compute TF and inplace normalization : - logger.info("Compute the time-frequency map (" + logger.info(" Compute the time-frequency map (" "normalization=%r)" % norm) for i, k in enumerate(freqs): tf[i, :] = np.square(np.abs(morlet(data, sf, k))) @@ -164,8 +164,8 @@ def set_data(self, data, sf=1., method='fourier', nperseg=256, f_min=1., # Averaging : if 
isinstance(n_window, int): - logger.info("Averaging time-frequency map using windows of " - "size %i with a %f overlap" % (n_window, overlap)) + logger.info(" Averaging time-frequency map using windows of" + " size %i with a %f overlap" % (n_window, overlap)) kw = dict(overlap=overlap, window=window) tf = averaging(tf, n_window, axis=1, **kw) time = averaging(time, n_window, **kw) diff --git a/visbrain/objects/topo_obj.py b/visbrain/objects/topo_obj.py new file mode 100644 index 000000000..9fea5775a --- /dev/null +++ b/visbrain/objects/topo_obj.py @@ -0,0 +1,549 @@ +"""Base class for objects of type connectivity.""" +import logging + +import numpy as np +from scipy.interpolate import interp2d + +from vispy import scene +from vispy.scene import visuals +import vispy.visuals.transforms as vist + +from .visbrain_obj import VisbrainObject +from ..objects import ConnectObj +from ..io import download_file, is_sc_image_installed +from ..utils import (array2colormap, color2vb, mpl_cmap, normalize, + vpnormalize, vprecenter) + +logger = logging.getLogger('visbrain') + + +class TopoObj(VisbrainObject): + """Create a topoplot object. + + Parameters + ---------- + name : string + The name of the connectivity object. + data : array_like + Array of data of shape (n_channels) + xyz : array_like | None + Array of source's coordinates. + channels : list | None + List of channel names. + system : {'cartesian', 'spherical'} + Coordinate system. + levels : array_like/int | None + The levels at which the isocurve is constructed. + level_colors : string/array_like | 'white' + The color to use when drawing the line. If a list is given, it + must be of shape (Nlev), if an array is given, it must be of + shape (Nlev, ...). and provide one color per level + (rgba, colorname). By default, all levels are whites. + unit : {'degree', 'rad'} + If system is 'spherical', specify if angles are in degrees or radians. 
+ line_color : array_like/string | 'black' + Color of lines for the head, nose and eras. + line_width : float | 4. + Line width for the head, nose and eras. + chan_size : float | 12. + Size of channel names text. + chan_mark_color : array_like/string | 'white' + Color of channel markers. + chan_mark_symbol : string | 'disc' + Symbol to use for markers. Use disc, arrow, ring, clobber, square, + diamond, vbar, hbar, cross, tailed_arrow, x, triangle_up, + triangle_down, and star. + chan_txt_color : array_like/string | 'black' + Color of channel names. + cmap : string | None + Matplotlib colormap (like 'viridis', 'inferno'...). + clim : tuple/list | None + Colorbar limit. Every values under / over clim will + clip. + vmin : float | None + Every values under vmin will have the color defined + using the under parameter. + vmax : float | None + Every values over vmin will have the color defined + using the over parameter. + under : tuple/string | None + Matplotlib color under vmin. + over : tuple/string | None + Matplotlib color over vmax. + transform : VisPy.visuals.transforms | None + VisPy transformation to set to the parent node. + parent : VisPy.parent | None + Line object parent. + verbose : string + Verbosity level. + kw : dict | {} + Optional arguments are used to control the colorbar + (See :class:`ColorbarObj`). 
+ + Notes + ----- + List of supported shortcuts : + + * **s** : save the figure + * **** : reset camera + """ + + ########################################################################### + ########################################################################### + # BUILT IN + ########################################################################### + ########################################################################### + + def __init__(self, name, data, xyz=None, channels=None, system='cartesian', + levels=None, level_colors='white', unit='degree', + line_color='black', line_width=3., chan_size=12., + chan_offset=(0., 0., 0.), chan_mark_color='white', + chan_mark_symbol='disc', chan_txt_color='black', + cmap='viridis', clim=None, vmin=None, under='gray', vmax=None, + over='red', margin=.05, transform=None, parent=None, + verbose=None, **kw): + """Init.""" + VisbrainObject.__init__(self, name, parent, transform, verbose, **kw) + + # ======================== VARIABLES ======================== + scale = 800. 
# fix GL bugs for small plots + pos = np.zeros((1, 3), dtype=np.float32) + # Colors : + line_color = color2vb(line_color) + chan_txt_color = color2vb(chan_txt_color) + self._chan_mark_color = color2vb(chan_mark_color) + self._chan_mark_symbol = chan_mark_symbol + # Disc interpolation : + self._interp = .1 + self._pix = 64 + csize = int(self._pix / self._interp) if self._interp else self._pix + l = csize / 2 # noqa + + # ======================== NODES ======================== + # Main topoplot node : + self.node = scene.Node(name='Topoplot', parent=self._node) + self.node.transform = vist.STTransform(scale=[scale] * 3) + # Headset + channels : + self.node_headfull = scene.Node(name='HeadChan', parent=self.node) + # Headset node : + self.node_head = scene.Node(name='Headset', parent=self.node_headfull) + # Channel node : + self.node_chan = scene.Node(name='Channels', parent=self.node_headfull) + self.node_chan.transform = vist.STTransform(translate=(0., 0., -10.)) + # Dictionaries : + kw_line = {'width': line_width, 'color': line_color, + 'parent': self.node_head, 'antialias': False} + + # ======================== PARENT VISUALS ======================== + # Main disc : + self.disc = visuals.Image(pos=pos, name='Disc', parent=self.node_head, + interpolation='bilinear') + + # ======================== HEAD / NOSE / EAR ======================== + # ------------------ HEAD ------------------ + # Head visual : + self.head = visuals.Line(pos=pos, name='Head', **kw_line) + # Head circle : + theta = np.arange(0, 2 * np.pi, 0.001) + head = np.full((len(theta), 3), -1., dtype=np.float32) + head[:, 0] = l * (1. + np.cos(theta)) + head[:, 1] = l * (1. + np.sin(theta)) + self.head.set_data(pos=head) + + # ------------------ NOSE ------------------ + # Nose visual : + self.nose = visuals.Line(pos=pos, name='Nose', **kw_line) + # Nose data : + wn, hn = csize * 50. / 512., csize * 30. / 512. 
+ nose = np.array([[l - wn, 2 * l - wn, 2.], + [l, 2 * l + hn, 2.], + [l, 2 * l + hn, 2.], + [l + wn, 2 * l - wn, 2.] + ]) + self.nose.set_data(pos=nose, connect='segments') + + # ------------------ EAR ------------------ + we, he = csize * 10. / 512., csize * 30. / 512. + ye = l + he * np.sin(theta) + # Ear left data : + self.earL = visuals.Line(pos=pos, name='EarLeft', **kw_line) + # Ear left visual : + ear_l = np.full((len(theta), 3), 3., dtype=np.float32) + ear_l[:, 0] = 2 * l + we * np.cos(theta) + ear_l[:, 1] = ye + self.earL.set_data(pos=ear_l) + + # Ear right visual : + self.earR = visuals.Line(pos=pos, name='EarRight', **kw_line) + # Ear right data : + ear_r = np.full((len(theta), 3), 3., dtype=np.float32) + ear_r[:, 0] = 0. + we * np.cos(theta) + ear_r[:, 1] = ye + self.earR.set_data(pos=ear_r) + + # ================== CHANNELS ================== + # Channel's markers : + self.chan_markers = visuals.Markers(pos=pos, name='ChanMarkers', + parent=self.node_chan) + # Channel's text : + self.chan_text = visuals.Text(pos=pos, name='ChanText', + parent=self.node_chan, anchor_x='center', + color=chan_txt_color, + font_size=chan_size) + + # ================== CAMERA ================== + self.rect = ((-scale / 2) * (1 + margin), + (-scale / 2) * (1 + margin), + scale * (1. 
+ margin), + scale * (1.11 + margin)) + + # ================== COORDINATES ================== + auto = self._get_channel_coordinates(xyz, channels, system, unit) + if auto: + eucl = np.sqrt(self._xyz[:, 0]**2 + self._xyz[:, 1]**2).max() + self.node_head.transform = vpnormalize(head, dist=2 * eucl) + # Rescale between (-1:1, -1:1) = circle : + circle = vist.STTransform(scale=(.5 / eucl, .5 / eucl, 1.)) + self.node_headfull.transform = circle + # Text translation : + tr = np.array([0., .8, 0.]) + np.array(chan_offset) + else: + # Get coordinates of references along the x and y-axis : + ref_x, ref_y = self._get_ref_coordinates() + # Recenter the topoplot : + t = vist.ChainTransform() + t.prepend(vprecenter(head)) + # Rescale (-ref_x:ref_x, -ref_y:ref_y) (ref_x != ref_y => ellipse) + coef_x = 2 * ref_x / head[:, 0].max() + coef_y = 2 * ref_y / head[:, 1].max() + t.prepend(vist.STTransform(scale=(coef_x, coef_y, 1.))) + self.node_head.transform = t + # Rescale between (-1:1, -1:1) = circle : + circle = vist.STTransform(scale=(.5 / ref_x, .5 / ref_y, 1.)) + self.node_headfull.transform = circle + # Text translation : + tr = np.array([0., .04, 0.]) + np.array(chan_offset) + self.chan_text.transform = vist.STTransform(translate=tr) + + # ================== GRID INTERPOLATION ================== + # Interpolation vectors : + x = y = np.arange(0, self._pix, 1) + xnew = ynew = np.arange(0, self._pix, self._interp) + + # Grid interpolation function : + def _grid_interpolation(grid): + f = interp2d(x, y, grid, kind='linear') + return f(xnew, ynew) + self._grid_interpolation = _grid_interpolation + + self.set_data(data, levels, level_colors, cmap, clim, vmin, under, + vmax, over) + + def __len__(self): + """Return the number of channels.""" + return self._nchan + + def __bool__(self): + """Return if coordinates exist.""" + return hasattr(self, '_xyz') + + def _get_camera(self): + """Get the most adapted camera.""" + cam = scene.cameras.PanZoomCamera(rect=self.rect) + cam.aspect = 
1. + return cam + + def set_data(self, data, levels=None, level_colors='white', cmap='viridis', + clim=None, vmin=None, under='gray', vmax=None, over='red'): + """Set data to the topoplot. + + Parameters + ---------- + data : array_like + Array of data of shape (n_channels) + levels : array_like/int | None + The levels at which the isocurve is constructed. + level_colors : string/array_like | 'white' + The color to use when drawing the line. If a list is given, it + must be of shape (Nlev), if an array is given, it must be of + shape (Nlev, ...). and provide one color per level + (rgba, colorname). By default, all levels are whites. + cmap : string | None + Matplotlib colormap (like 'viridis', 'inferno'...). + clim : tuple/list | None + Colorbar limit. Every values under / over clim will + clip. + vmin : float | None + Every values under vmin will have the color defined + using the under parameter. + vmax : float | None + Every values over vmin will have the color defined + using the over parameter. + under : tuple/string | None + Matplotlib color under vmin. + over : tuple/string | None + Matplotlib color over vmax. + """ + # ================== XYZ / CHANNELS / DATA ================== + xyz = self._xyz[self._keeponly] + channels = list(np.array(self._channels)[self._keeponly]) + data = np.asarray(data, dtype=float).ravel() + if len(data) == len(self): + data = data[self._keeponly] + logger.info(" %i channels detected" % len(channels)) + + # =================== CHANNELS =================== + # Markers : + radius = normalize(data, 10., 30.) 
+ self.chan_markers.set_data(pos=xyz, size=radius, edge_color='black', + face_color=self._chan_mark_color, + symbol=self._chan_mark_symbol) + # Names : + if channels is not None: + self.chan_text.text = channels + self.chan_text.pos = xyz + + # =================== GRID =================== + pos_x, pos_y = xyz[:, 0], xyz[:, 1] + xmin, xmax = pos_x.min(), pos_x.max() + ymin, ymax = pos_y.min(), pos_y.max() + xi = np.linspace(xmin, xmax, self._pix) + yi = np.linspace(ymin, ymax, self._pix) + xh, yi = np.meshgrid(xi, yi) + grid = self._griddata(pos_x, pos_y, data, xh, yi) + + # =================== INTERPOLATION =================== + if self._interp is not None: + grid = self._grid_interpolation(grid) + csize = max(self._pix, grid.shape[0]) + # Variables : + l = csize / 2 # noqa + y, x = np.ogrid[-l:l, -l:l] + mask = x**2 + y**2 < l**2 + nmask = np.invert(mask) + + # =================== DISC =================== + # Force min < off-disc values < max : + d_min, d_max = data.min(), data.max() + grid = normalize(grid, d_min, d_max) + clim = (d_min, d_max) if clim is None else clim + self._update_cbar_args(cmap, clim, vmin, vmax, under, over) + grid_color = array2colormap(grid, **self.to_kwargs()) + grid_color[nmask, -1] = 0. + # grid[nmask] = d_min + # self.disc.clim = clim + # self.disc.cmap = cmap_to_glsl(limits=(d_min, d_max), + # translucent=(None, d_min), + # **self.to_kwargs()) + self.disc.set_data(grid_color) + + # =================== LEVELS =================== + if levels is not None: + if isinstance(levels, int): + levels = np.linspace(d_min, d_max, levels) + if isinstance(level_colors, str): + # Get colormaps : + cmaps = mpl_cmap(bool(level_colors.find('_r') + 1)) + if level_colors in cmaps: + level_colors = array2colormap(levels, cmap=level_colors) + grid[nmask] = np.inf + is_sc_image_installed(True) + self.iso = visuals.Isocurve(data=grid, parent=self.node_head, + levels=levels, color_lev=level_colors, + width=2.) 
+ self.iso.transform = vist.STTransform(translate=(0., 0., -5.)) + + def connect(self, connect, **kwargs): + """Draw connectivity lines between channels. + + Parameters + ---------- + connect : array_like + A 2D array of connectivity links of shape (n_channels, n_channels). + kwargs : dict | {} + Optional arguments are passed to the `visbrain.objects.ConnectObj` + object. + """ + logger.info(" Connect channels") + self._connect = ConnectObj('ChanConnect', self._xyz, connect, + parent=self.node_chan, **kwargs) + + def _get_channel_coordinates(self, xyz, channels, system, unit): + """Get channel coordinates. + + Parameters + ---------- + xyz : array_like | None + Array of source's coordinates. + channels : list | None + List of channel names. + system : {'cartesian', 'spherical'} + Coordinate system. + unit : string | {'degree', 'rad'} + If system is 'spherical', specify if angles are in degrees or + radians. + """ + # ===================== + if (xyz is None) and (channels is None): # Both None + raise ValueError("You must either define sources using the xyz or" + " channels inputs") + elif isinstance(xyz, np.ndarray): # xyz exist + if xyz.shape[1] not in [2, 3]: + raise ValueError("Shape of xyz must be (nchan, 2) or " + "(nchan, 3)") + nchan = xyz.shape[0] + if xyz.shape[1] == 2: + xyz = np.c_[xyz, np.zeros((nchan), dtype=np.float)] + xyz[:, 2] = 1. 
+ keeponly = np.ones((xyz.shape[0],), dtype=bool) + channels = [''] * nchan if channels is None else channels + auto = True + elif (xyz is None) and (channels is not None): # channels exist + if all([isinstance(k, str) for k in channels]): + xyz, keeponly = self._get_coordinates_from_name(channels) + system, unit = 'spherical', 'degree' + auto = False + + # Select channels to use : + if any(keeponly): + if not all(keeponly): + ignore = list(np.array(channels)[np.invert(keeponly)]) + logger.warning("Ignored channels for topoplot :" + " %s" % ', '.join(ignore)) + + # ----------- Conversion ----------- + if isinstance(xyz, np.ndarray): + if system == 'cartesian': + pass # all good + elif system == 'spherical': + xyz = self._spherical_to_cartesian(xyz, unit) + xyz = self._array_project_radial_to3d(xyz) + + self._xyz = xyz + self._channels = channels + self._keeponly = keeponly + self._nchan = len(channels) + + return auto + + def _get_ref_coordinates(self, x='T4', y='Fpz'): + """Get cartesian coordinates for electrodes to use as references. + + The ELAN software use by default spherical coordinates with T4 as the + extrema for the x-axis and Fpz as the extrema for the y-axis. + + Parameters + ---------- + x : string | 'T4' + Name of the electrode t use as a reference for the x-axis. + y : string | 'Fpz' + Name of the electrode t use as a reference for the y-axis. + """ + ref = self._get_coordinates_from_name([x, y])[0] + ref = self._spherical_to_cartesian(ref, unit='degree') + ref = self._array_project_radial_to3d(ref) + ref_x, ref_y = ref[0, 0], ref[1, 1] + return ref_x, ref_y + + @staticmethod + def _get_coordinates_from_name(chan): + """From the name of the channels, find xyz coordinates. + + Parameters + ---------- + chan : list + List of channel names. 
+ """ + # Load the coordinates template : + path = download_file('eegref.npz', astype='topo') + file = np.load(path) + name_ref, xyz_ref = file['chan'], file['xyz'] + keeponly = np.ones((len(chan)), dtype=bool) + # Find and load xyz coordinates : + xyz = np.zeros((len(chan), 3), dtype=np.float32) + for num, k in enumerate(chan): + # Find if the channel is present : + idx = np.where(name_ref == k.lower())[0] + if idx.size: + xyz[num, 0:2] = np.array(xyz_ref[idx[0], :]) + else: + keeponly[num] = False + + return np.array(xyz), keeponly + + @staticmethod + def _spherical_to_cartesian(xyz, unit='rad'): + """Convert spherical coordinates to cartesian. + + Parameters + ---------- + xyz : array_like + The array of spheric coordinate of shape (N, 3). + unit : {'rad', 'degree'} + Specify the unit angles. + + Returns + ------- + xyz : array_like + The cartesian coordinates of the angle of shape (N, 3). + """ + # Get theta / phi : + theta, phi = xyz[:, 0], xyz[:, 1] + if unit is 'degree': + np.deg2rad(theta, out=theta) + np.deg2rad(phi, out=phi) + # Get radius : + r = np.sin(theta) + # Get cartesian coordinates : + np.multiply(np.cos(phi), r, out=xyz[:, 0]) + np.multiply(np.sin(phi), r, out=xyz[:, 1]) + np.cos(theta, xyz[:, 2]) + return xyz + + @staticmethod + def _griddata(x, y, v, xi, yi): + """Make griddata.""" + xy = x.ravel() + y.ravel() * -1j + d = xy[None, :] * np.ones((len(xy), 1)) + d = np.abs(d - d.T) + n = d.shape[0] + d.flat[::n + 1] = 1. + + g = (d * d) * (np.log(d) - 1.) + g.flat[::n + 1] = 0. + weights = np.linalg.solve(g, v.ravel()) + + m, n = xi.shape + zi = np.zeros_like(xi) + xy = xy.T + + g = np.empty(xy.shape) + for i in range(m): + for j in range(n): + d = np.abs(xi[i, j] + -1j * yi[i, j] - xy) + mask = np.where(d == 0)[0] + if len(mask): + d[mask] = 1. + np.log(d, out=g) + g -= 1. + g *= d * d + if len(mask): + g[mask] = 0. 
+ zi[i, j] = g.dot(weights) + return zi + + @staticmethod + def _array_project_radial_to3d(points_2d): + """Radial 3d projection.""" + points_2d = np.atleast_2d(points_2d) + alphas = np.sqrt(np.sum(points_2d**2, -1)) + + betas = np.sin(alphas) / alphas + betas[alphas == 0] = 1 + x = points_2d[..., 0] * betas + y = points_2d[..., 1] * betas + z = np.cos(alphas) + + points_3d = np.asarray([x, y, z]).T + + return points_3d diff --git a/visbrain/objects/ts3d_obj.py b/visbrain/objects/ts3d_obj.py index 4ec3d588a..5fd708f82 100644 --- a/visbrain/objects/ts3d_obj.py +++ b/visbrain/objects/ts3d_obj.py @@ -92,7 +92,7 @@ def __init__(self, name, data, xyz, select=None, line_width=1.5, assert isinstance(select, (list, np.ndarray)) self._select = select # Amplitude / width : - assert isinstance(ts_amp, float) and isinstance(ts_width, float) + assert all([isinstance(k, (int, float)) for k in (ts_amp, ts_width)]) self._ts_amp, self._ts_width = ts_amp, ts_width # Translate : assert len(translate) == 3 diff --git a/visbrain/objects/visbrain_obj.py b/visbrain/objects/visbrain_obj.py index 57ac0b292..c6ea059f9 100644 --- a/visbrain/objects/visbrain_obj.py +++ b/visbrain/objects/visbrain_obj.py @@ -1,4 +1,5 @@ """Main class for Visbrain objects.""" +import os import sys import logging @@ -6,7 +7,9 @@ import vispy.visuals.transforms as vist from .scene_obj import VisbrainCanvas -from ..io import write_fig_canvas, dialog_save +from ..io import (write_fig_canvas, dialog_save, path_to_visbrain_data, + load_config_json, get_data_url_path, download_file, + get_files_in_folders) from ..utils import color2vb, set_log_level, merge_cameras from ..config import CONFIG from ..visuals import CbarBase @@ -59,19 +62,86 @@ def __init__(self, **kw): """Init.""" CbarBase.__init__(self, **kw) _VisbrainShortcuts.__init__(self) - self._cbar_data = None self._default_cblabel = '' - self._minmax = None + self._data_folder = None + # --------------------------- CAMERA --------------------------- def 
_get_camera(self): raise NotImplementedError + # --------------------------- COLORBAR --------------------------- def _update_cbar(self): raise NotImplementedError def _update_cbar_minmax(self): raise NotImplementedError + # --------------------------- DATA --------------------------- + def _df_is_downloadable(self, file): + """Get if a file name could be downloaded.""" + json_path = get_data_url_path() + json_struct = load_config_json(json_path)[self._data_folder] + return file in json_struct + + def _df_is_downloaded(self, file): + """Get if a file has already been downloaded.""" + return os.path.isfile(os.path.join(self._data_folder_path, file)) + + def _df_get_downloadable(self): + """Get the list of files that can be downloaded.""" + json_path = get_data_url_path() + return load_config_json(json_path)[self._data_folder] + + def _df_get_tmp_folder(self): + """Get the tmp associated folder.""" + vb_path = path_to_visbrain_data() + return os.path.join(*(vb_path, 'tmp', self._data_folder)) + + def _df_get_downloaded(self, **kwargs): + """Get the list of files that are already downloaded.""" + main_path = get_files_in_folders(self._data_folder_path, **kwargs) + tmp_path = self._df_get_tmp_folder() + if os.path.isdir(tmp_path): + main_path += get_files_in_folders(tmp_path, **kwargs) + return main_path + + def _df_get_file(self, file, download=True): + """Get the path to a file or download it if needed.""" + is_dl = self._df_is_downloaded(file) + if not is_dl and download: + assert self._df_is_downloadable(file) + self._df_download_file(file) + # Find if the file is in _data_folder or in tmp : + if file in os.listdir(self._data_folder_path): + use_path = self._data_folder_path + else: + use_path = self._df_get_tmp_folder() + return os.path.join(use_path, file) + + def _df_download_file(self, file): + """Download a file.""" + return download_file(file, astype=self._data_folder) + + # ----------- DATA_FOLDER ----------- + @property + def data_folder(self): + """Get the 
data_folder value.""" + return self._data_folder + + @data_folder.setter + def data_folder(self, value): + """Set data_folder value.""" + if isinstance(self._data_folder, str): + raise ValueError("data_folder can only be set once.") + assert isinstance(value, str) + # Create the directory if it doesn't exist : + full_path = path_to_visbrain_data(folder=value) + if not os.path.exists(full_path): + os.makedirs(full_path) + logger.info("%s folder created" % full_path) + self._data_folder = value + self._data_folder_path = full_path + class VisbrainObject(_VisbrainObj): """Base class inherited by all of the Visbrain objects. @@ -100,7 +170,7 @@ def __init__(self, name, parent=None, transform=None, verbose=None, **kw): self._name = name # Transformation : if transform is None: - transform = vist.NullTransform() + transform = vist.STTransform() self._node.transform = transform # Verbose : set_log_level(verbose) @@ -225,6 +295,11 @@ def screenshot(self, saveas, print_size=None, dpi=300., unit='centimeter', widget=canvas.canvas.central_widget, **kw) self._node.parent = None + def copy(self): + """Get a copy of the object.""" + from copy import copy + return copy(self) + # ----------- PARENT ----------- @property def parent(self): diff --git a/visbrain/objects/volume_obj.py b/visbrain/objects/volume_obj.py index fb6680e37..e1d34de82 100644 --- a/visbrain/objects/volume_obj.py +++ b/visbrain/objects/volume_obj.py @@ -9,11 +9,9 @@ from vispy.visuals.transforms import MatrixTransform from .visbrain_obj import VisbrainObject, CombineObjects -from ..utils import (load_predefined_roi, wrap_properties, normalize, - array_to_stt, stt_to_array) -from ..io import (read_nifti, get_files_in_data, get_files_in_folders, - path_to_visbrain_data, get_data_path, path_to_tmp, - save_volume_template, remove_volume_template) +from ..utils import (wrap_properties, normalize, array_to_stt, stt_to_array) +from ..io import (read_nifti, save_volume_template, remove_volume_template, + download_file, 
path_to_visbrain_data, read_mist) logger = logging.getLogger('visbrain') @@ -73,23 +71,40 @@ class _Volume(VisbrainObject): def __init__(self, name, parent, transform, verbose, **kw): """Init.""" VisbrainObject.__init__(self, name, parent, transform, verbose, **kw) + self.data_folder = 'roi' def __call__(self, name, vol=None, hdr=None, labels=None, index=None, system=None): """Load a predefined volume.""" _, ext = os.path.splitext(name) - if ('.nii' in ext) or ('gz' in ext): + if ('.nii' in ext) or ('gz' in ext) or ('img' in ext): vol, _, hdr = read_nifti(name) name = os.path.split(name)[1].split('.nii')[0] self._name = name - logger.info('Loading %s' % name) + logger.info(' %s volume loaded' % name) labels = index = system = None elif isinstance(name, str): - path = self.list(file=name + '.npz') - if len(path): - self._name = os.path.split(path[0])[1].split('.npz')[0] - logger.debug("%s volume loaded" % name) - vol, labels, index, hdr, system = load_predefined_roi(path[0]) + # Switch between MIST and {aal, brodmann...} : + if 'MIST' in name.upper(): + mist_path = path_to_visbrain_data('mist', 'roi') + if not os.path.isdir(mist_path): + download_file('mist.zip', astype='roi', unzip=True) + (vol, labels, index, hdr), system = read_mist(name), 'mni' + else: + to_load, name_npz = None, name + '.npz' + if name in self._df_get_downloaded(): + to_load = self._df_get_file(name_npz, download=False) + elif name_npz in self._df_get_downloadable(): + to_load = self._df_download_file(name_npz) + # Load file : + if isinstance(to_load, str): + self._name = os.path.split(to_load)[1].split('.npz')[0] + arch = np.load(to_load) + vol, hdr = arch['vol'], arch['hdr'] + labels, index = arch['labels'], arch['index'] + system = 'tal' if 'talairach' in to_load else 'mni' + logger.debug("%s volume loaded" % name) + self._name = name self._vol, self._hdr = self._check_volume(vol, hdr) self._labels, self._index, self._system = labels, index, system @@ -105,16 +120,9 @@ def remove(self): 
"""Remove the volume template.""" remove_volume_template(self.name) - def _search_in_path(self): - """Specify where to find volume templates.""" - _vb_data = path_to_visbrain_data(folder='roi') - _data = get_data_path(folder='roi') - _tmp = path_to_tmp(folder='roi') - return _vb_data, _data, _tmp - def list(self, file=None): """Get the list of installed volumes.""" - return get_files_in_folders(*self._search_in_path(), file=file) + return self._df_get_downloaded(file=file) def slice_to_pos(self, sl, axis=None, hdr=None): """Return the position from slice in the volume space.""" @@ -171,10 +179,10 @@ def name(self): @name.setter def name(self, value): """Set name value.""" - if value in get_files_in_data('roi', with_ext=False): + if value in self.list(): self(value) self.update() - self._name = value + self._name = value class _CombineVolume(CombineObjects): @@ -186,7 +194,7 @@ def __init__(self, vol_type, objs=None, select=None, parent=None): def list(self, file=None): """Get the list of installed volumes.""" - return get_files_in_folders(*_Volume._search_in_path(self), file=file) + return self._df_get_downloaded(file=file) def save(self, tmpfile=False): for k in self: @@ -211,7 +219,7 @@ class VolumeObj(_Volume): Volume rendering method. Default is 'mip'. threshold : float | 0. Threshold value for iso rendering method. - cmap : {'Opaquegrays', 'TransFire', 'OpaqueFire', 'TransGrays'} + cmap : {'OpaqueGrays', 'TransFire', 'OpaqueFire', 'TransGrays'} Colormap to use. select : list | None Select some structures in the volume. @@ -276,6 +284,41 @@ def set_data(self, vol, hdr=None, threshold=None, cmap=None, self.threshold = threshold self.cmap = cmap + def extract_activity(self, xyz, radius=2.): + """Extract activity of a volume around (x, y, z) points. + + Parameters + ---------- + xyz : array_like + Array of (x, y, z) coordinates of shape (n_sources, 3) + radius : float | 2. + Radius of the sphere around each point. 
+ + Returns + ------- + act : array_like + Activity array of shape (n_sources,) + """ + assert isinstance(xyz, np.ndarray) and (xyz.shape[1] == 3) + assert isinstance(radius, (int, float)) + n_s = xyz.shape[0] + # hdr conversion : + logger.info(" Convert coordinates in volume space") + hdr = self._hdr + center, extrem = np.array([[0.] * 3]), np.array([[radius] * 3]) + xyz_m = np.round(hdr.imap(xyz)[:, 0:-1]).astype(int) + radius_0 = np.round(hdr.imap(center)[:, 0:-1]).astype(int) + radius_1 = np.round(hdr.imap(extrem)[:, 0:-1]).astype(int) + rd = [max(int(k / 2), 1) for k in np.abs(radius_1 - radius_0).ravel()] + # Extact activity : + logger.info(" Extract activity of the %i sources defined" % n_s) + act = np.zeros((n_s,), dtype=np.float32) + for i, k in enumerate(xyz_m): + act[i] = self._vol[k[0] - rd[0]:k[0] + rd[0], + k[1] - rd[1]:k[1] + rd[1], + k[2] - rd[2]:k[2] + rd[2]].mean() + return act + def update(self): """Update the volume.""" self._vol3d.update() diff --git a/visbrain/tests/_tests_visbrain.py b/visbrain/tests/_tests_visbrain.py index 235f0a087..4cd1ca67a 100644 --- a/visbrain/tests/_tests_visbrain.py +++ b/visbrain/tests/_tests_visbrain.py @@ -13,7 +13,7 @@ class _TestVisbrain(object): def need_file(self, file): """Path to a needed file from visbrain-data.""" - return download_file(file) + return download_file(file, astype='example_data') def to_tmp_dir(self, file=None): """Path to a tmp dir in visbrain-data.""" diff --git a/visbrain/tests/test_files.py b/visbrain/tests/test_files.py deleted file mode 100644 index 0588db3da..000000000 --- a/visbrain/tests/test_files.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Test if needed files are successfully installed with visbrain.""" -import os - -from visbrain.io import get_data_path - - -def _test_file(name, path): - assert os.path.isfile(path) - - -def test_brain_templates(): - """Test if templates are installed.""" - name = "Brain template ({})" - for k in ['B1.npz', 'B2.npz', 'B3.npz']: - 
_test_file(name.format(k), get_data_path(folder='templates', file=k)) - - -def test_roi_templates(): - """Test if templates are installed.""" - name = "ROI template ({})" - for k in ['brodmann.npz', 'aal.npz', 'talairach.npz']: - _test_file(name.format(k), get_data_path(folder='roi', file=k)) - - -def test_icons(): - """Test if Sleep icon is installed.""" - name = "Icons ({})" - icons = ['brain_icon.svg', 'sleep_icon.svg', 'topo_icon.svg', - 'figure_icon.svg', 'colorbar_icon.svg'] - for k in icons: - _test_file(name.format(k), get_data_path(folder='icons', file=k)) - - -def test_topo_file(): - """Test if the topo reference file is installed.""" - path = get_data_path(folder='topo', file='eegref.npz') - _test_file('Topo reference file (eegref.npz)', path) - - -def test_data_url(): - """Test if the data_url.txt is installed.""" - path = get_data_path(file='data_url.txt') - _test_file('URL to data (data_url.txt)', path) diff --git a/visbrain/tests/test_imports.py b/visbrain/tests/test_imports.py index 9f3b825d0..3e8236048 100644 --- a/visbrain/tests/test_imports.py +++ b/visbrain/tests/test_imports.py @@ -23,29 +23,19 @@ def test_import_pyqt(): def test_import_brain(): """Import the Brain module..""" - from visbrain import Brain # noqa + from visbrain.gui import Brain # noqa def test_import_sleep(): """Import the Sleep module..""" - from visbrain import Sleep # noqa + from visbrain.gui import Sleep # noqa def test_import_signal(): """Import the Signal module..""" - from visbrain import Signal # noqa + from visbrain.gui import Signal # noqa def test_import_figure(): """Import the Figure module..""" - from visbrain import Figure # noqa - - -def test_import_topo(): - """Import the Topo module..""" - from visbrain import Topo # noqa - - -def test_import_colorbar(): - """Import the Topo module.""" - from visbrain import Colorbar # noqa + from visbrain.gui import Figure # noqa diff --git a/visbrain/tests/test_xcli.py b/visbrain/tests/test_xcli.py deleted file mode 100644 
index 70f85ef92..000000000 --- a/visbrain/tests/test_xcli.py +++ /dev/null @@ -1,57 +0,0 @@ -"""Test command lines.""" -import pytest -import os - -from click.testing import CliRunner - -from visbrain.cli import cli_fig_hyp, cli_sleep_stats, cli_sleep -from visbrain.io import download_file, path_to_visbrain_data -from visbrain.tests._tests_visbrain import _TestVisbrain -# from visbrain.config import CONFIG - -# File to load : -sleep_file = path_to_visbrain_data('excerpt2.edf') -hypno_file = path_to_visbrain_data('Hypnogram_excerpt2.txt') - -# Download sleep file : -if not os.path.isfile(sleep_file): - download_file('sleep_edf.zip', unzip=True) - - -class TestCli(_TestVisbrain): - """Test cli.py.""" - - ########################################################################### - # HYPNO -> FIG - ########################################################################### - @pytest.mark.skip('Segmentation fault') - def test_cli_fig_hyp(self): - """Test function cli_fig_hyp.""" - import matplotlib - matplotlib.use('agg') - runner = CliRunner() - # Run without output : - r1 = runner.invoke(cli_fig_hyp, ['-h', hypno_file, '-g', True, - '-c', True, '--dpi', 100]) - # Run with output : - out = self.to_tmp_dir('hypno.png') - r2 = runner.invoke(cli_fig_hyp, ['-h', hypno_file, '-g', True, - '-c', True, '-o', out, '--dpi', 100]) - print('Result 1 :', r1.output) - print('Result 2 :', r2.output) - - @pytest.mark.skip('Segmentation fault') - def test_cli_sleep_stats(self): - """Test function cli_sleep_stats.""" - runner = CliRunner() - out = self.to_tmp_dir('hypno.csv') - r1 = runner.invoke(cli_sleep_stats, ['-h', hypno_file, '-o', out]) - print('Result : \n', r1.output) - - @pytest.mark.skip('Segmentation fault') - def test_cli_sleep(self): - """Test function cli_sleep.""" - runner = CliRunner() - data = path_to_visbrain_data(sleep_file) - runner.invoke(cli_sleep, ['-d', data, '-h', hypno_file, - '--show', False]) diff --git a/visbrain/topo/__init__.py 
b/visbrain/topo/__init__.py deleted file mode 100644 index 8c6c52087..000000000 --- a/visbrain/topo/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""From the topo file, import the topo module.""" -from .topo import Topo diff --git a/visbrain/topo/gui/__init__.py b/visbrain/topo/gui/__init__.py deleted file mode 100644 index 28c8561f7..000000000 --- a/visbrain/topo/gui/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .topo_gui import Ui_MainWindow diff --git a/visbrain/topo/gui/topo_gui.py b/visbrain/topo/gui/topo_gui.py deleted file mode 100644 index abb02c6ca..000000000 --- a/visbrain/topo/gui/topo_gui.py +++ /dev/null @@ -1,576 +0,0 @@ -# -*- coding: utf-8 -*- - -# Form implementation generated from reading ui file '/home/etienne/Toolbox/visbrain/visbrain/topo/gui/topo_gui.ui' -# -# Created by: PyQt5 UI code generator 5.6 -# -# WARNING! All changes made in this file will be lost! - -from PyQt5 import QtCore, QtGui, QtWidgets - -class Ui_MainWindow(object): - def setupUi(self, MainWindow): - MainWindow.setObjectName("MainWindow") - MainWindow.setWindowModality(QtCore.Qt.NonModal) - MainWindow.setEnabled(True) - MainWindow.resize(1188, 907) - font = QtGui.QFont() - font.setKerning(True) - font.setStyleStrategy(QtGui.QFont.PreferDefault) - MainWindow.setFont(font) - MainWindow.setTabShape(QtWidgets.QTabWidget.Rounded) - self.centralwidget = QtWidgets.QWidget(MainWindow) - self.centralwidget.setObjectName("centralwidget") - self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.centralwidget) - self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0) - self.horizontalLayout_2.setSpacing(0) - self.horizontalLayout_2.setObjectName("horizontalLayout_2") - self.q_widget = QtWidgets.QWidget(self.centralwidget) - sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) - sizePolicy.setHorizontalStretch(0) - sizePolicy.setVerticalStretch(0) - sizePolicy.setHeightForWidth(self.q_widget.sizePolicy().hasHeightForWidth()) - 
self.q_widget.setSizePolicy(sizePolicy) - self.q_widget.setMinimumSize(QtCore.QSize(0, 0)) - self.q_widget.setMaximumSize(QtCore.QSize(450, 16777215)) - self.q_widget.setObjectName("q_widget") - self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.q_widget) - self.verticalLayout_4.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint) - self.verticalLayout_4.setContentsMargins(0, 0, 0, 0) - self.verticalLayout_4.setObjectName("verticalLayout_4") - self.QuickSettings = QtWidgets.QTabWidget(self.q_widget) - self.QuickSettings.setMaximumSize(QtCore.QSize(16777215, 16777215)) - self.QuickSettings.setAutoFillBackground(False) - self.QuickSettings.setTabShape(QtWidgets.QTabWidget.Rounded) - self.QuickSettings.setMovable(True) - self.QuickSettings.setObjectName("QuickSettings") - self.q_Detection = QtWidgets.QWidget() - self.q_Detection.setObjectName("q_Detection") - self.verticalLayout_22 = QtWidgets.QVBoxLayout(self.q_Detection) - self.verticalLayout_22.setContentsMargins(0, 0, 0, 0) - self.verticalLayout_22.setObjectName("verticalLayout_22") - self.groupBox_3 = QtWidgets.QGroupBox(self.q_Detection) - self.groupBox_3.setCheckable(True) - self.groupBox_3.setObjectName("groupBox_3") - self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.groupBox_3) - self.verticalLayout_3.setContentsMargins(0, -1, 0, -1) - self.verticalLayout_3.setObjectName("verticalLayout_3") - self.gridLayout_4 = QtWidgets.QGridLayout() - self.gridLayout_4.setContentsMargins(-1, 0, -1, -1) - self.gridLayout_4.setObjectName("gridLayout_4") - self.comboBox = QtWidgets.QComboBox(self.groupBox_3) - self.comboBox.setObjectName("comboBox") - self.gridLayout_4.addWidget(self.comboBox, 0, 2, 1, 1) - self.line_6 = QtWidgets.QFrame(self.groupBox_3) - self.line_6.setFrameShape(QtWidgets.QFrame.VLine) - self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken) - self.line_6.setObjectName("line_6") - self.gridLayout_4.addWidget(self.line_6, 0, 1, 1, 1) - self.label_9 = QtWidgets.QLabel(self.groupBox_3) - font = 
QtGui.QFont() - font.setItalic(True) - self.label_9.setFont(font) - self.label_9.setObjectName("label_9") - self.gridLayout_4.addWidget(self.label_9, 0, 0, 1, 1) - self.label_10 = QtWidgets.QLabel(self.groupBox_3) - font = QtGui.QFont() - font.setItalic(True) - self.label_10.setFont(font) - self.label_10.setObjectName("label_10") - self.gridLayout_4.addWidget(self.label_10, 1, 0, 1, 1) - self.doubleSpinBox_6 = QtWidgets.QDoubleSpinBox(self.groupBox_3) - self.doubleSpinBox_6.setObjectName("doubleSpinBox_6") - self.gridLayout_4.addWidget(self.doubleSpinBox_6, 1, 2, 1, 1) - self.line_7 = QtWidgets.QFrame(self.groupBox_3) - self.line_7.setFrameShape(QtWidgets.QFrame.VLine) - self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken) - self.line_7.setObjectName("line_7") - self.gridLayout_4.addWidget(self.line_7, 1, 1, 1, 1) - self.verticalLayout_3.addLayout(self.gridLayout_4) - self.verticalLayout_22.addWidget(self.groupBox_3) - self.groupBox_4 = QtWidgets.QGroupBox(self.q_Detection) - self.groupBox_4.setCheckable(True) - self.groupBox_4.setObjectName("groupBox_4") - self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.groupBox_4) - self.verticalLayout_5.setContentsMargins(0, -1, 0, -1) - self.verticalLayout_5.setObjectName("verticalLayout_5") - self.gridLayout_5 = QtWidgets.QGridLayout() - self.gridLayout_5.setObjectName("gridLayout_5") - self.label_11 = QtWidgets.QLabel(self.groupBox_4) - font = QtGui.QFont() - font.setItalic(True) - self.label_11.setFont(font) - self.label_11.setObjectName("label_11") - self.gridLayout_5.addWidget(self.label_11, 1, 0, 1, 1) - self.doubleSpinBox_7 = QtWidgets.QDoubleSpinBox(self.groupBox_4) - self.doubleSpinBox_7.setObjectName("doubleSpinBox_7") - self.gridLayout_5.addWidget(self.doubleSpinBox_7, 1, 2, 1, 1) - self.label_12 = QtWidgets.QLabel(self.groupBox_4) - font = QtGui.QFont() - font.setItalic(True) - self.label_12.setFont(font) - self.label_12.setObjectName("label_12") - self.gridLayout_5.addWidget(self.label_12, 2, 0, 1, 1) - 
self.line_8 = QtWidgets.QFrame(self.groupBox_4) - self.line_8.setFrameShape(QtWidgets.QFrame.VLine) - self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken) - self.line_8.setObjectName("line_8") - self.gridLayout_5.addWidget(self.line_8, 2, 1, 1, 1) - self.gridLayout_6 = QtWidgets.QGridLayout() - self.gridLayout_6.setObjectName("gridLayout_6") - self.label_13 = QtWidgets.QLabel(self.groupBox_4) - font = QtGui.QFont() - font.setItalic(True) - self.label_13.setFont(font) - self.label_13.setObjectName("label_13") - self.gridLayout_6.addWidget(self.label_13, 1, 0, 1, 1) - self.doubleSpinBox_8 = QtWidgets.QDoubleSpinBox(self.groupBox_4) - self.doubleSpinBox_8.setObjectName("doubleSpinBox_8") - self.gridLayout_6.addWidget(self.doubleSpinBox_8, 0, 1, 1, 1) - self.doubleSpinBox_9 = QtWidgets.QDoubleSpinBox(self.groupBox_4) - self.doubleSpinBox_9.setObjectName("doubleSpinBox_9") - self.gridLayout_6.addWidget(self.doubleSpinBox_9, 1, 1, 1, 1) - self.doubleSpinBox_10 = QtWidgets.QDoubleSpinBox(self.groupBox_4) - self.doubleSpinBox_10.setObjectName("doubleSpinBox_10") - self.gridLayout_6.addWidget(self.doubleSpinBox_10, 2, 1, 1, 1) - self.label_14 = QtWidgets.QLabel(self.groupBox_4) - font = QtGui.QFont() - font.setItalic(True) - self.label_14.setFont(font) - self.label_14.setObjectName("label_14") - self.gridLayout_6.addWidget(self.label_14, 2, 0, 1, 1) - self.label_15 = QtWidgets.QLabel(self.groupBox_4) - font = QtGui.QFont() - font.setItalic(True) - self.label_15.setFont(font) - self.label_15.setObjectName("label_15") - self.gridLayout_6.addWidget(self.label_15, 0, 0, 1, 1) - spacerItem = QtWidgets.QSpacerItem(40, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) - self.gridLayout_6.addItem(spacerItem, 3, 1, 1, 1) - self.gridLayout_5.addLayout(self.gridLayout_6, 2, 2, 1, 1) - self.lineEdit_3 = QtWidgets.QLineEdit(self.groupBox_4) - self.lineEdit_3.setObjectName("lineEdit_3") - self.gridLayout_5.addWidget(self.lineEdit_3, 0, 2, 1, 1) - self.label_16 = 
QtWidgets.QLabel(self.groupBox_4) - font = QtGui.QFont() - font.setItalic(True) - self.label_16.setFont(font) - self.label_16.setObjectName("label_16") - self.gridLayout_5.addWidget(self.label_16, 0, 0, 1, 1) - self.line_9 = QtWidgets.QFrame(self.groupBox_4) - self.line_9.setFrameShape(QtWidgets.QFrame.VLine) - self.line_9.setFrameShadow(QtWidgets.QFrame.Sunken) - self.line_9.setObjectName("line_9") - self.gridLayout_5.addWidget(self.line_9, 0, 1, 1, 1) - self.line_10 = QtWidgets.QFrame(self.groupBox_4) - self.line_10.setFrameShape(QtWidgets.QFrame.VLine) - self.line_10.setFrameShadow(QtWidgets.QFrame.Sunken) - self.line_10.setObjectName("line_10") - self.gridLayout_5.addWidget(self.line_10, 1, 1, 1, 1) - self.verticalLayout_5.addLayout(self.gridLayout_5) - self.verticalLayout_22.addWidget(self.groupBox_4) - self.groupBox = QtWidgets.QGroupBox(self.q_Detection) - self.groupBox.setCheckable(True) - self.groupBox.setObjectName("groupBox") - self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox) - self.verticalLayout.setContentsMargins(0, -1, 0, -1) - self.verticalLayout.setObjectName("verticalLayout") - self.gridLayout = QtWidgets.QGridLayout() - self.gridLayout.setObjectName("gridLayout") - self.label = QtWidgets.QLabel(self.groupBox) - font = QtGui.QFont() - font.setItalic(True) - self.label.setFont(font) - self.label.setObjectName("label") - self.gridLayout.addWidget(self.label, 1, 0, 1, 1) - self.doubleSpinBox = QtWidgets.QDoubleSpinBox(self.groupBox) - self.doubleSpinBox.setObjectName("doubleSpinBox") - self.gridLayout.addWidget(self.doubleSpinBox, 1, 2, 1, 1) - self.label_2 = QtWidgets.QLabel(self.groupBox) - font = QtGui.QFont() - font.setItalic(True) - self.label_2.setFont(font) - self.label_2.setObjectName("label_2") - self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1) - self.line_2 = QtWidgets.QFrame(self.groupBox) - self.line_2.setFrameShape(QtWidgets.QFrame.VLine) - self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) - 
self.line_2.setObjectName("line_2") - self.gridLayout.addWidget(self.line_2, 2, 1, 1, 1) - self.gridLayout_2 = QtWidgets.QGridLayout() - self.gridLayout_2.setObjectName("gridLayout_2") - self.label_4 = QtWidgets.QLabel(self.groupBox) - font = QtGui.QFont() - font.setItalic(True) - self.label_4.setFont(font) - self.label_4.setObjectName("label_4") - self.gridLayout_2.addWidget(self.label_4, 1, 0, 1, 1) - self.doubleSpinBox_2 = QtWidgets.QDoubleSpinBox(self.groupBox) - self.doubleSpinBox_2.setObjectName("doubleSpinBox_2") - self.gridLayout_2.addWidget(self.doubleSpinBox_2, 0, 1, 1, 1) - self.doubleSpinBox_3 = QtWidgets.QDoubleSpinBox(self.groupBox) - self.doubleSpinBox_3.setObjectName("doubleSpinBox_3") - self.gridLayout_2.addWidget(self.doubleSpinBox_3, 1, 1, 1, 1) - self.doubleSpinBox_4 = QtWidgets.QDoubleSpinBox(self.groupBox) - self.doubleSpinBox_4.setObjectName("doubleSpinBox_4") - self.gridLayout_2.addWidget(self.doubleSpinBox_4, 2, 1, 1, 1) - self.label_5 = QtWidgets.QLabel(self.groupBox) - font = QtGui.QFont() - font.setItalic(True) - self.label_5.setFont(font) - self.label_5.setObjectName("label_5") - self.gridLayout_2.addWidget(self.label_5, 2, 0, 1, 1) - self.label_3 = QtWidgets.QLabel(self.groupBox) - font = QtGui.QFont() - font.setItalic(True) - self.label_3.setFont(font) - self.label_3.setObjectName("label_3") - self.gridLayout_2.addWidget(self.label_3, 0, 0, 1, 1) - spacerItem1 = QtWidgets.QSpacerItem(40, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) - self.gridLayout_2.addItem(spacerItem1, 3, 1, 1, 1) - self.gridLayout.addLayout(self.gridLayout_2, 2, 2, 1, 1) - self.lineEdit_2 = QtWidgets.QLineEdit(self.groupBox) - self.lineEdit_2.setObjectName("lineEdit_2") - self.gridLayout.addWidget(self.lineEdit_2, 0, 2, 1, 1) - self.label_8 = QtWidgets.QLabel(self.groupBox) - font = QtGui.QFont() - font.setItalic(True) - self.label_8.setFont(font) - self.label_8.setObjectName("label_8") - self.gridLayout.addWidget(self.label_8, 0, 0, 1, 1) - 
self.line_5 = QtWidgets.QFrame(self.groupBox) - self.line_5.setFrameShape(QtWidgets.QFrame.VLine) - self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken) - self.line_5.setObjectName("line_5") - self.gridLayout.addWidget(self.line_5, 0, 1, 1, 1) - self.line = QtWidgets.QFrame(self.groupBox) - self.line.setFrameShape(QtWidgets.QFrame.VLine) - self.line.setFrameShadow(QtWidgets.QFrame.Sunken) - self.line.setObjectName("line") - self.gridLayout.addWidget(self.line, 1, 1, 1, 1) - self.verticalLayout.addLayout(self.gridLayout) - self.verticalLayout_22.addWidget(self.groupBox) - self.groupBox_2 = QtWidgets.QGroupBox(self.q_Detection) - self.groupBox_2.setCheckable(True) - self.groupBox_2.setObjectName("groupBox_2") - self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox_2) - self.verticalLayout_2.setContentsMargins(0, -1, 0, -1) - self.verticalLayout_2.setObjectName("verticalLayout_2") - self.gridLayout_3 = QtWidgets.QGridLayout() - self.gridLayout_3.setObjectName("gridLayout_3") - self.lineEdit = QtWidgets.QLineEdit(self.groupBox_2) - self.lineEdit.setObjectName("lineEdit") - self.gridLayout_3.addWidget(self.lineEdit, 1, 2, 1, 1) - self.label_6 = QtWidgets.QLabel(self.groupBox_2) - font = QtGui.QFont() - font.setItalic(True) - self.label_6.setFont(font) - self.label_6.setObjectName("label_6") - self.gridLayout_3.addWidget(self.label_6, 1, 0, 1, 1) - self.line_4 = QtWidgets.QFrame(self.groupBox_2) - self.line_4.setFrameShape(QtWidgets.QFrame.VLine) - self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken) - self.line_4.setObjectName("line_4") - self.gridLayout_3.addWidget(self.line_4, 1, 1, 1, 1) - self.label_7 = QtWidgets.QLabel(self.groupBox_2) - font = QtGui.QFont() - font.setItalic(True) - self.label_7.setFont(font) - self.label_7.setObjectName("label_7") - self.gridLayout_3.addWidget(self.label_7, 0, 0, 1, 1) - self.line_3 = QtWidgets.QFrame(self.groupBox_2) - self.line_3.setFrameShape(QtWidgets.QFrame.VLine) - self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) - 
self.line_3.setObjectName("line_3") - self.gridLayout_3.addWidget(self.line_3, 0, 1, 1, 1) - self.doubleSpinBox_5 = QtWidgets.QDoubleSpinBox(self.groupBox_2) - self.doubleSpinBox_5.setObjectName("doubleSpinBox_5") - self.gridLayout_3.addWidget(self.doubleSpinBox_5, 0, 2, 1, 1) - self.verticalLayout_2.addLayout(self.gridLayout_3) - self.verticalLayout_22.addWidget(self.groupBox_2) - spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) - self.verticalLayout_22.addItem(spacerItem2) - self.QuickSettings.addTab(self.q_Detection, "") - self.verticalLayout_4.addWidget(self.QuickSettings) - self.horizontalLayout_2.addWidget(self.q_widget) - self._TopoLayout = QtWidgets.QVBoxLayout() - self._TopoLayout.setObjectName("_TopoLayout") - spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) - self._TopoLayout.addItem(spacerItem3) - self.horizontalLayout_2.addLayout(self._TopoLayout) - MainWindow.setCentralWidget(self.centralwidget) - self.menubar = QtWidgets.QMenuBar(MainWindow) - self.menubar.setGeometry(QtCore.QRect(0, 0, 1188, 25)) - self.menubar.setObjectName("menubar") - self.menuFile = QtWidgets.QMenu(self.menubar) - self.menuFile.setObjectName("menuFile") - MainWindow.setMenuBar(self.menubar) - self.statusbar = QtWidgets.QStatusBar(MainWindow) - self.statusbar.setObjectName("statusbar") - MainWindow.setStatusBar(self.statusbar) - self.actionSave = QtWidgets.QAction(MainWindow) - self.actionSave.setObjectName("actionSave") - self.actionLoad = QtWidgets.QAction(MainWindow) - self.actionLoad.setObjectName("actionLoad") - self.actionCortical_repartition = QtWidgets.QAction(MainWindow) - self.actionCortical_repartition.setObjectName("actionCortical_repartition") - self.actionCortical = QtWidgets.QAction(MainWindow) - self.actionCortical.setObjectName("actionCortical") - self.actionSagittal = QtWidgets.QAction(MainWindow) - 
self.actionSagittal.setObjectName("actionSagittal") - self.actionAxial = QtWidgets.QAction(MainWindow) - self.actionAxial.setObjectName("actionAxial") - self.actionCamera = QtWidgets.QAction(MainWindow) - self.actionCamera.setObjectName("actionCamera") - self.actionLeft = QtWidgets.QAction(MainWindow) - self.actionLeft.setObjectName("actionLeft") - self.actionRight = QtWidgets.QAction(MainWindow) - self.actionRight.setObjectName("actionRight") - self.menuDispSettings = QtWidgets.QAction(MainWindow) - self.menuDispSettings.setCheckable(True) - self.menuDispSettings.setChecked(True) - self.menuDispSettings.setObjectName("menuDispSettings") - self.actionClose = QtWidgets.QAction(MainWindow) - self.actionClose.setObjectName("actionClose") - self.actionProjection = QtWidgets.QAction(MainWindow) - self.actionProjection.setObjectName("actionProjection") - self.actionRepartition = QtWidgets.QAction(MainWindow) - self.actionRepartition.setObjectName("actionRepartition") - self.actionShortcuts = QtWidgets.QAction(MainWindow) - self.actionShortcuts.setCheckable(True) - self.actionShortcuts.setObjectName("actionShortcuts") - self.actionUi_settings = QtWidgets.QAction(MainWindow) - self.actionUi_settings.setCheckable(True) - self.actionUi_settings.setChecked(False) - self.actionUi_settings.setObjectName("actionUi_settings") - self.actionNdPlt = QtWidgets.QAction(MainWindow) - self.actionNdPlt.setCheckable(True) - self.actionNdPlt.setObjectName("actionNdPlt") - self.actionOnedPlt = QtWidgets.QAction(MainWindow) - self.actionOnedPlt.setCheckable(True) - self.actionOnedPlt.setObjectName("actionOnedPlt") - self.actionImage = QtWidgets.QAction(MainWindow) - self.actionImage.setCheckable(True) - self.actionImage.setObjectName("actionImage") - self.actionColormap = QtWidgets.QAction(MainWindow) - self.actionColormap.setCheckable(True) - self.actionColormap.setObjectName("actionColormap") - self.menuShortcut = QtWidgets.QAction(MainWindow) - 
self.menuShortcut.setObjectName("menuShortcut") - self.menuDocumentation = QtWidgets.QAction(MainWindow) - self.menuDocumentation.setObjectName("menuDocumentation") - self.actionScreenshot = QtWidgets.QAction(MainWindow) - self.actionScreenshot.setObjectName("actionScreenshot") - self.actionSave_hypnogram = QtWidgets.QAction(MainWindow) - self.actionSave_hypnogram.setObjectName("actionSave_hypnogram") - self.actionSave_infos = QtWidgets.QAction(MainWindow) - self.actionSave_infos.setObjectName("actionSave_infos") - self.actionSave_scoring = QtWidgets.QAction(MainWindow) - self.actionSave_scoring.setObjectName("actionSave_scoring") - self.actionSave_detection = QtWidgets.QAction(MainWindow) - self.actionSave_detection.setObjectName("actionSave_detection") - self.actionSave_all = QtWidgets.QAction(MainWindow) - self.actionSave_all.setObjectName("actionSave_all") - self.menuExit = QtWidgets.QAction(MainWindow) - self.menuExit.setObjectName("menuExit") - self.actionLoad_hypnogram = QtWidgets.QAction(MainWindow) - self.actionLoad_hypnogram.setObjectName("actionLoad_hypnogram") - self.menuSaveInfoTable = QtWidgets.QAction(MainWindow) - self.menuSaveInfoTable.setObjectName("menuSaveInfoTable") - self.menuSaveScoringTable = QtWidgets.QAction(MainWindow) - self.menuSaveScoringTable.setObjectName("menuSaveScoringTable") - self.actionAll = QtWidgets.QAction(MainWindow) - self.actionAll.setObjectName("actionAll") - self.menuLoadHypno = QtWidgets.QAction(MainWindow) - self.menuLoadHypno.setObjectName("menuLoadHypno") - self.menuLoadData = QtWidgets.QAction(MainWindow) - self.menuLoadData.setEnabled(False) - self.menuLoadData.setObjectName("menuLoadData") - self.actionHypnogram_figure = QtWidgets.QAction(MainWindow) - self.actionHypnogram_figure.setObjectName("actionHypnogram_figure") - self.menuLoadConfig = QtWidgets.QAction(MainWindow) - self.menuLoadConfig.setObjectName("menuLoadConfig") - self.menuSaveConfig = QtWidgets.QAction(MainWindow) - 
self.menuSaveConfig.setObjectName("menuSaveConfig") - self.menuDownload_pdf_doc = QtWidgets.QAction(MainWindow) - self.menuDownload_pdf_doc.setObjectName("menuDownload_pdf_doc") - self.menuSaveHypnogramFigure = QtWidgets.QAction(MainWindow) - self.menuSaveHypnogramFigure.setObjectName("menuSaveHypnogramFigure") - self.menuSaveHypnogramData = QtWidgets.QAction(MainWindow) - self.menuSaveHypnogramData.setObjectName("menuSaveHypnogramData") - self.menuSaveDetectAll = QtWidgets.QAction(MainWindow) - self.menuSaveDetectAll.setObjectName("menuSaveDetectAll") - self.menuSaveDetectSelected = QtWidgets.QAction(MainWindow) - self.menuSaveDetectSelected.setObjectName("menuSaveDetectSelected") - self.menuSaveScreenshotEntire = QtWidgets.QAction(MainWindow) - self.menuSaveScreenshotEntire.setObjectName("menuSaveScreenshotEntire") - self.menuSaveScreenshotSelected = QtWidgets.QAction(MainWindow) - self.menuSaveScreenshotSelected.setEnabled(False) - self.menuSaveScreenshotSelected.setObjectName("menuSaveScreenshotSelected") - self.menuLoadDetectAll = QtWidgets.QAction(MainWindow) - self.menuLoadDetectAll.setObjectName("menuLoadDetectAll") - self.menuLoadDetectSelect = QtWidgets.QAction(MainWindow) - self.menuLoadDetectSelect.setObjectName("menuLoadDetectSelect") - self.menuDispSpec = QtWidgets.QAction(MainWindow) - self.menuDispSpec.setCheckable(True) - self.menuDispSpec.setChecked(True) - self.menuDispSpec.setObjectName("menuDispSpec") - self.menuDispHypno = QtWidgets.QAction(MainWindow) - self.menuDispHypno.setCheckable(True) - self.menuDispHypno.setChecked(True) - self.menuDispHypno.setObjectName("menuDispHypno") - self.menuDispNavbar = QtWidgets.QAction(MainWindow) - self.menuDispNavbar.setCheckable(True) - self.menuDispNavbar.setChecked(True) - self.menuDispNavbar.setObjectName("menuDispNavbar") - self.menuDispTimeax = QtWidgets.QAction(MainWindow) - self.menuDispTimeax.setCheckable(True) - self.menuDispTimeax.setChecked(True) - 
self.menuDispTimeax.setObjectName("menuDispTimeax") - self.menuDispTopo = QtWidgets.QAction(MainWindow) - self.menuDispTopo.setCheckable(True) - self.menuDispTopo.setObjectName("menuDispTopo") - self.menuDispIndic = QtWidgets.QAction(MainWindow) - self.menuDispIndic.setCheckable(True) - self.menuDispIndic.setChecked(True) - self.menuDispIndic.setObjectName("menuDispIndic") - self.actionZoom_mode = QtWidgets.QAction(MainWindow) - self.actionZoom_mode.setCheckable(True) - self.actionZoom_mode.setObjectName("actionZoom_mode") - self.menuDispZoom = QtWidgets.QAction(MainWindow) - self.menuDispZoom.setCheckable(True) - self.menuDispZoom.setObjectName("menuDispZoom") - self.menuSettingCleanHyp = QtWidgets.QAction(MainWindow) - self.menuSettingCleanHyp.setObjectName("menuSettingCleanHyp") - self.menuSaveAnnotations = QtWidgets.QAction(MainWindow) - self.menuSaveAnnotations.setObjectName("menuSaveAnnotations") - self.menuLoadAnnotations = QtWidgets.QAction(MainWindow) - self.menuLoadAnnotations.setObjectName("menuLoadAnnotations") - self.menuScreenshot = QtWidgets.QAction(MainWindow) - self.menuScreenshot.setObjectName("menuScreenshot") - self.menuFile.addAction(self.menuScreenshot) - self.menubar.addAction(self.menuFile.menuAction()) - - self.retranslateUi(MainWindow) - self.QuickSettings.setCurrentIndex(0) - QtCore.QMetaObject.connectSlotsByName(MainWindow) - - def retranslateUi(self, MainWindow): - _translate = QtCore.QCoreApplication.translate - MainWindow.setWindowTitle(_translate("MainWindow", "Topo")) - self.QuickSettings.setToolTip(_translate("MainWindow", "


")) - self.groupBox_3.setTitle(_translate("MainWindow", "Markers")) - self.label_9.setText(_translate("MainWindow", "Form")) - self.label_10.setText(_translate("MainWindow", "Size")) - self.groupBox_4.setTitle(_translate("MainWindow", "Title")) - self.label_11.setText(_translate("MainWindow", "Font\n" -"size")) - self.label_12.setText(_translate("MainWindow", "Text\n" -"offset")) - self.label_13.setText(_translate("MainWindow", "dy")) - self.label_14.setText(_translate("MainWindow", "dy")) - self.label_15.setText(_translate("MainWindow", "dx")) - self.label_16.setText(_translate("MainWindow", "Color")) - self.groupBox.setTitle(_translate("MainWindow", "Text")) - self.label.setText(_translate("MainWindow", "Font\n" -"size")) - self.label_2.setText(_translate("MainWindow", "Text\n" -"offset")) - self.label_4.setText(_translate("MainWindow", "dy")) - self.label_5.setText(_translate("MainWindow", "dy")) - self.label_3.setText(_translate("MainWindow", "dx")) - self.label_8.setText(_translate("MainWindow", "Color")) - self.groupBox_2.setTitle(_translate("MainWindow", "Border")) - self.label_6.setText(_translate("MainWindow", "Color")) - self.label_7.setText(_translate("MainWindow", "Width")) - self.QuickSettings.setTabText(self.QuickSettings.indexOf(self.q_Detection), _translate("MainWindow", "Settings")) - self.menuFile.setTitle(_translate("MainWindow", "File")) - self.actionSave.setText(_translate("MainWindow", "Save")) - self.actionLoad.setText(_translate("MainWindow", "Load")) - self.actionCortical_repartition.setText(_translate("MainWindow", "Cortical repartition")) - self.actionCortical.setText(_translate("MainWindow", "Cortical")) - self.actionSagittal.setText(_translate("MainWindow", "Sagittal")) - self.actionAxial.setText(_translate("MainWindow", "Axial")) - self.actionCamera.setText(_translate("MainWindow", "Camera")) - self.actionLeft.setText(_translate("MainWindow", "Left")) - self.actionRight.setText(_translate("MainWindow", "Right")) - 
self.menuDispSettings.setText(_translate("MainWindow", "Quick settings")) - self.menuDispSettings.setShortcut(_translate("MainWindow", "Ctrl+D")) - self.actionClose.setText(_translate("MainWindow", "Close")) - self.actionClose.setShortcut(_translate("MainWindow", "Ctrl+Q")) - self.actionProjection.setText(_translate("MainWindow", "Projection")) - self.actionProjection.setToolTip(_translate("MainWindow", "

Find all vertices under a distance of t_radius with each source and project s_data to the surface

")) - self.actionProjection.setShortcut(_translate("MainWindow", "Ctrl+P")) - self.actionRepartition.setText(_translate("MainWindow", "Repartition")) - self.actionRepartition.setShortcut(_translate("MainWindow", "Ctrl+R")) - self.actionShortcuts.setText(_translate("MainWindow", "Shortcuts")) - self.actionShortcuts.setShortcut(_translate("MainWindow", "Ctrl+T")) - self.actionUi_settings.setText(_translate("MainWindow", "Ui settings")) - self.actionNdPlt.setText(_translate("MainWindow", "Nd-plot")) - self.actionOnedPlt.setText(_translate("MainWindow", "1d-plot")) - self.actionImage.setText(_translate("MainWindow", "Image")) - self.actionColormap.setText(_translate("MainWindow", "Colormap")) - self.menuShortcut.setText(_translate("MainWindow", "Shortcuts")) - self.menuShortcut.setShortcut(_translate("MainWindow", "Ctrl+T")) - self.menuDocumentation.setText(_translate("MainWindow", "Documentation")) - self.menuDocumentation.setShortcut(_translate("MainWindow", "Ctrl+E")) - self.actionScreenshot.setText(_translate("MainWindow", "Screenshot")) - self.actionScreenshot.setShortcut(_translate("MainWindow", "Ctrl+N")) - self.actionSave_hypnogram.setText(_translate("MainWindow", "Save hypnogram data")) - self.actionSave_infos.setText(_translate("MainWindow", "Save infos table")) - self.actionSave_scoring.setText(_translate("MainWindow", "Save scoring table")) - self.actionSave_detection.setText(_translate("MainWindow", "Save detection table")) - self.actionSave_all.setText(_translate("MainWindow", "Save all")) - self.menuExit.setText(_translate("MainWindow", "Exit")) - self.menuExit.setShortcut(_translate("MainWindow", "Ctrl+Q")) - self.actionLoad_hypnogram.setText(_translate("MainWindow", "Load hypnogram")) - self.menuSaveInfoTable.setText(_translate("MainWindow", "Stats info table")) - self.menuSaveScoringTable.setText(_translate("MainWindow", "Scoring table")) - self.actionAll.setText(_translate("MainWindow", "All")) - self.actionAll.setShortcut(_translate("MainWindow", 
"Ctrl+S")) - self.menuLoadHypno.setText(_translate("MainWindow", "Hypnogram")) - self.menuLoadData.setText(_translate("MainWindow", "Data")) - self.actionHypnogram_figure.setText(_translate("MainWindow", "Hypnogram figure")) - self.menuLoadConfig.setText(_translate("MainWindow", "GUI config")) - self.menuSaveConfig.setText(_translate("MainWindow", "GUI config")) - self.menuDownload_pdf_doc.setText(_translate("MainWindow", "Download pdf doc")) - self.menuSaveHypnogramFigure.setText(_translate("MainWindow", "Figure")) - self.menuSaveHypnogramData.setText(_translate("MainWindow", "Data")) - self.menuSaveDetectAll.setText(_translate("MainWindow", "All detections")) - self.menuSaveDetectSelected.setText(_translate("MainWindow", "Selected detection")) - self.menuSaveScreenshotEntire.setText(_translate("MainWindow", "Entire window")) - self.menuSaveScreenshotSelected.setText(_translate("MainWindow", "Selected canvas")) - self.menuLoadDetectAll.setText(_translate("MainWindow", "All")) - self.menuLoadDetectSelect.setText(_translate("MainWindow", "Selected")) - self.menuDispSpec.setText(_translate("MainWindow", "Spectrogram")) - self.menuDispSpec.setShortcut(_translate("MainWindow", "S")) - self.menuDispHypno.setText(_translate("MainWindow", "Hypnogram")) - self.menuDispHypno.setShortcut(_translate("MainWindow", "H")) - self.menuDispNavbar.setText(_translate("MainWindow", "Navigation bar")) - self.menuDispNavbar.setShortcut(_translate("MainWindow", "P")) - self.menuDispTimeax.setText(_translate("MainWindow", "Time axis")) - self.menuDispTimeax.setShortcut(_translate("MainWindow", "X")) - self.menuDispTopo.setText(_translate("MainWindow", "Topoplot")) - self.menuDispTopo.setShortcut(_translate("MainWindow", "T")) - self.menuDispIndic.setText(_translate("MainWindow", "Time indicators")) - self.menuDispIndic.setShortcut(_translate("MainWindow", "I")) - self.actionZoom_mode.setText(_translate("MainWindow", "Zoom mode")) - self.actionZoom_mode.setShortcut(_translate("MainWindow", 
"Z")) - self.menuDispZoom.setText(_translate("MainWindow", "Zoom mode")) - self.menuDispZoom.setShortcut(_translate("MainWindow", "Z")) - self.menuSettingCleanHyp.setText(_translate("MainWindow", "Clean hypnogram")) - self.menuSaveAnnotations.setText(_translate("MainWindow", "Annotation")) - self.menuLoadAnnotations.setText(_translate("MainWindow", "Annotations")) - self.menuScreenshot.setText(_translate("MainWindow", "Screenshot")) - self.menuScreenshot.setShortcut(_translate("MainWindow", "Ctrl+N")) - - -if __name__ == "__main__": - import sys - app = QtWidgets.QApplication(sys.argv) - MainWindow = QtWidgets.QMainWindow() - ui = Ui_MainWindow() - ui.setupUi(MainWindow) - MainWindow.show() - sys.exit(app.exec_()) - diff --git a/visbrain/topo/gui/topo_gui.ui b/visbrain/topo/gui/topo_gui.ui deleted file mode 100644 index 8b62664d1..000000000 --- a/visbrain/topo/gui/topo_gui.ui +++ /dev/null @@ -1,1012 +0,0 @@ - - - MainWindow - - - Qt::NonModal - - - true - - - - 0 - 0 - 1188 - 907 - - - - - PreferDefault - true - - - - Topo - - - QTabWidget::Rounded - - - - - 0 - - - 0 - - - - - - 0 - 0 - - - - - 0 - 0 - - - - - 450 - 16777215 - - - - - QLayout::SetDefaultConstraint - - - - - - 16777215 - 16777215 - - - - <html><head/><body><p><br/></p></body></html> - - - false - - - QTabWidget::Rounded - - - 0 - - - true - - - - Settings - - - - - - Markers - - - true - - - - 0 - - - 0 - - - - - 0 - - - - - - - - Qt::Vertical - - - - - - - - true - - - - Form - - - - - - - - true - - - - Size - - - - - - - - - - Qt::Vertical - - - - - - - - - - - - Title - - - true - - - - 0 - - - 0 - - - - - - - - true - - - - Font -size - - - - - - - - - - - true - - - - Text -offset - - - - - - - Qt::Vertical - - - - - - - - - - true - - - - dy - - - - - - - - - - - - - - - - - true - - - - dy - - - - - - - - true - - - - dx - - - - - - - Qt::Horizontal - - - QSizePolicy::Expanding - - - - 40 - 0 - - - - - - - - - - - - - - true - - - - Color - - - - - - - Qt::Vertical - - - - - - - 
Qt::Vertical - - - - - - - - - - - - Text - - - true - - - - 0 - - - 0 - - - - - - - - true - - - - Font -size - - - - - - - - - - - true - - - - Text -offset - - - - - - - Qt::Vertical - - - - - - - - - - true - - - - dy - - - - - - - - - - - - - - - - - true - - - - dy - - - - - - - - true - - - - dx - - - - - - - Qt::Horizontal - - - QSizePolicy::Expanding - - - - 40 - 0 - - - - - - - - - - - - - - true - - - - Color - - - - - - - Qt::Vertical - - - - - - - Qt::Vertical - - - - - - - - - - - - Border - - - true - - - - 0 - - - 0 - - - - - - - - - - - true - - - - Color - - - - - - - Qt::Vertical - - - - - - - - true - - - - Width - - - - - - - Qt::Vertical - - - - - - - - - - - - - - - Qt::Vertical - - - - 20 - 40 - - - - - - - - - - - - - - - - - Qt::Horizontal - - - - 40 - 20 - - - - - - - - - - - - 0 - 0 - 1188 - 25 - - - - - File - - - - - - - - - Save - - - - - Load - - - - - Cortical repartition - - - - - Cortical - - - - - Sagittal - - - - - Axial - - - - - Camera - - - - - Left - - - - - Right - - - - - true - - - true - - - Quick settings - - - Ctrl+D - - - - - Close - - - Ctrl+Q - - - - - Projection - - - <html><head/><body><p>Find all vertices under a distance of t_radius with each source and project s_data to the surface</p></body></html> - - - Ctrl+P - - - - - Repartition - - - Ctrl+R - - - - - true - - - Shortcuts - - - Ctrl+T - - - - - true - - - false - - - Ui settings - - - - - true - - - Nd-plot - - - - - true - - - 1d-plot - - - - - true - - - Image - - - - - true - - - Colormap - - - - - Shortcuts - - - Ctrl+T - - - - - Documentation - - - Ctrl+E - - - - - Screenshot - - - Ctrl+N - - - - - Save hypnogram data - - - - - Save infos table - - - - - Save scoring table - - - - - Save detection table - - - - - Save all - - - - - Exit - - - Ctrl+Q - - - - - Load hypnogram - - - - - Stats info table - - - - - Scoring table - - - - - All - - - Ctrl+S - - - - - Hypnogram - - - - - false - - - Data - - - - - Hypnogram figure - - - - - GUI config - - - - 
- GUI config - - - - - Download pdf doc - - - - - Figure - - - - - Data - - - - - All detections - - - - - Selected detection - - - - - Entire window - - - - - false - - - Selected canvas - - - - - All - - - - - Selected - - - - - true - - - true - - - Spectrogram - - - S - - - - - true - - - true - - - Hypnogram - - - H - - - - - true - - - true - - - Navigation bar - - - P - - - - - true - - - true - - - Time axis - - - X - - - - - true - - - Topoplot - - - T - - - - - true - - - true - - - Time indicators - - - I - - - - - true - - - Zoom mode - - - Z - - - - - true - - - Zoom mode - - - Z - - - - - Clean hypnogram - - - - - Annotation - - - - - Annotations - - - - - Screenshot - - - Ctrl+N - - - - - - diff --git a/visbrain/topo/tests/test_topo.py b/visbrain/topo/tests/test_topo.py deleted file mode 100644 index a0c85bce3..000000000 --- a/visbrain/topo/tests/test_topo.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Test Topo module and related methods.""" -import numpy as np -from visbrain import Topo - -tp = Topo() - - -class TestTopo(object): - """Test topo.py.""" - - def test_add_topoplot(self): - """Test function brain_creation.""" - name = 'Topo_1' - channels = ['C3', 'C4', 'Cz', 'Fz', 'Pz'] - data = [10., 20., 30., 10., 10.] 
- connect = np.random.rand(len(data), len(data)) - title = 'Basic topoplot illustration' - cblabel = 'Colorbar label' - tp.add_topoplot(name, data, channels=channels, title=title, - cblabel=cblabel, c_connect=connect) - - def test_add_shared_colorbar(self): - """Test function add_shared_colorbar.""" - kwargs = {'cmap': 'viridis', 'clim': (-1.02, 1.01), 'vmin': -.81, - 'under': 'gray', 'vmax': .85, 'over': 'red'} - tp.add_shared_colorbar('Shared', col=2, row_span=2, - rect=(0.1, -2, 1.6, 4), - cblabel='Shared colorbar', **kwargs) diff --git a/visbrain/topo/topo.py b/visbrain/topo/topo.py deleted file mode 100644 index ba830b261..000000000 --- a/visbrain/topo/topo.py +++ /dev/null @@ -1,266 +0,0 @@ -"""Topo class for topographic representations.""" -import numpy as np - -import vispy.scene.cameras as viscam -from vispy.scene import Node - -from ..pyqt_module import PyQtModule -from .ui_init import UiInit -from .ui_elements import UiElements -from ..objects import ConnectObj -from ..visuals import TopoMesh, CbarVisual - - -__all__ = ('Topo') - - -class Topo(PyQtModule, UiInit, UiElements): - """Display topographic representation.""" - - def __init__(self, verbose=None): - """Init.""" - PyQtModule.__init__(self, verbose=verbose, to_describe='_grid', - icon='topo_icon.svg', show_settings=False) - self._topos = {} - self._topoGrid = {} - - # ====================== App creation ====================== - UiInit.__init__(self) - UiElements.__init__(self) - - def __getitem__(self, name): - """Get the object name.""" - return self._topos[name] - - def __setitem__(self, name, value): - """Set the object name.""" - self._topos[name] = value - - def add_topoplot(self, name, data, xyz=None, channels=None, c_connect=None, - c_select=None, c_cmap='inferno', c_linewidth=3., - system='cartesian', unit='degree', title=None, - title_color='black', title_size=5., line_color='black', - line_width=2., chan_size=2., chan_offset=(0., 0., 0.), - chan_mark_color='white', 
chan_mark_symbol='disc', - chan_txt_color='black', bgcolor='white', cbar=True, - cblabel=None, cb_txt_size=4., levels=None, - level_colors='white', cmap='viridis', clim=None, - vmin=None, under='gray', vmax=None, over='red', row=0, - col=0, row_span=1, col_span=1, margin=.05): - """Add a subplot embedded in a subplot. - - For now, there's two for using coordinates to define the subplot : - - * Using the xyz input (must either be in cartesian or spherical - coordinate system) - * Using the channel input. The Topo class contains a list of - existing channel names and will try to identify those in the - channels variable. - - Parameters - ---------- - name : string - Name of the topographic plot. - data : array_like - Array of data of shape (n_channels,). - xyz : array_like | None - Array of source's coordinates. - channels : list | None - List of channel names. - c_connect : array_like | None - Array of connections between sources. Must be a (n_channels, - n_channels) upper triangular array of connecivity strength. - c_select : array_like | None - Array of boolean values to select which edges to display or to - hide. - c_cmap : string | 'inferno' - Colormap name to use for the edges color. - c_linewidth : float | 3. - Connectivity edges line width. - system : {'cartesian', 'spherical'} - Coordinate system. - unit : {'degree', 'rad'} - If system is 'spherical', specify if angles are in degrees or - radians. - title : string | None - Title of the topoplot. - title_color : array_like/string | 'black' - Color for the title. - title_size : float | 20. - Size of the title. - line_color : array_like/string | 'black' - Color of lines for the head, nose and eras. - line_width : float | 4. - Line width for the head, nose and eras. - chan_size : float | 12. - Size of channel names text. - chan_mark_color : array_like/string | 'white' - Color of channel markers. - chan_mark_symbol : string | 'disc' - Symbol to use for markers. 
Use disc, arrow, ring, clobber, square, - diamond, vbar, hbar, cross, tailed_arrow, x, triangle_up, - triangle_down, and star. - chan_txt_color : array_like/string | 'black' - Color of channel names. - bgcolor : array_like/string | 'white' - Background color. - cbar : bool | True - Attach a colorbar to the topoplot. - cblabel : string | None - Colorbar label. - cb_txt_size : float | 16. - Text size for the colorbar limits and label. - levels : array_like/int | None - The levels at which the isocurve is constructed. - level_colors : string/array_like | 'white' - The color to use when drawing the line. If a list is given, it - must be of shape (Nlev), if an array is given, it must be of - shape (Nlev, ...). and provide one color per level - (rgba, colorname). By default, all levels are whites. - cmap : string | None - Matplotlib colormap (like 'viridis', 'inferno'...). - clim : tuple/list | None - Colorbar limit. Every values under / over clim will - clip. - vmin : float | None - Every values under vmin will have the color defined - using the under parameter. - vmax : float | None - Every values over vmin will have the color defined - using the over parameter. - under : tuple/string | None - Matplotlib color under vmin. - over : tuple/string | None - Matplotlib color over vmax. - row : int | 0 - The row in which to add the widget (0 is the topmost row) - col : int | 0 - The column in which to add the widget (0 is the leftmost column) - row_span : int | 1 - The number of rows to be occupied by the topoplot. - col_span : int | 1 - The number of columns to be occupied by the topoplot. - margin : float | .05 - Margin percentage between the topoplot and the edge of the subplot. 
- """ - # Check if name is available : - self._check_name_for(name, 'topoplot') - # Create the topoplot and set the data : - topo = TopoMesh(xyz, channels, system, unit, title, title_color, - title_size, line_color, line_width, chan_size, - chan_offset, chan_mark_color, chan_mark_symbol, - chan_txt_color, bgcolor, cbar, cb_txt_size, - margin) - topo.set_data(data, levels, level_colors, cmap, clim, vmin, under, - vmax, over, cblabel) - # Connectivity : - if isinstance(c_connect, np.ndarray): - assert c_connect.shape == (len(topo), len(topo)) - xyz = topo._xyz[topo._keeponly] - self.connect = ConnectObj('TopoConnect', xyz, c_connect, - cmap=c_cmap, select=c_select, - line_width=c_linewidth, - parent=topo.node_chan) - self[name] = topo - # Create a PanZoom camera : - cam = viscam.PanZoomCamera(aspect=1., rect=topo.rect) - cam.set_default_state() - # Create a subplot and add the camera : - self._topoGrid[name] = self._grid.add_view(row, col, row_span, - col_span, bgcolor=bgcolor, - camera=cam) - # Add the topoplot to the subplot : - self._topoGrid[name].add(self[name].node) - - def add_shared_colorbar(self, name, cmap='viridis', clim=(0, 1), vmin=None, - vmax=None, under='gray', over='red', cblabel='', - cbtxtsz=5., cbtxtsh=2.3, txtcolor='black', - txtsz=3., txtsh=1.2, width=.17, border=True, bw=2., - limtxt=True, bgcolor='white', ndigits=2, row=0, - col=0, row_span=1, col_span=1, - rect=(-1.2, -1.2, 2.4, 2.4)): - """Add a shared colorbar across subplots. - - Parameters - ---------- - cmap : string | None - Matplotlib colormap (like 'viridis', 'inferno'...). - clim : tuple/list | None - Colorbar limit. Every values under / over clim will - clip. - vmin : float | None - Every values under vmin will have the color defined - using the under parameter. - vmax : float | None - Every values over vmin will have the color defined - using the over parameter. - under : tuple/string | None - Matplotlib color under vmin. - over : tuple/string | None - Matplotlib color over vmax. 
- cblabel : string | '' - Colorbar label. - cbtxtsz : float | 5.. - Text size of the colorbar label. - cbtxtsh : float | 2.3 - Shift for the colorbar label. - txtcolor : string | 'white' - Text color. - txtsz : float | 3. - Text size for clim/vmin/vmax text. - txtsh : float | 1.2 - Shift for clim/vmin/vmax text. - border : bool | True - Display colorbar borders. - bw : float | 2. - Border width. - limtxt : bool | True - Display vmin/vmax text. - bgcolor : tuple/string | (0., 0., 0.) - Background color of the colorbar canvas. - ndigits : int | 2 - Number of digits for the text. - row : int | 0 - The row in which to add the widget (0 is the topmost row) - col : int | 0 - The column in which to add the widget (0 is the leftmost column) - row_span : int | 1 - The number of rows to be occupied by the topoplot. - col_span : int | 1 - The number of columns to be occupied by the topoplot. - rect : tuple | (-1.2, -1.2, 2.4, 2.4) - The 2-D area on the screen to display. The rect input describe - (x_start, y_start, x_width, y_height). This variable can be used - to translate or scale. 
- """ - # Check if name is avaible : - self._check_name_for(name, 'colorbar') - # Create a PanZoom camera : - cam = viscam.PanZoomCamera(rect=rect) - cam.set_default_state() - # Create a subplot and add the camera : - self._topoGrid[name] = self._grid.add_view(row, col, row_span, - col_span, bgcolor=bgcolor, - camera=cam) - # Get if vmin and vmax exist : - isvmin, isvmax = vmin is not None, vmax is not None - # Create a colorbar object : - parent = Node(name=name) - cbar = CbarVisual(cmap=cmap, clim=clim, vmin=vmin, isvmin=isvmin, - vmax=vmax, isvmax=isvmax, under=under, over=over, - cblabel=cblabel, cbtxtsz=cbtxtsz, cbtxtsh=cbtxtsh, - txtcolor=txtcolor, txtsz=txtsz, txtsh=txtsh, - width=width, border=border, bw=bw, limtxt=limtxt, - bgcolor=bgcolor, ndigits=ndigits, parent=parent) - self[name] = cbar - # Add the colorbar to the subplot : - self._topoGrid[name].add(parent) - - def _check_name_for(self, name, use='topoplot'): - """Check if the object name already exist.""" - if not isinstance(name, str): - raise ValueError("name must be a string describing the name of the" - " " + use) - elif name in list(self._topos.keys()): - raise ValueError("'" + name + "' already exist. 
Use a different" - " name for this " + use + ".") diff --git a/visbrain/topo/ui_elements/__init__.py b/visbrain/topo/ui_elements/__init__.py deleted file mode 100644 index ef4ee6610..000000000 --- a/visbrain/topo/ui_elements/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""Import UiElements.""" -from .ui_elements import UiElements diff --git a/visbrain/topo/ui_elements/ui_elements.py b/visbrain/topo/ui_elements/ui_elements.py deleted file mode 100644 index fe320edee..000000000 --- a/visbrain/topo/ui_elements/ui_elements.py +++ /dev/null @@ -1,14 +0,0 @@ -"""From the topo file, import the topo module.""" -from .ui_settings import UiSettings -from .ui_menu import UiMenu -from .ui_screenshot import UiScreenshot - - -class UiElements(UiSettings, UiMenu, UiScreenshot): - """Initialize UiElements.""" - - def __init__(self): - """Init.""" - UiSettings.__init__(self) - UiMenu.__init__(self) - UiScreenshot.__init__(self) diff --git a/visbrain/topo/ui_elements/ui_menu.py b/visbrain/topo/ui_elements/ui_menu.py deleted file mode 100644 index 13188cfb4..000000000 --- a/visbrain/topo/ui_elements/ui_menu.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Main class for interactions with the menu.""" - -from ...utils import HelpMenu - - -class UiMenu(HelpMenu): - """Interactions between the menu and the user.""" - - def __init__(self): - """Init.""" - base = 'http://visbrain.org/topo.html' - sections = {'Topo': base} - HelpMenu.__init__(self, sections, False) - self.menuScreenshot.triggered.connect(self._fcn_menu_screenshot) - - def _fcn_menu_screenshot(self): - """Take a screenshot from the menu.""" - self.show_gui_screenshot() diff --git a/visbrain/topo/ui_elements/ui_screenshot.py b/visbrain/topo/ui_elements/ui_screenshot.py deleted file mode 100644 index 5f2dfe32f..000000000 --- a/visbrain/topo/ui_elements/ui_screenshot.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Screenshot window and related functions.""" -from ...io import write_fig_pyqt, write_fig_canvas, dialog_save -from ...utils import 
ScreenshotPopup - - -class UiScreenshot(object): - """Initialize the screenshot GUI and functions to apply it.""" - - def __init__(self): - """Init.""" - canvas_names = ['main'] - self._ssGui = ScreenshotPopup(self._fcn_run_screenshot, - canvas_names=canvas_names) - - def show_gui_screenshot(self): - """Display the GUI screenhot.""" - self._ssGui.show() - - def _fcn_run_screenshot(self): - """Run the screenshot.""" - # Get filename : - filename = dialog_save(self, 'Screenshot', 'screenshot', "PNG (*.PNG)" - ";;TIFF (*.tiff);;JPG (*.jpg);;" - "All files (*.*)") - # Get screenshot arguments : - kwargs = self._ssGui.to_kwargs() - - if kwargs['entire']: # Screenshot of the entire window - self._ssGui._ss.close() - write_fig_pyqt(self, filename) - else: # Screenshot of selected canvas - # Remove unsed entries : - del kwargs['entire'], kwargs['canvas'] - write_fig_canvas(filename, self._view.canvas, widget=self._grid, - **kwargs) diff --git a/visbrain/topo/ui_elements/ui_settings.py b/visbrain/topo/ui_elements/ui_settings.py deleted file mode 100644 index 2b6871c92..000000000 --- a/visbrain/topo/ui_elements/ui_settings.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Main class for interactions with the settings panel.""" - - -class UiSettings(object): - """Interactions between the settings panel and the user.""" - - def __init__(self): - """Init.""" - pass diff --git a/visbrain/topo/ui_init.py b/visbrain/topo/ui_init.py deleted file mode 100644 index 42382aaa4..000000000 --- a/visbrain/topo/ui_init.py +++ /dev/null @@ -1,37 +0,0 @@ -"""This script group the diffrent graphical components. - -Grouped components : - * PyQt elements (window, Pyqt functions...) 
- * Vispy canvas functions - * User shortcuts -""" -from PyQt5 import QtWidgets -from vispy import app, scene - -from .gui import Ui_MainWindow -from ..config import CONFIG - - -class TopoCanvas(object): - """Canvas creation.""" - - def __init__(self, title='', bgcolor=(0, 0, 0)): - """Init.""" - # Initialize main canvas: - self.canvas = scene.SceneCanvas(keys='interactive', show=False, - dpi=600, bgcolor=bgcolor, - fullscreen=True, resizable=True, - title=title, app=CONFIG['VISPY_APP']) - - -class UiInit(QtWidgets.QMainWindow, Ui_MainWindow, app.Canvas): - """Group and initialize the graphical elements and interactions.""" - - def __init__(self): - """Init.""" - # Create the main window : - super(UiInit, self).__init__(None) - self.setupUi(self) - self._view = TopoCanvas('MainCanvas', 'white') - self._grid = self._view.canvas.central_widget.add_grid() - self._TopoLayout.addWidget(self._view.canvas.native) diff --git a/visbrain/utils/color.py b/visbrain/utils/color.py index cc9bcf571..dc49e2736 100644 --- a/visbrain/utils/color.py +++ b/visbrain/utils/color.py @@ -4,21 +4,182 @@ flexible control of diffrent problem involving colors (like turn an array / string / faces into RBGA colors, defining the basic colormap object...) """ +import logging import numpy as np -from vispy.color.colormap import Colormap +from vispy.color.colormap import Colormap as VispyColormap from matplotlib import cm import matplotlib.colors as mplcol from warnings import warn from .sigproc import normalize +from .mesh import vispy_array -__all__ = ('color2vb', 'array2colormap', 'cmap_to_glsl', 'dynamic_color', - 'color2faces', 'type_coloring', 'mpl_cmap', 'color2tuple', - 'mpl_cmap_index') +__all__ = ('Colormap', 'color2vb', 'array2colormap', 'cmap_to_glsl', + 'dynamic_color', 'color2faces', 'type_coloring', 'mpl_cmap', + 'color2tuple', 'mpl_cmap_index') + + +logger = logging.getLogger('visbrain') + + +class Colormap(object): + """Main colormap class. 
+ + Parameters + ---------- + cmap : string | inferno + Matplotlib colormap + clim : tuple/list | None + Limit of the colormap. The clim parameter must be a tuple / list + of two float number each one describing respectively the (min, max) + of the colormap. Every values under clim[0] or over clim[1] will + peaked. + alpha : float | 1.0 + The opacity to use. The alpha parameter must be between 0 and 1. + vmin : float | None + Threshold from which every color will have the color defined using + the under parameter bellow. + under : tuple/string | 'dimgray' + Matplotlib color for values under vmin. + vmax : float | None + Threshold from which every color will have the color defined using + the over parameter bellow. + over : tuple/string | 'darkred' + Matplotlib color for values over vmax. + translucent : tuple | None + Set a specific range translucent. With f_1 and f_2 two floats, if + translucent is : + + * (f_1, f_2) : values between f_1 and f_2 are set to translucent + * (None, f_2) x <= f_2 are set to translucent + * (f_1, None) f_1 <= x are set to translucent + lut_len : int | 1024 + Number of levels for the colormap. + interpolation : {None, 'linear', 'cubic'} + Interpolation type. Default is None. + + Attributes + ---------- + data : array_like + Color data of shape (n_data, 4) + shape : tuple + Shape of the data. + r : array_like + Red levels. + g : array_like + Green levels. + b : array_like + Blue levels. + rgb : array_like + RGB levels. + alpha : array_like + Transparency level. + glsl : vispy.colors.Colormap + GL colormap version. 
+ """ + + def __init__(self, cmap='viridis', clim=None, vmin=None, under=None, + vmax=None, over=None, translucent=None, alpha=1., + lut_len=1024, interpolation=None): + """Init.""" + # Keep color parameters into a dict : + self._kw = dict(cmap=cmap, clim=clim, vmin=vmin, vmax=vmax, + under=under, over=over, translucent=translucent, + alpha=alpha) + # Color conversion : + if isinstance(cmap, np.ndarray): + assert (cmap.ndim == 2) and (cmap.shape[-1] in (3, 4)) + # cmap = single color : + if (cmap.shape[0] == 1) and isinstance(interpolation, str): + logger.debug("Colormap : unique color repeated.") + data = np.tile(cmap, (lut_len, 1)) + elif (cmap.shape[0] == lut_len) or (interpolation is None): + logger.debug("Colormap : Unique repeated.") + data = cmap + else: + from scipy.interpolate import interp2d + n_ = cmap.shape[1] + x, y = np.linspace(0, 1, n_), np.linspace(0, 1, cmap.shape[0]) + f = interp2d(x, y, cmap, kind=interpolation) + # Interpolate colormap : + data = f(x, np.linspace(0, 1, lut_len)) + elif isinstance(cmap, str): + data = array2colormap(np.linspace(0., 1., lut_len), **self._kw) + # Alpha correction : + if data.shape[-1] == 3: + data = np.c_[data, np.full((data.shape[0],), alpha)] + # NumPy float32 conversion : + self._data = vispy_array(data) + + def to_rgba(self, data): + """Turn a data vector into colors using colormap properties. + + Parameters + ---------- + data : array_like + Vector of data of shape (n_data,). 
+ + Returns + ------- + color : array_like + Array of colors of shape (n_data, 4) + """ + if isinstance(self._kw['cmap'], np.ndarray): + return self._data + else: + return array2colormap(data, **self._kw) + + def __len__(self): + """Get the number of colors in the colormap.""" + return self._data.shape[0] + + def __getitem__(self, name): + """Get a color item.""" + return self._kw[name] + + @property + def data(self): + """Get colormap data.""" + return self._data + + @property + def shape(self): + """Get the shape of the data.""" + return self._data.shape + + @property + def glsl(self): + """Get a glsl version of the colormap.""" + return cmap_to_glsl(lut_len=len(self), **self._kw) + + @property + def r(self): + """Get red levels.""" + return self._data[:, 0] + + @property + def g(self): + """Get green levels.""" + return self._data[:, 1] + + @property + def b(self): + """Get blue levels.""" + return self._data[:, 2] + + @property + def rgb(self): + """Get rgb levels.""" + return self._data[:, 0:3] + + @property + def alpha(self): + """Get transparency level.""" + return self._data[:, -1] def color2vb(color=None, default=(1., 1., 1.), length=1, alpha=1.0, @@ -204,7 +365,7 @@ def array2colormap(x, cmap='inferno', clim=None, alpha=1.0, vmin=None, return x_cmap.astype(np.float32) -def _transclucent_cmap(x, x_cmap, translucent): +def _transclucent_cmap(x, x_cmap, translucent, smooth=None): """Sub function to define transparency.""" if translucent is not None: is_num = [isinstance(k, (int, float)) for k in translucent] @@ -216,17 +377,22 @@ def _transclucent_cmap(x, x_cmap, translucent): elif is_num == [False, True]: # (None, f_2) trans_x = x <= translucent[1] x_cmap[..., -1] = np.invert(trans_x) + if isinstance(smooth, int): + alphas = x_cmap[:, -1] + alphas = np.convolve(alphas, np.hanning(smooth), 'valid') + alphas /= max(alphas.max(), 1.) 
+ x_cmap[smooth - 1::, -1] = alphas return x_cmap -def cmap_to_glsl(limits=None, n_colors=256, color=None, **kwargs): +def cmap_to_glsl(limits=None, lut_len=1024, color=None, **kwargs): """Get a glsl colormap. Parameters ---------- limits : tuple | None Color limits for the object. Must be a tuple of two floats. - n_colors : int | 256 + lut_len : int | 1024 Number of levels for the colormap. color : string | None Use a unique color for the colormap. @@ -242,14 +408,14 @@ def cmap_to_glsl(limits=None, n_colors=256, color=None, **kwargs): limits = (0., 1.) assert len(limits) == 2 # Color transform : - vec = np.linspace(limits[0], limits[1], n_colors) + vec = np.linspace(limits[0], limits[1], lut_len) if color is None: # colormap - cmap = Colormap(array2colormap(vec, **kwargs)) + cmap = VispyColormap(array2colormap(vec, **kwargs)) else: # uniform color translucent = kwargs.get('translucent', None) - rep_col = color2vb(color, length=n_colors) + rep_col = color2vb(color, length=lut_len) cmap_trans = _transclucent_cmap(vec, rep_col, translucent) - cmap = Colormap(cmap_trans) + cmap = VispyColormap(cmap_trans) return cmap diff --git a/visbrain/utils/guitools.py b/visbrain/utils/guitools.py index 709714229..6d337a792 100644 --- a/visbrain/utils/guitools.py +++ b/visbrain/utils/guitools.py @@ -426,7 +426,8 @@ def set_widget_size(app, widget, width=100., height=100.): widget.resize(size) -def fill_pyqt_table(table, col_names=None, col=None, df=None): +def fill_pyqt_table(table, col_names=None, col=None, df=None, filter=None, + filter_col=0, check=None): """Fill a PyQt table widget. Parameters @@ -438,7 +439,8 @@ def fill_pyqt_table(table, col_names=None, col=None, df=None): df : pandas.DataFrame or dict | None Alternatively, a pandas DataFrame or a dictionary can also be used. 
""" - from PyQt5.QtWidgets import QTableWidgetItem + from PyQt5.QtWidgets import (QTableWidgetItem, QTableWidget, QTableView) + from PyQt5 import QtGui, QtCore # ________________________ Checking ________________________ # Dictionary / pandas.DataFrame : @@ -449,14 +451,35 @@ def fill_pyqt_table(table, col_names=None, col=None, df=None): col.append(df[k]) assert len(col_names) == len(col) assert all([isinstance(k, str) for k in col_names]) - - # ________________________ Define table ________________________ - table.clear() - table.setColumnCount(len(col_names)) - table.setHorizontalHeaderLabels(col_names) - table.setRowCount(len(col[0])) - - # ________________________ Pre-allocate ________________________ - for i in range(table.rowCount()): - for k in range(table.columnCount()): - table.setItem(i, k, QTableWidgetItem(str(col[k][i]))) + n_rows, n_cols = (len(col[0]), len(col_names)) + + # Switch between table view / widget : + if isinstance(table, QTableWidget): # Table widget + table.clear() + table.setColumnCount(n_cols) + table.setHorizontalHeaderLabels(col_names) + table.setRowCount(n_rows) + + for i in range(table.rowCount()): + for k in range(table.columnCount()): + table.setItem(i, k, QTableWidgetItem(str(col[k][i]))) + elif isinstance(table, QTableView): # Table view + table.reset() + # Fill the model : + model = QtGui.QStandardItemModel(n_rows, n_cols) + model.setHorizontalHeaderLabels(col_names) + for i in range(n_rows): + for k in range(n_cols): + item = QtGui.QStandardItem(str(col[k][i])) + item.setCheckable(check == k) + model.setItem(i, k, item) + table.setModel(model) + + if filter is not None: + filt_model = QtCore.QSortFilterProxyModel() + filt_model.setSourceModel(model) + filt_model.setFilterKeyColumn(filter_col) + filt_model.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) + filter.textChanged.connect(filt_model.setFilterRegExp) + table.setModel(filt_model) + return model diff --git a/visbrain/utils/logging.py b/visbrain/utils/logging.py index 
a560ce564..25c5225dd 100644 --- a/visbrain/utils/logging.py +++ b/visbrain/utils/logging.py @@ -56,10 +56,10 @@ def format(self, record): # Set level color : levelname_color = COLOR_SEQ % (30 + COLORS[name]) + name + RESET_SEQ record.levelname = levelname_color - # if record.levelno == 11: - # logging.Formatter.__init__(self, FORMAT['print']) - # else: - # logging.Formatter.__init__(self, FORMAT[self._format_type]) + if record.levelno == 20: + logging.Formatter.__init__(self, FORMAT['print']) + else: + logging.Formatter.__init__(self, FORMAT[self._format_type]) return formatter_message(logging.Formatter.format(self, record)) diff --git a/visbrain/utils/mesh.py b/visbrain/utils/mesh.py index 531ca9699..01e3884e8 100644 --- a/visbrain/utils/mesh.py +++ b/visbrain/utils/mesh.py @@ -126,7 +126,7 @@ def volume_to_mesh(vol, smooth_factor=3, level=None, **kwargs): Mesh normals. """ # Smooth the volume : - vol_s = smooth_3d(vol, smooth_factor) + vol_s, tf = smooth_3d(vol, smooth_factor, correct=True) # Extract vertices and faces : if level is None: level = .5 @@ -134,6 +134,8 @@ def volume_to_mesh(vol, smooth_factor=3, level=None, **kwargs): vol_s[vol_s != level] = 0 level = .5 vert_n, faces_n = isosurface(vol_s, level=level) + # Smoothing compensation : + vert_n = tf.map(vert_n)[:, 0:-1] # Convert to meshdata : vertices, faces, normals = convert_meshdata(vert_n, faces_n, **kwargs) return vertices, faces, normals diff --git a/visbrain/utils/physio.py b/visbrain/utils/physio.py index ccdf97054..cb6f598ed 100644 --- a/visbrain/utils/physio.py +++ b/visbrain/utils/physio.py @@ -7,10 +7,9 @@ from scipy.stats import zscore from .sigproc import smoothing -from ..io.path import get_data_path, get_files_in_data __all__ = ('find_non_eeg', 'rereferencing', 'bipolarization', 'commonaverage', - 'tal2mni', 'mni2tal', 'load_predefined_roi', 'generate_eeg') + 'tal2mni', 'mni2tal', 'generate_eeg') logger = logging.getLogger('visbrain') @@ -230,36 +229,36 @@ def _spm_matrix(p): 
p.extend(q[len(p):12]) # Translation t : - t = np.matrix([[1, 0, 0, p[0]], - [0, 1, 0, p[1]], - [0, 0, 1, p[2]], - [0, 0, 0, 1]]) + t = np.array([[1, 0, 0, p[0]], + [0, 1, 0, p[1]], + [0, 0, 1, p[2]], + [0, 0, 0, 1]]) # Rotation 1 : - r1 = np.matrix([[1, 0, 0, 0], - [0, np.cos(p[3]), np.sin(p[3]), 0], - [0, -np.sin(p[3]), np.cos(p[3]), 0], - [0, 0, 0, 1]]) + r1 = np.array([[1, 0, 0, 0], + [0, np.cos(p[3]), np.sin(p[3]), 0], + [0, -np.sin(p[3]), np.cos(p[3]), 0], + [0, 0, 0, 1]]) # Rotation 2 : - r2 = np.matrix([[np.cos(p[4]), 0, np.sin(p[4]), 0], - [0, 1, 0, 0], - [-np.sin([p[4]]), 0, np.cos(p[4]), 0], - [0, 0, 0, 1]]) - # Rotation 3 : - r3 = np.matrix([[np.cos(p[5]), np.sin(p[5]), 0, 0], - [-np.sin(p[5]), np.cos(p[5]), 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]]) - # Translation z : - z = np.matrix([[p[6], 0, 0, 0], - [0, p[7], 0, 0], - [0, 0, p[8], 0], + r2 = np.array([[np.cos(p[4]), 0, np.sin(p[4]), 0], + [0, 1, 0, 0], + [-np.sin([p[4]]), 0, np.cos(p[4]), 0], [0, 0, 0, 1]]) - # Translation s : - s = np.matrix([[1, p[9], p[10], 0], - [0, 1, p[11], 0], + # Rotation 3 : + r3 = np.array([[np.cos(p[5]), np.sin(p[5]), 0, 0], + [-np.sin(p[5]), np.cos(p[5]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) - return t * r1 * r2 * r3 * z * s + # Translation z : + z = np.array([[p[6], 0, 0, 0], + [0, p[7], 0, 0], + [0, 0, p[8], 0], + [0, 0, 0, 1]]) + # Translation s : + s = np.array([[1, p[9], p[10], 0], + [0, 1, p[11], 0], + [0, 0, 1, 0], + [0, 0, 0, 1]]) + return np.linalg.multi_dot([t, r1, r2, r3, z, s]) def tal2mni(xyz): @@ -286,10 +285,10 @@ def tal2mni(xyz): downz = np.linalg.inv(_spm_matrix([0., 0., 0., 0., 0., 0., .99, .97, .84])) # Apply rotation and translation : - xyz = rotn * np.c_[xyz, np.ones((n_sources, ))].T + xyz = np.dot(rotn, np.c_[xyz, np.ones((n_sources, ))].T) tmp = np.array(xyz)[2, :] < 0. 
- xyz[:, tmp] = downz * xyz[:, tmp] - xyz[:, ~tmp] = upz * xyz[:, ~tmp] + xyz[:, tmp] = np.dot(downz, xyz[:, tmp]) + xyz[:, ~tmp] = np.dot(upz, xyz[:, ~tmp]) return np.array(xyz[0:3, :].T) @@ -317,46 +316,11 @@ def mni2tal(xyz): xyz = np.c_[xyz, np.ones((n_sources, ))].T tmp = np.array(xyz)[2, :] < 0. - xyz[:, tmp] = down_t * xyz[:, tmp] - xyz[:, ~tmp] = up_t * xyz[:, ~tmp] + xyz[:, tmp] = np.dot(down_t, xyz[:, tmp]) + xyz[:, ~tmp] = np.dot(up_t, xyz[:, ~tmp]) return np.array(xyz[0:3, :].T) -def load_predefined_roi(name): - """Load a predefined ROI template. - - Parameters - ---------- - name : {'brodmann', 'aal', 'talairach'} - Name of the ROI atlas to load. - - Returns - ------- - vol : array_like - The volume of shape (nx, ny, nz). - labels : array_like - Array of labels of type object and of length n_labels. - index : array_like - Array of index corresponding to the labels of type np.int and of length - n_labels. - hdr : array_like - The matrix of transformation of shape (4, 4). - system : {'mni', 'tal'} - The system used by the volume. - """ - if name in get_files_in_data('roi', with_ext=False): - file = get_data_path(folder='roi', file=name + '.npz') - else: - file = name - # Load archive : - arch = np.load(file) - # Extract informations : - vol, hdr = arch['vol'], arch['hdr'] - labels, index = arch['labels'], arch['index'] - - return vol, labels, index, hdr, 'tal' if name == 'talairach' else 'mni' - - def generate_eeg(sf=512., n_pts=1000, n_channels=1, n_trials=1, n_sines=100, f_min=.5, f_max=160., smooth=50, noise=10, random_state=0): """Generate random eeg signals. 
diff --git a/visbrain/utils/sigproc.py b/visbrain/utils/sigproc.py index 12ca75ac2..9c5f80562 100644 --- a/visbrain/utils/sigproc.py +++ b/visbrain/utils/sigproc.py @@ -4,6 +4,8 @@ import numpy as np from scipy.signal import fftconvolve +from vispy.visuals.transforms import STTransform, NullTransform + __all__ = ('normalize', 'derivative', 'tkeo', 'zerocrossing', 'power_of_ten', 'averaging', 'normalization', 'smoothing', 'smooth_3d') @@ -166,7 +168,7 @@ def power_of_ten(x, e=3): def averaging(ts, n_window, axis=-1, overlap=0., window='flat'): - """Take the mean of a np.ndarray. + """Take the mean of an ndarray. Parameters ---------- @@ -224,9 +226,9 @@ def averaging(ts, n_window, axis=-1, overlap=0., window='flat'): sl_ts = [slice(None)] * ts.ndim sl_av = sl_ts.copy() for k in range(n_ind): - sl_ts[axis] = slice(ind[k, 0], ind[k, 1]) - sl_av[axis] = slice(k, k + 1) - average[sl_av] += (ts[sl_ts] * win).mean(axis=axis, keepdims=True) + sl_ts[axis], sl_av[axis] = slice(ind[k, 0], ind[k, 1]), slice(k, k + 1) + average[tuple(sl_av)] += (ts[tuple(sl_ts)] * win).mean(axis=axis, + keepdims=True) return average @@ -262,7 +264,7 @@ def normalization(data, axis=-1, norm=None, baseline=None): if (baseline is not None) and (len(baseline) == 2): sl = [slice(None)] * data.ndim sl[axis] = slice(baseline[0], baseline[1]) - _data = data[sl] + _data = data[tuple(sl)] else: _data = None @@ -331,7 +333,7 @@ def smoothing(x, n_window=10, window='hanning'): return y[n_window - 1:-n_window + 1] -def smooth_3d(vol, smooth_factor=3): +def smooth_3d(vol, smooth_factor=3, correct=True): """Smooth a 3-D volume. Parameters @@ -346,9 +348,21 @@ def smooth_3d(vol, smooth_factor=3): vol_smooth : array_like The smooth volume with the same shape as vol. 
""" - if isinstance(smooth_factor, int) and (smooth_factor >= 3): - sz = np.full((3,), smooth_factor, dtype=int) - smooth = np.ones([smooth_factor] * 3) / np.prod(sz) - return fftconvolve(vol, smooth, mode='same') - else: - return vol + tf = NullTransform() + # No smoothing : + if (not isinstance(smooth_factor, int)) or (smooth_factor < 3): + return vol, tf + # Smoothing array : + sz = np.full((3,), smooth_factor, dtype=int) + smooth = np.ones([smooth_factor] * 3) / np.prod(sz) + # Apply smoothing : + sm = fftconvolve(vol, smooth, mode='same') + if correct: + # Get the shape of the vol and the one with 'full' convolution : + vx, vy, vz = vol.shape + vcx, vcy, vcz = np.array([vx, vy, vz]) + smooth_factor - 1 + # Define transform : + sc = [vx / vcx, vy / vcy, vz / vcz] + tr = .5 * np.array([smooth_factor] * 3) + tf = STTransform(scale=sc, translate=tr) + return sm, tf diff --git a/visbrain/utils/sleep/edf.py b/visbrain/utils/sleep/edf.py index 713b89add..6b2dd6938 100644 --- a/visbrain/utils/sleep/edf.py +++ b/visbrain/utils/sleep/edf.py @@ -13,7 +13,7 @@ from datetime import datetime from math import floor from re import findall -from numpy import empty, asarray, fromstring, iinfo +from numpy import empty, asarray, frombuffer, iinfo lg = getLogger(__name__) @@ -193,7 +193,7 @@ def _read_dat(self, i_chan, begsam, endsam): samples = f.read(2 * (endpos - begpos)) i_dat_end = i_dat + endpos - begpos - dat[i_dat:i_dat_end] = fromstring(samples, dtype=' +Textures +-------- +1D texture : white (0) + sulcus (.5) + mask (1.) +2D texture : overlays (limited to 4 overlays) + License: BSD (3-clause) """ import numpy as np @@ -19,47 +24,55 @@ class is also responsible of turning camera rotations into light ajustement. 
import vispy.visuals.transforms as vist from vispy.scene.visuals import create_visual_node -from ..utils import (array2colormap, color2vb, convert_meshdata, vispy_array, - wrap_properties) +from visbrain.utils import (Colormap, color2vb, convert_meshdata, + wrap_properties, normalize) logger = logging.getLogger('visbrain') +# Light and color properties : +LUT_LEN = 1024 +LIGHT_POSITION = [0., 0., 1e7] +LIGHT_INTENSITY = [1.] * 3 +COEF_AMBIENT = .05 +COEF_SPECULAR = 0.1 +SULCUS_COLOR = [.4] * 3 + [1.] # Vertex shader : executed code for individual vertices. The transformation # applied to each one of them is the camera rotation. VERT_SHADER = """ #version 120 varying vec3 v_position; -varying vec4 v_color; varying vec3 v_normal; +varying vec4 v_color; void main() { v_position = $a_position; - v_normal = $a_normal; - - // Mask : (0. = (white, sulcus), 1. = color, 2. = mask_color) - // Sulcus : (0. = white, sulcus = gray) - if ($a_mask == 0.) - { - if ($a_sulcus == 0.) - { - v_color = vec4(1., 1., 1., 1.); - } - else if ($a_sulcus == 1.) - { - v_color = vec4(.5, .5, .5, 1.); - } - } - else if ($a_mask == 1.) - { - v_color = $a_color; - } - else if ($a_mask == 2.) 
- { - v_color = $u_mask_color; + v_normal = $u_inv_light * $a_normal; + + // Compute background color (i.e white / mask / sulcus) + vec4 bg_color = texture1D($u_bgd_text, $a_bgd_data); + + // Compute overlay colors : + vec4 overlay_color = vec4(0., 0., 0., 0.); + float u_div = 0.; + float off = float($u_n_overlays > 1) * 0.999999; + for (int i=0; i<$u_n_overlays; i++) { + // Texture coordinate : + vec2 tex_coords = vec2($u_range[i], (i + off)/$u_n_overlays); + // Get the color using the texture : + vec4 ux = texture2D($u_over_text, tex_coords); + // Ponderate the color with transparency level : + overlay_color += $u_alphas[i] * ux; + // Number of contributing overlay per vertex : + u_div += $u_alphas[i]; } - v_color *= $u_light_color; + overlay_color /= max(u_div, 1.); + + // Mix background and overlay colors : + v_color = mix(bg_color, overlay_color, overlay_color.a); + + // Finally apply camera transform to position : gl_Position = $transform(vec4($a_position, 1)); } """ @@ -78,14 +91,26 @@ class is also responsible of turning camera rotations into light ajustement. 
varying vec3 v_normal; void main() { + // Slices + if (v_position.x < $u_xmin || v_position.x > $u_xmax) { + discard; + } + if (v_position.y < $u_ymin || v_position.y > $u_ymax) { + discard; + } + if (v_position.z < $u_zmin || v_position.z > $u_zmax) { + discard; + } + + // Adapt light position with camera rotation + vec3 light_pos = $camtf(vec4($u_light_position, 0.)).xyz; // ----------------- Ambient light ----------------- vec3 ambientLight = $u_coef_ambient * v_color.rgb * $u_light_intensity; - // ----------------- Diffuse light ----------------- // Calculate the vector from this pixels surface to the light source - vec3 surfaceToLight = $u_light_position - v_position; + vec3 surfaceToLight = light_pos - v_position; // Calculate the cosine of the angle of incidence float l_surf_norm = length(surfaceToLight) * length(v_normal); @@ -96,20 +121,17 @@ class is also responsible of turning camera rotations into light ajustement. // Get diffuse light : vec3 diffuseLight = v_color.rgb * brightness * $u_light_intensity; - // ----------------- Specular light ----------------- - vec3 surfaceToCamera = vec3(0.0, 0.0, 1.0) - v_position; - vec3 K = normalize(normalize(surfaceToLight) + normalize(surfaceToCamera)); - float specular = clamp(pow(abs(dot(v_normal, K)), 40.), 0.0, 1.0); - specular *= $u_coef_specular; - vec3 specularLight = specular * vec3(1., 1., 1.) 
* $u_light_intensity; - + vec3 lightDir = normalize(surfaceToLight); + vec3 viewDir = normalize(light_pos - v_position); + vec3 reflectDir = reflect(-lightDir, normalize(v_normal)); + float specular = pow(max(dot(viewDir, reflectDir), 0.0), 32); + vec3 specularLight = $u_coef_specular * specular * vec3(1., 1., 1.); // ----------------- Attenuation ----------------- // float att = 0.0001; - // float distanceToLight = length($u_light_position - v_position); - // float attenuation = 1.0 / (1.0 + att * pow(distanceToLight, 2)); - + // float distanceToLight = length(light_pos - v_position); + // float attenuation = 1.0 / (1.0 + att * pow(distanceToLight, 4)); // ----------------- Linear color ----------------- // Without attenuation : @@ -122,7 +144,6 @@ class is also responsible of turning camera rotations into light ajustement. // ----------------- Gamma correction ----------------- // vec3 gamma = vec3(1.0/1.2); - // ----------------- Final color ----------------- // Without gamma correction : gl_FragColor = vec4(linearColor, $u_alpha); @@ -155,25 +176,11 @@ class BrainVisual(Visual): camera. meshdata : vispy.meshdata | None Custom vispy mesh data - color : tuple/string/hex | None - Alternatively, you can specify a uniform color. - l_position : tuple | (1., 1., 1.) - Tuple of three floats defining (x, y, z) light position. - l_color : tuple | (1., 1., 1., 1.) - Tuple of four floats defining (R, G, B, A) light color. - l_intensity : tuple | (1., 1., 1.) - Tuple of three floats defining (x, y, z) light intensity. - l_ambient : float | 0.05 - Coefficient for the ambient light - l_specular : float | 0.5 - Coefficient for the specular light hemisphere : string | 'both' Choose if an hemisphere has to be selected ('both', 'left', 'right') lr_index : int | None Integer which specify the index where to split left and right hemisphere. - vertfcn : VisPy.transform | None - Transformation to apply to vertices using get_vertices. 
""" def __len__(self): @@ -190,50 +197,54 @@ def __getitem__(self): def __init__(self, vertices=None, faces=None, normals=None, lr_index=None, hemisphere='both', sulcus=None, alpha=1., mask_color='orange', - light_position=[100.] * 3, light_color=[1.] * 4, - light_intensity=[1.] * 3, coef_ambient=.05, coef_specular=.5, - vertfcn=None, camera=None, meshdata=None, - invert_normals=False): + camera=None, meshdata=None, invert_normals=False): """Init.""" self._camera = None - self._camera_transform = vist.NullTransform() self._translucent = True self._alpha = alpha self._hemisphere = hemisphere + self._n_overlay = 0 + self._data_lim = [] # Initialize the vispy.Visual class with the vertex / fragment buffer : Visual.__init__(self, vcode=VERT_SHADER, fcode=FRAG_SHADER) - # _________________ TRANSFORMATIONS _________________ - self._vertfcn = vist.NullTransform() if vertfcn is None else vertfcn - # _________________ BUFFERS _________________ # Vertices / faces / normals / color : def_3 = np.zeros((0, 3), dtype=np.float32) - def_4 = np.zeros((0, 4), dtype=np.float32) self._vert_buffer = gloo.VertexBuffer(def_3) - self._color_buffer = gloo.VertexBuffer(def_4) self._normals_buffer = gloo.VertexBuffer(def_3) - self._mask_buffer = gloo.VertexBuffer() - self._sulcus_buffer = gloo.VertexBuffer() + self._bgd_buffer = gloo.VertexBuffer() + self._xrange_buffer = gloo.VertexBuffer() + self._alphas_buffer = gloo.VertexBuffer() self._index_buffer = gloo.IndexBuffer() # _________________ PROGRAMS _________________ self.shared_program.vert['a_position'] = self._vert_buffer - self.shared_program.vert['a_color'] = self._color_buffer self.shared_program.vert['a_normal'] = self._normals_buffer + self.shared_program.vert['u_n_overlays'] = self._n_overlay self.shared_program.frag['u_alpha'] = alpha + # _________________ LIGHTS _________________ + self.shared_program.frag['u_light_intensity'] = LIGHT_INTENSITY + self.shared_program.frag['u_coef_ambient'] = COEF_AMBIENT + 
self.shared_program.frag['u_coef_specular'] = COEF_SPECULAR + self.shared_program.frag['u_light_position'] = LIGHT_POSITION + self.shared_program.frag['camtf'] = vist.NullTransform() + # _________________ DATA / CAMERA / LIGHT _________________ + # Data : self.set_data(vertices, faces, normals, hemisphere, lr_index, invert_normals, sulcus, meshdata) + # Camera : self.set_camera(camera) self.mask_color = mask_color - self.light_color = light_color - self.light_position = light_position - self.light_intensity = light_intensity - self.coef_ambient = coef_ambient - self.coef_specular = coef_specular + + # Slices : + self.xmin, self.xmax = None, None + self.ymin, self.ymax = None, None + self.zmin, self.zmax = None, None + self.inv_light = False # _________________ GL STATE _________________ self.set_gl_state('translucent', depth_test=True, cull_face=False, @@ -300,70 +311,134 @@ def set_data(self, vertices=None, faces=None, normals=None, self._vert_buffer.set_data(vertices, convert=True) self._normals_buffer.set_data(normals, convert=True) self.hemisphere = hemisphere - # Mask : - self._mask = np.zeros((len(self),), dtype=np.float32) - self._mask_buffer.set_data(self._mask, convert=True) - self.shared_program.vert['a_mask'] = self._mask_buffer # Sulcus : - sulcus = self._mask.copy() if sulcus is None else sulcus - sulcus = sulcus.astype(np.float32) + n = len(self) + sulcus = np.zeros((n,), dtype=bool) if sulcus is None else sulcus assert isinstance(sulcus, np.ndarray) - assert len(sulcus) == vertices.shape[0] - assert (sulcus.min() == 0.) and (sulcus.max() <= 1.) - self._sulcus_buffer.set_data(sulcus, convert=True) - self.shared_program.vert['a_sulcus'] = self._sulcus_buffer - # Color : - self.color = np.ones((len(self), 4), dtype=np.float32) - - def set_color(self, data=None, color='white', alpha=1.0, **kwargs): - """Set specific colors on the brain. 
+ assert len(sulcus) == n and sulcus.dtype == bool
+
+ # ____________________ TEXTURES ____________________
+ # Background texture :
+ self._bgd_data = np.zeros((n,), dtype=np.float32)
+ self._bgd_data[sulcus] = .9
+ self._bgd_buffer.set_data(self._bgd_data, convert=True)
+ self.shared_program.vert['a_bgd_data'] = self._bgd_buffer
+ # Overlay texture :
+ self._text2d_data = np.zeros((2, LUT_LEN, 4), dtype=np.float32)
+ self._text2d = gloo.Texture2D(self._text2d_data)
+ self.shared_program.vert['u_over_text'] = self._text2d
+ # Build texture range :
+ self._xrange = np.zeros((n, 2), dtype=np.float32)
+ self._xrange_buffer.set_data(self._xrange)
+ self.shared_program.vert['u_range'] = self._xrange_buffer
+ # Define buffer for transparency per overlay :
+ self._alphas = np.zeros((n, 2), dtype=np.float32)
+ self._alphas_buffer.set_data(self._alphas)
+ self.shared_program.vert['u_alphas'] = self._alphas_buffer
+
+ def add_overlay(self, data, vertices=None, to_overlay=None, mask_data=None,
+ **kwargs):
+ """Add an overlay to the mesh.
+
+ Note that the current implementation is limited to a maximum of four
+ overlays.

 Parameters
 ----------
- data : array_like | None
- Data to use for the color. If data is None, the color will
- be uniform using the color parameter. If data is a vector,
- the color is going to be deduced from this vector. If data
- is a (N, 4) it will be interpreted as a color.
- color : tuple/string/hex | 'white'
- The default uniform color
- alpha : float | 1.0
- Opacity to use if data is a vector
- kwargs : dict | { }
- Further arguments are passed to the colormap function.
+ data : array_like
+ Array of data of shape (n_data,).
+ vertices : array_like | None
+ The vertices to color with the data of shape (n_data,).
+ to_overlay : int | None
+ Add data to a specific overlay. This parameter must be an integer.
+ mask_data : array_like | None
+ Array to specify if some vertices have to be considered as masked
+ (and use the `mask_color` color)
+ kwargs : dict | {}
+ Additional color properties (cmap, clim, vmin, vmax, under,
+ over, translucent)
 """
- # Color to RGBA :
- color = color2vb(color, len(self))
-
- # Color management :
- if data is None: # uniform color
- col = np.tile(color, (len(self), 1))
- elif data.ndim == 1: # data vector
- col = array2colormap(data.copy(), **kwargs)
- elif (data.ndim > 1) and (data.shape[1] == 4):
- col = vispy_array(data)
+ # Check input variables :
+ if vertices is None:
+ vertices = np.ones((len(self),), dtype=bool)
+ if not len(vertices):
+ logger.warning('Vertices array is empty. Abandoning.')
+ return
+
+ data = np.asarray(data)
+ to_overlay = self._n_overlay if to_overlay is None else to_overlay
+ data_lim = (data.min(), data.max())
+ if len(self._data_lim) < to_overlay + 1:
+ self._data_lim.append(data_lim)
 else:
- col = data
-
- self._color_buffer.set_data(vispy_array(col))
- self.update()
+ self._data_lim[to_overlay] = data_lim
+ # -------------------------------------------------------------
+ # TEXTURE COORDINATES
+ # -------------------------------------------------------------
+ need_reshape = to_overlay >= self._xrange.shape[1]
+ if need_reshape:
+ # Add column of zeros :
+ z_ = np.zeros((len(self),), dtype=np.float32)
+ z_text = np.zeros((1, LUT_LEN, 4), dtype=np.float32)
+ self._xrange = np.c_[self._xrange, z_]
+ self._alphas = np.c_[self._alphas, z_]
+ self._text2d_data = np.concatenate((self._text2d_data, z_text))
+ # (x, y) coordinates of the overlay for the texture :
+ self._xrange[vertices, to_overlay] = normalize(data)
+ # Transparency :
+ self._alphas[vertices, to_overlay] = 1.
 # transparency level
+
+ # -------------------------------------------------------------
+ # TEXTURE COLOR
+ # -------------------------------------------------------------
+ # Colormap interpolation (if needed):
+ colormap = Colormap(**kwargs)
+ vec = np.linspace(data_lim[0], data_lim[1], LUT_LEN)
+ self._text2d_data[to_overlay, ...] = colormap.to_rgba(vec)
+ # Send data to the mask :
+ if isinstance(mask_data, np.ndarray) and len(mask_data) == len(self):
+ self._bgd_data[mask_data] = .5
+ self._bgd_buffer.set_data(self._bgd_data)
+ # -------------------------------------------------------------
+ # BUFFERS
+ # -------------------------------------------------------------
+ if need_reshape:
+ # Re-define buffers :
+ self._xrange_buffer = gloo.VertexBuffer(self._xrange)
+ self._text2d = gloo.Texture2D(self._text2d_data)
+ self._alphas_buffer = gloo.VertexBuffer(self._alphas)
+ # Send buffers to vertex shader :
+ self.shared_program.vert['u_range'] = self._xrange_buffer
+ self.shared_program.vert['u_alphas'] = self._alphas_buffer
+ self.shared_program.vert['u_over_text'] = self._text2d
+ else:
+ self._xrange_buffer.set_data(self._xrange)
+ self._text2d.set_data(self._text2d_data)
+ self._alphas_buffer.set_data(self._alphas)
+ # Update the number of overlays :
+ self._n_overlay = to_overlay + 1
+ self.shared_program.vert['u_n_overlays'] = self._n_overlay

- def set_alpha(self, alpha, index=None):
- """Set transparency to the brain.
+ def update_colormap(self, to_overlay=None, **kwargs):
+ """Update colormap properties of an overlay.

- Prameters
- ---------
- alpha : float
- Transparency level.
- index : array_like | None
- Index for sending alpha. Used by slices.
+ Parameters
+ ----------
+ to_overlay : int | None
+ Add data to a specific overlay. This parameter must be an integer.
+ If no overlay is specified, the colormap of the last one is used.
+ kwargs : dict | {}
+ Additional color properties (cmap, clim, vmin, vmax, under,
+ over, translucent)
 """
- if index is None:
- self._colFaces[..., -1] = np.float32(alpha)
- else:
- self._colFaces[index, -1] = np.float32(alpha)
- self._color_buffer.set_data(self._colFaces)
- self.update()
+ if self._n_overlay >= 1:
+ overlay = self._n_overlay - 1 if to_overlay is None else to_overlay
+ # Define the colormap data :
+ data_lim = self._data_lim[overlay]
+ col = np.linspace(data_lim[0], data_lim[1], LUT_LEN)
+ self._text2d_data[overlay, ...] = Colormap(**kwargs).to_rgba(col)
+ self._text2d.set_data(self._text2d_data)
+ self.update()

 def set_camera(self, camera=None):
 """Set a camera to the mesh.
@@ -378,7 +453,7 @@ def set_camera(self, camera=None):
 """
 if camera is not None:
 self._camera = camera
- self._camera_transform = self._camera.transform
+ self.shared_program.frag['camtf'] = self._camera.transform
 self.update()

 def clean(self):
@@ -389,8 +464,15 @@ def clean(self):
 # Delete vertices / faces / colors / normals :
 self._vert_buffer.delete()
 self._index_buffer.delete()
- self._color_buffer.delete()
 self._normals_buffer.delete()
+ self._xrange_buffer.delete()
+ self._alphas_buffer.delete()
+
+ def _build_bgd_texture(self):
+ color_1d = np.c_[np.array([1.]
* 4), np.array(self.mask_color), + np.array(SULCUS_COLOR)].T + text_1d = gloo.Texture1D(color_1d.astype(np.float32)) + self.shared_program.vert['u_bgd_text'] = text_1d # ======================================================================= # ======================================================================= @@ -404,9 +486,7 @@ def draw(self, *args, **kwds): def _prepare_draw(self, view=None): """Call everytime there is an interaction with the mesh.""" - view_frag = view.view_program.frag - view_frag['u_light_position'] = self._camera_transform.map( - self._light_position)[0:-1] + pass @staticmethod def _prepare_transforms(view): @@ -423,11 +503,6 @@ def _prepare_transforms(view): # ======================================================================= # ======================================================================= - @property - def get_vertices(self): - """Mesh data.""" - return self._vertfcn.map(self._vertices)[..., 0:-1] - # ----------- HEMISPHERE ----------- @property def hemisphere(self): @@ -448,46 +523,6 @@ def hemisphere(self, value): self.update() self._hemisphere = value - # ----------- COLOR ----------- - @property - def color(self): - """Get the color value.""" - pass - # return self._color - - @color.setter - @wrap_properties - def color(self, value): - """Set color value.""" - assert isinstance(value, np.ndarray) and value.ndim == 2 - assert value.shape[0] == len(self) - self._color_buffer.set_data(value.astype(np.float32)) - self.update() - # self._color = value - - # ----------- MASK ----------- - @property - def mask(self): - """Get the mask value.""" - pass - # return self._mask - - @mask.setter - @wrap_properties - def mask(self, value): - """Set mask value.""" - if isinstance(value, (int, float, bool)): - value = np.full((len(self),), np.float(value), dtype=np.float32) - if len(value) != len(self): - to_mask = value.copy() - logger.debug("Reset brain mask") - value = np.zeros((len(self),), dtype=np.float32) - value[to_mask] = 1. 
- assert isinstance(value, np.ndarray) and len(value) == len(self) - self._mask_buffer.set_data(value.astype(np.float32), convert=True) - self.update() - # self._mask = value - # ----------- SULCUS ----------- @property def sulcus(self): @@ -499,9 +534,9 @@ def sulcus(self): def sulcus(self, value): """Set sulcus value.""" assert isinstance(value, np.ndarray) and len(value) == len(self) - value = value.astype(np.float32) - assert (value.min() == 0.) and (value.max() == 1.) - self._sulcus_buffer.set_data(value, convert=True) + assert isinstance(value.dtype, bool) + self._bgd_data[value] = 1. + self._bgd_buffer.set_data(self._bgd_data) self.update() # ----------- TRANSPARENT ----------- @@ -551,85 +586,112 @@ def mask_color(self): @wrap_properties def mask_color(self, value): """Set mask_color value.""" - value = color2vb(value) - self.shared_program.vert['u_mask_color'] = value.ravel() + value = color2vb(value).ravel() self._mask_color = value - self.update() + self._build_bgd_texture() - # ----------- LIGHT_POSITION ----------- @property - def light_position(self): - """Get the light_position value.""" - return self._light_position - - @light_position.setter - @wrap_properties - def light_position(self, value): - """Set light_position value.""" - assert len(value) == 3 - self.shared_program.frag['u_light_position'] = value - self._light_position = value - self.update() + def minmax(self): + """Get the data limits value.""" + return self._data_lim[self._n_overlay - 1] - # ----------- LIGHT_COLOR ----------- + # ----------- XMIN ----------- @property - def light_color(self): - """Get the light_color value.""" - return self._light_color + def xmin(self): + """Get the xmin value.""" + return self._xmin + + @xmin.setter + def xmin(self, value): + """Set xmin value.""" + value = self._vertices[:, 0].min() - 1 if value is None else value + assert isinstance(value, (int, float)) + self.shared_program.frag['u_xmin'] = value + self._xmin = value - @light_color.setter - 
@wrap_properties - def light_color(self, value): - """Set light_color value.""" - assert len(value) == 4 - self.shared_program.vert['u_light_color'] = value - self._light_color = value - self.update() + # ----------- XMAX ----------- + @property + def xmax(self): + """Get the xmax value.""" + return self._xmax + + @xmax.setter + def xmax(self, value): + """Set xmax value.""" + value = self._vertices[:, 0].max() + 1 if value is None else value + assert isinstance(value, (int, float)) + self.shared_program.frag['u_xmax'] = value + self._xmax = value - # ----------- LIGHT_INTENSITY ----------- + # ----------- YMIN ----------- @property - def light_intensity(self): - """Get the light_intensity value.""" - return self._light_intensity + def ymin(self): + """Get the ymin value.""" + return self._ymin + + @ymin.setter + def ymin(self, value): + """Set ymin value.""" + value = self._vertices[:, 1].min() - 1 if value is None else value + assert isinstance(value, (int, float)) + self.shared_program.frag['u_ymin'] = value + self._ymin = value - @light_intensity.setter - @wrap_properties - def light_intensity(self, value): - """Set light_intensity value.""" - assert len(value) == 3 - self.shared_program.frag['u_light_intensity'] = value - self._light_intensity = value - self.update() + # ----------- YMAX ----------- + @property + def ymax(self): + """Get the ymax value.""" + return self._ymax + + @ymax.setter + def ymax(self, value): + """Set ymax value.""" + value = self._vertices[:, 1].max() + 1 if value is None else value + assert isinstance(value, (int, float)) + self.shared_program.frag['u_ymax'] = value + self._ymax = value - # ----------- COEF_AMBIENT ----------- + # ----------- ZMIN ----------- @property - def coef_ambient(self): - """Get the coef_ambient value.""" - return self._coef_ambient + def zmin(self): + """Get the zmin value.""" + return self._zmin + + @zmin.setter + def zmin(self, value): + """Set zmin value.""" + value = self._vertices[:, 2].min() - 1 if 
value is None else value + assert isinstance(value, (int, float)) + self.shared_program.frag['u_zmin'] = value + self._zmin = value - @coef_ambient.setter - @wrap_properties - def coef_ambient(self, value): - """Set coef_ambient value.""" + # ----------- ZMAX ----------- + @property + def zmax(self): + """Get the zmax value.""" + return self._zmax + + @zmax.setter + def zmax(self, value): + """Set zmax value.""" + value = self._vertices[:, 2].max() + 1 if value is None else value assert isinstance(value, (int, float)) - self.shared_program.frag['u_coef_ambient'] = float(value) - self._coef_ambient = value - self.update() + self.shared_program.frag['u_zmax'] = value + self._zmax = value - # ----------- COEF_SPECULAR ----------- + # ----------- INV_LIGHT ----------- @property - def coef_specular(self): - """Get the coef_specular value.""" - return self._coef_specular + def inv_light(self): + """Get the inv_light value.""" + return self._inv_light - @coef_specular.setter - @wrap_properties - def coef_specular(self, value): - """Set coef_specular value.""" - assert isinstance(value, (int, float)) - self.shared_program.frag['u_coef_specular'] = value - self._coef_specular = value - self.update() + @inv_light.setter + def inv_light(self, value): + """Set inv_light value.""" + assert isinstance(value, bool) + value = -1 if value else 1 + self.shared_program.vert['u_inv_light'] = value + self._inv_light = value BrainMesh = create_visual_node(BrainVisual) diff --git a/visbrain/visuals/cbar/CbarBase.py b/visbrain/visuals/cbar/CbarBase.py index ef4e0bdea..6ec6fddc3 100644 --- a/visbrain/visuals/cbar/CbarBase.py +++ b/visbrain/visuals/cbar/CbarBase.py @@ -1,5 +1,5 @@ """Most basic colorbar class.""" -from ...utils import color2tuple, wrap_properties +from ...utils import color2tuple, wrap_properties, cmap_to_glsl __all__ = ('CbarArgs', 'CbarBase') @@ -38,6 +38,7 @@ def __init__(self, cmap=None, clim=None, isvmin=False, vmin=None, self._vmin, self._vmax = vmin, vmax 
self._isvmin, self._isvmax = isvmin, isvmax self._under, self._over = under, over + self._minmax = None def to_kwargs(self, addisminmax=False): """Return a dictionary for input arguments. @@ -59,6 +60,22 @@ def to_kwargs(self, addisminmax=False): kwargs['isvmin'], kwargs['isvmax'] = self._isvmin, self._isvmax return kwargs + def _get_glsl_colormap(self, limits=None): + """Get a GLSL version of the colormap. + + Parameters + ---------- + limits : tuple | None + A tuple of floats defining the limits of the data. + + Returns + ------- + cmap : class:`vispy.color.Colormap` + Colormap instance. + """ + limits = limits if limits is not None else self._minmax + return cmap_to_glsl(limits=limits, **self.to_kwargs()) + def _update_cbar_args(self, cmap, clim, vmin, vmax, under, over): """Update colorbar elements.""" kw = dict(clim=clim, cmap=cmap, vmin=vmin, vmax=vmax, under=under, diff --git a/visbrain/visuals/GridSignalVisual.py b/visbrain/visuals/grid_signal_visual.py similarity index 93% rename from visbrain/visuals/GridSignalVisual.py rename to visbrain/visuals/grid_signal_visual.py index 95aa902e4..374db5348 100644 --- a/visbrain/visuals/GridSignalVisual.py +++ b/visbrain/visuals/grid_signal_visual.py @@ -27,20 +27,27 @@ void main() { float nrows = $u_size.x; float ncols = $u_size.y; + // Compute the x coordinate from the time index. float x = -1 + 2*$a_index.z / ($u_n-1); + // Turn position into a vec3 : a_pos = vec3($a_position, 1, 1); vec2 position = vec2(x - (1 - 1 / $u_scale.x), a_pos); + // Find the affine transformation for the subplots. vec2 a = vec2(1./ncols, 1./nrows)*.98; vec2 b = vec2(-1 + $u_space*($a_index.x+.5) / ncols, -1 + $u_space*($a_index.y+.5) / nrows); + // Apply the static subplot transformation + scaling. gl_Position = $transform(vec4(a*$u_scale*position+b, 0.0, 1.0)); - v_color = vec4($a_color, 1.); - v_index = $a_index; + + // Use texture 1D to get the color. 
+ v_color = texture1D($u_text, $a_color); + // For clipping test in the fragment shader. + v_index = $a_index; v_position = gl_Position.xy; } """ @@ -51,7 +58,7 @@ varying vec3 v_index; varying vec2 v_position; void main() { - gl_FragColor = v_color; + gl_FragColor = vec4(v_color.xyz, 1.); // Discard the fragments between the signals (emulate glMultiDrawArrays). if ((fract(v_index.x) > 0.) || (fract(v_index.y) > 0.)) @@ -122,11 +129,10 @@ def __init__(self, data, axis=-1, sf=1., color='random', title=None, rnd_3 = np.zeros((1, 3), dtype=np.float32) self._dbuffer = gloo.VertexBuffer(rnd_1) self._ibuffer = gloo.VertexBuffer(rnd_3) - self._cbuffer = gloo.VertexBuffer(rnd_3) + self._cbuffer = gloo.VertexBuffer() # Send to the program : self.shared_program.vert['a_position'] = self._dbuffer self.shared_program.vert['a_index'] = self._ibuffer - self.shared_program.vert['a_color'] = self._cbuffer self.shared_program.vert['u_size'] = (1, 1) self.shared_program.vert['u_n'] = len(self) @@ -216,14 +222,18 @@ def set_data(self, data=None, axis=None, color=None, title=None, g_size = np.array(self.g_size) n = len(self) if color == 'random': # (m, 3) random color - singcol = np.random.uniform(size=(m, 3), low=rnd_dyn[0], - high=rnd_dyn[1]).astype(np.float32) + color_1d = np.random.uniform(size=(m, 3), low=rnd_dyn[0], + high=rnd_dyn[1]) + color_idx = np.mgrid[0:m, 0:len(self)][0] / m elif color is not None: # (m, 3) uniform color - singcol = color2vb(color, length=m)[:, 0:3] - # Repeat the array n_times to have a (m * n_times, 3) array : - a_color = np.repeat(singcol, n, axis=0) - # Send color to buffer : - self._cbuffer.set_data(vispy_array(a_color)) + color_1d = color2vb(color) + color_idx = np.zeros((m * len(self)), dtype=np.float32) + # Send texture to vertex shader : + text_1d = gloo.Texture1D(color_1d.astype(np.float32)) + self.shared_program.vert['u_text'] = text_1d + # Send color index to use for the texture : + self._cbuffer.set_data(color_idx.astype(np.float32)) + 
self.shared_program.vert['a_color'] = self._cbuffer # ====================== TITLES ====================== # Titles checking : @@ -315,18 +325,6 @@ def space(self, value): def rect(self): return (-1.05, -1.1, self._space + .1, self._space + .2) - # ----------- COLOR ----------- - @property - def color(self): - """Get the color value.""" - return self._color - - @color.setter - def color(self, value): - """Set color value.""" - self._color = value - self.set_data(color=value) - # ----------- FONT_SIZE ----------- @property def font_size(self): diff --git a/visbrain/visuals/marker_patch.py b/visbrain/visuals/marker_patch.py deleted file mode 100644 index e64001221..000000000 --- a/visbrain/visuals/marker_patch.py +++ /dev/null @@ -1,39 +0,0 @@ -"""Temporaly patch for VisPy markers.""" -import logging -import vispy - -logger = logging.getLogger('visbrain') - -vert_markers_patch = """ -uniform float u_antialias; -uniform float u_px_scale; -uniform float u_scale; - -attribute vec3 a_position; -attribute vec4 a_fg_color; -attribute vec4 a_bg_color; -attribute float a_edgewidth; -attribute float a_size; - -varying vec4 v_fg_color; -varying vec4 v_bg_color; -varying float v_edgewidth; -varying float v_antialias; - -void main (void) { - if (a_size != 0.) 
- { - $v_size = a_size * u_px_scale * u_scale; - v_edgewidth = a_edgewidth * float(u_px_scale); - v_antialias = u_antialias; - v_fg_color = a_fg_color; - v_bg_color = a_bg_color; - gl_Position = $transform(vec4(a_position,1.0)); - float edgewidth = max(v_edgewidth, 1.0); - gl_PointSize = ($v_size) + 4.*(edgewidth + 1.5*v_antialias); - } -} -""" - -vispy.visuals.markers.vert = vert_markers_patch -logger.debug("Remove markers patch in visuals and SourceObj") diff --git a/visbrain/visuals/PicVisual.py b/visbrain/visuals/pic_visual.py similarity index 92% rename from visbrain/visuals/PicVisual.py rename to visbrain/visuals/pic_visual.py index f12da9b49..4fb6cc95e 100644 --- a/visbrain/visuals/PicVisual.py +++ b/visbrain/visuals/pic_visual.py @@ -8,7 +8,7 @@ from vispy import gloo, visuals, scene -from ..utils import array2colormap +from ..utils import cmap_to_glsl, vispy_array __all__ = ('PicMesh') @@ -16,9 +16,11 @@ #version 120 varying vec4 v_color; +varying float v_data; void main() { - v_color = $a_color; + v_data = $a_data; + v_color = $color; gl_Position = $transform(vec4($a_position, 1.)); } """ @@ -72,10 +74,9 @@ def __init__(self, data, pos, width=1., height=1., dxyz=(0., 0., 0.,), # Re-order data : self._data = data.ravel()[np.argsort(grid.ravel())] + self._data_buffer = gloo.VertexBuffer(vispy_array(self._data)) + self.shared_program.vert['a_data'] = self._data_buffer # Define the color buffer : - color = np.zeros((self._data.shape[0], 4), dtype=np.float32) - self._color_buffer = gloo.VertexBuffer(color) - self.shared_program.vert['a_color'] = self._color_buffer self.shared_program.frag['u_alpha'] = alpha self.alpha = alpha self.set_data(**kwargs) @@ -199,9 +200,11 @@ def set_data(self, width=None, height=None, dxyz=None, **kwargs): a_position = self._data_to_pos(self._pos) self._pos_buffer.set_data(a_position) # Update color properties : - color = array2colormap(self._data, **kwargs) - # Send the color to the buffer : - self._color_buffer.set_data(color) + 
_limits = (self._data.min(), self._data.max()) + cmap = cmap_to_glsl(limits=_limits, **kwargs) + self.shared_program['texture2D_LUT'] = cmap.texture_lut() + fcn_color = visuals.shaders.Function(cmap.glsl_map) + self.shared_program.vert['color'] = fcn_color('v_data') self.update() # ----------- ALPHA ----------- diff --git a/visbrain/visuals/TFmapsVisual.py b/visbrain/visuals/tf_map_visual.py similarity index 96% rename from visbrain/visuals/TFmapsVisual.py rename to visbrain/visuals/tf_map_visual.py index c39231cb6..3ddb75d15 100644 --- a/visbrain/visuals/TFmapsVisual.py +++ b/visbrain/visuals/tf_map_visual.py @@ -10,8 +10,7 @@ from vispy.scene.visuals import Image from ..visuals import CbarBase -from ..utils import (morlet, array2colormap, vispy_array, averaging, - normalization) +from ..utils import (morlet, cmap_to_glsl, averaging, normalization) __all__ = ('TFmapsMesh') @@ -121,8 +120,9 @@ def set_data(self, data, sf, f_min=1., f_max=160., f_step=1., self._cmap = kwargs.get('cmap', 'viridis') # ======================= COLOR ======================= - cmap = array2colormap(tf, **kwargs) - self._image.set_data(vispy_array(cmap)) + self._image.set_data(tf) + self._image.clim = 'auto' + self._image.cmap = cmap_to_glsl(limits=(tf.min(), tf.max()), **kwargs) # ======================= SCALE // TRANSLATE ======================= # Scale and translate TF : diff --git a/visbrain/visuals/TopoVisual.py b/visbrain/visuals/topo_visual.py similarity index 96% rename from visbrain/visuals/TopoVisual.py rename to visbrain/visuals/topo_visual.py index 1a0125ffb..a348fcad3 100644 --- a/visbrain/visuals/TopoVisual.py +++ b/visbrain/visuals/topo_visual.py @@ -11,7 +11,6 @@ License: BSD (3-clause) """ -import os import logging import numpy as np @@ -23,7 +22,7 @@ from ..utils import (array2colormap, color2vb, mpl_cmap, normalize, vpnormalize, vprecenter) -from ..io import get_data_path +from ..io import download_file from .cbar import CbarVisual logger = logging.getLogger('visbrain') 
@@ -435,7 +434,7 @@ def _get_coordinates_from_name(chan): List of channel names. """ # Load the coordinates template : - path = os.path.join(get_data_path(), 'topo', 'eegref.npz') + path = download_file('eegref.npz', astype='topo') file = np.load(path) name_ref, xyz_ref = file['chan'], file['xyz'] keeponly = np.ones((len(chan)), dtype=bool) @@ -527,27 +526,3 @@ def array_project_radial_to3d(points_2d): points_3d = np.asarray([x, y, z]).T return points_3d - - # ----------- SHOW_MARKERS ----------- - @property - def show_markers(self): - """Get the show_markers value.""" - return self._show_markers - - @show_markers.setter - def show_markers(self, value): - """Set show_markers value.""" - self._show_markers = value - self.chanMarkers.visible = value - - # ----------- SHOW_NAMES ----------- - @property - def show_names(self): - """Get the show_names value.""" - return self._show_names - - @show_names.setter - def show_names(self, value): - """Set show_names value.""" - self._show_names = value - self.chanText.visible = value