diff --git a/.travis.yml b/.travis.yml index 5d4f9b75..4598e800 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,6 +6,7 @@ branches: only: - develop - master + - version-3 cache: - pip: true @@ -33,10 +34,6 @@ jobs: language: python python: "3.6" dist: xenial - - name: "Python 3.5" - python: "3.5" - - name: "Python 2.7" - python: "2.7" - stage: "Linting and formatting" name: "Formatting with black" diff --git a/AUTHORS b/AUTHORS index ab71377f..36d9101b 100644 --- a/AUTHORS +++ b/AUTHORS @@ -18,4 +18,7 @@ Chronological list of authors - Michael Gecht 2018 - - Marc Siggel \ No newline at end of file + - Marc Siggel + +2020 + - Sebastian Kehl diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4ac96019..459c448f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,18 @@ +3.0.1 (2020-10-09) +================== + +Features +-------- + +- Add ability to scan number of MPI ranks and OpenMP threads. (`#165 `_) +- Add support to run multiple simulations on single nodes (GROMACS-only). (`#168 `_) + +3.0.0 +===== + +This version was skipped intentionally. Version 3.0.1 is the first release of the major version 3. + + 2.0.1 (2020-03-04) ================== diff --git a/DEVELOPER.rst b/DEVELOPER.rst index 9d898ccb..6310041c 100644 --- a/DEVELOPER.rst +++ b/DEVELOPER.rst @@ -25,6 +25,14 @@ Using ``poetry`` you can simply run ``poetry install`` to install all dependencies. ``poetry`` will take care of creating a virtual environment for you. +Working on the documentation +---------------------------- + +You will need to install extra packages to work on the documentation. Run +``poetry install --extra docs`` to install all necessary dependencies. When in +the ``docs`` folder, you can run ``poetry run make livehtml`` to start a local +preview of the documentation, that will rebuild when you update a file. + Running commands in the virtual environment ------------------------------------------- @@ -106,7 +114,7 @@ contain the file ``.invisible``. 
Also the file ``CHANGELOG.rst`` should have been updated. **Important:** Make sure that the version numbers inside - ``mdbenchmark/__init__.py`` and ``CHANGELOG.rst`` match. +``mdbenchmark/__init__.py`` and ``CHANGELOG.rst`` match. 3. Generate dist files ---------------------- @@ -120,7 +128,7 @@ First make sure that your ``wheel`` package is up-to-date:: Next we can generate a source distribution package and universal wheel:: - $ python setup.py sdist bdist_wheel --universal + $ python setup.py sdist bdist_wheel --universal Check that the tarball inside ``./dist/`` includes all needed files (source code, ``README.rst``, ``CHANGELOG.rst``, ``LICENSE``), ! diff --git a/LICENSE b/LICENSE index af0578ff..70c1ef99 100644 --- a/LICENSE +++ b/LICENSE @@ -677,36 +677,6 @@ the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . -========================================================================== - -cadishi.py (mdbenchmark/ext/cadishi) is released under the MIT license: - --------------------------------------------------------------------------- - -MIT License - -Copyright (c) 2015-2017 Klaus Reuter, Max Planck Computing and Data Facility, - Giessenbachstraße 2, 85748 Garching, Germany. -Copyright (c) 2015-2017 Juergen Koefinger, Max Planck Institute of Biophysics, - Max-von-Laue-Straße 3, 60438 Frankfurt am Main, Germany. - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ============================================================================ click_test.py (mdbenchmark/ext/click_test) is released under the MIT license @@ -733,4 +703,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file +THE SOFTWARE. diff --git a/README.rst b/README.rst index 3ed3b8b2..1ee2011a 100644 --- a/README.rst +++ b/README.rst @@ -1,6 +1,6 @@ -============================================ - Benchmark molecular dynamics simulations -============================================ +======================================== +Benchmark molecular dynamics simulations +======================================== .. image:: https://img.shields.io/pypi/v/mdbenchmark.svg :target: https://pypi.python.org/pypi/mdbenchmark @@ -51,21 +51,28 @@ pip .. code:: - pip install mdbenchmark + pip install mdbenchmark conda ----- .. code:: - conda install -c conda-forge mdbenchmark + conda install -c conda-forge mdbenchmark + +pipx +---- + +.. code:: + + pipx install mdbenchmark pipenv ------ .. 
code:: - pipenv install mdbenchmark + pipenv install mdbenchmark After installation MDBenchmark is accessible on your command-line via ``mdbenchmark``:: @@ -91,6 +98,8 @@ Features - Automatically detects the queuing system on your high-performance cluster and submits jobs accordingly. - Grabs performance from the output logs and creates a fancy plot. - Benchmarks systems on CPUs and/or GPUs. +- Find the best parameters by scanning different numbers of MPI ranks and OpenMP threads. +- Run multiple instances of the same simulation on a single node using GROMACS' ``--multidir`` option. Short usage reference ===================== @@ -104,18 +113,18 @@ Benchmark generation Assuming you want to benchmark GROMACS version 2018.3 and your TPR file is called ``protein.tpr``, run the following command:: - mdbenchmark generate --name protein --module gromacs/2018.3 + mdbenchmark generate --name protein --module gromacs/2018.3 To run benchmarks on GPUs simply add the ``--gpu`` flag:: - mdbenchmark generate --name protein --module gromacs/2018.3 --gpu + mdbenchmark generate --name protein --module gromacs/2018.3 --gpu Benchmark submission -------------------- After you generated your benchmarks, you can submit them at once:: - mdbenchmark submit + mdbenchmark submit Benchmark analysis ------------------ @@ -123,12 +132,22 @@ Benchmark analysis As soon as the benchmarks have been submitted you can run the analysis via ``mdbenchmark analyze``. Systems that have not finished yet will be marked with a question mark (``?``). 
You can save the performance results to a CSV file and subsequently create a plot from the data:: - # Print performance results to console and save them to a file called results.csv + # Print performance results to console and save them to a file called results.csv mdbenchmark analyze --save-csv results.csv - + # Create a plot from the results present in the file results.csv mdbenchmark plot --csv results.csv +Literature +========== + +Please cite the latest MDBenchmark publication if you use the tool to benchmark +your simulations. This will help raise awareness of benchmarking and help people +improve their simulation performance, as well as reduce overall resource +wastage. + +M\. Gecht, M. Siggel, M. Linke, G. Hummer, J. Köfinger MDBenchmark: A toolkit to optimize the performance of molecular dynamics simulations. J. Chem. Phys. 153, 144105 (2020); https://doi.org/10.1063/5.0019045 + Contributing ============ diff --git a/docs/analyze.rst b/docs/analyze.rst index fc909d2a..2fe462ab 100644 --- a/docs/analyze.rst +++ b/docs/analyze.rst @@ -47,19 +47,6 @@ single benchmark:: mdbenchmark analyze --directory draco_gromacs/2018.3 -Plotting of benchmark results ------------------------------ - - .. warning:: - - The function |mdbenchmark.analyze.plot|_ was deprecated with version 2.0. You should migrate to the newer ``mdbenchmark plot``, as it provides more functionality and the former version will be removed in the future. - -MDBenchmark provides a quick and simple way to plot the results of the -benchmarks, giving you a ``.pdf`` file as output. To generate a plot simply use -the ``--plot`` option:: - - mdbenchmark analyze --plot - Plot the number of cores ~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/conf.py b/docs/conf.py index 86d1449f..4c11e71d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -51,7 +51,7 @@ # General information about the project. 
project = "MDBenchmark" -copyright = "2017-2018, The MDBenchmark development team" +copyright = "2017-2020, The MDBenchmark development team" author = "Written by the MDBenchmark development team" # The version info for the project you're documenting, acts as replacement for @@ -59,9 +59,9 @@ # built documents. # # The short X.Y version. -version = "2.0" +version = "3.0" # The full version, including alpha/beta/rc tags. -release = "2.0.1" +release = "3.0.1" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/general.rst b/docs/general.rst index 99f7ca34..16e6d1f2 100644 --- a/docs/general.rst +++ b/docs/general.rst @@ -46,7 +46,7 @@ NAMD .. note:: **NAMD support is experimental.** If you encounter any problems or bugs, we - would appreciate to `hear from you`_. + would appreciate to `hear from you`_. Generating benchmarks for NAMD follows a similar process to GROMACS. Assuming the NAMD configuration file is called ``protein.namd``, you will also need the @@ -54,9 +54,9 @@ corresponding ``protein.pdb`` and ``protein.psf`` inside the same folder. .. warning:: - Please be aware that all paths given in the ``protein.namd`` file - must be absolute paths. This ensures that MDBenchmark does not destroy paths - when copying files around during benchmark generation. + Please be aware that all paths given in the ``protein.namd`` file must be + absolute paths. This ensures that MDBenchmark does not destroy paths when + copying files around during benchmark generation. 
In analogy to the GROMACS setup, you can execute the following command to generate benchmarks for a module named ``namd/2.12``: diff --git a/docs/generate.rst b/docs/generate.rst index 2c35801a..85762063 100644 --- a/docs/generate.rst +++ b/docs/generate.rst @@ -94,18 +94,74 @@ simply use the ``--host`` option:: Running on CPUs or GPUs ----------------------- -Depending on your setup you might want to run your simulations only on GPUs -or CPUs. You can do so with the ``--cpu/--no-cpu`` and ``--gpu/--no-gpu`` flags, ``-c/-nc` and ``-g/-ng` respectively. -If neither of both options is given, benchmarks will be generated for CPUs only. -The default template for the MPCDF cluster ``draco`` showcases the ability to -run benchmarks on GPUs:: +Depending on your setup you might want to run your simulations only on GPUs or +CPUs. You can do so with the ``--cpu/--no-cpu`` and ``--gpu/--no-gpu`` flags, +``-c/-nc`` and ``-g/-ng`` respectively. If neither of both options is given, +benchmarks will be generated for CPUs only. The default template for the MPCDF +cluster ``draco`` showcases the ability to run benchmarks on GPUs:: mdbenchmark generate --gpu This generates benchmarks for both GPU and CPU partitions. If you only want to run on GPUs this is easily achieved with:: - mdbenchmark generate --gpu --no-cpu + mdbenchmark generate --gpu --no-cpu + + +Using a different number of ranks or threads +-------------------------------------------- + +The correct choice on the number of MPI ranks and OpenMP threads and +hyperthreading depends on your available hardware and software resources, your +simulation system and used MD engine. MDBenchmark can help you scan different +numbers of ranks and threads. + +.. note:: + + The following was only tested with GROMACS. + +To use this feature, you first need to know the number of physical cores on your +compute nodes. MDBenchmark will try to guess the number of physical cores.
The +guess is only correct if the machine from which you submit the jobs, i.e., a +login node on a supercomputer, has the same number of cores as the actual +compute nodes. You can override the number of physical cores with the +``--physical-cores`` option. + +In addition, Intel CPUs are able to run two calculations on the same core at the +same time. This feature is called "hyperthreading". If your CPU supports +hyperthreading, then it also has logical cores, which is twice the number of +physical cores. Assuming the CPUs of your compute node have 40 physical cores +and support hyperthreading, you need to specify the following settings:: + + mdbenchmark generate --physical-cores 40 --logical-cores 80 + +The above example would generate benchmarks without hyperthreading. To enable +hyperthreading you need to specify the ``--hyperthreading`` option:: + + mdbenchmark generate --hyperthreading + +Now that you have defined the number of available cores and whether you want to +toggle hyperthreading, you can define the number of MPI ranks that MDBenchmark +should use for the job:: + + mdbenchmark generate --ranks 2 --ranks 10 --ranks 40 + +The above command will generate jobs using 2, 10 and 40 MPI ranks. MDBenchmark +will calculate the number of OpenMP threads by itself. As a general rule: +`number_of_cores = number_of_ranks * number_of_threads`. If your CPU does not +support hyperthreading, then the number of cores equals the number of physical +cores. If it does support hyperthreading, then it equals the number of logical +cores.
+ +Combining all options you can run benchmarks on 1-10 nodes with and without GPUs using +either 4, 8 or 20 MPI ranks with hyperthreading with the following command:: + + mdbenchmark generate --max-nodes 10 --cpu --gpu --ranks 4 --ranks 8 --ranks 20 --physical-cores 40 --logical-cores 80 --hyperthreading + +In the above case, MDBenchmark will generate jobs with 4 MPI ranks/20 OpenMP +threads; 8 MPI ranks/10 OpenMP threads and 20 MPI ranks/4 OpenMP threads to +fulfill the constraint from above. A total of 60 benchmarks will be generated +(``10 (nodes) * 2 (gpu/cpu) * 3 (ranks)``). Limiting the run time of benchmarks @@ -128,6 +184,29 @@ If you want your benchmark jobs to have specific names, use the ``--job-name`` o mdbenchmark generate --job-name my_benchmark +Multiple jobs per node +---------------------- + +.. note:: + + Multiple simulations per node are currently only supported with GROMACS. The + developers of MDBenchmark welcome all support to implement further MD engines. + +It is possible to run multiple simulations on a single node to use the available +resources more efficiently. For example, when a node is equipped with two GPUs +it is possible to run either 1, 2 or 4 simulations on this single node. Each +instance of the simulation will generate shorter trajectories, but the overall +performance (the sum of all instances) will most likely be bigger than running a +single simulation on one node. This is especially useful when one is interested +in running many short simulations, instead of a single long simulation. + +To use this feature, users can specify the ``--multidir`` option. This will make +use of the built-in functionality available in GROMACS, which itself will take +care of running multiple independent instances of the same system. The following +command will run four benchmarks of a single system on the same node:: + + mdbenchmark generate --multidir 4 + .. _modules: https://linux.die.net/man/1/module .. 
_draco: https://www.mpcdf.mpg.de/services/computing/draco .. _hydra: https://www.mpcdf.mpg.de/services/computing/hydra diff --git a/docs/index.rst b/docs/index.rst index eaa13c01..133e55fc 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -87,30 +87,43 @@ Finally, you can plot the data from your ``data.csv`` file with:: mdbenchmark plot --csv data.csv +Literature +========== + +Please cite the latest MDBenchmark publication if you use the tool to benchmark +your simulations. This will help raise awareness of benchmarking and help people +improve their simulation performance, as well as reduce overall resource +wastage. + +M\. Gecht, M. Siggel, M. Linke, G. Hummer, J. Köfinger MDBenchmark: A toolkit to optimize the performance of molecular dynamics simulations. J. Chem. Phys. 153, 144105 (2020); https://doi.org/10.1063/5.0019045 + Content ======= .. toctree:: - :maxdepth: 2 - - install - upgrading - general - generate - submit - analyze - plot - jobtemplates + :maxdepth: 2 + + install + upgrading + general + generate + submit + analyze + plot + jobtemplates + mdengine Usage reference =============== .. click:: mdbenchmark.cli:cli - :prog: mdbenchmark - :show-nested: + :prog: mdbenchmark + :show-nested: Indices and tables ================== * :ref:`search` * :ref:`genindex` + +.. _providing your feedback: https://github.com/bio-phys/MDBenchmark/issues/new diff --git a/docs/install.rst b/docs/install.rst index e3455556..8cd1cf3b 100644 --- a/docs/install.rst +++ b/docs/install.rst @@ -29,8 +29,8 @@ create an environment called ``benchmark`` and install ``mdbenchmark`` inside. .. code:: - conda create -n benchmark - conda install -n benchmark -c conda-forge mdbenchmark + conda create -n benchmark + conda install -n benchmark -c conda-forge mdbenchmark Before every usage of ``mdbenchmark``, you need to first activate the conda environment via ``source activate benchmark``. 
After doing this once, you can @@ -61,9 +61,9 @@ After activating the environment, you should be able to install the package via .. note:: - The ``--user`` option leads to the installation of the package in your home - directory ``$HOME``. If you are not using the option, you may get errors due - to missing write permissions. + The ``--user`` option leads to the installation of the package in your home + directory ``$HOME``. If you are not using the option, you may get errors due + to missing write permissions. .. code:: @@ -97,9 +97,9 @@ the folder you installed it in. You can also activate the virtual environment once and then visit different directories afterwards:: - pipenv shell - cd .. - mdbenchmark + pipenv shell + cd .. + mdbenchmark .. _virtual environment: https://docs.python.org/3/tutorial/venv.html .. _conda environment: https://conda.io/docs/user-guide/tasks/manage-environments.html diff --git a/docs/jobtemplates.rst b/docs/jobtemplates.rst index 8ac58443..3f3658eb 100644 --- a/docs/jobtemplates.rst +++ b/docs/jobtemplates.rst @@ -35,9 +35,35 @@ This example shows a HPC running SGE with 30 CPUs per node. module unload gromacs module load {{ module }} module load impi + module load cuda + + # The below configuration was kindly provided by Dr. 
Klaus Reuter + # --- edit the following two lines to configure your job --- + {% if hyperthreading %} + USE_HT=1 # use hyperthreading, 0 (off) or 1 (on) + {%- else %} + USE_HT=0 # use hyperthreading, 0 (off) or 1 (on) + {%- endif %} + N_TASKS_PER_HOST={{ number_of_ranks }} # number of MPI tasks to be started per node + + + # --- no need to touch the lines below --- + N_SLOTS_TOTAL=$NSLOTS + N_TASKS_TOTAL=$((N_TASKS_PER_HOST*NHOSTS)) + N_SLOTS_PER_HOST=$((NSLOTS/NHOSTS)) + N_THREADS_PER_PROCESS=$((N_SLOTS_PER_HOST/N_TASKS_PER_HOST)) + N_THREADS_PER_PROCESS=$(((1+USE_HT)*N_THREADS_PER_PROCESS)) + export OMP_NUM_THREADS=$N_THREADS_PER_PROCESS + if [ $USE_HT ]; then + export OMP_PLACES=threads + else + export OMP_PLACES=cores + fi + + # Edit again below, as you see fit # Run gromacs/{{ version }} for {{ time - 5 }} minutes - mpiexec -n {{ 30 * n_nodes }} -perhost 30 mdrun_mpi -v -maxh {{ time / 60 }} -deffnm {{ name }} + mpiexec -n $N_TASKS_TOTAL -ppn $N_TASKS_PER_HOST mdrun_mpi -ntomp $N_THREADS_PER_PROCESS -v -maxh {{ time / 60 }} -resethway -noconfout -deffnm {{ name }} Slurm ----- @@ -70,9 +96,16 @@ Slurm. {%- endif %} {%- endif %} # - # Number of nodes and MPI tasks per node: + # Request {{ n_nodes }} node(s) #SBATCH --nodes={{ n_nodes }} - #SBATCH --ntasks-per-node=32 + # Set the number of tasks per node (=MPI ranks) + #SBATCH --ntasks-per-node={{ number_of_ranks }} + # Set the number of threads per rank (=OpenMP threads) + #SBATCH --cpus-per-task={{ number_of_threads }} + {% if hyperthreading %} + # Enable hyperthreading + #SBATCH --ntasks-per-core=2 + {%- endif %} # Wall clock limit: #SBATCH --time={{ formatted_time }}
module load cuda module load {{ module }} + export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK + {% if hyperthreading %} + export OMP_PLACES=threads + export SLURM_HINT=multithread + {%- else %} + export OMP_PLACES=cores + {%- endif %} + # Run {{ module }} for {{ time }} minutes - srun gmx_mpi mdrun -v -maxh {{ time / 60 }} -deffnm {{ name }} + srun gmx_mpi mdrun -v -ntomp $OMP_NUM_THREADS -maxh {{ time / 60 }} -resethway -noconfout -deffnm {{ name }} + + # Running multiple simulations on a single node (multidir) + # If you want to run multiple simulations on the same node, use the `multidir` + # variable, like so: + # + # srun gmx_mpi mdrun -v -ntomp $OMP_NUM_THREADS -maxh {{ time / 60 }} -resethway -noconfout -deffnm {{ name }} {{ multidir }} + # + # MDBenchmark will set up the folder structure as required by GROMACS and + # replace the variable. LoadLeveler ----------- -Here is an example job template for the MPG cluster ``hydra`` (LoadLeveler). +Here is an example job template for the decommissioned MPG cluster ``hydra`` (LoadLeveler). .. code:: @@ -99,7 +149,7 @@ Here is an example job template for the MPG cluster ``hydra`` (LoadLeveler). # @ job_type = parallel # @ node_usage = not_shared # @ node = {{ n_nodes }} - # @ tasks_per_node = 20 + # @ tasks_per_node = {{ number_of_threads }} {%- if gpu %} # @ requirements = (Feature=="gpu") {%- endif %} @@ -112,30 +162,38 @@ Here is an example job template for the MPG cluster ``hydra`` (LoadLeveler). 
module load {{ module }} # run {{ module }} for {{ time }} minutes - poe gmx_mpi mdrun -deffnm {{ name }} -maxh {{ time / 60 }} + poe gmx_mpi mdrun -deffnm {{ name }} -maxh {{ time / 60 }} -resethway -noconfout Options passed to job templates ------------------------------- MDBenchmark passes the following variables to each template: -+----------------+---------------------------------------------------------------------+ -| Value | Description | -+================+=====================================================================+ -| name | Name of the TPR file | -+----------------+---------------------------------------------------------------------+ -| job_name | Job name as specified by the user, if not specified same as name | -+----------------+---------------------------------------------------------------------+ -| gpu | Boolean that is true, if GPUs are requested | -+----------------+---------------------------------------------------------------------+ -| module | Name of the module to load | -+----------------+---------------------------------------------------------------------+ -| n_nodes | Maximal number of nodes to run on | -+----------------+---------------------------------------------------------------------+ -| time | Benchmark run time in minutes | -+----------------+---------------------------------------------------------------------+ -| formatted_time | Run time for the queuing system in human readable format (HH:MM:SS) | -+----------------+---------------------------------------------------------------------+ ++-------------------+---------------------------------------------------------------------+ +| Value | Description | ++===================+=====================================================================+ +| name | Name of the TPR file | ++-------------------+---------------------------------------------------------------------+ +| job_name | Job name as specified by the user, if not specified same as name | 
++-------------------+---------------------------------------------------------------------+ +| gpu | Boolean that is true, if GPUs are requested | ++-------------------+---------------------------------------------------------------------+ +| module | Name of the module to load | ++-------------------+---------------------------------------------------------------------+ +| n_nodes | Maximal number of nodes to run on | ++-------------------+---------------------------------------------------------------------+ +| number_of_ranks | The number of MPI ranks | ++-------------------+---------------------------------------------------------------------+ +| number_of_threads | The number of OpenMP threads | ++-------------------+---------------------------------------------------------------------+ +| hyperthreading | Whether to use hyperthreading | ++-------------------+---------------------------------------------------------------------+ +| time | Benchmark run time in minutes | ++-------------------+---------------------------------------------------------------------+ +| formatted_time | Run time for the queuing system in human readable format (HH:MM:SS) | ++-------------------+---------------------------------------------------------------------+ +| multidir | Run multiple simulations on a single node (GROMACS only) | ++-------------------+---------------------------------------------------------------------+ To ensure correct termination of jobs ``formatted_time`` is 5 minutes longer than ``time``. diff --git a/docs/mdengine.rst b/docs/mdengine.rst new file mode 100644 index 00000000..ac43bb6e --- /dev/null +++ b/docs/mdengine.rst @@ -0,0 +1,90 @@ +Adding new MD engines +====================== + +If you need to add a new simulation engine to MDBenchmark that is not +yet supported, follow the steps below. + +Set up local environment +------------------------ + +.. note:: + + Make sure that you have `poetry`_ installed. 
We use it to provision the local + development environment. + +First, clone the ``git`` repository and enter the directory:: + + git clone https://github.com/bio-phys/MDBenchmark.git + cd MDBenchmark + +.. _poetry: https://github.com/python-poetry/poetry + +Install the development dependencies using ``poetry``:: + + poetry install + +You can now run ``mdbenchmark`` with ``poetry``:: + + poetry run mdbenchmark + +Create the new engine +--------------------- + +You can now add a new MD engine to the ``mdbenchmark/mdengines`` folder. Create +a file with the name of the engine, i.e., ``openmm.py``. + +This new file must implement the functions ``prepare_benchmarks()`` and +``check_input_file_exists()``. For reference, refer to the paragraphs below and +the implementations of other engines. + +``prepare_benchmarks()`` +_________________________ + +This function extracts the filename (``md`` from ``md.tpr``) and copies the files required to run each +benchmark into the correct location. It receives the filename (``name``), i.e., +``md.tpr`` and the relative path (``relative_path``) to the benchmark folder +about to be generated as arguments. + +``check_input_file_exists()`` +_____________________________ + +This function should assert that all files needed to run a benchmark with the +given engine exist. It receives the filename (``name``), i.e., ``md.tpr`` as +argument. + +Add log file parser +------------------- + +MDBenchmark needs to know how to extract the performance from the log file that +is produced by the MD engine. Therefore you need to add your engine to the +`PARSE_ENGINE` dictionary in ``mdbenchmark/mdengines/utils.py``.
+ +The keys inside each engine dictionary have specific functions: + ++--------------------+-------------------------------------------------------------------+ +| Key | Description | ++====================+===================================================================+ +| performance | Start of the line containing the performance | ++--------------------+-------------------------------------------------------------------+ +| performance_return | A lambda function to extract the performance value | ++--------------------+-------------------------------------------------------------------+ +| ncores | Start of the line containing the number of cores used for the run | ++--------------------+-------------------------------------------------------------------+ +| ncores_return | A lambda function to extract the number of cores | ++--------------------+-------------------------------------------------------------------+ +| analyze | A regular expression for the output file to parse | ++--------------------+-------------------------------------------------------------------+ + +Add cleanup exceptions +---------------------- + +Submitting a benchmark via ``mdbenchmark submit --force`` will delete all +previous results. You need to define all files that need to be kept in the +`FILES_TO_KEEP` dictionary in ``mdbenchmark/mdengines/utils.py``. + +Register the engine +------------------- + +Finally you need to register the engine with MDBenchmark. To do this, import the +engine in ``mdbenchmark/mdengines/__init__.py`` and add it into the +``SUPPORTED_ENGINES`` dictionary. 
diff --git a/mdbenchmark/__init__.py b/mdbenchmark/__init__.py index fa302003..b0d01991 100644 --- a/mdbenchmark/__init__.py +++ b/mdbenchmark/__init__.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -17,11 +17,8 @@ # # You should have received a copy of the GNU General Public License # along with MDBenchmark. If not, see . -from mdbenchmark.cli import cli -from mdbenchmark.migrations import mds_to_dtr - -# Check that the Python environment is correctly setup -mds_to_dtr.ensure_correct_environment() +from mdbenchmark.__version__ import VERSION as __version__ # noqa: F401 +from mdbenchmark.cli import cli # noqa: F401 if __name__ == "__main__": cli() diff --git a/mdbenchmark/__main__.py b/mdbenchmark/__main__.py index 5592732a..882f72d2 100644 --- a/mdbenchmark/__main__.py +++ b/mdbenchmark/__main__.py @@ -1,3 +1,22 @@ +# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 +# +# MDBenchmark +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors +# (see the file AUTHORS for the full list of names) +# +# MDBenchmark is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# MDBenchmark is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License +# along with MDBenchmark. If not, see from mdbenchmark.cli import cli if __name__ == "__main__": diff --git a/mdbenchmark/__version__.py b/mdbenchmark/__version__.py new file mode 100644 index 00000000..9f97d832 --- /dev/null +++ b/mdbenchmark/__version__.py @@ -0,0 +1,20 @@ +# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 +# +# MDBenchmark +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors +# (see the file AUTHORS for the full list of names) +# +# MDBenchmark is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# MDBenchmark is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with MDBenchmark. If not, see +VERSION = "3.0.1" diff --git a/mdbenchmark/cli/__init__.py b/mdbenchmark/cli/__init__.py index 3e03bb48..b4ff44c1 100644 --- a/mdbenchmark/cli/__init__.py +++ b/mdbenchmark/cli/__init__.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2019 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -17,4 +17,4 @@ # # You should have received a copy of the GNU General Public License # along with MDBenchmark. If not, see . 
-from mdbenchmark.cli.commands import cli +from mdbenchmark.cli.commands import cli # noqa: F401 diff --git a/mdbenchmark/cli/analyze.py b/mdbenchmark/cli/analyze.py index e7a3ebbd..557f8017 100644 --- a/mdbenchmark/cli/analyze.py +++ b/mdbenchmark/cli/analyze.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2019 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -19,33 +19,32 @@ # along with MDBenchmark. If not, see . import click import datreant as dtr -import matplotlib.pyplot as plt import numpy as np -import pandas as pd -from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas -from matplotlib.figure import Figure from mdbenchmark import console -from mdbenchmark.cli.plot import plot_over_group -from mdbenchmark.mdengines import detect_md_engine, utils -from mdbenchmark.migrations import mds_to_dtr -from mdbenchmark.utils import DataFrameFromBundle, PrintDataFrame, generate_output_name +from mdbenchmark.utils import map_columns, parse_bundle, print_dataframe +from mdbenchmark.versions import VersionFactory -plt.switch_backend("agg") - -def do_analyze(directory, plot, ncores, save_csv): +def do_analyze(directory, save_csv): """Analyze benchmarks.""" - # Migrate from MDBenchmark<2 to MDBenchmark=>2 - mds_to_dtr.migrate_to_datreant(directory) - bundle = dtr.discover(directory) + version = VersionFactory(categories=bundle.categories).version_class + + df = parse_bundle( + bundle, columns=version.analyze_categories, sort_values_by=version.analyze_sort, + ) + + # Remove the versions column from the DataFrame + columns_to_drop = ["version"] + df = df.drop(columns=columns_to_drop) - df = DataFrameFromBundle(bundle) + if save_csv is not None: + if not 
save_csv.endswith(".csv"): + save_csv = "{}.csv".format(save_csv) + df.to_csv(save_csv, index=False) - if save_csv is not None and not save_csv.endswith(".csv"): - save_csv = "{}.csv".format(save_csv) - df.to_csv(save_csv) + console.success("Successfully wrote benchmark data to {}.", save_csv) # Reformat NaN values nicely into question marks. # move this to the bundle function! @@ -56,25 +55,19 @@ def do_analyze(directory, plot, ncores, save_csv): "Systems marked with question marks have either crashed or " "were not started yet." ) - PrintDataFrame(df) - if plot: - console.warn("'--plot' has been deprecated, use '{}'.", "mdbenchmark plot") - - fig = Figure() - FigureCanvas(fig) - ax = fig.add_subplot(111) - - df = pd.read_csv(save_csv) - if ncores: - console.warn( - "Ignoring your value from '{}' and parsing number of cores from log files.", - "--number-cores/-ncores", + # Warn user that we are going to print more than 50 benchmark results to the console + if df.shape[0] > 50: + if click.confirm( + "We are about to print the results of {} benchmarks to the console.
Continue?".format( + click.style(str(df.shape[0]), bold=True) ) - ax = plot_over_group(df, plot_cores=ncores, fit=True, ax=ax) - lgd = ax.legend(loc="upper center", bbox_to_anchor=(0.5, -0.175)) + ): + pass + else: + console.error("Exiting.") - fig.tight_layout() - fig.savefig( - "runtimes.pdf", type="pdf", bbox_extra_artists=(lgd,), bbox_inches="tight" - ) + # Print the data to the console + print_dataframe( + df, columns=map_columns(version.category_mapping, version.analyze_printing), + ) diff --git a/mdbenchmark/cli/commands.py b/mdbenchmark/cli/commands.py index a08a7da3..7cd072dd 100644 --- a/mdbenchmark/cli/commands.py +++ b/mdbenchmark/cli/commands.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2019 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -19,16 +19,14 @@ # along with MDBenchmark. If not, see . import click +from mdbenchmark.__version__ import VERSION from mdbenchmark.cli.options import AliasedGroup from mdbenchmark.cli.validators import ( print_known_hosts, - validate_cpu_gpu_flags, validate_hosts, validate_module, validate_name, - validate_number_of_nodes, ) -from mdbenchmark.version import VERSION @click.group(cls=AliasedGroup) @@ -46,28 +44,13 @@ def cli(): default=".", show_default=True, ) -@click.option( - "-p", - "--plot", - is_flag=True, - help="DEPRECATED. Please use 'mdbenchmark plot'.\nGenerate a plot of finished benchmarks.", -) -@click.option( - "--ncores", - "--number-cores", - "ncores", - type=int, - default=None, - help="DEPRECATED. Please use 'mdbenchmark plot'.\nNumber of cores per node. 
If not given it will be parsed from the benchmarks' log file.", - show_default=True, -) @click.option( "-s", "--save-csv", default=None, help="Filename for the CSV file containing benchmark results.", ) -def analyze(directory, plot, ncores, save_csv): +def analyze(directory, save_csv): """Analyze benchmarks and print the performance results. Benchmarks are searched recursively starting from the directory specified @@ -83,7 +66,7 @@ def analyze(directory, plot, ncores, save_csv): """ from mdbenchmark.cli.analyze import do_analyze - do_analyze(directory=directory, plot=plot, ncores=ncores, save_csv=save_csv) + do_analyze(directory=directory, save_csv=save_csv) @cli.command() @@ -165,6 +148,42 @@ def analyze(directory, plot, ncores, save_csv): @click.option( "-y", "--yes", help="Answer all prompts with yes.", default=False, is_flag=True ) +@click.option( + "--physical-cores", + "physical_cores", + help="Number of physical cores on each node.", + type=int, + default=40, +) +@click.option( + "--logical-cores", + "logical_cores", + help="Number of logical cores on each node.", + type=int, + default=40, +) +@click.option( + "--ranks", + "number_of_ranks", + help="Number of ranks to use per node.", + multiple=True, + type=int, +) +@click.option( + "--hyperthreading", + "enable_hyperthreading", + help="Enable hyperthreading.", + default=False, + is_flag=True, +) +@click.option( + "--multidir", + "multidir", + help="Use gromacs multidir simulation.", + multiple=True, + type=int, + default=(1,), +) def generate( name, cpu, @@ -177,6 +196,11 @@ def generate( skip_validation, job_name, yes, + physical_cores, + logical_cores, + number_of_ranks, + enable_hyperthreading, + multidir, ): """Generate benchmarks for molecular dynamics simulations. 
@@ -212,6 +236,11 @@ def generate( skip_validation=skip_validation, job_name=job_name, yes=yes, + physical_cores=physical_cores, + logical_cores=logical_cores, + number_of_ranks=number_of_ranks, + enable_hyperthreading=enable_hyperthreading, + multidir=multidir, ) @@ -370,3 +399,26 @@ def submit(directory, force_restart, yes): from mdbenchmark.cli.submit import do_submit do_submit(directory=directory, force_restart=force_restart, yes=yes) + + +@cli.command() +@click.option( + "-d", + "--directory", + help="Path in which to look for benchmarks.", + default=".", + show_default=True, +) +def migrate(directory): + """ + Migrate from old versions of MDBenchmark. + + Currently moves from version 1 to version 2. + """ + + from mdbenchmark.migrations import mds_to_dtr + + mds_to_dtr.ensure_correct_environment() + + # Migrate from MDBenchmark<2 to MDBenchmark=>2 + mds_to_dtr.migrate_to_datreant(directory) diff --git a/mdbenchmark/cli/generate.py b/mdbenchmark/cli/generate.py index 58552db4..6882e6a3 100644 --- a/mdbenchmark/cli/generate.py +++ b/mdbenchmark/cli/generate.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2019 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -20,13 +20,24 @@ import os.path import click -import datreant as dtr import pandas as pd from mdbenchmark import console, mdengines, utils -from mdbenchmark.cli.validators import validate_cpu_gpu_flags, validate_number_of_nodes +from mdbenchmark.cli.validators import ( + validate_cpu_gpu_flags, + validate_number_of_nodes, + validate_number_of_simulations, +) from mdbenchmark.mdengines.utils import write_benchmark -from mdbenchmark.utils import ConsolidateDataFrame, DataFrameFromBundle, PrintDataFrame +from mdbenchmark.models 
import Processor +from mdbenchmark.utils import ( + consolidate_dataframe, + construct_generate_data, + map_columns, + print_dataframe, + validate_required_files, +) +from mdbenchmark.versions import Version3Categories NAMD_WARNING = ( "NAMD support is experimental. " @@ -48,14 +59,49 @@ def do_generate( skip_validation, job_name, yes, + physical_cores, + logical_cores, + number_of_ranks, + enable_hyperthreading, + multidir, ): """Generate a bunch of benchmarks.""" + + # Instantiate the version we are going to use + benchmark_version = Version3Categories() + # Validate the CPU and GPU flags validate_cpu_gpu_flags(cpu, gpu) # Validate the number of nodes validate_number_of_nodes(min_nodes=min_nodes, max_nodes=max_nodes) + if logical_cores < physical_cores: + console.error( + "The number of logical cores cannot be smaller than the number of physical cores." + ) + + if physical_cores and not logical_cores: + console.warn("Assuming logical_cores = 2 * physical_cores") + logical_cores = 2 * physical_cores + + if physical_cores and logical_cores: + processor = Processor( + physical_cores=physical_cores, logical_cores=logical_cores + ) + else: + processor = Processor() + + # Hyperthreading check + if enable_hyperthreading and not processor.supports_hyperthreading: + console.error("The processor of this machine does not support hyperthreading.") + + if not number_of_ranks: + number_of_ranks = (processor.physical_cores,) + + # Validate number of simulations + validate_number_of_simulations(multidir, min_nodes, max_nodes, number_of_ranks) + # Grab the template name for the host. This should always work because # click does the validation for us template = utils.retrieve_host_template(host) @@ -64,96 +110,91 @@ def do_generate( if any(["namd" in m for m in module]): console.warn(NAMD_WARNING, "--gpu") - module = mdengines.normalize_modules(module, skip_validation) - - # If several modules were given and we only cannot find one of them, we - # continue. 
- if not module: + # Stop if we cannot find any modules. If the user specified multiple + # modules, we will continue with only the valid ones. + modules = mdengines.normalize_modules(module, skip_validation) + if not modules: console.error("No requested modules available!") - df_overview = pd.DataFrame( - columns=[ - "name", - "job_name", - "base_directory", - "template", - "engine", - "module", - "nodes", - "run time [min]", - "gpu", - "host", - ] + # Check if all needed files exist. Throw an error if they do not. + validate_required_files(name=name, modules=modules) + + # Validate that we can use the number of ranks and threads. + # We can continue, if no ValueError is thrown + for ranks in number_of_ranks: + try: + processor.get_ranks_and_threads( + ranks, with_hyperthreading=enable_hyperthreading + ) + except ValueError as e: + console.error(e) + + # Create all benchmark combinations and put them into a DataFrame + data = construct_generate_data( + name, + job_name, + modules, + host, + template, + cpu, + gpu, + time, + min_nodes, + max_nodes, + processor, + number_of_ranks, + enable_hyperthreading, + multidir, ) + df = pd.DataFrame(data, columns=benchmark_version.generate_categories) - i = 1 - for m in module: - # Here we detect the MD engine (supported: GROMACS and NAMD). - engine = mdengines.detect_md_engine(m) - - # Check if all needed files exist. Throw an error if they do not. 
- engine.check_input_file_exists(name) - - gpu_cpu = {"cpu": cpu, "gpu": gpu} - for pu, state in sorted(gpu_cpu.items()): - if not state: - continue - - directory = "{}_{}".format(host, m) - gpu = False - gpu_string = "" - if pu == "gpu": - gpu = True - directory += "_gpu" - gpu_string = " with GPUs" - - console.info("Creating benchmark system for {}.", m + gpu_string) - - base_directory = dtr.Tree(directory) - - for nodes in range(min_nodes, max_nodes + 1): - df_overview.loc[i] = [ - name, - job_name, - base_directory, - template, - engine, - m, - nodes, - time, - gpu, - host, - ] - i += 1 - - console.info("{}", "Benchmark Summary:") - - df_short = ConsolidateDataFrame(df_overview) - PrintDataFrame(df_short) + # Consolidate the data by grouping on the number of nodes and print to the + # user as an overview. + consolidated_df = consolidate_dataframe( + df, columns=benchmark_version.consolidate_categories + ) + print_dataframe( + consolidated_df[benchmark_version.generate_printing], + columns=map_columns( + map_dict=benchmark_version.category_mapping, + columns=benchmark_version.generate_printing, + ), + ) + # Save the number of benchmarks for later printing + number_of_benchmarks = df.shape[0] + # Ask the user for confirmation to generate files. + # If the user defined `--yes`, we will skip the confirmation immediately. if yes: - console.info("Generating the above benchmarks.") - elif not click.confirm("The above benchmarks will be generated. Continue?"): - console.error("Exiting. 
No benchmarks generated.") - - for _, row in df_overview.iterrows(): - relative_path, file_basename = os.path.split(row["name"]) - write_benchmark( - engine=row["engine"], - base_directory=row["base_directory"], - template=row["template"], - nodes=row["nodes"], - gpu=row["gpu"], - module=row["module"], - name=file_basename, - relative_path=relative_path, - job_name=row["job_name"], - host=row["host"], - time=row["run time [min]"], + console.info( + "We will generate {} " + + "{benchmark}.".format( + benchmark="benchmark" if number_of_benchmarks == 1 else "benchmarks" + ), + number_of_benchmarks, ) - - # Provide some output for the user + elif not click.confirm( + "We will generate {} benchmarks. Continue?".format(number_of_benchmarks) + ): + console.error("Exiting. No benchmarks were generated.") + + # Generate the benchmarks + with click.progressbar( + df.iterrows(), + length=number_of_benchmarks, + show_pos=True, + label="Generating benchmarks", + ) as bar: + for _, row in bar: + relative_path, file_basename = os.path.split(row["name"]) + mappings = benchmark_version.generate_mapping + kwargs = {"name": file_basename, "relative_path": relative_path} + for key, value in mappings.items(): + kwargs[value] = row[key] + + write_benchmark(**kwargs) + + # Finish up by telling the user how to submit the benchmarks console.info( - "Finished generating all benchmarks.\n" "You can now submit the jobs with {}.", - "mdbenchmark submit", + "Finished! 
You can submit the jobs with {}.", "mdbenchmark submit", ) diff --git a/mdbenchmark/cli/options.py b/mdbenchmark/cli/options.py index 631e2a25..349acfd6 100644 --- a/mdbenchmark/cli/options.py +++ b/mdbenchmark/cli/options.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2019 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify diff --git a/mdbenchmark/cli/plot.py b/mdbenchmark/cli/plot.py index 7b3cc578..b138703f 100644 --- a/mdbenchmark/cli/plot.py +++ b/mdbenchmark/cli/plot.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2019 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -21,12 +21,15 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd -from matplotlib import rcParams as mpl_rcParams +from matplotlib import rcParams from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas from matplotlib.figure import Figure from mdbenchmark import console -from mdbenchmark.utils import calc_slope_intercept, generate_output_name, lin_func +from mdbenchmark.math import calc_slope_intercept, lin_func +from mdbenchmark.mdengines import SUPPORTED_ENGINES +from mdbenchmark.utils import generate_output_name +from mdbenchmark.versions import VersionFactory plt.switch_backend("agg") @@ -56,64 +59,98 @@ def get_xsteps(size, min_x, plot_cores, xtick_step): return step -def plot_projection(df, selection, color, ax=None): - if ax is None: - ax = plt.gca() - slope, intercept = 
calc_slope_intercept( - (df[selection].iloc[0], df["ns/day"].iloc[0]), - (df[selection].iloc[1], df["ns/day"].iloc[1]), - ) - xstep = df[selection].iloc[1] - df[selection].iloc[0] - xmax = df[selection].iloc[-1] + xstep - x = df[selection] - x = pd.concat([pd.DataFrame({0: [0]}), x, pd.DataFrame({0: [xmax]})]) - # avoid a label and use values instead of pd.Series - ax.plot(x, lin_func(x.values, slope, intercept), ls="--", color=color, alpha=0.5) - return ax +def plot_projection(df, selection, color, performance_column, ax=None): + # Grab x and y values + xs = df[selection].iloc[0:2].values.tolist() + ys = df[performance_column].iloc[0:2].values.tolist() + # Calculate slope and intercept + p1, p2 = list(zip(xs, ys)) + slope, intercept = calc_slope_intercept(p1, p2) -def plot_line(df, selection, label, fit, ax=None): - if ax is None: - ax = plt.gca() + # Plot the projection + xstep = np.diff(xs) + xmax = (df[selection].iloc[-1] + xstep).tolist() + xs = np.array([0] + xs + xmax) + ax.plot(xs, lin_func(xs, slope, intercept), ls="--", color=color, alpha=0.5) + + return ax - p = ax.plot(selection, "ns/day", ".-", data=df, ms="10", label=label) + +def plot_line(df, selection, label, fit, performance_column="performance", ax=None): + mask = np.isfinite(df[performance_column]) + p = ax.plot( + df[selection][mask], + df[performance_column][mask], + ls="solid", + marker="o", + ms="10", + label=label, + ) color = p[0].get_color() if fit and (len(df[selection]) > 1): - plot_projection(df=df, selection=selection, color=color, ax=ax) + plot_projection( + df=df, + selection=selection, + color=color, + performance_column=performance_column, + ax=ax, + ) return ax -def plot_over_group(df, plot_cores, fit, ax=None): - # plot all lines +def plot_over_group(df, plot_cores, fit, performance_column, ax=None): selection = "ncores" if plot_cores else "nodes" + benchmark_version = VersionFactory( + version="3" if "use_gpu" in df.columns else "2" + ).version_class + + for key, group in 
df.groupby(benchmark_version.consolidate_categories): + # Do not try to plot groups without performance values + if group[performance_column].isnull().all(): + continue + + if benchmark_version.version == "3": + module, template, gpus, ranks, hyperthreading, multidir = key + threads = group.number_of_threads.iloc[0] + else: + gpus, module, template = key + + label = "{template} - {module}, {node_type}".format( + template=template, + module=module, + node_type="mixed CPU-GPU" if gpus else "CPU-only", + ) - groupby = ["gpu", "module", "host"] - gb = df.groupby(groupby) - for key, df in gb: - template = key[2] - module = key[1] - pu = "GPU" if key[0] else "CPU" + # Add ranks, threads and multdir information to label + if benchmark_version.version == "3": + label += " (ranks: {ranks}, threads: {threads}{ht}, nsims: {nsims})".format( + ranks=ranks, + threads=threads, + ht=" [HT]" if hyperthreading else "", + nsims=multidir, + ) - label = "{template} - {module} on {pu}s".format( - template=template, module=module, pu=pu + plot_line( + df=group, + selection=selection, + label=label, + fit=fit, + performance_column=performance_column, + ax=ax, ) - plot_line(df=df, selection=selection, ax=ax, fit=fit, label=label) - # style axes - xlabel = "cores" if plot_cores else "nodes" - ax.set_xlabel("Number of {}".format(xlabel)) - ax.set_ylabel("Performance [ns/day]") + selection_label = "cores" if plot_cores else "nodes" + ax.set_xlabel("Number of {selection}".format(selection=selection_label)) + ax.set_ylabel("Performance (ns/day)") - # here I return the figure as well as the legend return ax def filter_dataframe_for_plotting(df, host_name, module_name, gpu, cpu): - # gpu/cpu can be plotted together or separately if gpu and cpu: - # if no flags are given by the user or both are set everything is plotted console.info("Plotting GPU and CPU data.") elif gpu and not cpu: df = df[df.gpu] @@ -148,7 +185,7 @@ def filter_dataframe_for_plotting(df, host_name, module_name, gpu, cpu): ) for 
module in module_name: - if module in ["gromacs", "namd"]: + if module in SUPPORTED_ENGINES.keys(): console.info("Plotting all modules for engine '{}'.", module) elif module in df["module"].tolist(): console.info("Plotting module '{}'.", module) @@ -159,8 +196,7 @@ def filter_dataframe_for_plotting(df, host_name, module_name, gpu, cpu): if not module_name: console.info("Plotting all modules in your input data.") - # this should work but we need to check before whether any of the entered - # names are faulty/don't exist + if module_name: df = df[df["module"].str.contains("|".join(module_name))] @@ -194,15 +230,26 @@ def do_plot( "You must specify at least one CSV file.", param_hint='"--csv"' ) - df = pd.concat([pd.read_csv(c, index_col=0) for c in csv]).dropna() + df = pd.concat([pd.read_csv(c) for c in csv]) + performance_column = "performance" if "performance" in df.columns else "ns/day" df = filter_dataframe_for_plotting(df, template, module, gpu, cpu) - mpl_rcParams["font.size"] = font_size + # Exit if there is no performance data + if df[performance_column].isnull().all(): + console.error("There is no performance data to plot.") + + rcParams["font.size"] = font_size fig = Figure() FigureCanvas(fig) ax = fig.add_subplot(111) - ax = plot_over_group(df=df, plot_cores=plot_cores, fit=fit, ax=ax) + ax = plot_over_group( + df=df, + plot_cores=plot_cores, + fit=fit, + performance_column=performance_column, + ax=ax, + ) # Update xticks selection = "ncores" if plot_cores else "nodes" @@ -217,8 +264,10 @@ def do_plot( ax.set_xlim(min_x - xdiff, max_x + xdiff) # Update yticks - max_y = df["ns/day"].max() or 50 - yticks_steps = ((max_y + 1) / 10).astype(int) + max_y = df[performance_column].max() or 50 + yticks_steps = int(((max_y + 1) // 10)) + if yticks_steps == 0: + yticks_steps = 1 yticks = np.arange(0, max_y + (max_y * 0.25), yticks_steps) ax.set_yticks(yticks) ax.set_ylim(0, max_y + (max_y * 0.25)) @@ -227,7 +276,7 @@ def do_plot( if watermark: ax.text(0.025, 0.925, 
"MDBenchmark", transform=ax.transAxes, alpha=0.3) - lgd = ax.legend(loc="upper center", bbox_to_anchor=(0.5, -0.175)) + legend = ax.legend(loc="upper center", bbox_to_anchor=(0.5, -0.175)) plt.tight_layout() if output_name is None and len(csv) == 1: @@ -237,14 +286,12 @@ def do_plot( output_name = generate_output_name(output_format) elif not output_name.endswith(".{}".format(output_format)): output_name = "{}.{}".format(output_name, output_format) - # tight alone does not consider the legend if it is outside the plot. - # therefore i add it manually as extra artist. This way we don't get problems - # with the variability of individual lines which are to be plotted + fig.savefig( output_name, type=output_format, - bbox_extra_artists=(lgd,), + bbox_extra_artists=(legend,), bbox_inches="tight", dpi=dpi, ) - console.info("Your file was saved as '{}' in the working directory.", output_name) + console.info("The plot was saved as '{}'.", output_name) diff --git a/mdbenchmark/cli/submit.py b/mdbenchmark/cli/submit.py index f53d874e..310b0c68 100644 --- a/mdbenchmark/cli/submit.py +++ b/mdbenchmark/cli/submit.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2019 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -24,13 +24,17 @@ import click import datreant as dtr import numpy as np -import pandas as pd from mdbenchmark import console from mdbenchmark.mdengines import detect_md_engine from mdbenchmark.mdengines.utils import cleanup_before_restart -from mdbenchmark.migrations import mds_to_dtr -from mdbenchmark.utils import ConsolidateDataFrame, DataFrameFromBundle, PrintDataFrame +from mdbenchmark.utils import ( + consolidate_dataframe, + map_columns, + parse_bundle, + print_dataframe, +) +from 
mdbenchmark.versions import VersionFactory PATHS = os.environ["PATH"].split(":") BATCH_SYSTEMS = {"slurm": "sbatch", "sge": "qsub", "Loadleveler": "llsubmit"} @@ -49,9 +53,6 @@ def get_batch_command(): def do_submit(directory, force_restart, yes): """Submit the benchmarks.""" - # Migrate from MDBenchmark<2 to MDBenchmark=>2 - mds_to_dtr.migrate_to_datreant(directory) - bundle = dtr.discover(directory) # Exit if no bundles were found in the current directory. @@ -76,14 +77,35 @@ def do_submit(directory, force_restart, yes): if not force_restart: bundles_to_start = bundles_not_yet_started - df = DataFrameFromBundle(bundles_to_start) + benchmark_version = VersionFactory( + categories=bundles_to_start.categories + ).version_class + + df = parse_bundle( + bundles_to_start, + columns=benchmark_version.submit_categories, + sort_values_by=benchmark_version.analyze_sort, + discard_performance=True, + ) # Reformat NaN values nicely into question marks. df_to_print = df.replace(np.nan, "?") - df_to_print = df.drop(columns=["ns/day", "ncores"]) - console.info("{}", "Benchmark Summary:") - df_short = ConsolidateDataFrame(df_to_print) - PrintDataFrame(df_short) + + columns_to_drop = ["ncores", "version"] + df_to_print = df.drop(columns=columns_to_drop) + + # Consolidate the data by grouping on the number of nodes and print to the + # user as an overview. 
+ consolidated_df = consolidate_dataframe( + df_to_print, columns=benchmark_version.consolidate_categories + ) + print_dataframe( + consolidated_df, + columns=map_columns( + map_dict=benchmark_version.category_mapping, + columns=benchmark_version.generate_printing[1:], + ), + ) # Ask the user to confirm whether they want to submit the benchmarks if yes: diff --git a/mdbenchmark/cli/validators.py b/mdbenchmark/cli/validators.py index 5ef70837..40b86585 100644 --- a/mdbenchmark/cli/validators.py +++ b/mdbenchmark/cli/validators.py @@ -1,8 +1,38 @@ +# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 +# +# MDBenchmark +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors +# (see the file AUTHORS for the full list of names) +# +# MDBenchmark is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# MDBenchmark is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with MDBenchmark. 
If not, see <http://www.gnu.org/licenses/>. import click +import numpy as np from mdbenchmark import console, utils +def validate_cores(ctx, param, *args, **kwargs): + """Validate that we are given a positive integer bigger than 0.""" + for option, value in kwargs.items(): + if value is None or not isinstance(value, int) or value < 1: + raise click.BadParameter( + "Please specify the number of {option} cores.".format(option=option), + param_hint='"--{option}"'.format(option=option), + ) + + def validate_name(ctx, param, name=None): """Validate that we are given a name argument.""" if name is None: @@ -45,6 +75,21 @@ def validate_number_of_nodes(min_nodes, max_nodes): ) +def validate_number_of_simulations(nsims, min_nodes, max_nodes, nranks): + """Validate that the total number of ranks (number of nodes times number + of ranks per node) is an integer multiple of the number of simulations. + """ + for nn in range(min_nodes, max_nodes + 1): + total_ranks = np.array([nn * ri for ri in nranks]) + for nsim in nsims: + if np.any(total_ranks % nsim): + raise click.BadParameter( + "The total number of ranks must be an integer multiple of" + + " the number of simulations", + param_hint='"--multidir" / "--ranks" / "--min-nodes" / "--max-nodes"', + ) + + def print_known_hosts(ctx, param, value): """Callback to print all available hosts to the user.""" if not value or ctx.resilient_parsing: diff --git a/mdbenchmark/console.py b/mdbenchmark/console.py index a1997c55..00547de5 100644 --- a/mdbenchmark/console.py +++ b/mdbenchmark/console.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -17,8 +17,6 @@ # # You should have received a copy of the GNU General Public License # along with MDBenchmark. If not, see <http://www.gnu.org/licenses/>.
-import six - import sys import click @@ -34,6 +32,7 @@ def console_wrapper( bg=None, underline=None, blink=None, + newlines=False, **kwargs ): """Wrapper to consolidate all click.echo() calls. @@ -84,11 +83,15 @@ def console_wrapper( if kwargs: kwargs = { k: click.style(str(v), bold=bold, fg=fg, bg=bg, underline=underline) - for k, v in six.iteritems(kwargs) + for k, v in kwargs.items() } try: + if newlines: + click.echo("") click.echo(message.format(*args, **kwargs), file=filehandler) + if newlines: + click.echo("") except IndexError: raise ValueError( "Number of placeholders do not correspond to the number of curly brackets " @@ -108,6 +111,12 @@ def warn(message, *args, **kwargs): console_wrapper(message, prefix=prefix, args=args, **kwargs) +def success(message, *args, **kwargs): + """Output a success message and exit the script.""" + console_wrapper(message, args=args, **kwargs) + sys.exit(0) + + def error(message, *args, **kwargs): """Output an error to the users console and stop execution of the script.""" prefix = click.style("ERROR", fg="red", bold=True) diff --git a/mdbenchmark/ext/__init__.py b/mdbenchmark/ext/__init__.py index e69de29b..0ef20097 100644 --- a/mdbenchmark/ext/__init__.py +++ b/mdbenchmark/ext/__init__.py @@ -0,0 +1,19 @@ +# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 +# +# MDBenchmark +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors +# (see the file AUTHORS for the full list of names) +# +# MDBenchmark is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# MDBenchmark is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with MDBenchmark. If not, see . diff --git a/mdbenchmark/ext/cadishi.py b/mdbenchmark/ext/cadishi.py deleted file mode 100644 index ba131123..00000000 --- a/mdbenchmark/ext/cadishi.py +++ /dev/null @@ -1,30 +0,0 @@ -# Functions from this file are copied from cadishi. See copyright notice below. -# -# Copyright (c) Klaus Reuter, Juergen Koefinger -# See the file AUTHORS.rst for the full list of contributors. -# -# Released under the MIT License, see the file LICENSE.txt. - - -def _cat_proc_cpuinfo_grep_query_sort_uniq(query): - """Determine the number of unique lines in /proc/cpuinfo - - Parameters - ---------- - string : query - string the lines to be searched for shall begin with - - Returns - ------- - set - unique lines in /proc/cpuinfo that begin with query - - May throw an IOError exception in case /proc/cpuinfo does not exist. 
- """ - items_seen = set() - with open("/proc/cpuinfo") as fp: - for line_raw in fp: - if line_raw.startswith(query): - line = line_raw.replace("\t", "").strip("\n") - items_seen.add(line) - return items_seen diff --git a/mdbenchmark/math.py b/mdbenchmark/math.py new file mode 100644 index 00000000..f4cf635e --- /dev/null +++ b/mdbenchmark/math.py @@ -0,0 +1,33 @@ +# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 +# +# MDBenchmark +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors +# (see the file AUTHORS for the full list of names) +# +# MDBenchmark is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# MDBenchmark is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with MDBenchmark. 
If not, see +import numpy as np + + +def lin_func(x, m, b): + return m * x + b + + +def calc_slope_intercept(p1, p2): + p1 = np.asarray(p1) + p2 = np.asarray(p2) + diff = p1 - p2 + slope = diff[1] / diff[0] + intercept = p1[1] - (p1[0] * slope) + return np.hstack([slope, intercept]) diff --git a/mdbenchmark/mdengines/__init__.py b/mdbenchmark/mdengines/__init__.py index 64960fa8..cdc876c7 100644 --- a/mdbenchmark/mdengines/__init__.py +++ b/mdbenchmark/mdengines/__init__.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -17,8 +17,6 @@ # # You should have received a copy of the GNU General Public License # along with MDBenchmark. If not, see . -import six - import os from collections import defaultdict @@ -40,7 +38,7 @@ def detect_md_engine(modulename): supported. """ - for name, engine in six.iteritems(SUPPORTED_ENGINES): + for name, engine in SUPPORTED_ENGINES.items(): if name in modulename.lower(): return engine @@ -89,7 +87,7 @@ def get_available_modules(): # Go through the directory structure and grab all version of modules that we support. for paths in MODULE_PATHS.split(":"): - for path, subdirs, files in os.walk(paths): + for path, _, files in os.walk(paths): for mdengine in SUPPORTED_ENGINES: if mdengine in path: for name in files: @@ -120,7 +118,9 @@ def normalize_modules(modules, skip_validation): if detect_md_engine(engine_name) is None: console.error( "There is currently no support for '{}'. 
" - "Supported MD engines are: gromacs, namd.", + + "Supported MD engines are: {}.".format( + ", ".join(sorted(SUPPORTED_ENGINES.keys())) + ), engine_name, ) diff --git a/mdbenchmark/mdengines/gromacs.py b/mdbenchmark/mdengines/gromacs.py index 86d019fb..1d607178 100644 --- a/mdbenchmark/mdengines/gromacs.py +++ b/mdbenchmark/mdengines/gromacs.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -17,22 +17,19 @@ # # You should have received a copy of the GNU General Public License # along with MDBenchmark. If not, see . -from __future__ import absolute_import - import os -import re -from glob import glob +import string from shutil import copyfile -import numpy as np - from mdbenchmark import console NAME = "gromacs" +LOWERCASE_LETTERS = string.ascii_lowercase + def prepare_benchmark(name, relative_path, *args, **kwargs): - sim = kwargs["sim"] + benchmark = kwargs["benchmark"] full_filename = name + ".tpr" if name.endswith(".tpr"): @@ -41,11 +38,27 @@ def prepare_benchmark(name, relative_path, *args, **kwargs): filepath = os.path.join(relative_path, full_filename) - copyfile(filepath, sim[full_filename].relpath) + if kwargs["multidir"] == 1: + copyfile(filepath, benchmark[full_filename].relpath) + else: + for i in range(kwargs["multidir"]): + subdir = benchmark[LOWERCASE_LETTERS[i] + "/" + full_filename].make() + copyfile(filepath, subdir.relpath) return name +def prepare_multidir(multidir): + multidir_string = "" + + if multidir != 1: + multidir_string = "-multidir" + for i in range(multidir): + multidir_string += " " + LOWERCASE_LETTERS[i] + + return multidir_string + + def check_input_file_exists(name): """Check if the TPR file 
exists.""" fn = name diff --git a/mdbenchmark/mdengines/namd.py b/mdbenchmark/mdengines/namd.py index 22d57b71..a074fa8e 100644 --- a/mdbenchmark/mdengines/namd.py +++ b/mdbenchmark/mdengines/namd.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -18,19 +18,18 @@ # You should have received a copy of the GNU General Public License # along with MDBenchmark. If not, see . import os -import re -from glob import glob from shutil import copyfile -import numpy as np - from mdbenchmark import console NAME = "namd" def prepare_benchmark(name, relative_path, *args, **kwargs): - sim = kwargs["sim"] + benchmark = kwargs["benchmark"] + + if not kwargs["multidir"] == 1: + console.error("The NAMD-engine currently only supports '--multidir 1'") if name.endswith(".namd"): name = name[:-5] @@ -47,13 +46,17 @@ def prepare_benchmark(name, relative_path, *args, **kwargs): analyze_namd_file(fh) fh.seek(0) - copyfile(namd_relpath, sim[namd].relpath) - copyfile(psf_relpath, sim[psf].relpath) - copyfile(pdb_relpath, sim[pdb].relpath) + copyfile(namd_relpath, benchmark[namd].relpath) + copyfile(psf_relpath, benchmark[psf].relpath) + copyfile(pdb_relpath, benchmark[pdb].relpath) return name +def prepare_multidir(multidir): + return None + + def analyze_namd_file(fh): """ Check whether the NAMD config file has any relative imports or variables """ diff --git a/mdbenchmark/mdengines/utils.py b/mdbenchmark/mdengines/utils.py index b66f786b..7a940f5f 100644 --- a/mdbenchmark/mdengines/utils.py +++ b/mdbenchmark/mdengines/utils.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The 
MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -20,14 +20,10 @@ import os import re from glob import glob -from shutil import copyfile import datreant as dtr import numpy as np -from mdbenchmark import console -from mdbenchmark.mdengines.namd import analyze_namd_file - FILES_TO_KEEP = { "gromacs": [".*/bench.job", ".*.tpr", ".*.mdp"], "namd": [".*/bench.job", ".*.namd", ".*.psf", ".*.pdb"], @@ -39,7 +35,7 @@ "performance_return": lambda line: float(line.split()[1]), "ncores": "Running on", "ncores_return": lambda line: int(line.split()[6]), - "analyze": "[!#]*log*", + "analyze": "**/[!#]*log*", }, "namd": { "performance": "Benchmark time", @@ -95,39 +91,71 @@ def parse_ncores(engine, fh): return np.nan -def analyze_run(engine, sim): +def analyze_benchmark(engine, benchmark): """ Analyze performance data from a simulation run with any MD engine. 
""" - ns_day = np.nan + performance = np.nan ncores = np.nan + ranks = np.nan + threads = np.nan + hyperthreading = np.nan + module = None + multidir = np.nan # search all output files - output_files = glob(os.path.join(sim.relpath, PARSE_ENGINE[engine.NAME]["analyze"])) + output_files = glob( + os.path.join(benchmark.relpath, PARSE_ENGINE[engine.NAME]["analyze"]), + recursive=True, + ) if output_files: - with open(output_files[0]) as fh: - ns_day = parse_ns_day(engine, fh) - fh.seek(0) - ncores = parse_ncores(engine, fh) - - # Backward compatibility to benchmark systems created with older versions - # of MDBenchmark - if "time" not in sim.categories: - sim.categories["time"] = 0 - if "module" in sim.categories: - module = sim.categories["module"] - else: - module = sim.categories["version"] - - return ( + performance = [] + ncores = [] + for f in output_files: + with open(f) as fh: + performance.append(parse_ns_day(engine, fh)) + fh.seek(0) + ncores.append(parse_ncores(engine, fh)) + performance = np.sum(performance) + ncores = ncores[0] + + if "time" not in benchmark.categories: + benchmark.categories["time"] = 0 + + if "multidir" in benchmark.categories: + multidir = benchmark.categories["multidir"] + + # Backwards compatibility to version <2 + if "module" not in benchmark.categories and "version" in benchmark.categories: + module = benchmark.categories["version"] + + # Version >=2,<=3 + if "module" in benchmark.categories: + module = benchmark.categories["module"] + + # Version >=3 + if ( + "version" in benchmark.categories + and isinstance(benchmark.categories["version"], int) + and benchmark.categories["version"] >= 3 + ): + ranks = benchmark.categories["ranks"] + threads = benchmark.categories["threads"] + hyperthreading = benchmark.categories["hyperthreading"] + + return [ module, - sim.categories["nodes"], - ns_day, - sim.categories["time"], - sim.categories["gpu"], - sim.categories["host"], + benchmark.categories["nodes"], + performance, + 
benchmark.categories["time"], + benchmark.categories["gpu"], + benchmark.categories["host"], ncores, - ) + ranks, + threads, + hyperthreading, + multidir, + ] def cleanup_before_restart(engine, sim): @@ -162,18 +190,34 @@ def write_benchmark( job_name, host, time, + number_of_ranks, + number_of_threads, + hyperthreading, + multidir, ): - """Generate a benchmark folder with the respective Sim object.""" + """Generate a benchmark folder with the respective Benchmark object.""" # Create the `dtr.Treant` object - sim = dtr.Treant(base_directory["{}/".format(nodes)]) + hyperthreading_string = "wht" if hyperthreading else "woht" + directory = base_directory[ + "n{nodes:03d}_r{ranks:02d}_t{threads:02d}_{ht}_nsim{nsim:01d}/".format( + nodes=nodes, + ranks=number_of_ranks, + threads=number_of_threads, + ht=hyperthreading_string, + nsim=multidir, + ) + ] + benchmark = dtr.Treant(directory) # Do MD engine specific things. Here we also format the name. - name = engine.prepare_benchmark(name=name, relative_path=relative_path, sim=sim) + name = engine.prepare_benchmark( + name=name, relative_path=relative_path, benchmark=benchmark, multidir=multidir + ) if job_name is None: job_name = name - # Add categories to the `Sim` object - sim.categories = { + # Add categories as metadata + benchmark.categories = { "module": module, "gpu": gpu, "nodes": nodes, @@ -181,12 +225,20 @@ def write_benchmark( "time": time, "name": name, "started": False, + "ranks": number_of_ranks, + "threads": number_of_threads, + "hyperthreading": hyperthreading, + "version": 3, + "multidir": multidir, } # Add some time buffer to the requested time. 
Otherwise the queuing system # kills the job before the benchmark is finished formatted_time = "{:02d}:{:02d}:00".format(*divmod(time + 5, 60)) + # get engine specific multidir template replacement + multidir_string = engine.prepare_multidir(multidir) + # Create benchmark job script script = template.render( name=name, @@ -197,8 +249,12 @@ def write_benchmark( n_nodes=nodes, time=time, formatted_time=formatted_time, + number_of_ranks=number_of_ranks, + number_of_threads=number_of_threads, + hyperthreading=hyperthreading, + multidir=multidir_string, ) # Write the actual job script that is going to be submitted to the cluster - with open(sim["bench.job"].relpath, "w") as fh: + with open(benchmark["bench.job"].relpath, "w") as fh: fh.write(script) diff --git a/mdbenchmark/migrations/__init__.py b/mdbenchmark/migrations/__init__.py index e69de29b..5db5015c 100644 --- a/mdbenchmark/migrations/__init__.py +++ b/mdbenchmark/migrations/__init__.py @@ -0,0 +1,19 @@ +# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 +# +# MDBenchmark +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors +# (see the file AUTHORS for the full list of names) +# +# MDBenchmark is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# MDBenchmark is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with MDBenchmark. 
If not, see diff --git a/mdbenchmark/migrations/mds_to_dtr.py b/mdbenchmark/migrations/mds_to_dtr.py index f7c1bec8..0b6fb5bc 100644 --- a/mdbenchmark/migrations/mds_to_dtr.py +++ b/mdbenchmark/migrations/mds_to_dtr.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -40,19 +40,19 @@ def ensure_correct_environment(): import datreant as dtr try: - version = dtr.__version__ # noqa: F841 + version = dtr.__version__ # noqa: F401,F841 except AttributeError: old_packages.append("datreant.core") try: - import datreant.data + import datreant.data # noqa: F401 old_packages.append("datreant.data") except ImportError: pass try: - import mdsynthesis + import mdsynthesis # noqa: F401 old_packages.append("mdsynthesis") except ImportError: @@ -65,7 +65,7 @@ def ensure_correct_environment(): def search_mdsynthesis_sim_files(folder): """Search for `Sim.*.json` files generated by `mdsynthesis`.""" bundles = [] - for root, dirnames, filenames in os.walk(folder): + for root, _, filenames in os.walk(folder): for filename in fnmatch.filter(filenames, "Sim*"): bundles.append(os.path.join(root, filename)) diff --git a/mdbenchmark/models.py b/mdbenchmark/models.py new file mode 100644 index 00000000..1e7a294d --- /dev/null +++ b/mdbenchmark/models.py @@ -0,0 +1,95 @@ +# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 +# +# MDBenchmark +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors +# (see the file AUTHORS for the full list of names) +# +# MDBenchmark is free software: you can redistribute it and/or modify +# it under the terms of the 
GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# MDBenchmark is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with MDBenchmark. If not, see +from psutil import cpu_count + + +class Processor: + """Representation of all CPUs of a single node. + + We use the singular "processor", instead of "processors", even if a node has + multiple CPU sockets. + """ + + def __init__(self, physical_cores=0, logical_cores=0): + self.physical_cores = physical_cores + self.logical_cores = logical_cores + + if self.physical_cores == 0 or self.logical_cores == 0: + self._set_number_of_available_cores() + + def __repr__(self): + return f"" + + def _set_number_of_available_cores(self): + """Deterine the number of physical and logical cores with `psutil.cpu_count()`.""" + self.physical_cores = cpu_count(logical=False) + self.logical_cores = cpu_count(logical=True) + + @property + def _get_number_of_available_cores(self): + """Return the number of available cores. + + If hyperthreading is supported, this returns the number of logical cores. In the other case + it is the number of physical cores.""" + return ( + self.logical_cores if self.supports_hyperthreading else self.physical_cores + ) + + @property + def supports_hyperthreading(self): + """Return True if the CPU supports hyperthreading.""" + return self.physical_cores == self.logical_cores // 2 + + def number_of_ranks_is_valid(self, number_of_ranks): + """Validate the input for the number of ranks.""" + # Number of ranks must be equal or bigger than 1. 
+ if number_of_ranks < 1: + return False + + remainder = self._get_number_of_available_cores / number_of_ranks + + # The remainder must be equal or bigger than 1. + if remainder < 1: + return False + + # All cores are used up by the ranks. This is not a valid setting with + # hyperthreading. + if self.supports_hyperthreading and remainder < 2: + return False + + return True + + def get_ranks_and_threads(self, number_of_ranks, with_hyperthreading=True): + """Compute the number of OpenMP threads that we can use with number_of_ranks.""" + if not self.number_of_ranks_is_valid(number_of_ranks): + raise ValueError( + f"The number of ranks ({number_of_ranks}) is not a valid value on this system!" + ) + + number_of_threads = self._get_number_of_available_cores // number_of_ranks + + if ( + not with_hyperthreading + and number_of_threads != 1 + and not self.physical_cores == self.logical_cores + ): + number_of_threads //= 2 + + return (number_of_ranks, number_of_threads) diff --git a/mdbenchmark/templates/cobra b/mdbenchmark/templates/cobra index a0e6b8b7..21bbbd1d 100644 --- a/mdbenchmark/templates/cobra +++ b/mdbenchmark/templates/cobra @@ -7,30 +7,22 @@ # Job Name: #SBATCH -J {{ job_name }} # -# Queue (Partition): {%- if gpu %} -#SBATCH --partition=gpu +# Request two RTX5000 per node #SBATCH --constraint="gpu" -#SBATCH --gres=gpu:2 -{%- elif (time <= 30) and (n_nodes <= 32) %} -#SBATCH --partition=express -{%- elif (n_nodes <= 32) %} -#SBATCH --partition=medium -{%- elif (n_nodes <= 64) %} -#SBATCH --partition=n0064 -{%- elif (n_nodes <= 128) %} -#SBATCH --partition=n0128 -{%- elif (n_nodes <= 265) %} -#SBATCH --partition=n0265 -{%- elif (n_nodes <= 512) %} -#SBATCH --partition=n0512 -{%- else %} -#SBATCH --partition=n0620 +#SBATCH --gres=gpu:rtx5000:2 {%- endif %} # -# Number of nodes and MPI tasks per node: +# Request {{ n_nodes }} node(s) #SBATCH --nodes={{ n_nodes }} -#SBATCH --ntasks-per-node=40 +# Set the number of tasks per node (=MPI ranks) +#SBATCH 
--ntasks-per-node={{ number_of_ranks }} +# Set the number of threads per rank (=OpenMP threads) +#SBATCH --cpus-per-task={{ number_of_threads }} +{% if hyperthreading %} +# Enable hyperthreading +#SBATCH --ntasks-per-core=2 +{%- endif %} # Wall clock limit: #SBATCH --time={{ formatted_time }} @@ -40,9 +32,17 @@ module load impi module load cuda module load {{ module }} +export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK +{% if hyperthreading %} +export OMP_PLACES=threads +export SLURM_HINT=multithread +{%- else %} +export OMP_PLACES=cores +{%- endif %} + # Run {{ module }} for {{ time }} minutes {%- if mdengine == "gromacs" %} -srun gmx_mpi mdrun -v -maxh {{ time / 60 }} -deffnm {{ name }} +srun gmx_mpi mdrun {{ multidir }} -v -ntomp $OMP_NUM_THREADS -maxh {{ time / 60 }} -resethway -deffnm {{ name }} -noconfout {%- elif mdengine == "namd" %} srun namd2 {{ name }}.namd {%- endif %} diff --git a/mdbenchmark/templates/draco b/mdbenchmark/templates/draco index 6814eb01..2c0d7b58 100644 --- a/mdbenchmark/templates/draco +++ b/mdbenchmark/templates/draco @@ -10,9 +10,8 @@ # Queue (Partition): {%- if gpu %} #SBATCH --partition=gpu -#SBATCH --constraint='gpu' +#SBATCH --constraint="gpu" #SBATCH --gres=gpu:2 - {%- else %} {%- if time is lessthan 30 or time is equalto 30 %} #SBATCH --partition=express @@ -23,9 +22,16 @@ {%- endif %} {%- endif %} # -# Number of nodes and MPI tasks per node: +# Request {{ n_nodes }} node(s) #SBATCH --nodes={{ n_nodes }} -#SBATCH --ntasks-per-node=32 +# Set the number of tasks per node (=MPI ranks) +#SBATCH --ntasks-per-node={{ number_of_ranks }} +# Set the number of threads per rank (=OpenMP threads) +#SBATCH --cpus-per-task={{ number_of_threads }} +{% if hyperthreading %} +# Enable hyperthreading +#SBATCH --ntasks-per-core=2 +{%- endif %} # Wall clock limit: #SBATCH --time={{ formatted_time }} @@ -35,9 +41,17 @@ module load impi module load cuda module load {{ module }} +export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK +{% if hyperthreading %} +export 
OMP_PLACES=threads +export SLURM_HINT=multithread +{%- else %} +export OMP_PLACES=cores +{%- endif %} + # Run {{ module }} for {{ time }} minutes {%- if mdengine == "gromacs" %} -srun gmx_mpi mdrun -v -maxh {{ time / 60 }} -deffnm {{ name }} +srun gmx_mpi mdrun -v -ntomp $OMP_NUM_THREADS -maxh {{ time / 60 }} -resethway -deffnm {{ name }} -noconfout {%- elif mdengine == "namd" %} srun namd2 {{ name }}.namd {%- endif %} diff --git a/mdbenchmark/tests/__init__.py b/mdbenchmark/tests/__init__.py index e69de29b..5db5015c 100644 --- a/mdbenchmark/tests/__init__.py +++ b/mdbenchmark/tests/__init__.py @@ -0,0 +1,19 @@ +# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 +# +# MDBenchmark +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors +# (see the file AUTHORS for the full list of names) +# +# MDBenchmark is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# MDBenchmark is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with MDBenchmark. 
If not, see diff --git a/mdbenchmark/testing.py b/mdbenchmark/tests/conftest.py similarity index 94% rename from mdbenchmark/testing.py rename to mdbenchmark/tests/conftest.py index 3dccaca0..d31bba64 100644 --- a/mdbenchmark/testing.py +++ b/mdbenchmark/tests/conftest.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -17,12 +17,12 @@ # # You should have received a copy of the GNU General Public License # along with MDBenchmark. If not, see . -from __future__ import absolute_import - from os.path import exists, isfile, join as pjoin import pytest +from mdbenchmark.ext.click_test import cli_runner # noqa: F401 + class TestDataDir(object): """ diff --git a/mdbenchmark/tests/data/analyze-files-gromacs-consolidated.csv b/mdbenchmark/tests/data/analyze-files-gromacs-consolidated.csv index 19fac903..80ca1a4e 100644 --- a/mdbenchmark/tests/data/analyze-files-gromacs-consolidated.csv +++ b/mdbenchmark/tests/data/analyze-files-gromacs-consolidated.csv @@ -1,2 +1,2 @@ -,module,nodes,run time [min],host,gpu -0,gromacs/2016.3,1-5,15,draco,False +module,nodes,time,gpu,host,ncores,number_of_ranks,number_of_threads,hyperthreading +gromacs/2016.3,1-5,15,False,draco,32,,,, diff --git a/mdbenchmark/tests/data/analyze-files-gromacs-prompt.csv b/mdbenchmark/tests/data/analyze-files-gromacs-prompt.csv deleted file mode 100644 index 3cc1c729..00000000 --- a/mdbenchmark/tests/data/analyze-files-gromacs-prompt.csv +++ /dev/null @@ -1,2 +0,0 @@ -,module,nodes,run time [min],host,gpu -0,gromacs/2016.3,1,15,draco,False diff --git a/mdbenchmark/tests/data/analyze-files-gromacs.csv b/mdbenchmark/tests/data/analyze-files-gromacs.csv index 8b7b9da5..c80aa354 
100644 --- a/mdbenchmark/tests/data/analyze-files-gromacs.csv +++ b/mdbenchmark/tests/data/analyze-files-gromacs.csv @@ -1,6 +1,6 @@ -module,nodes,ns/day,run time [min],gpu,host,ncores -gromacs/2016.3,1,98.147,15,False,draco,32 -gromacs/2016.3,2,178.044,15,False,draco,64 -gromacs/2016.3,3,226.108,15,False,draco,96 -gromacs/2016.3,4,246.973,15,False,draco,128 -gromacs/2016.3,5,254.266,15,False,draco,160 +module,nodes,ns/day,time,gpu,host,ncores,number_of_ranks,number_of_threads,hyperthreading,version +gromacs/2016.3,1,98.147,15,False,draco,32,,,,2 +gromacs/2016.3,2,178.044,15,False,draco,64,,,,2 +gromacs/2016.3,3,226.108,15,False,draco,96,,,,2 +gromacs/2016.3,4,246.973,15,False,draco,128,,,,2 +gromacs/2016.3,5,254.266,15,False,draco,160,,,,2 diff --git a/mdbenchmark/tests/data/gromacs/test_prompt.csv b/mdbenchmark/tests/data/gromacs/test_prompt.csv new file mode 100644 index 00000000..c0a56c19 --- /dev/null +++ b/mdbenchmark/tests/data/gromacs/test_prompt.csv @@ -0,0 +1,2 @@ +,module,nodes,run time [min],gpu,host,ranks,threads,hyperthreading +0,gromacs/2016.3,1,15,False,draco,40,1,False diff --git a/mdbenchmark/tests/mdengines/test_gromacs.py b/mdbenchmark/tests/mdengines/test_gromacs.py index 20c23686..31555161 100644 --- a/mdbenchmark/tests/mdengines/test_gromacs.py +++ b/mdbenchmark/tests/mdengines/test_gromacs.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -17,7 +17,7 @@ # # You should have received a copy of the GNU General Public License # along with MDBenchmark. If not, see . 
-from six.moves import StringIO +from io import StringIO import datreant as dtr import numpy as np @@ -81,8 +81,8 @@ def sim_old(tmpdir_factory): ) -def test_analyze_run(sim): - res = utils.analyze_run(gromacs, sim) +def test_analyze_benchmark(sim): + res = utils.analyze_benchmark(gromacs, sim) assert res[0] == "gromacs/5.1.4" # version assert res[1] == 42 # nodes assert np.isnan(res[2]) # ns_day @@ -92,8 +92,8 @@ def test_analyze_run(sim): assert np.isnan(res[6]) # ncores -def test_analyze_run_backward_compatibility(sim_old): - res = utils.analyze_run(gromacs, sim_old) +def test_analyze_benchmark_backward_compatibility(sim_old): + res = utils.analyze_benchmark(gromacs, sim_old) assert res[0] == "5.1.4" # version assert res[1] == 42 # nodes assert np.isnan(res[2]) # ns_day @@ -104,12 +104,13 @@ def test_analyze_run_backward_compatibility(sim_old): @pytest.mark.parametrize("input_name", ["md", "md.tpr"]) +@pytest.mark.skip() def test_check_file_extension(capsys, input_name, tmpdir): """Test that we check for all files needed to run GROMACS benchmarks.""" output = "ERROR File md.tpr does not exist, but is needed for GROMACS benchmarks.\n" with pytest.raises(SystemExit) as e: gromacs.check_input_file_exists(input_name) - out, err = capsys.readouterr() + out, _ = capsys.readouterr() assert e.type == SystemExit assert e.code == 1 assert out == output diff --git a/mdbenchmark/tests/mdengines/test_init.py b/mdbenchmark/tests/mdengines/test_init.py index 0ec6e83d..21163e65 100644 --- a/mdbenchmark/tests/mdengines/test_init.py +++ b/mdbenchmark/tests/mdengines/test_init.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -22,7 +22,6 @@ import 
pytest from mdbenchmark import cli -from mdbenchmark.ext.click_test import cli_runner from mdbenchmark.mdengines import ( detect_md_engine, get_available_modules, @@ -94,12 +93,12 @@ def test_normalize_modules(capsys, monkeypatch, tmpdir): """Test that normalize modules works as expected.""" # Test the warning when we skip the validation normalize_modules(modules=["gromacs/2016.4"], skip_validation=True) - out, err = capsys.readouterr() + out, _ = capsys.readouterr() assert out == "WARNING Not performing module name validation.\n" # Test the warning when we do not skip the validation normalize_modules(modules=["gromacs/2016.4"], skip_validation=False) - out, err = capsys.readouterr() + out, _ = capsys.readouterr() assert ( out == "WARNING Cannot locate modules available on this host. " "Not performing module name validation.\n" @@ -162,7 +161,7 @@ def test_validation(capsys, monkeypatch, tmpdir): output = "ERROR We were not able to determine the module name.\n" with pytest.raises(SystemExit) as e: validate_module_name("wrong=format") - out, err = capsys.readouterr() + out, _ = capsys.readouterr() assert e.type == SystemExit assert e.code == 1 diff --git a/mdbenchmark/tests/mdengines/test_namd.py b/mdbenchmark/tests/mdengines/test_namd.py index 7e48865f..7859a603 100644 --- a/mdbenchmark/tests/mdengines/test_namd.py +++ b/mdbenchmark/tests/mdengines/test_namd.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -17,7 +17,7 @@ # # You should have received a copy of the GNU General Public License # along with MDBenchmark. If not, see . 
-from six.moves import StringIO +from io import StringIO import datreant as dtr import numpy as np @@ -66,8 +66,8 @@ def sim(tmpdir_factory): ) -def test_analyze_run(sim): - res = utils.analyze_run(namd, sim) +def test_analyze_benchmark(sim): + res = utils.analyze_benchmark(namd, sim) assert res[0] == "namd/11" # version assert res[1] == 42 # nodes assert np.isnan(res[2]) # ns_day @@ -84,7 +84,7 @@ def test_check_file_extension(capsys, input_file, tmpdir): output = "ERROR File md.namd does not exist, but is needed for NAMD benchmarks.\n" with pytest.raises(SystemExit) as e: namd.check_input_file_exists(input_file) - out, err = capsys.readouterr() + out, _ = capsys.readouterr() assert e.type == SystemExit assert e.code == 1 assert out == output @@ -143,11 +143,11 @@ def test_analyze_namd_file( if exit_exception: with pytest.raises(exit_exception): namd.analyze_namd_file(fh) - out, err = capsys.readouterr() + out, _ = capsys.readouterr() assert out.type == exit_exception assert out.code == exit_code assert out == output else: namd.analyze_namd_file(fh) - out, err = capsys.readouterr() + out, _ = capsys.readouterr() assert out == output diff --git a/mdbenchmark/tests/mdengines/test_utils.py b/mdbenchmark/tests/mdengines/test_utils.py index ddded05c..c1676bde 100644 --- a/mdbenchmark/tests/mdengines/test_utils.py +++ b/mdbenchmark/tests/mdengines/test_utils.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -48,7 +48,11 @@ def test_prepare_benchmark(engine, input_name, extensions, tmpdir): relative_path, filename = os.path.split(input_name) sim = dtr.Treant("./{}".format(engine)) name = engine.prepare_benchmark( - name=filename, 
relative_path=relative_path, sim=sim + name=filename, + relative_path=relative_path, + sim=sim, + benchmark=sim, + multidir=1, ) assert name == "md" @@ -67,7 +71,7 @@ def test_prepare_benchmark(engine, input_name, extensions, tmpdir): def test_write_benchmark(engine, gpu, job_name, module, input_name, extensions, tmpdir): """Test that the write_benchmark works as expected.""" host = "draco" - base_dirname = "{}_{}".format(host, engine) + base_dirname = "{}_{}".format(host, engine.NAME) nodes = 5 with tmpdir.as_cwd(): base_directory = dtr.Tree(base_dirname) @@ -88,18 +92,21 @@ def test_write_benchmark(engine, gpu, job_name, module, input_name, extensions, relative_path=".", host=host, time=15, + number_of_ranks=40, + number_of_threads=1, + hyperthreading=False, + multidir=1, ) expected_job_name = "md" if job_name is None else job_name + folder_name = "n{nodes:03d}_r{ranks:02d}_t{threads:02d}_{ht}_nsim{nsim:01d}/".format( + nodes=nodes, ranks=40, threads=1, ht="woht", nsim=1, + ) assert os.path.exists(base_dirname) - assert os.path.exists( - os.path.join(base_dirname, "{}".format(nodes), input_name) - ) + assert os.path.exists(os.path.join(base_dirname, folder_name, input_name,)) - with open( - os.path.join(base_dirname, "{}".format(nodes), "bench.job"), "r" - ) as f: + with open(os.path.join(base_dirname, folder_name, "bench.job"), "r") as f: for line in f: if "#SBATCH -J" in line: assert line == "#SBATCH -J {}\n".format(expected_job_name) diff --git a/mdbenchmark/tests/migrations/test_mds_to_dtr.py b/mdbenchmark/tests/migrations/test_mds_to_dtr.py index 1fd2dfb7..016e5a42 100644 --- a/mdbenchmark/tests/migrations/test_mds_to_dtr.py +++ b/mdbenchmark/tests/migrations/test_mds_to_dtr.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS 
for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -20,7 +20,6 @@ import os import uuid -import datreant as dtr import pytest from mdbenchmark.migrations import mds_to_dtr @@ -111,13 +110,13 @@ def test_convert_to_datreant(create_sim_files): def test_migrate_to_datreant(tmpdir, capsys, create_sim_files): - directory, files = create_sim_files + directory, _ = create_sim_files with tmpdir.as_cwd(): assert mds_to_dtr.migrate_to_datreant(".") is None mds_to_dtr.migrate_to_datreant(str(directory)) - out, err = capsys.readouterr() + out, _ = capsys.readouterr() output = ( "Converting old benchmark metadata to new format!\n" diff --git a/mdbenchmark/tests/test_analyze.py b/mdbenchmark/tests/test_analyze.py index b9d3c8e3..23bb15ac 100644 --- a/mdbenchmark/tests/test_analyze.py +++ b/mdbenchmark/tests/test_analyze.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -17,19 +17,16 @@ # # You should have received a copy of the GNU General Public License # along with MDBenchmark. If not, see . 
-import os - import datreant as dtr import numpy as np import pandas as pd from mdbenchmark import cli -from mdbenchmark.ext.click_test import cli_runner -from mdbenchmark.testing import data, datafiles -from mdbenchmark.utils import ConsolidateDataFrame, DataFrameFromBundle, PrintDataFrame +from mdbenchmark.utils import map_columns, parse_bundle, print_dataframe +from mdbenchmark.versions import Version2Categories -def test_analyze_gromacs(cli_runner, tmpdir, data): +def test_analyze_gromacs(cli_runner, tmpdir, capsys, data): """Test that the output is OK when all outputs are fine.""" with tmpdir.as_cwd(): @@ -38,26 +35,44 @@ def test_analyze_gromacs(cli_runner, tmpdir, data): ) df = pd.read_csv(data["analyze-files-gromacs.csv"]) - test_output = PrintDataFrame(df, False) + "\n" + df = df.iloc[:, :-1] + df = df.replace(np.nan, "?") + version = Version2Categories() + print_dataframe( + df, columns=map_columns(version.category_mapping, version.analyze_printing), + ) + + out, _ = capsys.readouterr() + out = "Setting up...\n" + out assert result.exit_code == 0 - assert result.output == test_output + assert result.output == "\n".join(out.split("\n")) -def test_analyze_namd(cli_runner, tmpdir, data): +def test_analyze_namd(cli_runner, tmpdir, capsys, data): with tmpdir.as_cwd(): result = cli_runner.invoke( cli, ["analyze", "--directory={}".format(data["analyze-files-namd"])] ) - + version = Version2Categories() bundle = dtr.discover(data["analyze-files-namd"]) - df = DataFrameFromBundle(bundle) - test_output = PrintDataFrame(df, False) + "\n" + df = parse_bundle( + bundle, + columns=version.analyze_categories, + sort_values_by=version.analyze_sort, + ) + df = df.iloc[:, :-1] + df = df.replace(np.nan, "?") + print_dataframe( + df, columns=map_columns(version.category_mapping, version.analyze_printing), + ) + out, _ = capsys.readouterr() + out = "Setting up...\n" + out assert result.exit_code == 0 - assert result.output == test_output + assert result.output == 
"\n".join(out.split("\n")) -def test_analyze_with_errors(cli_runner, tmpdir, data): +def test_analyze_with_errors(cli_runner, tmpdir, capsys, data): """Test that we warn the user of errors in the output files. Also test that we show a question mark instead of a float in the corresponding cell. """ @@ -66,34 +81,23 @@ def test_analyze_with_errors(cli_runner, tmpdir, data): result = cli_runner.invoke( cli, ["analyze", "--directory={}".format(data["analyze-files-w-errors"])] ) - + version = Version2Categories() bundle = dtr.discover(data["analyze-files-w-errors"]) - df = DataFrameFromBundle(bundle) + df = parse_bundle( + bundle, + columns=version.analyze_categories, + sort_values_by=version.analyze_sort, + ) + df = df.iloc[:, :-1] df = df.replace(np.nan, "?") - test_output = PrintDataFrame(df, False) + "\n" - - assert result.exit_code == 0 - assert result.output == test_output - - -def test_analyze_plot(cli_runner, tmpdir, data): - with tmpdir.as_cwd(): - - result = cli_runner.invoke( - cli, - [ - "analyze", - "--directory={}".format(data["analyze-files-gromacs"], "--plot"), - ], + print_dataframe( + df, columns=map_columns(version.category_mapping, version.analyze_printing), ) - bundle = dtr.discover(data["analyze-files-gromacs"]) - df = DataFrameFromBundle(bundle) - test_output = PrintDataFrame(df, False) + "\n" - + out, _ = capsys.readouterr() + out = "Setting up...\n" + out assert result.exit_code == 0 - assert result.output == test_output - os.path.isfile("runtimes.pdf") + assert result.output == "\n".join(out.split("\n")) def test_analyze_console_messages(cli_runner, tmpdir): @@ -101,6 +105,6 @@ def test_analyze_console_messages(cli_runner, tmpdir): with tmpdir.as_cwd(): # Test error message if the TPR file does not exist result = cli_runner.invoke(cli, ["analyze", "--directory=look_here/"]) - output = "ERROR There is no data for the given path.\n" + output = "Setting up...\nERROR There is no data for the given path.\n" assert result.exit_code == 1 assert 
result.output == output diff --git a/mdbenchmark/tests/test_cli.py b/mdbenchmark/tests/test_cli.py index 99e28171..7e23d6a7 100644 --- a/mdbenchmark/tests/test_cli.py +++ b/mdbenchmark/tests/test_cli.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -19,7 +19,6 @@ # along with MDBenchmark. If not, see . from mdbenchmark import cli -from mdbenchmark.ext.click_test import cli_runner def test_aliasedgroup_unknown_command(cli_runner): @@ -28,7 +27,7 @@ def test_aliasedgroup_unknown_command(cli_runner): assert result.exit_code == 2 output = ( "Usage: cli [OPTIONS] COMMAND [ARGS]...\n" - 'Try "cli --help" for help.\n\n' + "Try 'cli --help' for help.\n\n" "Error: Sub command unknown: unknown_command\n" ) assert result.output == output diff --git a/mdbenchmark/tests/test_console.py b/mdbenchmark/tests/test_console.py index f2e604ba..78ce5932 100644 --- a/mdbenchmark/tests/test_console.py +++ b/mdbenchmark/tests/test_console.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -17,7 +17,7 @@ # # You should have received a copy of the GNU General Public License # along with MDBenchmark. If not, see . 
-from six import StringIO +from io import StringIO import pytest diff --git a/mdbenchmark/tests/test_testing.py b/mdbenchmark/tests/test_fixtures.py similarity index 90% rename from mdbenchmark/tests/test_testing.py rename to mdbenchmark/tests/test_fixtures.py index ddb9c94a..32819113 100644 --- a/mdbenchmark/tests/test_testing.py +++ b/mdbenchmark/tests/test_fixtures.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -19,8 +19,6 @@ # along with MDBenchmark. If not, see . import pytest -from mdbenchmark.testing import data, datafiles - def test_datafiles(datafiles): with pytest.raises(RuntimeError): diff --git a/mdbenchmark/tests/test_generate.py b/mdbenchmark/tests/test_generate.py index 49db1484..c19f5fca 100644 --- a/mdbenchmark/tests/test_generate.py +++ b/mdbenchmark/tests/test_generate.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -20,7 +20,6 @@ import os import datreant as dtr -import pandas as pd import pytest from click import exceptions @@ -34,9 +33,7 @@ validate_name, validate_number_of_nodes, ) -from mdbenchmark.ext.click_test import cli_runner from mdbenchmark.mdengines import SUPPORTED_ENGINES -from mdbenchmark.utils import ConsolidateDataFrame, DataFrameFromBundle, PrintDataFrame DIR_STRUCTURE = { "applications": { @@ -64,61 +61,11 @@ def exit(self): return MockCtx() 
-@pytest.fixture -def generate_output_create(): - def _output(gpu=True, n_benchmarks=4, runtime=15): - gpu_string = "{}" - if gpu: - gpu_string = "{} with GPUs" - - return "Creating benchmark system for {}.\n".format(gpu_string) - - return _output - - -@pytest.fixture -def generate_output_table(): - def _output(short=False): - title = "Benchmark Summary:\n" - bundle = dtr.discover() - df = DataFrameFromBundle(bundle) - if short: - df = ConsolidateDataFrame(df) - - return title + PrintDataFrame(df, False) + "\n" - - return _output - - -@pytest.fixture -def generate_output_finish(): - return ( - "Generating the above benchmarks.\n" - "Finished generating all benchmarks.\nYou can" - " now submit the jobs with mdbenchmark submit.\n" - ) - - -@pytest.fixture -def generate_output( - generate_output_create, generate_output_table, generate_output_finish -): - def _output(gpu=True, n_benchmarks=4, runtime=15): - create_string = generate_output_create( - gpu=gpu, n_benchmarks=n_benchmarks, runtime=runtime - ) - table_string = generate_output_table(short=True) - finish_string = generate_output_finish - return create_string + table_string + finish_string - - return _output - - @pytest.mark.parametrize( "module, extensions", [("gromacs/2016", ["tpr"]), ("namd/11", ["namd", "pdb", "psf"])], ) -def test_generate_simple_input(cli_runner, generate_output, module, extensions, tmpdir): +def test_generate(cli_runner, module, extensions, tmpdir): """Test that we can generate benchmarks for all supported MD engines w/o module validation.""" with tmpdir.as_cwd(): for ext in extensions: @@ -138,16 +85,32 @@ def test_generate_simple_input(cli_runner, generate_output, module, extensions, ], ) - output = generate_output().format(module) - output = ( - "WARNING Cannot locate modules available on this host. 
" - "Not performing module name validation.\n" + output + expected_output = ( + "+---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n" + "| Name | Module | Nodes | Time (min) | GPUs? | Host | # ranks | # threads | Hyperthreading? | # Simulations |\n" + "|---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------|\n" + "| protein | gromacs/2016 | 1-4 | 15 | True | draco | 40 | 1 | False | 1 |\n" + "+---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n\n" ) + + if "namd" in module: + expected_output = ( + "+---------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n" + "| Name | Module | Nodes | Time (min) | GPUs? | Host | # ranks | # threads | Hyperthreading? | # Simulations |\n" + "|---------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------|\n" + "| protein | namd/11 | 1-4 | 15 | True | draco | 40 | 1 | False | 1 |\n" + "+---------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n\n" + ) + + start_of_message = "WARNING Cannot locate modules available on this host. Not performing module name validation.\n\n" + end_of_message = ( + "We will generate 4 benchmarks.\n" + "Finished! You can submit the jobs with mdbenchmark submit.\n" + ) + output = start_of_message + "".join(expected_output) + end_of_message if "namd" in module: output = NAMD_WARNING_FORMATTED + output - # Test that we get a warning, if no module name validation is performed. 
- assert result.exit_code == 0 assert result.output == output @@ -156,14 +119,8 @@ def test_generate_simple_input(cli_runner, generate_output, module, extensions, "module, extensions", [("gromacs/2016", ["tpr"]), ("namd/11", ["namd", "pdb", "psf"])], ) -def test_generate_simple_input_with_cpu_gpu( - cli_runner, - generate_output_create, - generate_output_finish, - generate_output_table, - module, - extensions, - tmpdir, +def test_generate_with_cpu_gpu( + cli_runner, module, extensions, tmpdir, ): """Test that we can generate benchmarks for CPUs and GPUs at once.""" with tmpdir.as_cwd(): @@ -183,19 +140,34 @@ def test_generate_simple_input_with_cpu_gpu( ], ) - output = generate_output_create(gpu=False).format(module) - output = ( - "WARNING Cannot locate modules available on this host. " - "Not performing module name validation.\n" + output + expected_output = ( + "+---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n" + "| Name | Module | Nodes | Time (min) | GPUs? | Host | # ranks | # threads | Hyperthreading? | # Simulations |\n" + "|---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------|\n" + "| protein | gromacs/2016 | 1-4 | 15 | False | draco | 40 | 1 | False | 1 |\n" + "| protein | gromacs/2016 | 1-4 | 15 | True | draco | 40 | 1 | False | 1 |\n" + "+---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n\n" ) - output += generate_output_create(gpu=True).format(module) - output += generate_output_table(True) - output += generate_output_finish + + if "namd" in module: + expected_output = ( + "+---------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n" + "| Name | Module | Nodes | Time (min) | GPUs? | Host | # ranks | # threads | Hyperthreading? 
| # Simulations |\n" + "|---------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------|\n" + "| protein | namd/11 | 1-4 | 15 | False | draco | 40 | 1 | False | 1 |\n" + "| protein | namd/11 | 1-4 | 15 | True | draco | 40 | 1 | False | 1 |\n" + "+---------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n\n" + ) + + start_of_message = "WARNING Cannot locate modules available on this host. Not performing module name validation.\n\n" + end_of_message = ( + "We will generate 8 benchmarks.\n" + "Finished! You can submit the jobs with mdbenchmark submit.\n" + ) + output = start_of_message + "".join(expected_output) + end_of_message if "namd" in module: output = NAMD_WARNING_FORMATTED + output - # Test that we get a warning, if no module name validation is performed. - assert result.exit_code == 0 assert result.output == output @@ -204,14 +176,17 @@ def test_generate_simple_input_with_cpu_gpu( "module, extensions", [("gromacs/2016", ["tpr"]), ("namd/11", ["namd", "pdb", "psf"])], ) -def test_generate_simple_input_with_working_validation( - cli_runner, generate_output, module, monkeypatch, extensions, tmpdir -): - """Test that we can generate benchmarks for all supported MD engines with module validation.""" +def test_generate_with_validation(cli_runner, module, monkeypatch, extensions, tmpdir): with tmpdir.as_cwd(): for ext in extensions: open("protein.{}".format(ext), "a").close() + # monkeypatch the output of the available modules + monkeypatch.setattr( + "mdbenchmark.mdengines.get_available_modules", + lambda: {"gromacs": ["2016"], "namd": ["11"]}, + ) + result = cli_runner.invoke( cli, [ @@ -226,21 +201,32 @@ def test_generate_simple_input_with_working_validation( ], ) - output = generate_output().format(module) - output = ( - "WARNING Cannot locate modules available on this host. 
Not performing module name validation.\n" - + output + expected_output = ( + "\n" + "+---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n" + "| Name | Module | Nodes | Time (min) | GPUs? | Host | # ranks | # threads | Hyperthreading? | # Simulations |\n" + "|---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------|\n" + "| protein | gromacs/2016 | 1-4 | 15 | True | draco | 40 | 1 | False | 1 |\n" + "+---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n\n" ) - if "namd" in module: - output = NAMD_WARNING_FORMATTED + output - # monkeypatch the output of the available modules - monkeypatch.setattr( - "mdbenchmark.mdengines.get_available_modules", - lambda: {"gromacs": ["2016"], "namd": ["11"]}, - ) + if "namd" in module: + expected_output = ( + "+---------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n" + "| Name | Module | Nodes | Time (min) | GPUs? | Host | # ranks | # threads | Hyperthreading? | # Simulations |\n" + "|---------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------|\n" + "| protein | namd/11 | 1-4 | 15 | True | draco | 40 | 1 | False | 1 |\n" + "+---------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n\n" + ) + + end_of_message = ( + "We will generate 4 benchmarks.\n" + "Finished! You can submit the jobs with mdbenchmark submit.\n" + ) + output = "".join(expected_output) + end_of_message + if "namd" in module: + output = NAMD_WARNING_FORMATTED + "\n" + output - # Test that we get a warning, if no module name validation is performed. 
assert result.exit_code == 0 assert result.output == output @@ -249,9 +235,7 @@ def test_generate_simple_input_with_working_validation( "module, extensions", [("gromacs/2016", ["tpr"]), ("namd/11", ["namd", "pdb", "psf"])], ) -def test_generate_skip_validation( - cli_runner, module, extensions, generate_output, monkeypatch, tmpdir -): +def test_generate_skip_validation(cli_runner, module, extensions, monkeypatch, tmpdir): """Test that we can skip the validation during benchmark generation.""" with tmpdir.as_cwd(): for ext in extensions: @@ -267,7 +251,7 @@ def test_generate_skip_validation( cli, [ "generate", - "--module={}".format(module), + f"--module={module}", "--host=draco", "--max-nodes=4", "--gpu", @@ -278,8 +262,32 @@ def test_generate_skip_validation( ], ) - output = generate_output().format(module) - output = "WARNING Not performing module name validation.\n" + output + expected_output = ( + "+---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n" + "| Name | Module | Nodes | Time (min) | GPUs? | Host | # ranks | # threads | Hyperthreading? | # Simulations |\n" + "|---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------|\n" + "| protein | gromacs/2016 | 1-4 | 15 | True | draco | 40 | 1 | False | 1 |\n" + "+---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n\n" + ) + + if "namd" in module: + expected_output = ( + "+---------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n" + "| Name | Module | Nodes | Time (min) | GPUs? | Host | # ranks | # threads | Hyperthreading? 
| # Simulations |\n" + "|---------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------|\n" + "| protein | namd/11 | 1-4 | 15 | True | draco | 40 | 1 | False | 1 |\n" + "+---------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n\n" + ) + + end_of_message = ( + "We will generate 4 benchmarks.\n" + "Finished! You can submit the jobs with mdbenchmark submit.\n" + ) + output = ( + "WARNING Not performing module name validation.\n\n" + + "".join(expected_output) + + end_of_message + ) if "namd" in module: output = NAMD_WARNING_FORMATTED + output @@ -320,79 +328,6 @@ def test_generate_unsupported_engine(cli_runner, monkeypatch, tmpdir): assert result.output == output -@pytest.mark.parametrize( - "engine, module, version, extensions", - [ - ("gromacs", "gromacs/2016", "2016", ["tpr"]), - ("namd", "namd/11", "11", ["namd", "pdb", "psf"]), - ], -) -def test_generate_odd_number_of_nodes( - cli_runner, - engine, - module, - extensions, - generate_output, - monkeypatch, - tmpdir, - version, -): - """Make sure we generate the correct folder structure.""" - with tmpdir.as_cwd(): - for ext in extensions: - open("protein.{}".format(ext), "a").close() - - monkeypatch.setattr( - "mdbenchmark.mdengines.get_available_modules", - lambda: {"gromacs": ["2016"], "namd": ["11"]}, - ) - - result = cli_runner.invoke( - cli, - [ - "generate", - "--module={}".format(module), - "--host=draco", - "--min-nodes=6", - "--max-nodes=8", - "--gpu", - "--no-cpu", - "--name=protein", - "--yes", - ], - ) - - output1 = "Creating benchmark system for {} with GPUs.\n".format(module) - - bundle = dtr.discover() - df = DataFrameFromBundle(bundle) - df = ConsolidateDataFrame(df) - test_output = "Benchmark Summary:\n" + PrintDataFrame(df, False) + "\n" - - output2 = ( - "Generating the above benchmarks.\n" - "Finished generating all benchmarks.\n" - "You can now submit the 
jobs with mdbenchmark submit.\n" - ) - - if "namd" in module: - output = NAMD_WARNING_FORMATTED + output1 + test_output + output2 - else: - output = output1 + test_output + output2 - - assert result.exit_code == 0 - assert result.output == output - assert os.path.exists("draco_{}".format(engine)) - host_engine_version_path = "draco_{}/{}_gpu/".format(engine, version) - for i in range(6, 9): - assert os.path.exists(host_engine_version_path + "{}".format(i)) - for ext in extensions: - assert os.path.exists( - host_engine_version_path + "{}/protein.{}".format(i, ext) - ) - assert os.path.exists(host_engine_version_path + "{}/bench.job".format(i)) - - def test_generate_console_messages(cli_runner, monkeypatch, tmpdir): """Test that the CLI for generate prints all error messages as expected.""" with tmpdir.as_cwd(): @@ -499,19 +434,21 @@ def test_generate_namd_experimental_warning(cli_runner, monkeypatch, tmpdir): "All input files must be in the current directory. " "Parameter paths must be absolute. Only crude file checks are performed! " "If you use the --gpu option make sure you use the GPU compatible NAMD module!\n" - "Creating benchmark system for namd/123.\n" + "\n" + ) + expected_output = ( + "+--------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n", + "| Name | Module | Nodes | Time (min) | GPUs? | Host | # ranks | # threads | Hyperthreading? 
| # Simulations |\n", + "|--------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------|\n", + "| md | namd/123 | 1-5 | 15 | False | draco | 40 | 1 | False | 1 |\n", + "+--------+----------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n\n", ) - bundle = dtr.discover() - df = DataFrameFromBundle(bundle) - df = ConsolidateDataFrame(df) - test_output = "Benchmark Summary:\n" + PrintDataFrame(df, False) + "\n" output2 = ( - "Generating the above benchmarks.\n" - "Finished generating all benchmarks.\nYou can " - "now submit the jobs with mdbenchmark submit.\n" + "We will generate 5 benchmarks.\n" + "Finished! You can submit the jobs with mdbenchmark submit.\n" ) - output = output1 + test_output + output2 + output = output1 + "".join(expected_output) + output2 assert result.exit_code == 0 assert result.output == output @@ -520,7 +457,7 @@ def test_generate_namd_experimental_warning(cli_runner, monkeypatch, tmpdir): def test_print_known_hosts(ctx_mock, capsys): """Test that the print_known_hosts function works as expected.""" print_known_hosts(ctx_mock, None, True) - out, err = capsys.readouterr() + out, _ = capsys.readouterr() assert out == "Available host templates:\ncobra\ndraco\nhydra\n" @@ -604,7 +541,7 @@ def test_validate_generate_host(ctx_mock): assert validate_hosts(ctx_mock, None, host="draco") == "draco" -def test_generate_test_prompt_yes(cli_runner, tmpdir, generate_output): +def test_generate_prompt_yes(cli_runner, tmpdir): """Test whether promt answer yes works.""" with tmpdir.as_cwd(): open("protein.tpr", "a").close() @@ -624,25 +561,27 @@ def test_generate_test_prompt_yes(cli_runner, tmpdir, generate_output): ) output1 = ( "WARNING Cannot locate modules available on this host. 
Not performing module name validation.\n" - "Creating benchmark system for gromacs/2016 with GPUs.\n" + "\n" ) - bundle = dtr.discover() - df = DataFrameFromBundle(bundle) - df = ConsolidateDataFrame(df) + expected_output = ( + "+---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n" + "| Name | Module | Nodes | Time (min) | GPUs? | Host | # ranks | # threads | Hyperthreading? | # Simulations |\n" + "|---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------|\n" + "| protein | gromacs/2016 | 1-4 | 15 | True | draco | 40 | 1 | False | 1 |\n" + "+---------+--------------+---------+--------------+---------+--------+-----------+-------------+-------------------+-----------------+\n\n", + ) + output2 = ( - "The above benchmarks will be generated. Continue? [y/N]: y\n" - "Finished generating all benchmarks.\n" - "You can now submit the jobs with mdbenchmark submit.\n" + "We will generate 4 benchmarks. Continue? [y/N]: y\n" + "Finished! You can submit the jobs with mdbenchmark submit.\n" ) - mid = "Benchmark Summary:\n" + PrintDataFrame(df, False) + "\n" - output = output1 + mid + output2 - # Test that we get a warning, if no module name validation is performed. 
+ output = output1 + "".join(expected_output) + output2 assert result.exit_code == 0 assert result.output == output -def test_generate_test_prompt_no(cli_runner, tmpdir, generate_output): +def test_generate_test_prompt_no(cli_runner, tmpdir): """Test whether promt answer no works.""" with tmpdir.as_cwd(): open("protein.tpr", "a").close() diff --git a/mdbenchmark/tests/test_math.py b/mdbenchmark/tests/test_math.py new file mode 100644 index 00000000..d82c123f --- /dev/null +++ b/mdbenchmark/tests/test_math.py @@ -0,0 +1,42 @@ +# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 +# +# MDBenchmark +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors +# (see the file AUTHORS for the full list of names) +# +# MDBenchmark is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# MDBenchmark is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with MDBenchmark. 
If not, see +import numpy as np +from numpy.testing import assert_equal + +from mdbenchmark import math + + +def test_lin_func(): + """Test `lin_func()`.""" + m, x, b = [5, 3, 2] + + assert_equal(math.lin_func(m, x, b), (m * x) + b) + + +def test_calc_slope_intercept(): + """Test `calc_slope_intercept()`""" + x1, y1 = [1, 1] + x2, y2 = [2, 2] + slope = (y2 - y1) / (x2 - x1) + intercept = y1 - (x1 * slope) + + slope_intercept = math.calc_slope_intercept((x1, y1), (x2, y2)) + + assert_equal(slope_intercept, np.hstack([slope, intercept])) diff --git a/mdbenchmark/tests/test_models.py b/mdbenchmark/tests/test_models.py new file mode 100644 index 00000000..81ced77e --- /dev/null +++ b/mdbenchmark/tests/test_models.py @@ -0,0 +1,90 @@ +# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 +# +# MDBenchmark +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors +# (see the file AUTHORS for the full list of names) +# +# MDBenchmark is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# MDBenchmark is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with MDBenchmark. 
If not, see +import pytest + +from mdbenchmark.models import Processor + + +@pytest.mark.parametrize( + "physical_cores, logical_cores, expected_result", [[80, 40, 80], [40, 40, 40]] +) +def test_get_available_number_of_cores( + monkeypatch, physical_cores, logical_cores, expected_result +): + monkeypatch.setattr( + "mdbenchmark.models.cpu_count", + lambda logical: logical_cores if logical else physical_cores, + ) + obj = Processor() + assert obj._get_number_of_available_cores == expected_result + + +@pytest.mark.parametrize( + "physical_cores, logical_cores, number_of_ranks, expected_result", + [ + [40, 80, 40, True], + [80, 80, 20, True], + [40, 40, 80, False], + [40, 40, 80, False], + [80, 80, 0, False], + [80, 80, -1, False], + ], +) +def test_number_of_ranks_is_valid( + physical_cores, logical_cores, number_of_ranks, expected_result +): + obj = Processor(physical_cores, logical_cores) + assert expected_result == obj.number_of_ranks_is_valid(number_of_ranks) + + +@pytest.mark.parametrize( + "physical_cores, logical_cores, expected_result", + [ + [40, 80, True], + [40, 40, False], + [40, 24, False], # Can the last case ever happen? 
+ ], +) +def test_supports_hyperthreading(physical_cores, logical_cores, expected_result): + obj = Processor(physical_cores, logical_cores) + result = obj.supports_hyperthreading + + assert expected_result == result + + +@pytest.mark.parametrize( + "physical_cores, logical_cores, number_of_ranks, with_hyperthreading, expected_result", + [ + [40, 40, 40, False, (40, 1)], + [40, 80, 40, True, (40, 2)], + [40, 80, 40, False, (40, 1)], + [40, 80, 20, True, (20, 4)], + [40, 40, 20, False, (20, 2)], + [40, 40, 4, False, (4, 10)], + [40, 40, 2, False, (2, 20)], + ], +) +def test_ranks_and_threads( + physical_cores, logical_cores, number_of_ranks, with_hyperthreading, expected_result +): + obj = Processor(physical_cores, logical_cores) + ranks_threads = obj.get_ranks_and_threads(number_of_ranks, with_hyperthreading) + + assert expected_result == ranks_threads diff --git a/mdbenchmark/tests/test_plot.py b/mdbenchmark/tests/test_plot.py index 7364a83e..768299c1 100644 --- a/mdbenchmark/tests/test_plot.py +++ b/mdbenchmark/tests/test_plot.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -19,20 +19,12 @@ # along with MDBenchmark. If not, see . 
import os -import click -import matplotlib.pyplot as plt -import numpy as np import pandas as pd import pytest -from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas -from matplotlib.figure import Figure -from numpy.testing import assert_equal from pandas.testing import assert_frame_equal -from mdbenchmark import cli, utils +from mdbenchmark import cli from mdbenchmark.cli import plot -from mdbenchmark.ext.click_test import cli_runner -from mdbenchmark.testing import data @pytest.mark.parametrize( @@ -65,7 +57,7 @@ def test_plot_gpu(cli_runner, tmpdir, data): "Plotting GPU and CPU data.\n" "Plotting all hosts in input file.\n" "Plotting all modules in your input data.\n" - "Your file was saved as 'testpng.png' in the working directory.\n" + "The plot was saved as 'testpng.png'.\n" ) result = cli_runner.invoke( @@ -94,9 +86,7 @@ def test_plot_host_only(cli_runner, tmpdir, host, data): "Plotting GPU and CPU data.\n" "Data for the following hosts will be plotted: {}\n" "Plotting all modules in your input data.\n" - "Your file was saved as 'testpng.png' in the working directory.\n".format( - host - ) + "The plot was saved as 'testpng.png'.\n".format(host) ) result = cli_runner.invoke( @@ -125,18 +115,14 @@ def test_plot_module_only(cli_runner, tmpdir, module, data): "Plotting GPU and CPU data.\n" "Plotting all hosts in input file.\n" "Plotting all modules for engine '{}'.\n" - "Your file was saved as 'testpng.png' in the working directory.\n".format( - module - ) + "The plot was saved as 'testpng.png'.\n".format(module) ) else: output = ( "Plotting GPU and CPU data.\n" "Plotting all hosts in input file.\n" "Plotting module '{}'.\n" - "Your file was saved as 'testpng.png' in the working directory.\n".format( - module - ) + "The plot was saved as 'testpng.png'.\n".format(module) ) result = cli_runner.invoke( @@ -164,17 +150,14 @@ def test_plot_output_type(cli_runner, tmpdir, data, output_type): "All modules will be plotted.\n" "All hosts will be 
plotted.\n" "A total of 2 runs will be plotted.\n" - "Your file was saved as 'test.{}' in the working directory.\n".format( - output_type - ) + "The plot was saved as 'test.{}'.\n".format(output_type) ) output = ( "Plotting GPU and CPU data.\n" "Plotting all hosts in input file.\n" "Plotting all modules in your input data.\n" - "Your file was saved as 'testfile.{}' in the working " - "directory.\n".format(output_type) + "The plot was saved as 'testfile.{}'.\n".format(output_type) ) result = cli_runner.invoke( cli, @@ -228,7 +211,7 @@ def test_plot_filter_dataframe_for_plotting_gpu_and_cpu_fail( plot.filter_dataframe_for_plotting( df=input_df, host_name=(), module_name=(), gpu=False, cpu=False ) - out, err = capsys.readouterr() + out, _ = capsys.readouterr() assert out == expected_output assert error.type == SystemExit assert error.value.code == 1 @@ -292,46 +275,7 @@ def test_plot_filter_empty_dataframe_error(cli_runner, capsys, tmpdir, data): "Plotting GPU data only.\n" "ERROR Your filtering led to an empty dataset. Exiting.\n" ) - out, err = capsys.readouterr() + out, _ = capsys.readouterr() assert out == expected_output assert error.type == SystemExit assert error.value.code == 1 - - -def test_plot_plot_projection(capsys, cli_runner, tmpdir, data): - """Assert whether the line projection function returns an ax object. - """ - df = pd.read_csv(data["testcsv.csv"]) - df = df[:2] - selection = "nodes" - color = "grey" - fig = Figure() - FigureCanvas(fig) - ax = fig.add_subplot(111) - plot.plot_projection(df=df, selection=selection, color=color, ax=ax) - - -def test_plot_plot_line(capsys, cli_runner, tmpdir, data): - """Assert whether the single plot entry works and returns an ax object. 
- """ - df = pd.read_csv(data["testcsv.csv"]) - df = df[:2] - selection = "nodes" - label = "test" - fig = Figure() - FigureCanvas(fig) - ax = fig.add_subplot(111) - plot.plot_line(df=df, selection=selection, label=label, fit=True, ax=ax) - - -def test_plot_plot_line_singlepoint(capsys, cli_runner, tmpdir, data): - """Assert whether the single plot entry works and returns an ax object. - """ - df = pd.read_csv(data["testcsv.csv"]) - df = df[:1] - selection = "nodes" - label = "test" - fig = Figure() - FigureCanvas(fig) - ax = fig.add_subplot(111) - plot.plot_line(df=df, selection=selection, label=label, fit=True, ax=ax) diff --git a/mdbenchmark/tests/test_submit.py b/mdbenchmark/tests/test_submit.py index 28bae900..4e380e1a 100644 --- a/mdbenchmark/tests/test_submit.py +++ b/mdbenchmark/tests/test_submit.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -23,10 +23,9 @@ from mdbenchmark import cli from mdbenchmark.cli.submit import get_batch_command -from mdbenchmark.ext.click_test import cli_runner from mdbenchmark.mdengines import gromacs -from mdbenchmark.testing import data -from mdbenchmark.utils import DataFrameFromBundle, PrintDataFrame +from mdbenchmark.utils import map_columns, print_dataframe +from mdbenchmark.versions import Version2Categories @pytest.mark.skip(reason="monkeypatching is a problem. 
skip for now.") @@ -42,7 +41,7 @@ def test_get_batch_command(capsys, monkeypatch, tmpdir): ) with pytest.raises(SystemExit): get_batch_command() - out, err = capsys.readouterr() + out, _ = capsys.readouterr() assert out == output # Test non-fail state @@ -53,7 +52,7 @@ def test_get_batch_command(capsys, monkeypatch, tmpdir): @pytest.mark.skip(reason="monkeypatching is a problem. skip for now.") def test_submit_resubmit(cli_runner, monkeypatch, tmpdir, data): """Test that we cannot submit a benchmark system that was already submitted, - unless we force it. + unless we force it. """ with tmpdir.as_cwd(): # Test that we get an error if we try to point the submit function to @@ -69,7 +68,7 @@ def test_submit_resubmit(cli_runner, monkeypatch, tmpdir, data): ["submit", "--directory={}".format(data["analyze-files-gromacs"]), "--yes"], ) df = pd.read_csv(data["analyze-files-gromacs-consolidated.csv"], index_col=0) - s = PrintDataFrame(df, False) + s = print_dataframe(df, False) output = "ERROR All generated benchmarks were already started once. You can force a restart with --force.\n" @@ -112,6 +111,7 @@ def test_submit_resubmit(cli_runner, monkeypatch, tmpdir, data): def test_submit_test_prompt_no(cli_runner, tmpdir, data): """Test whether prompt answer no works.""" + benchmark_version = Version2Categories() with tmpdir.as_cwd(): result = cli_runner.invoke( cli, @@ -122,18 +122,19 @@ def test_submit_test_prompt_no(cli_runner, tmpdir, data): input="n\n", ) - df = pd.read_csv(data["analyze-files-gromacs-prompt.csv"], index_col=0) - s = PrintDataFrame(df, False) - - output = ( - "Benchmark Summary:\n" - + s - + "\nThe above benchmarks will be submitted. Continue? [y/N]: n\n" - + "ERROR Exiting. 
No benchmarks submitted.\n" + df = pd.read_csv(data["gromacs/test_prompt.csv"], index_col=0) + print_dataframe( + df, + columns=map_columns( + map_dict=benchmark_version.category_mapping, + columns=benchmark_version.generate_printing[1:], + ), ) assert result.exit_code == 1 - assert result.output == output + assert ( + result.output.split("\n")[-2] == "ERROR Exiting. No benchmarks submitted." + ) @pytest.mark.skip(reason="monkeypatching is a problem. skip for now.") @@ -162,8 +163,8 @@ def test_submit_test_prompt_yes(cli_runner, tmpdir, data, monkeypatch): input="y\n", ) - df = pd.read_csv(data["analyze-files-gromacs-prompt.csv"], index_col=0) - s = PrintDataFrame(df, False) + df = pd.read_csv(data["gromacs/test_prompt.csv"], index_col=0) + s = print_dataframe(df, False) output = ( "Benchmark Summary:\n" diff --git a/mdbenchmark/tests/test_utils.py b/mdbenchmark/tests/test_utils.py index ea1d06e7..7d4fb377 100644 --- a/mdbenchmark/tests/test_utils.py +++ b/mdbenchmark/tests/test_utils.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -21,15 +21,13 @@ import datreant as dtr import jinja2 -import numpy as np import pandas as pd import tabulate -from numpy.testing import assert_equal from pandas.testing import assert_frame_equal from mdbenchmark import utils -from mdbenchmark.ext.click_test import cli_runner -from mdbenchmark.testing import data +from mdbenchmark.utils import map_columns, print_dataframe +from mdbenchmark.versions import Version2Categories, VersionFactory def test_mdbenchmark_template_environment_variable(monkeypatch): @@ -101,93 +99,47 @@ def test_retrieve_host_template(monkeypatch): assert 
utils.retrieve_host_template("minerva") == "minerva" -def test_lin_func(): - """Test `lin_func()`.""" - m, x, b = [5, 3, 2] - - assert_equal(utils.lin_func(m, x, b), (m * x) + b) - - -def test_calc_slope_intercept(): - """Test `calc_slope_intercept()`""" - x1, y1 = [1, 1] - x2, y2 = [2, 2] - slope = (y2 - y1) / (x2 - x1) - intercept = y1 - (x1 * slope) - - slope_intercept = utils.calc_slope_intercept((x1, y1), (x2, y2)) - - assert_equal(slope_intercept, np.hstack([slope, intercept])) - - -def test_guess_ncores(capsys, monkeypatch): - """Test that we can guess the correct number of cores on the supported - systems. - """ - - def dummy(arg): - return "ABC" - - # Test on Linux - monkeypatch.setattr("mdbenchmark.utils.sys.platform", "linux") - monkeypatch.setattr( - "mdbenchmark.utils._cat_proc_cpuinfo_grep_query_sort_uniq", dummy - ) - assert utils.guess_ncores() == 9 - - # Test on Darwin - monkeypatch.setattr("mdbenchmark.utils.sys.platform", "darwin") - monkeypatch.setattr("mdbenchmark.utils.mp.cpu_count", lambda: 10) - assert utils.guess_ncores() == 5 - - # Test on some unknown platform - monkeypatch.setattr("mdbenchmark.utils.sys.platform", "starlord") - output = ( - "WARNING Could not guess number of physical cores. " - "Assuming there is only 1 core per node.\n" +def test_parse_bundle(data): + bundle = dtr.discover(data["analyze-files-gromacs"]) + version = VersionFactory(categories=bundle.categories).version_class + test_output = utils.parse_bundle( + bundle, columns=version.analyze_categories, sort_values_by=version.analyze_sort, ) + expected_output = pd.read_csv(data["analyze-files-gromacs.csv"], index_col=False) + assert_frame_equal(test_output, expected_output) - utils.guess_ncores() - out, err = capsys.readouterr() - assert out == output - -def test_DataFrameFromBundle(data): - """Test DataFrameFromBundle function. - This is used in other tests, therefore everything is hard coded - If changes are made to the layout type this should be changed here. 
- """ +def test_consolidate_dataframe(capsys, data): bundle = dtr.discover(data["analyze-files-gromacs"]) - test_output = utils.DataFrameFromBundle(bundle) - - expected_output = pd.read_csv(data["analyze-files-gromacs.csv"]) - - # TODO: This test fails if we test the dtype. This is weird and I would - # like to know why this is...the output itself is fine. - assert_frame_equal(test_output, expected_output, check_dtype=False) - + version = VersionFactory(categories=bundle.categories).version_class + df = utils.parse_bundle( + bundle, columns=version.analyze_categories, sort_values_by=version.analyze_sort, + ) + test_output = utils.consolidate_dataframe( + df, columns=version.consolidate_categories + ) -def test_ConsolidateDataFrame(data): - """ Test the ConsolidateDataFrame function. - This is used in other tests, therefore everyting is hard coded. - If changes are made to the layout type this should be changed here. - """ - bundle = dtr.discover(data["analyze-files-gromacs"]) - df = utils.DataFrameFromBundle(bundle) - test_output = utils.ConsolidateDataFrame(df) + print_dataframe( + test_output[version.generate_printing[1:]], + columns=map_columns( + map_dict=version.category_mapping, columns=version.generate_printing[1:], + ), + ) - expected_output = pd.read_csv( - data["analyze-files-gromacs-consolidated.csv"], index_col=0 + expected_output = ( + "Setting up...\n\n" + "+----------------+---------+--------------+---------+--------+-----------+-------------+-------------------+\n", + "| Module | Nodes | Time (min) | GPUs? | Host | # ranks | # threads | Hyperthreading? 
|\n", + "|----------------+---------+--------------+---------+--------+-----------+-------------+-------------------|\n", + "| gromacs/2016.3 | 1-5 | 15 | False | draco | nan | nan | nan |\n", + "+----------------+---------+--------------+---------+--------+-----------+-------------+-------------------+\n\n", ) - assert_frame_equal(test_output, expected_output, check_dtype=False) + out, _ = capsys.readouterr() + assert "\n".join(out.split("\n")) == "".join(expected_output) def test_group_consecutives(): - """Tests the group_consecutives function. - This is used in other tests, therefore everyting is hard coded. - If changes are made to the layout type this should be changed here. - """ vals = [1, 2, 4, 5, 7, 10] test_output = utils.group_consecutives(vals) @@ -196,16 +148,15 @@ def test_group_consecutives(): assert test_output == expected_output -def test_PrintDataFrame(data): - """Tests the group_consecutives function. - This is used in other tests, therefore everyting is hard coded. - If changes are made to the layout type this should be changed here. 
- """ - df_test = pd.read_csv(data["analyze-files-gromacs.csv"]) - test_output = utils.PrintDataFrame(df_test, False) +def test_print_dataframe(capsys, data): + df = pd.read_csv(data["analyze-files-gromacs.csv"]) + version = Version2Categories() + utils.print_dataframe(df, version.analyze_printing + ["version"]) expected_output = tabulate.tabulate( - df_test, headers="keys", tablefmt="psql", showindex=False + df, headers="keys", tablefmt="psql", showindex=False ) + expected_output = "\n" + expected_output + "\n\n" + out, _ = capsys.readouterr() - assert expected_output == test_output + assert "\n".join(out.split("\n")) == expected_output diff --git a/mdbenchmark/tests/test_versions.py b/mdbenchmark/tests/test_versions.py new file mode 100644 index 00000000..6286ab9a --- /dev/null +++ b/mdbenchmark/tests/test_versions.py @@ -0,0 +1,81 @@ +# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 +# +# MDBenchmark +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors +# (see the file AUTHORS for the full list of names) +# +# MDBenchmark is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# MDBenchmark is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with MDBenchmark. 
If not, see +import pytest + +from mdbenchmark.versions import VersionCategories, VersionFactory + +CATEGORIES = { + "VERSION_2": ["name", "started", "module", "host", "time", "gpu", "nodes"], + "VERSION_3": [ + "name", + "started", + "module", + "host", + "time", + "gpu", + "nodes", + "version", + ], +} + +IMPLEMENTED_VERSION_CLASSES = [cls for cls in VersionCategories.__subclasses__()] + + +def test_init_raises_exception(): + with pytest.raises(ValueError) as err: + VersionFactory() + + assert "must be set" in str(err.value) + + +@pytest.mark.parametrize("version", ("2", "3")) +def test_version(version): + obj = VersionFactory(version=version) + assert obj.version == version + + +@pytest.mark.parametrize("version", ("2", "3")) +def test_guess_version(version): + obj = VersionFactory(categories=CATEGORIES[f"VERSION_{version}"]) + assert obj.version == version + + +def test_version_class_zero_does_not_exist(): + obj = VersionFactory(version="0") + assert obj.version_class is None + + +@pytest.mark.parametrize( + "attribute", + ( + "version", + "consolidate_categories", + "generate_categories", + "generate_mapping", + "generate_printing", + "analyze_categories", + "analyze_printing", + "analyze_sort", + "category_mapping", + ), +) +def test_not_implemented(attribute): + obj = VersionCategories() + assert getattr(obj, attribute) == NotImplemented diff --git a/mdbenchmark/utils.py b/mdbenchmark/utils.py index 8103f123..06b3f99b 100644 --- a/mdbenchmark/utils.py +++ b/mdbenchmark/utils.py @@ -2,7 +2,7 @@ # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDBenchmark -# Copyright (c) 2017-2018 The MDBenchmark development team and contributors +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors # (see the file AUTHORS for the full list of names) # # MDBenchmark is free software: you can redistribute it and/or modify @@ -18,22 +18,17 @@ # You should have received a copy of the GNU General Public License # along with 
MDBenchmark. If not, see . import datetime as dt -import multiprocessing as mp import os import socket -import sys import click import datreant as dtr -import numpy as np import pandas as pd import xdg from jinja2 import ChoiceLoader, Environment, FileSystemLoader, PackageLoader -from jinja2.exceptions import TemplateNotFound from tabulate import tabulate -from mdbenchmark import console -from mdbenchmark.ext.cadishi import _cat_proc_cpuinfo_grep_query_sort_uniq +from mdbenchmark import console, mdengines from mdbenchmark.mdengines import detect_md_engine, utils # Order where to look for host templates: HOME -> etc -> package @@ -79,7 +74,7 @@ def retrieve_host_template(host=None): Parameter --------- host : str - Name of the host template to lookup + Name of the host template to lookup Returns ------- @@ -88,39 +83,84 @@ def retrieve_host_template(host=None): return ENV.get_template(host) -def lin_func(x, m, b): - return m * x + b +def validate_required_files(name, modules): + for module in modules: + # Here we detect the MD engine (supported: GROMACS and NAMD). + engine = mdengines.detect_md_engine(module) + engine.check_input_file_exists(name) -def calc_slope_intercept(x, y): - x = np.asarray(x) - y = np.asarray(y) - diff = x - y - slope = diff[1] / diff[0] - intercept = x[1] - (x[0] * slope) - return np.hstack([slope, intercept]) - +def construct_directory_name(template, module, gpu): + return "{template}_{module}{gpu}".format( + template=template, module=module, gpu="_gpu" if gpu else "" + ) -def guess_ncores(): - """Guess the number of physical CPU cores. - We inspect `/proc/cpuinfo` to grab the actual number.""" - total_cores = None - if sys.platform.startswith("linux"): - nsocket = len(_cat_proc_cpuinfo_grep_query_sort_uniq("physical id")) - ncores = len(_cat_proc_cpuinfo_grep_query_sort_uniq("core id")) - total_cores = ncores * nsocket - elif sys.platform == "darwin": - # assumes we have an INTEL CPU with hyper-threading. 
As of 2017 this is - # true for all officially supported Apple models. - total_cores = mp.cpu_count() // 2 - if total_cores is None: - console.warn( - "Could not guess number of physical cores. " - "Assuming there is only 1 core per node." - ) - total_cores = 1 - return total_cores +def construct_generate_data( + name, + job_name, + modules, + host, + template, + cpu, + gpu, + time, + min_nodes, + max_nodes, + processor, + number_of_ranks, + enable_hyperthreading, + multidir, +): + data = [] + for module in modules: + # Here we detect the MD engine (supported: GROMACS and NAMD). + engine = mdengines.detect_md_engine(module) + + # Iterate over CPUs or GPUs + gpu_cpu = {"cpu": cpu, "gpu": gpu} + for key, value in sorted(gpu_cpu.items()): + # Skip the current processing unit + if not value: + continue + + # Generate directory name and string representation for the user. + # Also set the `gpu` variable for later use. + gpu = True if key == "gpu" else False + directory = construct_directory_name(template.name, module, gpu) + + # Set up the path to the new directory as `datreant.Tree` + base_directory = dtr.Tree(directory) + + # Do the main iteration over nodes, ranks and number of simulations + for nodes in range(min_nodes, max_nodes + 1): + for _ranks in number_of_ranks: + ranks, threads = processor.get_ranks_and_threads( + _ranks, with_hyperthreading=enable_hyperthreading + ) + for nsim in multidir: + + # Append the data to our list + data.append( + [ + name, + job_name, + base_directory, + host, + engine, + module, + nodes, + time, + gpu, + template, + ranks, + threads, + enable_hyperthreading, + nsim, + ] + ) + + return data def generate_output_name(extension): @@ -131,92 +171,95 @@ def generate_output_name(extension): return out -def DataFrameFromBundle(bundle): - """Generates a DataFrame from a datreant bundle.""" - df = pd.DataFrame( - columns=["module", "nodes", "ns/day", "run time [min]", "gpu", "host", "ncores"] - ) +def parse_bundle(bundle, columns, 
sort_values_by, discard_performance=False): + """Generates a DataFrame from a datreant.Bundle.""" + data = [] - for i, sim in enumerate(bundle): - # older versions wrote a version category. This ensures backwards compatibility - if "module" in sim.categories: - module = sim.categories["module"] - else: - module = sim.categories["version"] - # call the engine specific analysis functions - engine = detect_md_engine(module) - df.loc[i] = utils.analyze_run(engine=engine, sim=sim) + with click.progressbar( + bundle, length=len(bundle), label="Analyzing benchmarks", show_pos=True + ) as bar: + for treant in bar: + module = treant.categories["module"] + engine = detect_md_engine(module) + row = utils.analyze_benchmark(engine=engine, benchmark=treant) - if df.empty: - console.error("There is no data for the given path.") + version = 2 + if "version" in treant.categories: + version = 3 + if version == 2: + row.pop() # multidir is not a category for version 2 data + row += [version] - # Sort values by `nodes` - df = df.sort_values( - ["host", "module", "run time [min]", "gpu", "nodes"] - ).reset_index(drop=True) + if discard_performance: + row = row[:2] + row[3:] - return df + data.append(row) + df = pd.DataFrame(data, columns=columns) -def ConsolidateDataFrame(df): - """Edits a DataFrame and consolidates the output. - Requires the previously generated DataFrame. - Returns a newly formatted DataFrame. 
- """ - df_short = pd.DataFrame( - columns=["module", "nodes", "run time [min]", "host", "gpu"] - ) + # Exit if no data is available + if df.empty: + console.error("There is no data for the given path.") - groupby = ["module", "host", "gpu"] - gb = df.groupby(groupby) + # Sort values by `nodes` + df = df.sort_values(sort_values_by).reset_index(drop=True) - i = 0 - for key, df in gb: + return df - node_print_output = [] - node_groups = group_consecutives(df["nodes"].tolist()) - for node_g in node_groups: - if len(node_g) == 1: - node_print_output.append(node_g[0]) - else: - node_print_output.append(str(node_g[0]) + "-" + str(node_g[-1])) +def map_columns(map_dict, columns): + return [map_dict[key] for key in columns] - values = ", ".join(str(v) for v in node_print_output) - df_short.loc[i] = (key[0], values, df["run time [min]"].iloc[0], key[1], key[2]) - i += 1 - return df_short +def consolidate_dataframe(df, columns): + """Return a shortened version of a DataFrame, grouping the nodes.""" + new_columns = df.columns + agg = {column: "first" for column in new_columns if column not in columns} + agg["nodes"] = format_interval_groups + new_df = df.groupby(columns, as_index=False).agg(agg) + return new_df[new_columns] -def PrintDataFrame(df, printdf=True): +def print_dataframe(df, columns): """Print a nicely formatted shortened DataFrame.""" - tab = tabulate(df, headers="keys", tablefmt="psql", showindex=False) - if printdf is True: - print(tab) - else: - return tab + table = df.copy() + table.columns = columns + table = tabulate(table, headers="keys", tablefmt="psql", showindex=False) + console.info(table, newlines=True) -def group_consecutives(vals, step=1): +def group_consecutives(values, step=1): """Return list of consecutive lists of numbers from vals (number list). - This list hast to be at least ordered such that N+1 > N. - Adapted from code found on stack overflow. 
- Question Thread: - https://stackoverflow.com/questions/7352684/ - Solved by: - https://stackoverflow.com/users/308066/dkamins + This list hast to be at least ordered such that N+1 > N. + Adapted from code found on stack overflow. + Question Thread: + https://stackoverflow.com/questions/7352684/ + Solved by: + https://stackoverflow.com/users/308066/dkamins """ run = [] result = [run] - expect = None - for v in vals: - if (v == expect) or (expect is None): - run.append(v) + expected = None + for value in values: + if (value == expected) or (expected is None): + run.append(value) else: - run = [v] + run = [value] result.append(run) - expect = v + step + expected = value + step return result + + +def format_interval_groups(nodes): + output = [] + groups = group_consecutives(nodes) + + for group in groups: + if len(group) == 1: + output.append(group[0]) + else: + output.append(str(group[0]) + "-" + str(group[-1])) + + return ", ".join(str(node) for node in output) diff --git a/mdbenchmark/version.py b/mdbenchmark/version.py deleted file mode 100644 index b46c2e74..00000000 --- a/mdbenchmark/version.py +++ /dev/null @@ -1 +0,0 @@ -VERSION = "2.0.1" diff --git a/mdbenchmark/versions.py b/mdbenchmark/versions.py new file mode 100644 index 00000000..10ed40cd --- /dev/null +++ b/mdbenchmark/versions.py @@ -0,0 +1,267 @@ +# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 +# +# MDBenchmark +# Copyright (c) 2017-2020 The MDBenchmark development team and contributors +# (see the file AUTHORS for the full list of names) +# +# MDBenchmark is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# MDBenchmark is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with MDBenchmark. If not, see +from mdbenchmark import console + + +class VersionCategories: + version = NotImplemented + consolidate_categories = NotImplemented + generate_categories = NotImplemented + generate_mapping = NotImplemented + generate_printing = NotImplemented + analyze_categories = NotImplemented + analyze_printing = NotImplemented + analyze_sort = NotImplemented + category_mapping = NotImplemented + + def __getattr__(self, attr): + return self.__getattribute__(attr) + + +class Version2Categories(VersionCategories): + version = "2" + consolidate_categories = [ + "module", + "host", + "gpu", + ] + analyze_categories = [ + "module", + "nodes", + "ns/day", + "time", + "gpu", + "host", + "ncores", + "number_of_ranks", + "number_of_threads", + "hyperthreading", + "version", + ] + analyze_printing = [ + "module", + "nodes", + "ns/day", + "time", + "gpu", + "host", + "ncores", + "number_of_ranks", + "number_of_threads", + "hyperthreading", + ] + analyze_sort = ["module", "gpu", "nodes"] + generate_printing = [ + "name", + "module", + "nodes", + "time", + "gpu", + "host", + "number_of_ranks", + "number_of_threads", + "hyperthreading", + ] + submit_categories = [ + "module", + "nodes", + "time", + "gpu", + "host", + "ncores", + "number_of_ranks", + "number_of_threads", + "hyperthreading", + "version", + ] + category_mapping = { + "name": "Name", + "engine": "Engine", + "module": "Module", + "nodes": "Nodes", + "ns/day": "Performances (ns/day)", + "time": "Time (min)", + "gpu": "GPUs?", + "host": "Host", + "ncores": "# cores", + "number_of_ranks": "# ranks", + "number_of_threads": "# threads", + "hyperthreading": "Hyperthreading?", + 
"job_name": "Job name", + "submitted": "Submitted?", + } + + +class Version3Categories(VersionCategories): + version = "3" + consolidate_categories = [ + "module", + "host", + "use_gpu", + "number_of_ranks", + "hyperthreading", + "multidir", + ] + generate_categories = [ + "name", + "job_name", + "base_directory", + "host", + "engine", + "module", + "nodes", + "time", + "use_gpu", + "template", + "number_of_ranks", + "number_of_threads", + "hyperthreading", + "multidir", + ] + generate_mapping = { + "engine": "engine", + "base_directory": "base_directory", + "template": "template", + "nodes": "nodes", + "use_gpu": "gpu", + "module": "module", + "job_name": "job_name", + "host": "host", + "time": "time", + "number_of_ranks": "number_of_ranks", + "number_of_threads": "number_of_threads", + "hyperthreading": "hyperthreading", + "multidir": "multidir", + } + generate_printing = [ + "name", + "module", + "nodes", + "time", + "use_gpu", + "host", + "number_of_ranks", + "number_of_threads", + "hyperthreading", + "multidir", + ] + analyze_categories = [ + "module", + "nodes", + "performance", + "time", + "use_gpu", + "host", + "ncores", + "number_of_ranks", + "number_of_threads", + "hyperthreading", + "multidir", + "version", + ] + analyze_printing = [ + "module", + "nodes", + "performance", + "time", + "use_gpu", + "host", + "ncores", + "number_of_ranks", + "number_of_threads", + "hyperthreading", + "multidir", + ] + analyze_sort = ["module", "number_of_ranks", "hyperthreading", "use_gpu", "nodes"] + submit_categories = [ + "module", + "nodes", + "time", + "use_gpu", + "host", + "ncores", + "number_of_ranks", + "number_of_threads", + "hyperthreading", + "multidir", + "version", + ] + category_mapping = { + "name": "Name", + "engine": "Engine", + "module": "Module", + "nodes": "Nodes", + "performance": "Performances (ns/day)", + "time": "Time (min)", + "use_gpu": "GPUs?", + "host": "Host", + "ncores": "# cores", + "number_of_ranks": "# ranks", + "number_of_threads": "# 
threads", + "hyperthreading": "Hyperthreading?", + "job_name": "Job name", + "submitted": "Submitted?", + "multidir": "# Simulations", + } + + +VERSIONS = [Version2Categories(), Version3Categories()] + + +class VersionFactory: + """Factory class that provides access to categories needed at different lifecycles. + + It tries to determine the version given some categories or uses an initial version value.""" + + def __init__(self, categories=None, version=None): + if categories is None and version is None: + raise ValueError("Either `categories` or `version` must be set.") + + if categories is not None: + self._guess_version(categories) + + if version is not None: + self.version = version + + def _guess_version(self, categories): + console.info("Setting up...") + try: + if "module" in categories and "version" in categories: + # Versions >=3 have both a "module" and "version" key + self.version = "3" + elif "module" in categories: + # Version 2 uses "module", but has no "version" key + self.version = "2" + else: + # We found a version that is not enumerated above + self.version = "next" + except TypeError: + # If we point datreant to an empty or non-existent directory, it + # will throw an error. Catch it and set some default version. + self.version = "3" + + @property + def version_class(self): + matches = [cls for cls in VERSIONS if cls.version == self.version] + + if len(matches) == 0: + return None + + return matches[0] diff --git a/poetry.lock b/poetry.lock index d9f2a247..ea93a8c9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -21,7 +21,7 @@ marker = "python_version >= \"3.6\" and python_version < \"4.0\"" name = "appdirs" optional = false python-versions = "*" -version = "1.4.3" +version = "1.4.4" [[package]] category = "dev" @@ -48,43 +48,18 @@ optional = false python-versions = "*" version = "0.3.3" -[[package]] -category = "dev" -description = "A abstract syntax tree for Python with inference support." 
-name = "astroid" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "1.6.6" - -[package.dependencies] -lazy-object-proxy = "*" -six = "*" -wrapt = "*" - -[package.dependencies."backports.functools-lru-cache"] -python = "<3.4" -version = "*" - -[package.dependencies.enum34] -python = "<3.4" -version = ">=1.1.3" - -[package.dependencies.singledispatch] -python = "<3.4" -version = "*" - [[package]] category = "dev" description = "An abstract syntax tree for Python with inference support." name = "astroid" optional = false -python-versions = ">=3.5.*" -version = "2.3.3" +python-versions = ">=3.5" +version = "2.4.2" [package.dependencies] lazy-object-proxy = ">=1.4.0,<1.5.0" six = ">=1.12,<2.0" -wrapt = ">=1.11.0,<1.12.0" +wrapt = ">=1.11,<2.0" [package.dependencies.typed-ast] python = "<3.8" @@ -93,10 +68,11 @@ version = ">=1.4.0,<1.5" [[package]] category = "dev" description = "Atomic file writes." +marker = "sys_platform == \"win32\"" name = "atomicwrites" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "1.3.0" +version = "1.4.0" [[package]] category = "dev" @@ -129,29 +105,7 @@ description = "Specifications for callback functions passed in to an API" name = "backcall" optional = false python-versions = "*" -version = "0.1.0" - -[[package]] -category = "dev" -description = "Backport of functools.lru_cache" -marker = "python_version < \"3.4\"" -name = "backports.functools-lru-cache" -optional = false -python-versions = ">=2.6" -version = "1.6.1" - -[package.extras] -docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=3.5,<3.7.3 || >3.7.3)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pytest-black-multipy", "pytest-cov"] - -[[package]] -category = "dev" -description = "A backport of the get_terminal_size function from Python 3.3's shutil." 
-marker = "python_version == \"2.7\"" -name = "backports.shutil-get-terminal-size" -optional = false -python-versions = "*" -version = "1.0.0" +version = "0.2.0" [[package]] category = "dev" @@ -180,7 +134,7 @@ description = "Python package for providing Mozilla's CA Bundle." name = "certifi" optional = true python-versions = "*" -version = "2019.11.28" +version = "2020.4.5.2" [[package]] category = "main" @@ -195,47 +149,25 @@ category = "main" description = "Composable command line interface toolkit" name = "click" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "7.0" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "7.1.2" [[package]] category = "main" description = "Cross-platform colored terminal text." -marker = "sys_platform == \"win32\" and python_version != \"3.4\" or sys_platform == \"win32\"" +marker = "sys_platform == \"win32\"" name = "colorama" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" version = "0.4.3" -[[package]] -category = "dev" -description = "Updated configparser from Python 3.7 for Python 2.6+." 
-marker = "python_version < \"3.2\"" -name = "configparser" -optional = false -python-versions = ">=2.6" -version = "4.0.2" - -[package.extras] -docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=3.5,<3.7.3 || >3.7.3)", "pytest-checkdocs (>=1.2)", "pytest-flake8", "pytest-black-multipy"] - -[[package]] -category = "dev" -description = "Backports and enhancements for the contextlib module" -marker = "python_version < \"3.4\"" -name = "contextlib2" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "0.6.0.post1" - [[package]] category = "dev" description = "Code coverage measurement for Python" name = "coverage" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" -version = "5.0.3" +version = "5.1" [package.extras] toml = ["toml"] @@ -284,28 +216,6 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" version = "0.16" -[[package]] -category = "dev" -description = "Discover and load entry points from installed packages." 
-name = "entrypoints" -optional = false -python-versions = ">=2.7" -version = "0.3" - -[package.dependencies] -[package.dependencies.configparser] -python = ">=2.7,<2.8" -version = ">=3.5" - -[[package]] -category = "dev" -description = "Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4" -marker = "python_version < \"3.4\"" -name = "enum34" -optional = false -python-versions = "*" -version = "1.1.9" - [[package]] category = "dev" description = "execnet: rapid multi-Python deployment" @@ -322,61 +232,21 @@ testing = ["pre-commit"] [[package]] category = "dev" -description = "the modular source code checker: pep8, pyflakes and co" +description = "the modular source code checker: pep8 pyflakes and co" name = "flake8" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "3.7.9" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +version = "3.8.3" [package.dependencies] -entrypoints = ">=0.3.0,<0.4.0" mccabe = ">=0.6.0,<0.7.0" -pycodestyle = ">=2.5.0,<2.6.0" -pyflakes = ">=2.1.0,<2.2.0" +pycodestyle = ">=2.6.0a1,<2.7.0" +pyflakes = ">=2.2.0,<2.3.0" -[package.dependencies.configparser] -python = "<3.2" -version = "*" - -[package.dependencies.enum34] -python = "<3.4" -version = "*" - -[package.dependencies.functools32] -python = "<3.2" -version = "*" - -[package.dependencies.typing] -python = "<3.5" +[package.dependencies.importlib-metadata] +python = "<3.8" version = "*" -[[package]] -category = "dev" -description = "Python function signatures from PEP362 for Python 2.6, 2.7 and 3.2+" -marker = "python_version < \"3.0\"" -name = "funcsigs" -optional = false -python-versions = "*" -version = "1.0.2" - -[[package]] -category = "dev" -description = "Backport of the functools module from Python 3.2.3 for use on 2.7 and PyPy." 
-marker = "python_version < \"3.2\"" -name = "functools32" -optional = false -python-versions = "*" -version = "3.2.3-2" - -[[package]] -category = "dev" -description = "Backport of the concurrent.futures package from Python 3" -marker = "python_version < \"3.2\"" -name = "futures" -optional = false -python-versions = ">=2.6, <3" -version = "3.3.0" - [[package]] category = "main" description = "Fuzzy string matching in python" @@ -411,26 +281,14 @@ marker = "python_version < \"3.8\"" name = "importlib-metadata" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" -version = "1.5.0" +version = "1.6.1" [package.dependencies] zipp = ">=0.5" -[package.dependencies.configparser] -python = "<3" -version = ">=3.5" - -[package.dependencies.contextlib2] -python = "<3" -version = "*" - -[package.dependencies.pathlib2] -python = "<3" -version = "*" - [package.extras] docs = ["sphinx", "rst.linker"] -testing = ["packaging", "importlib-resources"] +testing = ["packaging", "pep517", "importlib-resources (>=1.3)"] [[package]] category = "dev" @@ -448,51 +306,8 @@ category = "dev" description = "IPython: Productive Interactive Computing" name = "ipython" optional = false -python-versions = "*" -version = "5.9.0" - -[package.dependencies] -appnope = "*" -colorama = "*" -decorator = "*" -pexpect = "*" -pickleshare = "*" -prompt-toolkit = ">=1.0.4,<2.0.0" -pygments = "*" -setuptools = ">=18.5" -simplegeneric = ">0.8" -traitlets = ">=4.2" - -[package.dependencies."backports.shutil-get-terminal-size"] -python = ">=2.7,<2.8" -version = "*" - -[package.dependencies.pathlib2] -python = ">=2.7,<2.8 || >=3.3,<3.4" -version = "*" - -[package.dependencies.win-unicode-console] -python = "<3.6" -version = ">=0.5" - -[package.extras] -all = ["nbformat", "ipykernel", "pygments", "testpath", "notebook", "nbconvert", "ipyparallel", "qtconsole", "Sphinx (>=1.3)", "requests", "nose (>=0.10.1)", "ipywidgets"] -doc = ["Sphinx (>=1.3)"] -kernel = ["ipykernel"] -nbconvert = 
["nbconvert"] -nbformat = ["nbformat"] -notebook = ["notebook", "ipywidgets"] -parallel = ["ipyparallel"] -qtconsole = ["qtconsole"] -test = ["nose (>=0.10.1)", "requests", "testpath", "pygments", "nbformat", "ipykernel", "mock", "numpy"] - -[[package]] -category = "dev" -description = "IPython: Productive Interactive Computing" -name = "ipython" -optional = false -python-versions = ">=3.5" -version = "7.9.0" +python-versions = ">=3.6" +version = "7.15.0" [package.dependencies] appnope = "*" @@ -502,17 +317,13 @@ decorator = "*" jedi = ">=0.10" pexpect = "*" pickleshare = "*" -prompt-toolkit = ">=2.0.0,<2.1.0" +prompt-toolkit = ">=2.0.0,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.1.0" pygments = "*" setuptools = ">=18.5" traitlets = ">=4.2" -[package.dependencies.win-unicode-console] -python = "<3.6" -version = ">=0.5" - [package.extras] -all = ["testpath", "nbformat", "numpy", "notebook", "ipyparallel", "nbconvert", "ipywidgets", "pygments", "requests", "nose (>=0.10.1)", "qtconsole", "ipykernel", "Sphinx (>=1.3)"] +all = ["Sphinx (>=1.3)", "ipykernel", "ipyparallel", "ipywidgets", "nbconvert", "nbformat", "nose (>=0.10.1)", "notebook", "numpy (>=1.14)", "pygments", "qtconsole", "requests", "testpath"] doc = ["Sphinx (>=1.3)"] kernel = ["ipykernel"] nbconvert = ["nbconvert"] @@ -520,7 +331,7 @@ nbformat = ["nbformat"] notebook = ["notebook", "ipywidgets"] parallel = ["ipyparallel"] qtconsole = ["qtconsole"] -test = ["nose (>=0.10.1)", "requests", "testpath", "pygments", "nbformat", "ipykernel", "numpy"] +test = ["nose (>=0.10.1)", "requests", "testpath", "pygments", "nbformat", "ipykernel", "numpy (>=1.14)"] [[package]] category = "dev" @@ -538,15 +349,6 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" version = "4.3.21" -[package.dependencies] -[package.dependencies."backports.functools-lru-cache"] -python = "<3.2" -version = "*" - -[package.dependencies.futures] -python = "<3.2" -version = "*" - [package.extras] pipfile = ["pipreqs", 
"requirementslib"] pyproject = ["toml"] @@ -558,15 +360,15 @@ category = "dev" description = "An autocompletion tool for Python that can be used for text editors." name = "jedi" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "0.16.0" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "0.17.1" [package.dependencies] -parso = ">=0.5.2" +parso = ">=0.7.0,<0.8.0" [package.extras] qa = ["flake8 (3.7.9)"] -testing = ["colorama (0.4.1)", "docopt", "pytest (>=3.9.0,<5.0.0)"] +testing = ["Django (<3.1)", "colorama", "docopt", "pytest (>=3.9.0,<5.0.0)"] [[package]] category = "main" @@ -574,7 +376,7 @@ description = "A very fast and expressive template engine." name = "jinja2" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -version = "2.11.1" +version = "2.11.2" [package.dependencies] MarkupSafe = ">=0.23" @@ -587,11 +389,8 @@ category = "main" description = "A fast implementation of the Cassowary constraint solver" name = "kiwisolver" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "1.1.0" - -[package.dependencies] -setuptools = "*" +python-versions = ">=3.6" +version = "1.2.0" [[package]] category = "dev" @@ -607,11 +406,14 @@ description = "Python LiveReload is an awesome tool for web developers" name = "livereload" optional = true python-versions = "*" -version = "2.6.1" +version = "2.6.2" [package.dependencies] six = "*" -tornado = "*" + +[package.dependencies.tornado] +python = ">=2.8" +version = "*" [[package]] category = "main" @@ -626,30 +428,13 @@ category = "main" description = "Python plotting package" name = "matplotlib" optional = false -python-versions = "*" -version = "2.2.5" - -[package.dependencies] -cycler = ">=0.10" -kiwisolver = ">=1.0.1" -numpy = ">=1.7.1" -pyparsing = ">=2.0.1,<2.0.4 || >2.0.4,<2.1.2 || >2.1.2,<2.1.6 || >2.1.6" -python-dateutil = ">=2.1" -pytz = "*" -six = ">=1.10" - 
-[[package]] -category = "main" -description = "Python plotting package" -name = "matplotlib" -optional = false -python-versions = ">=3.5" -version = "3.0.3" +python-versions = ">=3.6" +version = "3.2.2" [package.dependencies] cycler = ">=0.10" kiwisolver = ">=1.0.1" -numpy = ">=1.10.0" +numpy = ">=1.11" pyparsing = ">=2.0.1,<2.0.4 || >2.0.4,<2.1.2 || >2.1.2,<2.1.6 || >2.1.6" python-dateutil = ">=2.1" @@ -661,33 +446,13 @@ optional = false python-versions = "*" version = "0.6.1" -[[package]] -category = "dev" -description = "More routines for operating on iterables, beyond itertools" -marker = "python_version <= \"2.7\"" -name = "more-itertools" -optional = false -python-versions = "*" -version = "5.0.0" - -[package.dependencies] -six = ">=1.0.0,<2.0.0" - [[package]] category = "dev" description = "More routines for operating on iterables, beyond itertools" name = "more-itertools" optional = false python-versions = ">=3.5" -version = "8.2.0" - -[[package]] -category = "main" -description = "NumPy is the fundamental package for array computing with Python." -name = "numpy" -optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" -version = "1.16.6" +version = "8.4.0" [[package]] category = "main" @@ -695,7 +460,7 @@ description = "NumPy is the fundamental package for array computing with Python. 
name = "numpy" optional = false python-versions = ">=3.5" -version = "1.18.1" +version = "1.18.5" [[package]] category = "main" @@ -703,7 +468,7 @@ description = "Core utilities for Python packages" name = "packaging" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "20.1" +version = "20.4" [package.dependencies] pyparsing = ">=2.0.2" @@ -714,13 +479,16 @@ category = "main" description = "Powerful data structures for data analysis, time series, and statistics" name = "pandas" optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" -version = "0.24.2" +python-versions = ">=3.5.3" +version = "0.25.3" [package.dependencies] -numpy = ">=1.12.0" -python-dateutil = ">=2.5.0" -pytz = ">=2011k" +numpy = ">=1.13.3" +python-dateutil = ">=2.6.1" +pytz = ">=2017.2" + +[package.extras] +test = ["pytest (>=4.0.2)", "pytest-xdist", "hypothesis (>=3.58)"] [[package]] category = "dev" @@ -728,7 +496,7 @@ description = "A Python Parser" name = "parso" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "0.6.2" +version = "0.7.0" [package.extras] testing = ["docopt", "pytest (>=3.0.7)"] @@ -744,10 +512,6 @@ version = "2.3.5" [package.dependencies] six = "*" -[package.dependencies.scandir] -python = "<3.5" -version = "*" - [[package]] category = "dev" description = "Utility library for gitignore style pattern matching of file paths." 
@@ -755,7 +519,7 @@ marker = "python_version >= \"3.6\" and python_version < \"4.0\"" name = "pathspec" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -version = "0.7.0" +version = "0.8.0" [[package]] category = "main" @@ -771,7 +535,7 @@ description = "Python Build Reasonableness" name = "pbr" optional = true python-versions = "*" -version = "5.4.4" +version = "5.4.5" [[package]] category = "dev" @@ -801,11 +565,6 @@ optional = false python-versions = "*" version = "0.7.5" -[package.dependencies] -[package.dependencies.pathlib2] -python = ">=2.6.0,<2.8.0 || >=3.2.0,<3.4.0" -version = "*" - [[package]] category = "dev" description = "plugin and hook calling mechanisms for python" @@ -835,29 +594,27 @@ category = "dev" description = "Library for building powerful interactive command lines in Python" name = "prompt-toolkit" optional = false -python-versions = "*" -version = "1.0.18" +python-versions = ">=3.6" +version = "3.0.3" [package.dependencies] -six = ">=1.9.0" wcwidth = "*" [[package]] -category = "dev" -description = "Library for building powerful interactive command lines in Python" -name = "prompt-toolkit" +category = "main" +description = "Cross-platform lib for process and system monitoring in Python." 
+name = "psutil" optional = false -python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" -version = "2.0.10" +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "5.7.0" -[package.dependencies] -six = ">=1.9.0" -wcwidth = "*" +[package.extras] +enum = ["enum34"] [[package]] category = "dev" description = "Run a subprocess in a pseudo terminal" -marker = "sys_platform != \"win32\"" +marker = "python_version >= \"3.4\" and sys_platform != \"win32\" or sys_platform != \"win32\"" name = "ptyprocess" optional = false python-versions = "*" @@ -869,7 +626,7 @@ description = "library with cross-python path, ini-parsing, io, code, log facili name = "py" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "1.8.1" +version = "1.8.2" [[package]] category = "dev" @@ -877,7 +634,7 @@ description = "Python style guide checker" name = "pycodestyle" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "2.5.0" +version = "2.6.0" [[package]] category = "dev" @@ -885,42 +642,15 @@ description = "passive checker of Python programs" name = "pyflakes" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "2.1.1" +version = "2.2.0" [[package]] category = "main" description = "Pygments is a syntax highlighting package written in Python." 
name = "pygments" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -version = "2.5.2" - -[[package]] -category = "dev" -description = "python code static checker" -name = "pylint" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <3.7" -version = "1.9.5" - -[package.dependencies] -astroid = ">=1.6,<2.0" -colorama = "*" -isort = ">=4.2.5" -mccabe = "*" -six = "*" - -[package.dependencies."backports.functools-lru-cache"] -python = ">=2.7,<2.8" -version = "*" - -[package.dependencies.configparser] -python = ">=2.7,<2.8" -version = "*" - -[package.dependencies.singledispatch] -python = "<3.4" -version = "*" +python-versions = ">=3.5" +version = "2.6.1" [[package]] category = "dev" @@ -928,13 +658,14 @@ description = "python code static checker" name = "pylint" optional = false python-versions = ">=3.5.*" -version = "2.4.4" +version = "2.5.3" [package.dependencies] -astroid = ">=2.3.0,<2.4" +astroid = ">=2.4.0,<=2.5" colorama = "*" isort = ">=4.2.5,<5" mccabe = ">=0.6,<0.7" +toml = ">=0.7.1" [[package]] category = "main" @@ -942,47 +673,7 @@ description = "Python parsing module" name = "pyparsing" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" -version = "2.4.6" - -[[package]] -category = "dev" -description = "pytest: simple powerful testing with Python" -name = "pytest" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" -version = "4.6.9" - -[package.dependencies] -atomicwrites = ">=1.0" -attrs = ">=17.4.0" -packaging = "*" -pluggy = ">=0.12,<1.0" -py = ">=1.5.0" -six = ">=1.10.0" -wcwidth = "*" - -[package.dependencies.colorama] -python = "<3.4.0 || >=3.5.0" -version = "*" - -[package.dependencies.funcsigs] -python = "<3.0" -version = ">=1.0" - -[package.dependencies.importlib-metadata] -python = "<3.8" -version = ">=0.12" - -[package.dependencies.more-itertools] -python = "<2.8" -version = ">=4.0.0,<6.0.0" - -[package.dependencies.pathlib2] -python 
= "<3.6" -version = ">=2.2.0" - -[package.extras] -testing = ["argcomplete", "hypothesis (>=3.56)", "nose", "requests", "mock"] +version = "2.4.7" [[package]] category = "dev" @@ -990,7 +681,7 @@ description = "pytest: simple powerful testing with Python" name = "pytest" optional = false python-versions = ">=3.5" -version = "5.3.5" +version = "5.4.3" [package.dependencies] atomicwrites = ">=1.0" @@ -1006,10 +697,6 @@ wcwidth = "*" python = "<3.8" version = ">=0.12" -[package.dependencies.pathlib2] -python = "<3.6" -version = ">=2.2.0" - [package.extras] checkqa-mypy = ["mypy (v0.761)"] testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] @@ -1031,15 +718,15 @@ category = "dev" description = "Pytest plugin for measuring coverage." name = "pytest-cov" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "2.8.1" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "2.10.0" [package.dependencies] coverage = ">=4.4" -pytest = ">=3.6" +pytest = ">=4.6" [package.extras] -testing = ["fields", "hunter", "process-tests (2.0.2)", "six", "virtualenv"] +testing = ["fields", "hunter", "process-tests (2.0.2)", "six", "pytest-xdist", "virtualenv"] [[package]] category = "dev" @@ -1060,7 +747,7 @@ description = "pytest-sugar is a plugin for pytest that changes the default look name = "pytest-sugar" optional = false python-versions = "*" -version = "0.9.2" +version = "0.9.3" [package.dependencies] packaging = ">=14.1" @@ -1095,7 +782,7 @@ description = "World timezone definitions, modern and historical" name = "pytz" optional = false python-versions = "*" -version = "2019.3" +version = "2020.1" [[package]] category = "main" @@ -1103,7 +790,7 @@ description = "YAML parser and emitter for Python" name = "pyyaml" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -version = "5.3" +version = "5.3.1" [[package]] category = "dev" @@ -1112,7 +799,7 @@ 
marker = "python_version >= \"3.6\" and python_version < \"4.0\"" name = "regex" optional = false python-versions = "*" -version = "2020.2.20" +version = "2020.6.8" [[package]] category = "main" @@ -1120,7 +807,7 @@ description = "Python HTTP for Humans." name = "requests" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -version = "2.23.0" +version = "2.24.0" [package.dependencies] certifi = ">=2017.4.17" @@ -1138,7 +825,7 @@ description = "reStructuredText linter" name = "restructuredtext-lint" optional = false python-versions = "*" -version = "1.3.0" +version = "1.3.1" [package.dependencies] docutils = ">=0.11,<1.0" @@ -1151,33 +838,13 @@ optional = false python-versions = "*" version = "1.10.0" -[[package]] -category = "dev" -description = "Simple generic functions (similar to Python's own len(), pickle.dump(), etc.)" -name = "simplegeneric" -optional = false -python-versions = "*" -version = "0.8.1" - -[[package]] -category = "dev" -description = "This library brings functools.singledispatch from Python 3.4 to Python 2.6-3.3." 
-marker = "python_version < \"3.4\"" -name = "singledispatch" -optional = false -python-versions = "*" -version = "3.4.0.3" - -[package.dependencies] -six = "*" - [[package]] category = "main" description = "Python 2 and 3 compatibility utilities" name = "six" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -version = "1.14.0" +version = "1.15.0" [[package]] category = "main" @@ -1210,10 +877,6 @@ six = ">=1.5" snowballstemmer = ">=1.1" sphinxcontrib-websupport = "*" -[package.dependencies.typing] -python = "<3.5" -version = "*" - [package.extras] test = ["mock", "pytest", "pytest-cov", "html5lib", "flake8 (>=3.5.0)", "flake8-import-order", "enum34", "mypy", "typed-ast"] websupport = ["sqlalchemy (>=0.9)", "whoosh (>=2.0)"] @@ -1241,22 +904,11 @@ description = "Sphinx extension that automatically documents click applications" name = "sphinx-click" optional = true python-versions = "*" -version = "2.3.1" +version = "2.3.2" [package.dependencies] pbr = ">=2.0" -sphinx = ">=1.5,<3.0" - -[[package]] -category = "main" -description = "Sphinx API for Web Apps" -name = "sphinxcontrib-websupport" -optional = true -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "1.1.2" - -[package.extras] -test = ["pytest", "mock"] +sphinx = ">=1.5,<4.0" [[package]] category = "main" @@ -1264,7 +916,7 @@ description = "Sphinx API for Web Apps" name = "sphinxcontrib-websupport" optional = true python-versions = ">=3.5" -version = "1.2.0" +version = "1.2.2" [package.extras] lint = ["flake8"] @@ -1276,7 +928,7 @@ description = "Pretty-print tabular data" name = "tabulate" optional = false python-versions = "*" -version = "0.8.6" +version = "0.8.7" [package.extras] widechars = ["wcwidth"] @@ -1295,15 +947,7 @@ description = "Python Library for Tom's Obvious, Minimal Language" name = "toml" optional = false python-versions = "*" -version = "0.10.0" - -[[package]] -category = "main" -description = "Tornado is a Python web framework and asynchronous 
networking library, originally developed at FriendFeed." -name = "tornado" -optional = true -python-versions = ">= 2.7, !=3.0.*, !=3.1.*, !=3.2.*, != 3.3.*" -version = "5.1.1" +version = "0.10.1" [[package]] category = "main" @@ -1311,7 +955,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib name = "tornado" optional = true python-versions = ">= 3.5" -version = "6.0.3" +version = "6.0.4" [[package]] category = "dev" @@ -1340,10 +984,6 @@ decorator = "*" ipython-genutils = "*" six = "*" -[package.dependencies.enum34] -python = ">=2.7,<2.8" -version = "*" - [package.extras] test = ["pytest", "mock"] @@ -1356,26 +996,17 @@ optional = false python-versions = "*" version = "1.4.1" -[[package]] -category = "main" -description = "Type Hints for Python" -marker = "python_version < \"3.5\"" -name = "typing" -optional = false -python-versions = "*" -version = "3.7.4.1" - [[package]] category = "main" description = "HTTP library with thread-safe connection pooling, file post, and more." name = "urllib3" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" -version = "1.25.8" +version = "1.25.9" [package.extras] brotli = ["brotlipy (>=0.6.0)"] -secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "pyOpenSSL (>=0.14)", "ipaddress"] socks = ["PySocks (>=1.5.6,<1.5.7 || >1.5.7,<2.0)"] [[package]] @@ -1394,28 +1025,11 @@ watchmedo = ["PyYAML (>=3.10)", "argh (>=0.24.1)"] [[package]] category = "dev" -description = "Measures number of Terminal column cells of wide-character codes" +description = "Measures the displayed width of unicode strings in a terminal" name = "wcwidth" optional = false python-versions = "*" -version = "0.1.8" - -[[package]] -category = "dev" -description = "Enable Unicode input and display when running Python from Windows console." 
-marker = "sys_platform == \"win32\" and python_version < \"3.6\"" -name = "win-unicode-console" -optional = false -python-versions = "*" -version = "0.5" - -[[package]] -category = "dev" -description = "Module for decorators, wrappers and monkey patching." -name = "wrapt" -optional = false -python-versions = "*" -version = "1.11.2" +version = "0.2.4" [[package]] category = "dev" @@ -1423,7 +1037,7 @@ description = "Module for decorators, wrappers and monkey patching." name = "wrapt" optional = false python-versions = "*" -version = "1.12.0" +version = "1.12.1" [[package]] category = "main" @@ -1439,24 +1053,19 @@ description = "Backport of pathlib-compatible object wrapper for zip files" marker = "python_version < \"3.8\"" name = "zipp" optional = false -python-versions = ">=2.7" -version = "1.2.0" - -[package.dependencies] -[package.dependencies.contextlib2] -python = "<3.4" -version = "*" +python-versions = ">=3.6" +version = "3.1.0" [package.extras] docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] -testing = ["pathlib2", "unittest2", "jaraco.itertools", "func-timeout"] +testing = ["jaraco.itertools", "func-timeout"] [extras] docs = ["Sphinx", "sphinx-autobuild", "sphinx-click"] [metadata] -content-hash = "ab3646f46870d9095a1bb5dd2bb45360259df4b737f402f32ccbbc61dd18c427" -python-versions = "~2.7 || ^3.5" +content-hash = "6bccfcd1870e8ce9c79d2bbe1c736e6501bd1bd1471ca518304b3f0c7db3f9c8" +python-versions = "^3.6" [metadata.files] alabaster = [ @@ -1468,8 +1077,8 @@ apipkg = [ {file = "apipkg-1.5.tar.gz", hash = "sha256:37228cda29411948b422fae072f57e31d3396d2ee1c9783775980ee9c9990af6"}, ] appdirs = [ - {file = "appdirs-1.4.3-py2.py3-none-any.whl", hash = "sha256:d8b24664561d0d34ddfaec54636d502d7cea6e29c3eaf68f3df6180863e2166e"}, - {file = "appdirs-1.4.3.tar.gz", hash = "sha256:9e5896d1372858f8dd3344faf4e5014d21849c756c8d5701f78f8a103b372d92"}, + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = 
"sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, ] appnope = [ {file = "appnope-0.1.0-py2.py3-none-any.whl", hash = "sha256:5b26757dc6f79a3b7dc9fab95359328d5747fcb2409d331ea66d0272b90ab2a0"}, @@ -1483,14 +1092,12 @@ asciitree = [ {file = "asciitree-0.3.3.tar.gz", hash = "sha256:4aa4b9b649f85e3fcb343363d97564aa1fb62e249677f2e18a96765145cc0f6e"}, ] astroid = [ - {file = "astroid-1.6.6-py2.py3-none-any.whl", hash = "sha256:87de48a92e29cedf7210ffa853d11441e7ad94cb47bacd91b023499b51cbc756"}, - {file = "astroid-1.6.6.tar.gz", hash = "sha256:d25869fc7f44f1d9fb7d24fd7ea0639656f5355fc3089cd1f3d18c6ec6b124c7"}, - {file = "astroid-2.3.3-py3-none-any.whl", hash = "sha256:840947ebfa8b58f318d42301cf8c0a20fd794a33b61cc4638e28e9e61ba32f42"}, - {file = "astroid-2.3.3.tar.gz", hash = "sha256:71ea07f44df9568a75d0f354c49143a4575d90645e9fead6dfb52c26a85ed13a"}, + {file = "astroid-2.4.2-py3-none-any.whl", hash = "sha256:bc58d83eb610252fd8de6363e39d4f1d0619c894b0ed24603b881c02e64c7386"}, + {file = "astroid-2.4.2.tar.gz", hash = "sha256:2f4078c2a41bf377eea06d71c9d2ba4eb8f6b1af2135bec27bbbb7d8f12bb703"}, ] atomicwrites = [ - {file = "atomicwrites-1.3.0-py2.py3-none-any.whl", hash = "sha256:03472c30eb2c5d1ba9227e4c2ca66ab8287fbfbbda3888aa93dc2e28fc6811b4"}, - {file = "atomicwrites-1.3.0.tar.gz", hash = "sha256:75a9445bac02d8d058d5e1fe689654ba5a6556a1dfd8ce6ec55a0ed79866cfa6"}, + {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, + {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, ] attrs = [ {file = "attrs-19.3.0-py2.py3-none-any.whl", hash = "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c"}, @@ -1501,77 +1108,61 @@ babel = [ {file = "Babel-2.8.0.tar.gz", hash = 
"sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38"}, ] backcall = [ - {file = "backcall-0.1.0.tar.gz", hash = "sha256:38ecd85be2c1e78f77fd91700c76e14667dc21e2713b63876c0eb901196e01e4"}, - {file = "backcall-0.1.0.zip", hash = "sha256:bbbf4b1e5cd2bdb08f915895b51081c041bac22394fdfcfdfbe9f14b77c08bf2"}, -] -"backports.functools-lru-cache" = [ - {file = "backports.functools_lru_cache-1.6.1-py2.py3-none-any.whl", hash = "sha256:0bada4c2f8a43d533e4ecb7a12214d9420e66eb206d54bf2d682581ca4b80848"}, - {file = "backports.functools_lru_cache-1.6.1.tar.gz", hash = "sha256:8fde5f188da2d593bd5bc0be98d9abc46c95bb8a9dde93429570192ee6cc2d4a"}, -] -"backports.shutil-get-terminal-size" = [ - {file = "backports.shutil_get_terminal_size-1.0.0-py2.py3-none-any.whl", hash = "sha256:0975ba55054c15e346944b38956a4c9cbee9009391e41b86c68990effb8c1f64"}, - {file = "backports.shutil_get_terminal_size-1.0.0.tar.gz", hash = "sha256:713e7a8228ae80341c70586d1cc0a8caa5207346927e23d09dcbcaf18eadec80"}, + {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"}, + {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"}, ] black = [ {file = "black-19.10b0-py36-none-any.whl", hash = "sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b"}, {file = "black-19.10b0.tar.gz", hash = "sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539"}, ] certifi = [ - {file = "certifi-2019.11.28-py2.py3-none-any.whl", hash = "sha256:017c25db2a153ce562900032d5bc68e9f191e44e9a0f762f373977de9df1fbb3"}, - {file = "certifi-2019.11.28.tar.gz", hash = "sha256:25b64c7da4cd7479594d035c08c2d809eb4aab3a26e5a990ea98cc450c320f1f"}, + {file = "certifi-2020.4.5.2-py2.py3-none-any.whl", hash = "sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc"}, + {file = "certifi-2020.4.5.2.tar.gz", hash = 
"sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1"}, ] chardet = [ {file = "chardet-3.0.4-py2.py3-none-any.whl", hash = "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"}, {file = "chardet-3.0.4.tar.gz", hash = "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae"}, ] click = [ - {file = "Click-7.0-py2.py3-none-any.whl", hash = "sha256:2335065e6395b9e67ca716de5f7526736bfa6ceead690adf616d925bdc622b13"}, - {file = "Click-7.0.tar.gz", hash = "sha256:5b94b49521f6456670fdb30cd82a4eca9412788a93fa6dd6df72c94d5a8ff2d7"}, + {file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"}, + {file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"}, ] colorama = [ {file = "colorama-0.4.3-py2.py3-none-any.whl", hash = "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff"}, {file = "colorama-0.4.3.tar.gz", hash = "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1"}, ] -configparser = [ - {file = "configparser-4.0.2-py2.py3-none-any.whl", hash = "sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c"}, - {file = "configparser-4.0.2.tar.gz", hash = "sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df"}, -] -contextlib2 = [ - {file = "contextlib2-0.6.0.post1-py2.py3-none-any.whl", hash = "sha256:3355078a159fbb44ee60ea80abd0d87b80b78c248643b49aa6d94673b413609b"}, - {file = "contextlib2-0.6.0.post1.tar.gz", hash = "sha256:01f490098c18b19d2bd5bb5dc445b2054d2fa97f09a4280ba2c5f3c394c8162e"}, -] coverage = [ - {file = "coverage-5.0.3-cp27-cp27m-macosx_10_12_x86_64.whl", hash = "sha256:cc1109f54a14d940b8512ee9f1c3975c181bbb200306c6d8b87d93376538782f"}, - {file = "coverage-5.0.3-cp27-cp27m-macosx_10_13_intel.whl", hash = "sha256:be18f4ae5a9e46edae3f329de2191747966a34a3d93046dbdf897319923923bc"}, - {file = 
"coverage-5.0.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:3230d1003eec018ad4a472d254991e34241e0bbd513e97a29727c7c2f637bd2a"}, - {file = "coverage-5.0.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:e69215621707119c6baf99bda014a45b999d37602cb7043d943c76a59b05bf52"}, - {file = "coverage-5.0.3-cp27-cp27m-win32.whl", hash = "sha256:1daa3eceed220f9fdb80d5ff950dd95112cd27f70d004c7918ca6dfc6c47054c"}, - {file = "coverage-5.0.3-cp27-cp27m-win_amd64.whl", hash = "sha256:51bc7710b13a2ae0c726f69756cf7ffd4362f4ac36546e243136187cfcc8aa73"}, - {file = "coverage-5.0.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:9bea19ac2f08672636350f203db89382121c9c2ade85d945953ef3c8cf9d2a68"}, - {file = "coverage-5.0.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:5012d3b8d5a500834783689a5d2292fe06ec75dc86ee1ccdad04b6f5bf231691"}, - {file = "coverage-5.0.3-cp35-cp35m-macosx_10_12_x86_64.whl", hash = "sha256:d513cc3db248e566e07a0da99c230aca3556d9b09ed02f420664e2da97eac301"}, - {file = "coverage-5.0.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:3dbb72eaeea5763676a1a1efd9b427a048c97c39ed92e13336e726117d0b72bf"}, - {file = "coverage-5.0.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:15cf13a6896048d6d947bf7d222f36e4809ab926894beb748fc9caa14605d9c3"}, - {file = "coverage-5.0.3-cp35-cp35m-win32.whl", hash = "sha256:fca1669d464f0c9831fd10be2eef6b86f5ebd76c724d1e0706ebdff86bb4adf0"}, - {file = "coverage-5.0.3-cp35-cp35m-win_amd64.whl", hash = "sha256:1e44a022500d944d42f94df76727ba3fc0a5c0b672c358b61067abb88caee7a0"}, - {file = "coverage-5.0.3-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:b26aaf69713e5674efbde4d728fb7124e429c9466aeaf5f4a7e9e699b12c9fe2"}, - {file = "coverage-5.0.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:722e4557c8039aad9592c6a4213db75da08c2cd9945320220634f637251c3894"}, - {file = "coverage-5.0.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:7afad9835e7a651d3551eab18cbc0fdb888f0a6136169fbef0662d9cdc9987cf"}, - {file = 
"coverage-5.0.3-cp36-cp36m-win32.whl", hash = "sha256:25dbf1110d70bab68a74b4b9d74f30e99b177cde3388e07cc7272f2168bd1477"}, - {file = "coverage-5.0.3-cp36-cp36m-win_amd64.whl", hash = "sha256:c312e57847db2526bc92b9bfa78266bfbaabac3fdcd751df4d062cd4c23e46dc"}, - {file = "coverage-5.0.3-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:a8b8ac7876bc3598e43e2603f772d2353d9931709345ad6c1149009fd1bc81b8"}, - {file = "coverage-5.0.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:527b4f316e6bf7755082a783726da20671a0cc388b786a64417780b90565b987"}, - {file = "coverage-5.0.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:d649dc0bcace6fcdb446ae02b98798a856593b19b637c1b9af8edadf2b150bea"}, - {file = "coverage-5.0.3-cp37-cp37m-win32.whl", hash = "sha256:cd60f507c125ac0ad83f05803063bed27e50fa903b9c2cfee3f8a6867ca600fc"}, - {file = "coverage-5.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c60097190fe9dc2b329a0eb03393e2e0829156a589bd732e70794c0dd804258e"}, - {file = "coverage-5.0.3-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:d7008a6796095a79544f4da1ee49418901961c97ca9e9d44904205ff7d6aa8cb"}, - {file = "coverage-5.0.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:ea9525e0fef2de9208250d6c5aeeee0138921057cd67fcef90fbed49c4d62d37"}, - {file = "coverage-5.0.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c62a2143e1313944bf4a5ab34fd3b4be15367a02e9478b0ce800cb510e3bbb9d"}, - {file = "coverage-5.0.3-cp38-cp38m-win32.whl", hash = "sha256:b0840b45187699affd4c6588286d429cd79a99d509fe3de0f209594669bb0954"}, - {file = "coverage-5.0.3-cp38-cp38m-win_amd64.whl", hash = "sha256:76e2057e8ffba5472fd28a3a010431fd9e928885ff480cb278877c6e9943cc2e"}, - {file = "coverage-5.0.3-cp39-cp39m-win32.whl", hash = "sha256:b63dd43f455ba878e5e9f80ba4f748c0a2156dde6e0e6e690310e24d6e8caf40"}, - {file = "coverage-5.0.3-cp39-cp39m-win_amd64.whl", hash = "sha256:da93027835164b8223e8e5af2cf902a4c80ed93cb0909417234f4a9df3bcd9af"}, - {file = "coverage-5.0.3.tar.gz", hash = 
"sha256:77afca04240c40450c331fa796b3eab6f1e15c5ecf8bf2b8bee9706cd5452fef"}, + {file = "coverage-5.1-cp27-cp27m-macosx_10_12_x86_64.whl", hash = "sha256:0cb4be7e784dcdc050fc58ef05b71aa8e89b7e6636b99967fadbdba694cf2b65"}, + {file = "coverage-5.1-cp27-cp27m-macosx_10_13_intel.whl", hash = "sha256:c317eaf5ff46a34305b202e73404f55f7389ef834b8dbf4da09b9b9b37f76dd2"}, + {file = "coverage-5.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:b83835506dfc185a319031cf853fa4bb1b3974b1f913f5bb1a0f3d98bdcded04"}, + {file = "coverage-5.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5f2294dbf7875b991c381e3d5af2bcc3494d836affa52b809c91697449d0eda6"}, + {file = "coverage-5.1-cp27-cp27m-win32.whl", hash = "sha256:de807ae933cfb7f0c7d9d981a053772452217df2bf38e7e6267c9cbf9545a796"}, + {file = "coverage-5.1-cp27-cp27m-win_amd64.whl", hash = "sha256:bf9cb9a9fd8891e7efd2d44deb24b86d647394b9705b744ff6f8261e6f29a730"}, + {file = "coverage-5.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:acf3763ed01af8410fc36afea23707d4ea58ba7e86a8ee915dfb9ceff9ef69d0"}, + {file = "coverage-5.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:dec5202bfe6f672d4511086e125db035a52b00f1648d6407cc8e526912c0353a"}, + {file = "coverage-5.1-cp35-cp35m-macosx_10_12_x86_64.whl", hash = "sha256:7a5bdad4edec57b5fb8dae7d3ee58622d626fd3a0be0dfceda162a7035885ecf"}, + {file = "coverage-5.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:1601e480b9b99697a570cea7ef749e88123c04b92d84cedaa01e117436b4a0a9"}, + {file = "coverage-5.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:dbe8c6ae7534b5b024296464f387d57c13caa942f6d8e6e0346f27e509f0f768"}, + {file = "coverage-5.1-cp35-cp35m-win32.whl", hash = "sha256:a027ef0492ede1e03a8054e3c37b8def89a1e3c471482e9f046906ba4f2aafd2"}, + {file = "coverage-5.1-cp35-cp35m-win_amd64.whl", hash = "sha256:0e61d9803d5851849c24f78227939c701ced6704f337cad0a91e0972c51c1ee7"}, + {file = "coverage-5.1-cp36-cp36m-macosx_10_13_x86_64.whl", hash = 
"sha256:2d27a3f742c98e5c6b461ee6ef7287400a1956c11421eb574d843d9ec1f772f0"}, + {file = "coverage-5.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:66460ab1599d3cf894bb6baee8c684788819b71a5dc1e8fa2ecc152e5d752019"}, + {file = "coverage-5.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:5c542d1e62eece33c306d66fe0a5c4f7f7b3c08fecc46ead86d7916684b36d6c"}, + {file = "coverage-5.1-cp36-cp36m-win32.whl", hash = "sha256:2742c7515b9eb368718cd091bad1a1b44135cc72468c731302b3d641895b83d1"}, + {file = "coverage-5.1-cp36-cp36m-win_amd64.whl", hash = "sha256:dead2ddede4c7ba6cb3a721870f5141c97dc7d85a079edb4bd8d88c3ad5b20c7"}, + {file = "coverage-5.1-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:01333e1bd22c59713ba8a79f088b3955946e293114479bbfc2e37d522be03355"}, + {file = "coverage-5.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:e1ea316102ea1e1770724db01998d1603ed921c54a86a2efcb03428d5417e489"}, + {file = "coverage-5.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:adeb4c5b608574a3d647011af36f7586811a2c1197c861aedb548dd2453b41cd"}, + {file = "coverage-5.1-cp37-cp37m-win32.whl", hash = "sha256:782caea581a6e9ff75eccda79287daefd1d2631cc09d642b6ee2d6da21fc0a4e"}, + {file = "coverage-5.1-cp37-cp37m-win_amd64.whl", hash = "sha256:00f1d23f4336efc3b311ed0d807feb45098fc86dee1ca13b3d6768cdab187c8a"}, + {file = "coverage-5.1-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:402e1744733df483b93abbf209283898e9f0d67470707e3c7516d84f48524f55"}, + {file = "coverage-5.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:a3f3654d5734a3ece152636aad89f58afc9213c6520062db3978239db122f03c"}, + {file = "coverage-5.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:6402bd2fdedabbdb63a316308142597534ea8e1895f4e7d8bf7476c5e8751fef"}, + {file = "coverage-5.1-cp38-cp38-win32.whl", hash = "sha256:8fa0cbc7ecad630e5b0f4f35b0f6ad419246b02bc750de7ac66db92667996d24"}, + {file = "coverage-5.1-cp38-cp38-win_amd64.whl", hash = "sha256:79a3cfd6346ce6c13145731d39db47b7a7b859c0272f02cdb89a3bdcbae233a0"}, + 
{file = "coverage-5.1-cp39-cp39-win32.whl", hash = "sha256:a82b92b04a23d3c8a581fc049228bafde988abacba397d57ce95fe95e0338ab4"}, + {file = "coverage-5.1-cp39-cp39-win_amd64.whl", hash = "sha256:bb28a7245de68bf29f6fb199545d072d1036a1917dca17a1e75bbb919e14ee8e"}, + {file = "coverage-5.1.tar.gz", hash = "sha256:f90bfc4ad18450c80b024036eaf91e4a246ae287701aaa88eaebebf150868052"}, ] cycler = [ {file = "cycler-0.10.0-py2.py3-none-any.whl", hash = "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d"}, @@ -1589,34 +1180,13 @@ docutils = [ {file = "docutils-0.16-py2.py3-none-any.whl", hash = "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af"}, {file = "docutils-0.16.tar.gz", hash = "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc"}, ] -entrypoints = [ - {file = "entrypoints-0.3-py2.py3-none-any.whl", hash = "sha256:589f874b313739ad35be6e0cd7efde2a4e9b6fea91edcc34e58ecbb8dbe56d19"}, - {file = "entrypoints-0.3.tar.gz", hash = "sha256:c70dd71abe5a8c85e55e12c19bd91ccfeec11a6e99044204511f9ed547d48451"}, -] -enum34 = [ - {file = "enum34-1.1.9-py2-none-any.whl", hash = "sha256:98df1f1937840b7d8012fea7f0b36392a3e6fd8a2f429c48a3ff4b1aad907f3f"}, - {file = "enum34-1.1.9-py3-none-any.whl", hash = "sha256:708aabfb3d5898f99674c390d360d59efdd08547019763622365f19e84a7fef4"}, - {file = "enum34-1.1.9.tar.gz", hash = "sha256:13ef9a1c478203252107f66c25b99b45b1865693ca1284aab40dafa7e1e7ac17"}, -] execnet = [ {file = "execnet-1.7.1-py2.py3-none-any.whl", hash = "sha256:d4efd397930c46415f62f8a31388d6be4f27a91d7550eb79bc64a756e0056547"}, {file = "execnet-1.7.1.tar.gz", hash = "sha256:cacb9df31c9680ec5f95553976c4da484d407e85e41c83cb812aa014f0eddc50"}, ] flake8 = [ - {file = "flake8-3.7.9-py2.py3-none-any.whl", hash = "sha256:49356e766643ad15072a789a20915d3c91dc89fd313ccd71802303fd67e4deca"}, - {file = "flake8-3.7.9.tar.gz", hash = "sha256:45681a117ecc81e870cbf1262835ae4af5e7a8b08e40b944a8a6e6b895914cfb"}, -] -funcsigs = [ - {file = 
"funcsigs-1.0.2-py2.py3-none-any.whl", hash = "sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca"}, - {file = "funcsigs-1.0.2.tar.gz", hash = "sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50"}, -] -functools32 = [ - {file = "functools32-3.2.3-2.tar.gz", hash = "sha256:f6253dfbe0538ad2e387bd8fdfd9293c925d63553f5813c4e587745416501e6d"}, - {file = "functools32-3.2.3-2.zip", hash = "sha256:89d824aa6c358c421a234d7f9ee0bd75933a67c29588ce50aaa3acdf4d403fa0"}, -] -futures = [ - {file = "futures-3.3.0-py2-none-any.whl", hash = "sha256:49b3f5b064b6e3afc3316421a3f25f66c137ae88f068abbf72830170033c5e16"}, - {file = "futures-3.3.0.tar.gz", hash = "sha256:7e033af76a5e35f58e56da7a91e687706faf4e7bdfb2cbc3f2cca6b9bcda9794"}, + {file = "flake8-3.8.3-py2.py3-none-any.whl", hash = "sha256:15e351d19611c887e482fb960eae4d44845013cc142d42896e9862f775d8cf5c"}, + {file = "flake8-3.8.3.tar.gz", hash = "sha256:f04b9fcbac03b0a3e58c0ab3a0ecc462e023a9faf046d57794184028123aa208"}, ] fuzzywuzzy = [ {file = "fuzzywuzzy-0.18.0-py2.py3-none-any.whl", hash = "sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993"}, @@ -1631,19 +1201,16 @@ imagesize = [ {file = "imagesize-1.2.0.tar.gz", hash = "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1"}, ] importlib-metadata = [ - {file = "importlib_metadata-1.5.0-py2.py3-none-any.whl", hash = "sha256:b97607a1a18a5100839aec1dc26a1ea17ee0d93b20b0f008d80a5a050afb200b"}, - {file = "importlib_metadata-1.5.0.tar.gz", hash = "sha256:06f5b3a99029c7134207dd882428a66992a9de2bef7c2b699b5641f9886c3302"}, + {file = "importlib_metadata-1.6.1-py2.py3-none-any.whl", hash = "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958"}, + {file = "importlib_metadata-1.6.1.tar.gz", hash = "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545"}, ] incremental = [ {file = "incremental-17.5.0-py2.py3-none-any.whl", hash = 
"sha256:717e12246dddf231a349175f48d74d93e2897244939173b01974ab6661406b9f"}, {file = "incremental-17.5.0.tar.gz", hash = "sha256:7b751696aaf36eebfab537e458929e194460051ccad279c72b755a167eebd4b3"}, ] ipython = [ - {file = "ipython-5.9.0-py2-none-any.whl", hash = "sha256:54526d92db62bedd872c18131ac7d753fcf054ea34752e1e6ef8eb26391fb1f0"}, - {file = "ipython-5.9.0-py3-none-any.whl", hash = "sha256:fbeb7b8344dbb7f4939227ed9b2816ac6028db1775521365619b77f3c943ba74"}, - {file = "ipython-5.9.0.tar.gz", hash = "sha256:8ac83f3a6232b7a5ee4d3535193e782d3de8c260e7b034b968a9cd1e1580f789"}, - {file = "ipython-7.9.0-py3-none-any.whl", hash = "sha256:ed7ebe1cba899c1c3ccad6f7f1c2d2369464cc77dba8eebc65e2043e19cda995"}, - {file = "ipython-7.9.0.tar.gz", hash = "sha256:dfd303b270b7b5232b3d08bd30ec6fd685d8a58cabd54055e3d69d8f029f7280"}, + {file = "ipython-7.15.0-py3-none-any.whl", hash = "sha256:1b85d65632211bf5d3e6f1406f3393c8c429a47d7b947b9a87812aa5bce6595c"}, + {file = "ipython-7.15.0.tar.gz", hash = "sha256:0ef1433879816a960cd3ae1ae1dc82c64732ca75cec8dab5a4e29783fb571d0e"}, ] ipython-genutils = [ {file = "ipython_genutils-0.2.0-py2.py3-none-any.whl", hash = "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8"}, @@ -1654,51 +1221,30 @@ isort = [ {file = "isort-4.3.21.tar.gz", hash = "sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1"}, ] jedi = [ - {file = "jedi-0.16.0-py2.py3-none-any.whl", hash = "sha256:b4f4052551025c6b0b0b193b29a6ff7bdb74c52450631206c262aef9f7159ad2"}, - {file = "jedi-0.16.0.tar.gz", hash = "sha256:d5c871cb9360b414f981e7072c52c33258d598305280fef91c6cae34739d65d5"}, + {file = "jedi-0.17.1-py2.py3-none-any.whl", hash = "sha256:1ddb0ec78059e8e27ec9eb5098360b4ea0a3dd840bedf21415ea820c21b40a22"}, + {file = "jedi-0.17.1.tar.gz", hash = "sha256:807d5d4f96711a2bcfdd5dfa3b1ae6d09aa53832b182090b222b5efb81f52f63"}, ] jinja2 = [ - {file = "Jinja2-2.11.1-py2.py3-none-any.whl", hash = 
"sha256:b0eaf100007721b5c16c1fc1eecb87409464edc10469ddc9a22a27a99123be49"}, - {file = "Jinja2-2.11.1.tar.gz", hash = "sha256:93187ffbc7808079673ef52771baa950426fd664d3aad1d0fa3e95644360e250"}, + {file = "Jinja2-2.11.2-py2.py3-none-any.whl", hash = "sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035"}, + {file = "Jinja2-2.11.2.tar.gz", hash = "sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0"}, ] kiwisolver = [ - {file = "kiwisolver-1.1.0-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:7f4dd50874177d2bb060d74769210f3bce1af87a8c7cf5b37d032ebf94f0aca3"}, - {file = "kiwisolver-1.1.0-cp27-cp27m-macosx_10_6_intel.whl", hash = "sha256:fe51b79da0062f8e9d49ed0182a626a7dc7a0cbca0328f612c6ee5e4711c81e4"}, - {file = "kiwisolver-1.1.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:f790f8b3dff3d53453de6a7b7ddd173d2e020fb160baff578d578065b108a05f"}, - {file = "kiwisolver-1.1.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:f2b22153870ca5cf2ab9c940d7bc38e8e9089fa0f7e5856ea195e1cf4ff43d5a"}, - {file = "kiwisolver-1.1.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:e8bf074363ce2babeb4764d94f8e65efd22e6a7c74860a4f05a6947afc020ff2"}, - {file = "kiwisolver-1.1.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:05b5b061e09f60f56244adc885c4a7867da25ca387376b02c1efc29cc16bcd0f"}, - {file = "kiwisolver-1.1.0-cp27-none-win32.whl", hash = "sha256:47b8cb81a7d18dbaf4fed6a61c3cecdb5adec7b4ac292bddb0d016d57e8507d5"}, - {file = "kiwisolver-1.1.0-cp27-none-win_amd64.whl", hash = "sha256:b64916959e4ae0ac78af7c3e8cef4becee0c0e9694ad477b4c6b3a536de6a544"}, - {file = "kiwisolver-1.1.0-cp34-cp34m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:682e54f0ce8f45981878756d7203fd01e188cc6c8b2c5e2cf03675390b4534d5"}, - {file = "kiwisolver-1.1.0-cp34-cp34m-manylinux1_i686.whl", hash = 
"sha256:d52e3b1868a4e8fd18b5cb15055c76820df514e26aa84cc02f593d99fef6707f"}, - {file = "kiwisolver-1.1.0-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:8aa7009437640beb2768bfd06da049bad0df85f47ff18426261acecd1cf00897"}, - {file = "kiwisolver-1.1.0-cp34-none-win32.whl", hash = "sha256:26f4fbd6f5e1dabff70a9ba0d2c4bd30761086454aa30dddc5b52764ee4852b7"}, - {file = "kiwisolver-1.1.0-cp34-none-win_amd64.whl", hash = "sha256:79bfb2f0bd7cbf9ea256612c9523367e5ec51d7cd616ae20ca2c90f575d839a2"}, - {file = "kiwisolver-1.1.0-cp35-cp35m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:3b2378ad387f49cbb328205bda569b9f87288d6bc1bf4cd683c34523a2341efe"}, - {file = "kiwisolver-1.1.0-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:aa716b9122307c50686356cfb47bfbc66541868078d0c801341df31dca1232a9"}, - {file = "kiwisolver-1.1.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:58e626e1f7dfbb620d08d457325a4cdac65d1809680009f46bf41eaf74ad0187"}, - {file = "kiwisolver-1.1.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:e3a21a720791712ed721c7b95d433e036134de6f18c77dbe96119eaf7aa08004"}, - {file = "kiwisolver-1.1.0-cp35-none-win32.whl", hash = "sha256:939f36f21a8c571686eb491acfffa9c7f1ac345087281b412d63ea39ca14ec4a"}, - {file = "kiwisolver-1.1.0-cp35-none-win_amd64.whl", hash = "sha256:9733b7f64bd9f807832d673355f79703f81f0b3e52bfce420fc00d8cb28c6a6c"}, - {file = "kiwisolver-1.1.0-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:acc4df99308111585121db217681f1ce0eecb48d3a828a2f9bbf9773f4937e9e"}, - {file = "kiwisolver-1.1.0-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:9105ce82dcc32c73eb53a04c869b6a4bc756b43e4385f76ea7943e827f529e4d"}, - {file = "kiwisolver-1.1.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:f16814a4a96dc04bf1da7d53ee8d5b1d6decfc1a92a63349bb15d37b6a263dd9"}, - {file = "kiwisolver-1.1.0-cp36-cp36m-manylinux1_x86_64.whl", hash 
= "sha256:400599c0fe58d21522cae0e8b22318e09d9729451b17ee61ba8e1e7c0346565c"}, - {file = "kiwisolver-1.1.0-cp36-none-win32.whl", hash = "sha256:db1a5d3cc4ae943d674718d6c47d2d82488ddd94b93b9e12d24aabdbfe48caee"}, - {file = "kiwisolver-1.1.0-cp36-none-win_amd64.whl", hash = "sha256:5a52e1b006bfa5be04fe4debbcdd2688432a9af4b207a3f429c74ad625022641"}, - {file = "kiwisolver-1.1.0-cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:a02f6c3e229d0b7220bd74600e9351e18bc0c361b05f29adae0d10599ae0e326"}, - {file = "kiwisolver-1.1.0-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:9491578147849b93e70d7c1d23cb1229458f71fc79c51d52dce0809b2ca44eea"}, - {file = "kiwisolver-1.1.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:5c7ca4e449ac9f99b3b9d4693debb1d6d237d1542dd6a56b3305fe8a9620f883"}, - {file = "kiwisolver-1.1.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:a0c0a9f06872330d0dd31b45607197caab3c22777600e88031bfe66799e70bb0"}, - {file = "kiwisolver-1.1.0-cp37-none-win32.whl", hash = "sha256:8944a16020c07b682df861207b7e0efcd2f46c7488619cb55f65882279119389"}, - {file = "kiwisolver-1.1.0-cp37-none-win_amd64.whl", hash = "sha256:d3fcf0819dc3fea58be1fd1ca390851bdb719a549850e708ed858503ff25d995"}, - {file = "kiwisolver-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:933df612c453928f1c6faa9236161a1d999a26cd40abf1dc5d7ebbc6dbfb8fca"}, - {file = "kiwisolver-1.1.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:d22702cadb86b6fcba0e6b907d9f84a312db9cd6934ee728144ce3018e715ee1"}, - {file = "kiwisolver-1.1.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:210d8c39d01758d76c2b9a693567e1657ec661229bc32eac30761fa79b2474b0"}, - {file = "kiwisolver-1.1.0-cp38-none-win32.whl", hash = "sha256:76275ee077772c8dde04fb6c5bc24b91af1bb3e7f4816fd1852f1495a64dad93"}, - {file = "kiwisolver-1.1.0-cp38-none-win_amd64.whl", hash = "sha256:3b15d56a9cd40c52d7ab763ff0bc700edbb4e1a298dc43715ecccd605002cf11"}, - {file = 
"kiwisolver-1.1.0.tar.gz", hash = "sha256:53eaed412477c836e1b9522c19858a8557d6e595077830146182225613b11a75"}, + {file = "kiwisolver-1.2.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:443c2320520eda0a5b930b2725b26f6175ca4453c61f739fef7a5847bd262f74"}, + {file = "kiwisolver-1.2.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:efcf3397ae1e3c3a4a0a0636542bcad5adad3b1dd3e8e629d0b6e201347176c8"}, + {file = "kiwisolver-1.2.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:fccefc0d36a38c57b7bd233a9b485e2f1eb71903ca7ad7adacad6c28a56d62d2"}, + {file = "kiwisolver-1.2.0-cp36-none-win32.whl", hash = "sha256:60a78858580761fe611d22127868f3dc9f98871e6fdf0a15cc4203ed9ba6179b"}, + {file = "kiwisolver-1.2.0-cp36-none-win_amd64.whl", hash = "sha256:556da0a5f60f6486ec4969abbc1dd83cf9b5c2deadc8288508e55c0f5f87d29c"}, + {file = "kiwisolver-1.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7cc095a4661bdd8a5742aaf7c10ea9fac142d76ff1770a0f84394038126d8fc7"}, + {file = "kiwisolver-1.2.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:c955791d80e464da3b471ab41eb65cf5a40c15ce9b001fdc5bbc241170de58ec"}, + {file = "kiwisolver-1.2.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:603162139684ee56bcd57acc74035fceed7dd8d732f38c0959c8bd157f913fec"}, + {file = "kiwisolver-1.2.0-cp37-none-win32.whl", hash = "sha256:03662cbd3e6729f341a97dd2690b271e51a67a68322affab12a5b011344b973c"}, + {file = "kiwisolver-1.2.0-cp37-none-win_amd64.whl", hash = "sha256:4eadb361baf3069f278b055e3bb53fa189cea2fd02cb2c353b7a99ebb4477ef1"}, + {file = "kiwisolver-1.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c31bc3c8e903d60a1ea31a754c72559398d91b5929fcb329b1c3a3d3f6e72113"}, + {file = "kiwisolver-1.2.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:d52b989dc23cdaa92582ceb4af8d5bcc94d74b2c3e64cd6785558ec6a879793e"}, + {file = "kiwisolver-1.2.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:e586b28354d7b6584d8973656a7954b1c69c93f708c0c07b77884f91640b7657"}, + {file = 
"kiwisolver-1.2.0-cp38-none-win32.whl", hash = "sha256:d069ef4b20b1e6b19f790d00097a5d5d2c50871b66d10075dab78938dc2ee2cf"}, + {file = "kiwisolver-1.2.0-cp38-none-win_amd64.whl", hash = "sha256:18d749f3e56c0480dccd1714230da0f328e6e4accf188dd4e6884bdd06bf02dd"}, + {file = "kiwisolver-1.2.0.tar.gz", hash = "sha256:247800260cd38160c362d211dcaf4ed0f7816afb5efe56544748b21d6ad6d17f"}, ] lazy-object-proxy = [ {file = "lazy-object-proxy-1.4.3.tar.gz", hash = "sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0"}, @@ -1724,8 +1270,7 @@ lazy-object-proxy = [ {file = "lazy_object_proxy-1.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239"}, ] livereload = [ - {file = "livereload-2.6.1-py2.py3-none-any.whl", hash = "sha256:78d55f2c268a8823ba499305dcac64e28ddeb9a92571e12d543cd304faf5817b"}, - {file = "livereload-2.6.1.tar.gz", hash = "sha256:89254f78d7529d7ea0a3417d224c34287ebfe266b05e67e51facaf82c27f0f66"}, + {file = "livereload-2.6.2.tar.gz", hash = "sha256:d1eddcb5c5eb8d2ca1fa1f750e580da624c0f7fcb734aa5780dc81b7dcbd89be"}, ] markupsafe = [ {file = "MarkupSafe-1.1.1-cp27-cp27m-macosx_10_6_intel.whl", hash = "sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161"}, @@ -1763,146 +1308,97 @@ markupsafe = [ {file = "MarkupSafe-1.1.1.tar.gz", hash = "sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"}, ] matplotlib = [ - {file = "matplotlib-2.2.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:d541616636cff89a7d6427ce583a5b48a93e4fbb9c7ce3e0f5f47b2436d376bb"}, - {file = "matplotlib-2.2.5-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:7aac72d80be3d0d0378dd8136fe6e5379533f840ea1b68de63ba8eaf2adb1dee"}, - {file = "matplotlib-2.2.5-cp27-cp27m-win32.whl", hash = "sha256:9b60582cbfd2cc314e2cd84fb86c9c879f8887458cf27940720ef8aa5a73b3b4"}, - {file = "matplotlib-2.2.5-cp27-cp27m-win_amd64.whl", hash = 
"sha256:845a4c2db94419f0642946a2577fc3f50e339824eff759c68c4bb988c9769955"}, - {file = "matplotlib-2.2.5-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:57077b4023f1af0151b6b580bccfcff2e3ec1e0f689ef58e4d1e751cdfbf13f0"}, - {file = "matplotlib-2.2.5-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:f436a4a425b6b7150cffde9581dc4563ae3f4f10494191db57547c202d1c15b7"}, - {file = "matplotlib-2.2.5-cp34-cp34m-win32.whl", hash = "sha256:aa545123f55da7c6566c0e0e66c52d938129865cec2f3058a1842ca62741e248"}, - {file = "matplotlib-2.2.5-cp34-cp34m-win_amd64.whl", hash = "sha256:62c671fa6a426d59578354c9ba6ba109f91ed65901180c999191a217f8d6a35f"}, - {file = "matplotlib-2.2.5-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:8f0af3228314b46ee72009c604f40c7e07b5d52048e252abb205a5ff77cc8d6b"}, - {file = "matplotlib-2.2.5-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:88ad35aae58d1800844e3d1c06bea2831092ff930dd7902f0d3976ba762894f8"}, - {file = "matplotlib-2.2.5-cp35-cp35m-win32.whl", hash = "sha256:8e791377f2f76fdf23bb12f71bf8182f07d5d994ad9ab7b0f7038b0f79f85ccb"}, - {file = "matplotlib-2.2.5-cp35-cp35m-win_amd64.whl", hash = "sha256:6bcd44556cdce178100180d0d04df68ab50eb267c60453f34f248c5559579a9e"}, - {file = "matplotlib-2.2.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:2391e179bd91f7e9727f4d1a09803ce4dc973ed5c517b42430e9edf60bfdcc6a"}, - {file = "matplotlib-2.2.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:5d46546f152a24ce06e20b0aff0b74648cae0ab0e8de1470a4d4b5a2a8aaf414"}, - {file = "matplotlib-2.2.5-cp36-cp36m-win32.whl", hash = "sha256:4c65889d35736ce0f2f94e0dbac72c93f85cea613fd477c7970e7af5d1e71f11"}, - {file = "matplotlib-2.2.5-cp36-cp36m-win_amd64.whl", hash = "sha256:9e43d73ac507545d49aea0cfa7b1f6a37eec74621bd9bdff8dbb6d16f560ced8"}, - {file = "matplotlib-2.2.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a3c1c62db6469cf8eb778c5c4c020d6a35424b9a4cb385db7486b77ae35be358"}, - {file = "matplotlib-2.2.5-cp37-cp37m-manylinux1_x86_64.whl", hash = 
"sha256:59b219991e5ebf858cd0d35cdd97c3db30b7fd003fe9321aac0355aa8ae665b1"}, - {file = "matplotlib-2.2.5-cp37-cp37m-win32.whl", hash = "sha256:7f78529a92242b4adc1db9daf5b71362a35cd9a5cd8cb4db2b83349df2b0dbb8"}, - {file = "matplotlib-2.2.5-cp37-cp37m-win_amd64.whl", hash = "sha256:240565f560ff35f1c8bb5449a51c420c926478e087db89237a47ec92a29b32d1"}, - {file = "matplotlib-2.2.5-cp38-cp38-win32.whl", hash = "sha256:73f29adce52f98564e738537449e35ea64edd37c043773694a5ee1ac9424d85a"}, - {file = "matplotlib-2.2.5-cp38-cp38-win_amd64.whl", hash = "sha256:2f8cb0f84419808b9915cf8bf31b6e1b7542c1e4805399035799a2419e085b91"}, - {file = "matplotlib-2.2.5-pp273-pypy_73-win32.whl", hash = "sha256:b11160764f2d1757788ad209723cada52f72f7b80903a49e5b6f8c055f5e38bd"}, - {file = "matplotlib-2.2.5-pp373-pypy36_pp73-win32.whl", hash = "sha256:900b4f00dbd37e8b7dfe5f7209f506384c69617ab109f417ea09a55baab1af7a"}, - {file = "matplotlib-2.2.5.tar.gz", hash = "sha256:a3037a840cd9dfdc2df9fee8af8f76ca82bfab173c0f9468193ca7a89a2b60ea"}, - {file = "matplotlib-3.0.3-cp35-cp35m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:e918d51b1fda82a65fdf52d2f3914b2246481cc2a9cd10e223e6be6078916ff3"}, - {file = "matplotlib-3.0.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:63e498067d32d627111cd1162cae1621f1221f9d4c6a9745dd7233f29de581b6"}, - {file = "matplotlib-3.0.3-cp35-cp35m-win32.whl", hash = "sha256:91c54d6bb9eeaaff965656c5ea6cbdcbf780bad8462ac99b30b451548194746f"}, - {file = "matplotlib-3.0.3-cp35-cp35m-win_amd64.whl", hash = "sha256:cf8ae10559a78aee0409ede1e9d4fda03895433eeafe609dd9ed67e45f552db0"}, - {file = "matplotlib-3.0.3-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:de5ccd3500247f85fe4f9fad90f80a8bd397e4f110a4c33fabf95f07403e8372"}, - {file = "matplotlib-3.0.3-cp36-cp36m-manylinux1_x86_64.whl", hash = 
"sha256:e8d1939262aa6b36d0c51f50a50a43a04b9618d20db31e6c0192b1463067aeef"}, - {file = "matplotlib-3.0.3-cp36-cp36m-win32.whl", hash = "sha256:d51d0889d1c4d51c51a9822265c0494ea3e70a52bdd88358e0863daca46fa23a"}, - {file = "matplotlib-3.0.3-cp36-cp36m-win_amd64.whl", hash = "sha256:1ae6549976b6ceb6ee426272a28c0fc9715b3e3669694d560c8f661c5b39e2c5"}, - {file = "matplotlib-3.0.3-cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:aeef177647bb3fccfe09065481989d7dfc5ac59e9367d6a00a3481062cf651e4"}, - {file = "matplotlib-3.0.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:4d4250bf508dd07cca3b43888097f873cadb66eec6ac63dbbfb798798ec07af2"}, - {file = "matplotlib-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:53af2e01d7f1700ed2b64a9091bc865360c9c4032f625451c4589a826854c787"}, - {file = "matplotlib-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:7169a34971e398dd58e87e173f97366fd88a3fa80852704530433eb224a8ca57"}, - {file = "matplotlib-3.0.3.tar.gz", hash = "sha256:e1d33589e32f482d0a7d1957bf473d43341115d40d33f578dad44432e47df7b7"}, + {file = "matplotlib-3.2.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a47abc48c7b81fe6e636dde8a58e49b13d87d140e0f448213a4879f4a3f73345"}, + {file = "matplotlib-3.2.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:20bcd11efe194cd302bd0653cb025b8d16bcd80442359bfca8d49dc805f35ec8"}, + {file = "matplotlib-3.2.2-cp36-cp36m-win32.whl", hash = "sha256:2a6d64336b547e25730b6221e7aadfb01a391a065d43b5f51f0b9d7f673d2dd2"}, + {file = "matplotlib-3.2.2-cp36-cp36m-win_amd64.whl", hash = "sha256:4416825ebc9c1f135027a30e8d8aea0edcf45078ce767c7f7386737413cfb98f"}, + {file = "matplotlib-3.2.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:465c752278d27895e23f1379d6fcfa3a2990643b803c25e3bc16a10641d2346a"}, + {file = "matplotlib-3.2.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:81de040403a33bf3c68e9d4a40e26c8d24da00f7e3fadd845003b7e106785da7"}, + {file = 
"matplotlib-3.2.2-cp37-cp37m-win32.whl", hash = "sha256:006413f08ba5db1f5b1e0d6fbdc2ac9058b062ccf552f57182563a78579c34b4"}, + {file = "matplotlib-3.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:da06fa530591a141ffbe1712bbeec784734c3436b40c942d21652f305199b5d9"}, + {file = "matplotlib-3.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:894dd47c0a6ce38dc19bc87d1f7e2b0608310b2a18d1572291157450b05ce874"}, + {file = "matplotlib-3.2.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:1ab264770e7cf2cf4feb99f22c737066aef21ddf1ec402dc255450ac15eacb7b"}, + {file = "matplotlib-3.2.2-cp38-cp38-win32.whl", hash = "sha256:91c153f4318e3c67c035fd1185f5ea2613f15008b73b66985033033f6fe54bbd"}, + {file = "matplotlib-3.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:a68e42e22f7fd190a532e4215e142276970c2d54040a0c46842fcb3db8b6ec5b"}, + {file = "matplotlib-3.2.2-cp39-cp39-win32.whl", hash = "sha256:647cf232ccf6265d2ba1ac4103e8c8b6ac7b03a40da3421234ffb03dda217f59"}, + {file = "matplotlib-3.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:31d32c83bb2b617377c6156f75e88b9ec2ded289e47ad4ff0f263dc1019d88b1"}, + {file = "matplotlib-3.2.2-pp36-pypy36_pp73-win32.whl", hash = "sha256:67065d938df34478451af62fbd0670d2b51c4d859fb66673064eb5de8660dd7c"}, + {file = "matplotlib-3.2.2.tar.gz", hash = "sha256:3d77a6630d093d74cbbfebaa0571d00790966be1ed204e4a8239f5cbd6835c5d"}, ] mccabe = [ {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, ] more-itertools = [ - {file = "more-itertools-5.0.0.tar.gz", hash = "sha256:38a936c0a6d98a38bcc2d03fdaaedaba9f412879461dd2ceff8d37564d6522e4"}, - {file = "more_itertools-5.0.0-py2-none-any.whl", hash = "sha256:c0a5785b1109a6bd7fac76d6837fd1feca158e54e521ccd2ae8bfe393cc9d4fc"}, - {file = "more_itertools-5.0.0-py3-none-any.whl", hash = 
"sha256:fe7a7cae1ccb57d33952113ff4fa1bc5f879963600ed74918f1236e212ee50b9"}, - {file = "more-itertools-8.2.0.tar.gz", hash = "sha256:b1ddb932186d8a6ac451e1d95844b382f55e12686d51ca0c68b6f61f2ab7a507"}, - {file = "more_itertools-8.2.0-py3-none-any.whl", hash = "sha256:5dd8bcf33e5f9513ffa06d5ad33d78f31e1931ac9a18f33d37e77a180d393a7c"}, + {file = "more-itertools-8.4.0.tar.gz", hash = "sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5"}, + {file = "more_itertools-8.4.0-py3-none-any.whl", hash = "sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2"}, ] numpy = [ - {file = "numpy-1.16.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:08bf4f66f190822f4642e036accde8da810b87fffc0b9409e7a00d9e54760099"}, - {file = "numpy-1.16.6-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:d759ca1b76ac6f6b6159fb74984126035feb1dee9f68b4b961889b6dc090f33a"}, - {file = "numpy-1.16.6-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:d3c5377c6122de876e695937ef41ffee5d2831154c5e4856481b93406cdfeecb"}, - {file = "numpy-1.16.6-cp27-cp27m-win32.whl", hash = "sha256:345b1748e6b0d4773a518868c783b16fdc33a22683bdb863484cd29fe8d206e6"}, - {file = "numpy-1.16.6-cp27-cp27m-win_amd64.whl", hash = "sha256:7a5a1f49a643aa1ab3e0579da0a48b8a48ea4369eb63c5065459d0a37f430237"}, - {file = "numpy-1.16.6-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:817eed5a6ec2fc9c1a0ee3fbf9a441c66b6766383580513ccbdf3121acc0b4fb"}, - {file = "numpy-1.16.6-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1680c8d5086a88d293dfd1a10b6429a09140cacee878034fa2308472ec835db4"}, - {file = "numpy-1.16.6-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:a4383edb1b8caa989c3541a37ef204916322c503b8eeacc7ee8f4ba24cac97b8"}, - {file = "numpy-1.16.6-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:9bb690692f3101583b0b99f3be362742e4f8ebe6c7934fa36cd8ca2b567a0bcc"}, - {file = "numpy-1.16.6-cp35-cp35m-manylinux1_x86_64.whl", hash = 
"sha256:b9e334568ca1bf56598eddfac6db6a75bcf1c91aa90d598648f21e45207daeae"}, - {file = "numpy-1.16.6-cp35-cp35m-win32.whl", hash = "sha256:55cae40d2024c56e7b79fb070106cb4289dcc6b55c62dba1d89a6944448c6a53"}, - {file = "numpy-1.16.6-cp35-cp35m-win_amd64.whl", hash = "sha256:a1ffc9c770ccc2be9284310a3726c918b26ca19b34c0079e7a41aba950ab175f"}, - {file = "numpy-1.16.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:3f423b06bf67cd1dbf72e13e9b53a9ca71972e5abf712ee6cb5d8cbb178fff02"}, - {file = "numpy-1.16.6-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:34e6bb44e3d9a663f903b8c297ede865b4dff039aa43cc9a0b249e02c27f1396"}, - {file = "numpy-1.16.6-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:60c56922c9d759d664078fbef94132377ef1498ab27dd3d0cc7a21b346e68c06"}, - {file = "numpy-1.16.6-cp36-cp36m-win32.whl", hash = "sha256:23cad5e5858dfb73c0e5bce03fe78e5e5908c22263156c58d4afdbb240683c6c"}, - {file = "numpy-1.16.6-cp36-cp36m-win_amd64.whl", hash = "sha256:77399828d96cca386bfba453025c34f22569909d90332b961d3d4341cdb46a84"}, - {file = "numpy-1.16.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:97ddfa7688295d460ee48a4d76337e9fdd2506d9d1d0eee7f0348b42b430da4c"}, - {file = "numpy-1.16.6-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:390f6e14a8d73591f086680464aa101a9be9187d0c633f48c98b429b31b712c2"}, - {file = "numpy-1.16.6-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:a1772dc227e3e415eeaa646d25690dc854bddc3d626e454c7c27acba060cb900"}, - {file = "numpy-1.16.6-cp37-cp37m-win32.whl", hash = "sha256:c9fb4fcfcdcaccfe2c4e1f9e0133ed59df5df2aa3655f3d391887e892b0a784c"}, - {file = "numpy-1.16.6-cp37-cp37m-win_amd64.whl", hash = "sha256:6b1853364775edb85ceb0f7f8214d9e993d4d1d9bd3310eae80529ea14ba2ba6"}, - {file = "numpy-1.16.6.zip", hash = "sha256:e5cf3fdf13401885e8eea8170624ec96225e2174eb0c611c6f26dd33b489e3ff"}, - {file = "numpy-1.18.1-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:20b26aaa5b3da029942cdcce719b363dbe58696ad182aff0e5dcb1687ec946dc"}, - {file = 
"numpy-1.18.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:70a840a26f4e61defa7bdf811d7498a284ced303dfbc35acb7be12a39b2aa121"}, - {file = "numpy-1.18.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:17aa7a81fe7599a10f2b7d95856dc5cf84a4eefa45bc96123cbbc3ebc568994e"}, - {file = "numpy-1.18.1-cp35-cp35m-win32.whl", hash = "sha256:f3d0a94ad151870978fb93538e95411c83899c9dc63e6fb65542f769568ecfa5"}, - {file = "numpy-1.18.1-cp35-cp35m-win_amd64.whl", hash = "sha256:1786a08236f2c92ae0e70423c45e1e62788ed33028f94ca99c4df03f5be6b3c6"}, - {file = "numpy-1.18.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ae0975f42ab1f28364dcda3dde3cf6c1ddab3e1d4b2909da0cb0191fa9ca0480"}, - {file = "numpy-1.18.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:cf7eb6b1025d3e169989416b1adcd676624c2dbed9e3bcb7137f51bfc8cc2572"}, - {file = "numpy-1.18.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:b765ed3930b92812aa698a455847141869ef755a87e099fddd4ccf9d81fffb57"}, - {file = "numpy-1.18.1-cp36-cp36m-win32.whl", hash = "sha256:2d75908ab3ced4223ccba595b48e538afa5ecc37405923d1fea6906d7c3a50bc"}, - {file = "numpy-1.18.1-cp36-cp36m-win_amd64.whl", hash = "sha256:9acdf933c1fd263c513a2df3dceecea6f3ff4419d80bf238510976bf9bcb26cd"}, - {file = "numpy-1.18.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:56bc8ded6fcd9adea90f65377438f9fea8c05fcf7c5ba766bef258d0da1554aa"}, - {file = "numpy-1.18.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:e422c3152921cece8b6a2fb6b0b4d73b6579bd20ae075e7d15143e711f3ca2ca"}, - {file = "numpy-1.18.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:b3af02ecc999c8003e538e60c89a2b37646b39b688d4e44d7373e11c2debabec"}, - {file = "numpy-1.18.1-cp37-cp37m-win32.whl", hash = "sha256:d92350c22b150c1cae7ebb0ee8b5670cc84848f6359cf6b5d8f86617098a9b73"}, - {file = "numpy-1.18.1-cp37-cp37m-win_amd64.whl", hash = "sha256:77c3bfe65d8560487052ad55c6998a04b654c2fbc36d546aef2b2e511e760971"}, - {file = "numpy-1.18.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:c98c5ffd7d41611407a1103ae11c8b634ad6a43606eca3e2a5a269e5d6e8eb07"}, - {file = "numpy-1.18.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:9537eecf179f566fd1c160a2e912ca0b8e02d773af0a7a1120ad4f7507cd0d26"}, - {file = "numpy-1.18.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:e840f552a509e3380b0f0ec977e8124d0dc34dc0e68289ca28f4d7c1d0d79474"}, - {file = "numpy-1.18.1-cp38-cp38-win32.whl", hash = "sha256:590355aeade1a2eaba17617c19edccb7db8d78760175256e3cf94590a1a964f3"}, - {file = "numpy-1.18.1-cp38-cp38-win_amd64.whl", hash = "sha256:39d2c685af15d3ce682c99ce5925cc66efc824652e10990d2462dfe9b8918c6a"}, - {file = "numpy-1.18.1.zip", hash = "sha256:b6ff59cee96b454516e47e7721098e6ceebef435e3e21ac2d6c3b8b02628eb77"}, + {file = "numpy-1.18.5-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:e91d31b34fc7c2c8f756b4e902f901f856ae53a93399368d9a0dc7be17ed2ca0"}, + {file = "numpy-1.18.5-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:7d42ab8cedd175b5ebcb39b5208b25ba104842489ed59fbb29356f671ac93583"}, + {file = "numpy-1.18.5-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:a78e438db8ec26d5d9d0e584b27ef25c7afa5a182d1bf4d05e313d2d6d515271"}, + {file = "numpy-1.18.5-cp35-cp35m-win32.whl", hash = "sha256:a87f59508c2b7ceb8631c20630118cc546f1f815e034193dc72390db038a5cb3"}, + {file = "numpy-1.18.5-cp35-cp35m-win_amd64.whl", hash = "sha256:965df25449305092b23d5145b9bdaeb0149b6e41a77a7d728b1644b3c99277c1"}, + {file = "numpy-1.18.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ac792b385d81151bae2a5a8adb2b88261ceb4976dbfaaad9ce3a200e036753dc"}, + {file = "numpy-1.18.5-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:ef627986941b5edd1ed74ba89ca43196ed197f1a206a3f18cc9faf2fb84fd675"}, + {file = "numpy-1.18.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:f718a7949d1c4f622ff548c572e0c03440b49b9531ff00e4ed5738b459f011e8"}, + {file = "numpy-1.18.5-cp36-cp36m-win32.whl", hash = "sha256:4064f53d4cce69e9ac613256dc2162e56f20a4e2d2086b1956dd2fcf77b7fac5"}, + {file = 
"numpy-1.18.5-cp36-cp36m-win_amd64.whl", hash = "sha256:b03b2c0badeb606d1232e5f78852c102c0a7989d3a534b3129e7856a52f3d161"}, + {file = "numpy-1.18.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a7acefddf994af1aeba05bbbafe4ba983a187079f125146dc5859e6d817df824"}, + {file = "numpy-1.18.5-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:cd49930af1d1e49a812d987c2620ee63965b619257bd76eaaa95870ca08837cf"}, + {file = "numpy-1.18.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:b39321f1a74d1f9183bf1638a745b4fd6fe80efbb1f6b32b932a588b4bc7695f"}, + {file = "numpy-1.18.5-cp37-cp37m-win32.whl", hash = "sha256:cae14a01a159b1ed91a324722d746523ec757357260c6804d11d6147a9e53e3f"}, + {file = "numpy-1.18.5-cp37-cp37m-win_amd64.whl", hash = "sha256:0172304e7d8d40e9e49553901903dc5f5a49a703363ed756796f5808a06fc233"}, + {file = "numpy-1.18.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e15b382603c58f24265c9c931c9a45eebf44fe2e6b4eaedbb0d025ab3255228b"}, + {file = "numpy-1.18.5-cp38-cp38-manylinux1_i686.whl", hash = "sha256:3676abe3d621fc467c4c1469ee11e395c82b2d6b5463a9454e37fe9da07cd0d7"}, + {file = "numpy-1.18.5-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:4674f7d27a6c1c52a4d1aa5f0881f1eff840d2206989bae6acb1c7668c02ebfb"}, + {file = "numpy-1.18.5-cp38-cp38-win32.whl", hash = "sha256:9c9d6531bc1886454f44aa8f809268bc481295cf9740827254f53c30104f074a"}, + {file = "numpy-1.18.5-cp38-cp38-win_amd64.whl", hash = "sha256:3dd6823d3e04b5f223e3e265b4a1eae15f104f4366edd409e5a5e413a98f911f"}, + {file = "numpy-1.18.5.zip", hash = "sha256:34e96e9dae65c4839bd80012023aadd6ee2ccb73ce7fdf3074c62f301e63120b"}, ] packaging = [ - {file = "packaging-20.1-py2.py3-none-any.whl", hash = "sha256:170748228214b70b672c581a3dd610ee51f733018650740e98c7df862a583f73"}, - {file = "packaging-20.1.tar.gz", hash = "sha256:e665345f9eef0c621aa0bf2f8d78cf6d21904eef16a93f020240b704a57f1334"}, + {file = "packaging-20.4-py2.py3-none-any.whl", hash = 
"sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181"}, + {file = "packaging-20.4.tar.gz", hash = "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8"}, ] pandas = [ - {file = "pandas-0.24.2-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:17916d818592c9ec891cbef2e90f98cc85e0f1e89ed0924c9b5220dc3209c846"}, - {file = "pandas-0.24.2-cp27-cp27m-win32.whl", hash = "sha256:42e5ad741a0d09232efbc7fc648226ed93306551772fc8aecc6dce9f0e676794"}, - {file = "pandas-0.24.2-cp27-cp27m-win_amd64.whl", hash = "sha256:c9a4b7c55115eb278c19aa14b34fcf5920c8fe7797a09b7b053ddd6195ea89b3"}, - {file = "pandas-0.24.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:5149a6db3e74f23dc3f5a216c2c9ae2e12920aa2d4a5b77e44e5b804a5f93248"}, - {file = "pandas-0.24.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:cc8fc0c7a8d5951dc738f1c1447f71c43734244453616f32b8aa0ef6013a5dfb"}, - {file = "pandas-0.24.2-cp35-cp35m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:17450e25ae69e2e6b303817bdf26b2cd57f69595d8550a77c308be0cd0fd58fa"}, - {file = "pandas-0.24.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:366f30710172cb45a6b4f43b66c220653b1ea50303fbbd94e50571637ffb9167"}, - {file = "pandas-0.24.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:4e718e7f395ba5bfe8b6f6aaf2ff1c65a09bb77a36af6394621434e7cc813204"}, - {file = "pandas-0.24.2-cp35-cp35m-win32.whl", hash = "sha256:8c872f7fdf3018b7891e1e3e86c55b190e6c5cee70cab771e8f246c855001296"}, - {file = "pandas-0.24.2-cp35-cp35m-win_amd64.whl", hash = "sha256:a3352bacac12e1fc646213b998bce586f965c9d431773d9e91db27c7c48a1f7d"}, - {file = "pandas-0.24.2-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:d7b460bc316064540ce0c41c1438c416a40746fd8a4fb2999668bf18f3c4acf1"}, - {file = 
"pandas-0.24.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:c1bd07ebc15285535f61ddd8c0c75d0d6293e80e1ee6d9a8d73f3f36954342d0"}, - {file = "pandas-0.24.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:071e42b89b57baa17031af8c6b6bbd2e9a5c68c595bc6bf9adabd7a9ed125d3b"}, - {file = "pandas-0.24.2-cp36-cp36m-win32.whl", hash = "sha256:2538f099ab0e9f9c9d09bbcd94b47fd889bad06dc7ae96b1ed583f1dc1a7a822"}, - {file = "pandas-0.24.2-cp36-cp36m-win_amd64.whl", hash = "sha256:83c702615052f2a0a7fb1dd289726e29ec87a27272d775cb77affe749cca28f8"}, - {file = "pandas-0.24.2-cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:627594338d6dd995cfc0bacd8e654cd9e1252d2a7c959449228df6740d737eb8"}, - {file = "pandas-0.24.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:4fe0d7e6438212e839fc5010c78b822664f1a824c0d263fd858f44131d9166e2"}, - {file = "pandas-0.24.2-cp37-cp37m-win32.whl", hash = "sha256:bcdd06007cca02d51350f96debe51331dec429ac8f93930a43eb8fb5639e3eb5"}, - {file = "pandas-0.24.2-cp37-cp37m-win_amd64.whl", hash = "sha256:90f116086063934afd51e61a802a943826d2aac572b2f7d55caaac51c13db5b5"}, - {file = "pandas-0.24.2.tar.gz", hash = "sha256:4f919f409c433577a501e023943e582c57355d50a724c589e78bc1d551a535a2"}, + {file = "pandas-0.25.3-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:df8864824b1fe488cf778c3650ee59c3a0d8f42e53707de167ba6b4f7d35f133"}, + {file = "pandas-0.25.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:7458c48e3d15b8aaa7d575be60e1e4dd70348efcd9376656b72fecd55c59a4c3"}, + {file = "pandas-0.25.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:61741f5aeb252f39c3031d11405305b6d10ce663c53bc3112705d7ad66c013d0"}, + {file = "pandas-0.25.3-cp35-cp35m-win32.whl", hash = "sha256:adc3d3a3f9e59a38d923e90e20c4922fc62d1e5a03d083440468c6d8f3f1ae0a"}, + {file = "pandas-0.25.3-cp35-cp35m-win_amd64.whl", hash = "sha256:975c461accd14e89d71772e89108a050fa824c0b87a67d34cedf245f6681fc17"}, + {file = 
"pandas-0.25.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ee50c2142cdcf41995655d499a157d0a812fce55c97d9aad13bc1eef837ed36c"}, + {file = "pandas-0.25.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:4545467a637e0e1393f7d05d61dace89689ad6d6f66f267f86fff737b702cce9"}, + {file = "pandas-0.25.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:bbe3eb765a0b1e578833d243e2814b60c825b7fdbf4cdfe8e8aae8a08ed56ecf"}, + {file = "pandas-0.25.3-cp36-cp36m-win32.whl", hash = "sha256:8153705d6545fd9eb6dd2bc79301bff08825d2e2f716d5dced48daafc2d0b81f"}, + {file = "pandas-0.25.3-cp36-cp36m-win_amd64.whl", hash = "sha256:26382aab9c119735908d94d2c5c08020a4a0a82969b7e5eefb92f902b3b30ad7"}, + {file = "pandas-0.25.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:00dff3a8e337f5ed7ad295d98a31821d3d0fe7792da82d78d7fd79b89c03ea9d"}, + {file = "pandas-0.25.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:e45055c30a608076e31a9fcd780a956ed3b1fa20db61561b8d88b79259f526f7"}, + {file = "pandas-0.25.3-cp37-cp37m-win32.whl", hash = "sha256:255920e63850dc512ce356233081098554d641ba99c3767dde9e9f35630f994b"}, + {file = "pandas-0.25.3-cp37-cp37m-win_amd64.whl", hash = "sha256:22361b1597c8c2ffd697aa9bf85423afa9e1fcfa6b1ea821054a244d5f24d75e"}, + {file = "pandas-0.25.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9962957a27bfb70ab64103d0a7b42fa59c642fb4ed4cb75d0227b7bb9228535d"}, + {file = "pandas-0.25.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:78bf638993219311377ce9836b3dc05f627a666d0dbc8cec37c0ff3c9ada673b"}, + {file = "pandas-0.25.3-cp38-cp38-win32.whl", hash = "sha256:6a3ac2c87e4e32a969921d1428525f09462770c349147aa8e9ab95f88c71ec71"}, + {file = "pandas-0.25.3-cp38-cp38-win_amd64.whl", hash = "sha256:33970f4cacdd9a0ddb8f21e151bfb9f178afb7c36eb7c25b9094c02876f385c2"}, + {file = "pandas-0.25.3.tar.gz", hash = "sha256:52da74df8a9c9a103af0a72c9d5fdc8e0183a90884278db7f386b5692a2220a4"}, ] parso = [ - {file = "parso-0.6.2-py2.py3-none-any.whl", hash = 
"sha256:8515fc12cfca6ee3aa59138741fc5624d62340c97e401c74875769948d4f2995"}, - {file = "parso-0.6.2.tar.gz", hash = "sha256:0c5659e0c6eba20636f99a04f469798dca8da279645ce5c387315b2c23912157"}, + {file = "parso-0.7.0-py2.py3-none-any.whl", hash = "sha256:158c140fc04112dc45bca311633ae5033c2c2a7b732fa33d0955bad8152a8dd0"}, + {file = "parso-0.7.0.tar.gz", hash = "sha256:908e9fae2144a076d72ae4e25539143d40b8e3eafbaeae03c1bfe226f4cdf12c"}, ] pathlib2 = [ {file = "pathlib2-2.3.5-py2.py3-none-any.whl", hash = "sha256:0ec8205a157c80d7acc301c0b18fbd5d44fe655968f5d947b6ecef5290fc35db"}, {file = "pathlib2-2.3.5.tar.gz", hash = "sha256:6cd9a47b597b37cc57de1c05e56fb1a1c9cc9fab04fe78c29acd090418529868"}, ] pathspec = [ - {file = "pathspec-0.7.0-py2.py3-none-any.whl", hash = "sha256:163b0632d4e31cef212976cf57b43d9fd6b0bac6e67c26015d611a647d5e7424"}, - {file = "pathspec-0.7.0.tar.gz", hash = "sha256:562aa70af2e0d434367d9790ad37aed893de47f1693e4201fd1d3dca15d19b96"}, + {file = "pathspec-0.8.0-py2.py3-none-any.whl", hash = "sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0"}, + {file = "pathspec-0.8.0.tar.gz", hash = "sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061"}, ] pathtools = [ {file = "pathtools-0.1.2.tar.gz", hash = "sha256:7c35c5421a39bb82e58018febd90e3b6e5db34c5443aaaf742b3f33d4655f1c0"}, ] pbr = [ - {file = "pbr-5.4.4-py2.py3-none-any.whl", hash = "sha256:61aa52a0f18b71c5cc58232d2cf8f8d09cd67fcad60b742a60124cb8d6951488"}, - {file = "pbr-5.4.4.tar.gz", hash = "sha256:139d2625547dbfa5fb0b81daebb39601c478c21956dc57e2e07b74450a8c506b"}, + {file = "pbr-5.4.5-py2.py3-none-any.whl", hash = "sha256:579170e23f8e0c2f24b0de612f71f648eccb79fb1322c814ae6b3c07b5ba23e8"}, + {file = "pbr-5.4.5.tar.gz", hash = "sha256:07f558fece33b05caf857474a366dfcc00562bca13dd8b47b2b3e22d9f9bf55c"}, ] pep8 = [ {file = "pep8-1.7.1-py2.py3-none-any.whl", hash = "sha256:b22cfae5db09833bb9bd7c8463b53e1a9c9b39f12e304a8d0bba729c501827ee"}, @@ -1924,62 +1420,66 @@ 
port-for = [ {file = "port-for-0.3.1.tar.gz", hash = "sha256:b16a84bb29c2954db44c29be38b17c659c9c27e33918dec16b90d375cc596f1c"}, ] prompt-toolkit = [ - {file = "prompt_toolkit-1.0.18-py2-none-any.whl", hash = "sha256:f7eec66105baf40eda9ab026cd8b2e251337eea8d111196695d82e0c5f0af852"}, - {file = "prompt_toolkit-1.0.18-py3-none-any.whl", hash = "sha256:37925b37a4af1f6448c76b7606e0285f79f434ad246dda007a27411cca730c6d"}, - {file = "prompt_toolkit-1.0.18.tar.gz", hash = "sha256:dd4fca02c8069497ad931a2d09914c6b0d1b50151ce876bc15bde4c747090126"}, - {file = "prompt_toolkit-2.0.10-py2-none-any.whl", hash = "sha256:e7f8af9e3d70f514373bf41aa51bc33af12a6db3f71461ea47fea985defb2c31"}, - {file = "prompt_toolkit-2.0.10-py3-none-any.whl", hash = "sha256:46642344ce457641f28fc9d1c9ca939b63dadf8df128b86f1b9860e59c73a5e4"}, - {file = "prompt_toolkit-2.0.10.tar.gz", hash = "sha256:f15af68f66e664eaa559d4ac8a928111eebd5feda0c11738b5998045224829db"}, + {file = "prompt_toolkit-3.0.3-py3-none-any.whl", hash = "sha256:c93e53af97f630f12f5f62a3274e79527936ed466f038953dfa379d4941f651a"}, + {file = "prompt_toolkit-3.0.3.tar.gz", hash = "sha256:a402e9bf468b63314e37460b68ba68243d55b2f8c4d0192f85a019af3945050e"}, +] +psutil = [ + {file = "psutil-5.7.0-cp27-none-win32.whl", hash = "sha256:298af2f14b635c3c7118fd9183843f4e73e681bb6f01e12284d4d70d48a60953"}, + {file = "psutil-5.7.0-cp27-none-win_amd64.whl", hash = "sha256:75e22717d4dbc7ca529ec5063000b2b294fc9a367f9c9ede1f65846c7955fd38"}, + {file = "psutil-5.7.0-cp35-cp35m-win32.whl", hash = "sha256:f344ca230dd8e8d5eee16827596f1c22ec0876127c28e800d7ae20ed44c4b310"}, + {file = "psutil-5.7.0-cp35-cp35m-win_amd64.whl", hash = "sha256:e2d0c5b07c6fe5a87fa27b7855017edb0d52ee73b71e6ee368fae268605cc3f5"}, + {file = "psutil-5.7.0-cp36-cp36m-win32.whl", hash = "sha256:a02f4ac50d4a23253b68233b07e7cdb567bd025b982d5cf0ee78296990c22d9e"}, + {file = "psutil-5.7.0-cp36-cp36m-win_amd64.whl", hash = 
"sha256:1413f4158eb50e110777c4f15d7c759521703bd6beb58926f1d562da40180058"}, + {file = "psutil-5.7.0-cp37-cp37m-win32.whl", hash = "sha256:d008ddc00c6906ec80040d26dc2d3e3962109e40ad07fd8a12d0284ce5e0e4f8"}, + {file = "psutil-5.7.0-cp37-cp37m-win_amd64.whl", hash = "sha256:73f35ab66c6c7a9ce82ba44b1e9b1050be2a80cd4dcc3352cc108656b115c74f"}, + {file = "psutil-5.7.0-cp38-cp38-win32.whl", hash = "sha256:60b86f327c198561f101a92be1995f9ae0399736b6eced8f24af41ec64fb88d4"}, + {file = "psutil-5.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:d84029b190c8a66a946e28b4d3934d2ca1528ec94764b180f7d6ea57b0e75e26"}, + {file = "psutil-5.7.0.tar.gz", hash = "sha256:685ec16ca14d079455892f25bd124df26ff9137664af445563c1bd36629b5e0e"}, ] ptyprocess = [ {file = "ptyprocess-0.6.0-py2.py3-none-any.whl", hash = "sha256:d7cc528d76e76342423ca640335bd3633420dc1366f258cb31d05e865ef5ca1f"}, {file = "ptyprocess-0.6.0.tar.gz", hash = "sha256:923f299cc5ad920c68f2bc0bc98b75b9f838b93b599941a6b63ddbc2476394c0"}, ] py = [ - {file = "py-1.8.1-py2.py3-none-any.whl", hash = "sha256:c20fdd83a5dbc0af9efd622bee9a5564e278f6380fffcacc43ba6f43db2813b0"}, - {file = "py-1.8.1.tar.gz", hash = "sha256:5e27081401262157467ad6e7f851b7aa402c5852dbcb3dae06768434de5752aa"}, + {file = "py-1.8.2-py2.py3-none-any.whl", hash = "sha256:a673fa23d7000440cc885c17dbd34fafcb7d7a6e230b29f6766400de36a33c44"}, + {file = "py-1.8.2.tar.gz", hash = "sha256:f3b3a4c36512a4c4f024041ab51866f11761cc169670204b235f6b20523d4e6b"}, ] pycodestyle = [ - {file = "pycodestyle-2.5.0-py2.py3-none-any.whl", hash = "sha256:95a2219d12372f05704562a14ec30bc76b05a5b297b21a5dfe3f6fac3491ae56"}, - {file = "pycodestyle-2.5.0.tar.gz", hash = "sha256:e40a936c9a450ad81df37f549d676d127b1b66000a6c500caa2b085bc0ca976c"}, + {file = "pycodestyle-2.6.0-py2.py3-none-any.whl", hash = "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367"}, + {file = "pycodestyle-2.6.0.tar.gz", hash = "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e"}, 
] pyflakes = [ - {file = "pyflakes-2.1.1-py2.py3-none-any.whl", hash = "sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0"}, - {file = "pyflakes-2.1.1.tar.gz", hash = "sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2"}, + {file = "pyflakes-2.2.0-py2.py3-none-any.whl", hash = "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92"}, + {file = "pyflakes-2.2.0.tar.gz", hash = "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"}, ] pygments = [ - {file = "Pygments-2.5.2-py2.py3-none-any.whl", hash = "sha256:2a3fe295e54a20164a9df49c75fa58526d3be48e14aceba6d6b1e8ac0bfd6f1b"}, - {file = "Pygments-2.5.2.tar.gz", hash = "sha256:98c8aa5a9f778fcd1026a17361ddaf7330d1b7c62ae97c3bb0ae73e0b9b6b0fe"}, + {file = "Pygments-2.6.1-py3-none-any.whl", hash = "sha256:ff7a40b4860b727ab48fad6360eb351cc1b33cbf9b15a0f689ca5353e9463324"}, + {file = "Pygments-2.6.1.tar.gz", hash = "sha256:647344a061c249a3b74e230c739f434d7ea4d8b1d5f3721bc0f3558049b38f44"}, ] pylint = [ - {file = "pylint-1.9.5-py2.py3-none-any.whl", hash = "sha256:367e3d49813d349a905390ac27989eff82ab84958731c5ef0bef867452cfdc42"}, - {file = "pylint-1.9.5.tar.gz", hash = "sha256:97a42df23d436c70132971d1dcb9efad2fe5c0c6add55b90161e773caf729300"}, - {file = "pylint-2.4.4-py3-none-any.whl", hash = "sha256:886e6afc935ea2590b462664b161ca9a5e40168ea99e5300935f6591ad467df4"}, - {file = "pylint-2.4.4.tar.gz", hash = "sha256:3db5468ad013380e987410a8d6956226963aed94ecb5f9d3a28acca6d9ac36cd"}, + {file = "pylint-2.5.3-py3-none-any.whl", hash = "sha256:d0ece7d223fe422088b0e8f13fa0a1e8eb745ebffcb8ed53d3e95394b6101a1c"}, + {file = "pylint-2.5.3.tar.gz", hash = "sha256:7dd78437f2d8d019717dbf287772d0b2dbdfd13fc016aa7faa08d67bccc46adc"}, ] pyparsing = [ - {file = "pyparsing-2.4.6-py2.py3-none-any.whl", hash = "sha256:c342dccb5250c08d45fd6f8b4a559613ca603b57498511740e65cd11a2e7dcec"}, - {file = "pyparsing-2.4.6.tar.gz", hash = 
"sha256:4c830582a84fb022400b85429791bc551f1f4871c33f23e44f353119e92f969f"}, + {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, + {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, ] pytest = [ - {file = "pytest-4.6.9-py2.py3-none-any.whl", hash = "sha256:c77a5f30a90e0ce24db9eaa14ddfd38d4afb5ea159309bdd2dae55b931bc9324"}, - {file = "pytest-4.6.9.tar.gz", hash = "sha256:19e8f75eac01dd3f211edd465b39efbcbdc8fc5f7866d7dd49fedb30d8adf339"}, - {file = "pytest-5.3.5-py3-none-any.whl", hash = "sha256:ff615c761e25eb25df19edddc0b970302d2a9091fbce0e7213298d85fb61fef6"}, - {file = "pytest-5.3.5.tar.gz", hash = "sha256:0d5fe9189a148acc3c3eb2ac8e1ac0742cb7618c084f3d228baaec0c254b318d"}, + {file = "pytest-5.4.3-py3-none-any.whl", hash = "sha256:5c0db86b698e8f170ba4582a492248919255fcd4c79b1ee64ace34301fb589a1"}, + {file = "pytest-5.4.3.tar.gz", hash = "sha256:7979331bfcba207414f5e1263b5a0f8f521d0f457318836a7355531ed1a4c7d8"}, ] pytest-cache = [ {file = "pytest-cache-1.0.tar.gz", hash = "sha256:be7468edd4d3d83f1e844959fd6e3fd28e77a481440a7118d430130ea31b07a9"}, ] pytest-cov = [ - {file = "pytest-cov-2.8.1.tar.gz", hash = "sha256:cc6742d8bac45070217169f5f72ceee1e0e55b0221f54bcf24845972d3a47f2b"}, - {file = "pytest_cov-2.8.1-py2.py3-none-any.whl", hash = "sha256:cdbdef4f870408ebdbfeb44e63e07eb18bb4619fae852f6e760645fa36172626"}, + {file = "pytest-cov-2.10.0.tar.gz", hash = "sha256:1a629dc9f48e53512fcbfda6b07de490c374b0c83c55ff7a1720b3fccff0ac87"}, + {file = "pytest_cov-2.10.0-py2.py3-none-any.whl", hash = "sha256:6e6d18092dce6fad667cd7020deed816f858ad3b49d5b5e2b1cc1c97a4dba65c"}, ] pytest-pep8 = [ {file = "pytest-pep8-1.0.6.tar.gz", hash = "sha256:032ef7e5fa3ac30f4458c73e05bb67b0f036a8a5cb418a534b3170f89f120318"}, ] pytest-sugar = [ - {file = "pytest-sugar-0.9.2.tar.gz", hash = 
"sha256:fcd87a74b2bce5386d244b49ad60549bfbc4602527797fac167da147983f58ab"}, - {file = "pytest_sugar-0.9.2-py2.py3-none-any.whl", hash = "sha256:26cf8289fe10880cbbc130bd77398c4e6a8b936d8393b116a5c16121d95ab283"}, + {file = "pytest-sugar-0.9.3.tar.gz", hash = "sha256:1630b5b7ea3624919b73fde37cffb87965c5087a4afab8a43074ff44e0d810c4"}, ] python-dateutil = [ {file = "python-dateutil-2.8.1.tar.gz", hash = "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c"}, @@ -1989,51 +1489,51 @@ python-levenshtein = [ {file = "python-Levenshtein-0.12.0.tar.gz", hash = "sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1"}, ] pytz = [ - {file = "pytz-2019.3-py2.py3-none-any.whl", hash = "sha256:1c557d7d0e871de1f5ccd5833f60fb2550652da6be2693c1e02300743d21500d"}, - {file = "pytz-2019.3.tar.gz", hash = "sha256:b02c06db6cf09c12dd25137e563b31700d3b80fcc4ad23abb7a315f2789819be"}, + {file = "pytz-2020.1-py2.py3-none-any.whl", hash = "sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed"}, + {file = "pytz-2020.1.tar.gz", hash = "sha256:c35965d010ce31b23eeb663ed3cc8c906275d6be1a34393a1d73a41febf4a048"}, ] pyyaml = [ - {file = "PyYAML-5.3-cp27-cp27m-win32.whl", hash = "sha256:940532b111b1952befd7db542c370887a8611660d2b9becff75d39355303d82d"}, - {file = "PyYAML-5.3-cp27-cp27m-win_amd64.whl", hash = "sha256:059b2ee3194d718896c0ad077dd8c043e5e909d9180f387ce42012662a4946d6"}, - {file = "PyYAML-5.3-cp35-cp35m-win32.whl", hash = "sha256:4fee71aa5bc6ed9d5f116327c04273e25ae31a3020386916905767ec4fc5317e"}, - {file = "PyYAML-5.3-cp35-cp35m-win_amd64.whl", hash = "sha256:dbbb2379c19ed6042e8f11f2a2c66d39cceb8aeace421bfc29d085d93eda3689"}, - {file = "PyYAML-5.3-cp36-cp36m-win32.whl", hash = "sha256:e3a057b7a64f1222b56e47bcff5e4b94c4f61faac04c7c4ecb1985e18caa3994"}, - {file = "PyYAML-5.3-cp36-cp36m-win_amd64.whl", hash = "sha256:74782fbd4d4f87ff04159e986886931456a1894c61229be9eaf4de6f6e44b99e"}, - {file = "PyYAML-5.3-cp37-cp37m-win32.whl", hash = 
"sha256:24521fa2890642614558b492b473bee0ac1f8057a7263156b02e8b14c88ce6f5"}, - {file = "PyYAML-5.3-cp37-cp37m-win_amd64.whl", hash = "sha256:1cf708e2ac57f3aabc87405f04b86354f66799c8e62c28c5fc5f88b5521b2dbf"}, - {file = "PyYAML-5.3-cp38-cp38-win32.whl", hash = "sha256:70024e02197337533eef7b85b068212420f950319cc8c580261963aefc75f811"}, - {file = "PyYAML-5.3-cp38-cp38-win_amd64.whl", hash = "sha256:cb1f2f5e426dc9f07a7681419fe39cee823bb74f723f36f70399123f439e9b20"}, - {file = "PyYAML-5.3.tar.gz", hash = "sha256:e9f45bd5b92c7974e59bcd2dcc8631a6b6cc380a904725fce7bc08872e691615"}, + {file = "PyYAML-5.3.1-cp27-cp27m-win32.whl", hash = "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f"}, + {file = "PyYAML-5.3.1-cp27-cp27m-win_amd64.whl", hash = "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76"}, + {file = "PyYAML-5.3.1-cp35-cp35m-win32.whl", hash = "sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2"}, + {file = "PyYAML-5.3.1-cp35-cp35m-win_amd64.whl", hash = "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c"}, + {file = "PyYAML-5.3.1-cp36-cp36m-win32.whl", hash = "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2"}, + {file = "PyYAML-5.3.1-cp36-cp36m-win_amd64.whl", hash = "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648"}, + {file = "PyYAML-5.3.1-cp37-cp37m-win32.whl", hash = "sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a"}, + {file = "PyYAML-5.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf"}, + {file = "PyYAML-5.3.1-cp38-cp38-win32.whl", hash = "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97"}, + {file = "PyYAML-5.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee"}, + {file = "PyYAML-5.3.1.tar.gz", hash = 
"sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d"}, ] regex = [ - {file = "regex-2020.2.20-cp27-cp27m-win32.whl", hash = "sha256:99272d6b6a68c7ae4391908fc15f6b8c9a6c345a46b632d7fdb7ef6c883a2bbb"}, - {file = "regex-2020.2.20-cp27-cp27m-win_amd64.whl", hash = "sha256:974535648f31c2b712a6b2595969f8ab370834080e00ab24e5dbb9d19b8bfb74"}, - {file = "regex-2020.2.20-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:5de40649d4f88a15c9489ed37f88f053c15400257eeb18425ac7ed0a4e119400"}, - {file = "regex-2020.2.20-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:82469a0c1330a4beb3d42568f82dffa32226ced006e0b063719468dcd40ffdf0"}, - {file = "regex-2020.2.20-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:d58a4fa7910102500722defbde6e2816b0372a4fcc85c7e239323767c74f5cbc"}, - {file = "regex-2020.2.20-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:f1ac2dc65105a53c1c2d72b1d3e98c2464a133b4067a51a3d2477b28449709a0"}, - {file = "regex-2020.2.20-cp36-cp36m-win32.whl", hash = "sha256:8c2b7fa4d72781577ac45ab658da44c7518e6d96e2a50d04ecb0fd8f28b21d69"}, - {file = "regex-2020.2.20-cp36-cp36m-win_amd64.whl", hash = "sha256:269f0c5ff23639316b29f31df199f401e4cb87529eafff0c76828071635d417b"}, - {file = "regex-2020.2.20-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:bed7986547ce54d230fd8721aba6fd19459cdc6d315497b98686d0416efaff4e"}, - {file = "regex-2020.2.20-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:046e83a8b160aff37e7034139a336b660b01dbfe58706f9d73f5cdc6b3460242"}, - {file = "regex-2020.2.20-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:b33ebcd0222c1d77e61dbcd04a9fd139359bded86803063d3d2d197b796c63ce"}, - {file = "regex-2020.2.20-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:bba52d72e16a554d1894a0cc74041da50eea99a8483e591a9edf1025a66843ab"}, - {file = "regex-2020.2.20-cp37-cp37m-win32.whl", hash = "sha256:01b2d70cbaed11f72e57c1cfbaca71b02e3b98f739ce33f5f26f71859ad90431"}, - {file = "regex-2020.2.20-cp37-cp37m-win_amd64.whl", hash = 
"sha256:113309e819634f499d0006f6200700c8209a2a8bf6bd1bdc863a4d9d6776a5d1"}, - {file = "regex-2020.2.20-cp38-cp38-manylinux1_i686.whl", hash = "sha256:25f4ce26b68425b80a233ce7b6218743c71cf7297dbe02feab1d711a2bf90045"}, - {file = "regex-2020.2.20-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:9b64a4cc825ec4df262050c17e18f60252cdd94742b4ba1286bcfe481f1c0f26"}, - {file = "regex-2020.2.20-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:9ff16d994309b26a1cdf666a6309c1ef51ad4f72f99d3392bcd7b7139577a1f2"}, - {file = "regex-2020.2.20-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:c7f58a0e0e13fb44623b65b01052dae8e820ed9b8b654bb6296bc9c41f571b70"}, - {file = "regex-2020.2.20-cp38-cp38-win32.whl", hash = "sha256:200539b5124bc4721247a823a47d116a7a23e62cc6695744e3eb5454a8888e6d"}, - {file = "regex-2020.2.20-cp38-cp38-win_amd64.whl", hash = "sha256:7f78f963e62a61e294adb6ff5db901b629ef78cb2a1cfce3cf4eeba80c1c67aa"}, - {file = "regex-2020.2.20.tar.gz", hash = "sha256:9e9624440d754733eddbcd4614378c18713d2d9d0dc647cf9c72f64e39671be5"}, + {file = "regex-2020.6.8-cp27-cp27m-win32.whl", hash = "sha256:fbff901c54c22425a5b809b914a3bfaf4b9570eee0e5ce8186ac71eb2025191c"}, + {file = "regex-2020.6.8-cp27-cp27m-win_amd64.whl", hash = "sha256:112e34adf95e45158c597feea65d06a8124898bdeac975c9087fe71b572bd938"}, + {file = "regex-2020.6.8-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:92d8a043a4241a710c1cf7593f5577fbb832cf6c3a00ff3fc1ff2052aff5dd89"}, + {file = "regex-2020.6.8-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:bae83f2a56ab30d5353b47f9b2a33e4aac4de9401fb582b55c42b132a8ac3868"}, + {file = "regex-2020.6.8-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:b2ba0f78b3ef375114856cbdaa30559914d081c416b431f2437f83ce4f8b7f2f"}, + {file = "regex-2020.6.8-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:95fa7726d073c87141f7bbfb04c284901f8328e2d430eeb71b8ffdd5742a5ded"}, + {file = "regex-2020.6.8-cp36-cp36m-win32.whl", hash = 
"sha256:e3cdc9423808f7e1bb9c2e0bdb1c9dc37b0607b30d646ff6faf0d4e41ee8fee3"}, + {file = "regex-2020.6.8-cp36-cp36m-win_amd64.whl", hash = "sha256:c78e66a922de1c95a208e4ec02e2e5cf0bb83a36ceececc10a72841e53fbf2bd"}, + {file = "regex-2020.6.8-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:08997a37b221a3e27d68ffb601e45abfb0093d39ee770e4257bd2f5115e8cb0a"}, + {file = "regex-2020.6.8-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:2f6f211633ee8d3f7706953e9d3edc7ce63a1d6aad0be5dcee1ece127eea13ae"}, + {file = "regex-2020.6.8-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:55b4c25cbb3b29f8d5e63aeed27b49fa0f8476b0d4e1b3171d85db891938cc3a"}, + {file = "regex-2020.6.8-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:89cda1a5d3e33ec9e231ece7307afc101b5217523d55ef4dc7fb2abd6de71ba3"}, + {file = "regex-2020.6.8-cp37-cp37m-win32.whl", hash = "sha256:690f858d9a94d903cf5cada62ce069b5d93b313d7d05456dbcd99420856562d9"}, + {file = "regex-2020.6.8-cp37-cp37m-win_amd64.whl", hash = "sha256:1700419d8a18c26ff396b3b06ace315b5f2a6e780dad387e4c48717a12a22c29"}, + {file = "regex-2020.6.8-cp38-cp38-manylinux1_i686.whl", hash = "sha256:654cb773b2792e50151f0e22be0f2b6e1c3a04c5328ff1d9d59c0398d37ef610"}, + {file = "regex-2020.6.8-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:52e1b4bef02f4040b2fd547357a170fc1146e60ab310cdbdd098db86e929b387"}, + {file = "regex-2020.6.8-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:cf59bbf282b627130f5ba68b7fa3abdb96372b24b66bdf72a4920e8153fc7910"}, + {file = "regex-2020.6.8-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:5aaa5928b039ae440d775acea11d01e42ff26e1561c0ffcd3d805750973c6baf"}, + {file = "regex-2020.6.8-cp38-cp38-win32.whl", hash = "sha256:97712e0d0af05febd8ab63d2ef0ab2d0cd9deddf4476f7aa153f76feef4b2754"}, + {file = "regex-2020.6.8-cp38-cp38-win_amd64.whl", hash = "sha256:6ad8663c17db4c5ef438141f99e291c4d4edfeaacc0ce28b5bba2b0bf273d9b5"}, + {file = "regex-2020.6.8.tar.gz", hash = 
"sha256:e9b64e609d37438f7d6e68c2546d2cb8062f3adb27e6336bc129b51be20773ac"}, ] requests = [ - {file = "requests-2.23.0-py2.py3-none-any.whl", hash = "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee"}, - {file = "requests-2.23.0.tar.gz", hash = "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6"}, + {file = "requests-2.24.0-py2.py3-none-any.whl", hash = "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898"}, + {file = "requests-2.24.0.tar.gz", hash = "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b"}, ] restructuredtext-lint = [ - {file = "restructuredtext_lint-1.3.0.tar.gz", hash = "sha256:97b3da356d5b3a8514d8f1f9098febd8b41463bed6a1d9f126cf0a048b6fd908"}, + {file = "restructuredtext_lint-1.3.1.tar.gz", hash = "sha256:470e53b64817211a42805c3a104d2216f6f5834b22fe7adb637d1de4d6501fb8"}, ] scandir = [ {file = "scandir-1.10.0-cp27-cp27m-win32.whl", hash = "sha256:92c85ac42f41ffdc35b6da57ed991575bdbe69db895507af88b9f499b701c188"}, @@ -2048,16 +1548,9 @@ scandir = [ {file = "scandir-1.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:b24086f2375c4a094a6b51e78b4cf7ca16c721dcee2eddd7aa6494b42d6d519d"}, {file = "scandir-1.10.0.tar.gz", hash = "sha256:4d4631f6062e658e9007ab3149a9b914f3548cb38bfb021c64f39a025ce578ae"}, ] -simplegeneric = [ - {file = "simplegeneric-0.8.1.zip", hash = "sha256:dc972e06094b9af5b855b3df4a646395e43d1c9d0d39ed345b7393560d0b9173"}, -] -singledispatch = [ - {file = "singledispatch-3.4.0.3-py2.py3-none-any.whl", hash = "sha256:833b46966687b3de7f438c761ac475213e53b306740f1abfaa86e1d1aae56aa8"}, - {file = "singledispatch-3.4.0.3.tar.gz", hash = "sha256:5b06af87df13818d14f08a028e42f566640aef80805c3b50c5056b086e3c2b9c"}, -] six = [ - {file = "six-1.14.0-py2.py3-none-any.whl", hash = "sha256:8f3cd2e254d8f793e7f3d6d9df77b92252b52637291d0f0da013c76ea2724b6c"}, - {file = "six-1.14.0.tar.gz", hash = "sha256:236bdbdce46e6e6a3d61a337c0f8b763ca1e8717c03b369e87a7ec7ce1319c0a"}, + 
{file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, + {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, ] snowballstemmer = [ {file = "snowballstemmer-2.0.0-py2.py3-none-any.whl", hash = "sha256:209f257d7533fdb3cb73bdbd24f436239ca3b2fa67d56f6ff88e86be08cc5ef0"}, @@ -2072,41 +1565,34 @@ sphinx-autobuild = [ {file = "sphinx_autobuild-0.7.1-py2-none-any.whl", hash = "sha256:e60aea0789cab02fa32ee63c7acae5ef41c06f1434d9fd0a74250a61f5994692"}, ] sphinx-click = [ - {file = "sphinx-click-2.3.1.tar.gz", hash = "sha256:793c68b41c4a9435f953e2a27f9bf5883729037b7431f32b2776257c2966bd1b"}, - {file = "sphinx_click-2.3.1-py2.py3-none-any.whl", hash = "sha256:8c6274666730686a65efbae0b4465879b030372333de3114aeb63c44204da32e"}, + {file = "sphinx-click-2.3.2.tar.gz", hash = "sha256:1b649ebe9f7a85b78ef6545d1dc258da5abca850ac6375be104d484a6334a728"}, + {file = "sphinx_click-2.3.2-py2.py3-none-any.whl", hash = "sha256:06952d5de6cbe2cb7d6dc656bc471652d2b484cf1e1b2d65edb7f4f2e867c7f6"}, ] sphinxcontrib-websupport = [ - {file = "sphinxcontrib-websupport-1.1.2.tar.gz", hash = "sha256:1501befb0fdf1d1c29a800fdbf4ef5dc5369377300ddbdd16d2cd40e54c6eefc"}, - {file = "sphinxcontrib_websupport-1.1.2-py2.py3-none-any.whl", hash = "sha256:e02f717baf02d0b6c3dd62cf81232ffca4c9d5c331e03766982e3ff9f1d2bc3f"}, - {file = "sphinxcontrib-websupport-1.2.0.tar.gz", hash = "sha256:bad3fbd312bc36a31841e06e7617471587ef642bdacdbdddaa8cc30cf251b5ea"}, - {file = "sphinxcontrib_websupport-1.2.0-py2.py3-none-any.whl", hash = "sha256:50fb98fcb8ff2a8869af2afa6b8ee51b3baeb0b17dacd72505105bf15d506ead"}, + {file = "sphinxcontrib-websupport-1.2.2.tar.gz", hash = "sha256:33c0db6c0635b9dc3e72629b7278ca3b9fa24c156eeeaf1674be8f268831d951"}, + {file = "sphinxcontrib_websupport-1.2.2-py2.py3-none-any.whl", hash = "sha256:c155cfa18e8b7c832b3cac0a2d41810ebacd26b26ba9624cd2f42c3496dad04b"}, ] 
tabulate = [ - {file = "tabulate-0.8.6.tar.gz", hash = "sha256:5470cc6687a091c7042cee89b2946d9235fe9f6d49c193a4ae2ac7bf386737c8"}, + {file = "tabulate-0.8.7-py3-none-any.whl", hash = "sha256:ac64cb76d53b1231d364babcd72abbb16855adac7de6665122f97b593f1eb2ba"}, + {file = "tabulate-0.8.7.tar.gz", hash = "sha256:db2723a20d04bcda8522165c73eea7c300eda74e0ce852d9022e0159d7895007"}, ] termcolor = [ {file = "termcolor-1.1.0.tar.gz", hash = "sha256:1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b"}, ] toml = [ - {file = "toml-0.10.0-py2.7.egg", hash = "sha256:f1db651f9657708513243e61e6cc67d101a39bad662eaa9b5546f789338e07a3"}, - {file = "toml-0.10.0-py2.py3-none-any.whl", hash = "sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e"}, - {file = "toml-0.10.0.tar.gz", hash = "sha256:229f81c57791a41d65e399fc06bf0848bab550a9dfd5ed66df18ce5f05e73d5c"}, + {file = "toml-0.10.1-py2.py3-none-any.whl", hash = "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88"}, + {file = "toml-0.10.1.tar.gz", hash = "sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f"}, ] tornado = [ - {file = "tornado-5.1.1-cp35-cp35m-win32.whl", hash = "sha256:732e836008c708de2e89a31cb2fa6c0e5a70cb60492bee6f1ea1047500feaf7f"}, - {file = "tornado-5.1.1-cp35-cp35m-win_amd64.whl", hash = "sha256:0662d28b1ca9f67108c7e3b77afabfb9c7e87bde174fbda78186ecedc2499a9d"}, - {file = "tornado-5.1.1-cp36-cp36m-win32.whl", hash = "sha256:8154ec22c450df4e06b35f131adc4f2f3a12ec85981a203301d310abf580500f"}, - {file = "tornado-5.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:d4b3e5329f572f055b587efc57d29bd051589fb5a43ec8898c77a47ec2fa2bbb"}, - {file = "tornado-5.1.1-cp37-cp37m-win32.whl", hash = "sha256:e5f2585afccbff22390cddac29849df463b252b711aa2ce7c5f3f342a5b3b444"}, - {file = "tornado-5.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:8e9d728c4579682e837c92fdd98036bd5cdefa1da2aaf6acf26947e6dd0c01c5"}, - {file = "tornado-5.1.1.tar.gz", hash = 
"sha256:4e5158d97583502a7e2739951553cbd88a72076f152b4b11b64b9a10c4c49409"}, - {file = "tornado-6.0.3-cp35-cp35m-win32.whl", hash = "sha256:c9399267c926a4e7c418baa5cbe91c7d1cf362d505a1ef898fde44a07c9dd8a5"}, - {file = "tornado-6.0.3-cp35-cp35m-win_amd64.whl", hash = "sha256:398e0d35e086ba38a0427c3b37f4337327231942e731edaa6e9fd1865bbd6f60"}, - {file = "tornado-6.0.3-cp36-cp36m-win32.whl", hash = "sha256:4e73ef678b1a859f0cb29e1d895526a20ea64b5ffd510a2307b5998c7df24281"}, - {file = "tornado-6.0.3-cp36-cp36m-win_amd64.whl", hash = "sha256:349884248c36801afa19e342a77cc4458caca694b0eda633f5878e458a44cb2c"}, - {file = "tornado-6.0.3-cp37-cp37m-win32.whl", hash = "sha256:559bce3d31484b665259f50cd94c5c28b961b09315ccd838f284687245f416e5"}, - {file = "tornado-6.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:abbe53a39734ef4aba061fca54e30c6b4639d3e1f59653f0da37a0003de148c7"}, - {file = "tornado-6.0.3.tar.gz", hash = "sha256:c845db36ba616912074c5b1ee897f8e0124df269468f25e4fe21fe72f6edd7a9"}, + {file = "tornado-6.0.4-cp35-cp35m-win32.whl", hash = "sha256:5217e601700f24e966ddab689f90b7ea4bd91ff3357c3600fa1045e26d68e55d"}, + {file = "tornado-6.0.4-cp35-cp35m-win_amd64.whl", hash = "sha256:c98232a3ac391f5faea6821b53db8db461157baa788f5d6222a193e9456e1740"}, + {file = "tornado-6.0.4-cp36-cp36m-win32.whl", hash = "sha256:5f6a07e62e799be5d2330e68d808c8ac41d4a259b9cea61da4101b83cb5dc673"}, + {file = "tornado-6.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:c952975c8ba74f546ae6de2e226ab3cc3cc11ae47baf607459a6728585bb542a"}, + {file = "tornado-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:2c027eb2a393d964b22b5c154d1a23a5f8727db6fda837118a776b29e2b8ebc6"}, + {file = "tornado-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:5618f72e947533832cbc3dec54e1dffc1747a5cb17d1fd91577ed14fa0dc081b"}, + {file = "tornado-6.0.4-cp38-cp38-win32.whl", hash = "sha256:22aed82c2ea340c3771e3babc5ef220272f6fd06b5108a53b4976d0d722bcd52"}, + {file = "tornado-6.0.4-cp38-cp38-win_amd64.whl", hash = 
"sha256:c58d56003daf1b616336781b26d184023ea4af13ae143d9dda65e31e534940b9"}, + {file = "tornado-6.0.4.tar.gz", hash = "sha256:0fe2d45ba43b00a41cd73f8be321a44936dc1aba233dee979f17a042b83eb6dc"}, ] towncrier = [ {file = "towncrier-19.2.0-py2.py3-none-any.whl", hash = "sha256:de19da8b8cb44f18ea7ed3a3823087d2af8fcf497151bb9fd1e1b092ff56ed8d"}, @@ -2139,34 +1625,25 @@ typed-ast = [ {file = "typed_ast-1.4.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34"}, {file = "typed_ast-1.4.1.tar.gz", hash = "sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b"}, ] -typing = [ - {file = "typing-3.7.4.1-py2-none-any.whl", hash = "sha256:c8cabb5ab8945cd2f54917be357d134db9cc1eb039e59d1606dc1e60cb1d9d36"}, - {file = "typing-3.7.4.1-py3-none-any.whl", hash = "sha256:f38d83c5a7a7086543a0f649564d661859c5146a85775ab90c0d2f93ffaa9714"}, - {file = "typing-3.7.4.1.tar.gz", hash = "sha256:91dfe6f3f706ee8cc32d38edbbf304e9b7583fb37108fef38229617f8b3eba23"}, -] urllib3 = [ - {file = "urllib3-1.25.8-py2.py3-none-any.whl", hash = "sha256:2f3db8b19923a873b3e5256dc9c2dedfa883e33d87c690d9c7913e1f40673cdc"}, - {file = "urllib3-1.25.8.tar.gz", hash = "sha256:87716c2d2a7121198ebcb7ce7cccf6ce5e9ba539041cfbaeecfb641dc0bf6acc"}, + {file = "urllib3-1.25.9-py2.py3-none-any.whl", hash = "sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115"}, + {file = "urllib3-1.25.9.tar.gz", hash = "sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527"}, ] watchdog = [ {file = "watchdog-0.10.2.tar.gz", hash = "sha256:c560efb643faed5ef28784b2245cf8874f939569717a4a12826a173ac644456b"}, ] wcwidth = [ - {file = "wcwidth-0.1.8-py2.py3-none-any.whl", hash = "sha256:8fd29383f539be45b20bd4df0dc29c20ba48654a41e661925e612311e9f3c603"}, - {file = "wcwidth-0.1.8.tar.gz", hash = "sha256:f28b3e8a6483e5d49e7f8949ac1a78314e740333ae305b4ba5defd3e74fb37a8"}, -] -win-unicode-console = [ - {file = 
"win_unicode_console-0.5.zip", hash = "sha256:d4142d4d56d46f449d6f00536a73625a871cba040f0bc1a2e305a04578f07d1e"}, + {file = "wcwidth-0.2.4-py2.py3-none-any.whl", hash = "sha256:79375666b9954d4a1a10739315816324c3e73110af9d0e102d906fdb0aec009f"}, + {file = "wcwidth-0.2.4.tar.gz", hash = "sha256:8c6b5b6ee1360b842645f336d9e5d68c55817c26d3050f46b235ef2bc650e48f"}, ] wrapt = [ - {file = "wrapt-1.11.2.tar.gz", hash = "sha256:565a021fd19419476b9362b05eeaa094178de64f8361e44468f9e9d7843901e1"}, - {file = "wrapt-1.12.0.tar.gz", hash = "sha256:0ec40d9fd4ec9f9e3ff9bdd12dbd3535f4085949f4db93025089d7a673ea94e8"}, + {file = "wrapt-1.12.1.tar.gz", hash = "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7"}, ] xdg = [ {file = "xdg-1.0.7-py2.py3-none-any.whl", hash = "sha256:4b4aaeefb4a94590a17b2e1aba32cac7babd45af5b3bcf89844b17ea13821555"}, {file = "xdg-1.0.7.tar.gz", hash = "sha256:b9c929e72a29783f9ae5d31a73b67c4a3e2754381bbfa72b9633e0f0d5c34120"}, ] zipp = [ - {file = "zipp-1.2.0-py2.py3-none-any.whl", hash = "sha256:e0d9e63797e483a30d27e09fffd308c59a700d365ec34e93cc100844168bf921"}, - {file = "zipp-1.2.0.tar.gz", hash = "sha256:c70410551488251b0fee67b460fb9a536af8d6f9f008ad10ac51f615b6a521b1"}, + {file = "zipp-3.1.0-py3-none-any.whl", hash = "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b"}, + {file = "zipp-3.1.0.tar.gz", hash = "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96"}, ] diff --git a/pyproject.toml b/pyproject.toml index d7d11ca4..54274e68 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,8 +1,8 @@ [tool.poetry] name = "mdbenchmark" -version = "2.0.1" +version = "3.0.1" license = "GPL-3.0" -authors = ["Max Linke", "Michael Gecht", "Marc Siggel"] +authors = ["Max Linke", "Michael Gecht", "Marc Siggel", "Sebastian Kehl"] description = "Quickly generate, start and analyze benchmarks for your molecular dynamics simulations." 
keywords = ["benchmark", "molecular dynamics", "simulations", "gromacs", "namd"] readme = "README.rst" @@ -20,10 +20,7 @@ classifiers = [ "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX", "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", @@ -35,7 +32,7 @@ classifiers = [ ] [tool.poetry.dependencies] -python = "~2.7 || ^3.5" +python = "^3.6" numpy = ">=1.15" pandas = ">=0.24" matplotlib = ">=2" @@ -50,6 +47,7 @@ tabulate = "^0.8.5" Sphinx = { version = "^1", optional = true } sphinx-autobuild = { version = "^0.7.1", optional = true } sphinx-click = { version = "^2.3", optional = true } +psutil = "^5.7.0" [tool.poetry.extras] docs = ["Sphinx", "sphinx-autobuild", "sphinx-click"] @@ -62,7 +60,7 @@ pytest-cache = "^1.0" pytest-pep8 = "^1.0" pytest-sugar = "^0.9.2" black = {version = "^19.10b0", python = "^3.6", allow-prereleases = true} -flake8 = "^3.7" +flake8 = "^3.8" isort = "^4.3" pylint = ">=1" restructuredtext_lint = "^1.3" @@ -79,5 +77,5 @@ title_format = "{version} ({project_date})" issue_format = "`#{issue} `_" [build-system] -requires = ["poetry>=0.12"] -build-backend = "poetry.masonry.api" +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/setup.cfg b/setup.cfg index f0327deb..312dd1cd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [flake8] -ignore = N806, N803, N802, I100, I101, I201, F401, F811, W503, E203, E501 +ignore = E203, E501, W503 [isort] multi_line_output=3 @@ -9,9 +9,8 @@ combine_as_imports=True line_length=88 default_section = THIRDPARTY known_first_party = mdbenchmark -known_future_library = six known_third_party = click,datreant,matplotlib,numpy,pandas,tabulate -sections = 
FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER +sections = STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER [coverage:run] omit = @@ -20,7 +19,7 @@ omit = [tool:pytest] pep8ignore = - *.py N806, N803, N802, I100, I101, I201 + *.py mdbenchmark/tests/test_analyze.py ALL [build_sphinx]