From e90ad91310dffbb2f64813abdcef624dfc75c82b Mon Sep 17 00:00:00 2001 From: mrava87 Date: Sat, 27 Jul 2024 22:31:24 +0300 Subject: [PATCH] Clean up of codes with pre-commit --- .github/workflows/build.yaml | 2 +- .gitignore | 2 +- .pre-commit-config.yaml | 30 ++ .travis.yml | 13 - COPYING | 504 ------------------------- Makefile | 5 +- README.md | 2 +- docs/Makefile | 2 +- docs/source/conf.py | 5 +- docs/source/index.rst | 1 + environment-dev.yml | 8 +- environment.yml | 1 + pytests/test_spgl1.py | 421 +++++++++++++-------- readthedocs.yml | 2 +- requirements-dev.txt | 8 +- requirements.txt | 3 +- setup.cfg | 2 +- spgl1/__init__.py | 20 +- spgl1/lsqr.py | 414 +++++++++++---------- spgl1/spgl1.py | 595 +++++++++++++++++++----------- tutorials/README.txt | 2 +- tutorials/mmvnn.py | 99 ++--- tutorials/{spgl1.py => spgl1s.py} | 119 +++--- 23 files changed, 1060 insertions(+), 1200 deletions(-) create mode 100644 .pre-commit-config.yaml delete mode 100644 .travis.yml delete mode 100644 COPYING rename tutorials/{spgl1.py => spgl1s.py} (71%) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 9b0b9be..b8030f4 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -30,4 +30,4 @@ jobs: pip install . - name: Test with pytest run: | - pytest \ No newline at end of file + pytest diff --git a/.gitignore b/.gitignore index 2e4865d..f3b0a21 100644 --- a/.gitignore +++ b/.gitignore @@ -25,4 +25,4 @@ spgl1.egg-info/ # Docs docs/build/ docs/source/api/generated -docs/source/tutorials \ No newline at end of file +docs/source/tutorials diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..a4ca2e1 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,30 @@ +exclude: "^docs/" +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + + - repo: https://github.com/psf/black + rev: 22.3.0 + hooks: + - id: black + args: # arguments to configure black + - --line-length=88 + + - repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + name: isort (python) + args: + [ + "--profile", + "black", + "--skip", + "__init__.py", + "--filter-files", + "--line-length=88", + ] diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index b745649..0000000 --- a/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: python - -python: - - "3.6" - -os: - - linux - -script: - - pip3 install -r requirements-dev.txt - - pip3 install . - - python3 setup.py test - diff --git a/COPYING b/COPYING deleted file mode 100644 index 5ab7695..0000000 --- a/COPYING +++ /dev/null @@ -1,504 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 2.1, February 1999 - - Copyright (C) 1991, 1999 Free Software Foundation, Inc. - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - -[This is the first released version of the Lesser GPL. It also counts - as the successor of the GNU Library Public License, version 2, hence - the version number 2.1.] - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -Licenses are intended to guarantee your freedom to share and change -free software--to make sure the software is free for all its users. 
- - This license, the Lesser General Public License, applies to some -specially designated software packages--typically libraries--of the -Free Software Foundation and other authors who decide to use it. You -can use it too, but we suggest you first think carefully about whether -this license or the ordinary General Public License is the better -strategy to use in any particular case, based on the explanations below. - - When we speak of free software, we are referring to freedom of use, -not price. Our General Public Licenses are designed to make sure that -you have the freedom to distribute copies of free software (and charge -for this service if you wish); that you receive source code or can get -it if you want it; that you can change the software and use pieces of -it in new free programs; and that you are informed that you can do -these things. - - To protect your rights, we need to make restrictions that forbid -distributors to deny you these rights or to ask you to surrender these -rights. These restrictions translate to certain responsibilities for -you if you distribute copies of the library or if you modify it. - - For example, if you distribute copies of the library, whether gratis -or for a fee, you must give the recipients all the rights that we gave -you. You must make sure that they, too, receive or can get the source -code. If you link other code with the library, you must provide -complete object files to the recipients, so that they can relink them -with the library after making changes to the library and recompiling -it. And you must show them these terms so they know their rights. - - We protect your rights with a two-step method: (1) we copyright the -library, and (2) we offer you this license, which gives you legal -permission to copy, distribute and/or modify the library. - - To protect each distributor, we want to make it very clear that -there is no warranty for the free library. Also, if the library is -modified by someone else and passed on, the recipients should know -that what they have is not the original version, so that the original -author's reputation will not be affected by problems that might be -introduced by others. - - Finally, software patents pose a constant threat to the existence of -any free program. We wish to make sure that a company cannot -effectively restrict the users of a free program by obtaining a -restrictive license from a patent holder. Therefore, we insist that -any patent license obtained for a version of the library must be -consistent with the full freedom of use specified in this license. - - Most GNU software, including some libraries, is covered by the -ordinary GNU General Public License. This license, the GNU Lesser -General Public License, applies to certain designated libraries, and -is quite different from the ordinary General Public License. We use -this license for certain libraries in order to permit linking those -libraries into non-free programs. - - When a program is linked with a library, whether statically or using -a shared library, the combination of the two is legally speaking a -combined work, a derivative of the original library. The ordinary -General Public License therefore permits such linking only if the -entire combination fits its criteria of freedom. The Lesser General -Public License permits more lax criteria for linking other code with -the library. 
- - We call this license the "Lesser" General Public License because it -does Less to protect the user's freedom than the ordinary General -Public License. It also provides other free software developers Less -of an advantage over competing non-free programs. These disadvantages -are the reason we use the ordinary General Public License for many -libraries. However, the Lesser license provides advantages in certain -special circumstances. - - For example, on rare occasions, there may be a special need to -encourage the widest possible use of a certain library, so that it becomes -a de-facto standard. To achieve this, non-free programs must be -allowed to use the library. A more frequent case is that a free -library does the same job as widely used non-free libraries. In this -case, there is little to gain by limiting the free library to free -software only, so we use the Lesser General Public License. - - In other cases, permission to use a particular library in non-free -programs enables a greater number of people to use a large body of -free software. For example, permission to use the GNU C Library in -non-free programs enables many more people to use the whole GNU -operating system, as well as its variant, the GNU/Linux operating -system. - - Although the Lesser General Public License is Less protective of the -users' freedom, it does ensure that the user of a program that is -linked with the Library has the freedom and the wherewithal to run -that program using a modified version of the Library. - - The precise terms and conditions for copying, distribution and -modification follow. Pay close attention to the difference between a -"work based on the library" and a "work that uses the library". The -former contains code derived from the library, whereas the latter must -be combined with the library in order to run. - - GNU LESSER GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License Agreement applies to any software library or other -program which contains a notice placed by the copyright holder or -other authorized party saying it may be distributed under the terms of -this Lesser General Public License (also called "this License"). -Each licensee is addressed as "you". - - A "library" means a collection of software functions and/or data -prepared so as to be conveniently linked with application programs -(which use some of those functions and data) to form executables. - - The "Library", below, refers to any such software library or work -which has been distributed under these terms. A "work based on the -Library" means either the Library or any derivative work under -copyright law: that is to say, a work containing the Library or a -portion of it, either verbatim or with modifications and/or translated -straightforwardly into another language. (Hereinafter, translation is -included without limitation in the term "modification".) - - "Source code" for a work means the preferred form of the work for -making modifications to it. For a library, complete source code means -all the source code for all modules it contains, plus any associated -interface definition files, plus the scripts used to control compilation -and installation of the library. - - Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. 
The act of -running a program using the Library is not restricted, and output from -such a program is covered only if its contents constitute a work based -on the Library (independent of the use of the Library in a tool for -writing it). Whether that is true depends on what the Library does -and what the program that uses the Library does. - - 1. You may copy and distribute verbatim copies of the Library's -complete source code as you receive it, in any medium, provided that -you conspicuously and appropriately publish on each copy an -appropriate copyright notice and disclaimer of warranty; keep intact -all the notices that refer to this License and to the absence of any -warranty; and distribute a copy of this License along with the -Library. - - You may charge a fee for the physical act of transferring a copy, -and you may at your option offer warranty protection in exchange for a -fee. - - 2. You may modify your copy or copies of the Library or any portion -of it, thus forming a work based on the Library, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) The modified work must itself be a software library. - - b) You must cause the files modified to carry prominent notices - stating that you changed the files and the date of any change. - - c) You must cause the whole of the work to be licensed at no - charge to all third parties under the terms of this License. - - d) If a facility in the modified Library refers to a function or a - table of data to be supplied by an application program that uses - the facility, other than as an argument passed when the facility - is invoked, then you must make a good faith effort to ensure that, - in the event an application does not supply such function or - table, the facility still operates, and performs whatever part of - its purpose remains meaningful. - - (For example, a function in a library to compute square roots has - a purpose that is entirely well-defined independent of the - application. Therefore, Subsection 2d requires that any - application-supplied function or table used by this function must - be optional: if the application does not supply it, the square - root function must still compute square roots.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Library, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Library, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote -it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Library. - -In addition, mere aggregation of another work not based on the Library -with the Library (or with a work based on the Library) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may opt to apply the terms of the ordinary GNU General Public -License instead of this License to a given copy of the Library. 
To do -this, you must alter all the notices that refer to this License, so -that they refer to the ordinary GNU General Public License, version 2, -instead of to this License. (If a newer version than version 2 of the -ordinary GNU General Public License has appeared, then you can specify -that version instead if you wish.) Do not make any other change in -these notices. - - Once this change is made in a given copy, it is irreversible for -that copy, so the ordinary GNU General Public License applies to all -subsequent copies and derivative works made from that copy. - - This option is useful when you wish to copy part of the code of -the Library into a program that is not a library. - - 4. You may copy and distribute the Library (or a portion or -derivative of it, under Section 2) in object code or executable form -under the terms of Sections 1 and 2 above provided that you accompany -it with the complete corresponding machine-readable source code, which -must be distributed under the terms of Sections 1 and 2 above on a -medium customarily used for software interchange. - - If distribution of object code is made by offering access to copy -from a designated place, then offering equivalent access to copy the -source code from the same place satisfies the requirement to -distribute the source code, even though third parties are not -compelled to copy the source along with the object code. - - 5. A program that contains no derivative of any portion of the -Library, but is designed to work with the Library by being compiled or -linked with it, is called a "work that uses the Library". Such a -work, in isolation, is not a derivative work of the Library, and -therefore falls outside the scope of this License. - - However, linking a "work that uses the Library" with the Library -creates an executable that is a derivative of the Library (because it -contains portions of the Library), rather than a "work that uses the -library". The executable is therefore covered by this License. -Section 6 states terms for distribution of such executables. - - When a "work that uses the Library" uses material from a header file -that is part of the Library, the object code for the work may be a -derivative work of the Library even though the source code is not. -Whether this is true is especially significant if the work can be -linked without the Library, or if the work is itself a library. The -threshold for this to be true is not precisely defined by law. - - If such an object file uses only numerical parameters, data -structure layouts and accessors, and small macros and small inline -functions (ten lines or less in length), then the use of the object -file is unrestricted, regardless of whether it is legally a derivative -work. (Executables containing this object code plus portions of the -Library will still fall under Section 6.) - - Otherwise, if the work is a derivative of the Library, you may -distribute the object code for the work under the terms of Section 6. -Any executables containing that work also fall under Section 6, -whether or not they are linked directly with the Library itself. - - 6. As an exception to the Sections above, you may also combine or -link a "work that uses the Library" with the Library to produce a -work containing portions of the Library, and distribute that work -under terms of your choice, provided that the terms permit -modification of the work for the customer's own use and reverse -engineering for debugging such modifications. 
- - You must give prominent notice with each copy of the work that the -Library is used in it and that the Library and its use are covered by -this License. You must supply a copy of this License. If the work -during execution displays copyright notices, you must include the -copyright notice for the Library among them, as well as a reference -directing the user to the copy of this License. Also, you must do one -of these things: - - a) Accompany the work with the complete corresponding - machine-readable source code for the Library including whatever - changes were used in the work (which must be distributed under - Sections 1 and 2 above); and, if the work is an executable linked - with the Library, with the complete machine-readable "work that - uses the Library", as object code and/or source code, so that the - user can modify the Library and then relink to produce a modified - executable containing the modified Library. (It is understood - that the user who changes the contents of definitions files in the - Library will not necessarily be able to recompile the application - to use the modified definitions.) - - b) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (1) uses at run time a - copy of the library already present on the user's computer system, - rather than copying library functions into the executable, and (2) - will operate properly with a modified version of the library, if - the user installs one, as long as the modified version is - interface-compatible with the version that the work was made with. - - c) Accompany the work with a written offer, valid for at - least three years, to give the same user the materials - specified in Subsection 6a, above, for a charge no more - than the cost of performing this distribution. - - d) If distribution of the work is made by offering access to copy - from a designated place, offer equivalent access to copy the above - specified materials from the same place. - - e) Verify that the user has already received a copy of these - materials or that you have already sent this user a copy. - - For an executable, the required form of the "work that uses the -Library" must include any data and utility programs needed for -reproducing the executable from it. However, as a special exception, -the materials to be distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies -the executable. - - It may happen that this requirement contradicts the license -restrictions of other proprietary libraries that do not normally -accompany the operating system. Such a contradiction means you cannot -use both them and the Library together in an executable that you -distribute. - - 7. You may place library facilities that are a work based on the -Library side-by-side in a single library together with other library -facilities not covered by this License, and distribute such a combined -library, provided that the separate distribution of the work based on -the Library and of the other library facilities is otherwise -permitted, and provided that you do these two things: - - a) Accompany the combined library with a copy of the same work - based on the Library, uncombined with any other library - facilities. This must be distributed under the terms of the - Sections above. 
- - b) Give prominent notice with the combined library of the fact - that part of it is a work based on the Library, and explaining - where to find the accompanying uncombined form of the same work. - - 8. You may not copy, modify, sublicense, link with, or distribute -the Library except as expressly provided under this License. Any -attempt otherwise to copy, modify, sublicense, link with, or -distribute the Library is void, and will automatically terminate your -rights under this License. However, parties who have received copies, -or rights, from you under this License will not have their licenses -terminated so long as such parties remain in full compliance. - - 9. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Library or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Library (or any work based on the -Library), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Library or works based on it. - - 10. Each time you redistribute the Library (or any work based on the -Library), the recipient automatically receives a license from the -original licensor to copy, distribute, link with or modify the Library -subject to these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties with -this License. - - 11. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Library at all. For example, if a patent -license would not permit royalty-free redistribution of the Library by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Library. - -If any portion of this section is held invalid or unenforceable under any -particular circumstance, the balance of the section is intended to apply, -and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 12. 
If the distribution and/or use of the Library is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Library under this License may add -an explicit geographical distribution limitation excluding those countries, -so that distribution is permitted only in or among countries not thus -excluded. In such case, this License incorporates the limitation as if -written in the body of this License. - - 13. The Free Software Foundation may publish revised and/or new -versions of the Lesser General Public License from time to time. -Such new versions will be similar in spirit to the present version, -but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Library -specifies a version number of this License which applies to it and -"any later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Library does not specify a -license version number, you may choose any version ever published by -the Free Software Foundation. - - 14. If you wish to incorporate parts of the Library into other free -programs whose distribution conditions are incompatible with these, -write to the author to ask for permission. For software which is -copyrighted by the Free Software Foundation, write to the Free -Software Foundation; we sometimes make exceptions for this. Our -decision will be guided by the two goals of preserving the free status -of all derivatives of our free software and of promoting the sharing -and reuse of software generally. - - NO WARRANTY - - 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY -KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE -LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME -THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU -FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR -CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE -LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING -RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A -FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF -SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH -DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Libraries - - If you develop a new library, and you want it to be of the greatest -possible use to the public, we recommend making it free software that -everyone can redistribute and change. You can do so by permitting -redistribution under these terms (or, alternatively, under the terms of the -ordinary General Public License). - - To apply these terms, attach the following notices to the library. 
It is -safest to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -Also add information on how to contact you by electronic and paper mail. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the library, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - library `Frob' (a library for tweaking knobs) written by James Random Hacker. - - , 1 April 1990 - Ty Coon, President of Vice - -That's all there is to it! - - diff --git a/Makefile b/Makefile index b322674..bed929c 100755 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ PIP := $(shell command -v pip3 2> /dev/null || command which pip 2> /dev/null) PYTHON := $(shell command -v python3 2> /dev/null || command which python 2> /dev/null) -.PHONY: install dev-install install_conda dev-install_conda tests doc docupdate +.PHONY: install dev-install install_conda dev-install_conda tests doc docupdate servedoc pipcheck: ifndef PIP @@ -39,3 +39,6 @@ doc: docupdate: cd docs && make html && cd .. + +servedoc: + $(PYTHON) -m http.server --directory docs/build/html/ diff --git a/README.md b/README.md index fd7d944..1e9bb95 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ It is designed to solve any of the following three problems: 2. Basis pursuit (BP): ``minimize ||x||_1 subject to Ax = b`` - + 3. Lasso: ``minimize ||Ax - b||_2 subject to ||x||_1 <= tau``, diff --git a/docs/Makefile b/docs/Makefile index 9cc9c20..e43812f 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -7,7 +7,7 @@ # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build -SPHINXPROJ = Pylops +SPHINXPROJ = Spgl1 SOURCEDIR = source BUILDDIR = build diff --git a/docs/source/conf.py b/docs/source/conf.py index e9d3db0..21f5a2b 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -3,7 +3,6 @@ import os import datetime import sphinx_rtd_theme -import sphinx_gallery from sphinx_gallery.sorting import ExampleTitleSortKey from pkg_resources import get_distribution @@ -75,6 +74,7 @@ templates_path = ['_templates'] exclude_patterns = ['_build', '**.ipynb_checkpoints'] source_suffix = '.rst' + # The encoding of source files. 
source_encoding = 'utf-8-sig' master_doc = 'index' @@ -106,7 +106,6 @@ html_show_copyright = True # Theme config -#html_theme = "alabaster" html_theme = "sphinx_rtd_theme" html_theme_options = { 'logo_only': True, @@ -129,4 +128,4 @@ # Load the custom CSS files (needs sphinx >= 1.6 for this to work) def setup(app): - app.add_stylesheet("style.css") \ No newline at end of file + app.add_css_file("style.css") diff --git a/docs/source/index.rst b/docs/source/index.rst index c5ae727..f2368c2 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -49,6 +49,7 @@ where more appropriate implementation choices were identified for the Python pro :hidden: Installation + Tutorials Reference documentation Credits diff --git a/environment-dev.yml b/environment-dev.yml index fc651d0..a03a999 100755 --- a/environment-dev.yml +++ b/environment-dev.yml @@ -4,6 +4,7 @@ channels: - conda-forge dependencies: - python>=3.6.4 + - pip - numpy>=1.15.0 - scipy - matplotlib @@ -12,10 +13,15 @@ dependencies: - pytest - Sphinx - numpydoc + - pre-commit + - autopep8 + - isort + - black - pip: - pytest-runner - setuptools_scm - sphinx-rtd-theme - sphinx-gallery - nbsphinx - - image \ No newline at end of file + - image + - flake8 diff --git a/environment.yml b/environment.yml index 74bc696..ca71e5e 100755 --- a/environment.yml +++ b/environment.yml @@ -3,5 +3,6 @@ channels: - defaults dependencies: - python>=3.6.4 + - pip - numpy>=1.15.0 - scipy diff --git a/pytests/test_spgl1.py b/pytests/test_spgl1.py index 64b8bfb..574703a 100755 --- a/pytests/test_spgl1.py +++ b/pytests/test_spgl1.py @@ -1,39 +1,86 @@ -import pytest import numpy as np - +import pytest from numpy.testing import assert_array_almost_equal -from scipy.sparse import spdiags, csr_matrix -from spgl1.spgl1 import oneprojector, norm_l12nn_primal, norm_l12nn_dual, \ - norm_l12nn_project -from spgl1 import spg_lasso, spg_bp, spg_bpdn, spg_mmv - +from scipy.sparse import csr_matrix, spdiags from scipy.sparse.linalg import lsqr as splsqr -from spgl1.lsqr import lsqr +from spgl1 import spg_bp, spg_bpdn, spg_lasso, spg_mmv +from spgl1.lsqr import lsqr +from spgl1.spgl1 import ( + norm_l12nn_dual, + norm_l12nn_primal, + norm_l12nn_project, + oneprojector, +) # dense matrix (dtype=np.float64) -par1 = {'n': 50, 'm': 50, 'k': 14, - 'dtype': np.float64, 'sparse': False} # square -par2 = {'n': 128, 'm': 50, 'k': 14, - 'dtype': np.float64, 'sparse': False} # underdetermined -par3 = {'n': 50, 'm': 100, 'k': 14, - 'dtype': np.float64, 'sparse': False} # overdetermined +par1 = { + "n": 50, + "m": 50, + "k": 14, + "dtype": np.float64, + "sparse": False, +} # square +par2 = { + "n": 128, + "m": 50, + "k": 14, + "dtype": np.float64, + "sparse": False, +} # underdetermined +par3 = { + "n": 50, + "m": 100, + "k": 14, + "dtype": np.float64, + "sparse": False, +} # overdetermined # sparse matrix (dtype=np.float64) -par1_sp = {'n': 50, 'm': 50, 'k': 14, - 'dtype': np.float64, 'sparse': True} # square -par2_sp = {'n': 128, 'm': 50, 'k': 14, - 'dtype': np.float64, 'sparse': True} # underdetermined -par3_sp = {'n': 50, 'm': 100, 'k': 14, - 'dtype': np.float64, 'sparse': True} # overdetermined +par1_sp = { + "n": 50, + "m": 50, + "k": 14, + "dtype": np.float64, + "sparse": True, +} # square +par2_sp = { + "n": 128, + "m": 50, + "k": 14, + "dtype": np.float64, + "sparse": True, +} # underdetermined +par3_sp = { + "n": 50, + "m": 100, + "k": 14, + "dtype": np.float64, + "sparse": True, +} # overdetermined # dense matrix (dtype=np.float32) -par1_f32 = {'n': 50, 'm': 50, 'k': 
14, - 'dtype': np.float32, 'sparse': False} # square -par2_f32 = {'n': 128, 'm': 50, 'k': 14, - 'dtype': np.float32, 'sparse': False} # underdetermined -par3_f32 = {'n': 50, 'm': 100, 'k': 14, - 'dtype': np.float32, 'sparse': False} # overdetermined +par1_f32 = { + "n": 50, + "m": 50, + "k": 14, + "dtype": np.float32, + "sparse": False, +} # square +par2_f32 = { + "n": 128, + "m": 50, + "k": 14, + "dtype": np.float32, + "sparse": False, +} # underdetermined +par3_f32 = { + "n": 50, + "m": 100, + "k": 14, + "dtype": np.float32, + "sparse": False, +} # overdetermined @pytest.mark.parametrize("par", [(par1), (par1_f32)]) @@ -44,16 +91,27 @@ def test_oneprojector(par): """ np.random.seed(0) - b = np.random.normal(0., 1., par['n']).astype(dtype=par['dtype']) - d = 1. - bp = oneprojector(b, d, 1.) - assert bp.dtype == par['dtype'] - assert np.linalg.norm(bp, 1) < 1. - - -@pytest.mark.parametrize("par", [(par1), (par2), (par3), - (par1_sp), (par2_sp), (par3_sp), - (par1_f32), (par2_f32), (par3_f32)]) + b = np.random.normal(0.0, 1.0, par["n"]).astype(dtype=par["dtype"]) + d = 1.0 + bp = oneprojector(b, d, 1.0) + assert bp.dtype == par["dtype"] + assert np.linalg.norm(bp, 1) < 1.0 + + +@pytest.mark.parametrize( + "par", + [ + (par1), + (par2), + (par3), + (par1_sp), + (par2_sp), + (par3_sp), + (par1_f32), + (par2_f32), + (par3_f32), + ], +) def test_lasso(par): """LASSO problem for ||x||_1 <= pi: @@ -62,21 +120,21 @@ def test_lasso(par): """ np.random.seed(0) - n = par['n'] - m = par['m'] - k = par['k'] + n = par["n"] + m = par["m"] + k = par["k"] - A, A1 = np.linalg.qr(np.random.randn(n, m), 'reduced') + A, A1 = np.linalg.qr(np.random.randn(n, m), "reduced") if m > n: A = A1.copy() - if par['sparse']: + if par["sparse"]: A = csr_matrix(A) - A = A.astype(par['dtype']) + A = A.astype(par["dtype"]) # Create sparse vector p = np.random.permutation(m) p = p[0:k] - x = np.zeros(m, dtype=par['dtype']) + x = np.zeros(m, dtype=par["dtype"]) x[p] = np.random.randn(k) # Set up vector b, and run solver @@ -84,18 +142,29 @@ def test_lasso(par): tau = np.pi xinv, resid, _, _ = spg_lasso(A, b, tau, verbosity=0) - assert xinv.dtype == par['dtype'] + assert xinv.dtype == par["dtype"] assert np.linalg.norm(xinv, 1) - np.pi < 1e-5 # Run solver with subspace minimization xinv, resid, _, _ = spg_lasso(A, b, tau, subspace_min=True, verbosity=0) - assert xinv.dtype == par['dtype'] + assert xinv.dtype == par["dtype"] assert np.linalg.norm(xinv, 1) - np.pi < 1e-5 -@pytest.mark.parametrize("par", [(par1), (par2), (par3), - (par1_sp), (par2_sp), (par3_sp), - (par1_f32), (par2_f32), (par3_f32)]) +@pytest.mark.parametrize( + "par", + [ + (par1), + (par2), + (par3), + (par1_sp), + (par2_sp), + (par3_sp), + (par1_f32), + (par2_f32), + (par3_f32), + ], +) def test_bp(par): """Basis pursuit (BP) problem: @@ -105,36 +174,36 @@ def test_bp(par): np.random.seed(0) # Create random m-by-n encoding matrix - n = par['n'] - m = par['m'] - k = par['k'] + n = par["n"] + m = par["m"] + k = par["k"] - A, A1 = np.linalg.qr(np.random.randn(n, m), 'reduced') + A, A1 = np.linalg.qr(np.random.randn(n, m), "reduced") if m > n: A = A1.copy() - A = A.astype(par['dtype']) + A = A.astype(par["dtype"]) # Create sparse vector p = np.random.permutation(m) p = p[0:k] - x = np.zeros(m, dtype=par['dtype']) + x = np.zeros(m, dtype=par["dtype"]) x[p] = np.random.randn(k) # Set up vector b, and run solver b = A.dot(x) xinv, _, _, _ = spg_bp(A, b, verbosity=0) - assert xinv.dtype == par['dtype'] - assert_array_almost_equal(x, xinv, decimal=3) + assert 
xinv.dtype == par["dtype"] + assert_array_almost_equal(x, xinv, decimal=3 if par["dtype"] == np.float64 else 2) # Run solver with subspace minimization xinv, _, _, _ = spg_bp(A, b, subspace_min=True, verbosity=0) - assert xinv.dtype == par['dtype'] - assert_array_almost_equal(x, xinv, decimal=3) + assert xinv.dtype == par["dtype"] + assert_array_almost_equal(x, xinv, decimal=3 if par["dtype"] == np.float64 else 2) -@pytest.mark.parametrize("par", [(par1), (par3), - (par1_sp), (par3_sp), - (par1_f32), (par3_f32)]) +@pytest.mark.parametrize( + "par", [(par1), (par3), (par1_sp), (par3_sp), (par1_f32), (par3_f32)] +) def test_bpdn(par): """Basis pursuit denoise (BPDN) problem: @@ -144,38 +213,53 @@ def test_bpdn(par): np.random.seed(0) # Create random m-by-n encoding matrix - n = par['n'] - m = par['m'] - k = par['k'] + n = par["n"] + m = par["m"] + k = par["k"] - A, A1 = np.linalg.qr(np.random.randn(n, m), 'reduced') + A, A1 = np.linalg.qr(np.random.randn(n, m), "reduced") if m > n: A = A1.copy() - A = A.astype(par['dtype']) + A = A.astype(par["dtype"]) # Create sparse vector p = np.random.permutation(m) p = p[0:k] - x = np.zeros(m, dtype=par['dtype']) + x = np.zeros(m, dtype=par["dtype"]) x[p] = np.random.randn(k) # Set up vector b, and run solver - b = A.dot(x) + np.random.randn(n).astype(par['dtype']) * 0.075 + b = A.dot(x) + np.random.randn(n).astype(par["dtype"]) * 0.075 sigma = 0.10 xinv, resid, _, _ = spg_bpdn(A, b, sigma, iter_lim=1000, verbosity=0) - assert xinv.dtype == par['dtype'] - assert np.linalg.norm(resid) < sigma * 1.1 # need to check why resid is slighly bigger than sigma + assert xinv.dtype == par["dtype"] + assert ( + np.linalg.norm(resid) < sigma * 1.1 + ) # need to check why resid is slighly bigger than sigma # Run solver with subspace minimization xinv, _, _, _ = spg_bpdn(A, b, sigma, subspace_min=True, verbosity=0) - assert xinv.dtype == par['dtype'] - assert np.linalg.norm(resid) < sigma * 1.1 # need to check why resid is slighly bigger than sigma - - -@pytest.mark.parametrize("par", [(par1), (par2), (par3), - (par1_sp), (par2_sp), (par3_sp), - (par1_f32), (par2_f32), (par3_f32)]) + assert xinv.dtype == par["dtype"] + assert ( + np.linalg.norm(resid) < sigma * 1.1 + ) # need to check why resid is slighly bigger than sigma + + +@pytest.mark.parametrize( + "par", + [ + (par1), + (par2), + (par3), + (par1_sp), + (par2_sp), + (par3_sp), + (par1_f32), + (par2_f32), + (par3_f32), + ], +) def test_bp_complex(par): """Basis pursuit (BP) problem for complex variables: @@ -184,15 +268,16 @@ def test_bp_complex(par): """ np.random.seed(0) - dtypecomplex = (np.ones(1, dtype=par['dtype']) + - 1j * np.ones(1, dtype=par['dtype'])).dtype + dtypecomplex = ( + np.ones(1, dtype=par["dtype"]) + 1j * np.ones(1, dtype=par["dtype"]) + ).dtype # Create random m-by-n encoding matrix - n = par['n'] - m = par['m'] - k = par['k'] + n = par["n"] + m = par["m"] + k = par["k"] - A, A1 = np.linalg.qr(np.random.randn(n, m), 'reduced') + A, A1 = np.linalg.qr(np.random.randn(n, m), "reduced") if m > n: A = A1.copy() A = A.astype(dtypecomplex) @@ -207,17 +292,28 @@ def test_bp_complex(par): b = A.dot(x) xinv, _, _, _ = spg_bp(A, b, verbosity=0) assert xinv.dtype == dtypecomplex - assert_array_almost_equal(x, xinv, decimal=3) + assert_array_almost_equal(x, xinv, decimal=3 if par["dtype"] == np.float64 else 2) # Run solver with subspace minimization xinv, _, _, _ = spg_bp(A, b, subspace_min=True, verbosity=0) assert xinv.dtype == dtypecomplex - assert_array_almost_equal(x, xinv, decimal=3) - - 
-@pytest.mark.parametrize("par", [(par1), (par2), (par3), - (par1_sp), (par2_sp), (par3_sp), - (par1_f32), (par2_f32), (par3_f32)]) + assert_array_almost_equal(x, xinv, decimal=3 if par["dtype"] == np.float64 else 2) + + +@pytest.mark.parametrize( + "par", + [ + (par1), + (par2), + (par3), + (par1_sp), + (par2_sp), + (par3_sp), + (par1_f32), + (par2_f32), + (par3_f32), + ], +) def test_weighted_bp(par): """Weighted Basis pursuit (WBP) problem: @@ -231,35 +327,46 @@ def test_weighted_bp(par): np.random.seed(0) # Create random m-by-n encoding matrix - n = par['n'] - m = par['m'] - k = par['k'] + n = par["n"] + m = par["m"] + k = par["k"] - A, A1 = np.linalg.qr(np.random.randn(n, m), 'reduced') + A, A1 = np.linalg.qr(np.random.randn(n, m), "reduced") if m > n: A = A1.copy() - A = A.astype(par['dtype']) + A = A.astype(par["dtype"]) # Create sparse vector p = np.random.permutation(m) p = p[0:k] - x = np.zeros(m, dtype=par['dtype']) + x = np.zeros(m, dtype=par["dtype"]) x[p] = np.random.randn(k) # Set up weights w and vector b - w = 0.1 * np.random.rand(m).astype(dtype=par['dtype']) + 0.1 # Weights + w = 0.1 * np.random.rand(m).astype(dtype=par["dtype"]) + 0.1 # Weights b = A.dot(x / w) # Signal xinv, _, _, _ = spg_bp(A, b, iter_lim=1000, weights=w, verbosity=0) # Reconstructed solution, with weighting xinv *= w - assert xinv.dtype == par['dtype'] - assert_array_almost_equal(x, xinv, decimal=3) - - -@pytest.mark.parametrize("par", [(par1), (par2), (par3), - (par1_sp), (par2_sp), (par3_sp), - (par1_f32), (par2_f32), (par3_f32)]) + assert xinv.dtype == par["dtype"] + assert_array_almost_equal(x, xinv, decimal=3 if par["dtype"] == np.float64 else 2) + + +@pytest.mark.parametrize( + "par", + [ + (par1), + (par2), + (par3), + (par1_sp), + (par2_sp), + (par3_sp), + (par1_f32), + (par2_f32), + (par3_f32), + ], +) def test_multiple_measurements(par): """Multiple measurement vector (MMV) problem: @@ -269,20 +376,20 @@ def test_multiple_measurements(par): np.random.seed(0) # Create random m-by-n encoding matrix - m = par['m'] - n = par['n'] - k = par['k'] + m = par["m"] + n = par["n"] + k = par["k"] l = 6 A = np.random.randn(m, n) - A = A.astype(par['dtype']) + A = A.astype(par["dtype"]) p = np.random.permutation(n)[:k] - X = np.zeros((n, l), dtype=par['dtype']) + X = np.zeros((n, l), dtype=par["dtype"]) X[p, :] = np.random.randn(k, l) - weights = 0.1 * np.random.rand(n).astype(par['dtype']) + 0.1 - W = 1 / weights * np.eye(n, dtype=par['dtype']) + weights = 0.1 * np.random.rand(n).astype(par["dtype"]) + 0.1 + W = 1 / weights * np.eye(n, dtype=par["dtype"]) B = A.dot(W).dot(X) @@ -293,49 +400,63 @@ def test_multiple_measurements(par): X_w, _, _, _ = spg_mmv(A, B, 0, weights=weights, verbosity=0) X_w = spdiags(weights, 0, n, n).dot(X_w) - assert X_uw.dtype == par['dtype'] - assert X_w.dtype == par['dtype'] + assert X_uw.dtype == par["dtype"] + assert X_w.dtype == par["dtype"] assert_array_almost_equal(X, X_uw, decimal=1) assert_array_almost_equal(X, X_w, decimal=1) -@pytest.mark.parametrize("par", [(par1), (par2), (par3), - (par1_sp), (par2_sp), (par3_sp), - (par1_f32), (par2_f32), (par3_f32)]) +@pytest.mark.parametrize( + "par", + [ + (par1), + (par2), + (par3), + (par1_sp), + (par2_sp), + (par3_sp), + (par1_f32), + (par2_f32), + (par3_f32), + ], +) def test_multiple_measurements_nonnegative(par): - """Multiple measurement vector (MMV) problem with non-negative norm: - """ + """Multiple measurement vector (MMV) problem with non-negative norm:""" np.random.seed(0) # Create random m-by-n encoding matrix - m 
= par['m'] - n = par['n'] - k = par['k'] + m = par["m"] + n = par["n"] + k = par["k"] l = 6 A = np.random.randn(m, n) - A = A.astype(par['dtype']) + A = A.astype(par["dtype"]) p = np.random.permutation(n)[:k] - X = np.zeros((n, l), dtype=par['dtype']) + X = np.zeros((n, l), dtype=par["dtype"]) X[p, :] = np.abs(np.random.randn(k, l)) B = A.dot(X) - Xnn, _, _, _ = spg_mmv(A, B, 0, project=norm_l12nn_project, - primal_norm=norm_l12nn_primal, - dual_norm=norm_l12nn_dual, iter_lim=20, - verbosity=0) - assert Xnn.dtype == par['dtype'] + Xnn, _, _, _ = spg_mmv( + A, + B, + 0, + project=norm_l12nn_project, + primal_norm=norm_l12nn_primal, + dual_norm=norm_l12nn_dual, + iter_lim=20, + verbosity=0, + ) + assert Xnn.dtype == par["dtype"] assert np.any(Xnn < 0) == False -# temporary tests... will not be included in scipy later on -@pytest.mark.parametrize("par", [(par1), (par3), - (par1_sp), (par3_sp)]) +@pytest.mark.parametrize("par", [(par1), (par3), (par1_sp), (par3_sp)]) def test_lsqr(par): - """Compare local LSQR with scipy LSQR - """ + """Compare local LSQR with scipy LSQR""" + def Aprodfun(A, x, mode): if mode == 1: y = np.dot(A, x) @@ -346,8 +467,8 @@ def Aprodfun(A, x, mode): np.random.seed(0) # Create random m-by-n encoding matrix - m = par['m'] - n = par['n'] + m = par["m"] + n = par["n"] A = np.random.normal(0, 1, (m, n)) Aprod = lambda x, mode: Aprodfun(A, x, mode) x = np.ones(n) @@ -360,12 +481,24 @@ def Aprodfun(A, x, mode): itn_max = 500 show = 0 - xinv, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var = \ - lsqr(m, n, Aprod, y, damp, atol, btol, conlim, itn_max, show) - - xinv_sp, istop_sp, itn_sp, r1norm_sp, r2norm_sp, anorm_sp, \ - acond_sp, arnorm_sp, xnorm_sp, var_sp = \ - splsqr(A, y, damp, atol, btol, conlim, itn_max, show) - - assert_array_almost_equal(xinv, x, decimal=2) - assert_array_almost_equal(xinv_sp, x, decimal=2) \ No newline at end of file + xinv, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var = lsqr( + m, n, Aprod, y, damp, atol, btol, conlim, itn_max, show + ) + + ( + xinv_sp, + istop_sp, + itn_sp, + r1norm_sp, + r2norm_sp, + anorm_sp, + acond_sp, + arnorm_sp, + xnorm_sp, + var_sp, + ) = splsqr(A, y, damp, atol, btol, conlim, itn_max, show) + + assert_array_almost_equal(xinv, x, decimal=3 if par["dtype"] == np.float64 else 2) + assert_array_almost_equal( + xinv_sp, x, decimal=3 if par["dtype"] == np.float64 else 2 + ) diff --git a/readthedocs.yml b/readthedocs.yml index e17e869..4d61b30 100644 --- a/readthedocs.yml +++ b/readthedocs.yml @@ -4,4 +4,4 @@ python: version: 3.9 setup_py_install: true -requirements_file: requirements-dev.txt \ No newline at end of file +requirements_file: requirements-dev.txt diff --git a/requirements-dev.txt b/requirements-dev.txt index 0e33baa..d9741c3 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -6,9 +6,15 @@ jupyter pytest pytest-runner setuptools_scm +docutils<0.18 Sphinx sphinx-rtd-theme sphinx-gallery numpydoc nbsphinx -image \ No newline at end of file +image +pre-commit +autopep8 +isort +black +flake8 diff --git a/requirements.txt b/requirements.txt index 0796d28..9c558e3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1 @@ -numpy>=1.15.0 -scipy \ No newline at end of file +. 
diff --git a/setup.cfg b/setup.cfg index 6811fdc..efc2cf4 100755 --- a/setup.cfg +++ b/setup.cfg @@ -3,4 +3,4 @@ test=pytest [tool:pytest] addopts = --verbose -python_files = pytests/*.py \ No newline at end of file +python_files = pytests/*.py diff --git a/spgl1/__init__.py b/spgl1/__init__.py index dd50a6d..0bd229b 100755 --- a/spgl1/__init__.py +++ b/spgl1/__init__.py @@ -1,8 +1,24 @@ -from .spgl1 import * from .lsqr import lsqr +from .spgl1 import * + + +__all__ = [ + "oneprojector", + "norm_l1nn_primal", + "norm_l1nn_dual", + "norm_l1nn_project", + "norm_l12nn_primal", + "norm_l12nn_dual", + "norm_l12nn_project", + "spgl1", + "spg_bp", + "spg_bpdn", + "spg_lasso", + "spg_mmv", +] try: from .version import version as __version__ except ImportError: - __version__ = '0.0.0' \ No newline at end of file + __version__ = "0.0.0" diff --git a/spgl1/lsqr.py b/spgl1/lsqr.py index 129e4c6..619a720 100755 --- a/spgl1/lsqr.py +++ b/spgl1/lsqr.py @@ -1,166 +1,170 @@ from __future__ import division + import numpy as np -def lsqr( m, n, Aprod, b, damp, atol, btol, conlim, itnlim, show ): -# // % -# // % LSQR solves Ax = b or min ||b - Ax||_2 if damp = 0, -# // % or min || (b) - ( A )x || otherwise. -# // % || (0) (damp I) ||2 -# // % A is an m by n matrix defined or a function handle of aprod( mode,x ), -# // % that performs the matrix-vector operations. -# // % If mode = 1, aprod must return y = Ax without altering x. -# // % If mode = 2, aprod must return y = A'x without altering x. - -# // %----------------------------------------------------------------------- -# // % LSQR uses an iterative (conjugate-gradient-like) method. -# // % For further information, see -# // % 1. C. C. Paige and M. A. Saunders (1982a). -# // % LSQR: An algorithm for sparse linear equations and sparse least squares, -# // % ACM TOMS 8(1), 43-71. -# // % 2. C. C. Paige and M. A. Saunders (1982b). -# // % Algorithm 583. LSQR: Sparse linear equations and least squares problems, -# // % ACM TOMS 8(2), 195-209. -# // % 3. M. A. Saunders (1995). Solution of sparse rectangular systems using -# // % LSQR and CRAIG, BIT 35, 588-604. -# // % -# // % Input parameters: -# // % atol, btol are stopping tolerances. If both are 1.0e-9 (say), -# // % the final residual norm should be accurate to about 9 digits. -# // % (The final x will usually have fewer correct digits, -# // % depending on cond(A) and the size of damp.) -# // % conlim is also a stopping tolerance. lsqr terminates if an estimate -# // % of cond(A) exceeds conlim. For compatible systems Ax = b, -# // % conlim could be as large as 1.0e+12 (say). For least-squares -# // % problems, conlim should be less than 1.0e+8. -# // % Maximum precision can be obtained by setting -# // % atol = btol = conlim = zero, but the number of iterations -# // % may then be excessive. -# // % itnlim is an explicit limit on iterations (for safety). -# // % show = 1 gives an iteration log, -# // % show = 0 suppresses output. -# // % -# // % Output parameters: -# // % x is the final solution. -# // % istop gives the reason for termination. -# // % istop = 1 means x is an approximate solution to Ax = b. -# // % = 2 means x approximately solves the least-squares problem. -# // % r1norm = norm(r), where r = b - Ax. -# // % r2norm = sqrt( norm(r)^2 + damp^2 * norm(x)^2 ) -# // % = r1norm if damp = 0. -# // % anorm = estimate of Frobenius norm of Abar = [ A ]. -# // % [damp*I] -# // % acond = estimate of cond(Abar). -# // % arnorm = estimate of norm(A'*r - damp^2*x). -# // % xnorm = norm(x). 
-# // % var (if present) estimates all diagonals of (A'A)^{-1} (if damp=0) -# // % or more generally (A'A + damp^2*I)^{-1}. -# // % This is well defined if A has full column rank or damp > 0. -# // % (Not sure what var means if rank(A) < n and damp = 0.) -# // % -# // % -# // % 1990: Derived from Fortran 77 version of LSQR. -# // % 22 May 1992: bbnorm was used incorrectly. Replaced by anorm. -# // % 26 Oct 1992: More input and output parameters added. -# // % 01 Sep 1994: Matrix-vector routine is now a parameter 'aprodname'. -# // % Print log reformatted. -# // % 14 Jun 1997: show added to allow printing or not. -# // % 30 Jun 1997: var added as an optional output parameter. -# // % 07 Aug 2002: Output parameter rnorm replaced by r1norm and r2norm. -# // % Michael Saunders, Systems Optimization Laboratory, -# // % Dept of MS&E, Stanford University. -# // % 03 Jul 2007: Modified 'aprodname' to A, which can either be an m by n -# // % matrix, or a function handle. -# // % Ewout van den Berg, University of British Columbia -# // % 03 Jul 2007: Modified 'test2' condition, omitted 'test1'. -# // % Ewout van den Berg, University of British Columbia -# // %----------------------------------------------------------------------- + +def lsqr(m, n, Aprod, b, damp, atol, btol, conlim, itnlim, show): + # + # LSQR solves Ax = b or min ||b - Ax||_2 if damp = 0, + # or min || (b) - ( A )x || otherwise. + # || (0) (damp I) ||2 + # A is an m by n matrix defined or a function handle of aprod( mode,x ), + # that performs the matrix-vector operations. + # If mode = 1, aprod must return y = Ax without altering x. + # If mode = 2, aprod must return y = A'x without altering x. + + # ----------------------------------------------------------------------- + # LSQR uses an iterative (conjugate-gradient-like) method. + # For further information, see + # 1. C. C. Paige and M. A. Saunders (1982a). + # LSQR: An algorithm for sparse linear equations and sparse least squares, + # ACM TOMS 8(1), 43-71. + # 2. C. C. Paige and M. A. Saunders (1982b). + # Algorithm 583. LSQR: Sparse linear equations and least squares problems, + # ACM TOMS 8(2), 195-209. + # 3. M. A. Saunders (1995). Solution of sparse rectangular systems using + # LSQR and CRAIG, BIT 35, 588-604. + # + # Input parameters: + # atol, btol are stopping tolerances. If both are 1.0e-9 (say), + # the final residual norm should be accurate to about 9 digits. + # (The final x will usually have fewer correct digits, + # depending on cond(A) and the size of damp.) + # conlim is also a stopping tolerance. lsqr terminates if an estimate + # of cond(A) exceeds conlim. For compatible systems Ax = b, + # conlim could be as large as 1.0e+12 (say). For least-squares + # problems, conlim should be less than 1.0e+8. + # Maximum precision can be obtained by setting + # atol = btol = conlim = zero, but the number of iterations + # may then be excessive. + # itnlim is an explicit limit on iterations (for safety). + # show = 1 gives an iteration log, + # show = 0 suppresses output. + # + # Output parameters: + # x is the final solution. + # istop gives the reason for termination. + # istop = 1 means x is an approximate solution to Ax = b. + # = 2 means x approximately solves the least-squares problem. + # r1norm = norm(r), where r = b - Ax. + # r2norm = sqrt( norm(r)^2 + damp^2 * norm(x)^2 ) + # = r1norm if damp = 0. + # anorm = estimate of Frobenius norm of Abar = [ A ]. + # [damp*I] + # acond = estimate of cond(Abar). + # arnorm = estimate of norm(A'*r - damp^2*x). 
+ # xnorm = norm(x). + # var (if present) estimates all diagonals of (A'A)^{-1} (if damp=0) + # or more generally (A'A + damp^2*I)^{-1}. + # This is well defined if A has full column rank or damp > 0. + # (Not sure what var means if rank(A) < n and damp = 0.) + # + # + # 1990: Derived from Fortran 77 version of LSQR. + # 22 May 1992: bbnorm was used incorrectly. Replaced by anorm. + # 26 Oct 1992: More input and output parameters added. + # 01 Sep 1994: Matrix-vector routine is now a parameter 'aprodname'. + # Print log reformatted. + # 14 Jun 1997: show added to allow printing or not. + # 30 Jun 1997: var added as an optional output parameter. + # 07 Aug 2002: Output parameter rnorm replaced by r1norm and r2norm. + # Michael Saunders, Systems Optimization Laboratory, + # Dept of MS&E, Stanford University. + # 03 Jul 2007: Modified 'aprodname' to A, which can either be an m by n + # matrix, or a function handle. + # Ewout van den Berg, University of British Columbia + # 03 Jul 2007: Modified 'test2' condition, omitted 'test1'. + # Ewout van den Berg, University of British Columbia + # ----------------------------------------------------------------------- nprodA = 0 nprodAT = 0 -# // % Initialize. - msg = ('The exact solution is x = 0 ', - 'Ax - b is small enough, given atol, btol ', - 'The least-squares solution is good enough, given atol ', - 'The estimate of cond(Abar) has exceeded conlim ', - 'Ax - b is small enough for this machine ', - 'The least-squares solution is good enough for this machine', - 'Cond(Abar) seems to be too large for this machine ', - 'The iteration limit has been reached ') + # Initialize. + msg = ( + "The exact solution is x = 0 ", + "Ax - b is small enough, given atol, btol ", + "The least-squares solution is good enough, given atol ", + "The estimate of cond(Abar) has exceeded conlim ", + "Ax - b is small enough for this machine ", + "The least-squares solution is good enough for this machine", + "Cond(Abar) seems to be too large for this machine ", + "The iteration limit has been reached ", + ) wantvar = True var = np.zeros(n) if show: - print(' ') - print('LSQR Least-squares solution of Ax = b') - str1 = 'The matrix A has %8g rows and %8g cols' % (m, n) - str2 = 'damp = %20.14e wantvar = %8g' %(damp,wantvar) - str3 = 'atol = %8.2e conlim = %8.2e' %(atol, conlim) - str4 = 'btol = %8.2e itnlim = %8g' %(btol, itnlim) + print(" ") + print("LSQR Least-squares solution of Ax = b") + str1 = "The matrix A has %8g rows and %8g cols" % (m, n) + str2 = "damp = %20.14e wantvar = %8g" % (damp, wantvar) + str3 = "atol = %8.2e conlim = %8.2e" % (atol, conlim) + str4 = "btol = %8.2e itnlim = %8g" % (btol, itnlim) print(str1) print(str2) print(str3) print(str4) - itn = 0 - istop = 0 - nstop = 0 - ctol = 0 + itn = 0 + istop = 0 + nstop = 0 + ctol = 0 if conlim > 0: - ctol = 1./conlim - anorm = 0 - acond = 0 + ctol = 1.0 / conlim + anorm = 0 + acond = 0 dampsq = damp**2.0 ddnorm = 0 - res2 = 0 - xnorm = 0 + res2 = 0 + xnorm = 0 xxnorm = 0 - z = 0 - cs2 = -1 - sn2 = 0 + z = 0 + cs2 = -1 + sn2 = 0 # % Set up the first vectors u and v for the bidiagonalization. 
# % These satisfy beta*u = b, alfa*v = A'u.;' - u = b[0:m] - x = np.zeros(n) - alfa = 0 - beta = np.linalg.norm( u ) + u = b[0:m] + x = np.zeros(n) + alfa = 0 + beta = np.linalg.norm(u) if beta > 0: - u = u/beta - v = Aprod(u,2) - nprodAT+=1 - alfa = np.linalg.norm( v ) + u = u / beta + v = Aprod(u, 2) + nprodAT += 1 + alfa = np.linalg.norm(v) if alfa > 0: - v = v/alfa + v = v / alfa w = v.copy() arnorm = alfa * beta if arnorm == 0: - # if show, disp(msg(1,:)); end + # if show, disp(msg(1,:)); end return - arnorm0= arnorm + arnorm0 = arnorm rhobar = alfa phibar = beta - bnorm = beta - rnorm = beta + bnorm = beta + rnorm = beta r1norm = rnorm r2norm = rnorm - head1 = ' Itn x(1) r1norm r2norm ' - head2 = ' Compatible LS Norm A Cond A' + head1 = " Itn x(1) r1norm r2norm " + head2 = " Compatible LS Norm A Cond A" if show: - print(' ') + print(" ") print(head1 + head2) - test1 = 1 - test2 = alfa / beta - str1 = '%6g %12.5e' %(itn, x[0]) - str2 = ' %10.3e %10.3e' %(r1norm, r2norm) - str3 = ' %8.1e %8.1e' %(test1, test2 ) - print(str1+str2+str3) + test1 = 1 + test2 = alfa / beta + str1 = "%6g %12.5e" % (itn, x[0]) + str2 = " %10.3e %10.3e" % (r1norm, r2norm) + str3 = " %8.1e %8.1e" % (test1, test2) + print(str1 + str2 + str3) # %------------------------------------------------------------------ # % Main iteration loop. @@ -172,75 +176,75 @@ def lsqr( m, n, Aprod, b, damp, atol, btol, conlim, itnlim, show ): # % beta*u = a*v - alfa*u, # % alfa*v = A'*u - beta*v.' - u = Aprod(v,1) - alfa*u - nprodA+=1 - beta = np.linalg.norm( u ) + u = Aprod(v, 1) - alfa * u + nprodA += 1 + beta = np.linalg.norm(u) if beta > 0: - u = u/beta - anorm = np.linalg.norm([anorm,alfa,beta,damp]) - v = Aprod(u, 2) - beta*v - nprodAT+=1 - alfa = np.linalg.norm( v ) + u = u / beta + anorm = np.linalg.norm([anorm, alfa, beta, damp]) + v = Aprod(u, 2) - beta * v + nprodAT += 1 + alfa = np.linalg.norm(v) if alfa > 0: - v = v/alfa + v = v / alfa # % Use a plane rotation to eliminate the damping parameter. # % This alters the diagonal (rhobar) of the lower-bidiagonal matrix. - rhobar1 = np.linalg.norm([rhobar,damp]) - cs1 = rhobar / rhobar1 - sn1 = damp / rhobar1 - psi = sn1 * phibar - phibar = cs1 * phibar + rhobar1 = np.linalg.norm([rhobar, damp]) + cs1 = rhobar / rhobar1 + sn1 = damp / rhobar1 + psi = sn1 * phibar + phibar = cs1 * phibar # % Use a plane rotation to eliminate the subdiagonal element (beta) # % of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix. - rho = np.linalg.norm([rhobar1,beta]) - cs = rhobar1/ rho - sn = beta / rho - theta = sn * alfa - rhobar = - cs * alfa - phi = cs * phibar - phibar = sn * phibar - tau = sn * phi + rho = np.linalg.norm([rhobar1, beta]) + cs = rhobar1 / rho + sn = beta / rho + theta = sn * alfa + rhobar = -cs * alfa + phi = cs * phibar + phibar = sn * phibar + tau = sn * phi # % Update x and w. - t1 = phi /rho - t2 = - theta/rho - dk = w/rho + t1 = phi / rho + t2 = -theta / rho + dk = w / rho - x = x + t1*w - w = v + t2*w - ddnorm = ddnorm + np.linalg.norm(dk)**2.0 + x = x + t1 * w + w = v + t2 * w + ddnorm = ddnorm + np.linalg.norm(dk) ** 2.0 if wantvar: - var = var + np.dot(dk,dk) + var = var + np.dot(dk, dk) # % Use a plane rotation on the right to eliminate the # % super-diagonal element (theta) of the upper-bidiagonal matrix. # % Then use the result to estimate norm(x). - delta = sn2 * rho - gambar = - cs2 * rho - rhs = phi - delta * z - zbar = rhs / gambar - xnorm = np.sqrt(xxnorm + zbar**2.) 
- gamma = np.linalg.norm([gambar,theta]) - cs2 = gambar / gamma - sn2 = theta / gamma - z = rhs / gamma - xxnorm = xxnorm + z**2. + delta = sn2 * rho + gambar = -cs2 * rho + rhs = phi - delta * z + zbar = rhs / gambar + xnorm = np.sqrt(xxnorm + zbar**2.0) + gamma = np.linalg.norm([gambar, theta]) + cs2 = gambar / gamma + sn2 = theta / gamma + z = rhs / gamma + xxnorm = xxnorm + z**2.0 # % Test for convergence. # % First, estimate the condition of the matrix Abar, # % and the norms of rbar and Abar'rbar.' - acond = anorm * np.sqrt( ddnorm ) - res1 = phibar**2. - res2 = res2 + psi**2. - rnorm = np.sqrt( res1 + res2 ) - arnorm = alfa * abs( tau ) + acond = anorm * np.sqrt(ddnorm) + res1 = phibar**2.0 + res2 = res2 + psi**2.0 + rnorm = np.sqrt(res1 + res2) + arnorm = alfa * abs(tau) # % 07 Aug 2002: # % Distinguish between @@ -251,21 +255,21 @@ def lsqr( m, n, Aprod, b, damp, atol, btol, conlim, itnlim, show ): # % r1norm = sqrt(r2norm^2 - damp^2*||x||^2). # % Although there is cancellation, it might be accurate enough. - r1sq = rnorm**2. - dampsq * xxnorm - r1norm = np.sqrt( abs(r1sq) ) + r1sq = rnorm**2.0 - dampsq * xxnorm + r1norm = np.sqrt(abs(r1sq)) if r1sq < 0: - r1norm = - r1norm - r2norm = rnorm.copy() + r1norm = -r1norm + r2norm = rnorm.copy() # % Now use these norms to estimate certain other quantities, # % some of which will be small near a solution. - test1 = rnorm / bnorm - test2 = arnorm / arnorm0 + test1 = rnorm / bnorm + test2 = arnorm / arnorm0 # % test2 = arnorm/( anorm * rnorm ); - test3 = 1. / acond - t1 = test1 / (1. + anorm * xnorm / bnorm) - rtol = btol + atol * anorm * xnorm / bnorm + test3 = 1.0 / acond + t1 = test1 / (1.0 + anorm * xnorm / bnorm) + rtol = btol + atol * anorm * xnorm / bnorm # % The following tests guard against extremely small values of # % atol, btol or ctol. (The user may have set any or all of @@ -275,68 +279,68 @@ def lsqr( m, n, Aprod, b, damp, atol, btol, conlim, itnlim, show ): if itn >= itnlim: istop = 7 - if 1 + test3 <= 1: + if 1 + test3 <= 1: istop = 6 - if 1 + test2 <= 1: + if 1 + test2 <= 1: istop = 5 - if 1 + t1 <= 1: + if 1 + t1 <= 1: istop = 4 # % Allow for tolerances set by the user. - if test3 <= ctol: + if test3 <= ctol: istop = 3 - if test2 <= atol: + if test2 <= atol: istop = 2 - if test1 <= rtol: + if test1 <= rtol: istop = 1 # % See if it is time to print something. if show: - prnt = 0; + prnt = 0 if n <= 40: prnt = 1 - if itn <= 10: + if itn <= 10: prnt = 1 - if itn >= itnlim-10: + if itn >= itnlim - 10: prnt = 1 if itn % 10 == 0: prnt = 1 - if test3 <= 2*ctol: + if test3 <= 2 * ctol: prnt = 1 - if test2 <= 10*atol: + if test2 <= 10 * atol: prnt = 1 - if test1 <= 10*rtol: + if test1 <= 10 * rtol: prnt = 1 - if istop != 0: + if istop != 0: prnt = 1 if prnt == 1: - str1 = '%6g %12.5e' %(itn, x[0]) - str2 = ' %10.3e %10.3e' %(r1norm, r2norm ) - str3 = ' %8.1e %8.1e' %(test1, test2 ) - str4 = ' %8.1e %8.1e' %(anorm, acond ) - print(str1+str2+str3+str4) + str1 = "%6g %12.5e" % (itn, x[0]) + str2 = " %10.3e %10.3e" % (r1norm, r2norm) + str3 = " %8.1e %8.1e" % (test1, test2) + str4 = " %8.1e %8.1e" % (anorm, acond) + print(str1 + str2 + str3 + str4) if istop > 0: break # % End of iteration loop. # % Print the stopping condition. 
if show: - print(' ') - print('LSQR finished') + print(" ") + print("LSQR finished") print(msg[istop]) - print(' ') - str1 = 'istop =%8g r1norm =%8.1e' %(istop, r1norm ) - str2 = 'anorm =%8.1e arnorm =%8.1e' %(anorm, arnorm ) - str3 = 'itn =%8g r2norm =%8.1e' %(itn, r2norm ) - str4 = 'acond =%8.1e xnorm =%8.1e' %( acond, xnorm ) - print(str1 +' '+ str2) - print(str3 +' '+ str4) - print(' ') + print(" ") + str1 = "istop =%8g r1norm =%8.1e" % (istop, r1norm) + str2 = "anorm =%8.1e arnorm =%8.1e" % (anorm, arnorm) + str3 = "itn =%8g r2norm =%8.1e" % (itn, r2norm) + str4 = "acond =%8.1e xnorm =%8.1e" % (acond, xnorm) + print(str1 + " " + str2) + print(str3 + " " + str4) + print(" ") # end - print('nprodA', nprodA) - print('nprodA', nprodAT) + print("nprodA", nprodA) + print("nprodA", nprodAT) return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var diff --git a/spgl1/spgl1.py b/spgl1/spgl1.py index df74abb..73a4318 100755 --- a/spgl1/spgl1.py +++ b/spgl1/spgl1.py @@ -1,10 +1,11 @@ -from __future__ import division, absolute_import +from __future__ import absolute_import, division + import logging import time -import numpy as np +import numpy as np from scipy.sparse import spdiags -from scipy.sparse.linalg import aslinearoperator, LinearOperator, lsqr +from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsqr logger = logging.getLogger(__name__) @@ -36,6 +37,7 @@ class _LSQRprod(LinearOperator): This operator is used to augument the spgl1 operator during subspace minimization via LSQR. """ + def __init__(self, A, nnz_idx, ebar, n): self.A = A self.nnz_idd = nnz_idx @@ -44,20 +46,23 @@ def __init__(self, A, nnz_idx, ebar, n): self.n = n self.shape = (A.shape[0], self.nbar) self.dtype = A.dtype + def _matvec(self, x): y = np.zeros(self.n, dtype=x.dtype) - y[self.nnz_idd] = \ - x - (1. / self.nbar) * np.dot(np.dot(np.conj(self.ebar), - x), self.ebar) + y[self.nnz_idd] = x - (1.0 / self.nbar) * np.dot( + np.dot(np.conj(self.ebar), x), self.ebar + ) z = self.A.matvec(y) return z + def _rmatvec(self, x): y = self.A.rmatvec(x) - z = y[self.nnz_idd] - \ - (1. / self.nbar) * np.dot(np.dot(np.conj(self.ebar), - y[self.nnz_idd]), self.ebar) + z = y[self.nnz_idd] - (1.0 / self.nbar) * np.dot( + np.dot(np.conj(self.ebar), y[self.nnz_idd]), self.ebar + ) return z + class _blockdiag(LinearOperator): """Block-diagonal operator. @@ -66,32 +71,36 @@ class _blockdiag(LinearOperator): matrices of size ``N x G`` and ``M x G`` respectively, and the operator ``A`` is applied to each column of the vectors. 
""" + def __init__(self, A, m, n, g): self.m = m self.n = n self.g = g self.A = A self.AH = A.H - self.shape = (m*g, n*g) + self.shape = (m * g, n * g) self.dtype = A.dtype + def _matvec(self, x): x = x.reshape(self.n, self.g) y = self.A.matmat(x) return y.ravel() + def _rmatvec(self, x): x = x.reshape(self.m, self.g) y = self.AH.matmat(x) return y.ravel() + # private methods def _printf(fid, message): - """Print a message in file (fid=file ID) or on screen (fid=None) - """ + """Print a message in file (fid=file ID) or on screen (fid=None)""" if fid is None: print(message) else: fid.write(message) + def _oneprojector_i(b, tau): n = b.size x = np.zeros(n, dtype=b.dtype) @@ -118,11 +127,12 @@ def _oneprojector_i(b, tau): x[x < 0] = 0 return x + def _oneprojector_d(b, d, tau): n = b.size x = np.zeros(n, dtype=b.dtype) - if tau >= np.linalg.norm(d*b, 1): + if tau >= np.linalg.norm(d * b, 1): x = b.copy() elif tau < np.spacing(1): pass @@ -133,27 +143,29 @@ def _oneprojector_d(b, d, tau): d = d[idx] # Optimize - csdb = np.cumsum(d*b) - csd2 = np.cumsum(d*d) - alpha1 = (csdb-tau)/csd2 - alpha2 = b/d + csdb = np.cumsum(d * b) + csd2 = np.cumsum(d * d) + alpha1 = (csdb - tau) / csd2 + alpha2 = b / d ggg = np.where(alpha1 >= alpha2)[0] if ggg.size == 0: i = n else: i = ggg[0] if i > 0: - soft = alpha1[i-1] + soft = alpha1[i - 1] x[idx[0:i]] = b[0:i] - d[0:i] * max(0, soft) return x + def _oneprojector_di(b, d, tau): if np.isscalar(d): - p = _oneprojector_i(b, tau/abs(d)) + p = _oneprojector_i(b, tau / abs(d)) else: p = _oneprojector_d(b, d, tau) return p + def oneprojector(b, d, tau): """One projector. @@ -182,7 +194,7 @@ def oneprojector(b, d, tau): """ if not np.isscalar(d) and b.size != d.size: - raise ValueError('vectors b and d must have the same length') + raise ValueError("vectors b and d must have the same length") if np.isscalar(d) and d == 0: x = b.copy() @@ -193,7 +205,7 @@ def oneprojector(b, d, tau): # Perform the projection if np.isscalar(d): - x = _oneprojector_di(b, 1., tau/d) + x = _oneprojector_di(b, 1.0, tau / d) else: d = np.abs(d) # Get index of non-zero entries of d, set x equal b for others @@ -205,6 +217,7 @@ def oneprojector(b, d, tau): return x + def _norm_l1_primal(x, weights): """L1 norm with weighted input vector @@ -221,7 +234,8 @@ def _norm_l1_primal(x, weights): L1 norm """ - return np.linalg.norm(x*weights, 1) + return np.linalg.norm(x * weights, 1) + def _norm_l1_dual(x, weights): """L_inf norm with weighted input vector (dual to L1 norm) @@ -239,7 +253,8 @@ def _norm_l1_dual(x, weights): L_inf norm """ - return np.linalg.norm(x/weights, np.inf) + return np.linalg.norm(x / weights, np.inf) + def _norm_l1_project(x, weights, tau): """Projection onto the one-norm ball @@ -270,6 +285,7 @@ def _norm_l1_project(x, weights, tau): xproj = x * xc return xproj + def _norm_l12_primal(g, x, weights): """L1 norm with weighted input vector with number of groups equal to g Parameters @@ -293,6 +309,7 @@ def _norm_l12_primal(g, x, weights): nrm = np.sum(weights * np.sqrt(np.sum(xx**2, axis=1))) return nrm + def _norm_l12_dual(g, x, weights): """L_inf norm with weighted input vector with number of groups equal to g Parameters @@ -313,9 +330,10 @@ def _norm_l12_dual(g, x, weights): xx = xx.reshape(m, g) if np.iscomplexobj(xx): xx = np.abs(xx) - nrm = np.linalg.norm(np.sqrt(np.sum(xx**2, axis=1))/weights, np.inf) + nrm = np.linalg.norm(np.sqrt(np.sum(xx**2, axis=1)) / weights, np.inf) return nrm + def _norm_l12_project(g, x, weights, tau): """Projection with number of groups equal 
to g Parameters @@ -347,6 +365,7 @@ def _norm_l12_project(g, x, weights, tau): xx = spdiags(xc, 0, m, m) * xx return xx.flatten() + def norm_l1nn_primal(x, weights): """Non-negative L1 gauge function @@ -369,6 +388,7 @@ def norm_l1nn_primal(x, weights): p = np.linalg.norm(x * weights, 1) return p + def norm_l1nn_dual(x, weights): """Dual of non-negative L1 gauge function @@ -387,9 +407,10 @@ def norm_l1nn_dual(x, weights): """ xx = x.copy() xx[xx < 0] = 0 - p = np.linalg.norm(xx/weights, np.inf) + p = np.linalg.norm(xx / weights, np.inf) return p + def norm_l1nn_project(x, weights, tau): """Projection onto the non-negative part of the one-norm ball @@ -412,6 +433,7 @@ def norm_l1nn_project(x, weights, tau): xx[xx < 0] = 0 return _norm_l1_project(xx, weights, tau) + def norm_l12nn_primal(g, x, weights): """Non-negative L1 norm with weighted input vector with number of groups equal to g @@ -439,9 +461,10 @@ def norm_l12nn_primal(g, x, weights): xm = x.reshape(m, g) if np.iscomplexobj(x): xm = np.abs(xm) - nrm = np.sum(weights*np.sqrt(np.sum(xm**2, axis=1))) + nrm = np.sum(weights * np.sqrt(np.sum(xm**2, axis=1))) return nrm + def norm_l12nn_dual(g, x, weights): """Dual on non-legative L1L norm with weighted input vector with number of groups equal to g @@ -468,9 +491,10 @@ def norm_l12nn_dual(g, x, weights): if np.iscomplexobj(xx): xx = np.abs(xx) xx[xx < 0] = 0 - nrm = np.linalg.norm(np.sqrt(np.sum(xx ** 2, axis=1)) / weights, np.inf) + nrm = np.linalg.norm(np.sqrt(np.sum(xx**2, axis=1)) / weights, np.inf) return nrm + def norm_l12nn_project(g, x, weights, tau): """Projection with number of groups equal to g @@ -497,6 +521,7 @@ def norm_l12nn_project(g, x, weights, tau): xx[xx < 0] = 0 return _norm_l12_project(g, xx, weights, tau) + def _spg_line_curvy(x, g, fmax, A, b, project, weights, tau): """Projected backtracking linesearch. @@ -543,9 +568,9 @@ def _spg_line_curvy(x, g, fmax, A, b, project, weights, tau): """ gamma = 1e-4 maxiters = 10 - step = 1. - snorm = 0. - scale = 1. + step = 1.0 + snorm = 0.0 + scale = 1.0 nsafe = 0 niters = 0 n = x.size @@ -554,12 +579,12 @@ def _spg_line_curvy(x, g, fmax, A, b, project, weights, tau): while 1: # Evaluate trial point and function value. start_time_project = time.time() - xnew = project(x - (step*scale*g).astype(g.dtype), weights, tau) + xnew = project(x - (step * scale * g).astype(g.dtype), weights, tau) timeproject += time.time() - start_time_project start_time_matprod = time.time() rnew = b - A.matvec(xnew) timematprod += time.time() - start_time_matprod - fnew = np.abs(np.conj(rnew).dot(rnew)) / 2. + fnew = np.abs(np.conj(rnew).dot(rnew)) / 2.0 s = xnew - x gts = scale * np.real(np.dot(np.conj(g), s)) @@ -567,7 +592,7 @@ def _spg_line_curvy(x, g, fmax, A, b, project, weights, tau): err = EXIT_NODESCENT_spgline break - if fnew < fmax + gamma*step*gts: + if fnew < fmax + gamma * step * gts: err = EXIT_CONVERGED_spgline break elif niters >= maxiters: @@ -576,7 +601,7 @@ def _spg_line_curvy(x, g, fmax, A, b, project, weights, tau): # New linesearch iteration. niters += 1 - step /= 2. + step /= 2.0 # Safeguard: If stepMax is huge, then even damped search # directions can give exactly the same point after projection. If @@ -586,10 +611,11 @@ def _spg_line_curvy(x, g, fmax, A, b, project, weights, tau): snorm = np.linalg.norm(s) / np.sqrt(n) if abs(snorm - snormold) <= 1e-6 * snorm: gnorm = np.linalg.norm(g) / np.sqrt(n) - scale = snorm / gnorm / (2.**nsafe) - nsafe += 1. 
+ scale = snorm / gnorm / (2.0**nsafe) + nsafe += 1.0 return fnew, xnew, rnew, niters, step, err, timeproject, timematprod + def _spg_line(f, x, d, gtd, fmax, A, b): """Non-monotone linesearch. @@ -627,24 +653,24 @@ def _spg_line(f, x, d, gtd, fmax, A, b): """ maxiters = 10 - step = 1. + step = 1.0 niters = 0 gamma = 1e-4 gtd = -abs(gtd) timematprod = 0 while 1: # Evaluate trial point and function value. - xnew = x + step*d + xnew = x + step * d start_time_matprod = time.time() rnew = b - A.matvec(xnew) timematprod += time.time() - start_time_matprod - fnew = abs(np.conj(rnew).dot(rnew)) / 2. + fnew = abs(np.conj(rnew).dot(rnew)) / 2.0 # Check exit conditions. - if fnew < fmax + gamma*step*gtd: # Sufficient descent condition. + if fnew < fmax + gamma * step * gtd: # Sufficient descent condition. err = EXIT_CONVERGED_spgline break - elif niters >= maxiters: # Too many linesearch iterations. + elif niters >= maxiters: # Too many linesearch iterations. err = EXIT_ITERATIONS_spgline break @@ -653,14 +679,15 @@ def _spg_line(f, x, d, gtd, fmax, A, b): # Safeguarded quadratic interpolation. if step <= 0.1: - step /= 2. + step /= 2.0 else: - tmp = (-gtd*step**2.) / (2*(fnew-f-step*gtd)) - if tmp < 0.1 or tmp > 0.9*step or np.isnan(tmp): - tmp = step / 2. + tmp = (-gtd * step**2.0) / (2 * (fnew - f - step * gtd)) + if tmp < 0.1 or tmp > 0.9 * step or np.isnan(tmp): + tmp = step / 2.0 step = tmp return fnew, xnew, rnew, niters, err, timematprod + def _active_vars(x, g, nnz_idx, opttol, weights, dual_norm): """Find the current active set. @@ -676,8 +703,8 @@ def _active_vars(x, g, nnz_idx, opttol, weights, dual_norm): Number of elements that changed in the support. """ - xtol = min([.1, 10.*opttol]) - gtol = min([.1, 10.*opttol]) + xtol = min([0.1, 10.0 * opttol]) + gtol = min([0.1, 10.0 * opttol]) gnorm = dual_norm(g, weights) nnz_old = nnz_idx @@ -702,13 +729,31 @@ def _active_vars(x, g, nnz_idx, opttol, weights, dual_norm): return nnz_x, nnz_g, nnz_idx, nnz_diff -def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, - iter_lim=None, n_prev_vals=3, bp_tol=1e-6, - ls_tol=1e-6, opt_tol=1e-4, dec_tol=1e-4, step_min=1e-16, - step_max=1e5, active_set_niters=np.inf, subspace_min=False, - iscomplex=False, max_matvec=np.inf, weights=None, - project=_norm_l1_project, primal_norm=_norm_l1_primal, - dual_norm=_norm_l1_dual): +def spgl1( + A, + b, + tau=0, + sigma=0, + x0=None, + fid=None, + verbosity=0, + iter_lim=None, + n_prev_vals=3, + bp_tol=1e-6, + ls_tol=1e-6, + opt_tol=1e-4, + dec_tol=1e-4, + step_min=1e-16, + step_max=1e5, + active_set_niters=np.inf, + subspace_min=False, + iscomplex=False, + max_matvec=np.inf, + weights=None, + project=_norm_l1_project, + primal_norm=_norm_l1_primal, + dual_norm=_norm_l1_dual, +): r"""SPGL1 solver. Solve basis pursuit (BP), basis pursuit denoise (BPDN), or LASSO problems @@ -855,28 +900,28 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, if iter_lim is None: iter_lim = 10 * m - max_line_errors = 10 # Maximum number of line-search failures. - piv_tol = 1e-12 # Threshold for significant Newton step. - max_matvec = max(3, max_matvec) # Max number of allowed matvec/rmatvec. + max_line_errors = 10 # Maximum number of line-search failures. + piv_tol = 1e-12 # Threshold for significant Newton step. + max_matvec = max(3, max_matvec) # Max number of allowed matvec/rmatvec. # Initialize local variables. - niters = 0 # Total SPGL1 iterations. - niters_lsqr = 0 # Total LSQR iterations. 
- nprodA = 0 # Number of matvec operations - nprodAt = 0 # Number of rmatvec operations - last_fv = np.full(10, -np.inf) # Last m function values. + niters = 0 # Total SPGL1 iterations. + niters_lsqr = 0 # Total LSQR iterations. + nprodA = 0 # Number of matvec operations + nprodAt = 0 # Number of rmatvec operations + last_fv = np.full(10, -np.inf) # Last m function values. nline_tot = 0 # Total number of linesearch steps. print_tau = False - n_newton = 0 # Number of Newton iterations + n_newton = 0 # Number of Newton iterations bnorm = np.linalg.norm(b) stat = False - time_project = 0 # Time spent in projections - time_matprod = 0 # Time spent in matvec computations - nnz_niters = 0 # No. of iterations with fixed pattern. - nnz_idx = None # Active-set indicator. - subspace = False # Flag if did subspace min in current itn. - stepg = 1 # Step length for projected gradient. - test_updatetau = False # Previous step did not update tau + time_project = 0 # Time spent in projections + time_matprod = 0 # Time spent in matvec computations + nnz_niters = 0 # No. of iterations with fixed pattern. + nnz_idx = None # Active-set indicator. + subspace = False # Flag if did subspace min in current itn. + stepg = 1 # Step length for projected gradient. + test_updatetau = False # Previous step did not update tau # Determine initial x and see if problem is complex realx = np.isreal(A).all() and np.isreal(b).all() @@ -895,21 +940,21 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, # needs to apply, so the check was removed. if weights is not None: if not np.isfinite(weights).all(): - raise ValueError('Entries in weights must be finite') + raise ValueError("Entries in weights must be finite") if np.any(weights <= 0): - raise ValueError('Entries in weights must be strictly positive') + raise ValueError("Entries in weights must be strictly positive") else: weights = 1 # Quick exit if sigma >= ||b||. Set tau = 0 to short-circuit the loop. if bnorm <= sigma: - logger.warning('W: sigma >= ||b||. Exact solution is x = 0.') + logger.warning("W: sigma >= ||b||. Exact solution is x = 0.") tau = 0 single_tau = True # Do not do subspace minimization if x is complex. if not realx and subspace_min: - logger.warning('W: Subspace minimization disabled when variables are complex.') + logger.warning("W: Subspace minimization disabled when variables are complex.") subspace_min = False #% Pre-allocate iteration info vectors @@ -919,43 +964,66 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, # Log header. if verbosity >= 1: - _printf(fid, '') - _printf(fid, '='*80+'') - _printf(fid, 'SPGL1') - _printf(fid, '='*80+'') - _printf(fid, '%-22s: %8i %4s' % ('No. rows', m, '')) - _printf(fid, '%-22s: %8i\n' % ('No. columns', n)) - _printf(fid, '%-22s: %8.2e %4s' % ('Initial tau', tau, '')) - _printf(fid, '%-22s: %8.2e\n' % ('Two-norm of b', bnorm)) - _printf(fid, '%-22s: %8.2e %4s' % ('Optimality tol', opt_tol, '')) + _printf(fid, "") + _printf(fid, "=" * 80 + "") + _printf(fid, "SPGL1") + _printf(fid, "=" * 80 + "") + _printf(fid, "%-22s: %8i %4s" % ("No. rows", m, "")) + _printf(fid, "%-22s: %8i\n" % ("No. 
columns", n)) + _printf(fid, "%-22s: %8.2e %4s" % ("Initial tau", tau, "")) + _printf(fid, "%-22s: %8.2e\n" % ("Two-norm of b", bnorm)) + _printf(fid, "%-22s: %8.2e %4s" % ("Optimality tol", opt_tol, "")) if single_tau: - _printf(fid, '%-22s: %8.2e\n' % ('Target one-norm of x', tau)) + _printf(fid, "%-22s: %8.2e\n" % ("Target one-norm of x", tau)) else: - _printf(fid, '%-22s: %8.2e\n' % ('Target objective', sigma)) - _printf(fid, '%-22s: %8.2e %4s' % ('Basis pursuit tol', bp_tol, '')) - _printf(fid, '%-22s: %8i\n' % ('Maximum iterations', iter_lim)) + _printf(fid, "%-22s: %8.2e\n" % ("Target objective", sigma)) + _printf(fid, "%-22s: %8.2e %4s" % ("Basis pursuit tol", bp_tol, "")) + _printf(fid, "%-22s: %8i\n" % ("Maximum iterations", iter_lim)) if verbosity >= 2: if single_tau: - logb = '%5i %13.7e %13.7e %9.2e %6.1f %6i %6i %6s' - logh = '%5s %13s %13s %9s %6s %6s %6s\n' - _printf(fid, logh % ('iterr', 'Objective', 'Relative Gap', - 'gnorm', 'stepg', 'nnz_x', 'nnz_g')) + logb = "%5i %13.7e %13.7e %9.2e %6.1f %6i %6i %6s" + logh = "%5s %13s %13s %9s %6s %6s %6s\n" + _printf( + fid, + logh + % ( + "iterr", + "Objective", + "Relative Gap", + "gnorm", + "stepg", + "nnz_x", + "nnz_g", + ), + ) else: - logb = '%5i %13.7e %13.7e %9.2e %9.3e %6.1f %6i %6i %6s' - logh = '%5s %13s %13s %9s %9s %6s %6s %6s %6s\n' - _printf(fid, logh % ('iterr', 'Objective', 'Relative Gap', - 'Rel Error', 'gnorm', 'stepg', 'nnz_x', - 'nnz_g', 'tau')) + logb = "%5i %13.7e %13.7e %9.2e %9.3e %6.1f %6i %6i %6s" + logh = "%5s %13s %13s %9s %9s %6s %6s %6s %6s\n" + _printf( + fid, + logh + % ( + "iterr", + "Objective", + "Relative Gap", + "Rel Error", + "gnorm", + "stepg", + "nnz_x", + "nnz_g", + "tau", + ), + ) # Project the starting point and evaluate function and gradient. start_time_project = time.time() x = project(x, weights, tau) time_project += time.time() - start_time_project start_time_matvec = time.time() - r = b - A.matvec(x) # r = b - Ax - g = -A.rmatvec(r) # g = -A'r + r = b - A.matvec(x) # r = b - Ax + g = -A.rmatvec(r) # g = -A'r time_matprod += time.time() - start_time_matvec - f = np.linalg.norm(r)**2 / 2. + f = np.linalg.norm(r) ** 2 / 2.0 nprodA += 1 nprodAt += 1 @@ -970,10 +1038,10 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, dx = project(x - g, weights, tau) - x time_project += time.time() - start_time_project dxnorm = np.linalg.norm(dx, np.inf) - if dxnorm < (1. / step_max): + if dxnorm < (1.0 / step_max): gstep = step_max else: - gstep = min(step_max, max(step_min, 1. / dxnorm)) + gstep = min(step_max, max(step_min, 1.0 / dxnorm)) # Main iteration loop. while 1: @@ -982,30 +1050,31 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, # Compute quantities needed for log and exit conditions. gnorm = dual_norm(-g, weights) rnorm = np.linalg.norm(r) - gap = np.dot(np.conj(r), r-b) + tau*gnorm - rgap = abs(gap) / max(1., f) + gap = np.dot(np.conj(r), r - b) + tau * gnorm + rgap = abs(gap) / max(1.0, f) aerror1 = rnorm - sigma - aerror2 = f - sigma**2. / 2. - rerror1 = abs(aerror1) / max(1., rnorm) - rerror2 = abs(aerror2) / max(1., f) + aerror2 = f - sigma**2.0 / 2.0 + rerror1 = abs(aerror1) / max(1.0, rnorm) + rerror2 = abs(aerror2) / max(1.0, f) #% Count number of consecutive iterations with identical support. 
nnz_old = nnz_idx - nnz_x, nnz_g, nnz_idx, nnz_diff = _active_vars(x, g, nnz_idx, opt_tol, - weights, dual_norm) + nnz_x, nnz_g, nnz_idx, nnz_diff = _active_vars( + x, g, nnz_idx, opt_tol, weights, dual_norm + ) if nnz_diff: nnz_niters = 0 else: nnz_niters += nnz_niters - if nnz_niters+1 >= active_set_niters: + if nnz_niters + 1 >= active_set_niters: stat = EXIT_ACTIVE_SET # Single tau: Check if were optimal. # The 2nd condition is there to guard against large tau. if single_tau: - if rgap <= opt_tol or rnorm < opt_tol*bnorm: + if rgap <= opt_tol or rnorm < opt_tol * bnorm: stat = EXIT_OPTIMAL - else: # Multiple tau: Check if found root and/or if tau needs updating. + else: # Multiple tau: Check if found root and/or if tau needs updating. # Test if a least-squares solution has been found if gnorm <= ls_tol * rnorm: stat = EXIT_LEAST_SQUARES @@ -1013,24 +1082,29 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, # The problem is nearly optimal for the current tau. # Check optimality of the current root. if rnorm <= sigma: - stat = EXIT_SUBOPTIMAL_BP # Found suboptimal BP sol. + stat = EXIT_SUBOPTIMAL_BP # Found suboptimal BP sol. if rerror1 <= opt_tol: - stat = EXIT_ROOT_FOUND # Found approx root. + stat = EXIT_ROOT_FOUND # Found approx root. if rnorm <= bp_tol * bnorm: - stat = EXIT_BPSOL_FOUND # Resid minimzd -> BP sol. + stat = EXIT_BPSOL_FOUND # Resid minimzd -> BP sol. fchange = np.abs(f - fold) test_relchange1 = fchange <= dec_tol * f test_relcchange2 = fchange <= 1e-1 * f * (np.abs(rnorm - sigma)) - test_updatetau = ((test_relchange1 and rnorm > 2 * sigma) or \ - (test_relcchange2 and rnorm <= 2 * sigma)) and \ - not stat and not test_updatetau + test_updatetau = ( + ( + (test_relchange1 and rnorm > 2 * sigma) + or (test_relcchange2 and rnorm <= 2 * sigma) + ) + and not stat + and not test_updatetau + ) if test_updatetau: # Update tau. tau_old = tau tau = max(0, tau + (rnorm * aerror1) / gnorm) n_newton += 1 - print_tau = np.abs(tau_old - tau) >= 1e-6 * tau # For log only. + print_tau = np.abs(tau_old - tau) >= 1e-6 * tau # For log only. if tau < tau_old: # The one-norm ball has decreased. Need to make sure that # the next iterate is feasible, which we do by projecting it. @@ -1041,10 +1115,10 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, # Update the residual, gradient, and function value start_time_matvec = time.time() r = b - A.matvec(x) - g = - A.rmatvec(r) + g = -A.rmatvec(r) time_matprod += time.time() - start_time_matvec - f = np.linalg.norm(r) ** 2 / 2. + f = np.linalg.norm(r) ** 2 / 2.0 nprodA += 1 nprodAt += 1 @@ -1057,30 +1131,57 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, stat = EXIT_ITERATIONS # Print log, update history and act on exit conditions. 
- if verbosity >= 2 and \ - (((niters < 10) or (iter_lim - niters < 10) or (niters % 10 == 0)) - or single_tau or print_tau or stat): - tauflag = ' ' - subflag = '' + if verbosity >= 2 and ( + ((niters < 10) or (iter_lim - niters < 10) or (niters % 10 == 0)) + or single_tau + or print_tau + or stat + ): + tauflag = " " + subflag = "" if print_tau: - tauflag = ' %13.7e' %tau + tauflag = " %13.7e" % tau if subspace: - subflag = ' S %2i' % niters_lsqr + subflag = " S %2i" % niters_lsqr if single_tau: - _printf(fid, logb %(niters, rnorm, rgap, gnorm, np.log10(stepg), - nnz_x, nnz_g, subflag)) + _printf( + fid, + logb + % ( + niters, + rnorm, + rgap, + gnorm, + np.log10(stepg), + nnz_x, + nnz_g, + subflag, + ), + ) if subspace: - _printf(fid, ' %s' % subflag) + _printf(fid, " %s" % subflag) else: - _printf(fid, logb %(niters, rnorm, rgap, rerror1, gnorm, - np.log10(stepg), nnz_x, nnz_g, - tauflag+subflag)) + _printf( + fid, + logb + % ( + niters, + rnorm, + rgap, + rerror1, + gnorm, + np.log10(stepg), + nnz_x, + nnz_g, + tauflag + subflag, + ), + ) print_tau = False subspace = False # Update history info - if niters > 0 and niters % _allocSize == 0: # enlarge allocation - allocincrement = min(_allocSize, iter_lim-xnorm1.shape[0]) + if niters > 0 and niters % _allocSize == 0: # enlarge allocation + allocincrement = min(_allocSize, iter_lim - xnorm1.shape[0]) xnorm1 = np.hstack((xnorm1, np.zeros(allocincrement))) rnorm2 = np.hstack((rnorm2, np.zeros(allocincrement))) lambdaa = np.hstack((lambdaa, np.zeros(allocincrement))) @@ -1101,10 +1202,16 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, while 1: # Projected gradient step and linesearch. - f, x, r, niter_line, stepg, lnerr, \ - time_project_curvy, time_matprod_curvy = \ - _spg_line_curvy(x, gstep*g, max(last_fv), A, b, - project, weights, tau) + ( + f, + x, + r, + niter_line, + stepg, + lnerr, + time_project_curvy, + time_matprod_curvy, + ) = _spg_line_curvy(x, gstep * g, max(last_fv), A, b, project, weights, tau) time_project += time_project_curvy time_matprod += time_matprod_curvy nprodA += niter_line + 1 @@ -1119,11 +1226,12 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, x = xold.copy() f = fold start_time_project = time.time() - dx = project(x - gstep*g, weights, tau) - x + dx = project(x - gstep * g, weights, tau) - x time_project += time.time() - start_time_project gtd = np.dot(np.conj(g), dx) - f, x, r, niter_line, lnerr, time_matprod = \ - _spg_line(f, x, dx, gtd, max(last_fv), A, b) + f, x, r, niter_line, lnerr, time_matprod = _spg_line( + f, x, dx, gtd, max(last_fv), A, b + ) time_matprod += time_matprod nprodA += niter_line + 1 nline_tot += niter_line @@ -1139,20 +1247,24 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, if max_line_errors <= 0: stat = EXIT_LINE_ERROR else: - step_max = step_max / 10. - logger.warning('Linesearch failed with error %s. ' - 'Damping max BB scaling to %s', lnerr, - step_max) + step_max = step_max / 10.0 + logger.warning( + "Linesearch failed with error %s. " + "Damping max BB scaling to %s", + lnerr, + step_max, + ) max_line_errors -= 1 # Subspace minimization (only if active-set change is small). 
if subspace_min: start_time_matvec = time.time() - g = - A.rmatvec(r) + g = -A.rmatvec(r) time_matprod += time.time() - start_time_matvec nprodAt += 1 - nnz_x, nnz_g, nnz_idx, nnz_diff = \ - _active_vars(x, g, nnz_old, opt_tol, weights, dual_norm) + nnz_x, nnz_g, nnz_idx, nnz_diff = _active_vars( + x, g, nnz_old, opt_tol, weights, dual_norm + ) if not nnz_diff: if nnz_x == nnz_g: iter_lim_lsqr = 20 @@ -1164,9 +1276,9 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, nebar = np.size(ebar) Sprod = _LSQRprod(A, nnz_idx, ebar, n) - dxbar, istop, niters_lsqr = \ - lsqr(Sprod, r, 1e-5, 1e-1, 1e-1, 1e12, - iter_lim=iter_lim_lsqr, show=0)[0:3] + dxbar, istop, niters_lsqr = lsqr( + Sprod, r, 1e-5, 1e-1, 1e-1, 1e12, iter_lim=iter_lim_lsqr, show=0 + )[0:3] nprodA += niters_lsqr nprodAt += niters_lsqr + 1 niters_lsqr = niters_lsqr + niters_lsqr @@ -1175,9 +1287,9 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, if istop != 4: # Push dx back into full space: dx = Z dx. dx = np.zeros(n, dtype=x.dtype) - dx[nnz_idx] = \ - dxbar - (1/nebar)*np.dot(np.dot(np.conj(ebar.T), - dxbar), dxbar) + dx[nnz_idx] = dxbar - (1 / nebar) * np.dot( + np.dot(np.conj(ebar.T), dxbar), dxbar + ) # Find largest step to a change in sign. block1 = nnz_idx & (x < 0) & (dx > +piv_tol) @@ -1190,26 +1302,27 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, alpha2 = min(-x[block2] / dx[block2]) alpha = min([1, alpha1, alpha2]) if alpha < 0: - raise ValueError('Alpha smaller than zero') + raise ValueError("Alpha smaller than zero") if np.dot(np.conj(ebar.T), dx[nnz_idx]) > opt_tol: - raise ValueError('Subspace update signed sum ' - 'bigger than tolerance') + raise ValueError( + "Subspace update signed sum " "bigger than tolerance" + ) # Update variables. - x = x + alpha*dx + x = x + alpha * dx start_time_matvec = time.time() r = b - A.matvec(x) time_matprod += time.time() - start_time_matvec - f = abs(np.dot(np.conj(r), r)) / 2. + f = abs(np.dot(np.conj(r), r)) / 2.0 subspace = True nprodA += 1 if primal_norm(x, weights) > tau + opt_tol: - raise ValueError('Primal norm out of bound') + raise ValueError("Primal norm out of bound") # Update gradient and compute new Barzilai-Borwein scaling. if not lnerr: start_time_matvec = time.time() - g = - A.rmatvec(r) + g = -A.rmatvec(r) time_matprod += time.time() - start_time_matvec nprodAt += 1 s = x - xold @@ -1222,7 +1335,7 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, gstep = min(step_max, max(step_min, sts / sty)) else: gstep = min(step_max, gstep) - break # Leave while loop. This is done to allow stopping the + break # Leave while loop. This is done to allow stopping the # computations at any time within the loop if max_matvec is # reached. If this is not the case, the loop is stopped here. @@ -1235,7 +1348,7 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, break # Update function history. - if single_tau or f > sigma**2 / 2.: # Dont update if superoptimal. + if single_tau or f > sigma**2 / 2.0: # Dont update if superoptimal. last_fv[np.mod(niters, n_prev_vals)] = f.copy() if fbest > f: fbest = f.copy() @@ -1243,12 +1356,12 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, # Restore best solution (only if solving single problem). 
if single_tau and f > fbest: - rnorm = np.sqrt(2.*fbest) - print('Restoring best iterate to objective '+str(rnorm)) + rnorm = np.sqrt(2.0 * fbest) + print("Restoring best iterate to objective " + str(rnorm)) x = xbest.copy() start_time_matvec = time.time() r = b - A.matvec(x) - g = - A.rmatvec(r) + g = -A.rmatvec(r) time_matprod += time.time() - start_time_matvec gnorm = dual_norm(g, weights) rnorm = np.linalg.norm(r) @@ -1257,60 +1370,86 @@ def spgl1(A, b, tau=0, sigma=0, x0=None, fid=None, verbosity=0, # Final cleanup before exit. info = {} - info['tau'] = tau - info['rnorm'] = rnorm - info['rgap'] = rgap - info['gnorm'] = gnorm - info['stat'] = stat - info['niters'] = niters - info['nprodA'] = nprodA - info['nprodAt'] = nprodAt - info['n_newton'] = n_newton - info['time_project'] = time_project - info['time_matprod'] = time_matprod - info['niters_lsqr'] = niters_lsqr - info['time_total'] = time.time() - start_time - info['xnorm1'] = xnorm1[0:niters] - info['rnorm2'] = rnorm2[0:niters] - info['lambdaa'] = lambdaa[0:niters] + info["tau"] = tau + info["rnorm"] = rnorm + info["rgap"] = rgap + info["gnorm"] = gnorm + info["stat"] = stat + info["niters"] = niters + info["nprodA"] = nprodA + info["nprodAt"] = nprodAt + info["n_newton"] = n_newton + info["time_project"] = time_project + info["time_matprod"] = time_matprod + info["niters_lsqr"] = niters_lsqr + info["time_total"] = time.time() - start_time + info["xnorm1"] = xnorm1[0:niters] + info["rnorm2"] = rnorm2[0:niters] + info["lambdaa"] = lambdaa[0:niters] # Print final output. if verbosity >= 1: - _printf(fid, '') + _printf(fid, "") if stat == EXIT_OPTIMAL: - _printf(fid, 'EXIT -- Optimal solution found') + _printf(fid, "EXIT -- Optimal solution found") elif stat == EXIT_ITERATIONS: - _printf(fid, 'ERROR EXIT -- Too many iterations') + _printf(fid, "ERROR EXIT -- Too many iterations") elif stat == EXIT_ROOT_FOUND: - _printf(fid, 'EXIT -- Found a root') + _printf(fid, "EXIT -- Found a root") elif stat == EXIT_BPSOL_FOUND: - _printf(fid, 'EXIT -- Found a BP solution') + _printf(fid, "EXIT -- Found a BP solution") elif stat == EXIT_LEAST_SQUARES: - _printf(fid, 'EXIT -- Found a least-squares solution') + _printf(fid, "EXIT -- Found a least-squares solution") elif stat == EXIT_LINE_ERROR: - _printf(fid, 'ERROR EXIT -- Linesearch error (%d)' % lnerr) + _printf(fid, "ERROR EXIT -- Linesearch error (%d)" % lnerr) elif stat == EXIT_SUBOPTIMAL_BP: - _printf(fid, 'EXIT -- Found a suboptimal BP solution') + _printf(fid, "EXIT -- Found a suboptimal BP solution") elif stat == EXIT_MATVEC_LIMIT: - _printf(fid, 'EXIT -- Maximum matrix-vector operations reached') + _printf(fid, "EXIT -- Maximum matrix-vector operations reached") elif stat == EXIT_ACTIVE_SET: - _printf(fid, 'EXIT -- Found a possible active set') + _printf(fid, "EXIT -- Found a possible active set") else: - _printf(fid, 'SPGL1 ERROR: Unknown termination condition') - _printf(fid, '') - - _printf(fid, '%-20s: %6i %6s %-20s: %6.1f' % - ('Products with A', nprodA, '', 'Total time (secs)', - info['time_total'])) - _printf(fid, '%-20s: %6i %6s %-20s: %6.1f' % - ('Products with A^H', nprodAt, '', - 'Project time (secs)', info['time_project'])) - _printf(fid, '%-20s: %6i %6s %-20s: %6.1f' % - ('Newton iterations', n_newton, '', 'Mat-vec time (secs)', - info['time_matprod'])) - _printf(fid, '%-20s: %6i %6s %-20s: %6i' % - ('Line search its', nline_tot, '', 'Subspace iterations', - niters_lsqr)) + _printf(fid, "SPGL1 ERROR: Unknown termination condition") + _printf(fid, "") + + _printf( + fid, + 
"%-20s: %6i %6s %-20s: %6.1f" + % ( + "Products with A", + nprodA, + "", + "Total time (secs)", + info["time_total"], + ), + ) + _printf( + fid, + "%-20s: %6i %6s %-20s: %6.1f" + % ( + "Products with A^H", + nprodAt, + "", + "Project time (secs)", + info["time_project"], + ), + ) + _printf( + fid, + "%-20s: %6i %6s %-20s: %6.1f" + % ( + "Newton iterations", + n_newton, + "", + "Mat-vec time (secs)", + info["time_matprod"], + ), + ) + _printf( + fid, + "%-20s: %6i %6s %-20s: %6i" + % ("Line search its", nline_tot, "", "Subspace iterations", niters_lsqr), + ) return x, r, g, info @@ -1356,6 +1495,7 @@ def spg_bp(A, b, **kwargs): x, r, g, info = spgl1(A, b, tau, sigma, x0, **kwargs) return x, r, g, info + def spg_bpdn(A, b, sigma, **kwargs): """Basis pursuit denoise (BPDN) problem. @@ -1398,6 +1538,7 @@ def spg_bpdn(A, b, sigma, **kwargs): x, r, g, info = spgl1(A, b, tau, sigma, x0, **kwargs) return x, r, g, info + def spg_lasso(A, b, tau, **kwargs): """LASSO problem. @@ -1439,6 +1580,7 @@ def spg_lasso(A, b, tau, **kwargs): x, r, g, info = spgl1(A, b, tau, sigma, x0, **kwargs) return x, r, g, info + def spg_mmv(A, B, sigma=0, **kwargs): """MMV problem. @@ -1482,15 +1624,20 @@ def spg_mmv(A, B, sigma=0, **kwargs): A = _blockdiag(A, m, n, groups) # Set projection specific functions - _primal_norm = _norm_l12_primal if 'primal_norm' not in kwargs.keys() \ - else kwargs['primal_norm'] - _dual_norm = _norm_l12_dual if 'dual_norm' not in kwargs.keys() \ - else kwargs['dual_norm'] - _project = _norm_l12_project if 'project' not in kwargs.keys() \ - else kwargs['project'] - kwargs.pop('primal_norm', None) - kwargs.pop('dual_norm', None) - kwargs.pop('project', None) + _primal_norm = ( + _norm_l12_primal + if "primal_norm" not in kwargs.keys() + else kwargs["primal_norm"] + ) + _dual_norm = ( + _norm_l12_dual if "dual_norm" not in kwargs.keys() else kwargs["dual_norm"] + ) + _project = ( + _norm_l12_project if "project" not in kwargs.keys() else kwargs["project"] + ) + kwargs.pop("primal_norm", None) + kwargs.pop("dual_norm", None) + kwargs.pop("project", None) project = lambda x, weight, tau: _project(groups, x, weight, tau) primal_norm = lambda x, weight: _primal_norm(groups, x, weight) @@ -1498,9 +1645,17 @@ def spg_mmv(A, B, sigma=0, **kwargs): tau = 0 x0 = None - x, r, g, info = spgl1(A, B.ravel(), tau, sigma, x0, project=project, - primal_norm=primal_norm, dual_norm=dual_norm, - **kwargs) + x, r, g, info = spgl1( + A, + B.ravel(), + tau, + sigma, + x0, + project=project, + primal_norm=primal_norm, + dual_norm=dual_norm, + **kwargs + ) x = x.reshape(n, groups) g = g.reshape(n, groups) diff --git a/tutorials/README.txt b/tutorials/README.txt index 9b7f69b..cc1d377 100755 --- a/tutorials/README.txt +++ b/tutorials/README.txt @@ -1,4 +1,4 @@ .. _tutorials: Tutorials ---------- \ No newline at end of file +--------- diff --git a/tutorials/mmvnn.py b/tutorials/mmvnn.py index 09e9841..306baea 100755 --- a/tutorials/mmvnn.py +++ b/tutorials/mmvnn.py @@ -1,75 +1,86 @@ r""" MMV with non-negative norms =========================== -In this tutorial we will use the ``spg_mmv`` solver which solves a -multi-measurement vector basis pursuit denoise problem. Both standard L12 -and L12non-negative norms will be compared. The latter set of norms is used in -case allowsour solution to have only positive values in case we have access to +In this tutorial we will use the ``spg_mmv`` solver, which solves a +multi-measurement vector basis pursuit denoise problem. 
Both the standard L12 +and L12non-negative norms will be compared. The latter set of norms is used if we +want our solution to have only positive values in case we have access to such a kind of prior information. """ -import numpy as np import matplotlib.pyplot as plt +import numpy as np -from spgl1 import spg_mmv -from spgl1 import norm_l12nn_primal, norm_l12nn_dual, norm_l12nn_project - +import spgl1 ############################################################################### # Let's first import our data, matrix operator and weights -data = np.load('../testdata/mmvnn.npz') +data = np.load("../testdata/mmvnn.npz") -A = data['A'] -B = data['B'] -weights = data['weights'] +A = data["A"] +B = data["B"] +weights = data["weights"] ############################################################################### # We apply unweighted inversions with and without non-negative # norms. -X, _, _, info = spg_mmv(A, B, 0.5, iter_lim=100, verbosity=0) +X, _, _, info = spgl1.spg_mmv(A, B, 0.5, iter_lim=100, verbosity=0) -XNN, _, _, infoNN = spg_mmv(A, B, 0.5, iter_lim=100, verbosity=0, - project=norm_l12nn_project, - primal_norm=norm_l12nn_primal, - dual_norm=norm_l12nn_dual) -print('Negative X - MMV:', np.any(X < 0)) -print('Negative X - MMVNN:', np.any(XNN < 0)) -print('Residual norm - MMV:', info['rnorm']) -print('Residual norm - MMVNN:', infoNN['rnorm']) +XNN, _, _, infoNN = spgl1.spg_mmv( + A, + B, + 0.5, + iter_lim=100, + verbosity=0, + project=spgl1.norm_l12nn_project, + primal_norm=spgl1.norm_l12nn_primal, + dual_norm=spgl1.norm_l12nn_dual, +) +print("Negative X - MMV:", np.any(X < 0)) +print("Negative X - MMVNN:", np.any(XNN < 0)) +print("Residual norm - MMV:", info["rnorm"]) +print("Residual norm - MMVNN:", infoNN["rnorm"]) plt.figure() -plt.plot(X[:, 0], 'k', label='Coefficients') -plt.plot(XNN[:, 0], '--r', label='Coefficients NN') +plt.plot(X[:, 0], "k", label="Coefficients") +plt.plot(XNN[:, 0], "--r", label="Coefficients NN") plt.legend() -plt.title('Unweighted Basis Pursuit with Multiple Measurement Vectors') +plt.title("Unweighted Basis Pursuit with Multiple Measurement Vectors") plt.figure() -plt.plot(X[:, 1], 'k', label='Coefficients') -plt.plot(XNN[:, 1], '--r', label='Coefficients NN') +plt.plot(X[:, 1], "k", label="Coefficients") +plt.plot(XNN[:, 1], "--r", label="Coefficients NN") plt.legend() -plt.title('Unweighted Basis Pursuit with Multiple Measurement Vectors') +plt.title("Unweighted Basis Pursuit with Multiple Measurement Vectors") ############################################################################### # We repeat the same steps with weighted norms. 
-X, _, _, info = spg_mmv(A, B, 0.5, iter_lim=100, - weights=np.array(weights), verbosity=0) -XNN, _, _, infoNN = spg_mmv(A, B, 0.5, iter_lim=100, verbosity=0, - weights=np.array(weights), - project=norm_l12nn_project, - primal_norm=norm_l12nn_primal, - dual_norm=norm_l12nn_dual) -print('Negative X - MMV:', np.any(X < 0)) -print('Negative X - MMVNN:', np.any(XNN < 0)) -print('Residual norm - MMV:', info['rnorm']) -print('Residual norm - MMVNN:', infoNN['rnorm']) +X, _, _, info = spgl1.spg_mmv( + A, B, 0.5, iter_lim=100, weights=np.array(weights), verbosity=0 +) +XNN, _, _, infoNN = spgl1.spg_mmv( + A, + B, + 0.5, + iter_lim=100, + verbosity=0, + weights=np.array(weights), + project=spgl1.norm_l12nn_project, + primal_norm=spgl1.norm_l12nn_primal, + dual_norm=spgl1.norm_l12nn_dual, +) +print("Negative X - MMV:", np.any(X < 0)) +print("Negative X - MMVNN:", np.any(XNN < 0)) +print("Residual norm - MMV:", info["rnorm"]) +print("Residual norm - MMVNN:", infoNN["rnorm"]) plt.figure() -plt.plot(X[:, 0], 'k', label='Coefficients') -plt.plot(XNN[:, 0], '--r', label='Coefficients NN') +plt.plot(X[:, 0], "k", label="Coefficients") +plt.plot(XNN[:, 0], "--r", label="Coefficients NN") plt.legend() -plt.title('Weighted Basis Pursuit with Multiple Measurement Vectors') +plt.title("Weighted Basis Pursuit with Multiple Measurement Vectors") plt.figure() -plt.plot(X[:, 1], 'k', label='Coefficients') -plt.plot(XNN[:, 1], '--r', label='Coefficients NN') +plt.plot(X[:, 1], "k", label="Coefficients") +plt.plot(XNN[:, 1], "--r", label="Coefficients NN") plt.legend() -plt.title('Weighted Basis Pursuit with Multiple Measurement Vectors') +plt.title("Weighted Basis Pursuit with Multiple Measurement Vectors") diff --git a/tutorials/spgl1.py b/tutorials/spgl1s.py similarity index 71% rename from tutorials/spgl1.py rename to tutorials/spgl1s.py index e51d5bf..f86dbc0 100755 --- a/tutorials/spgl1.py +++ b/tutorials/spgl1s.py @@ -4,11 +4,11 @@ In this tutorial we will explore the different solvers in the ``spgl1`` package and apply them to different toy examples. 
""" -import numpy as np import matplotlib.pyplot as plt - -from scipy.sparse.linalg import LinearOperator +import numpy as np from scipy.sparse import spdiags +from scipy.sparse.linalg import LinearOperator + import spgl1 # Initialize random number generators @@ -19,7 +19,7 @@ m = 50 n = 128 k = 14 -[A, Rtmp] = np.linalg.qr(np.random.randn(n, m), 'reduced') +[A, Rtmp] = np.linalg.qr(np.random.randn(n, m), "reduced") A = A.T p = np.random.permutation(n) p = p[0:k] @@ -36,11 +36,12 @@ tau = np.pi x, resid, grad, info = spgl1.spg_lasso(A, b, tau, verbosity=1) -print('%s%s%s' % ('-'*35, ' Solution ', '-'*35)) -print('nonzeros(x) = %i, ||x||_1 = %12.6e, ||x||_1 - pi = %13.6e' % - (np.sum(np.abs(x) > 1e-5), np.linalg.norm(x, 1), - np.linalg.norm(x, 1)-np.pi)) -print('%s' % ('-'*80)) +print("%s%s%s" % ("-" * 35, " Solution ", "-" * 35)) +print( + "nonzeros(x) = %i, ||x||_1 = %12.6e, ||x||_1 - pi = %13.6e" + % (np.sum(np.abs(x) > 1e-5), np.linalg.norm(x, 1), np.linalg.norm(x, 1) - np.pi) +) +print("%s" % ("-" * 80)) ############################################################################### # Solve the basis pursuit (BP) problem: @@ -52,25 +53,33 @@ x, resid, grad, info = spgl1.spg_bp(A, b, verbosity=2) plt.figure() -plt.plot(x, 'b') -plt.plot(x0, 'ro') -plt.legend(('Recovered coefficients', 'Original coefficients')) -plt.title('Basis Pursuit') +plt.plot(x, "b") +plt.plot(x0, "ro") +plt.legend(("Recovered coefficients", "Original coefficients")) +plt.title("Basis Pursuit") plt.figure() -plt.plot(info['xnorm1'], info['rnorm2'], '.-k') -plt.xlabel(r'$||x||_1$') -plt.ylabel(r'$||r||_2$') -plt.title('Pareto curve') +plt.plot(info["xnorm1"], info["rnorm2"], ".-k") +plt.xlabel(r"$||x||_1$") +plt.ylabel(r"$||r||_2$") +plt.title("Pareto curve") plt.figure() -plt.plot(np.arange(info['niters']), info['rnorm2']/max(info['rnorm2']), - '.-k', label=r'$||r||_2$') -plt.plot(np.arange(info['niters']), info['xnorm1']/max(info['xnorm1']), - '.-r', label=r'$||x||_1$') -plt.xlabel(r'#iter') -plt.ylabel(r'$||r||_2/||x||_1$') -plt.title('Norms') +plt.plot( + np.arange(info["niters"]), + info["rnorm2"] / max(info["rnorm2"]), + ".-k", + label=r"$||r||_2$", +) +plt.plot( + np.arange(info["niters"]), + info["xnorm1"] / max(info["xnorm1"]), + ".-r", + label=r"$||x||_1$", +) +plt.xlabel(r"#iter") +plt.ylabel(r"$||r||_2/||x||_1$") +plt.title("Norms") plt.legend() ############################################################################### @@ -85,10 +94,10 @@ x, resid, grad, info = spgl1.spg_bpdn(A, b, sigma, iter_lim=100, verbosity=2) plt.figure() -plt.plot(x, 'b') -plt.plot(x0, 'ro') -plt.legend(('Recovered coefficients', 'Original coefficients')) -plt.title('Basis Pursuit Denoise') +plt.plot(x, "b") +plt.plot(x0, "ro") +plt.legend(("Recovered coefficients", "Original coefficients")) +plt.title("Basis Pursuit Denoise") ############################################################################### # Solve the basis pursuit (BP) problem in complex variables: @@ -102,15 +111,18 @@ def __init__(self, idx, n): self.n = n self.shape = (len(idx), n) self.dtype = np.complex128 + def _matvec(self, x): # % y = P(idx) * FFT(x) z = np.fft.fft(x) / np.sqrt(n) return z[idx] + def _rmatvec(self, x): z = np.zeros(n, dtype=complex) z[idx] = x return np.fft.ifft(z) * np.sqrt(n) + # Create partial Fourier operator with rows idx idx = np.random.permutation(n) idx = idx[0:m] @@ -125,13 +137,14 @@ def _rmatvec(self, x): z, resid, grad, info = spgl1.spg_bp(opA, b, verbosity=2) plt.figure() -plt.plot(z.real, 'b+', markersize=15.0) 
-plt.plot(z0.real, 'bo') -plt.plot(z.imag, 'r+', markersize=15.0) -plt.plot(z0.imag, 'ro') -plt.legend(('Recovered (real)', 'Original (real)', - 'Recovered (imag)', 'Original (imag)')) -plt.title('Complex Basis Pursuit') +plt.plot(z.real, "b+", markersize=15.0) +plt.plot(z0.real, "bo") +plt.plot(z.imag, "r+", markersize=15.0) +plt.plot(z0.imag, "ro") +plt.legend( + ("Recovered (real)", "Original (real)", "Recovered (imag)", "Original (imag)") +) +plt.title("Complex Basis Pursuit") ############################################################################### # We can also sample the Pareto frontier at 100 points: @@ -150,10 +163,10 @@ def _rmatvec(self, x): phi[i] = np.linalg.norm(r) plt.figure() -plt.plot(tau, phi, '.') -plt.title('Pareto frontier') -plt.xlabel('||x||_1') -plt.ylabel('||Ax-b||_2') +plt.plot(tau, phi, ".") +plt.title("Pareto frontier") +plt.xlabel("||x||_1") +plt.ylabel("||Ax-b||_2") ############################################################################### # We now solve the weighted basis pursuit (BP) problem: @@ -174,7 +187,7 @@ def _rmatvec(self, x): x0[p[0:k]] = np.random.randn(k) # Set up weights w and vector b -w = np.random.rand(n) + 0.1 # Weights +w = np.random.rand(n) + 0.1 # Weights b = A.dot(x0 / w) # Signal # Solve problem @@ -184,10 +197,10 @@ def _rmatvec(self, x): x1 = x * w plt.figure() -plt.plot(x1, 'b') -plt.plot(x0, 'ro') -plt.legend(('Coefficients', 'Original coefficients')) -plt.title('Weighted Basis Pursuit') +plt.plot(x1, "b") +plt.plot(x0, "ro") +plt.legend(("Coefficients", "Original coefficients")) +plt.title("Weighted Basis Pursuit") k = 9 x0 = np.zeros(n) x0[p[0:k]] = np.random.randn(k) @@ -218,7 +231,7 @@ def _rmatvec(self, x): X0[p, :] = np.random.randn(k, l) weights = 3 * np.random.rand(n) + 0.1 -W = 1/weights * np.eye(n) +W = 1 / weights * np.eye(n) B = A.dot(W).dot(X0) # Solve unweighted version @@ -230,15 +243,15 @@ def _rmatvec(self, x): # Plot results plt.figure() -plt.plot(x_uw[:, 0], 'b-', label='Coefficients (1)') -plt.plot(x_w[:, 0], 'b.', label='Coefficients (2)') -plt.plot(X0[:, 0], 'ro', label='Original coefficients') +plt.plot(x_uw[:, 0], "b-", label="Coefficients (1)") +plt.plot(x_w[:, 0], "b.", label="Coefficients (2)") +plt.plot(X0[:, 0], "ro", label="Original coefficients") plt.legend() -plt.title('Weighted Basis Pursuit with Multiple Measurement Vectors') +plt.title("Weighted Basis Pursuit with Multiple Measurement Vectors") plt.figure() -plt.plot(x_uw[:, 1], 'b-', label='Coefficients (1)') -plt.plot(x_w[:, 1], 'b.', label='Coefficients (2)') -plt.plot(X0[:, 1], 'ro', label='Original coefficients') +plt.plot(x_uw[:, 1], "b-", label="Coefficients (1)") +plt.plot(x_w[:, 1], "b.", label="Coefficients (2)") +plt.plot(X0[:, 1], "ro", label="Original coefficients") plt.legend() -plt.title('Weighted Basis Pursuit with Multiple Measurement Vectors') +plt.title("Weighted Basis Pursuit with Multiple Measurement Vectors")
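
Note for reviewers: the reformatted tutorials above all exercise the same public entry points, whose calling conventions are unchanged by this cleanup. The sketch below is a minimal usage reference only; it uses a random toy problem of my own (not the tutorials' data files) and assumes ``spgl1`` is installed with the API shown in the diffs above.

import numpy as np

import spgl1

np.random.seed(0)
m, n, k = 50, 128, 14

# Random encoding matrix with orthonormal rows and a k-sparse model vector
A, _ = np.linalg.qr(np.random.randn(n, m), "reduced")
A = A.T
x0 = np.zeros(n)
x0[np.random.permutation(n)[:k]] = np.random.randn(k)
b = A.dot(x0)

# LASSO:                  min ||Ax - b||_2  s.t.  ||x||_1 <= tau
xl, _, _, infol = spgl1.spg_lasso(A, b, np.pi, verbosity=0)

# Basis pursuit:          min ||x||_1       s.t.  Ax = b
xbp, _, _, infobp = spgl1.spg_bp(A, b, verbosity=0)

# Basis pursuit denoise:  min ||x||_1       s.t.  ||Ax - b||_2 <= sigma
xbpdn, _, _, infobpdn = spgl1.spg_bpdn(A, b, 1e-3, iter_lim=100, verbosity=0)

# MMV with non-negative L12 norms, as in tutorials/mmvnn.py
X0 = np.abs(np.random.randn(n, 2))
B = A.dot(X0)
XNN, _, _, infonn = spgl1.spg_mmv(
    A,
    B,
    0.5,
    iter_lim=100,
    verbosity=0,
    project=spgl1.norm_l12nn_project,
    primal_norm=spgl1.norm_l12nn_primal,
    dual_norm=spgl1.norm_l12nn_dual,
)

# Each solver returns (x, r, g, info); info holds the iteration history,
# e.g. the sampled Pareto curve (info["xnorm1"], info["rnorm2"]).
print(infobp["niters"], infobp["rnorm"], infobp["xnorm1"].shape)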