From 1b420668f5100d7f36c748aee833f92081f0bbd6 Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Mon, 22 Jan 2024 14:49:06 -0500 Subject: [PATCH 01/23] Minor improvements to torch.py --- src/osqp/nn/torch.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/osqp/nn/torch.py b/src/osqp/nn/torch.py index 3728345d..213ebee2 100644 --- a/src/osqp/nn/torch.py +++ b/src/osqp/nn/torch.py @@ -138,9 +138,7 @@ def forward(ctx, P_val, q_val, A_val, l_val, u_val): assert A_val.size(1) == len(A_idx[0]), 'Unexpected size of A' assert P_val.size(1) == len(P_idx[0]), 'Unexpected size of P' - P = [spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) for i in range(n_batch)] q = [to_numpy(q_val[i]) for i in range(n_batch)] - A = [spa.csc_matrix((to_numpy(A_val[i]), A_idx), shape=A_shape) for i in range(n_batch)] l = [to_numpy(l_val[i]) for i in range(n_batch)] u = [to_numpy(u_val[i]) for i in range(n_batch)] @@ -148,14 +146,16 @@ def forward(ctx, P_val, q_val, A_val, l_val, u_val): x_torch = torch.zeros((n_batch, n), dtype=dtype, device=device) x = [] + solver = osqp.OSQP(algebra=algebra) for i in range(n_batch): # Solve QP # TODO: Cache solver object in between - solver = osqp.OSQP(algebra=algebra) + P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) + A = spa.csc_matrix((to_numpy(A_val[i]), A_idx), shape=A_shape) solver.setup( - P[i], + P, q[i], - A[i], + A, l[i], u[i], solver_type=solver_type, From 6a10d30782c22335f09ec2924cd69f1e41aab8ee Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Thu, 25 Jan 2024 13:28:27 -0500 Subject: [PATCH 02/23] Added a check to see if solver exists or not --- src/osqp/nn/torch.py | 84 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 71 insertions(+), 13 deletions(-) diff --git a/src/osqp/nn/torch.py b/src/osqp/nn/torch.py index 213ebee2..ca5a4015 100644 --- a/src/osqp/nn/torch.py +++ b/src/osqp/nn/torch.py @@ -112,6 +112,55 @@ def forward(ctx, P_val, q_val, A_val, l_val, u_val): """ + def 
_get_update_flag(n_batch: int) -> bool: + """ + This is a helper function that returns a flag if we need to update the solvers + or generate them. Raises an RuntimeError if the number of solvers is invalid. + """ + num_solvers = len(solvers) + if num_solvers not in (0, n_batch): + raise RuntimeError(f"Invalid number of solvers: expected 0 or {n_batch}," + f" but got {num_solvers}.") + return num_solvers==n_batch + + def _setup_update_solvers(n_batch: int, **kwargs) -> None: + """ + This is a helper function that setups new solvers if solvers is empty or updates + the list if it exists. Raises an RuntimeError if the number of solvers is invalid. + """ + + + + update_flag = _get_update_flag(solvers, n_batch) + P_val, P_idx = kwargs.get("P_val"), kwargs.get("P_idx") + A_val, A_idx = kwargs.get("A_val"), kwargs.get("A_idx") + P_shape, A_shape = kwargs.get("P_shape"), kwargs.get("A_shape") + q, l, u = kwargs.get("q"), kwargs.get("l"), kwargs.get("u") + + for i in range(n_batch): + # Solve QP + # TODO: Cache solver object in between + P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) + A = spa.csc_matrix((to_numpy(A_val[i]), A_idx), shape=A_shape) + if update_flag: + solvers[i].update(q=q[i], l=l[i], u=u[i], Px=P, Px_idx=P_idx, + Ax=A, Ax_idx=A_idx) + else: #setup + solver = osqp.OSQP(algebra=algebra) #TODO: When Ian introduces hard copy, generate only once + solver.setup( + P, + q[i], + A, + l[i], + u[i], + solver_type=solver_type, + verbose=verbose, + eps_abs=eps_abs, + eps_rel=eps_rel, + ) + solvers.append(solver) + + params = [P_val, q_val, A_val, l_val, u_val] for p in params: @@ -146,25 +195,34 @@ def forward(ctx, P_val, q_val, A_val, l_val, u_val): x_torch = torch.zeros((n_batch, n), dtype=dtype, device=device) x = [] - solver = osqp.OSQP(algebra=algebra) for i in range(n_batch): # Solve QP # TODO: Cache solver object in between + update_flag = _get_update_flag(solvers, n_batch) P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) A = 
spa.csc_matrix((to_numpy(A_val[i]), A_idx), shape=A_shape) - solver.setup( - P, - q[i], - A, - l[i], - u[i], - solver_type=solver_type, - verbose=verbose, - eps_abs=eps_abs, - eps_rel=eps_rel, - ) + if update_flag: + solver = solvers[i] + solver.update(q=q[i], l=l[i], u=u[i], Px=P, Px_idx=P_idx, + Ax=A, Ax_idx=A_idx) + else: + solver = osqp.OSQP(algebra=algebra) #TODO: Deep copy when available + solver.setup( + P, + q[i], + A, + l[i], + u[i], + solver_type=solver_type, + verbose=verbose, + eps_abs=eps_abs, + eps_rel=eps_rel, + ) result = solver.solve() - solvers.append(solver) + if update_flag: + solvers[i] = solver + else: + solvers.append(solver) status = result.info.status if status != 'solved': # TODO: We can replace this with something calmer and From 049caae37ac850e07e15aaa264d5fbca22ed8dd1 Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Mon, 5 Feb 2024 15:51:09 -0500 Subject: [PATCH 03/23] Adding devcontainer --- .devcontainer/devcontainer.json | 16 ++++++++++++++++ .devcontainer/startup.sh | 3 +++ 2 files changed, 19 insertions(+) create mode 100644 .devcontainer/devcontainer.json create mode 100644 .devcontainer/startup.sh diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000..c0cd1961 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,16 @@ +{ + "name": "Python 3", + "image": "mcr.microsoft.com/devcontainers/python:0-3.10", + "onCreateCommand": "bash .devcontainer/startup.sh", + "customizations": { + "vscode": { + "settings": { + "python.testing.pytestEnabled": true, + "python.testing.unittestEnabled": false, + "python.testing.pytestArgs": [ + "cvxpy" + ] + } + } + } +} \ No newline at end of file diff --git a/.devcontainer/startup.sh b/.devcontainer/startup.sh new file mode 100644 index 00000000..f0961081 --- /dev/null +++ b/.devcontainer/startup.sh @@ -0,0 +1,3 @@ +pip install -e . 
+pip install pytest pre-commit +pre-commit install \ No newline at end of file From d8a5e77628150c983cdb540674e027fccba869fb Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Wed, 7 Feb 2024 10:09:28 -0500 Subject: [PATCH 04/23] Update solvers if they exist --- src/osqp/nn/torch.py | 48 ++++++-------------------------------------- 1 file changed, 6 insertions(+), 42 deletions(-) diff --git a/src/osqp/nn/torch.py b/src/osqp/nn/torch.py index ca5a4015..24a24ad7 100644 --- a/src/osqp/nn/torch.py +++ b/src/osqp/nn/torch.py @@ -122,44 +122,7 @@ def _get_update_flag(n_batch: int) -> bool: raise RuntimeError(f"Invalid number of solvers: expected 0 or {n_batch}," f" but got {num_solvers}.") return num_solvers==n_batch - - def _setup_update_solvers(n_batch: int, **kwargs) -> None: - """ - This is a helper function that setups new solvers if solvers is empty or updates - the list if it exists. Raises an RuntimeError if the number of solvers is invalid. - """ - - - - update_flag = _get_update_flag(solvers, n_batch) - P_val, P_idx = kwargs.get("P_val"), kwargs.get("P_idx") - A_val, A_idx = kwargs.get("A_val"), kwargs.get("A_idx") - P_shape, A_shape = kwargs.get("P_shape"), kwargs.get("A_shape") - q, l, u = kwargs.get("q"), kwargs.get("l"), kwargs.get("u") - - for i in range(n_batch): - # Solve QP - # TODO: Cache solver object in between - P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) - A = spa.csc_matrix((to_numpy(A_val[i]), A_idx), shape=A_shape) - if update_flag: - solvers[i].update(q=q[i], l=l[i], u=u[i], Px=P, Px_idx=P_idx, - Ax=A, Ax_idx=A_idx) - else: #setup - solver = osqp.OSQP(algebra=algebra) #TODO: When Ian introduces hard copy, generate only once - solver.setup( - P, - q[i], - A, - l[i], - u[i], - solver_type=solver_type, - verbose=verbose, - eps_abs=eps_abs, - eps_rel=eps_rel, - ) - solvers.append(solver) - + params = [P_val, q_val, A_val, l_val, u_val] @@ -199,13 +162,14 @@ def _setup_update_solvers(n_batch: int, **kwargs) -> None: # Solve QP # 
TODO: Cache solver object in between update_flag = _get_update_flag(solvers, n_batch) - P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) - A = spa.csc_matrix((to_numpy(A_val[i]), A_idx), shape=A_shape) + # P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) if update_flag: solver = solvers[i] - solver.update(q=q[i], l=l[i], u=u[i], Px=P, Px_idx=P_idx, - Ax=A, Ax_idx=A_idx) + solver.update(q=q[i], l=l[i], u=u[i], Px=to_numpy(P_val[i]), Px_idx=P_idx, + Ax=to_numpy(A_val[i]), Ax_idx=A_idx) else: + P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) + A = spa.csc_matrix((to_numpy(A_val[i]), A_idx), shape=A_shape) solver = osqp.OSQP(algebra=algebra) #TODO: Deep copy when available solver.setup( P, From 91add60f72378eb7725b0c026207742a4b58b355 Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Tue, 20 Feb 2024 11:12:03 -0500 Subject: [PATCH 05/23] Parallelized the forward and backward pass in _OSQP_FnFn. Set scipy version to 1.11.4. --- pyproject.toml | 2 +- src/osqp/nn/torch.py | 111 +++++++++++++++++++++++++++---------------- 2 files changed, 70 insertions(+), 43 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 774a0d30..3e236ec6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["setuptools>=64", "wheel", "oldest-supported-numpy", "scipy>=0.13.2", "setuptools_scm>=6.2", "cmake>=3.18"] +requires = ["setuptools>=64", "wheel", "oldest-supported-numpy", "scipy==1.11.4", "setuptools_scm>=6.2", "cmake>=3.18"] build-backend = "setuptools.build_meta" [tool.setuptools_scm] diff --git a/src/osqp/nn/torch.py b/src/osqp/nn/torch.py index 24a24ad7..734f4916 100644 --- a/src/osqp/nn/torch.py +++ b/src/osqp/nn/torch.py @@ -3,6 +3,8 @@ import torch from torch.nn import Module from torch.autograd import Function +from joblib import Parallel, delayed +import multiprocessing import osqp @@ -122,6 +124,43 @@ def _get_update_flag(n_batch: int) -> bool: raise RuntimeError(f"Invalid number of 
solvers: expected 0 or {n_batch}," f" but got {num_solvers}.") return num_solvers==n_batch + + def _inner_solve(i, update_flag, q, l, u, P_val, P_idx, A_val, A_idx, solver_type, + eps_abs, eps_rel): + """ + This inner function solves for each solver. update_flag has to be passed from + outside to make sure it doesn't change during a parallel run. + """ + # Solve QP + # TODO: Cache solver object in between + # P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) + if update_flag: + solver = solvers[i] + solver.update(q=q[i], l=l[i], u=u[i], Px=to_numpy(P_val[i]), Px_idx=P_idx, + Ax=to_numpy(A_val[i]), Ax_idx=A_idx) + else: + P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) + A = spa.csc_matrix((to_numpy(A_val[i]), A_idx), shape=A_shape) + solver = osqp.OSQP(algebra=algebra) #TODO: Deep copy when available + solver.setup( + P, + q[i], + A, + l[i], + u[i], + solver_type=solver_type, + verbose=verbose, + eps_abs=eps_abs, + eps_rel=eps_rel, + ) + result = solver.solve() + status = result.info.status + if status != 'solved': + # TODO: We can replace this with something calmer and + # add some more options around potentially ignoring this. 
+ raise RuntimeError(f'Unable to solve QP, status: {status}') + + return solver, result.x params = [P_val, q_val, A_val, l_val, u_val] @@ -157,46 +196,19 @@ def _get_update_flag(n_batch: int) -> bool: # Perform forward step solving the QPs x_torch = torch.zeros((n_batch, n), dtype=dtype, device=device) - x = [] + update_flag = _get_update_flag(n_batch) + n_jobs = multiprocessing.cpu_count() + res = Parallel(n_jobs=n_jobs, prefer="threads")(delayed(_inner_solve)(i=i, + update_flag=update_flag, q=q, l=l, u=u, P_val=P_val, P_idx=P_idx, + A_val=A_val, A_idx=A_idx, solver_type=solver_type, eps_abs=eps_abs, + eps_rel=eps_rel) for i in range(n_batch)) + solvers_loop, x = zip(*res) for i in range(n_batch): - # Solve QP - # TODO: Cache solver object in between - update_flag = _get_update_flag(solvers, n_batch) - # P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) - if update_flag: - solver = solvers[i] - solver.update(q=q[i], l=l[i], u=u[i], Px=to_numpy(P_val[i]), Px_idx=P_idx, - Ax=to_numpy(A_val[i]), Ax_idx=A_idx) - else: - P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) - A = spa.csc_matrix((to_numpy(A_val[i]), A_idx), shape=A_shape) - solver = osqp.OSQP(algebra=algebra) #TODO: Deep copy when available - solver.setup( - P, - q[i], - A, - l[i], - u[i], - solver_type=solver_type, - verbose=verbose, - eps_abs=eps_abs, - eps_rel=eps_rel, - ) - result = solver.solve() if update_flag: - solvers[i] = solver + solvers[i] = solvers_loop[i] else: - solvers.append(solver) - status = result.info.status - if status != 'solved': - # TODO: We can replace this with something calmer and - # add some more options around potentially ignoring this. - raise RuntimeError(f'Unable to solve QP, status: {status}') - x.append(result.x) - - # This is silently converting result.x to the same - # dtype and device as x_torch. 
- x_torch[i] = torch.from_numpy(result.x) + solvers.append(solvers_loop[i]) + x_torch[i] = torch.from_numpy(x[i]) # Return solutions if not batch_mode: @@ -206,6 +218,18 @@ def _get_update_flag(n_batch: int) -> bool: @staticmethod def backward(ctx, dl_dx_val): + def _loop_adjoint_derivative(solver, dl_dx): + """ + This inner function calculates dp[i] dl[i], du[i], dP[i], dA[i] + using solvers[i], dl_dx[i]. + """ + solver.adjoint_derivative_compute(dx=dl_dx) + dPi_np, dAi_np = solver.adjoint_derivative_get_mat(as_dense=False, dP_as_triu=False) + dqi_np, dli_np, dui_np = solver.adjoint_derivative_get_vec() + dq, dl, du = [torch.from_numpy(d) for d in [dqi_np, dli_np, dui_np]] + dP, dA = [torch.from_numpy(d.x) for d in [dPi_np, dAi_np]] + return dq, dl, du, dP, dA + dtype = dl_dx_val.dtype device = dl_dx_val.device @@ -230,12 +254,15 @@ def backward(ctx, dl_dx_val): dl = torch.zeros((n_batch, m), dtype=dtype, device=device) du = torch.zeros((n_batch, m), dtype=dtype, device=device) + n_jobs = multiprocessing.cpu_count() + res = Parallel(n_jobs=n_jobs, prefer="threads")(delayed(_loop_adjoint_derivative)(solvers[i], dl_dx[i]) for i in range(n_batch)) + dq_vec, dl_vec, du_vec, dP_vec, dA_vec = zip(*res) for i in range(n_batch): - solvers[i].adjoint_derivative_compute(dx=dl_dx[i]) - dPi_np, dAi_np = solvers[i].adjoint_derivative_get_mat(as_dense=False, dP_as_triu=False) - dqi_np, dli_np, dui_np = solvers[i].adjoint_derivative_get_vec() - dq[i], dl[i], du[i] = [torch.from_numpy(d) for d in [dqi_np, dli_np, dui_np]] - dP[i], dA[i] = [torch.from_numpy(d.x) for d in [dPi_np, dAi_np]] + dq[i] = dq_vec[i] + dl[i] = dl_vec[i] + du[i] = du_vec[i] + dP[i] = dP_vec[i] + dA[i] = dA_vec[i] grads = [dP, dq, dA, dl, du] From 2ad54ff26b89a14227d3a89f1513c4b591a4343c Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Tue, 20 Feb 2024 11:33:43 -0500 Subject: [PATCH 06/23] Scipy 1.11.4 requirement moved from pyproject.toml to setup.py --- pyproject.toml | 2 +- setup.py | 2 +- 2 files 
changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3e236ec6..774a0d30 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["setuptools>=64", "wheel", "oldest-supported-numpy", "scipy==1.11.4", "setuptools_scm>=6.2", "cmake>=3.18"] +requires = ["setuptools>=64", "wheel", "oldest-supported-numpy", "scipy>=0.13.2", "setuptools_scm>=6.2", "cmake>=3.18"] build-backend = "setuptools.build_meta" [tool.setuptools_scm] diff --git a/setup.py b/setup.py index a5905c74..72b68bb9 100644 --- a/setup.py +++ b/setup.py @@ -166,7 +166,7 @@ def build_extension(self, ext): package_data={'osqp.codegen.pywrapper': ['*.jinja']}, include_package_data=True, zip_safe=False, - install_requires=['numpy>=1.7', 'scipy>=0.13.2', 'qdldl', 'jinja2'], + install_requires=['numpy>=1.7', 'scipy==1.11.4', 'qdldl', 'jinja2'], python_requires='>=3.7', extras_require=extras_require, license='Apache 2.0', From 206d88d7b0d155fd1951bb65a9e1c7c8ec9db32a Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Tue, 20 Feb 2024 11:45:06 -0500 Subject: [PATCH 07/23] Changed the scipy version to 1.10.1 to support Python 3.8. Added joblib as an extra requirement. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 72b68bb9..13bddf50 100644 --- a/setup.py +++ b/setup.py @@ -166,7 +166,7 @@ def build_extension(self, ext): package_data={'osqp.codegen.pywrapper': ['*.jinja']}, include_package_data=True, zip_safe=False, - install_requires=['numpy>=1.7', 'scipy==1.11.4', 'qdldl', 'jinja2'], + install_requires=['numpy>=1.7', 'scipy==1.10.1', 'qdldl', 'jinja2', 'joblib'], python_requires='>=3.7', extras_require=extras_require, license='Apache 2.0', From b9b2991d88b25bb481e5ebb41fd9fa17b369c1ff Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Tue, 20 Feb 2024 11:59:00 -0500 Subject: [PATCH 08/23] joblib moved from "install_requires" to "extras_require".
--- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 13bddf50..eb3ca834 100644 --- a/setup.py +++ b/setup.py @@ -139,7 +139,7 @@ def build_extension(self, ext): super().build_extension(ext) -extras_require = {'dev': ['pytest>=6', 'torch', 'numdifftools', 'pre-commit']} +extras_require = {'dev': ['pytest>=6', 'torch', 'numdifftools', 'pre-commit', 'joblib']} algebra = os.environ.get('OSQP_ALGEBRA_BACKEND', 'builtin') assert algebra in ('builtin', 'mkl', 'cuda'), f'Unknown algebra {algebra}' @@ -166,7 +166,7 @@ def build_extension(self, ext): package_data={'osqp.codegen.pywrapper': ['*.jinja']}, include_package_data=True, zip_safe=False, - install_requires=['numpy>=1.7', 'scipy==1.10.1', 'qdldl', 'jinja2', 'joblib'], + install_requires=['numpy>=1.7', 'scipy==1.10.1', 'qdldl', 'jinja2'], python_requires='>=3.7', extras_require=extras_require, license='Apache 2.0', From 131398743fa9523b9dd7c321e9c3454aedeab0fb Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Tue, 20 Feb 2024 14:37:58 -0500 Subject: [PATCH 09/23] Changed scipy version to <1.12 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index eb3ca834..31e89d0d 100644 --- a/setup.py +++ b/setup.py @@ -166,7 +166,7 @@ def build_extension(self, ext): package_data={'osqp.codegen.pywrapper': ['*.jinja']}, include_package_data=True, zip_safe=False, - install_requires=['numpy>=1.7', 'scipy==1.10.1', 'qdldl', 'jinja2'], + install_requires=['numpy>=1.7', 'scipy<1.12', 'qdldl', 'jinja2'], python_requires='>=3.7', extras_require=extras_require, license='Apache 2.0', From 274cedb3e612b00d9aabf1942693162736982593 Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Tue, 20 Feb 2024 15:33:05 -0500 Subject: [PATCH 10/23] Added joblib to CIBW_TEST_REQUIRES --- .github/workflows/build_aarch64.yml | 2 +- .github/workflows/build_cuda.yml | 2 +- .github/workflows/build_default.yml | 2 +- .github/workflows/build_mkl.yml | 2 +- 4 
files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build_aarch64.yml b/.github/workflows/build_aarch64.yml index 990da1ab..44c2056f 100644 --- a/.github/workflows/build_aarch64.yml +++ b/.github/workflows/build_aarch64.yml @@ -41,7 +41,7 @@ jobs: CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_*" CIBW_ARCHS_LINUX: aarch64 CIBW_BEFORE_ALL: "yum -y update && yum install -y blas-devel lapack-devel" - CIBW_TEST_REQUIRES: "pytest torch numdifftools" + CIBW_TEST_REQUIRES: "pytest torch numdifftools joblib" CIBW_TEST_COMMAND: "python -m pytest -s {project}/src/osqp/tests" CIBW_ENVIRONMENT_LINUX: CMAKE_GENERATOR="Unix Makefiles" diff --git a/.github/workflows/build_cuda.yml b/.github/workflows/build_cuda.yml index aca2a6db..e34326a8 100644 --- a/.github/workflows/build_cuda.yml +++ b/.github/workflows/build_cuda.yml @@ -44,7 +44,7 @@ jobs: env: CIBW_BUILD: cp38-* cp39-* cp310-* CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_* *-macosx_*" - CIBW_TEST_REQUIRES: pytest torch numdifftools + CIBW_TEST_REQUIRES: pytest torch numdifftools joblib CIBW_ENVIRONMENT_LINUX: OSQP_ALGEBRA_BACKEND=cuda CMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc CIBW_BEFORE_ALL_LINUX: bash .github/workflows/prepare_build_environment_linux_cuda.sh diff --git a/.github/workflows/build_default.yml b/.github/workflows/build_default.yml index 1a83529c..25bfc691 100644 --- a/.github/workflows/build_default.yml +++ b/.github/workflows/build_default.yml @@ -44,7 +44,7 @@ jobs: env: CIBW_BUILD: cp38-* cp39-* cp310-* CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_*" - CIBW_TEST_REQUIRES: pytest torch numdifftools + CIBW_TEST_REQUIRES: pytest torch numdifftools joblib CIBW_TEST_COMMAND: "python -m pytest -s {project}/src/osqp/tests" - name: Build source diff --git a/.github/workflows/build_mkl.yml b/.github/workflows/build_mkl.yml index 2c8a304b..7e8f4c9d 100644 --- a/.github/workflows/build_mkl.yml +++ b/.github/workflows/build_mkl.yml @@ -45,7 +45,7 @@ jobs: env: CIBW_BUILD: 
cp38-* cp39-* cp310-* CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_*" - CIBW_TEST_REQUIRES: pytest torch numdifftools mkl mkl-devel + CIBW_TEST_REQUIRES: pytest torch numdifftools joblib mkl mkl-devel CIBW_BEFORE_ALL_LINUX: bash .github/workflows/prepare_build_environment_linux_mkl.sh CIBW_ENVIRONMENT_LINUX: "OSQP_ALGEBRA_BACKEND=mkl MKL_ROOT=/opt/intel/oneapi/mkl/latest" From 16deec0376d69135a4dcef9aaec515a5fd16b85c Mon Sep 17 00:00:00 2001 From: Vineet Bansal Date: Wed, 6 Mar 2024 14:36:06 -0500 Subject: [PATCH 11/23] removing py3.8 to debug build error --- .github/workflows/build_default.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_default.yml b/.github/workflows/build_default.yml index 25bfc691..f10bfb7d 100644 --- a/.github/workflows/build_default.yml +++ b/.github/workflows/build_default.yml @@ -42,7 +42,7 @@ jobs: with: output-dir: wheelhouse env: - CIBW_BUILD: cp38-* cp39-* cp310-* + CIBW_BUILD: cp39-* cp310-* CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_*" CIBW_TEST_REQUIRES: pytest torch numdifftools joblib CIBW_TEST_COMMAND: "python -m pytest -s {project}/src/osqp/tests" From 5bc4646fc6c91f04375203b16916d2cf3859777c Mon Sep 17 00:00:00 2001 From: Vineet Bansal Date: Wed, 6 Mar 2024 14:51:03 -0500 Subject: [PATCH 12/23] not pinning host python version; cp38 back in --- .github/workflows/build_default.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build_default.yml b/.github/workflows/build_default.yml index f10bfb7d..ef1fe298 100644 --- a/.github/workflows/build_default.yml +++ b/.github/workflows/build_default.yml @@ -32,17 +32,15 @@ jobs: run: | choco install windows-sdk-8.1 - - uses: actions/setup-python@v2 + - uses: actions/setup-python@v5 name: Install Python - with: - python-version: '3.9' - name: Build wheels uses: pypa/cibuildwheel@v2.3.1 with: output-dir: wheelhouse env: - CIBW_BUILD: cp39-* cp310-* + CIBW_BUILD: cp38-* cp39-* cp310-* 
CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_*" CIBW_TEST_REQUIRES: pytest torch numdifftools joblib CIBW_TEST_COMMAND: "python -m pytest -s {project}/src/osqp/tests" From 2d04f43350dbe42902db3584088dbc7178bb75fa Mon Sep 17 00:00:00 2001 From: Vineet Bansal Date: Wed, 6 Mar 2024 15:05:04 -0500 Subject: [PATCH 13/23] using latest cibuildwheel marketplace action --- .github/workflows/build_default.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_default.yml b/.github/workflows/build_default.yml index ef1fe298..60722431 100644 --- a/.github/workflows/build_default.yml +++ b/.github/workflows/build_default.yml @@ -36,7 +36,7 @@ jobs: name: Install Python - name: Build wheels - uses: pypa/cibuildwheel@v2.3.1 + uses: pypa/cibuildwheel@v2.16.5 with: output-dir: wheelhouse env: From e37bd6c16aee69147c98fcd1929d4c7a3e7d1b41 Mon Sep 17 00:00:00 2001 From: Vineet Bansal Date: Wed, 6 Mar 2024 16:20:12 -0500 Subject: [PATCH 14/23] is py38 the culprit here? 
--- .github/workflows/build_default.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_default.yml b/.github/workflows/build_default.yml index 60722431..54d5e7f6 100644 --- a/.github/workflows/build_default.yml +++ b/.github/workflows/build_default.yml @@ -40,7 +40,7 @@ jobs: with: output-dir: wheelhouse env: - CIBW_BUILD: cp38-* cp39-* cp310-* + CIBW_BUILD: cp39-* cp310-* CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_*" CIBW_TEST_REQUIRES: pytest torch numdifftools joblib CIBW_TEST_COMMAND: "python -m pytest -s {project}/src/osqp/tests" From 61d3b77565f86200965274c0e9001bf65893f3c7 Mon Sep 17 00:00:00 2001 From: Vineet Bansal Date: Wed, 6 Mar 2024 16:42:19 -0500 Subject: [PATCH 15/23] fewer build deps --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 774a0d30..19ac77e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["setuptools>=64", "wheel", "oldest-supported-numpy", "scipy>=0.13.2", "setuptools_scm>=6.2", "cmake>=3.18"] +requires = ["setuptools>=64", "setuptools_scm>=6.2", "cmake>=3.18"] build-backend = "setuptools.build_meta" [tool.setuptools_scm] From 3588b6d8f7ab125512a4f9a2db08a228f63f42d7 Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Mon, 11 Mar 2024 12:50:21 -0400 Subject: [PATCH 16/23] Merge changes from main --- .github/workflows/build_cuda.yml | 39 ++---- .github/workflows/build_default.yml | 32 ++--- .github/workflows/build_mkl.yml | 53 ++------- pyproject.toml | 56 ++++++++- setup.py | 177 ---------------------------- 5 files changed, 81 insertions(+), 276 deletions(-) delete mode 100644 setup.py diff --git a/.github/workflows/build_cuda.yml b/.github/workflows/build_cuda.yml index e34326a8..3139de9b 100644 --- a/.github/workflows/build_cuda.yml +++ b/.github/workflows/build_cuda.yml @@ -19,56 +19,37 @@ jobs: steps: - uses: actions/checkout@master - with: - submodules: 'recursive' - 
name: Add msbuild to PATH uses: microsoft/setup-msbuild@v1.0.2 - if: startsWith(matrix.os, 'windows') + if: runner.os == 'Windows' - name: Add Windows SDK shell: cmd - if: startsWith(matrix.os, 'windows') + if: runner.os == 'Windows' run: | choco install windows-sdk-8.1 - - uses: actions/setup-python@v2 - name: Install Python - with: - python-version: '3.9' - - - name: Build CUDA wheels - uses: pypa/cibuildwheel@v2.3.1 + - name: Build wheels + uses: pypa/cibuildwheel@v2.16 with: + package-dir: backend/cuda output-dir: wheelhouse env: - CIBW_BUILD: cp38-* cp39-* cp310-* + CIBW_BUILD: cp38-* cp39-* cp310-* cp311-* cp312-* CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_* *-macosx_*" - CIBW_TEST_REQUIRES: pytest torch numdifftools joblib + CIBW_TEST_REQUIRES: setuptools pytest torch numdifftools joblib - CIBW_ENVIRONMENT_LINUX: OSQP_ALGEBRA_BACKEND=cuda CMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc + CIBW_ENVIRONMENT_LINUX: CMAKE_CUDA_COMPILER=/usr/local/cuda-11.7/bin/nvcc CIBW_BEFORE_ALL_LINUX: bash .github/workflows/prepare_build_environment_linux_cuda.sh CIBW_REPAIR_WHEEL_COMMAND_LINUX: "" - CIBW_TEST_COMMAND_LINUX: "python -c \"from osqp import algebra_available; assert(algebra_available('cuda'))\"" - CIBW_ENVIRONMENT_WINDOWS: OSQP_ALGEBRA_BACKEND=cuda CMAKE_CUDA_COMPILER="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/bin/nvcc.exe" CUDA_TOOLKIT_ROOT_DIR="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6" + CIBW_ENVIRONMENT_WINDOWS: CMAKE_CUDA_COMPILER="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/bin/nvcc.exe" CUDA_TOOLKIT_ROOT_DIR="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6" CMAKE_GENERATOR_TOOLSET="cuda=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6" CIBW_BEFORE_ALL_WINDOWS: bash .github/workflows/prepare_build_environment_windows_cuda.sh CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: "" - CIBW_TEST_COMMAND_WINDOWS_TODO: "python -c \"from osqp import algebra_available; assert(algebra_available('cuda'))\"" - - - name: 
Release to pypi - # if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags') - if: ${{ false }} - env: - TWINE_USERNAME: __token__ - TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} - TWINE_REPOSITORY: testpypi - run: | - python -m pip install --upgrade twine - twine upload wheelhouse/* - name: Upload artifacts to github uses: actions/upload-artifact@v1 with: name: wheels - path: ./wheelhouse + path: ./wheelhouse \ No newline at end of file diff --git a/.github/workflows/build_default.yml b/.github/workflows/build_default.yml index 54d5e7f6..dba3b2e8 100644 --- a/.github/workflows/build_default.yml +++ b/.github/workflows/build_default.yml @@ -15,45 +15,29 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, windows-2022] + os: [ubuntu-latest, macos-latest, windows-2022] steps: - uses: actions/checkout@master - with: - submodules: 'recursive' - - - name: Add msbuild to PATH - uses: microsoft/setup-msbuild@v1.0.2 - if: startsWith(matrix.os, 'windows') - - - name: Add Windows SDK - shell: cmd - if: startsWith(matrix.os, 'windows') - run: | - choco install windows-sdk-8.1 - - - uses: actions/setup-python@v5 - name: Install Python - name: Build wheels - uses: pypa/cibuildwheel@v2.16.5 + uses: pypa/cibuildwheel@v2.16 with: output-dir: wheelhouse env: - CIBW_BUILD: cp39-* cp310-* + CIBW_BUILD: cp38-* cp39-* cp310-* cp311-* cp312-* CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_*" - CIBW_TEST_REQUIRES: pytest torch numdifftools joblib - CIBW_TEST_COMMAND: "python -m pytest -s {project}/src/osqp/tests" + CIBW_TEST_REQUIRES: setuptools pytest torch numdifftools joblib + CIBW_TEST_COMMAND: "python -m pytest -s {project}/src/osqp/tests -k \"not codegen\"" - name: Build source - if: startsWith(matrix.os, 'ubuntu') + if: runner.os == 'Linux' run: | python -m pip install --upgrade build python -m build --sdist --outdir wheelhouse - name: Release to pypi - # if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags') - if: ${{ 
false }} + if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags') env: TWINE_USERNAME: __token__ TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} @@ -66,4 +50,4 @@ jobs: uses: actions/upload-artifact@v1 with: name: wheels - path: ./wheelhouse + path: ./wheelhouse \ No newline at end of file diff --git a/.github/workflows/build_mkl.yml b/.github/workflows/build_mkl.yml index 7e8f4c9d..a06bd6e7 100644 --- a/.github/workflows/build_mkl.yml +++ b/.github/workflows/build_mkl.yml @@ -19,62 +19,31 @@ jobs: steps: - uses: actions/checkout@master - with: - submodules: 'recursive' - - - name: Add msbuild to PATH - uses: microsoft/setup-msbuild@v1.0.2 - if: startsWith(matrix.os, 'windows') - - - name: Add Windows SDK - shell: cmd - if: startsWith(matrix.os, 'windows') - run: | - choco install windows-sdk-8.1 - - - uses: actions/setup-python@v2 - name: Install Python - with: - python-version: '3.9' - - - name: Install cibuildwheel - run: python -m pip install cibuildwheel==2.12.3 - name: Build wheels - run: python -m cibuildwheel --output-dir wheelhouse + uses: pypa/cibuildwheel@v2.16 + with: + package-dir: backend/mkl + output-dir: wheelhouse env: - CIBW_BUILD: cp38-* cp39-* cp310-* + CIBW_BUILD: cp38-* cp39-* cp310-* cp311-* cp312-* CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_*" - CIBW_TEST_REQUIRES: pytest torch numdifftools joblib mkl mkl-devel + CIBW_TEST_REQUIRES: setuptools pytest torch numdifftools joblib mkl mkl-devel CIBW_BEFORE_ALL_LINUX: bash .github/workflows/prepare_build_environment_linux_mkl.sh - CIBW_ENVIRONMENT_LINUX: "OSQP_ALGEBRA_BACKEND=mkl MKL_ROOT=/opt/intel/oneapi/mkl/latest" - CIBW_TEST_COMMAND_LINUX: "LD_PRELOAD=$(dirname `which python`)/../lib/libmkl_core.so.2:$(dirname `which python`)/../lib/libmkl_sequential.so.2:$(dirname `which python`)/../lib/libmkl_intel_thread.so.2:$(dirname `which python`)/../lib/libmkl_intel_lp64.so.2 pytest -s {project}/src/osqp/tests" + CIBW_ENVIRONMENT_LINUX: "MKL_ROOT=/opt/intel/oneapi/mkl/latest" 
+ CIBW_REPAIR_WHEEL_COMMAND_LINUX: "" CIBW_BEFORE_ALL_MACOS: bash .github/workflows/prepare_build_environment_macos_mkl.sh - CIBW_ENVIRONMENT_MACOS: "OSQP_ALGEBRA_BACKEND=mkl MKL_ROOT=/opt/intel/oneapi/mkl/latest" + CIBW_ENVIRONMENT_MACOS: "MKL_ROOT=/opt/intel/oneapi/mkl/latest" CIBW_REPAIR_WHEEL_COMMAND_MACOS: "" - CIBW_TEST_COMMAND_MACOS: "pytest -s {project}/src/osqp/tests" CIBW_BEFORE_ALL_WINDOWS: bash .github/workflows/prepare_build_environment_windows_mkl.sh - CIBW_ENVIRONMENT_WINDOWS: OSQP_ALGEBRA_BACKEND=mkl MKL_ROOT="C:/Program Files (x86)/Intel/oneAPI/mkl/latest" MKL_DIR="C:/Program Files (x86)/Intel/oneAPI/mkl/latest/lib/cmake/mkl" + CIBW_ENVIRONMENT_WINDOWS: MKL_ROOT="C:/Program Files (x86)/Intel/oneAPI/mkl/latest" MKL_DIR="C:/Program Files (x86)/Intel/oneAPI/mkl/latest/lib/cmake/mkl" CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: "delvewheel repair {wheel} --wheel-dir {dest_dir} --no-mangle-all --add-path \"C:/Program Files (x86)/Intel/oneAPI/mkl/latest/redist/intel64\" --add-dll \"mkl_sequential.2.dll;mkl_def.2.dll;mkl_intel_thread.2.dll\"" - CIBW_TEST_COMMAND_WINDOWS: "python -c \"from osqp import algebra_available; assert(algebra_available('mkl'))\"" - CIBW_TEST_COMMAND_WINDOWS_TODO: "pytest -s {project}/src/osqp/tests" - - - name: Release to pypi - # if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags') - if: ${{ false }} - env: - TWINE_USERNAME: __token__ - TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} - TWINE_REPOSITORY: testpypi - run: | - python -m pip install --upgrade twine - twine upload wheelhouse/* - name: Upload artifacts to github uses: actions/upload-artifact@v1 with: name: wheels - path: ./wheelhouse + path: ./wheelhouse \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 19ac77e5..9051ec93 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,9 +1,57 @@ [build-system] -requires = ["setuptools>=64", "setuptools_scm>=6.2", "cmake>=3.18"] -build-backend = "setuptools.build_meta" +requires = 
["scikit-build-core", "pybind11"] +build-backend = "scikit_build_core.build" -[tool.setuptools_scm] -write_to = "src/osqp/_version.py" +[project] +name = "osqp" +dynamic = ["version"] +description = "OSQP: The Operator Splitting QP Solver" +readme = "README.rst" +requires-python = ">=3.7" +authors = [ + { name = "Bartolomeo Stellato", email = "bartolomeo.stellato@gmail.com" }, + { name = "Goran Banjac" }, +] +dependencies = [ + "jinja2", + "numpy>=1.7", + "qdldl", + "scipy>=0.13.2,<1.12.0", + "setuptools", +] + +[project.optional-dependencies] +cuda = [ + "osqp-cuda", +] +dev = [ + "numdifftools", + "pre-commit", + "pytest>=6", + "torch", +] +mkl = [ + "osqp-mkl", +] + +[project.urls] +Homepage = "https://osqp.org/" + + +[tool.scikit-build] +install.components = ["python", "codegen"] +metadata.version.provider = "scikit_build_core.metadata.setuptools_scm" +minimum-version = "0.8" +wheel.install-dir = "osqp" +sdist.include = ["src/osqp/_version.py"] + + +[tool.scikit-build.cmake.define] +OSQP_ALGEBRA_BACKEND = "builtin" +OSQP_EXT_MODULE_NAME = "ext_builtin" [tool.pytest.ini_options] testpaths = ["src/osqp/tests"] + +[tool.setuptools_scm] +write_to = "src/osqp/_version.py" \ No newline at end of file diff --git a/setup.py b/setup.py deleted file mode 100644 index 31e89d0d..00000000 --- a/setup.py +++ /dev/null @@ -1,177 +0,0 @@ -import os -import shutil -import sys -from glob import glob -from platform import system -from subprocess import check_call - -from distutils.sysconfig import get_python_inc -from setuptools import setup, find_namespace_packages, Extension -from setuptools.command.build_ext import build_ext -from setuptools.command.build_py import build_py - - -class CMakeExtension(Extension): - def __init__(self, name, sourcedir='', cmake_args=None): - Extension.__init__(self, name, sources=[]) - self.sourcedir = os.path.abspath(sourcedir) - self.cmake_args = cmake_args - - -class CustomBuildPy(build_py): - def run(self): - # Build all extensions first so 
that we generate codegen files in the build folder - # Note that each command like 'build_ext', is run once by setuptools, even if invoked multiple times. - self.run_command('build_ext') - - codegen_build_dir = None - for data_file in self.data_files: - package, src_dir, build_dir, filename = data_file - if package == 'osqp.codegen': - codegen_build_dir = build_dir - - if codegen_build_dir is not None: - for ext in self.distribution.ext_modules: - if hasattr(ext, 'codegen_dir'): - src_dirs = [] - build_dirs = [] - filenames = [] - for filepath in glob( - os.path.join(ext.codegen_dir, 'codegen_src/**'), - recursive=True, - ): - if os.path.isfile(filepath): - dirname = os.path.dirname(filepath) - dirpath = os.path.relpath(dirname, ext.codegen_dir) - src_dirs.append(os.path.join(ext.codegen_dir, dirpath)) - build_dirs.append(os.path.join(codegen_build_dir, dirpath)) - filenames.append(os.path.basename(filepath)) - - if filenames: - for src_dir, build_dir, filename in zip(src_dirs, build_dirs, filenames): - self.data_files.append( - ( - 'osqp.codegen', - src_dir, - build_dir, - [filename], - ) - ) - - super().run() - - -class CmdCMakeBuild(build_ext): - def run(self): - super().run() - # For editable installs, after the extension(s) have been built, copy the 'codegen_src' folder - # from the temporary build folder to the source folder - if self.editable_mode: - codegen_src_folder = os.path.join(self.build_temp, 'codegen_src') - codegen_target_folder = os.path.join('src', 'osqp', 'codegen', 'codegen_src') - if os.path.exists(codegen_src_folder): - if os.path.exists(codegen_target_folder): - shutil.rmtree(codegen_target_folder) - shutil.copytree(codegen_src_folder, codegen_target_folder) - - def build_extension(self, ext): - extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name))) - thisdir = os.path.dirname(os.path.abspath(__file__)) - cmake_args = [ - '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir, - '-DPYTHON=ON', - '-DPYTHON_EXECUTABLE=' + 
sys.executable, - f'-DPYTHON_INCLUDE_DIRS={get_python_inc()}', - '-DOSQP_BUILD_UNITTESTS=OFF', - '-DOSQP_USE_LONG=OFF', # https://github.com/numpy/numpy/issues/5906 - # https://github.com/ContinuumIO/anaconda-issues/issues/3823 - f'-DOSQP_CUSTOM_PRINTING={thisdir}/cmake/printing.h', - f'-DOSQP_CUSTOM_MEMORY={thisdir}/cmake/memory.h', - ] - - cfg = 'Debug' if self.debug else 'Release' - build_args = ['--config', cfg] - - if system() == 'Windows': - cmake_args += ['-G', 'Visual Studio 17 2022'] - # Finding the CUDA Toolkit on Windows seems to work reliably only if BOTH - # CMAKE_GENERATOR_TOOLSET (-T) and CUDA_TOOLKIT_ROOT_DIR are supplied to cmake - if 'CUDA_TOOLKIT_ROOT_DIR' in os.environ: - cuda_root = os.environ['CUDA_TOOLKIT_ROOT_DIR'] - cmake_args += ['-T', f'cuda={cuda_root}'] - cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)] - if sys.maxsize > 2**32: - cmake_args += ['-A', 'x64'] - build_args += ['--', '/m'] - else: - cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg] - build_args += ['--', '-j2'] - - if os.path.exists(self.build_temp): - shutil.rmtree(self.build_temp) - os.makedirs(self.build_temp) - - # Save the build folder as a custom attribute in the extension object, - # as we'll need it to package the codegen files as package_data later. - ext.codegen_dir = self.build_temp - - _ext_name = ext.name.split('.')[-1] - cmake_args.extend([f'-DOSQP_EXT_MODULE_NAME={_ext_name}']) - - # What variables from the environment do we wish to pass on to cmake as variables? 
- cmake_env_vars = ( - 'CMAKE_CUDA_COMPILER', - 'CUDA_TOOLKIT_ROOT_DIR', - 'MKL_DIR', - 'MKL_ROOT', - ) - for cmake_env_var in cmake_env_vars: - cmake_var = os.environ.get(cmake_env_var) - if cmake_var: - cmake_args.extend([f'-D{cmake_env_var}={cmake_var}']) - - if ext.cmake_args is not None: - cmake_args.extend(ext.cmake_args) - - check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp) - check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp) - - super().build_extension(ext) - - -extras_require = {'dev': ['pytest>=6', 'torch', 'numdifftools', 'pre-commit', 'joblib']} - -algebra = os.environ.get('OSQP_ALGEBRA_BACKEND', 'builtin') -assert algebra in ('builtin', 'mkl', 'cuda'), f'Unknown algebra {algebra}' -if algebra == 'builtin': - package_name = 'osqp' - ext_modules = [CMakeExtension('osqp.ext_builtin', cmake_args=['-DOSQP_ALGEBRA_BACKEND=builtin'])] - extras_require['mkl'] = ['osqp-mkl'] - extras_require['cuda'] = ['osqp-cuda'] -else: - package_name = f'osqp_{algebra}' - ext_modules = [CMakeExtension(f'osqp_{algebra}', cmake_args=[f'-DOSQP_ALGEBRA_BACKEND={algebra}'])] - - -setup( - name=package_name, - author='Bartolomeo Stellato, Goran Banjac', - author_email='bartolomeo.stellato@gmail.com', - description='OSQP: The Operator Splitting QP Solver', - long_description=open('README.rst').read(), - package_dir={'': 'src'}, - # package_data for 'osqp.codegen' is populated by CustomBuildPy to include codegen_src files - # after building extensions, so it should not be included here. - # It is however ok to specify package_data for submodules of 'osqp.codegen'. 
- package_data={'osqp.codegen.pywrapper': ['*.jinja']}, - include_package_data=True, - zip_safe=False, - install_requires=['numpy>=1.7', 'scipy<1.12', 'qdldl', 'jinja2'], - python_requires='>=3.7', - extras_require=extras_require, - license='Apache 2.0', - url='https://osqp.org/', - cmdclass={'build_ext': CmdCMakeBuild, 'build_py': CustomBuildPy}, - packages=find_namespace_packages(where='src'), - ext_modules=ext_modules, -) From 62a4b4094c7ede5dea8b96aed0283648f52d46db Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Mon, 11 Mar 2024 14:13:35 -0400 Subject: [PATCH 17/23] Minor linter changes --- src/osqp/nn/torch.py | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/src/osqp/nn/torch.py b/src/osqp/nn/torch.py index 734f4916..22784f5a 100644 --- a/src/osqp/nn/torch.py +++ b/src/osqp/nn/torch.py @@ -115,15 +115,15 @@ def forward(ctx, P_val, q_val, A_val, l_val, u_val): """ def _get_update_flag(n_batch: int) -> bool: - """ - This is a helper function that returns a flag if we need to update the solvers - or generate them. Raises an RuntimeError if the number of solvers is invalid. - """ - num_solvers = len(solvers) - if num_solvers not in (0, n_batch): - raise RuntimeError(f"Invalid number of solvers: expected 0 or {n_batch}," - f" but got {num_solvers}.") - return num_solvers==n_batch + """ + This is a helper function that returns a flag if we need to update the solvers + or generate them. Raises an RuntimeError if the number of solvers is invalid. 
+ """ + num_solvers = len(solvers) + if num_solvers not in (0, n_batch): + raise RuntimeError(f"Invalid number of solvers: expected 0 or {n_batch}," + f" but got {num_solvers}.") + return num_solvers == n_batch def _inner_solve(i, update_flag, q, l, u, P_val, P_idx, A_val, A_idx, solver_type, eps_abs, eps_rel): @@ -135,13 +135,14 @@ def _inner_solve(i, update_flag, q, l, u, P_val, P_idx, A_val, A_idx, solver_typ # TODO: Cache solver object in between # P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) if update_flag: - solver = solvers[i] - solver.update(q=q[i], l=l[i], u=u[i], Px=to_numpy(P_val[i]), Px_idx=P_idx, - Ax=to_numpy(A_val[i]), Ax_idx=A_idx) + solver = solvers[i] + solver.update(q=q[i], l=l[i], u=u[i], Px=to_numpy(P_val[i]), Px_idx=P_idx, + Ax=to_numpy(A_val[i]), Ax_idx=A_idx) else: P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) A = spa.csc_matrix((to_numpy(A_val[i]), A_idx), shape=A_shape) - solver = osqp.OSQP(algebra=algebra) #TODO: Deep copy when available + # TODO: Deep copy when available + solver = osqp.OSQP(algebra=algebra) solver.setup( P, q[i], @@ -162,7 +163,6 @@ def _inner_solve(i, update_flag, q, l, u, P_val, P_idx, A_val, A_idx, solver_typ return solver, result.x - params = [P_val, q_val, A_val, l_val, u_val] for p in params: @@ -198,10 +198,10 @@ def _inner_solve(i, update_flag, q, l, u, P_val, P_idx, A_val, A_idx, solver_typ update_flag = _get_update_flag(n_batch) n_jobs = multiprocessing.cpu_count() - res = Parallel(n_jobs=n_jobs, prefer="threads")(delayed(_inner_solve)(i=i, - update_flag=update_flag, q=q, l=l, u=u, P_val=P_val, P_idx=P_idx, - A_val=A_val, A_idx=A_idx, solver_type=solver_type, eps_abs=eps_abs, - eps_rel=eps_rel) for i in range(n_batch)) + res = Parallel(n_jobs=n_jobs, prefer="threads")(delayed(_inner_solve)(i=i, update_flag=update_flag, q=q, l=l, u=u, + P_val=P_val, P_idx=P_idx, A_val=A_val, A_idx=A_idx, + solver_type=solver_type, eps_abs=eps_abs, + eps_rel=eps_rel) for i in range(n_batch)) 
solvers_loop, x = zip(*res) for i in range(n_batch): if update_flag: @@ -255,7 +255,8 @@ def _loop_adjoint_derivative(solver, dl_dx): du = torch.zeros((n_batch, m), dtype=dtype, device=device) n_jobs = multiprocessing.cpu_count() - res = Parallel(n_jobs=n_jobs, prefer="threads")(delayed(_loop_adjoint_derivative)(solvers[i], dl_dx[i]) for i in range(n_batch)) + res = Parallel(n_jobs=n_jobs, prefer="threads")(delayed(_loop_adjoint_derivative)(solvers[i], dl_dx[i]) + for i in range(n_batch)) dq_vec, dl_vec, du_vec, dP_vec, dA_vec = zip(*res) for i in range(n_batch): dq[i] = dq_vec[i] From 2d236600b5fed231730a0984a00f997cbbd659b0 Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Mon, 11 Mar 2024 15:43:59 -0400 Subject: [PATCH 18/23] pre-commit --- .devcontainer/devcontainer.json | 2 +- .devcontainer/startup.sh | 2 +- .github/workflows/build_cuda.yml | 2 +- .github/workflows/build_default.yml | 2 +- .github/workflows/build_mkl.yml | 2 +- pyproject.toml | 2 +- src/osqp/nn/torch.py | 47 +++++++++++++++++++---------- 7 files changed, 37 insertions(+), 22 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index c0cd1961..2c3d4122 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -13,4 +13,4 @@ } } } -} \ No newline at end of file +} diff --git a/.devcontainer/startup.sh b/.devcontainer/startup.sh index f0961081..c66b7761 100644 --- a/.devcontainer/startup.sh +++ b/.devcontainer/startup.sh @@ -1,3 +1,3 @@ pip install -e . 
pip install pytest pre-commit -pre-commit install \ No newline at end of file +pre-commit install diff --git a/.github/workflows/build_cuda.yml b/.github/workflows/build_cuda.yml index 3139de9b..6ebef83e 100644 --- a/.github/workflows/build_cuda.yml +++ b/.github/workflows/build_cuda.yml @@ -52,4 +52,4 @@ jobs: uses: actions/upload-artifact@v1 with: name: wheels - path: ./wheelhouse \ No newline at end of file + path: ./wheelhouse diff --git a/.github/workflows/build_default.yml b/.github/workflows/build_default.yml index dba3b2e8..7c94887f 100644 --- a/.github/workflows/build_default.yml +++ b/.github/workflows/build_default.yml @@ -50,4 +50,4 @@ jobs: uses: actions/upload-artifact@v1 with: name: wheels - path: ./wheelhouse \ No newline at end of file + path: ./wheelhouse diff --git a/.github/workflows/build_mkl.yml b/.github/workflows/build_mkl.yml index a06bd6e7..fc6fec10 100644 --- a/.github/workflows/build_mkl.yml +++ b/.github/workflows/build_mkl.yml @@ -46,4 +46,4 @@ jobs: uses: actions/upload-artifact@v1 with: name: wheels - path: ./wheelhouse \ No newline at end of file + path: ./wheelhouse diff --git a/pyproject.toml b/pyproject.toml index 9051ec93..608d6162 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,4 +54,4 @@ OSQP_EXT_MODULE_NAME = "ext_builtin" testpaths = ["src/osqp/tests"] [tool.setuptools_scm] -write_to = "src/osqp/_version.py" \ No newline at end of file +write_to = "src/osqp/_version.py" diff --git a/src/osqp/nn/torch.py b/src/osqp/nn/torch.py index 22784f5a..3d479f79 100644 --- a/src/osqp/nn/torch.py +++ b/src/osqp/nn/torch.py @@ -121,12 +121,12 @@ def _get_update_flag(n_batch: int) -> bool: """ num_solvers = len(solvers) if num_solvers not in (0, n_batch): - raise RuntimeError(f"Invalid number of solvers: expected 0 or {n_batch}," - f" but got {num_solvers}.") + raise RuntimeError( + f'Invalid number of solvers: expected 0 or {n_batch},' f' but got {num_solvers}.' 
+ ) return num_solvers == n_batch - - def _inner_solve(i, update_flag, q, l, u, P_val, P_idx, A_val, A_idx, solver_type, - eps_abs, eps_rel): + + def _inner_solve(i, update_flag, q, l, u, P_val, P_idx, A_val, A_idx, solver_type, eps_abs, eps_rel): """ This inner function solves for each solver. update_flag has to be passed from outside to make sure it doesn't change during a parallel run. @@ -136,8 +136,9 @@ def _inner_solve(i, update_flag, q, l, u, P_val, P_idx, A_val, A_idx, solver_typ # P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) if update_flag: solver = solvers[i] - solver.update(q=q[i], l=l[i], u=u[i], Px=to_numpy(P_val[i]), Px_idx=P_idx, - Ax=to_numpy(A_val[i]), Ax_idx=A_idx) + solver.update( + q=q[i], l=l[i], u=u[i], Px=to_numpy(P_val[i]), Px_idx=P_idx, Ax=to_numpy(A_val[i]), Ax_idx=A_idx + ) else: P = spa.csc_matrix((to_numpy(P_val[i]), P_idx), shape=P_shape) A = spa.csc_matrix((to_numpy(A_val[i]), A_idx), shape=A_shape) @@ -161,8 +162,8 @@ def _inner_solve(i, update_flag, q, l, u, P_val, P_idx, A_val, A_idx, solver_typ # add some more options around potentially ignoring this. 
raise RuntimeError(f'Unable to solve QP, status: {status}') - return solver, result.x - + return solver, result.x + params = [P_val, q_val, A_val, l_val, u_val] for p in params: @@ -198,10 +199,23 @@ def _inner_solve(i, update_flag, q, l, u, P_val, P_idx, A_val, A_idx, solver_typ update_flag = _get_update_flag(n_batch) n_jobs = multiprocessing.cpu_count() - res = Parallel(n_jobs=n_jobs, prefer="threads")(delayed(_inner_solve)(i=i, update_flag=update_flag, q=q, l=l, u=u, - P_val=P_val, P_idx=P_idx, A_val=A_val, A_idx=A_idx, - solver_type=solver_type, eps_abs=eps_abs, - eps_rel=eps_rel) for i in range(n_batch)) + res = Parallel(n_jobs=n_jobs, prefer='threads')( + delayed(_inner_solve)( + i=i, + update_flag=update_flag, + q=q, + l=l, + u=u, + P_val=P_val, + P_idx=P_idx, + A_val=A_val, + A_idx=A_idx, + solver_type=solver_type, + eps_abs=eps_abs, + eps_rel=eps_rel, + ) + for i in range(n_batch) + ) solvers_loop, x = zip(*res) for i in range(n_batch): if update_flag: @@ -229,7 +243,7 @@ def _loop_adjoint_derivative(solver, dl_dx): dq, dl, du = [torch.from_numpy(d) for d in [dqi_np, dli_np, dui_np]] dP, dA = [torch.from_numpy(d.x) for d in [dPi_np, dAi_np]] return dq, dl, du, dP, dA - + dtype = dl_dx_val.dtype device = dl_dx_val.device @@ -255,8 +269,9 @@ def _loop_adjoint_derivative(solver, dl_dx): du = torch.zeros((n_batch, m), dtype=dtype, device=device) n_jobs = multiprocessing.cpu_count() - res = Parallel(n_jobs=n_jobs, prefer="threads")(delayed(_loop_adjoint_derivative)(solvers[i], dl_dx[i]) - for i in range(n_batch)) + res = Parallel(n_jobs=n_jobs, prefer='threads')( + delayed(_loop_adjoint_derivative)(solvers[i], dl_dx[i]) for i in range(n_batch) + ) dq_vec, dl_vec, du_vec, dP_vec, dA_vec = zip(*res) for i in range(n_batch): dq[i] = dq_vec[i] From eb9e0f0993b4b74cf7028b7458aee22b77354879 Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Thu, 14 Mar 2024 10:55:08 -0400 Subject: [PATCH 19/23] Removed .devcontainer --- .devcontainer/devcontainer.json | 16 
---------------- .devcontainer/startup.sh | 3 --- 2 files changed, 19 deletions(-) delete mode 100644 .devcontainer/devcontainer.json delete mode 100644 .devcontainer/startup.sh diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json deleted file mode 100644 index 2c3d4122..00000000 --- a/.devcontainer/devcontainer.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "name": "Python 3", - "image": "mcr.microsoft.com/devcontainers/python:0-3.10", - "onCreateCommand": "bash .devcontainer/startup.sh", - "customizations": { - "vscode": { - "settings": { - "python.testing.pytestEnabled": true, - "python.testing.unittestEnabled": false, - "python.testing.pytestArgs": [ - "cvxpy" - ] - } - } - } -} diff --git a/.devcontainer/startup.sh b/.devcontainer/startup.sh deleted file mode 100644 index c66b7761..00000000 --- a/.devcontainer/startup.sh +++ /dev/null @@ -1,3 +0,0 @@ -pip install -e . -pip install pytest pre-commit -pre-commit install From e4eefcce648e6a9f6f024b761a2794e3bda133e0 Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Thu, 14 Mar 2024 11:04:58 -0400 Subject: [PATCH 20/23] Adding joblib as a dependency; removing it as test dependency --- .github/workflows/build_aarch64.yml | 2 +- .github/workflows/build_cuda.yml | 2 +- .github/workflows/build_default.yml | 2 +- .github/workflows/build_mkl.yml | 2 +- pyproject.toml | 1 + 5 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build_aarch64.yml b/.github/workflows/build_aarch64.yml index da335550..1f795f80 100644 --- a/.github/workflows/build_aarch64.yml +++ b/.github/workflows/build_aarch64.yml @@ -34,7 +34,7 @@ jobs: CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_*" CIBW_ARCHS_LINUX: aarch64 CIBW_BEFORE_ALL: "yum -y update && yum install -y blas-devel lapack-devel" - CIBW_TEST_REQUIRES: "pytest torch numdifftools joblib" + CIBW_TEST_REQUIRES: "pytest torch numdifftools" CIBW_TEST_COMMAND: "python -m pytest -s {project}/src/osqp/tests" CIBW_ENVIRONMENT_LINUX: 
CMAKE_GENERATOR="Unix Makefiles" diff --git a/.github/workflows/build_cuda.yml b/.github/workflows/build_cuda.yml index 6ebef83e..881f9cca 100644 --- a/.github/workflows/build_cuda.yml +++ b/.github/workflows/build_cuda.yml @@ -38,7 +38,7 @@ jobs: env: CIBW_BUILD: cp38-* cp39-* cp310-* cp311-* cp312-* CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_* *-macosx_*" - CIBW_TEST_REQUIRES: setuptools pytest torch numdifftools joblib + CIBW_TEST_REQUIRES: setuptools pytest torch numdifftools CIBW_ENVIRONMENT_LINUX: CMAKE_CUDA_COMPILER=/usr/local/cuda-11.7/bin/nvcc CIBW_BEFORE_ALL_LINUX: bash .github/workflows/prepare_build_environment_linux_cuda.sh diff --git a/.github/workflows/build_default.yml b/.github/workflows/build_default.yml index 7c94887f..1cea5555 100644 --- a/.github/workflows/build_default.yml +++ b/.github/workflows/build_default.yml @@ -27,7 +27,7 @@ jobs: env: CIBW_BUILD: cp38-* cp39-* cp310-* cp311-* cp312-* CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_*" - CIBW_TEST_REQUIRES: setuptools pytest torch numdifftools joblib + CIBW_TEST_REQUIRES: setuptools pytest torch numdifftools CIBW_TEST_COMMAND: "python -m pytest -s {project}/src/osqp/tests -k \"not codegen\"" - name: Build source diff --git a/.github/workflows/build_mkl.yml b/.github/workflows/build_mkl.yml index fc6fec10..d9efc664 100644 --- a/.github/workflows/build_mkl.yml +++ b/.github/workflows/build_mkl.yml @@ -28,7 +28,7 @@ jobs: env: CIBW_BUILD: cp38-* cp39-* cp310-* cp311-* cp312-* CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_*" - CIBW_TEST_REQUIRES: setuptools pytest torch numdifftools joblib mkl mkl-devel + CIBW_TEST_REQUIRES: setuptools pytest torch numdifftools mkl mkl-devel CIBW_BEFORE_ALL_LINUX: bash .github/workflows/prepare_build_environment_linux_mkl.sh CIBW_ENVIRONMENT_LINUX: "MKL_ROOT=/opt/intel/oneapi/mkl/latest" diff --git a/pyproject.toml b/pyproject.toml index 608d6162..94817a9d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,6 +18,7 @@ dependencies = [ "qdldl", 
"scipy>=0.13.2,<1.12.0", "setuptools", + "joblib", ] [project.optional-dependencies] From 31ce9b2a3aa32b69b9567f5d652a744bf5f8ceef Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Thu, 14 Mar 2024 11:31:24 -0400 Subject: [PATCH 21/23] Fixed GIT_TAG on the cmake file to 02a117cfc8ad21b06c2596603a2046ee61c82786 (from master) --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 93a847a5..77d033a4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -17,7 +17,7 @@ list(APPEND CMAKE_MESSAGE_INDENT " ") FetchContent_Declare( osqp GIT_REPOSITORY https://github.com/osqp/osqp.git - GIT_TAG vb/henryiii/skbuild + GIT_TAG 02a117cfc8ad21b06c2596603a2046ee61c82786 ) list(POP_BACK CMAKE_MESSAGE_INDENT) FetchContent_MakeAvailable(osqp) From 06a8375fd7b1ae02fdb9aa5089d9638144bbd838 Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Thu, 14 Mar 2024 12:21:47 -0400 Subject: [PATCH 22/23] Check if torch exists before importing --- src/osqp/nn/torch.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/osqp/nn/torch.py b/src/osqp/nn/torch.py index 3d479f79..b92c48db 100644 --- a/src/osqp/nn/torch.py +++ b/src/osqp/nn/torch.py @@ -1,8 +1,11 @@ import numpy as np import scipy.sparse as spa -import torch -from torch.nn import Module -from torch.autograd import Function +try: + import torch + from torch.nn import Module + from torch.autograd import Function +except ImportError as e: + print(f"Import Error: {e}") from joblib import Parallel, delayed import multiprocessing From 00af17ba759b1f68db01519b2c2ddb02f64e50e6 Mon Sep 17 00:00:00 2001 From: Amit Solomon Date: Thu, 14 Mar 2024 12:31:30 -0400 Subject: [PATCH 23/23] Minor linter change --- src/osqp/nn/torch.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/osqp/nn/torch.py b/src/osqp/nn/torch.py index b92c48db..fda1dbc8 100644 --- a/src/osqp/nn/torch.py +++ b/src/osqp/nn/torch.py @@ -1,11 +1,12 @@ import numpy as np 
import scipy.sparse as spa + try: import torch from torch.nn import Module from torch.autograd import Function except ImportError as e: - print(f"Import Error: {e}") + print(f'Import Error: {e}') from joblib import Parallel, delayed import multiprocessing