Release v2.3.0 #118

Merged · 3 commits · Sep 3, 2024
9 changes: 6 additions & 3 deletions CHANGELOG.md
@@ -4,15 +4,17 @@ All notable changes to this project will be documented in this file.

## [Unreleased]

+## [2.3.0] - 2024-09-03
+
### Added

- Add `results_path` argument to the `main` function
-- Results: Handle loading/saving CSV files with out-of-test-set problems
+- Results: handle loading/saving CSV files with out-of-test-set problems
- Special handling to get CVXOPT version number properly

### Changed

-- CLI: Make results path an argument to all commands
+- CLI: make results path an argument to all commands
- Rename "include timeouts" argument to "rerun timeouts"

## [2.2.3] - 2024-08-14
@@ -190,7 +192,8 @@ All notable changes to this project will be documented in this file.
- SolverSettings class
- TestSet class

-[unreleased]: https://github.com/qpsolvers/qpbenchmark/compare/v2.2.3...HEAD
+[unreleased]: https://github.com/qpsolvers/qpbenchmark/compare/v2.3.0...HEAD
+[2.3.0]: https://github.com/qpsolvers/qpbenchmark/compare/v2.2.3...v2.3.0
[2.2.3]: https://github.com/qpsolvers/qpbenchmark/compare/v2.2.2...v2.2.3
[2.2.2]: https://github.com/qpsolvers/qpbenchmark/compare/v2.2.1...v2.2.2
[2.2.1]: https://github.com/qpsolvers/qpbenchmark/compare/v2.2.0...v2.2.1
4 changes: 2 additions & 2 deletions CITATION.cff
@@ -1,8 +1,8 @@
cff-version: 1.2.0
message: "If you find this code helpful, please cite it as below."
title: "qpbenchmark: Benchmark for quadratic programming solvers available in Python"
-version: 2.2.3
-date-released: 2024-08-14
+version: 2.3.0
+date-released: 2024-09-03
url: "https://github.com/qpsolvers/qpbenchmark"
license: "Apache-2.0"
authors:
4 changes: 2 additions & 2 deletions README.md
@@ -1,6 +1,6 @@
# QP solvers benchmark

-[![CI](https://github.com/qpsolvers/qpbenchmark/actions/workflows/ci.yml/badge.svg)](https://github.com/qpsolvers/qpbenchmark/actions/workflows/ci.yml)
+[![CI](https://img.shields.io/github/actions/workflow/status/qpsolvers/qpbenchmark/ci.yml?branch=main)](https://github.com/qpsolvers/qpbenchmark/actions)
[![Coverage](https://coveralls.io/repos/github/qpsolvers/qpbenchmark/badge.svg?branch=main)](https://coveralls.io/github/qpsolvers/qpbenchmark?branch=main)
[![Conda version](https://img.shields.io/conda/vn/conda-forge/qpbenchmark.svg)](https://anaconda.org/conda-forge/qpbenchmark)
[![PyPI version](https://img.shields.io/pypi/v/qpbenchmark)](https://pypi.org/project/qpbenchmark/)
@@ -131,7 +131,7 @@ If you use `qpbenchmark` in your works, please cite all its contributors as foll
author = {Caron, Stéphane and Zaki, Akram and Otta, Pavel and Arnström, Daniel and Carpentier, Justin and Yang, Fengyu and Leziart, Pierre-Alexandre},
url = {https://github.com/qpsolvers/qpbenchmark},
license = {Apache-2.0},
-version = {2.2.3},
+version = {2.3.0},
year = {2024}
}
```
2 changes: 1 addition & 1 deletion environment.yaml
@@ -14,7 +14,7 @@ dependencies:
  - pip
  - piqp >= 0.2.2
  - proxsuite >= 0.4.1
-  - qpbenchmark >= 2.2.3
+  - qpbenchmark >= 2.3.0
  - qpoases >= 3.2.0
  - quadprog >= 0.1.11
  - pip:
9 changes: 6 additions & 3 deletions qpbenchmark/benchmark.py
@@ -15,6 +15,7 @@
from importlib import import_module # type: ignore
from typing import Optional

+from .exceptions import BenchmarkError
from .plot_metric import plot_metric
from .report import Report
from .results import Results
@@ -196,6 +197,8 @@ def report(args, results: Results, test_set_path: str):
        else input("GitHub username to write in the report? ")
    )
    report = Report(author, results)
+    if results.csv_path is None:
+        raise BenchmarkError("not sure where to save report: no results file")
    results_dir = os.path.dirname(results.csv_path)
    test_set_name = os.path.basename(test_set_path).replace(".py", "")
    md_path = f"{results_dir}/{test_set_name}.md"
@@ -212,15 +215,15 @@ def main(
        test_set_path: If set, load test set from this Python file.
        results_path: Path to the results CSV file.
    """
-    assert test_set_path.endswith(".py")
-    assert results_path.endswith(".csv")
+    assert test_set_path is None or test_set_path.endswith(".py")
+    assert results_path is None or results_path.endswith(".csv")
    args = parse_command_line_arguments(test_set_path)
    if test_set_path is None:
        test_set_path = args.test_set_path
    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    test_set = load_test_set(os.path.abspath(test_set_path))
-    results = Results(args.results_path or results_path, test_set)
+    results = Results(results_path or args.results_path, test_set)

    if args.command == "run":
        run(
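
Reviewer note: a minimal sketch of how a downstream test-set repository could call the relaxed `main` signature from this diff. The entry-point script and file paths below are hypothetical, not part of this PR; subcommands such as `run` are still read from the command line by `parse_command_line_arguments`.

```python
"""Hypothetical downstream entry point exercising the new keyword arguments."""

from qpbenchmark.benchmark import main

if __name__ == "__main__":
    # Both arguments may be omitted (None) and fall back to command-line values;
    # when given, they must still end in ".py" and ".csv" respectively.
    main(
        test_set_path="my_test_set.py",          # hypothetical test-set module
        results_path="results/my_test_set.csv",  # pins results to this CSV file
    )
```
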
23 changes: 15 additions & 8 deletions qpbenchmark/results.py
@@ -7,7 +7,7 @@
"""Test case results."""

import os.path
-from typing import Dict, Tuple
+from typing import Dict, Optional, Tuple

import numpy as np
import pandas
@@ -29,7 +29,7 @@ class Results:
        test_set: Test set from which results were produced.
    """

-    csv_path: str
+    csv_path: Optional[str]
    df: pandas.DataFrame
    test_set: TestSet

@@ -43,7 +43,7 @@ def check_df(df) -> None:
        if not isinstance(df["found"].dtype, np.dtypes.BoolDType):
            raise ResultsError('"found" column has some non-boolean values')

-    def __init__(self, csv_path: str, test_set: TestSet):
+    def __init__(self, csv_path: Optional[str], test_set: TestSet):
        """Initialize results.

        Args:
@@ -74,7 +74,7 @@ def __init__(self, csv_path: str, test_set: TestSet):
"duality_gap": float,
}
)
-        if os.path.exists(csv_path):
+        if csv_path is not None and os.path.exists(csv_path):
            logging.info(f"Loading existing results from {csv_path}")
            df = pandas.concat([df, pandas.read_csv(csv_path)])
            Results.check_df(df)
@@ -89,12 +89,19 @@ def __init__(self, csv_path: str, test_set: TestSet):
        self.df = test_set_df
        self.test_set = test_set

-    def write(self) -> None:
-        """Write results to their CSV file for persistence."""
-        logging.debug(f"Test set results written to {self.csv_path}")
+    def write(self, path: Optional[str] = None) -> None:
+        """Write results to their CSV file for persistence.
+
+        Args:
+            path: Optional path to a separate file to write to.
+        """
+        save_path: Optional[str] = path or self.csv_path
+        if save_path is None:
+            raise BenchmarkError("no path to save results to")
+        logging.debug(f"Test set results written to {save_path}")
        save_df = pandas.concat([self.df, self.__complementary_df])
        save_df = save_df.sort_values(by=["problem", "solver", "settings"])
-        save_df.to_csv(self.csv_path, index=False)
+        save_df.to_csv(save_path, index=False)

    def has(self, problem: Problem, solver: str, settings: str) -> bool:
        """Check if results contain a given run of a solver on a problem.
2 changes: 1 addition & 1 deletion qpbenchmark/version.py
@@ -9,4 +9,4 @@

def get_version() -> str:
"""Get benchmark version as a string."""
return "2.2.3"
return "2.3.0"