diff --git a/CLAUDE.md b/CLAUDE.md index c2bb12cf..67155ae3 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -6,7 +6,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co ### Running Tests ```bash -# Run all tests +# Run all tests (excluding GPU tests by default) pytest # Run tests with coverage @@ -17,8 +17,16 @@ pytest test/test_model.py # Run a specific test function pytest test/test_model.py::test_model_creation + +# Run GPU tests (requires GPU hardware and cuPDLPx installation) +pytest --run-gpu + +# Run only GPU tests +pytest -m gpu --run-gpu ``` +**GPU Testing**: Tests that require GPU hardware (e.g., cuPDLPx solver) are automatically skipped by default since CI machines typically don't have GPUs. To run GPU tests locally, use the `--run-gpu` flag. The tests are automatically marked with `@pytest.mark.gpu` based on solver capabilities. + ### Linting and Type Checking ```bash # Run linter (ruff) diff --git a/README.md b/README.md index 3cd19df8..644b556c 100644 --- a/README.md +++ b/README.md @@ -149,6 +149,7 @@ Fri 0 4 * [Cplex](https://www.ibm.com/de-de/analytics/cplex-optimizer) * [MOSEK](https://www.mosek.com/) * [COPT](https://www.shanshu.ai/copt) +* [cuPDLPx](https://github.com/MIT-Lu-Lab/cuPDLPx) Note that these do have to be installed by the user separately. diff --git a/doc/contributing.rst b/doc/contributing.rst index 29a43a6d..02162694 100644 --- a/doc/contributing.rst +++ b/doc/contributing.rst @@ -10,6 +10,9 @@ contributing code. You are invited to submit pull requests / issues to our `Github repository `_. +Development Setup +================= + For linting, formatting and checking your code contributions against our guidelines (e.g. we use `Black `_ as code style and use `pre-commit `_: @@ -19,6 +22,52 @@ and use `pre-commit `_: * To automatically activate ``pre-commit`` on every ``git commit``: Run ``pre-commit install`` * To manually run it: ``pre-commit run --all`` +Running Tests +============= + +Testing is essential for maintaining code quality. We use pytest as our testing framework. + +Basic Testing +------------- + +To run the test suite: + +.. code-block:: bash + + # Install development dependencies + pip install -e .[dev,solvers] + + # Run all tests + pytest + + # Run tests with coverage + pytest --cov=./ --cov-report=xml linopy --doctest-modules test + + # Run a specific test file + pytest test/test_model.py + + # Run a specific test function + pytest test/test_model.py::test_model_creation + +GPU Testing +----------- + +Tests for GPU-accelerated solvers (e.g., cuPDLPx) are automatically skipped by default since CI machines and most development environments don't have GPU hardware. This ensures tests pass in all environments. + +To run GPU tests locally (requires GPU hardware and CUDA): + +.. code-block:: bash + + # Run all tests including GPU tests + pytest --run-gpu + + # Run only GPU tests + pytest -m gpu --run-gpu + +GPU tests are automatically detected based on solver capabilities - no manual marking is required. When you add a new GPU solver to linopy, tests using that solver will automatically be marked as GPU tests. + +See the :doc:`gpu-acceleration` guide for more information about GPU solver setup and usage. + Contributing examples ===================== diff --git a/doc/gpu-acceleration.rst b/doc/gpu-acceleration.rst new file mode 100644 index 00000000..a9048973 --- /dev/null +++ b/doc/gpu-acceleration.rst @@ -0,0 +1,117 @@ +======================== +GPU-Accelerated Solving +======================== + +.. 
warning:: + + This feature is **experimental** and not tested in CI due to the lack of GPU-enabled machines. Use with caution and please report any issues. + +Linopy supports GPU-accelerated optimization solvers that can significantly speed up solving large-scale linear programming problems by leveraging the parallel processing capabilities of modern GPUs. + +Supported GPU Solvers +===================== + +cuPDLPx +------- + +`cuPDLPx `_ is an open-source, GPU-accelerated first-order solver developed by MIT. It implements a Primal-Dual hybrid gradient (PDHG) method optimized for GPUs. + +To install it, you have to have the `CUDA Toolkit `_ installed requiring NVIDIA GPUs on your computer. Then, install with + +.. code-block:: bash + + # Install CUDA Toolkit first (if not already installed) + # Follow instructions at: https://developer.nvidia.com/cuda-downloads + + # Install cuPDLPx + pip install cupdlpx>=0.1.2 + +**Features:** + +- GPU-accelerated solving for large-scale linear programs +- Open source (Apache 2.0 license) +- Direct API integration with linopy +- Designed for problems with millions of variables and constraints + +**Limitations:** + +- Currently supports only Linear Programming (LP) +- Does not support Mixed-Integer Programming (MIP) or Quadratic Programming (QP) +- Lower numerical precision compared to CPU solvers (typical tolerance: ~2.5e-4 vs 1e-5) +- File I/O not currently supported through cuPDLPx API + +For a complete list of cuPDLPx parameters, see the `cuPDLPx documentation `_. + +Xpress with GPU Acceleration +----------------------------- + +`FICO Xpress `_ version 9.8 and later includes GPU acceleration support for certain operations. + +**Features:** + +- Commercial solver with GPU support +- Supports LP, MIP, and QP +- Full-precision solving + +Prerequisites +============= + +Hardware Requirements +--------------------- + +GPU solvers require: + +- NVIDIA GPU with CUDA support (compute capability 6.0 or higher recommended) +- Sufficient GPU memory for your problem size (varies by problem) +- PCIe 3.0 or higher for optimal data transfer + +Software Requirements +--------------------- + +1. **CUDA Toolkit**: Most GPU solvers require CUDA 11.0 or later +2. **Compatible GPU drivers**: Match your CUDA version + +Verifying Installation +====================== + +To verify that the GPU solvers are properly installed and detected: + +.. code-block:: python + + import linopy + from linopy.solver_capabilities import ( + SolverFeature, + get_available_solvers_with_feature, + ) + + # Check available solvers + print("All available solvers:", linopy.available_solvers) + + # Check GPU-accelerated solvers + gpu_solvers = get_available_solvers_with_feature( + SolverFeature.GPU_ACCELERATION, linopy.available_solvers + ) + print("GPU solvers:", gpu_solvers) + + +By default, GPU tests are skipped in the test suite to support CI environments without GPUs. To run GPU tests locally: + +.. 
code-block:: bash + + # Run all tests including GPU tests + pytest --run-gpu + + # Run only GPU tests + pytest -m gpu --run-gpu + + # Run specific GPU test + pytest test/test_optimization.py -k cupdlpx --run-gpu + + +References +========== + +- `cuPDLPx Repository `_ +- `cuPDLPx Python Documentation `_ +- `CUDA Installation Guide `_ +- `NVIDIA GPU Computing Resources `_ diff --git a/doc/index.rst b/doc/index.rst index 4825722e..bff9fa65 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -37,7 +37,8 @@ flexible data-handling features: - Use **lazy operations** for large linear programs with `dask `__ - Choose from **different commercial and non-commercial solvers** -- Fast **import and export** a linear model using xarray’s netcdf IO +- Fast **import and export** a linear model using xarray's netcdf IO +- Support for **GPU-accelerated solving** for large-scale problems - Support of various solvers - `Cbc `__ - `GLPK `__ @@ -48,6 +49,7 @@ flexible data-handling features: - `Cplex `__ - `MOSEK `__ - `COPT `__ + - `cuPDLPx `__ (GPU-accelerated) @@ -116,6 +118,7 @@ This package is published under MIT license. infeasible-model solve-on-remote solve-on-oetc + gpu-acceleration migrating-from-pyomo gurobi-double-logging diff --git a/doc/prerequisites.rst b/doc/prerequisites.rst index 88c95ba3..23b17897 100644 --- a/doc/prerequisites.rst +++ b/doc/prerequisites.rst @@ -30,11 +30,14 @@ Install a solver Linopy won't work without a solver. Currently, the following solvers are supported: +CPU-based solvers +~~~~~~~~~~~~~~~~~ + - `Cbc `__ - open source, free, fast - `GLPK `__ - open source, free, not very fast - `HiGHS `__ - open source, free, fast - `Gurobi `__ - closed source, commercial, very fast -- `Xpress `__ - closed source, commercial, very fast +- `Xpress `__ - closed source, commercial, very fast (GPU acceleration available in v9.8+) - `Cplex `__ - closed source, commercial, very fast - `MOSEK `__ - `MindOpt `__ - @@ -54,6 +57,20 @@ We recommend to install the HiGHS solver if possible, which is free and open sou pip install highspy +GPU-accelerated solvers +~~~~~~~~~~~~~~~~~~~~~~~ + +For large-scale optimization problems, GPU-accelerated solvers can provide significant performance improvements: + +- `cuPDLPx `__ - open source, GPU-accelerated first-order solver + +**Note:** GPU solvers require compatible NVIDIA GPU hardware and CUDA installation. See the :doc:`gpu-acceleration` guide for detailed setup instructions. + +.. code:: bash + + pip install cupdlpx + + For most of the other solvers, please click on the links to get further installation information. 
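
> **Editor's note (usage sketch):** The documentation changes above (install `cupdlpx`, check `linopy.available_solvers`, prefer `io_api="direct"`) come together in the minimal sketch below. The model and numbers are purely illustrative; it assumes `cupdlpx>=0.1.2` is installed on a machine with a CUDA-capable NVIDIA GPU, and falls back to the default CPU solver otherwise.

```python
import linopy

# tiny LP: minimize x + y  s.t.  2x + y >= 10,  x + 3y >= 15,  x, y >= 0
m = linopy.Model()
x = m.add_variables(lower=0, name="x")
y = m.add_variables(lower=0, name="y")
m.add_constraints(2 * x + y >= 10)
m.add_constraints(x + 3 * y >= 15)
m.add_objective(x + y)

if "cupdlpx" in linopy.available_solvers:
    # "direct" hands the canonical matrices straight to cuPDLPx (no file round-trip);
    # expect the optimum (x=3, y=4, objective 7) only up to ~2.5e-4 tolerance.
    status, condition = m.solve("cupdlpx", io_api="direct")
else:
    status, condition = m.solve()  # whichever CPU solver is installed

print(status, condition, m.objective.value)
```
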
diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 3212cb72..f7eb925b 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -10,6 +10,7 @@ Release Notes * Fix compatibility for xpress versions below 9.6 (regression) * Performance: Up to 50x faster ``repr()`` for variables/constraints via O(log n) label lookup and direct numpy indexing * Performance: Up to 46x faster ``ncons`` property by replacing ``.flat.labels.unique()`` with direct counting +* Add support for GPU-accelerated solver [cuPDLPx](https://github.com/MIT-Lu-Lab/cuPDLPx) Version 0.5.8 -------------- diff --git a/linopy/io.py b/linopy/io.py index bc47f593..3572422e 100644 --- a/linopy/io.py +++ b/linopy/io.py @@ -8,6 +8,7 @@ import logging import shutil import time +import warnings from collections.abc import Callable from io import BufferedWriter from pathlib import Path @@ -28,6 +29,7 @@ from linopy.objective import Objective if TYPE_CHECKING: + from cupdlpx import Model as cupdlpxModel from highspy.highs import Highs from linopy.model import Model @@ -852,6 +854,72 @@ def to_highspy(m: Model, explicit_coordinate_names: bool = False) -> Highs: return h +def to_cupdlpx(m: Model, explicit_coordinate_names: bool = False) -> cupdlpxModel: + """ + Export the model to cupdlpx. + + This function does not write the model to intermediate files but directly + passes it to cupdlpx. + + cuPDLPx does not support named variables and constraints, so the + `explicit_coordinate_names` parameter is ignored. + + Parameters + ---------- + m : linopy.Model + explicit_coordinate_names : bool, optional + Ignored. cuPDLPx does not support named variables/constraints. + + Returns + ------- + model : cupdlpx.Model + """ + import cupdlpx + + if explicit_coordinate_names: + warnings.warn( + "cuPDLPx does not support named variables/constraints. " + "The explicit_coordinate_names parameter is ignored.", + UserWarning, + stacklevel=2, + ) + + # build model using canonical form matrices and vectors + # see https://github.com/MIT-Lu-Lab/cuPDLPx/tree/main/python#modeling + M = m.matrices + if M.A is None: + msg = "Model has no constraints, cannot export to cuPDLPx." + raise ValueError(msg) + A = M.A.tocsr() # cuPDLPx only supports CSR sparse matrix format + # linopy stores constraints as Ax ?= b and keeps track of inequality + # sense in M.sense. Convert to separate lower and upper bound vectors. + l = np.where( + np.logical_or(np.equal(M.sense, ">"), np.equal(M.sense, "=")), + M.b, + -np.inf, + ) + u = np.where( + np.logical_or(np.equal(M.sense, "<"), np.equal(M.sense, "=")), + M.b, + np.inf, + ) + + cu_model = cupdlpx.Model( + objective_vector=M.c, + constraint_matrix=A, + constraint_lower_bound=l, + constraint_upper_bound=u, + variable_lower_bound=M.lb, + variable_upper_bound=M.ub, + ) + + # change objective sense + if m.objective.sense == "max": + cu_model.ModelSense = cupdlpx.PDLP.MAXIMIZE + + return cu_model + + def to_block_files(m: Model, fn: Path) -> None: """ Write out the linopy model to a block structured output. 
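
> **Editor's note (worked example):** The sense-to-bounds conversion is the core of `to_cupdlpx` above: linopy stores each constraint row as `A x {<, >, =} b`, while cuPDLPx wants `l <= A x <= u`. The toy sketch below restates that mapping with `np.isin` (equivalent to the `np.where`/`np.logical_or` form in the diff) so the conversion is easy to verify by hand.

```python
import numpy as np

# one ">=" row, one "<=" row, one "==" row
sense = np.array([">", "<", "="])
b = np.array([1.0, 4.0, 7.0])

# ">=" and "==" rows set a finite lower bound; "<=" and "==" rows set a finite upper bound
lower = np.where(np.isin(sense, (">", "=")), b, -np.inf)
upper = np.where(np.isin(sense, ("<", "=")), b, np.inf)

print(lower)  # [1., -inf, 7.]
print(upper)  # [inf, 4., 7.]
```
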
diff --git a/linopy/model.py b/linopy/model.py index f41d91ba..657b2d45 100644 --- a/linopy/model.py +++ b/linopy/model.py @@ -50,6 +50,7 @@ ) from linopy.io import ( to_block_files, + to_cupdlpx, to_file, to_gurobipy, to_highspy, @@ -1661,4 +1662,6 @@ def reset_solution(self) -> None: to_highspy = to_highspy + to_cupdlpx = to_cupdlpx + to_block_files = to_block_files diff --git a/linopy/solver_capabilities.py b/linopy/solver_capabilities.py index c3dafe04..0ffea923 100644 --- a/linopy/solver_capabilities.py +++ b/linopy/solver_capabilities.py @@ -10,24 +10,41 @@ import platform from dataclasses import dataclass from enum import Enum, auto +from importlib.metadata import PackageNotFoundError +from importlib.metadata import version as package_version from typing import TYPE_CHECKING +from packaging.specifiers import SpecifierSet + if TYPE_CHECKING: from collections.abc import Sequence +def _xpress_supports_gpu() -> bool: + """Check if installed xpress version supports GPU acceleration (>=9.8.0).""" + try: + return package_version("xpress") in SpecifierSet(">=9.8.0") + except PackageNotFoundError: + return False + + class SolverFeature(Enum): """Enumeration of all solver capabilities tracked by linopy.""" + # Model feature support + INTEGER_VARIABLES = auto() # Support for integer variables + # Objective function support QUADRATIC_OBJECTIVE = auto() # I/O capabilities DIRECT_API = auto() # Solve directly from Model without writing files LP_FILE_NAMES = auto() # Support for named variables/constraints in LP files + READ_MODEL_FROM_FILE = auto() # Ability to read models from file SOLUTION_FILE_NOT_NEEDED = auto() # Solver doesn't need a solution file # Advanced features + GPU_ACCELERATION = auto() # GPU-accelerated solving IIS_COMPUTATION = auto() # Irreducible Infeasible Set computation # Special constraint types @@ -61,9 +78,11 @@ def supports(self, feature: SolverFeature) -> bool: display_name="Gurobi", features=frozenset( { + SolverFeature.INTEGER_VARIABLES, SolverFeature.QUADRATIC_OBJECTIVE, SolverFeature.DIRECT_API, SolverFeature.LP_FILE_NAMES, + SolverFeature.READ_MODEL_FROM_FILE, SolverFeature.SOLUTION_FILE_NOT_NEEDED, SolverFeature.IIS_COMPUTATION, SolverFeature.SOS_CONSTRAINTS, @@ -76,9 +95,11 @@ def supports(self, feature: SolverFeature) -> bool: display_name="HiGHS", features=frozenset( { + SolverFeature.INTEGER_VARIABLES, SolverFeature.QUADRATIC_OBJECTIVE, SolverFeature.DIRECT_API, SolverFeature.LP_FILE_NAMES, + SolverFeature.READ_MODEL_FROM_FILE, SolverFeature.SOLUTION_FILE_NOT_NEEDED, } ), @@ -86,20 +107,32 @@ def supports(self, feature: SolverFeature) -> bool: "glpk": SolverInfo( name="glpk", display_name="GLPK", - features=frozenset(), # No LP_FILE_NAMES support + features=frozenset( + { + SolverFeature.INTEGER_VARIABLES, + SolverFeature.READ_MODEL_FROM_FILE, + } + ), # No LP_FILE_NAMES support ), "cbc": SolverInfo( name="cbc", display_name="CBC", - features=frozenset(), # No LP_FILE_NAMES support + features=frozenset( + { + SolverFeature.INTEGER_VARIABLES, + SolverFeature.READ_MODEL_FROM_FILE, + } + ), # No LP_FILE_NAMES support ), "cplex": SolverInfo( name="cplex", display_name="CPLEX", features=frozenset( { + SolverFeature.INTEGER_VARIABLES, SolverFeature.QUADRATIC_OBJECTIVE, SolverFeature.LP_FILE_NAMES, + SolverFeature.READ_MODEL_FROM_FILE, SolverFeature.SOS_CONSTRAINTS, } ), @@ -109,8 +142,20 @@ def supports(self, feature: SolverFeature) -> bool: display_name="FICO Xpress", features=frozenset( { + SolverFeature.INTEGER_VARIABLES, + SolverFeature.QUADRATIC_OBJECTIVE, + 
SolverFeature.LP_FILE_NAMES, + SolverFeature.READ_MODEL_FROM_FILE, + SolverFeature.SOLUTION_FILE_NOT_NEEDED, + SolverFeature.GPU_ACCELERATION, + SolverFeature.IIS_COMPUTATION, + } + if _xpress_supports_gpu() + else { + SolverFeature.INTEGER_VARIABLES, SolverFeature.QUADRATIC_OBJECTIVE, SolverFeature.LP_FILE_NAMES, + SolverFeature.READ_MODEL_FROM_FILE, SolverFeature.SOLUTION_FILE_NOT_NEEDED, SolverFeature.IIS_COMPUTATION, } @@ -121,13 +166,17 @@ def supports(self, feature: SolverFeature) -> bool: display_name="SCIP", features=frozenset( { + SolverFeature.INTEGER_VARIABLES, SolverFeature.LP_FILE_NAMES, + SolverFeature.READ_MODEL_FROM_FILE, SolverFeature.SOLUTION_FILE_NOT_NEEDED, } if platform.system() == "Windows" else { + SolverFeature.INTEGER_VARIABLES, SolverFeature.QUADRATIC_OBJECTIVE, SolverFeature.LP_FILE_NAMES, + SolverFeature.READ_MODEL_FROM_FILE, SolverFeature.SOLUTION_FILE_NOT_NEEDED, } # SCIP has a bug with quadratic models on Windows, see: @@ -139,9 +188,11 @@ def supports(self, feature: SolverFeature) -> bool: display_name="MOSEK", features=frozenset( { + SolverFeature.INTEGER_VARIABLES, SolverFeature.QUADRATIC_OBJECTIVE, SolverFeature.DIRECT_API, SolverFeature.LP_FILE_NAMES, + SolverFeature.READ_MODEL_FROM_FILE, SolverFeature.SOLUTION_FILE_NOT_NEEDED, } ), @@ -151,8 +202,10 @@ def supports(self, feature: SolverFeature) -> bool: display_name="COPT", features=frozenset( { + SolverFeature.INTEGER_VARIABLES, SolverFeature.QUADRATIC_OBJECTIVE, SolverFeature.LP_FILE_NAMES, + SolverFeature.READ_MODEL_FROM_FILE, SolverFeature.SOLUTION_FILE_NOT_NEEDED, } ), @@ -162,8 +215,21 @@ def supports(self, feature: SolverFeature) -> bool: display_name="MindOpt", features=frozenset( { + SolverFeature.INTEGER_VARIABLES, SolverFeature.QUADRATIC_OBJECTIVE, SolverFeature.LP_FILE_NAMES, + SolverFeature.READ_MODEL_FROM_FILE, + SolverFeature.SOLUTION_FILE_NOT_NEEDED, + } + ), + ), + "cupdlpx": SolverInfo( + name="cupdlpx", + display_name="cuPDLPx", + features=frozenset( + { + SolverFeature.DIRECT_API, + SolverFeature.GPU_ACCELERATION, SolverFeature.SOLUTION_FILE_NOT_NEEDED, } ), diff --git a/linopy/solvers.py b/linopy/solvers.py index 941aee81..7cb14006 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -25,6 +25,7 @@ import pandas as pd from packaging.version import parse as parse_version +import linopy.io from linopy.constants import ( Result, Solution, @@ -205,6 +206,16 @@ class xpress_Namespaces: # type: ignore[no-redef] except coptpy.CoptError: pass +with contextlib.suppress(ModuleNotFoundError): + import cupdlpx + + try: + cupdlpx.Model(np.array([0.0]), np.array([[0.0]]), None, None) + available_solvers.append("cupdlpx") + except ImportError: + pass + + quadratic_solvers = [s for s in QUADRATIC_SOLVERS if s in available_solvers] logger = logging.getLogger(__name__) @@ -236,6 +247,7 @@ class SolverName(enum.Enum): COPT = "copt" MindOpt = "mindopt" PIPS = "pips" + cuPDLPx = "cupdlpx" def path_to_string(path: Path) -> str: @@ -2367,3 +2379,248 @@ def __init__( super().__init__(**solver_options) msg = "The PIPS solver interface is not yet implemented." raise NotImplementedError(msg) + + +class cuPDLPx(Solver[None]): + """ + Solver subclass for the cuPDLPx solver. cuPDLPx must be installed + with working GPU support for usage. Find the installation instructions + at https://github.com/MIT-Lu-Lab/cuPDLPx. + + The full list of solver options provided with the python interface + is documented at https://github.com/MIT-Lu-Lab/cuPDLPx/tree/main/python. 
+ + Some example options are: + * LogToConsole : False by default. + * TimeLimit : 3600.0 by default. + * IterationLimit : 2147483647 by default. + + Attributes + ---------- + **solver_options + options for the given solver + """ + + def __init__( + self, + **solver_options: Any, + ) -> None: + super().__init__(**solver_options) + + def solve_problem_from_file( + self, + problem_fn: Path, + solution_fn: Path | None = None, + log_fn: Path | None = None, + warmstart_fn: Path | None = None, + basis_fn: Path | None = None, + env: EnvType | None = None, + ) -> Result: + """ + Solve a linear problem from a problem file using the solver cuPDLPx. + cuPDLPx does not currently support its own file IO, so this function + reads the problem file using linopy (only support netcf files) and + then passes the model to cuPDLPx for solving. + If the solution is feasible the function returns the + objective, solution and dual constraint variables. + + Parameters + ---------- + problem_fn : Path + Path to the problem file. + solution_fn : Path, optional + Path to the solution file. + log_fn : Path, optional + Path to the log file. + warmstart_fn : Path, optional + Path to the warmstart file. + basis_fn : Path, optional + Path to the basis file. + env : None, optional + Environment for the solver + + Returns + ------- + Result + """ + logger.warning( + "cuPDLPx doesn't currently support file IO. Building model from file using linopy." + ) + problem_fn_ = path_to_string(problem_fn) + + if problem_fn_.endswith(".netcdf"): + model: Model = linopy.io.read_netcdf(problem_fn_) + else: + msg = "linopy currently only supports reading models from netcdf files. Try using io_api='direct' instead." + raise NotImplementedError(msg) + + return self.solve_problem_from_model( + model, + solution_fn=solution_fn, + log_fn=log_fn, + warmstart_fn=warmstart_fn, + basis_fn=basis_fn, + env=env, + ) + + def solve_problem_from_model( + self, + model: Model, + solution_fn: Path | None = None, + log_fn: Path | None = None, + warmstart_fn: Path | None = None, + basis_fn: Path | None = None, + env: EnvType | None = None, + explicit_coordinate_names: bool = False, + ) -> Result: + """ + Solve a linear problem directly from a linopy model using the solver cuPDLPx. + If the solution is feasible the function returns the + objective, solution and dual constraint variables. + + Parameters + ---------- + model : linopy.model + Linopy model for the problem. + solution_fn : Path, optional + Path to the solution file. + log_fn : Path, optional + Path to the log file. + warmstart_fn : Path, optional + Path to the warmstart file. + basis_fn : Path, optional + Path to the basis file. + env : None, optional + Environment for the solver + explicit_coordinate_names : bool, optional + Transfer variable and constraint names to the solver (default: False) + + Returns + ------- + Result + """ + + if model.type in ["QP", "MILP"]: + msg = "cuPDLPx does not currently support QP or MILP problems." 
+ raise NotImplementedError(msg) + + cu_model = model.to_cupdlpx() + + return self._solve( + cu_model, + l_model=model, + solution_fn=solution_fn, + log_fn=log_fn, + warmstart_fn=warmstart_fn, + basis_fn=basis_fn, + io_api="direct", + sense=model.sense, + ) + + def _solve( + self, + cu_model: cupdlpx.Model, + l_model: Model | None = None, + solution_fn: Path | None = None, + log_fn: Path | None = None, + warmstart_fn: Path | None = None, + basis_fn: Path | None = None, + io_api: str | None = None, + sense: str | None = None, + ) -> Result: + """ + Solve a linear problem from a cupdlpx.Model object. + + Parameters + ---------- + cu_model: cupdlpx.Model + cupdlpx object. + solution_fn : Path, optional + Path to the solution file. + log_fn : Path, optional + Path to the log file. + warmstart_fn : Path, optional + Path to the warmstart file. + basis_fn : Path, optional + Path to the basis file. + model : linopy.model, optional + Linopy model for the problem. + io_api: str + io_api of the problem. For direct API from linopy model this is "direct". + sense: str + "min" or "max" + + Returns + ------- + Result + """ + + # see https://github.com/MIT-Lu-Lab/cuPDLPx/blob/main/python/cupdlpx/PDLP.py + CONDITION_MAP: dict[int, TerminationCondition] = { + cupdlpx.PDLP.OPTIMAL: TerminationCondition.optimal, + cupdlpx.PDLP.PRIMAL_INFEASIBLE: TerminationCondition.infeasible, + cupdlpx.PDLP.DUAL_INFEASIBLE: TerminationCondition.infeasible_or_unbounded, + cupdlpx.PDLP.TIME_LIMIT: TerminationCondition.time_limit, + cupdlpx.PDLP.ITERATION_LIMIT: TerminationCondition.iteration_limit, + cupdlpx.PDLP.UNSPECIFIED: TerminationCondition.unknown, + } + + self._set_solver_params(cu_model) + + if warmstart_fn is not None: + # cuPDLPx supports warmstart, but there currently isn't the tooling + # to read it in from a file + raise NotImplementedError("Warmstarting not yet implemented for cuPDLPx.") + else: + cu_model.clearWarmStart() + + if basis_fn is not None: + logger.warning("Basis files are not supported by cuPDLPx. Ignoring.") + + if log_fn is not None: + logger.warning("Log files are not supported by cuPDLPx. Ignoring.") + + # solve + cu_model.optimize() + + # parse solution and output + if solution_fn is not None: + raise NotImplementedError( + "Solution file output not yet implemented for cuPDLPx." + ) + + termination_condition = CONDITION_MAP.get( + cu_model.StatusCode, cu_model.StatusCode + ) + status = Status.from_termination_condition(termination_condition) + status.legacy_status = cu_model.Status # cuPDLPx status message + + def get_solver_solution() -> Solution: + objective = cu_model.ObjVal + + vlabels = None if l_model is None else l_model.matrices.vlabels + clabels = None if l_model is None else l_model.matrices.clabels + + sol = pd.Series(cu_model.X, vlabels, dtype=float) + dual = pd.Series(cu_model.Pi, clabels, dtype=float) + + if cu_model.ModelSense == cupdlpx.PDLP.MAXIMIZE: + dual *= -1 # flip sign of duals for max problems + + return Solution(sol, dual, objective) + + solution = self.safe_get_solution(status=status, func=get_solver_solution) + solution = maybe_adjust_objective_sign(solution, io_api, sense) + + # see https://github.com/MIT-Lu-Lab/cuPDLPx/tree/main/python#solution-attributes + return Result(status, solution, cu_model) + + def _set_solver_params(self, cu_model: cupdlpx.Model) -> None: + """ + Set solver options for cuPDLPx model. 
+ + For list of available options, see + https://github.com/MIT-Lu-Lab/cuPDLPx/tree/main/python#parameters + """ + for k, v in self.solver_options.items(): + cu_model.setParam(k, v) diff --git a/pyproject.toml b/pyproject.toml index 21f89c8f..19ef72b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,8 +36,9 @@ dependencies = [ "polars", "tqdm", "deprecation", + "packaging", "google-cloud-storage", - "requests" + "requests", ] [project.urls] @@ -82,6 +83,7 @@ solvers = [ "coptpy!=7.2.1", "xpress; platform_system != 'Darwin' and python_version < '3.11'", "pyscipopt; platform_system != 'Darwin'", + # "cupdlpx>=0.1.2", pip package currently unstable ] [tool.setuptools.packages.find] @@ -97,15 +99,16 @@ version_scheme = "no-guess-dev" [tool.pytest.ini_options] testpaths = ["test"] norecursedirs = ["dev-scripts", "doc", "examples", "benchmark"] +markers = [ + "gpu: marks tests as requiring GPU hardware (deselect with '-m \"not gpu\"')", +] [tool.coverage.run] branch = true source = ["linopy"] omit = ["test/*"] [tool.coverage.report] -exclude_also = [ - "if TYPE_CHECKING:", -] +exclude_also = ["if TYPE_CHECKING:"] [tool.mypy] exclude = ['dev/*', 'examples/*', 'benchmark/*', 'doc/*'] diff --git a/test/conftest.py b/test/conftest.py new file mode 100644 index 00000000..3197689b --- /dev/null +++ b/test/conftest.py @@ -0,0 +1,50 @@ +"""Pytest configuration and fixtures.""" + +import os + +import pytest + + +def pytest_addoption(parser: pytest.Parser) -> None: + """Add custom command line options.""" + parser.addoption( + "--run-gpu", + action="store_true", + default=False, + help="run tests that require GPU hardware", + ) + + +def pytest_configure(config: pytest.Config) -> None: + """Configure pytest with custom markers and behavior.""" + config.addinivalue_line("markers", "gpu: marks tests as requiring GPU hardware") + + # Set environment variable so test modules can check if GPU tests are enabled + # This is needed because parametrize happens at import time + if config.getoption("--run-gpu", default=False): + os.environ["LINOPY_RUN_GPU_TESTS"] = "1" + else: + os.environ.pop("LINOPY_RUN_GPU_TESTS", None) + + +def pytest_collection_modifyitems( + config: pytest.Config, items: list[pytest.Item] +) -> None: + """Automatically skip GPU tests unless --run-gpu is passed.""" + if config.getoption("--run-gpu"): + return + + skip_gpu = pytest.mark.skip(reason="need --run-gpu option to run GPU tests") + for item in items: + # Check if this is a parametrized test with a GPU solver + if hasattr(item, "callspec") and "solver" in item.callspec.params: + solver = item.callspec.params["solver"] + # Import here to avoid circular dependency + from linopy.solver_capabilities import ( + SolverFeature, + solver_supports, + ) + + if solver_supports(solver, SolverFeature.GPU_ACCELERATION): + item.add_marker(skip_gpu) + item.add_marker(pytest.mark.gpu) diff --git a/test/test_optimization.py b/test/test_optimization.py index cac5be9d..ff790d6e 100644 --- a/test/test_optimization.py +++ b/test/test_optimization.py @@ -15,7 +15,7 @@ import pandas as pd import pytest import xarray as xr -from xarray.testing import assert_equal +from xarray.testing import assert_allclose, assert_equal from linopy import GREATER_EQUAL, LESS_EQUAL, Model, solvers from linopy.common import to_path @@ -37,9 +37,11 @@ # mps io is only supported via highspy io_apis.append("mps") - +file_io_solvers = get_available_solvers_with_feature( + SolverFeature.READ_MODEL_FROM_FILE, available_solvers +) params: list[tuple[str, str, bool]] = list( - 
itertools.product(available_solvers, io_apis, explicit_coordinate_names) + itertools.product(file_io_solvers, io_apis, explicit_coordinate_names) ) direct_solvers = get_available_solvers_with_feature( @@ -57,6 +59,18 @@ # handled in linopy/solver_capabilities.py by adjusting the registry at import time. feasible_quadratic_solvers: list[str] = list(quadratic_solvers) +feasible_mip_solvers: list[str] = get_available_solvers_with_feature( + SolverFeature.INTEGER_VARIABLES, available_solvers +) + +gpu_solvers: list[str] = get_available_solvers_with_feature( + SolverFeature.GPU_ACCELERATION, available_solvers +) + +# set tolerances for solution checking based on solver type (CPU vs. GPU) +CPU_SOL_TOL: float = 1e-5 # numpy default +GPU_SOL_TOL: float = 2.5e-4 # gpu solvers typically have lower numerical precision + def test_print_solvers(capsys: Any) -> None: with capsys.disabled(): @@ -438,7 +452,8 @@ def test_anonymous_constraint( model.solve( solver, io_api=io_api, explicit_coordinate_names=explicit_coordinate_names ) - assert_equal(model.solution, model_anonymous_constraint.solution) + tol = GPU_SOL_TOL if solver in gpu_solvers else CPU_SOL_TOL + assert_allclose(model.solution, model_anonymous_constraint.solution, rtol=tol) @pytest.mark.parametrize("solver,io_api,explicit_coordinate_names", params) @@ -519,6 +534,7 @@ def test_solver_time_limit_options( "mosek": {"MSK_DPAR_OPTIMIZER_MAX_TIME": 1}, "mindopt": {"MaxTime": 1}, "copt": {"TimeLimit": 1}, + "cupdlpx": {"TimeLimit": 1}, } status, condition = model.solve( solver, @@ -552,7 +568,8 @@ def test_duplicated_variables( ) -> None: status, condition = model_with_duplicated_variables.solve(solver, io_api=io_api) assert status == "ok" - assert all(model_with_duplicated_variables.solution["x"] == 5) + tol = GPU_SOL_TOL if solver in gpu_solvers else CPU_SOL_TOL + assert all(np.isclose(model_with_duplicated_variables.solution["x"], 5, rtol=tol)) @pytest.mark.parametrize("solver,io_api,explicit_coordinate_names", params) @@ -566,10 +583,19 @@ def test_non_aligned_variables( solver, io_api=io_api, explicit_coordinate_names=explicit_coordinate_names ) assert status == "ok" + + tol = GPU_SOL_TOL if solver in gpu_solvers else CPU_SOL_TOL + with pytest.warns(UserWarning): - assert model_with_non_aligned_variables.solution["x"][0] == 0 - assert model_with_non_aligned_variables.solution["x"][-1] == 10.5 - assert model_with_non_aligned_variables.solution["y"][0] == 10.5 + assert np.isclose( + model_with_non_aligned_variables.solution["x"][0], 0, rtol=tol + ) + assert np.isclose( + model_with_non_aligned_variables.solution["x"][-1], 10.5, rtol=tol + ) + assert np.isclose( + model_with_non_aligned_variables.solution["y"][0], 10.5, rtol=tol + ) assert np.isnan(model_with_non_aligned_variables.solution["y"][-1]) for dtype in model_with_non_aligned_variables.solution.dtypes.values(): @@ -584,16 +610,17 @@ def test_set_files( io_api: str, explicit_coordinate_names: bool, ) -> None: - status, condition = model.solve( - solver, - io_api=io_api, - explicit_coordinate_names=explicit_coordinate_names, - problem_fn=tmp_path / "problem.lp", - solution_fn=tmp_path / "solution.sol", - log_fn=tmp_path / "logging.log", - keep_files=False, - ) - assert status == "ok" + if solver in file_io_solvers: + status, condition = model.solve( + solver, + io_api=io_api, + explicit_coordinate_names=explicit_coordinate_names, + problem_fn=tmp_path / "problem.lp", + solution_fn=tmp_path / "solution.sol", + log_fn=tmp_path / "logging.log", + keep_files=False, + ) + assert status == "ok" 
@pytest.mark.parametrize("solver,io_api,explicit_coordinate_names", params) @@ -604,26 +631,33 @@ def test_set_files_and_keep_files( io_api: str, explicit_coordinate_names: bool, ) -> None: - status, condition = model.solve( - solver, - io_api=io_api, - explicit_coordinate_names=explicit_coordinate_names, - problem_fn=tmp_path / "problem.lp", - solution_fn=tmp_path / "solution.sol", - log_fn=tmp_path / "logging.log", - keep_files=True, - ) - assert status == "ok" - if io_api != "direct" and solver != "xpress": - assert (tmp_path / "problem.lp").exists() - assert (tmp_path / "solution.sol").exists() - assert (tmp_path / "logging.log").exists() + if solver in file_io_solvers: + status, condition = model.solve( + solver, + io_api=io_api, + explicit_coordinate_names=explicit_coordinate_names, + problem_fn=tmp_path / "problem.lp", + solution_fn=tmp_path / "solution.sol", + log_fn=tmp_path / "logging.log", + keep_files=True, + ) + assert status == "ok" + if io_api != "direct" and solver != "xpress": + assert (tmp_path / "problem.lp").exists() + assert (tmp_path / "solution.sol").exists() + assert (tmp_path / "logging.log").exists() @pytest.mark.parametrize("solver,io_api,explicit_coordinate_names", params) def test_infeasible_model( model: Model, solver: str, io_api: str, explicit_coordinate_names: bool ) -> None: + if solver == "cupdlpx": + pytest.skip( + "Ongoing issue with cuPDLPx causes it to hang for some unbounded problems. " + "See https://github.com/MIT-Lu-Lab/cuPDLPx/issues/9." + ) + model.add_constraints([(1, "x")], "<=", 0) model.add_constraints([(1, "y")], "<=", 0) @@ -648,6 +682,9 @@ def test_infeasible_model( def test_model_with_inf( model_with_inf: Model, solver: str, io_api: str, explicit_coordinate_names: bool ) -> None: + if solver not in feasible_mip_solvers: + pytest.skip(f"{solver} does not support MIP") + status, condition = model_with_inf.solve(solver, io_api=io_api) assert condition == "optimal" assert (model_with_inf.solution.x == 0).all() @@ -661,6 +698,9 @@ def test_model_with_inf( def test_milp_binary_model( milp_binary_model: Model, solver: str, io_api: str, explicit_coordinate_names: bool ) -> None: + if solver not in feasible_mip_solvers: + pytest.skip(f"{solver} does not support MIP") + status, condition = milp_binary_model.solve( solver, io_api=io_api, explicit_coordinate_names=explicit_coordinate_names ) @@ -680,6 +720,9 @@ def test_milp_binary_model_r( io_api: str, explicit_coordinate_names: bool, ) -> None: + if solver not in feasible_mip_solvers: + pytest.skip(f"{solver} does not support MIP") + status, condition = milp_binary_model_r.solve( solver, io_api=io_api, explicit_coordinate_names=explicit_coordinate_names ) @@ -696,6 +739,9 @@ def test_milp_binary_model_r( def test_milp_model( milp_model: Model, solver: str, io_api: str, explicit_coordinate_names: bool ) -> None: + if solver not in feasible_mip_solvers: + pytest.skip(f"{solver} does not support MIP") + status, condition = milp_model.solve( solver, io_api=io_api, explicit_coordinate_names=explicit_coordinate_names ) @@ -710,14 +756,20 @@ def test_milp_model( def test_milp_model_r( milp_model_r: Model, solver: str, io_api: str, explicit_coordinate_names: bool ) -> None: + if solver not in feasible_mip_solvers: + pytest.skip(f"{solver} does not support MIP") + # MPS format by Highs wrong, see https://github.com/ERGO-Code/HiGHS/issues/1325 - # skip it - if io_api != "mps": - status, condition = milp_model_r.solve( - solver, io_api=io_api, explicit_coordinate_names=explicit_coordinate_names - ) - assert 
condition == "optimal" - assert ((milp_model_r.solution.x == 11) | (milp_model_r.solution.y == 0)).all() + if io_api == "mps": + pytest.skip("MPS format bug in HiGHS") + + status, condition = milp_model_r.solve( + solver, + io_api=io_api, + explicit_coordinate_names=explicit_coordinate_names, + ) + assert condition == "optimal" + assert ((milp_model_r.solution.x == 11) | (milp_model_r.solution.y == 0)).all() @pytest.mark.parametrize( @@ -842,10 +894,12 @@ def test_quadratic_model_unbounded( def test_modified_model( modified_model: Model, solver: str, io_api: str, explicit_coordinate_names: bool ) -> None: + if solver not in feasible_mip_solvers: + pytest.skip(f"{solver} does not support MIP") + status, condition = modified_model.solve( solver, io_api=io_api, explicit_coordinate_names=explicit_coordinate_names ) - assert condition == "optimal" assert (modified_model.solution.x == 0).all() assert (modified_model.solution.y == 10).all() @@ -866,7 +920,8 @@ def test_masked_variable_model( assert y.solution[-2:].isnull().all() assert y.solution[:-2].notnull().all() assert x.solution.notnull().all() - assert (x.solution[-2:] == 10).all() + tol = GPU_SOL_TOL if solver in gpu_solvers else CPU_SOL_TOL + assert (np.isclose(x.solution[-2:], 10, rtol=tol)).all() # Squeeze in solution getter for expressions with masked variables assert_equal(x.add(y).solution, x.solution + y.solution.fillna(0)) @@ -881,8 +936,9 @@ def test_masked_constraint_model( masked_constraint_model.solve( solver, io_api=io_api, explicit_coordinate_names=explicit_coordinate_names ) - assert (masked_constraint_model.solution.y[:-2] == 10).all() - assert (masked_constraint_model.solution.y[-2:] == 5).all() + tol = GPU_SOL_TOL if solver in gpu_solvers else CPU_SOL_TOL + assert (np.isclose(masked_constraint_model.solution.y[:-2], 10, rtol=tol)).all() + assert (np.isclose(masked_constraint_model.solution.y[-2:], 5, rtol=tol)).all() @pytest.mark.parametrize("solver,io_api,explicit_coordinate_names", params) @@ -893,6 +949,9 @@ def test_basis_and_warmstart( io_api: str, explicit_coordinate_names: bool, ) -> None: + if solver == "cupdlpx": + pytest.skip("cuPDLPx does not yet support warmstart in the Python API.") + basis_fn = tmp_path / "basis.bas" model.solve( solver, @@ -916,14 +975,15 @@ def test_solution_fn_parent_dir_doesnt_exist( explicit_coordinate_names: bool, tmp_path: Any, ) -> None: - solution_fn = tmp_path / "non_existent_dir" / "non_existent_file" - status, condition = model.solve( - solver, - io_api=io_api, - explicit_coordinate_names=explicit_coordinate_names, - solution_fn=solution_fn, - ) - assert status == "ok" + if solver in file_io_solvers: + solution_fn = tmp_path / "non_existent_dir" / "non_existent_file" + status, condition = model.solve( + solver, + io_api=io_api, + explicit_coordinate_names=explicit_coordinate_names, + solution_fn=solution_fn, + ) + assert status == "ok" @pytest.mark.parametrize("solver", available_solvers) @@ -936,7 +996,7 @@ def test_non_supported_solver_io(model: Model, solver: str) -> None: def test_solver_attribute_getter( model: Model, solver: str, io_api: str, explicit_coordinate_names: bool ) -> None: - model.solve(solver) + model.solve(solver, io_api=io_api) if solver != "gurobi": with pytest.raises(NotImplementedError): model.variables.get_solver_attribute("RC") @@ -965,7 +1025,9 @@ def test_model_resolve( ) assert status == "ok" # x = -0.75, y = 3.0 - assert np.isclose(model.objective.value or 0, 5.25) + + tol = GPU_SOL_TOL if solver in gpu_solvers else CPU_SOL_TOL + assert 
np.isclose(model.objective.value or 0, 5.25, rtol=tol) @pytest.mark.parametrize( diff --git a/test/test_solvers.py b/test/test_solvers.py index 129c1e0b..7f8b01f2 100644 --- a/test/test_solvers.py +++ b/test/test_solvers.py @@ -11,6 +11,7 @@ from test_io import model # noqa: F401 from linopy import Model, solvers +from linopy.solver_capabilities import SolverFeature, solver_supports free_mps_problem = """NAME sample_mip ROWS @@ -53,6 +54,9 @@ def test_free_mps_solution_parsing(solver: str, tmp_path: Path) -> None: except ValueError: raise ValueError(f"Solver '{solver}' is not recognized") + if not solver_supports(solver, SolverFeature.READ_MODEL_FROM_FILE): + pytest.skip("Solver does not support reading model from file.") + # Write the MPS file to the temporary directory mps_file = tmp_path / "problem.mps" mps_file.write_text(free_mps_problem)
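
> **Editor's note (hypothetical test):** The sketch below is not part of this PR; it illustrates how the new `test/conftest.py` machinery interacts with a solver-parametrized test. Any case whose `solver` parameter reports `GPU_ACCELERATION` is automatically marked `gpu` and skipped unless pytest is invoked with `--run-gpu`, and the tolerance choice mirrors the `CPU_SOL_TOL`/`GPU_SOL_TOL` pattern introduced in `test_optimization.py`. The `io_api` selection via `DIRECT_API` is an assumption made to keep the example self-contained.

```python
import numpy as np
import pytest

import linopy
from linopy.solver_capabilities import SolverFeature, solver_supports


@pytest.mark.parametrize("solver", linopy.available_solvers)
def test_tiny_lp(solver: str) -> None:
    # minimize x  s.t.  x >= 3,  x >= 0  ->  optimum 3
    m = linopy.Model()
    x = m.add_variables(lower=0, name="x")
    m.add_constraints(x >= 3)
    m.add_objective(x)

    # direct API where available, LP files otherwise (assumed choice for this sketch)
    io_api = "direct" if solver_supports(solver, SolverFeature.DIRECT_API) else "lp"
    status, condition = m.solve(solver, io_api=io_api)
    assert condition == "optimal"

    # looser tolerance for first-order GPU solvers, as in the PR's test changes
    tol = 2.5e-4 if solver_supports(solver, SolverFeature.GPU_ACCELERATION) else 1e-5
    assert np.isclose(m.objective.value, 3.0, rtol=tol)
```
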