diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c7836288..368681b3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,14 +19,11 @@ jobs: - os: ubuntu-latest python: "3.13" toxenv: base - #- os: ubuntu-latest - # python: "3.12" - # toxenv: base - os: ubuntu-latest - python: "3.11" + python: "3.12" toxenv: base - os: ubuntu-latest - python: "3.10" + python: "3.11" toxenv: base - os: ubuntu-latest python: "3.11" @@ -68,9 +65,9 @@ jobs: toxenv: external-notebooks steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: ${{ matrix.python }} cache: pip @@ -79,18 +76,28 @@ jobs: if: ${{ startsWith(matrix.toxenv, 'external-') }} uses: julia-actions/setup-julia@v2 with: - version: "1.11" + version: "1" # Use the latest stable Julia version - name: Install Julia dependencies if: ${{ startsWith(matrix.toxenv, 'external-') }} - run: julia -e 'using Pkg; Pkg.add("PyCall")' + env: + # make Julia/PyCall bind to the same Python used by the job + PYTHON: ${{ env.pythonLocation }}/bin/python + run: | + julia -e 'using Pkg; Pkg.add("PyCall")' + + - name: Install R + if: ${{ startsWith(matrix.toxenv, 'external-') }} + uses: r-lib/actions/setup-r@v2 + with: + r-version: 'release' - name: Install dependencies run: | case "${{ matrix.toxenv }}" in - base|visualization|mac|base-notebooks) .github/workflows/install_deps.sh base R ;; + base|visualization|mac|base-notebooks) .github/workflows/install_deps.sh base ;; petab) .github/workflows/install_deps.sh amici ;; - external-*) .github/workflows/install_deps.sh base R amici ;; + external-*) .github/workflows/install_deps.sh base amici ;; lint|project|doc|migrate) .github/workflows/install_deps.sh doc ;; esac python -m pip install -U pip tox @@ -100,8 +107,9 @@ jobs: # If each tox env generates coverage.xml, include flags; otherwise combine explicitly - name: Upload coverage - if: ${{ always() && 
matrix.os == 'ubuntu-latest' }} # optional - uses: codecov/codecov-action@v4 + if: ${{ always() && matrix.os == 'ubuntu-latest' }} + uses: codecov/codecov-action@v6 + continue-on-error: true with: token: ${{ secrets.CODECOV_TOKEN }} files: ./coverage.xml diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 972f4ea6..bb4f6cf1 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -14,10 +14,10 @@ jobs: steps: - name: Check out repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Prepare Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.12" cache: pip diff --git a/.github/workflows/install_deps.sh b/.github/workflows/install_deps.sh index d497a6f3..95f35310 100755 --- a/.github/workflows/install_deps.sh +++ b/.github/workflows/install_deps.sh @@ -48,29 +48,10 @@ install_base() { fi } -install_r() { - log_info "Installing R..." - if is_macos; then - brew install r - else - # Prefer distro packages in CI - apt_install libtirpc-dev r-base r-base-dev - # Make R shared libs discoverable for the rest of the job - export_env_var LD_LIBRARY_PATH "${LD_LIBRARY_PATH:-/usr/lib}:/usr/lib/R/lib:/usr/local/lib/R/lib" - fi - - if command -v R >/dev/null 2>&1; then - log_info "R installed: $(R --version | head -n1)" - else - log_error "R installation failed (R not on PATH)" - exit 1 - fi -} - install_amici() { log_info "Installing AMICI dependencies..." if ! is_macos; then - apt_install swig libatlas-base-dev libhdf5-serial-dev libboost-all-dev + apt_install libhdf5-serial-dev libboost-all-dev fi log_info "Installing AMICI Python package..." python -m pip uninstall -y amici pyabc || true @@ -93,7 +74,6 @@ install_dev_tools() { install_all() { install_base - install_r install_amici install_doc_tools install_dev_tools @@ -105,7 +85,6 @@ Usage: $0 [OPTION]... 
Options: base Install base dependencies (Redis) - R Install R (Ubuntu: apt; macOS: brew) amici Install AMICI dependencies doc Install documentation tools (Pandoc) dev Install development tools @@ -124,7 +103,6 @@ main() { for arg in "$@"; do case "$arg" in base) install_base ;; - R) install_r ;; amici) install_amici ;; doc) install_doc_tools ;; dev) install_dev_tools ;; diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1487ed83..baa15b38 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -61,7 +61,7 @@ repos: # Configuration default_language_version: - python: python3.11 + python: python3.13 # Global exclusions exclude: | diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f057d194..147afb7c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,6 +7,14 @@ Release Notes 0.12 Series ........... +0.12.18 (2026-04-14) +-------------------- + +General: + +* Support for amici 1.0.0 +* Remove support for Python 3.10 + 0.12.17 (2026-02-24) -------------------- diff --git a/doc/examples/model_selection.ipynb b/doc/examples/model_selection.ipynb index fa40661c..28a02b07 100644 --- a/doc/examples/model_selection.ipynb +++ b/doc/examples/model_selection.ipynb @@ -83,7 +83,7 @@ " # sample from a gaussian\n", " y = st.norm(parameters.x, sigma).rvs()\n", " # return the sample as dictionary\n", - " return {\"y\": y}" + " return {'y': y}" ] }, { @@ -110,8 +110,8 @@ "# Their mean differs.\n", "mu_x_1, mu_x_2 = 0, 1\n", "parameter_priors = [\n", - " pyabc.Distribution(x=pyabc.RV(\"norm\", mu_x_1, sigma)),\n", - " pyabc.Distribution(x=pyabc.RV(\"norm\", mu_x_2, sigma)),\n", + " pyabc.Distribution(x=pyabc.RV('norm', mu_x_1, sigma)),\n", + " pyabc.Distribution(x=pyabc.RV('norm', mu_x_2, sigma)),\n", "]" ] }, @@ -142,7 +142,7 @@ "source": [ "# We plug all the ABC options together\n", "abc = pyabc.ABCSMC(\n", - " models, parameter_priors, pyabc.PercentileDistance(measures_to_use=[\"y\"])\n", + " models, parameter_priors,
pyabc.PercentileDistance(measures_to_use=['y'])\n", ")" ] }, @@ -176,8 +176,8 @@ "# y_observed is the important piece here: our actual observation.\n", "y_observed = 1\n", "# and we define where to store the results\n", - "db_path = \"sqlite:///\" + os.path.join(tempfile.gettempdir(), \"test.db\")\n", - "history = abc.new(db_path, {\"y\": y_observed})" + "db_path = 'sqlite:///' + os.path.join(tempfile.gettempdir(), 'test.db')\n", + "history = abc.new(db_path, {'y': y_observed})" ] }, { @@ -205,7 +205,7 @@ } ], "source": [ - "print(\"ABC-SMC run ID:\", history.id)" + "print('ABC-SMC run ID:', history.id)" ] }, { @@ -270,7 +270,7 @@ } ], "source": [ - "history is abc.history" + "assert history is abc.history" ] }, { @@ -351,9 +351,9 @@ } ], "source": [ - "# Evaluate the model probabililties\n", + "# Evaluate the model probabilities\n", "model_probabilities = history.get_model_probabilities()\n", - "model_probabilities" + "print(model_probabilities)" ] }, { diff --git a/doc/examples/multiscale_agent_based.ipynb b/doc/examples/multiscale_agent_based.ipynb index c03c0681..e1b5aa59 100644 --- a/doc/examples/multiscale_agent_based.ipynb +++ b/doc/examples/multiscale_agent_based.ipynb @@ -118,7 +118,7 @@ " ecm_degradation_rate=8e-4,\n", " ecm_division_threshold=1e-2,\n", ")\n", - "print(f\"Simulation took {time() - start_time:.2f}s\")" + "print(f'Simulation took {time() - start_time:.2f}s')" ] }, { @@ -157,29 +157,29 @@ "fig.set_size_inches((16, 5))\n", "\n", "color = {\n", - " \"growth_curve\": \"k\",\n", - " \"extra_cellular_matrix_profile\": \"green\",\n", - " \"proliferation_profile\": \"orange\",\n", + " 'growth_curve': 'k',\n", + " 'extra_cellular_matrix_profile': 'green',\n", + " 'proliferation_profile': 'orange',\n", "}\n", "\n", "x_label = {\n", - " \"growth_curve\": \"Time (d)\",\n", - " \"extra_cellular_matrix_profile\": \"Distance to rim (μm)\",\n", - " \"proliferation_profile\": \"Distance to rim (μm)\",\n", + " 'growth_curve': 'Time (d)',\n", + " 
'extra_cellular_matrix_profile': 'Distance to rim (μm)',\n", + " 'proliferation_profile': 'Distance to rim (μm)',\n", "}\n", "\n", "y_label = {\n", - " \"growth_curve\": \"Radius (μm)\",\n", - " \"extra_cellular_matrix_profile\": \"Extracellular matrix intensity\",\n", - " \"proliferation_profile\": \"Fraction proliferating cells\",\n", + " 'growth_curve': 'Radius (μm)',\n", + " 'extra_cellular_matrix_profile': 'Extracellular matrix intensity',\n", + " 'proliferation_profile': 'Fraction proliferating cells',\n", "}\n", "\n", "for ax, (key, val) in zip(axes, observation.items()):\n", " ax.plot(val, color=color[key])\n", - " ax.set_title(capwords(key.replace(\"_\", \" \")))\n", + " ax.set_title(capwords(key.replace('_', ' ')))\n", " ax.set_xlabel(x_label[key])\n", " ax.set_ylabel(y_label[key])\n", - " if key.endswith(\"profile\"):\n", + " if key.endswith('profile'):\n", " ax.set_xlim(0, 600)" ] }, @@ -225,7 +225,7 @@ ")\n", "\n", "prior = Distribution(\n", - " **{key: RV(\"uniform\", a, b - a) for key, (a, b) in limits.items()}\n", + " **{key: RV('uniform', a, b - a) for key, (a, b) in limits.items()}\n", ")" ] }, @@ -309,7 +309,7 @@ } ], "source": [ - "abc.new(\"sqlite:////tmp/test.db\", data_mean)" + "abc.new('sqlite:////tmp/test.db', data_mean)" ] }, { @@ -420,7 +420,7 @@ "\n", "from pyabc import History\n", "\n", - "h_loaded = History(\"sqlite:///\" + stored_data_db)\n", + "h_loaded = History('sqlite:///' + stored_data_db)\n", "\n", "df, w = h_loaded.get_distribution(m=0, t=5)\n", "plot_kde_matrix(df, w, limits=limits);" @@ -490,7 +490,7 @@ ], "source": [ "populations = h_loaded.get_all_populations()\n", - "(populations[populations.t >= 0].plot(\"t\", \"particles\", marker=\"o\"));" + "(populations[populations.t >= 0].plot('t', 'particles', marker='o'));" ] }, { @@ -516,7 +516,7 @@ } ], "source": [ - "print(f\"Execution time: {(time() - start_time)/60:.1f}m\")" + "print(f'Execution time: {(time() - start_time)/60:.1f}m')" ] }, { diff --git 
a/doc/examples/parameter_inference.ipynb b/doc/examples/parameter_inference.ipynb index 4a2bca24..5720c4a9 100644 --- a/doc/examples/parameter_inference.ipynb +++ b/doc/examples/parameter_inference.ipynb @@ -91,7 +91,7 @@ "outputs": [], "source": [ "def model(parameter):\n", - " return {\"data\": parameter[\"mu\"] + 0.5 * np.random.randn()}" + " return {'data': parameter['mu'] + 0.5 * np.random.randn()}" ] }, { @@ -109,7 +109,7 @@ "metadata": {}, "outputs": [], "source": [ - "prior = pyabc.Distribution(mu=pyabc.RV(\"uniform\", 0, 5))" + "prior = pyabc.Distribution(mu=pyabc.RV('uniform', 0, 5))" ] }, { @@ -138,7 +138,7 @@ "outputs": [], "source": [ "def distance(x, x0):\n", - " return abs(x[\"data\"] - x0[\"data\"])" + " return abs(x['data'] - x0['data'])" ] }, { @@ -211,9 +211,9 @@ } ], "source": [ - "db_path = os.path.join(tempfile.gettempdir(), \"test.db\")\n", + "db_path = os.path.join(tempfile.gettempdir(), 'test.db')\n", "observation = 2.5\n", - "abc.new(\"sqlite:///\" + db_path, {\"data\": observation})" + "abc.new('sqlite:///' + db_path, {'data': observation})" ] }, { @@ -314,12 +314,12 @@ " w,\n", " xmin=0,\n", " xmax=5,\n", - " x=\"mu\",\n", - " xname=r\"$\\mu$\",\n", + " x='mu',\n", + " xname=r'$\\mu$',\n", " ax=ax,\n", - " label=f\"PDF t={t}\",\n", + " label=f'PDF t={t}',\n", " )\n", - "ax.axvline(observation, color=\"k\", linestyle=\"dashed\")\n", + "ax.axvline(observation, color='k', linestyle='dashed')\n", "ax.legend();" ] }, diff --git a/doc/examples/petab_application.ipynb b/doc/examples/petab_application.ipynb index 7332092e..cf7e13b2 100644 --- a/doc/examples/petab_application.ipynb +++ b/doc/examples/petab_application.ipynb @@ -31,7 +31,7 @@ "outputs": [], "source": [ "import petab.v1 as petab\n", - "from amici.petab import import_petab_problem\n", + "from amici.importers.petab.v1 import import_petab_problem\n", "\n", "from pyabc.petab import AmiciPetabImporter" ] @@ -91,7 +91,7 @@ "model = import_petab_problem(petab_problem)\n", "\n", "# the 
solver to numerically solve the ODE\n", - "solver = model.getSolver()\n", + "solver = model.create_solver()\n", "\n", "# import everything to pyABC\n", "importer = AmiciPetabImporter(petab_problem, model, solver)\n", @@ -197,7 +197,7 @@ "acceptor = pyabc.StochasticAcceptor(\n", " pdf_norm_method = pyabc.ScaledPDFNorm())\n", "\n", - "abc = pyabc.ABCSMC(model, prior, kernel, \n", + "abc = pyabc.ABCSMC(model, prior, kernel,\n", " eps=temperature,\n", " acceptor=acceptor,\n", " sampler=sampler,\n", diff --git a/doc/examples/petab_yaml2sbml.ipynb b/doc/examples/petab_yaml2sbml.ipynb index 8c595784..b395a419 100644 --- a/doc/examples/petab_yaml2sbml.ipynb +++ b/doc/examples/petab_yaml2sbml.ipynb @@ -36,7 +36,7 @@ "\n", "import amici\n", "import numpy as np\n", - "from amici.petab import import_petab_problem\n", + "from amici.importers.petab.v1 import import_petab_problem\n", "\n", "import pyabc\n", "import pyabc.petab\n", @@ -219,13 +219,13 @@ " sys.path.insert(0, os.path.abspath(amici_dir))\n", "model = import_petab_problem(\n", " petab_problem,\n", - " model_output_dir=amici_dir,\n", + " output_dir=amici_dir,\n", " verbose=False,\n", " generate_sensitivity_code=False,\n", ")\n", "\n", "# the solver to numerically solve the ODE\n", - "solver = model.getSolver()\n", + "solver = model.create_solver()\n", "\n", "# import everything to pyABC\n", "importer = pyabc.petab.AmiciPetabImporter(petab_problem, model, solver)\n", @@ -421,16 +421,16 @@ " if amici_dir not in sys.path:\n", " sys.path.insert(0, os.path.abspath(amici_dir))\n", " model_module = importlib.import_module(model_name)\n", - " model = model_module.getModel()\n", - " solver = model.getSolver()\n", + " model = model_module.get_model()\n", + " solver = model.create_solver()\n", "\n", " # measurement times\n", " n_time = 10\n", " meas_times = np.linspace(0, 10, n_time)\n", - " model.setTimepoints(meas_times)\n", + " model.set_timepoints(meas_times)\n", "\n", " # simulate with nominal parameters\n", - " rdata = 
amici.runAmiciSimulation(model, solver)\n", + " rdata = model.simulate(solver=solver)\n", "\n", " # create noisy data\n", " np.random.seed(2)\n", diff --git a/doc/examples/resuming.ipynb b/doc/examples/resuming.ipynb index a9112e05..e8e924d7 100644 --- a/doc/examples/resuming.ipynb +++ b/doc/examples/resuming.ipynb @@ -81,17 +81,17 @@ "outputs": [], "source": [ "def model(parameter):\n", - " return {\"data\": parameter[\"mean\"] + np.random.randn()}\n", + " return {'data': parameter['mean'] + np.random.randn()}\n", "\n", "\n", - "prior = Distribution(mean=RV(\"uniform\", 0, 5))\n", + "prior = Distribution(mean=RV('uniform', 0, 5))\n", "\n", "\n", "def distance(x, y):\n", - " return abs(x[\"data\"] - y[\"data\"])\n", + " return abs(x['data'] - y['data'])\n", "\n", "\n", - "db = \"sqlite:///\" + os.path.join(gettempdir(), \"test.db\")" + "db = 'sqlite:///' + os.path.join(gettempdir(), 'test.db')" ] }, { @@ -126,9 +126,9 @@ ], "source": [ "abc = ABCSMC(model, prior, distance)\n", - "history = abc.new(db, {\"data\": 2.5})\n", + "history = abc.new(db, {'data': 2.5})\n", "run_id = history.id\n", - "print(\"Run ID:\", run_id)" + "print('Run ID:', run_id)" ] }, { @@ -191,9 +191,7 @@ "output_type": "execute_result" } ], - "source": [ - "history.n_populations" - ] + "source": "print(history.n_populations)" }, { "cell_type": "markdown", @@ -317,9 +315,7 @@ "output_type": "execute_result" } ], - "source": [ - "abc_continued.history.n_populations" - ] + "source": "print(abc_continued.history.n_populations)" }, { "cell_type": "markdown", diff --git a/doc/examples/sde_ion_channels.ipynb b/doc/examples/sde_ion_channels.ipynb index 727cae79..4ab8a423 100644 --- a/doc/examples/sde_ion_channels.ipynb +++ b/doc/examples/sde_ion_channels.ipynb @@ -77,10 +77,10 @@ "import requests\n", "\n", "URL = (\n", - " \"https://senselab.med.yale.edu/modeldb/\"\n", - " \"eavBinDown.cshtml?o=128502&a=23&mime=application/zip\"\n", + " 'https://senselab.med.yale.edu/modeldb/'\n", + " 
'eavBinDown.cshtml?o=128502&a=23&mime=application/zip'\n", ")\n", - "req = requests.request(\"GET\", URL)" + "req = requests.request('GET', URL)" ] }, { @@ -117,10 +117,10 @@ "archive = ZipFile(BytesIO(req.content))\n", "archive.extractall(tempdir)\n", "ret = subprocess.run(\n", - " [\"make\", \"HH_run\"], cwd=os.path.join(tempdir, \"ModelDBFolder\")\n", + " ['make', 'HH_run'], cwd=os.path.join(tempdir, 'ModelDBFolder')\n", ")\n", - "EXEC = os.path.join(tempdir, \"ModelDBFolder\", \"HH_run\")\n", - "print(f\"The executable location is {EXEC}\")" + "EXEC = os.path.join(tempdir, 'ModelDBFolder', 'HH_run')\n", + "print(f'The executable location is {EXEC}')" ] }, { @@ -176,11 +176,11 @@ " EXEC,\n", " str(model),\n", " # the binary cannot very long floats\n", - " f\"{membrane_area:.5f}\",\n", + " f'{membrane_area:.5f}',\n", " str(time_steps),\n", " str(time_step_size),\n", " str(isi),\n", - " f\"{dc:.5f}\",\n", + " f'{dc:.5f}',\n", " str(noise),\n", " str(sine_amplitude),\n", " str(sine_frequency),\n", @@ -195,7 +195,7 @@ " delim_whitespace=True,\n", " header=None,\n", " index_col=0,\n", - " names=[\"t\", \"V\", \"Na\", \"K\"],\n", + " names=['t', 'V', 'Na', 'K'],\n", " )\n", " return df" ] @@ -239,19 +239,19 @@ "\n", "%matplotlib inline\n", "\n", - "gt = {\"dc\": 20, \"membrane_dim\": 10}\n", + "gt = {'dc': 20, 'membrane_dim': 10}\n", "fig, axes = plt.subplots(nrows=2, sharex=True)\n", "fig.set_size_inches((12, 8))\n", "for _ in range(10):\n", " observation = simulate(**gt)\n", - " observation.plot(y=\"K\", color=\"C1\", ax=axes[0])\n", - " observation.plot(y=\"Na\", color=\"C0\", ax=axes[1])\n", + " observation.plot(y='K', color='C1', ax=axes[0])\n", + " observation.plot(y='Na', color='C0', ax=axes[1])\n", "for ax in axes:\n", " ax.legend().set_visible(False)\n", - "axes[0].set_title(\"K\")\n", - "axes[0].set_ylabel(\"K\")\n", - "axes[1].set_title(\"Na\")\n", - "axes[1].set_ylabel(\"Na\");" + "axes[0].set_title('K')\n", + "axes[0].set_ylabel('K')\n", + 
"axes[1].set_title('Na')\n", + "axes[1].set_ylabel('Na');" ] }, { @@ -291,8 +291,8 @@ "dcmin, dcmax = 2, 30\n", "memmin, memmax = 1, 12\n", "prior = Distribution(\n", - " dc=RV(\"uniform\", dcmin, dcmax - dcmin),\n", - " membrane_dim=RV(\"uniform\", memmin, memmax - memmin),\n", + " dc=RV('uniform', dcmin, dcmax - dcmin),\n", + " membrane_dim=RV('uniform', memmin, memmax - memmin),\n", ")" ] }, @@ -310,7 +310,7 @@ "outputs": [], "source": [ "def distance(x, y):\n", - " diff = x[\"data\"][\"K\"] - y[\"data\"][\"K\"]\n", + " diff = x['data']['K'] - y['data']['K']\n", " dist = np.sqrt(np.sum(diff**2))\n", " return dist" ] @@ -331,7 +331,7 @@ "source": [ "def simulate_pyabc(parameter):\n", " res = simulate(**parameter)\n", - " return {\"data\": res}" + " return {'data': res}" ] }, { @@ -388,7 +388,7 @@ "source": [ "abc = ABCSMC(simulate_pyabc, prior, distance, population_size=100)\n", "abc_id = abc.new(\n", - " \"sqlite:///\" + os.path.join(tempdir, \"test.db\"), {\"data\": observation}\n", + " 'sqlite:///' + os.path.join(tempdir, 'test.db'), {'data': observation}\n", ")\n", "history = abc.run(max_nr_populations=10, minimum_epsilon=6)" ] @@ -446,7 +446,7 @@ "dfw = history.get_distribution(m=0)\n", "plot_kde_matrix(\n", " *dfw,\n", - " limits={\"dc\": (dcmin, dcmax), \"membrane_dim\": (memmin, memmax)},\n", + " limits={'dc': (dcmin, dcmax), 'membrane_dim': (memmin, memmax)},\n", " refval=gt,\n", " refval_color='k',\n", ")" @@ -511,7 +511,7 @@ "alpha = 0.5\n", "for _ in range(n):\n", " prior_sample = simulate(**prior.rvs())\n", - " prior_sample.plot(y=\"K\", ax=axes[0], color=\"C1\", alpha=alpha)\n", + " prior_sample.plot(y='K', ax=axes[0], color='C1', alpha=alpha)\n", "\n", "\n", "# Fit a posterior KDE and plot samples form it\n", @@ -520,32 +520,32 @@ "\n", "for _ in range(n):\n", " posterior_sample = simulate(**posterior.rvs())\n", - " posterior_sample.plot(y=\"K\", ax=axes[1], color=\"C0\", alpha=alpha)\n", + " posterior_sample.plot(y='K', ax=axes[1], color='C0', 
alpha=alpha)\n", "\n", "\n", "# Plot the stored summary statistics\n", "sum_stats = history.get_weighted_sum_stats_for_model(m=0, t=history.max_t)\n", "for stored in sum_stats[1][:n]:\n", - " stored[\"data\"].plot(y=\"K\", ax=axes[2], color=\"C2\", alpha=alpha)\n", + " stored['data'].plot(y='K', ax=axes[2], color='C2', alpha=alpha)\n", "\n", "\n", "# Plot the observation\n", "for ax in axes:\n", - " observation.plot(y=\"K\", ax=ax, color=\"k\", linewidth=1.5)\n", + " observation.plot(y='K', ax=ax, color='k', linewidth=1.5)\n", " ax.legend().set_visible(False)\n", - " ax.set_ylabel(\"K\")\n", + " ax.set_ylabel('K')\n", "\n", "# Add a legend with pseudo artists to first plot\n", "axes[0].legend(\n", " [\n", - " plt.plot([0], color=\"C1\")[0],\n", - " plt.plot([0], color=\"C0\")[0],\n", - " plt.plot([0], color=\"C2\")[0],\n", - " plt.plot([0], color=\"k\")[0],\n", + " plt.plot([0], color='C1')[0],\n", + " plt.plot([0], color='C0')[0],\n", + " plt.plot([0], color='C2')[0],\n", + " plt.plot([0], color='k')[0],\n", " ],\n", - " [\"Prior\", \"Posterior\", \"Stored, accepted\", \"Observation\"],\n", + " ['Prior', 'Posterior', 'Stored, accepted', 'Observation'],\n", " bbox_to_anchor=(0.5, 1),\n", - " loc=\"lower center\",\n", + " loc='lower center',\n", " ncol=4,\n", ");" ] diff --git a/doc/examples/using_R.ipynb b/doc/examples/using_R.ipynb index 55dcb6c4..0d29c359 100644 --- a/doc/examples/using_R.ipynb +++ b/doc/examples/using_R.ipynb @@ -65,7 +65,7 @@ "%matplotlib inline\n", "from pyabc.external.r import R\n", "\n", - "r = R(\"myRModel.R\")" + "r = R('myRModel.R')" ] }, { @@ -267,9 +267,9 @@ "metadata": {}, "outputs": [], "source": [ - "model = r.model(\"myModel\")\n", - "distance = r.distance(\"myDistance\")\n", - "sum_stat = r.summary_statistics(\"mySummaryStatistics\")" + "model = r.model('myModel')\n", + "distance = r.distance('myDistance')\n", + "sum_stat = r.summary_statistics('mySummaryStatistics')" ] }, { @@ -300,7 +300,7 @@ "\n", 
"pyabc.settings.set_figure_params('pyabc') # for beautified plots\n", "\n", - "prior = Distribution(meanX=RV(\"uniform\", 0, 10), meanY=RV(\"uniform\", 0, 10))\n", + "prior = Distribution(meanX=RV('uniform', 0, 10), meanY=RV('uniform', 0, 10))\n", "abc = ABCSMC(model, prior, distance, summary_statistics=sum_stat)" ] }, @@ -340,8 +340,8 @@ "import os\n", "from tempfile import gettempdir\n", "\n", - "db = \"sqlite:///\" + os.path.join(gettempdir(), \"test.db\")\n", - "abc.new(db, r.observation(\"mySumStatData\"))" + "db = 'sqlite:///' + os.path.join(gettempdir(), 'test.db')\n", + "abc.new(db, r.observation('mySumStatData'))" ] }, { @@ -453,8 +453,8 @@ " ax = plot_kde_2d(\n", " df,\n", " w,\n", - " \"meanX\",\n", - " \"meanY\",\n", + " 'meanX',\n", + " 'meanY',\n", " xmin=0,\n", " xmax=10,\n", " ymin=0,\n", @@ -463,10 +463,10 @@ " numy=100,\n", " )\n", " ax.scatter(\n", - " [4], [8], edgecolor=\"black\", facecolor=\"white\", label=\"Observation\"\n", + " [4], [8], edgecolor='black', facecolor='white', label='Observation'\n", " )\n", " ax.legend()\n", - " ax.set_title(f\"PDF t={t}\")" + " ax.set_title(f'PDF t={t}')" ] }, { @@ -554,7 +554,7 @@ } ], "source": [ - "history.get_weighted_sum_stats_for_model(m=0, t=1)[1][0][\"cars\"].head()" + "history.get_weighted_sum_stats_for_model(m=0, t=1)[1][0]['cars'].head()" ] }, { diff --git a/doc/examples/wasserstein.ipynb b/doc/examples/wasserstein.ipynb index 10439fc8..c103b77f 100644 --- a/doc/examples/wasserstein.ipynb +++ b/doc/examples/wasserstein.ipynb @@ -80,7 +80,7 @@ "\n", "import pyabc\n", "\n", - "pyabc.settings.set_figure_params(\"pyabc\") # for beautified plots" + "pyabc.settings.set_figure_params('pyabc') # for beautified plots" ] }, { @@ -90,21 +90,21 @@ "metadata": {}, "outputs": [], "source": [ - "p_true = {\"p0\": -0.7, \"p1\": 0.1}\n", + "p_true = {'p0': -0.7, 'p1': 0.1}\n", "cov = np.array([[1, 0.5], [0.5, 1]])\n", "n = 100\n", "\n", "\n", "def model(p):\n", - " mean = np.array([p[\"p0\"], p[\"p1\"]])\n", + " 
mean = np.array([p['p0'], p['p1']])\n", " # shape (n, 2)\n", " y = np.random.multivariate_normal(mean=mean, cov=cov, size=n)\n", - " return {\"y\": y}\n", + " return {'y': y}\n", "\n", "\n", "data = model(p_true)\n", "prior = pyabc.Distribution(\n", - " **{par: pyabc.RV(\"norm\", p_true[par], 0.25) for par in p_true}\n", + " **{par: pyabc.RV('norm', p_true[par], 0.25) for par in p_true}\n", ")\n", "prior_bounds = {par: (p_true[par] - 0.7, p_true[par] + 0.7) for par in p_true}\n", "\n", @@ -114,7 +114,7 @@ "\n", " def __call__(self, data: dict) -> np.ndarray:\n", " # shape (n, dim)\n", - " return data[\"y\"]\n", + " return data['y']\n", "\n", "\n", "class SuffSumstat(pyabc.Sumstat):\n", @@ -122,7 +122,7 @@ "\n", " def __call__(self, data: dict) -> np.ndarray:\n", " # shape (dim,)\n", - " return np.mean(data[\"y\"], axis=0)\n", + " return np.mean(data['y'], axis=0)\n", "\n", "\n", "# population size too small for practice\n", @@ -294,31 +294,31 @@ "\n", "# analysis settings of distance and summary statistics\n", "settings = {\n", - " \"Euclidean\": pyabc.PNormDistance(p=2),\n", - " \"2-Wasserstein\": pyabc.WassersteinDistance(\n", + " 'Euclidean': pyabc.PNormDistance(p=2),\n", + " '2-Wasserstein': pyabc.WassersteinDistance(\n", " p=2,\n", " sumstat=IdSumstat(),\n", " ),\n", - " \"Sliced-2-Wasserstein\": pyabc.SlicedWassersteinDistance(\n", - " metric=\"sqeuclidean\",\n", + " 'Sliced-2-Wasserstein': pyabc.SlicedWassersteinDistance(\n", + " metric='sqeuclidean',\n", " p=2,\n", " sumstat=IdSumstat(),\n", " # number of random projections for Monte-Carlo integration\n", " n_proj=10,\n", " ),\n", - " \"Sufficient summary\": pyabc.PNormDistance(\n", + " 'Sufficient summary': pyabc.PNormDistance(\n", " p=2,\n", " sumstat=SuffSumstat(),\n", " ),\n", "}\n", "\n", "# runs\n", - "db_file = tempfile.mkstemp(suffix=\".db\")[1]\n", + "db_file = tempfile.mkstemp(suffix='.db')[1]\n", "hs = []\n", "for id_, distance in settings.items():\n", " print(id_)\n", " abc = pyabc.ABCSMC(model, 
prior, distance, population_size=pop_size)\n", - " h = abc.new(db=\"sqlite:///\" + db_file, observed_sum_stat=data)\n", + " h = abc.new(db='sqlite:///' + db_file, observed_sum_stat=data)\n", " abc.run(max_total_nr_simulations=max_eval)\n", " hs.append(h)" ] @@ -375,9 +375,9 @@ " label=id_,\n", " numx=500,\n", " refval=p_true,\n", - " refval_color=\"grey\",\n", + " refval_color='grey',\n", " )\n", - "axes[-1].legend(bbox_to_anchor=(0.9, 0.5), loc=\"center left\")" + "axes[-1].legend(bbox_to_anchor=(0.9, 0.5), loc='center left')" ] }, { diff --git a/doc/sampler.rst b/doc/sampler.rst index 3b725940..ec906393 100644 --- a/doc/sampler.rst +++ b/doc/sampler.rst @@ -410,7 +410,7 @@ submission of many individual, yet related or similar, jobs. sbatch --array=0-99 script_redis_worker script_redis_worker.sh ${SLURM_ARRAY_TASK_ID} Using ``--array`` one specifies the number of jobs (here ``n_jobs`` is manually set to 99, resulting in 100 tasks) -and note that depending on the variable ``${SLURM_ARRAY_TASK_ID}`` the script ``script_redis_worker.sh`` could +and note that depending on the variable ``${SLURM_ARRAY_TASK_ID}`` the script ``script_redis_worker.sh`` could handle for instance different input parameters or input files as identified by a unique index. diff --git a/pyabc/petab/amici.py b/pyabc/petab/amici.py index 23054375..f67c8ab4 100644 --- a/pyabc/petab/amici.py +++ b/pyabc/petab/amici.py @@ -1,5 +1,7 @@ """PEtab import with AMICI simulator.""" +from __future__ import annotations + import copy import logging import os @@ -18,18 +20,20 @@ except ImportError: petab = C = None logger.error( - 'Install PEtab (see https://github.com/icb-dcm/petab) to use ' + 'Install the PEtab library ' + '(see https://github.com/PEtab-dev/libpetab-python/) to use ' 'the petab functionality, e.g. 
via `pip install pyabc[petab]`' ) try: import amici - from amici.petab import petab_import as amici_petab_import - from amici.petab.simulations import LLH, RDATAS, simulate_petab + import amici.sim.sundials as asd + from amici.importers.petab.v1 import import_petab_problem + from amici.sim.sundials.petab.v1 import LLH, RDATAS, simulate_petab except ImportError: - amici = amici_petab_import = simulate_petab = LLH = RDATAS = None + amici = import_petab_problem = simulate_petab = LLH = RDATAS = None logger.error( - 'Install amici (see https://github.com/icb-dcm/amici) to use ' + 'Install amici (see https://github.com/AMICI-dev/AMICI/) to use ' 'the amici functionality, e.g. via `pip install pyabc[amici]`' ) @@ -113,7 +117,7 @@ def __getstate__(self) -> dict: try: # write amici solver settings to file try: - amici.writeSolverSettingsToHDF5(self.amici_solver, _file) + asd.write_solver_settings_to_hdf5(self.amici_solver, _file) except AttributeError as e: e.args += ( 'Pickling the AmiciObjective requires an AMICI ' @@ -133,8 +137,8 @@ def __getstate__(self) -> dict: def __setstate__(self, state: dict): self.__dict__.update(state) - model = amici_petab_import.import_petab_problem(self.petab_problem) - solver = model.getSolver() + model = import_petab_problem(self.petab_problem) + solver = model.create_solver() _fd, _file = tempfile.mkstemp() try: @@ -143,7 +147,7 @@ def __setstate__(self, state: dict): f.write(state['amici_solver_settings']) # read in solver settings try: - amici.readSolverSettingsFromHDF5(_file, solver) + asd.read_solver_settings_from_hdf5(_file, solver) except AttributeError as err: if not err.args: err.args = ('',) @@ -173,28 +177,26 @@ class AmiciPetabImporter(PetabImporter): amici_model: A corresponding compiled AMICI model that allows simulating data for parameters. If not provided, one is created using - `amici.petab_import.import_petab_problem`. + `amici.importers.petab.v1.import_petab_problem`. amici_solver: An AMICI solver to simulate the model. 
If not provided, one is created - using `amici_model.getSolver()`. + using `amici_model.create_solver()`. """ def __init__( self, petab_problem: petab.Problem, - amici_model: 'amici.Model' = None, - amici_solver: 'amici.Solver' = None, + amici_model: amici.sim.sundials.Model = None, + amici_solver: amici.sim.sundials.Solver = None, ): super().__init__(petab_problem=petab_problem) if amici_model is None: - amici_model = amici_petab_import.import_petab_problem( - petab_problem - ) + amici_model = import_petab_problem(petab_problem) self.amici_model = amici_model if amici_solver is None: - amici_solver = self.amici_model.getSolver() + amici_solver = self.amici_model.create_solver() self.amici_solver = amici_solver def create_model( @@ -214,8 +216,8 @@ def create_model( Whether to return the simulations also (large, can be stored in database). return_rdatas: - Whether to return the full `List[amici.ExpData]` objects (large, - cannot be stored in database). + Whether to return the full `list[amici.sim.sundials.ExpData]` + objects (large, cannot be stored in database). 
Returns ------- @@ -237,7 +239,7 @@ def create_model( raise AssertionError('Parameter id mismatch') # no gradients for pyabc - self.amici_solver.setSensitivityOrder(0) + self.amici_solver.set_sensitivity_order(asd.SensitivityOrder.none) model = AmiciModel( petab_problem=self.petab_problem, diff --git a/pyabc/version.py b/pyabc/version.py index 080b4ae8..8327b506 100644 --- a/pyabc/version.py +++ b/pyabc/version.py @@ -1 +1 @@ -__version__ = '0.12.17' +__version__ = '0.12.18' diff --git a/pyproject.toml b/pyproject.toml index 3bd82587..7b274132 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,7 @@ dynamic = ["version"] description = "Distributed, likelihood-free ABC-SMC inference" readme = { file = "README.md", content-type = "text/markdown" } -requires-python = ">=3.10" +requires-python = ">=3.11" authors = [{ name = "The pyABC developers", email = "jan.hasenauer@uni-bonn.de" }] maintainers = [{ name = "Jonas Arruda", email = "jonas.arruda@uni-bonn.de" }] @@ -33,7 +33,6 @@ classifiers = [ "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", @@ -45,11 +44,11 @@ dependencies = [ "pandas>=2.0.1", "cloudpickle>=1.5.0", "scikit-learn>=0.23.1", - "click>=7.1.2", + "click>=8.3.2", "redis>=2.10.6", "distributed>=2024.2.0", "matplotlib>=3.3.0", - "sqlalchemy>=2.0.0", + "sqlalchemy>=2.0.49", "jabbar>=0.0.10", "gitpython>=3.1.7", "ipykernel>=7.2.0", @@ -78,13 +77,13 @@ pyarrow = ["pyarrow>=22.0.0"] r = [ "rpy2>=3.4.4", "cffi>=1.14.5", - "ipython>=7.18.1", - "pygments>=2.6.1", + "ipython>=9.10.0", + "pygments>=2.20.0", ] julia = [ "julia>=0.6.2", - "pygments>=2.6.1", - "ipython>=7.18.1", + "pygments>=2.20.0", + "ipython>=9.10.0", ] copasi = ["copasi-basico>=0.8"] ot = [ @@ -93,7 +92,7 @@ ot = [ ] petab = ["petab>=0.2.0"] #petab-test = 
["petabtests>=0.0.1"] # problem with pysb -amici = ["amici>=0.32.0,<1.0.0"] +amici = ["amici>=1.0.0"] yaml2sbml = ["yaml2sbml>=0.2.1"] migrate = ["alembic>=1.5.4"] plotly = ["plotly>=5.3.1", "kaleido>=0.2.1"] @@ -105,15 +104,15 @@ doc = [ "nbconvert>=7.16.4", "sphinx-rtd-theme>=2.0.0", "sphinx-autodoc-typehints>=2.0.0", - "ipython>=8.4.0", + "ipython>=9.10.0", "sphinx-autobuild>=2021.3.14", ] test = [ - "pytest>=8.0.0", - "pytest-cov>=6.0.0", + "pytest>=9.0.3", + "pytest-cov>=7.1.0", "pytest-rerunfailures>=14.0.0", "pytest-xdist>=3.5.0", - "coverage[toml]>=7.0.0", + "coverage[toml]>=7.13.5", ] [project.scripts] @@ -137,7 +136,7 @@ version = { attr = "pyabc.version.__version__" } [tool.ruff] line-length = 79 -target-version = "py310" +target-version = "py311" [tool.ruff.lint] select = [ @@ -208,11 +207,11 @@ source = [ [dependency-groups] dev = [ "pre-commit>=4.5.1", - "tox>=4.38.0", - "ruff>=0.8.0", - "build>=1.0.0", + "tox>=4.52.1", + "ruff>=0.15.10", + "build>=1.4.3", "twine>=4.0.0", - "pytest>=8.0.0", + "pytest>=9.0.3", "pytest-cov>=6.0.0", "pytest-xdist>=3.5.0", ] diff --git a/test/base/test_bytesstorage.py b/test/base/test_bytesstorage.py index c4275af1..51ffe5b3 100644 --- a/test/base/test_bytesstorage.py +++ b/test/base/test_bytesstorage.py @@ -31,14 +31,9 @@ 'np-single-int', 'np-single-float', 'np-single-str', - 'r-df-cars', - 'r-df-faithful', - 'r-df-iris', ] ) def object_(request): - from rpy2.robjects import r - par = request.param if par == 'empty': return pd.DataFrame() @@ -103,20 +98,10 @@ def object_(request): return np.array(4.1) if par == 'np-single-str': return np.array('foo bar') - if par == 'r-df-cars': - return r['mtcars'] - if par == 'r-df-iris': - return r['iris'] - if par == 'r-df-faithful': - return r['faithful'] raise Exception('Invalid Test DataFrame Type') def test_storage(object_): - import rpy2.robjects as robjects - from rpy2.robjects import pandas2ri - from rpy2.robjects.conversion import get_conversion, localconverter - serial = 
to_bytes(object_) assert isinstance(serial, bytes) @@ -138,23 +123,13 @@ def test_storage(object_): assert (object_ == rebuilt).all().all() elif isinstance(object_, pd.Series): assert (object_.to_frame() == rebuilt).all().all() - elif isinstance(object_, robjects.DataFrame): - conv = get_conversion() - conv = conv + pandas2ri.converter - with localconverter(conv): - assert (conv.rpy2py(object_) == rebuilt).all().all() else: raise Exception('Could not compare') def _check_type(object_, rebuilt): - import rpy2.robjects as robjects - - # r objects are converted to pd.DataFrame - if isinstance(object_, robjects.DataFrame): - assert isinstance(rebuilt, pd.DataFrame) # pd.Series are converted to pd.DataFrame - elif isinstance(object_, pd.Series): + if isinstance(object_, pd.Series): assert isinstance(rebuilt, pd.DataFrame) # <= 1 dim numpy arrays are converted to primitive type elif isinstance(object_, np.ndarray) and object_.size <= 1: diff --git a/test/base/test_integrated_model.py b/test/base/test_integrated_model.py index b774a024..990a5bb1 100644 --- a/test/base/test_integrated_model.py +++ b/test/base/test_integrated_model.py @@ -60,7 +60,7 @@ def test_early_stopping(): ) # initializing eps manually is necessary as we only have an integrated # model - # TODO automatically iniitalizing would be possible, e.g. using eps = inf + # TODO automatically initializing would be possible, e.g. 
using eps = inf abc.new(pyabc.create_sqlite_db_id()) abc.run(max_nr_populations=3) diff --git a/test/base/test_storage.py b/test/base/test_storage.py index 2e32cafe..b0f9259f 100644 --- a/test/base/test_storage.py +++ b/test/base/test_storage.py @@ -240,9 +240,6 @@ def test_single_particle_save_load_np_int64(history: History): def test_sum_stats_save_load(history: History): - from rpy2.robjects import pandas2ri, r - from rpy2.robjects.conversion import get_conversion, localconverter - arr = np.random.rand(10) arr2 = np.random.rand(10, 2) particle_list = [ @@ -254,7 +251,6 @@ def test_sum_stats_save_load(history: History): 'ss1': 0.1, 'ss2': arr2, 'ss3': example_df(), - 'rdf0': r['iris'], }, distance=0.1, ), @@ -266,7 +262,6 @@ def test_sum_stats_save_load(history: History): 'ss12': 0.11, 'ss22': arr, 'ss33': example_df(), - 'rdf': r['mtcars'], }, distance=0.1, ), @@ -280,14 +275,9 @@ def test_sum_stats_save_load(history: History): assert sum_stats[0]['ss1'] == 0.1 assert (sum_stats[0]['ss2'] == arr2).all() assert (sum_stats[0]['ss3'] == example_df()).all().all() - conv = get_conversion() - with localconverter(conv + pandas2ri.converter): - assert (sum_stats[0]['rdf0'] == r['iris']).all().all() assert sum_stats[1]['ss12'] == 0.11 assert (sum_stats[1]['ss22'] == arr).all() assert (sum_stats[1]['ss33'] == example_df()).all().all() - with localconverter(conv + pandas2ri.converter): - assert (sum_stats[1]['rdf'] == r['mtcars']).all().all() def test_total_nr_samples(history: History): diff --git a/test/external/test_rpy2_bytesstorage.py b/test/external/test_rpy2_bytesstorage.py new file mode 100644 index 00000000..9274bce3 --- /dev/null +++ b/test/external/test_rpy2_bytesstorage.py @@ -0,0 +1,147 @@ +import numpy as np +import pandas as pd +import pytest + +from pyabc.storage.bytes_storage import from_bytes, to_bytes +from pyabc.storage.numpy_bytes_storage import _primitive_types + + +@pytest.fixture( + params=[ + 'empty', + 'r-df-cars', + 'r-df-faithful', + 'r-df-iris', 
+ ] +) +def object_(request): + from rpy2.robjects import r + + par = request.param + if par == 'empty': + return pd.DataFrame() + if par == 'df-int': + return pd.DataFrame( + { + 'a': np.random.randint(-20, 20, 100), + 'b': np.random.randint(-20, 20, 100), + } + ) + if par == 'df-float': + return pd.DataFrame( + {'a': np.random.randn(100), 'b': np.random.randn(100)} + ) + if par == 'df-non_numeric_str': + return pd.DataFrame({'a': ['foo', 'bar'], 'b': ['bar', 'foo']}) + + if par == 'df-numeric_str': + return pd.DataFrame( + { + 'a': list(map(str, np.random.randn(100))), + 'b': list(map(str, np.random.randint(-20, 20, 100))), + } + ) + if par == 'df-int-float-numeric_str': + return pd.DataFrame( + { + 'a': np.random.randint(-20, 20, 100), + 'b': np.random.randn(100), + 'c': list(map(str, np.random.randint(-20, 20, 100))), + } + ) + if par == 'df-int-float-non_numeric_str-str_ind': + return pd.DataFrame( + {'a': [1, 2], 'b': [1.1, 2.2], 'c': ['foo', 'bar']}, + index=['first', 'second'], + ) + if par == 'df-int-float-numeric_str-str_ind': + return pd.DataFrame( + {'a': [1, 2], 'b': [1.1, 2.2], 'c': ['1', '2']}, + index=['first', 'second'], + ) + if par == 'series': + return pd.Series({'a': 42, 'b': 3.8, 'c': 4.2}) + if par == 'series-no_ind': + return pd.Series(np.random.randn(10)) + if par == 'py-int': + return 42 + if par == 'py-float': + return 42.42 + if par == 'py-str': + return 'foo bar' + if par == 'np-int': + return np.random.randint(-20, 20, 100) + if par == 'np-float': + return np.random.randn(100) + if par == 'np-str': + return np.array(['foo', 'bar']) + if par == 'np-single-int': + return np.array(3) + if par == 'np-single-float': + return np.array(4.1) + if par == 'np-single-str': + return np.array('foo bar') + if par == 'r-df-cars': + return r['mtcars'] + if par == 'r-df-iris': + return r['iris'] + if par == 'r-df-faithful': + return r['faithful'] + raise Exception('Invalid Test DataFrame Type') + + +def test_storage(object_): + import rpy2.robjects as 
robjects + from rpy2.robjects import pandas2ri + + serial = to_bytes(object_) + assert isinstance(serial, bytes) + + rebuilt = from_bytes(serial) + + # check type + _check_type(object_, rebuilt) + + # check value + if isinstance(object_, int): + assert object_ == rebuilt + elif isinstance(object_, float): + assert object_ == rebuilt + elif isinstance(object_, str): + assert object_ == rebuilt + elif isinstance(object_, np.ndarray): + assert (object_ == rebuilt).all() + elif isinstance(object_, pd.DataFrame): + assert (object_ == rebuilt).all().all() + elif isinstance(object_, pd.Series): + assert (object_.to_frame() == rebuilt).all().all() + elif isinstance(object_, robjects.DataFrame): + with (robjects.default_converter + pandas2ri.converter).context(): + converted = robjects.conversion.get_conversion().rpy2py(object_) + assert (converted == rebuilt).all().all() + else: + raise Exception('Could not compare') + + +def _check_type(object_, rebuilt): + import rpy2.robjects as robjects + + # r objects are converted to pd.DataFrame + if isinstance(object_, robjects.DataFrame): + assert isinstance(rebuilt, pd.DataFrame) + # pd.Series are converted to pd.DataFrame + elif isinstance(object_, pd.Series): + assert isinstance(rebuilt, pd.DataFrame) + # <= 1 dim numpy arrays are converted to primitive type + elif isinstance(object_, np.ndarray) and object_.size <= 1: + for type_ in _primitive_types: + try: + if type_(object_) == object_: + assert isinstance(rebuilt, type_) + return + except (TypeError, ValueError): + pass + raise Exception('Could not check type') + # all others keep their type + else: + assert isinstance(rebuilt, type(object_)) diff --git a/test/external/test_rpy2_storage.py b/test/external/test_rpy2_storage.py new file mode 100644 index 00000000..876a6772 --- /dev/null +++ b/test/external/test_rpy2_storage.py @@ -0,0 +1,98 @@ +import os +import tempfile + +import numpy as np +import pandas as pd +import pytest + +from pyabc import History +from 
pyabc.parameters import Parameter +from pyabc.population import Particle, Population + + +def example_df(): + return pd.DataFrame( + {'col_a': [1, 2], 'col_b': [1.1, 2.2], 'col_c': ['foo', 'bar']}, + index=['ind_first', 'ind_second'], + ) + + +def path(): + return os.path.join(tempfile.gettempdir(), 'history_test.db') + + +@pytest.fixture(params=['file', 'memory']) +def history(request): + # Test in-memory and filesystem based database + if request.param == 'file': + this_path = '/' + path() + elif request.param == 'memory': + this_path = '' + else: + raise Exception(f'Bad database type for testing: {request.param}') + model_names = [f'fake_name_{k}' for k in range(50)] + h = History('sqlite://' + this_path) + h.store_initial_data( + 0, {}, {}, {}, model_names, '', '', '{"name": "pop_strategy_str_test"}' + ) + yield h + if request.param == 'file': + try: + os.remove(this_path) + except FileNotFoundError: + pass + + +def test_sum_stats_save_load(history: History): + import rpy2.robjects as robjects + from rpy2.robjects import pandas2ri, r + + arr = np.random.rand(10) + arr2 = np.random.rand(10, 2) + particle_list = [ + Particle( + m=0, + parameter=Parameter({'a': 23, 'b': 12}), + weight=0.2, + sum_stat={ + 'ss1': 0.1, + 'ss2': arr2, + 'ss3': example_df(), + 'rdf0': r['iris'], + }, + distance=0.1, + ), + Particle( + m=0, + parameter=Parameter({'a': 23, 'b': 12}), + weight=0.8, + sum_stat={ + 'ss12': 0.11, + 'ss22': arr, + 'ss33': example_df(), + 'rdf': r['mtcars'], + }, + distance=0.1, + ), + ] + + history.append_population( + 0, 42, Population(particle_list), 2, ['m1', 'm2'] + ) + weights, sum_stats = history.get_weighted_sum_stats_for_model(0, 0) + assert (weights == np.array([0.2, 0.8])).all() + assert sum_stats[0]['ss1'] == 0.1 + assert (sum_stats[0]['ss2'] == arr2).all() + assert (sum_stats[0]['ss3'] == example_df()).all().all() + + with (robjects.default_converter + pandas2ri.converter).context(): + iris_pd = 
robjects.conversion.get_conversion().rpy2py(r['iris']) + assert (sum_stats[0]['rdf0'] == iris_pd).all().all() + + assert sum_stats[1]['ss12'] == 0.11 + assert (sum_stats[1]['ss22'] == arr).all() + assert (sum_stats[1]['ss33'] == example_df()).all().all() + + with (robjects.default_converter + pandas2ri.converter).context(): + mtcars_pd = robjects.conversion.get_conversion().rpy2py(r['mtcars']) + assert (sum_stats[1]['rdf'] == mtcars_pd).all().all() diff --git a/test/petab/test_petab.py b/test/petab/test_petab.py index 3e6a050e..326aab4b 100644 --- a/test/petab/test_petab.py +++ b/test/petab/test_petab.py @@ -2,7 +2,6 @@ import os import sys -import amici.petab.petab_import import cloudpickle as pickle import git import matplotlib.pyplot as plt @@ -12,6 +11,7 @@ import petab.v1.C as C import pytest import scipy.stats +from amici.importers.petab.v1 import import_petab_problem import pyabc.petab import pyabc.petab.base @@ -322,12 +322,12 @@ def boehm_model_importer(): output_folder = f'amici_models/{model_name}' if output_folder not in sys.path: sys.path.insert(0, output_folder) - model = amici.petab.petab_import.import_petab_problem( + model = import_petab_problem( petab_problem, - model_output_dir=output_folder, + output_dir=output_folder, generate_sensitivity_code=False, ) - solver = model.getSolver() + solver = model.create_solver() # import to pyabc importer = pyabc.petab.AmiciPetabImporter(petab_problem, model, solver) diff --git a/test/petab/test_petab_suite.py b/test/petab/test_petab_suite.py index d0f1bfa4..78068a7a 100644 --- a/test/petab/test_petab_suite.py +++ b/test/petab/test_petab_suite.py @@ -9,10 +9,10 @@ import pyabc try: - import amici.petab.petab_import - import amici.petab.simulations import petab.v1 as petab import petabtests + from amici.importers.petab.v1 import import_petab_problem + from amici.sim.sundials.petab.v1 import rdatas_to_measurement_df import pyabc.petab except ImportError: @@ -104,13 +104,13 @@ def _execute_case(case): # models 
with the same name in a single python session model_name = f'petab_{MODEL_TYPE}_test_case_{case}_{PETAB_VERSION.replace(".", "_")}' - amici_model = amici.petab.petab_import.import_petab_problem( + amici_model = import_petab_problem( petab_problem=petab_problem, model_name=model_name, - model_output_dir=output_folder, + output_dir=output_folder, generate_sensitivity_code=False, ) - solver = amici_model.getSolver() + solver = amici_model.create_solver() # import to pyabc importer = pyabc.petab.AmiciPetabImporter( @@ -127,7 +127,7 @@ def _execute_case(case): # extract results rdatas = ret['rdatas'] chi2 = sum(rdata['chi2'] for rdata in rdatas) - simulation_df = amici.petab.simulations.rdatas_to_measurement_df( + simulation_df = rdatas_to_measurement_df( rdatas, amici_model, importer.petab_problem.measurement_df ) petab.check_measurement_df( diff --git a/tox.ini b/tox.ini index 916404fa..f34a2349 100644 --- a/tox.ini +++ b/tox.ini @@ -42,11 +42,8 @@ description = Clean up before tests [testenv:base] -setenv = - LD_LIBRARY_PATH = {env:LD_LIBRARY_PATH:/usr/lib}:/usr/local/lib/R/lib extras = test - r autograd ot pyarrow @@ -57,8 +54,6 @@ description = Test basic functionality [testenv:visualization] -setenv = - LD_LIBRARY_PATH = {env:LD_LIBRARY_PATH:/usr/lib}:/usr/local/lib/R/lib extras = test plotly @@ -76,11 +71,16 @@ setenv = extras = test r + pyarrow commands = python -m pytest --cov=pyabc --cov-report=xml --cov-append \ test/external/test_external.py -s python -m pytest --cov=pyabc --cov-report=xml --cov-append \ test/external/test_rpy2.py -s + python -m pytest --cov=pyabc --cov-report=xml --cov-append \ + test/external/test_rpy2_bytesstorage.py -s + python -m pytest --cov=pyabc --cov-report=xml --cov-append \ + test/external/test_rpy2_storage.py -s description = Test external model simulators (incl. 
R) @@ -89,16 +89,21 @@ extras = test julia copasi +commands_pre = + python -c "import subprocess, sys; subprocess.run(['julia', '-e', f'using Pkg; ENV[\"PYTHON\"]=\"{sys.executable}\"; Pkg.build(\"PyCall\")'], check=True)" commands = python -c "import julia; julia.install()" - python -m pytest --cov=pyabc --cov-report=xml --cov-append \ - test/external/test_pyjulia.py -s + #python -m pytest --cov=pyabc --cov-report=xml --cov-append \ + # test/external/test_pyjulia.py -s python -m pytest --cov=pyabc --cov-report=xml --cov-append \ test/copasi -s description = Test external model simulators (Julia, COPASI) [testenv:petab] +deps = + git+https://github.com/PEtab-dev/petab_test_suite@main + git+https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab.git@master\#subdirectory=src/python&egg=benchmark_models_petab extras = test petab @@ -162,6 +167,8 @@ extras = autograd julia copasi +commands_pre = + python -c "import subprocess, sys; subprocess.run(['julia', '-e', f'using Pkg; ENV[\"PYTHON\"]=\"{sys.executable}\"; Pkg.build(\"PyCall\")'], check=True)" commands = python -c "import julia; julia.install()" bash test/run_notebooks.sh 2 @@ -184,7 +191,7 @@ description = [testenv:quality] skip_install = true deps = - ruff>=0.8.0 + ruff>=0.15.10 commands = ruff check pyabc test test_performance ruff format --check pyabc test test_performance