Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 31 additions & 0 deletions .github/workflows/refresh-ort-gpu.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
name: Refresh ONNX Runtime GPU registry

on:
  schedule:
    # 1st of each month at 06:00 UTC. ONNX Runtime ships every 4-6 weeks
    # and Radeon updates ROCm patches roughly quarterly, so monthly is
    # frequent enough to catch updates without spamming review
    - cron: '0 6 1 * *'
  workflow_dispatch:

# Needed to push the refresh branch and open the PR with GITHUB_TOKEN.
permissions:
  contents: write
  pull-requests: write

jobs:
  refresh:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
      # Commit author identity for the refresh commit the script creates.
      - name: Configure git
        run: |
          git config user.name 'github-actions[bot]'
          git config user.email 'github-actions[bot]@users.noreply.github.com'
      - name: Refresh registry and open PR if changed
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        # Deliberately no "|| true": masking failures would leave a broken
        # refresh script unnoticed forever. A failing scheduled run shows up
        # red and notifies maintainers, which is the desired behavior.
        run: |
          python3 tools/ai/refresh-ort-gpu.py --update --pr
2 changes: 2 additions & 0 deletions data/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,8 @@ if(USE_AI)
#
FILE(COPY ai_models.json DESTINATION "${DARKTABLE_DATADIR}")
install(FILES ai_models.json DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/darktable COMPONENT DTApplication)
# GPU ONNX Runtime package registry (data/ort_gpu.json): staged into the
# build-tree datadir and installed next to ai_models.json so the install
# scripts and the preferences UI can locate the download entries.
FILE(COPY ort_gpu.json DESTINATION "${DARKTABLE_DATADIR}")
install(FILES ort_gpu.json DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/darktable COMPONENT DTApplication)
endif(USE_AI)

#
Expand Down
155 changes: 155 additions & 0 deletions data/ort_gpu.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,155 @@
{
"version": 1,
"install_docs": {
"nvidia": "https://developer.nvidia.com/cudnn-downloads",
"amd": "https://rocm.docs.amd.com/projects/install-on-linux/en/latest/"
},
"packages": [
{
"vendor": "nvidia",
"platform": "linux",
"arch": "x86_64",
"cuda_min": "12.0",
"cuda_max": "12.99",
"ort_version": "1.25.1",
"url": "https://github.com/microsoft/onnxruntime/releases/download/v1.25.1/onnxruntime-linux-x64-gpu-1.25.1.tgz",
"sha256": "ddfc4ca4ccc9cd5345d3820edab710ee84e749569d052eed92c42693d3b448a8",
"format": "tgz",
"lib_pattern": "libonnxruntime",
"install_subdir": "onnxruntime-cuda",
"size_mb": 250,
"requirements": "CUDA 12.x, cuDNN 9.x"
},
{
"vendor": "nvidia",
"platform": "linux",
"arch": "x86_64",
"cuda_min": "13.0",
"cuda_max": "13.99",
"ort_version": "1.25.1",
"url": "https://github.com/microsoft/onnxruntime/releases/download/v1.25.1/onnxruntime-linux-x64-gpu_cuda13-1.25.1.tgz",
"sha256": "ebc14e1290db2a30a7bb415bd1c3e1390a7816bb4db87677dc36d071ed22833c",
"format": "tgz",
"lib_pattern": "libonnxruntime",
"install_subdir": "onnxruntime-cuda",
"size_mb": 200,
"requirements": "CUDA 13.x, cuDNN 9.x"
},
{
"vendor": "nvidia",
"platform": "windows",
"arch": "x86_64",
"cuda_min": "12.0",
"cuda_max": "12.99",
"ort_version": "1.25.1",
"url": "https://github.com/microsoft/onnxruntime/releases/download/v1.25.1/onnxruntime-win-x64-gpu-1.25.1.zip",
"sha256": "e72bc4895f84400368382d7b7287c018eabbb2be384e67446f3ab64af7fe5552",
"format": "zip",
"lib_pattern": "onnxruntime",
"install_subdir": "onnxruntime-cuda",
"size_mb": 300,
"requirements": "CUDA 12.x, cuDNN 9.x"
},
{
"vendor": "nvidia",
"platform": "windows",
"arch": "x86_64",
"cuda_min": "13.0",
"cuda_max": "13.99",
"ort_version": "1.25.1",
"url": "https://github.com/microsoft/onnxruntime/releases/download/v1.25.1/onnxruntime-win-x64-gpu_cuda13-1.25.1.zip",
"sha256": "d54b465e5f9300d8928c43772751ac95c123ebd90069a0a4b8ce02e503335469",
"format": "zip",
"lib_pattern": "onnxruntime",
"install_subdir": "onnxruntime-cuda",
"size_mb": 300,
"requirements": "CUDA 13.x, cuDNN 9.x"
},
{
"vendor": "amd",
"platform": "linux",
"arch": "x86_64",
"rocm_min": "7.0",
"rocm_max": "7.0",
"ort_version": "1.22.1",
"url": "https://repo.radeon.com/rocm/manylinux/rocm-rel-7.0.2/onnxruntime_rocm-1.22.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl",
"sha256": "0f9c1555c31cc1bef84504807a0747486017ec4bd9b9f7df0b15e659140a7f7e",
"format": "whl",
"lib_pattern": "libonnxruntime",
"install_subdir": "onnxruntime-migraphx",
"size_mb": 200,
"requirements": "ROCm 7.0, MIGraphX",
"required_libs": [
"libmigraphx_c.so.3",
"librocm_smi64.so.7"
]
},
{
"vendor": "amd",
"platform": "linux",
"arch": "x86_64",
"rocm_min": "7.1",
"rocm_max": "7.1",
"ort_version": "1.23.1",
"url": "https://repo.radeon.com/rocm/manylinux/rocm-rel-7.1.1/onnxruntime_migraphx-1.23.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl",
"sha256": "0bb62f7c2b326b2435396ef90a5f3ad030fe779aa9d0da7013d8b514c27de08f",
"format": "whl",
"lib_pattern": "libonnxruntime",
"install_subdir": "onnxruntime-migraphx",
"size_mb": 100,
"requirements": "ROCm 7.1, MIGraphX"
},
{
"vendor": "amd",
"platform": "linux",
"arch": "x86_64",
"rocm_min": "7.2",
"rocm_max": "7.2",
"ort_version": "1.23.2",
"url": "https://repo.radeon.com/rocm/manylinux/rocm-rel-7.2.2/onnxruntime_migraphx-1.23.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl",
"sha256": "76f22bba96991d6653c8169999817203cde72e6703b8211f00dc8dd80ff8e43b",
"format": "whl",
"lib_pattern": "libonnxruntime",
"install_subdir": "onnxruntime-migraphx",
"size_mb": 50,
"requirements": "ROCm 7.2, MIGraphX"
},
{
"vendor": "intel",
"platform": "linux",
"arch": "x86_64",
"ort_version": "1.24.1",
"url": "https://files.pythonhosted.org/packages/50/cf/17ba72de2df0fcba349937d2788f154397bbc2d1a2d67772a97e26f6bc5f/onnxruntime_openvino-1.24.1-cp312-cp312-manylinux_2_28_x86_64.whl",
"sha256": "d617fac2f59a6ab5ea59a788c3e1592240a129642519aaeaa774761dfe35150e",
"format": "whl",
"lib_pattern": "libonnxruntime",
"lib_extra_patterns": [
"libopenvino",
"libtbb"
],
"install_subdir": "onnxruntime-openvino",
"size_mb": 80,
"requirements": "Intel GPU driver (OpenCL)"
},
{
"vendor": "intel",
"platform": "windows",
"arch": "x86_64",
"ort_version": "1.24.1",
"url": "https://files.pythonhosted.org/packages/59/37/d301f2c68b19a9485ed5db3047e0fb52478f3e73eb08c7d2a7c61be7cc1c/onnxruntime_openvino-1.24.1-cp312-cp312-win_amd64.whl",
"sha256": "f186335a9c9b255633275290da7521d3d4d14c7773fee3127bfa040234d3fa5a",
"format": "whl",
"lib_pattern": "onnxruntime",
"install_subdir": "onnxruntime-openvino",
"size_mb": 20,
"requirements": "Intel GPU driver",
"runtime_url": "https://files.pythonhosted.org/packages/73/cb/07035dcbf60347c0706c4f8d3ffb003c34fe8205e4041699a6655cc858c3/openvino-2026.1.0-21367-cp312-cp312-win_amd64.whl",
"runtime_sha256": "21fbc8f9aa765d9ded024311f4e48e38118643cda0f0139dcfa5422c0c457eb6",
"runtime_lib_pattern": "openvino",
"runtime_extra_patterns": [
"tbb"
],
"runtime_size_mb": 70
}
]
}
149 changes: 149 additions & 0 deletions tools/ai/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,149 @@
# GPU-Accelerated ONNX Runtime for darktable

darktable bundles a CPU-only ONNX Runtime on Linux, DirectML on Windows,
and CoreML on macOS. To enable GPU acceleration for AI features (denoise,
upscale, segmentation), install a GPU-enabled ONNX Runtime build using the
preferences UI or one of the install scripts in this directory.

## What's bundled by default

| Platform | Bundled ONNX Runtime | GPU support |
|----------|------------|-------------|
| Linux | CPU only | None – install GPU ONNX Runtime below |
| Windows | DirectML | AMD, NVIDIA, Intel via DirectX 12 |
| macOS | CoreML | Apple Neural Engine |

## Easiest: install from darktable preferences

1. Open darktable preferences (Ctrl+,)
2. Go to the **AI** tab
3. Click **install** – darktable detects your GPU and downloads
the correct ONNX Runtime package automatically
4. Restart darktable

Click **detect** instead to find a previously installed or
system-packaged ONNX Runtime library.

## Installing via script

Linux:
```bash
./tools/ai/install-ort-gpu.sh --help # see all flags
./tools/ai/install-ort-gpu.sh
```

Windows (PowerShell):
```powershell
.\tools\ai\install-ort-gpu.ps1 -Help # see all flags
.\tools\ai\install-ort-gpu.ps1
```

If Windows blocks the script ("running scripts is disabled on this
system"), bypass once:

```powershell
powershell -ExecutionPolicy Bypass -File .\tools\ai\install-ort-gpu.ps1
```

### Requirements

**NVIDIA (CUDA)** – Pascal-or-newer GPU (compute 6.0+), driver 525+,
CUDA 12.x or 13.x toolkit, cuDNN 9.x.

**AMD (MIGraphX)** – ROCm-supported GPU (Radeon RX 6000+ / Instinct
MI100+), ROCm 7.x with MIGraphX. Wheels are manylinux-repaired and
bundle their own ROCm runtime.

**Intel (OpenVINO)** – Intel iGPU (HD/UHD/Iris Xe) or Arc discrete,
GPU driver with OpenCL (`intel-opencl-icd`) and/or Level Zero. The
OpenVINO runtime ships in the package.

### AMD: building from source

If the prebuilt package doesn't work (ABI mismatch, unsupported ROCm
version), build ONNX Runtime against your installed ROCm:

```bash
./tools/ai/install-ort-amd-build.sh
```

Requires cmake 3.26+, gcc/g++, python3, git. Takes 10–20 minutes.

## Enabling the custom ONNX Runtime in darktable

After running the script or built-in installer:

1. Open darktable preferences (Ctrl+,)
2. Go to the **AI** tab
3. Click **detect**, or use the browse button to select the library
manually
4. Restart darktable

Or set `DT_ORT_LIBRARY` in the environment:

```bash
# Linux
DT_ORT_LIBRARY=~/.local/lib/onnxruntime-cuda/libonnxruntime.so.1.25.1 darktable
```
```powershell
# Windows
$env:DT_ORT_LIBRARY="$env:LOCALAPPDATA\onnxruntime-cuda\onnxruntime.dll"; darktable
```

If neither preference nor env var is set, darktable uses the bundled
ONNX Runtime (CPU on Linux, DirectML on Windows, CoreML on macOS).

## Verifying

```bash
darktable -d ai
```

Look for:
```
[darktable_ai] loaded ORT 1.25.1 from '/home/user/.local/lib/onnxruntime-cuda/libonnxruntime.so.1.25.1'
[darktable_ai] execution provider: CUDA
[darktable_ai] NVIDIA CUDA enabled successfully on device 0: NVIDIA GeForce RTX 4090
```

## Maintaining the GPU package registry

`data/ort_gpu.json` lists the upstream ONNX Runtime URLs the install scripts and
preferences UI pull from. It needs to be refreshed whenever Microsoft
ships a new ONNX Runtime release or AMD ships a new ROCm patch. Use the
script in this directory:

```bash
# show what would change (no writes); exits non-zero if updates exist
python3 tools/ai/refresh-ort-gpu.py --check

# apply the updates in place
python3 tools/ai/refresh-ort-gpu.py --update

# apply and open a PR via gh CLI (CI mode; needs GITHUB_TOKEN)
python3 tools/ai/refresh-ort-gpu.py --update --pr

# verbose progress (otherwise quiet by default in --update mode)
python3 tools/ai/refresh-ort-gpu.py --check -v
```

What it does:

- Queries `api.github.com/repos/microsoft/onnxruntime/releases` for the
latest non-prerelease that has all four expected GPU assets
(linux/windows × CUDA 12/13). Updates the four NVIDIA entries.
- Scrapes `https://repo.radeon.com/rocm/manylinux/`, finds the cp312
ONNX Runtime wheel in each `rocm-rel-X.Y.Z/` directory, and keeps the
latest patch per ROCm minor. Updates the AMD entries with
range-based matching (`rocm_min: "7.2"`, `rocm_max: "7.2"` covers
every 7.2.x patch).
- Computes SHA256 only for wheels whose URL changed since the last
refresh — a no-op run does no downloads.
- Preserves vendors it doesn't manage (e.g. Intel/OpenVINO) and any
manual fields (`required_libs`, `lib_pattern`, `install_subdir`).

Stdlib only — no extra Python deps. Network access required.

A monthly CI job (`.github/workflows/refresh-ort-gpu.yml`) runs the
script in `--update --pr` mode on the 1st of each month and opens a PR
if anything upstream moved. Maintainer reviews and merges; nothing is
auto-merged.
Loading