diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 254edc5b..e1e455de 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,8 +1,10 @@
[bumpversion]
-current_version = 1.3.1
+current_version = 1.1.1
commit = True
tag = True
-[bumpversion:file:batdetect2/__init__.py]
+[bumpversion:file:src/batdetect2/__init__.py]
[bumpversion:file:pyproject.toml]
+
+[bumpversion:file:docs/source/conf.py]
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000..f3c6d5fa
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,79 @@
+name: CI
+
+on:
+ pull_request:
+ push:
+ branches:
+ - main
+
+concurrency:
+ group: ci-${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ checks:
+ name: Checks
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Install just
+ uses: taiki-e/install-action@just
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v6
+ with:
+ enable-cache: true
+ cache-dependency-glob: |
+ pyproject.toml
+ uv.lock
+
+ - name: Install dependencies
+ run: uv sync --all-extras --all-groups
+
+ - name: Run formatting, lint, and type checks
+ run: just check
+
+ tests:
+ name: Tests (Python ${{ matrix.python-version }})
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version:
+ - "3.10"
+ - "3.11"
+ - "3.12"
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install just
+ uses: taiki-e/install-action@just
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v6
+ with:
+ enable-cache: true
+ cache-dependency-glob: |
+ pyproject.toml
+ uv.lock
+
+ - name: Install dependencies
+ run: uv sync --all-extras --all-groups
+
+ - name: Run test suite
+ run: just test
diff --git a/.github/workflows/docs-pages.yml b/.github/workflows/docs-pages.yml
new file mode 100644
index 00000000..b07932ae
--- /dev/null
+++ b/.github/workflows/docs-pages.yml
@@ -0,0 +1,69 @@
+name: Docs Pages
+
+on:
+ push:
+ branches:
+ - main
+ workflow_dispatch:
+
+permissions:
+ contents: read
+
+concurrency:
+ group: docs-pages
+ cancel-in-progress: true
+
+jobs:
+ build:
+ name: Build Docs
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Install just
+ uses: taiki-e/install-action@just
+
+ - name: Configure GitHub Pages
+ uses: actions/configure-pages@v5
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v6
+ with:
+ enable-cache: true
+ cache-dependency-glob: |
+ pyproject.toml
+ uv.lock
+
+ - name: Install dependencies
+ run: uv sync --all-extras --all-groups
+
+ - name: Build docs
+ run: just check-docs
+
+ - name: Upload Pages artifact
+ uses: actions/upload-pages-artifact@v4
+ with:
+ path: docs/build
+
+ deploy:
+ name: Deploy Docs
+ needs: build
+ runs-on: ubuntu-latest
+ permissions:
+ pages: write
+ id-token: write
+ environment:
+ name: github-pages
+ url: ${{ steps.deployment.outputs.page_url }}
+
+ steps:
+ - name: Deploy to GitHub Pages
+ id: deployment
+ uses: actions/deploy-pages@v4
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
new file mode 100644
index 00000000..8dea6275
--- /dev/null
+++ b/.github/workflows/publish-pypi.yml
@@ -0,0 +1,70 @@
+name: Publish PyPI
+
+on:
+ release:
+ types:
+ - published
+
+permissions:
+ contents: read
+
+concurrency:
+ group: publish-pypi
+ cancel-in-progress: false
+
+jobs:
+ build:
+ name: Build Distributions
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Install just
+ uses: taiki-e/install-action@just
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v6
+ with:
+ enable-cache: true
+ cache-dependency-glob: |
+ pyproject.toml
+ uv.lock
+
+ - name: Install dependencies
+ run: just install-dev
+
+ - name: Build distributions
+ run: just build-dist
+
+ - name: Upload distributions
+ uses: actions/upload-artifact@v4
+ with:
+ name: release-dists
+ path: dist/
+
+ publish:
+ name: Publish to PyPI
+ needs: build
+ runs-on: ubuntu-latest
+ permissions:
+ id-token: write
+ environment:
+ name: pypi
+ url: https://pypi.org/p/batdetect2
+
+ steps:
+ - name: Download distributions
+ uses: actions/download-artifact@v5
+ with:
+ name: release-dists
+ path: dist/
+
+ - name: Publish to PyPI
+ uses: pypa/gh-action-pypi-publish@release/v1
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
deleted file mode 100644
index ec42c73b..00000000
--- a/.github/workflows/python-package.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-name: Python package
-
-on:
- push:
- branches: ["main"]
- pull_request:
- branches: ["main"]
-
-jobs:
- build:
- runs-on: ubuntu-latest
- strategy:
- fail-fast: false
- matrix:
- python-version: ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]
-
- steps:
- - uses: actions/checkout@v4
- - name: Install uv
- uses: astral-sh/setup-uv@v3
- with:
- enable-cache: true
- cache-dependency-glob: "uv.lock"
- - name: Set up Python ${{ matrix.python-version }}
- run: uv python install ${{ matrix.python-version }}
- - name: Install the project
- run: uv sync --all-extras --dev
- - name: Test with pytest
- run: uv run pytest
diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml
deleted file mode 100644
index 2d1f98fd..00000000
--- a/.github/workflows/python-publish.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-name: Upload Python Package
-
-on:
- release:
- types: [published]
-
-permissions:
- contents: read
-
-jobs:
- deploy:
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v4
- - name: Set up Python
- uses: actions/setup-python@v3
- with:
- python-version: "3.x"
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install build
- - name: Build package
- run: python -m build
- - name: Publish package
- uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
- with:
- user: __token__
- password: ${{ secrets.PYPI_API_TOKEN }}
diff --git a/.gitignore b/.gitignore
index 36f820c5..54d53bb6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,6 +50,7 @@ cover/
# Sphinx documentation
docs/_build/
+docs/build/
# PyBuilder
.pybuilder/
@@ -95,8 +96,15 @@ dmypy.json
*.json
plots/*
+!example_data/anns/*.json
+
# Model experiments
experiments/*
+DvcLiveLogger/checkpoints
+logs/
+mlruns/
+/outputs/
+notebooks/lightning_logs
# Jupiter notebooks
.virtual_documents
@@ -105,8 +113,24 @@ experiments/*
# DO Include
!batdetect2_notebook.ipynb
-!batdetect2/models/*.pth.tar
+!src/batdetect2/models/checkpoints/*.pth.tar
!tests/data/*.wav
+!notebooks/*.ipynb
!tests/data/**/*.wav
-notebooks/lightning_logs
+.aider*
+
+# Intermediate artifacts
example_data/preprocessed
+
+# Dev notebooks
+notebooks/tmp
+/tmp
+/.agents/skills
+/notebooks
+/AGENTS.md
+/scripts
+/todo.md
+
+# Assets
+!assets/*
+/models
diff --git a/.pylintrc b/.pylintrc
deleted file mode 100644
index b20ceedb..00000000
--- a/.pylintrc
+++ /dev/null
@@ -1,5 +0,0 @@
-[TYPECHECK]
-
-# List of members which are set dynamically and missed by Pylint inference
-# system, and so shouldn't trigger E1101 when accessed.
-generated-members=torch.*
diff --git a/README.md b/README.md
index ac7a806f..ba29be87 100644
--- a/README.md
+++ b/README.md
@@ -1,161 +1,156 @@
# BatDetect2
-
Code for detecting and classifying bat echolocation calls in high frequency audio recordings.
-> [!NOTE]
-> We’re actively working to make it easier to train and fine-tune BatDetect2 models using custom data. A major update is coming soon to the main branch—stay tuned! In the meantime, you can follow our progress in the train branch.
+
-## Getting started
-### Python Environment
+Code for detecting and classifying bat echolocation calls in high-frequency
+audio recordings.
-We recommend using an isolated Python environment to avoid dependency issues. Choose one
-of the following options:
+> [!WARNING]
+> `batdetect2` 2.0.0b1 is out.
+> This is a beta release and we are gathering user feedback.
+> If you run into issues or have feedback on the new workflows, please use the
+> GitHub issues page to let us know.
+>
+> There are many changes and new recommended workflows.
+> We have left the previous `batdetect2.api` module intact, but if you run
+> into issues or want to upgrade, see the
+> [migration guide](docs/source/legacy/migration-guide.md) in the docs site.
+>
+> This update also ships with a refreshed default model.
+> It was trained in the same way and on the same data as before, but you should
+> still expect small output differences in some cases.
-* Install the Anaconda Python 3.10 distribution for your operating system from [here](https://www.continuum.io/downloads). Create a new environment and activate it:
+## What is BatDetect2
-```bash
-conda create -y --name batdetect2 python==3.10
-conda activate batdetect2
-```
+BatDetect2 is a deep learning model for detecting and classifying bat
+echolocation calls.
+The model generates multiple predictions for each input recording by providing a
+bounding box and predicted class for each individual call within it.
-* If you already have Python installed (version >= 3.8,< 3.11) and prefer using virtual environments then:
+This repository also holds `batdetect2`, a Python-based tool to run, train,
+finetune and evaluate BatDetect2-type models, including the built-in model for
+detecting UK bat species.
+You can use the tool from the command line (terminal) or from Python as needed.
-```bash
-python -m venv .venv
-source .venv/bin/activate
-```
+## Getting Started
-### Installing BatDetect2
-You can use pip to install `batdetect2`:
+We have [extensive documentation](docs/source/index.md) on how to use
+`batdetect2`.
-```bash
-pip install batdetect2
-```
+The docs site is still being built and will be live soon.
+If you want a quick peek for now, see the `docs/` folder in this repository.
-Alternatively, download this code from the repository (by clicking on the green button on top right) and unzip it.
-Once unzipped, run this from extracted folder.
+See our [getting started](docs/source/getting_started.md) guide and then jump
+into any of our tutorials:
-```bash
-pip install .
-```
+- Run the model on a folder of recordings:
+ `docs/source/tutorials/run-inference-on-folder.md`
+- Train your own model:
+ `docs/source/tutorials/train-a-custom-model.md`
+- Evaluate your model:
+ `docs/source/tutorials/evaluate-on-a-test-set.md`
+- Integrate BatDetect2 into a Python pipeline:
+  `docs/source/tutorials/integrate-with-a-python-pipeline.md`
-Make sure you have the environment activated before installing `batdetect2`.
+### Try the model
+If you want to try the model for UK bat species without installing anything, you
+can try the following:
-## Try the model
-1) You can try a demo of the model (for UK species) on [huggingface](https://huggingface.co/spaces/macaodha/batdetect2).
+1. Demo of the model (for UK species) on
+ [huggingface](https://huggingface.co/spaces/macaodha/batdetect2).
-2) Alternatively, click [here](https://colab.research.google.com/github/macaodha/batdetect2/blob/master/batdetect2_notebook.ipynb) to run the model using Google Colab. You can also run this notebook locally.
-
-
-## Running the model on your own data
-
-After following the above steps to install the code you can run the model on your own data.
+2. Alternatively, click
+ [here](https://colab.research.google.com/github/macaodha/batdetect2/blob/master/batdetect2_notebook.ipynb)
+ to run the model using Google Colab.
+ You can also run this notebook locally.
+### Installing BatDetect2
-### Using the command line
+If you have `uv` installed (if not, we recommend it; follow the instructions
+[here](https://docs.astral.sh/uv/getting-started/installation/)), then you can
+run `batdetect2` one-off with
-You can run the model by opening the command line and typing:
```bash
-batdetect2 detect AUDIO_DIR ANN_DIR DETECTION_THRESHOLD
+uvx batdetect2
```
-e.g.
-```bash
-batdetect2 detect example_data/audio/ example_data/anns/ 0.3
-```
-
-`AUDIO_DIR` is the path on your computer to the audio wav files of interest.
-`ANN_DIR` is the path on your computer where the model predictions will be saved. The model will output both `.csv` and `.json` results for each audio file.
-`DETECTION_THRESHOLD` is a number between 0 and 1 specifying the cut-off threshold applied to the calls. A smaller number will result in more calls detected, but with the chance of introducing more mistakes.
-
-There are also optional arguments, e.g. you can request that the model outputs features (i.e. estimated call parameters) such as duration, max_frequency, etc. by setting the flag `--spec_features`. These will be saved as `*_spec_features.csv` files:
-`batdetect2 detect example_data/audio/ example_data/anns/ 0.3 --spec_features`
-You can also specify which model to use by setting the `--model_path` argument. If not specified, it will default to using a model trained on UK data e.g.
-`batdetect2 detect example_data/audio/ example_data/anns/ 0.3 --model_path models/Net2DFast_UK_same.pth.tar`
+or if you want to install it permanently:
+```bash
+uv tool install batdetect2
+```
-### Using the Python API
-
-If you prefer to process your data within a Python script then you can use the `batdetect2` Python API.
-
-```python
-from batdetect2 import api
-
-AUDIO_FILE = "example_data/audio/20170701_213954-MYOMYS-LR_0_0.5.wav"
-
-# Process a whole file
-results = api.process_file(AUDIO_FILE)
-
-# Or, load audio and compute spectrograms
-audio = api.load_audio(AUDIO_FILE)
-spec = api.generate_spectrogram(audio)
-
-# And process the audio or the spectrogram with the model
-detections, features, spec = api.process_audio(audio)
-detections, features = api.process_spectrogram(spec)
+and test it with
-# Do something else ...
+```bash
+batdetect2
```
-You can integrate the detections or the extracted features to your custom analysis pipeline.
+### Run BatDetect2 on a folder of recordings
-#### Using the Python API with HTTP
+Once installed, you can run BatDetect2 on a folder of `.wav` files.
+By default it will use the model trained on UK data.
-```python
-from batdetect2 import api
-import io
-import requests
+Example command:
-AUDIO_URL = ""
-
-# Process a whole file from a url
-results = api.process_url(AUDIO_URL)
+```bash
+batdetect2 process directory example_data/audio outputs
+```
-# Or, load audio and compute spectrograms
-# 'requests.get(AUDIO_URL).content' fetches the raw bytes. You are free to use other sources to fetch the raw bytes
-audio = api.load_audio(io.BytesIO(requests.get(AUDIO_URL).content))
-spec = api.generate_spectrogram(audio)
+This will scan the audio files in `example_data/audio` and save model outputs to
+`outputs`.
+If you have your own model checkpoint, you can use it:
-# And process the audio or the spectrogram with the model
-detections, features, spec = api.process_audio(audio)
-detections, features = api.process_spectrogram(spec)
+```bash
+batdetect2 process directory --model path/to/checkpoint.ckpt example_data/audio outputs
```
-## Training the model on your own data
-Take a look at the steps outlined in finetuning readme [here](batdetect2/finetune/readme.md) for a description of how to train your own model.
-
+For the full walkthrough, use
+`docs/source/tutorials/run-inference-on-folder.md`.
## Data and annotations
-The raw audio data and annotations used to train the models in the paper will be added soon.
-The audio interface used to annotate audio data for training and evaluation is available [here](https://github.com/macaodha/batdetect2_GUI).
+The raw audio data and annotations used to train the models in the paper will be
+added soon.
+`batdetect2` supports annotations in various formats and is compatible with the
+outputs of [`whombat`](https://github.com/mbsantiago/whombat/) and this
+[earlier version](https://github.com/macaodha/batdetect2_GUI).
+If you're interested in supporting another format, please reach out or submit a
+PR.
## Warning
-The models developed and shared as part of this repository should be used with caution.
-While they have been evaluated on held out audio data, great care should be taken when using the model outputs for any form of biodiversity assessment.
-Your data may differ, and as a result it is very strongly recommended that you validate the model first using data with known species to ensure that the outputs can be trusted.
+The models developed and shared as part of this repository should be used with
+caution.
+While they have been evaluated on held-out audio data, great care should be
+taken when using the model outputs for any form of biodiversity assessment.
+Your data may differ, and as a result it is very strongly recommended that you
+validate the model first using data with known species to ensure that the
+outputs can be trusted.
+If you train a model, make the best effort to be transparent about its training
+and evaluation data, and inform downstream users about its limitations.
## FAQ
-For more information please consult our [FAQ](faq.md).
+For more information please consult our [FAQ](docs/source/faq.md).
## Reference
-If you find our work useful in your research please consider citing our paper which you can find [here](https://www.biorxiv.org/content/10.1101/2022.12.14.520490v1):
+
+If you find our work useful in your research, please consider citing our paper,
+which you can find
+[here](https://www.biorxiv.org/content/10.1101/2022.12.14.520490v1):
+
```
@article{batdetect2_2022,
title = {Towards a General Approach for Bat Echolocation Detection and Classification},
- author = {Mac Aodha, Oisin and Mart\'{i}nez Balvanera, Santiago and Damstra, Elise and Cooke, Martyn and Eichinski, Philip and Browning, Ella and Barataudm, Michel and Boughey, Katherine and Coles, Roger and Giacomini, Giada and MacSwiney G., M. Cristina and K. Obrist, Martin and Parsons, Stuart and Sattler, Thomas and Jones, Kate E.},
+ author = {Mac Aodha, Oisin and Mart\'{i}nez Balvanera, Santiago and Damstra, Elise and Cooke, Martyn and Eichinski, Philip and Browning, Ella and Barataud, Michel and Boughey, Katherine and Coles, Roger and Giacomini, Giada and MacSwiney G., M. Cristina and K. Obrist, Martin and Parsons, Stuart and Sattler, Thomas and Jones, Kate E.},
journal = {bioRxiv},
year = {2022}
}
```
## Acknowledgements
-Thanks to all the contributors who spent time collecting and annotating audio data.
-
-### TODOs
-- [x] Release the code and pretrained model
-- [ ] Release the datasets and annotations used the experiments in the paper
-- [ ] Add the scripts used to generate the tables and figures from the paper
+Thanks to all the contributors who spent time collecting and annotating audio
+data.
diff --git a/ims/bat_icon.png b/assets/bat_icon.png
similarity index 100%
rename from ims/bat_icon.png
rename to assets/bat_icon.png
diff --git a/batdetect2/__init__.py b/batdetect2/__init__.py
deleted file mode 100644
index ac0d550b..00000000
--- a/batdetect2/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import logging
-
-numba_logger = logging.getLogger("numba")
-numba_logger.setLevel(logging.WARNING)
-
-__version__ = "1.3.1"
diff --git a/batdetect2/detector/parameters.py b/batdetect2/detector/parameters.py
deleted file mode 100644
index 04544ed8..00000000
--- a/batdetect2/detector/parameters.py
+++ /dev/null
@@ -1,232 +0,0 @@
-import datetime
-import os
-
-from batdetect2.types import ProcessingConfiguration, SpectrogramParameters
-
-TARGET_SAMPLERATE_HZ = 256000
-FFT_WIN_LENGTH_S = 512 / 256000.0
-FFT_OVERLAP = 0.75
-MAX_FREQ_HZ = 120000
-MIN_FREQ_HZ = 10000
-RESIZE_FACTOR = 0.5
-SPEC_DIVIDE_FACTOR = 32
-SPEC_HEIGHT = 256
-SCALE_RAW_AUDIO = False
-DETECTION_THRESHOLD = 0.01
-NMS_KERNEL_SIZE = 9
-NMS_TOP_K_PER_SEC = 200
-SPEC_SCALE = "pcen"
-DENOISE_SPEC_AVG = True
-MAX_SCALE_SPEC = False
-
-
-DEFAULT_MODEL_PATH = os.path.join(
- os.path.dirname(os.path.dirname(__file__)),
- "models",
- "Net2DFast_UK_same.pth.tar",
-)
-
-
-DEFAULT_SPECTROGRAM_PARAMETERS: SpectrogramParameters = {
- "fft_win_length": FFT_WIN_LENGTH_S,
- "fft_overlap": FFT_OVERLAP,
- "spec_height": SPEC_HEIGHT,
- "resize_factor": RESIZE_FACTOR,
- "spec_divide_factor": SPEC_DIVIDE_FACTOR,
- "max_freq": MAX_FREQ_HZ,
- "min_freq": MIN_FREQ_HZ,
- "spec_scale": SPEC_SCALE,
- "denoise_spec_avg": DENOISE_SPEC_AVG,
- "max_scale_spec": MAX_SCALE_SPEC,
-}
-
-
-DEFAULT_PROCESSING_CONFIGURATIONS: ProcessingConfiguration = {
- "detection_threshold": DETECTION_THRESHOLD,
- "spec_slices": False,
- "chunk_size": 3,
- "spec_features": False,
- "cnn_features": False,
- "quiet": True,
- "target_samp_rate": TARGET_SAMPLERATE_HZ,
- "fft_win_length": FFT_WIN_LENGTH_S,
- "fft_overlap": FFT_OVERLAP,
- "resize_factor": RESIZE_FACTOR,
- "spec_divide_factor": SPEC_DIVIDE_FACTOR,
- "spec_height": SPEC_HEIGHT,
- "scale_raw_audio": SCALE_RAW_AUDIO,
- "class_names": [],
- "time_expansion": 1,
- "top_n": 3,
- "return_raw_preds": False,
- "max_duration": None,
- "nms_kernel_size": NMS_KERNEL_SIZE,
- "max_freq": MAX_FREQ_HZ,
- "min_freq": MIN_FREQ_HZ,
- "nms_top_k_per_sec": NMS_TOP_K_PER_SEC,
- "spec_scale": SPEC_SCALE,
- "denoise_spec_avg": DENOISE_SPEC_AVG,
- "max_scale_spec": MAX_SCALE_SPEC,
-}
-
-
-def mk_dir(path):
- if not os.path.isdir(path):
- os.makedirs(path)
-
-
-def get_params(make_dirs=False, exps_dir="../../experiments/"):
- params = {}
-
- params[
- "model_name"
- ] = "Net2DFast" # Net2DFast, Net2DSkip, Net2DSimple, Net2DSkipDS, Net2DRN
- params["num_filters"] = 128
-
- now_str = datetime.datetime.now().strftime("%Y_%m_%d__%H_%M_%S")
- model_name = now_str + ".pth.tar"
- params["experiment"] = os.path.join(exps_dir, now_str, "")
- params["model_file_name"] = os.path.join(params["experiment"], model_name)
- params["op_im_dir"] = os.path.join(params["experiment"], "op_ims", "")
- params["op_im_dir_test"] = os.path.join(
- params["experiment"], "op_ims_test", ""
- )
- # params['notes'] = '' # can save notes about an experiment here
-
- # spec parameters
- params[
- "target_samp_rate"
- ] = TARGET_SAMPLERATE_HZ # resamples all audio so that it is at this rate
- params[
- "fft_win_length"
- ] = FFT_WIN_LENGTH_S # in milliseconds, amount of time per stft time step
- params["fft_overlap"] = FFT_OVERLAP # stft window overlap
-
- params[
- "max_freq"
- ] = MAX_FREQ_HZ # in Hz, everything above this will be discarded
- params[
- "min_freq"
- ] = MIN_FREQ_HZ # in Hz, everything below this will be discarded
-
- params[
- "resize_factor"
- ] = RESIZE_FACTOR # resize so the spectrogram at the input of the network
- params[
- "spec_height"
- ] = SPEC_HEIGHT # units are number of frequency bins (before resizing is performed)
- params[
- "spec_train_width"
- ] = 512 # units are number of time steps (before resizing is performed)
- params[
- "spec_divide_factor"
- ] = SPEC_DIVIDE_FACTOR # spectrogram should be divisible by this amount in width and height
-
- # spec processing params
- params[
- "denoise_spec_avg"
- ] = DENOISE_SPEC_AVG # removes the mean for each frequency band
- params[
- "scale_raw_audio"
- ] = SCALE_RAW_AUDIO # scales the raw audio to [-1, 1]
- params[
- "max_scale_spec"
- ] = MAX_SCALE_SPEC # scales the spectrogram so that it is max 1
- params["spec_scale"] = SPEC_SCALE # 'log', 'pcen', 'none'
-
- # detection params
- params[
- "detection_overlap"
- ] = 0.01 # has to be within this number of ms to count as detection
- params[
- "ignore_start_end"
- ] = 0.01 # if start of GT calls are within this time from the start/end of file ignore
- params[
- "detection_threshold"
- ] = DETECTION_THRESHOLD # the smaller this is the better the recall will be
- params[
- "nms_kernel_size"
- ] = NMS_KERNEL_SIZE # size of the kernel for non-max suppression
- params[
- "nms_top_k_per_sec"
- ] = NMS_TOP_K_PER_SEC # keep top K highest predictions per second of audio
- params["target_sigma"] = 2.0
-
- # augmentation params
- params[
- "aug_prob"
- ] = 0.20 # augmentations will be performed with this probability
- params["augment_at_train"] = True
- params["augment_at_train_combine"] = True
- params[
- "echo_max_delay"
- ] = 0.005 # simulate echo by adding copy of raw audio
- params["stretch_squeeze_delta"] = 0.04 # stretch or squeeze spec
- params[
- "mask_max_time_perc"
- ] = 0.05 # max mask size - here percentage, not ideal
- params[
- "mask_max_freq_perc"
- ] = 0.10 # max mask size - here percentage, not ideal
- params[
- "spec_amp_scaling"
- ] = 2.0 # multiply the "volume" by 0:X times current amount
- params["aug_sampling_rates"] = [
- 220500,
- 256000,
- 300000,
- 312500,
- 384000,
- 441000,
- 500000,
- ]
-
- # loss params
- params["train_loss"] = "focal" # mse or focal
- params["det_loss_weight"] = 1.0 # weight for the detection part of the loss
- params["size_loss_weight"] = 0.1 # weight for the bbox size loss
- params["class_loss_weight"] = 2.0 # weight for the classification loss
- params["individual_loss_weight"] = 0.0 # not used
- if params["individual_loss_weight"] == 0.0:
- params[
- "emb_dim"
- ] = 0 # number of dimensions used for individual id embedding
- else:
- params["emb_dim"] = 3
-
- # train params
- params["lr"] = 0.001
- params["batch_size"] = 8
- params["num_workers"] = 4
- params["num_epochs"] = 200
- params["num_eval_epochs"] = 5 # run evaluation every X epochs
- params["device"] = "cuda"
- params["save_test_image_during_train"] = False
- params["save_test_image_after_train"] = True
-
- params["convert_to_genus"] = False
- params["genus_mapping"] = []
- params["class_names"] = []
- params["classes_to_ignore"] = ["", " ", "Unknown", "Not Bat"]
- params["generic_class"] = ["Bat"]
- params["events_of_interest"] = [
- "Echolocation"
- ] # will ignore all other types of events e.g. social calls
-
- # the classes in this list are standardized during training so that the same low and high freq are used
- params["standardize_classs_names"] = []
-
- # create directories
- if make_dirs:
- print("Model name : " + params["model_name"])
- print("Model file : " + params["model_file_name"])
- print("Experiment : " + params["experiment"])
-
- mk_dir(params["experiment"])
- if params["save_test_image_during_train"]:
- mk_dir(params["op_im_dir"])
- if params["save_test_image_after_train"]:
- mk_dir(params["op_im_dir_test"])
- mk_dir(os.path.dirname(params["model_file_name"]))
-
- return params
diff --git a/batdetect2/finetune/prep_data_finetune.py b/batdetect2/finetune/prep_data_finetune.py
deleted file mode 100644
index 11702a9c..00000000
--- a/batdetect2/finetune/prep_data_finetune.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import argparse
-import json
-import os
-
-import numpy as np
-
-import batdetect2.train.train_utils as tu
-
-
-def print_dataset_stats(data, split_name, classes_to_ignore):
- print("\nSplit:", split_name)
- print("Num files:", len(data))
-
- class_cnts = {}
- for dd in data:
- for aa in dd["annotation"]:
- if aa["class"] not in classes_to_ignore:
- if aa["class"] in class_cnts:
- class_cnts[aa["class"]] += 1
- else:
- class_cnts[aa["class"]] = 1
-
- if len(class_cnts) == 0:
- class_names = []
- else:
- class_names = np.sort([*class_cnts]).tolist()
- print("Class count:")
- str_len = np.max([len(cc) for cc in class_names]) + 5
-
- for ii, cc in enumerate(class_names):
- print(str(ii).ljust(5) + cc.ljust(str_len) + str(class_cnts[cc]))
-
- return class_names
-
-
-def load_file_names(file_name):
- if os.path.isfile(file_name):
- with open(file_name) as da:
- files = [line.rstrip() for line in da.readlines()]
- for ff in files:
- if ff.lower()[-3:] != "wav":
- print("Error: Filenames need to end in .wav - ", ff)
- assert False
- else:
- print("Error: Input file not found - ", file_name)
- assert False
-
- return files
-
-
-if __name__ == "__main__":
- info_str = "\nBatDetect - Prepare Data for Finetuning\n"
-
- print(info_str)
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "dataset_name", type=str, help="Name to call your dataset"
- )
- parser.add_argument("audio_dir", type=str, help="Input directory for audio")
- parser.add_argument(
- "ann_dir",
- type=str,
- help="Input directory for where the audio annotations are stored",
- )
- parser.add_argument(
- "op_dir",
- type=str,
- help="Path where the train and test splits will be stored",
- )
- parser.add_argument(
- "--percent_val",
- type=float,
- default=0.20,
- help="Hold out this much data for validation. Should be number between 0 and 1",
- )
- parser.add_argument(
- "--rand_seed",
- type=int,
- default=2001,
- help="Random seed used for creating the validation split",
- )
- parser.add_argument(
- "--train_file",
- type=str,
- default="",
- help="Text file where each line is a wav file in train split",
- )
- parser.add_argument(
- "--test_file",
- type=str,
- default="",
- help="Text file where each line is a wav file in test split",
- )
- parser.add_argument(
- "--input_class_names",
- type=str,
- default="",
- help='Specify names of classes that you want to change. Separate with ";"',
- )
- parser.add_argument(
- "--output_class_names",
- type=str,
- default="",
- help='New class names to use instead. One to one mapping with "--input_class_names". \
- Separate with ";"',
- )
- args = vars(parser.parse_args())
-
- np.random.seed(args["rand_seed"])
-
- classes_to_ignore = ["", " ", "Unknown", "Not Bat"]
- generic_class = ["Bat"]
- events_of_interest = ["Echolocation"]
-
- if args["input_class_names"] != "" and args["output_class_names"] != "":
- # change the names of the classes
- ip_names = args["input_class_names"].split(";")
- op_names = args["output_class_names"].split(";")
- name_dict = dict(zip(ip_names, op_names))
- else:
- name_dict = False
-
- # load annotations
- data_all, _, _ = tu.load_set_of_anns(
- {"ann_path": args["ann_dir"], "wav_path": args["audio_dir"]},
- classes_to_ignore,
- events_of_interest,
- False,
- False,
- list_of_anns=True,
- filter_issues=True,
- name_replace=name_dict,
- )
-
- print("Dataset name: " + args["dataset_name"])
- print("Audio directory: " + args["audio_dir"])
- print("Annotation directory: " + args["ann_dir"])
- print("Ouput directory: " + args["op_dir"])
- print("Num annotated files: " + str(len(data_all)))
-
- if args["train_file"] != "" and args["test_file"] != "":
- # user has specifed the train / test split
- train_files = load_file_names(args["train_file"])
- test_files = load_file_names(args["test_file"])
- file_names_all = [dd["id"] for dd in data_all]
- train_inds = [
- file_names_all.index(ff)
- for ff in train_files
- if ff in file_names_all
- ]
- test_inds = [
- file_names_all.index(ff)
- for ff in test_files
- if ff in file_names_all
- ]
-
- else:
- # split the data into train and test at the file level
- num_exs = len(data_all)
- test_inds = np.random.choice(
- np.arange(num_exs),
- int(num_exs * args["percent_val"]),
- replace=False,
- )
- test_inds = np.sort(test_inds)
- train_inds = np.setdiff1d(np.arange(num_exs), test_inds)
-
- data_train = [data_all[ii] for ii in train_inds]
- data_test = [data_all[ii] for ii in test_inds]
-
- if not os.path.isdir(args["op_dir"]):
- os.makedirs(args["op_dir"])
- op_name = os.path.join(args["op_dir"], args["dataset_name"])
- op_name_train = op_name + "_TRAIN.json"
- op_name_test = op_name + "_TEST.json"
-
- class_un_train = print_dataset_stats(data_train, "Train", classes_to_ignore)
- class_un_test = print_dataset_stats(data_test, "Test", classes_to_ignore)
-
- if len(data_train) > 0 and len(data_test) > 0:
- if class_un_train != class_un_test:
- print(
- '\nError: some classes are not in both the training and test sets.\
- \nTry a different random seed "--rand_seed".'
- )
- assert False
-
- print("\n")
- if len(data_train) == 0:
- print("No train annotations to save")
- else:
- print("Saving: ", op_name_train)
- with open(op_name_train, "w") as da:
- json.dump(data_train, da, indent=2)
-
- if len(data_test) == 0:
- print("No test annotations to save")
- else:
- print("Saving: ", op_name_test)
- with open(op_name_test, "w") as da:
- json.dump(data_test, da, indent=2)
diff --git a/batdetect2/train/audio_dataloader.py b/batdetect2/train/audio_dataloader.py
deleted file mode 100644
index 8130ec6e..00000000
--- a/batdetect2/train/audio_dataloader.py
+++ /dev/null
@@ -1,603 +0,0 @@
-import copy
-from typing import Tuple
-
-import librosa
-import numpy as np
-import torch
-import torch.nn.functional as F
-import torchaudio
-
-import batdetect2.utils.audio_utils as au
-from batdetect2.types import AnnotationGroup, HeatmapParameters
-
-
-def generate_gt_heatmaps(
- spec_op_shape: Tuple[int, int],
- sampling_rate: int,
- ann: AnnotationGroup,
- params: HeatmapParameters,
-) -> Tuple[np.ndarray, np.ndarray, np.ndarray, AnnotationGroup]:
- """Generate ground truth heatmaps from annotations.
-
- Parameters
- ----------
- spec_op_shape : Tuple[int, int]
- Shape of the input spectrogram.
- sampling_rate : int
- Sampling rate of the input audio in Hz.
- ann : AnnotationGroup
- Dictionary containing the annotation information.
- params : HeatmapParameters
- Parameters controlling the generation of the heatmaps.
-
- Returns
- -------
-
- y_2d_det : np.ndarray
- 2D heatmap of the presence of an event.
-
- y_2d_size : np.ndarray
- 2D heatmap of the size of the bounding box associated to event.
-
- y_2d_classes : np.ndarray
- 3D array containing the ground-truth class probabilities for each
- pixel.
-
- ann_aug : AnnotationGroup
- A dictionary containing the annotation information of the
- annotations that are within the input spectrogram, augmented with
- the x and y indices of their pixel location in the input spectrogram.
-
- """
- # spec may be resized on input into the network
- num_classes = len(params["class_names"])
- op_height = spec_op_shape[0]
- op_width = spec_op_shape[1]
- freq_per_bin = (params["max_freq"] - params["min_freq"]) / op_height
-
- # start and end times
- x_pos_start = au.time_to_x_coords(
- ann["start_times"],
- sampling_rate,
- params["fft_win_length"],
- params["fft_overlap"],
- )
- x_pos_start = (params["resize_factor"] * x_pos_start).astype(np.int)
- x_pos_end = au.time_to_x_coords(
- ann["end_times"],
- sampling_rate,
- params["fft_win_length"],
- params["fft_overlap"],
- )
- x_pos_end = (params["resize_factor"] * x_pos_end).astype(np.int)
-
- # location on y axis i.e. frequency
- y_pos_low = (ann["low_freqs"] - params["min_freq"]) / freq_per_bin
- y_pos_low = (op_height - y_pos_low).astype(np.int)
- y_pos_high = (ann["high_freqs"] - params["min_freq"]) / freq_per_bin
- y_pos_high = (op_height - y_pos_high).astype(np.int)
- bb_widths = x_pos_end - x_pos_start
- bb_heights = y_pos_low - y_pos_high
-
- # Only include annotations that are within the input spectrogram
- valid_inds = np.where(
- (x_pos_start >= 0)
- & (x_pos_start < op_width)
- & (y_pos_low >= 0)
- & (y_pos_low < (op_height - 1))
- )[0]
-
- ann_aug: AnnotationGroup = {
- "start_times": ann["start_times"][valid_inds],
- "end_times": ann["end_times"][valid_inds],
- "high_freqs": ann["high_freqs"][valid_inds],
- "low_freqs": ann["low_freqs"][valid_inds],
- "class_ids": ann["class_ids"][valid_inds],
- "individual_ids": ann["individual_ids"][valid_inds],
- }
- ann_aug["x_inds"] = x_pos_start[valid_inds]
- ann_aug["y_inds"] = y_pos_low[valid_inds]
- # keys = [
- # "start_times",
- # "end_times",
- # "high_freqs",
- # "low_freqs",
- # "class_ids",
- # "individual_ids",
- # ]
- # for kk in keys:
- # ann_aug[kk] = ann[kk][valid_inds]
-
- # if the number of calls is only 1, then it is unique
- # TODO would be better if we found these unique calls at the merging stage
- if len(ann_aug["individual_ids"]) == 1:
- ann_aug["individual_ids"][0] = 0
-
- y_2d_det = np.zeros((1, op_height, op_width), dtype=np.float32)
- y_2d_size = np.zeros((2, op_height, op_width), dtype=np.float32)
- # num classes and "background" class
- y_2d_classes: np.ndarray = np.zeros(
- (num_classes + 1, op_height, op_width), dtype=np.float32
- )
-
- # create 2D ground truth heatmaps
- for ii in valid_inds:
- draw_gaussian(
- y_2d_det[0, :],
- (x_pos_start[ii], y_pos_low[ii]),
- params["target_sigma"],
- )
- # draw_gaussian(y_2d_det[0,:], (x_pos_start[ii], y_pos_low[ii]), params['target_sigma'], params['target_sigma']*2)
- y_2d_size[0, y_pos_low[ii], x_pos_start[ii]] = bb_widths[ii]
- y_2d_size[1, y_pos_low[ii], x_pos_start[ii]] = bb_heights[ii]
-
- cls_id = ann["class_ids"][ii]
- if cls_id > -1:
- draw_gaussian(
- y_2d_classes[cls_id, :],
- (x_pos_start[ii], y_pos_low[ii]),
- params["target_sigma"],
- )
- # draw_gaussian(y_2d_classes[cls_id, :], (x_pos_start[ii], y_pos_low[ii]), params['target_sigma'], params['target_sigma']*2)
-
- # be careful as this will have a 1.0 places where we have event but dont know gt class
- # this will be masked in training anyway
- y_2d_classes[num_classes, :] = 1.0 - y_2d_classes.sum(0)
- y_2d_classes = y_2d_classes / y_2d_classes.sum(0)[np.newaxis, ...]
- y_2d_classes[np.isnan(y_2d_classes)] = 0.0
-
- return y_2d_det, y_2d_size, y_2d_classes, ann_aug
-
-
-def draw_gaussian(heatmap, center, sigmax, sigmay=None):
- # center is (x, y)
- # this edits the heatmap inplace
-
- if sigmay is None:
- sigmay = sigmax
- tmp_size = np.maximum(sigmax, sigmay) * 3
- mu_x = int(center[0] + 0.5)
- mu_y = int(center[1] + 0.5)
- w, h = heatmap.shape[0], heatmap.shape[1]
- ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
- br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
-
- if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0:
- return False
-
- size = 2 * tmp_size + 1
- x = np.arange(0, size, 1, np.float32)
- y = x[:, np.newaxis]
- x0 = y0 = size // 2
- # g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
- g = np.exp(
- -((x - x0) ** 2) / (2 * sigmax**2)
- - ((y - y0) ** 2) / (2 * sigmay**2)
- )
- g_x = max(0, -ul[0]), min(br[0], h) - ul[0]
- g_y = max(0, -ul[1]), min(br[1], w) - ul[1]
- img_x = max(0, ul[0]), min(br[0], h)
- img_y = max(0, ul[1]), min(br[1], w)
- heatmap[img_y[0] : img_y[1], img_x[0] : img_x[1]] = np.maximum(
- heatmap[img_y[0] : img_y[1], img_x[0] : img_x[1]],
- g[g_y[0] : g_y[1], g_x[0] : g_x[1]],
- )
- return True
-
-
-def pad_aray(ip_array, pad_size):
- return np.hstack((ip_array, np.ones(pad_size, dtype=np.int) * -1))
-
-
-def warp_spec_aug(spec, ann, return_spec_for_viz, params):
- # This is messy
- # Augment spectrogram by randomly stretch and squeezing
- # NOTE this also changes the start and stop time in place
-
- # not taking care of spec for viz
- if return_spec_for_viz:
- assert False
-
- delta = params["stretch_squeeze_delta"]
- op_size = (spec.shape[1], spec.shape[2])
- resize_fract_r = np.random.rand() * delta * 2 - delta + 1.0
- resize_amt = int(spec.shape[2] * resize_fract_r)
- if resize_amt >= spec.shape[2]:
- spec_r = torch.cat(
- (
- spec,
- torch.zeros(
- (1, spec.shape[1], resize_amt - spec.shape[2]),
- dtype=spec.dtype,
- ),
- ),
- 2,
- )
- else:
- spec_r = spec[:, :, :resize_amt]
- spec = F.interpolate(
- spec_r.unsqueeze(0), size=op_size, mode="bilinear", align_corners=False
- ).squeeze(0)
- ann["start_times"] *= 1.0 / resize_fract_r
- ann["end_times"] *= 1.0 / resize_fract_r
- return spec
-
-
-def mask_time_aug(spec, params):
- # Mask out a random block of time - repeat up to 3 times
- # SpecAugment: A Simple Data Augmentation Methodfor Automatic Speech Recognition
- fm = torchaudio.transforms.TimeMasking(
- int(spec.shape[1] * params["mask_max_time_perc"])
- )
- for ii in range(np.random.randint(1, 4)):
- spec = fm(spec)
- return spec
-
-
-def mask_freq_aug(spec, params):
- # Mask out a random frequncy range - repeat up to 3 times
- # SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition
- fm = torchaudio.transforms.FrequencyMasking(
- int(spec.shape[1] * params["mask_max_freq_perc"])
- )
- for ii in range(np.random.randint(1, 4)):
- spec = fm(spec)
- return spec
-
-
-def scale_vol_aug(spec, params):
- return spec * np.random.random() * params["spec_amp_scaling"]
-
-
-def echo_aug(audio, sampling_rate, params):
- sample_offset = (
- int(params["echo_max_delay"] * np.random.random() * sampling_rate) + 1
- )
- audio[:-sample_offset] += np.random.random() * audio[sample_offset:]
- return audio
-
-
-def resample_aug(audio, sampling_rate, params):
- sampling_rate_old = sampling_rate
- sampling_rate = np.random.choice(params["aug_sampling_rates"])
- audio = librosa.resample(
- audio,
- orig_sr=sampling_rate_old,
- target_sr=sampling_rate,
- res_type="polyphase",
- )
-
- audio = au.pad_audio(
- audio,
- sampling_rate,
- params["fft_win_length"],
- params["fft_overlap"],
- params["resize_factor"],
- params["spec_divide_factor"],
- params["spec_train_width"],
- )
- duration = audio.shape[0] / float(sampling_rate)
- return audio, sampling_rate, duration
-
-
-def resample_audio(num_samples, sampling_rate, audio2, sampling_rate2):
- if sampling_rate != sampling_rate2:
- audio2 = librosa.resample(
- audio2,
- orig_sr=sampling_rate2,
- target_sr=sampling_rate,
- res_type="polyphase",
- )
- sampling_rate2 = sampling_rate
- if audio2.shape[0] < num_samples:
- audio2 = np.hstack(
- (
- audio2,
- np.zeros((num_samples - audio2.shape[0]), dtype=audio2.dtype),
- )
- )
- elif audio2.shape[0] > num_samples:
- audio2 = audio2[:num_samples]
- return audio2, sampling_rate2
-
-
-def combine_audio_aug(audio, sampling_rate, ann, audio2, sampling_rate2, ann2):
-
- # resample so they are the same
- audio2, sampling_rate2 = resample_audio(
- audio.shape[0], sampling_rate, audio2, sampling_rate2
- )
-
- # # set mean and std to be the same
- # audio2 = (audio2 - audio2.mean())
- # audio2 = (audio2/audio2.std())*audio.std()
- # audio2 = audio2 + audio.mean()
-
- if (
- ann["annotated"]
- and (ann2["annotated"])
- and (sampling_rate2 == sampling_rate)
- and (audio.shape[0] == audio2.shape[0])
- ):
- comb_weight = 0.3 + np.random.random() * 0.4
- audio = comb_weight * audio + (1 - comb_weight) * audio2
- inds = np.argsort(np.hstack((ann["start_times"], ann2["start_times"])))
- for kk in ann.keys():
-
- # when combining calls from different files, assume they come from different individuals
- if kk == "individual_ids":
- if (ann[kk] > -1).sum() > 0:
- ann2[kk][ann2[kk] > -1] += np.max(ann[kk][ann[kk] > -1]) + 1
-
- if (kk != "class_id_file") and (kk != "annotated"):
- ann[kk] = np.hstack((ann[kk], ann2[kk]))[inds]
-
- return audio, ann
-
-
-class AudioLoader(torch.utils.data.Dataset):
- def __init__(self, data_anns_ip, params, dataset_name=None, is_train=False):
-
- self.data_anns = []
- self.is_train = is_train
- self.params = params
- self.return_spec_for_viz = False
-
- for ii in range(len(data_anns_ip)):
- dd = copy.deepcopy(data_anns_ip[ii])
-
- # filter out unused annotation here
- filtered_annotations = []
- for ii, aa in enumerate(dd["annotation"]):
-
- if "individual" in aa.keys():
- aa["individual"] = int(aa["individual"])
-
- # if only one call labeled it has to be from the same individual
- if len(dd["annotation"]) == 1:
- aa["individual"] = 0
-
- # convert class name into class label
- if aa["class"] in self.params["class_names"]:
- aa["class_id"] = self.params["class_names"].index(
- aa["class"]
- )
- else:
- aa["class_id"] = -1
-
- if aa["class"] not in self.params["classes_to_ignore"]:
- filtered_annotations.append(aa)
-
- dd["annotation"] = filtered_annotations
- dd["start_times"] = np.array(
- [aa["start_time"] for aa in dd["annotation"]]
- )
- dd["end_times"] = np.array(
- [aa["end_time"] for aa in dd["annotation"]]
- )
- dd["high_freqs"] = np.array(
- [float(aa["high_freq"]) for aa in dd["annotation"]]
- )
- dd["low_freqs"] = np.array(
- [float(aa["low_freq"]) for aa in dd["annotation"]]
- )
- dd["class_ids"] = np.array(
- [aa["class_id"] for aa in dd["annotation"]]
- ).astype(np.int)
- dd["individual_ids"] = np.array(
- [aa["individual"] for aa in dd["annotation"]]
- ).astype(np.int)
-
- # file level class name
- dd["class_id_file"] = -1
- if "class_name" in dd.keys():
- if dd["class_name"] in self.params["class_names"]:
- dd["class_id_file"] = self.params["class_names"].index(
- dd["class_name"]
- )
-
- self.data_anns.append(dd)
-
- ann_cnt = [len(aa["annotation"]) for aa in self.data_anns]
- self.max_num_anns = 2 * np.max(
- ann_cnt
- ) # x2 because we may be combining files during training
-
- print("\n")
- if dataset_name is not None:
- print("Dataset : " + dataset_name)
- if self.is_train:
- print("Split type : train")
- else:
- print("Split type : test")
- print("Num files : " + str(len(self.data_anns)))
- print("Num calls : " + str(np.sum(ann_cnt)))
-
- def get_file_and_anns(self, index=None):
-
- # if no file specified, choose random one
- if index == None:
- index = np.random.randint(0, len(self.data_anns))
-
- audio_file = self.data_anns[index]["file_path"]
- sampling_rate, audio_raw = au.load_audio(
- audio_file,
- self.data_anns[index]["time_exp"],
- self.params["target_samp_rate"],
- self.params["scale_raw_audio"],
- )
-
- # copy annotation
- ann = {}
- ann["annotated"] = self.data_anns[index]["annotated"]
- ann["class_id_file"] = self.data_anns[index]["class_id_file"]
- keys = [
- "start_times",
- "end_times",
- "high_freqs",
- "low_freqs",
- "class_ids",
- "individual_ids",
- ]
- for kk in keys:
- ann[kk] = self.data_anns[index][kk].copy()
-
- # if train then grab a random crop
- if self.is_train:
- nfft = int(self.params["fft_win_length"] * sampling_rate)
- noverlap = int(self.params["fft_overlap"] * nfft)
- length_samples = (
- self.params["spec_train_width"] * (nfft - noverlap) + noverlap
- )
-
- if audio_raw.shape[0] - length_samples > 0:
- sample_crop = np.random.randint(
- audio_raw.shape[0] - length_samples
- )
- else:
- sample_crop = 0
- audio_raw = audio_raw[sample_crop : sample_crop + length_samples]
- ann["start_times"] = ann["start_times"] - sample_crop / float(
- sampling_rate
- )
- ann["end_times"] = ann["end_times"] - sample_crop / float(
- sampling_rate
- )
-
- # pad audio
- if self.is_train:
- op_spec_target_size = self.params["spec_train_width"]
- else:
- op_spec_target_size = None
- audio_raw = au.pad_audio(
- audio_raw,
- sampling_rate,
- self.params["fft_win_length"],
- self.params["fft_overlap"],
- self.params["resize_factor"],
- self.params["spec_divide_factor"],
- op_spec_target_size,
- )
- duration = audio_raw.shape[0] / float(sampling_rate)
-
- # sort based on time
- inds = np.argsort(ann["start_times"])
- for kk in ann.keys():
- if (kk != "class_id_file") and (kk != "annotated"):
- ann[kk] = ann[kk][inds]
-
- return audio_raw, sampling_rate, duration, ann
-
- def __getitem__(self, index):
-
- # load audio file
- audio, sampling_rate, duration, ann = self.get_file_and_anns(index)
-
- # augment on raw audio
- if self.is_train and self.params["augment_at_train"]:
- # augment - combine with random audio file
- if (
- self.params["augment_at_train_combine"]
- and np.random.random() < self.params["aug_prob"]
- ):
- (
- audio2,
- sampling_rate2,
- duration2,
- ann2,
- ) = self.get_file_and_anns()
- audio, ann = combine_audio_aug(
- audio, sampling_rate, ann, audio2, sampling_rate2, ann2
- )
-
- # simulate echo by adding delayed copy of the file
- if np.random.random() < self.params["aug_prob"]:
- audio = echo_aug(audio, sampling_rate, self.params)
-
- # resample the audio
- # if np.random.random() < self.params['aug_prob']:
- # audio, sampling_rate, duration = resample_aug(audio, sampling_rate, self.params)
-
- # create spectrogram
- spec, spec_for_viz = au.generate_spectrogram(
- audio, sampling_rate, self.params, self.return_spec_for_viz
- )
- rsf = self.params["resize_factor"]
- spec_op_shape = (
- int(self.params["spec_height"] * rsf),
- int(spec.shape[1] * rsf),
- )
-
- # resize the spec
- spec = torch.from_numpy(spec).unsqueeze(0).unsqueeze(0)
- spec = F.interpolate(
- spec, size=spec_op_shape, mode="bilinear", align_corners=False
- ).squeeze(0)
-
- # augment spectrogram
- if self.is_train and self.params["augment_at_train"]:
-
- if np.random.random() < self.params["aug_prob"]:
- spec = scale_vol_aug(spec, self.params)
-
- if np.random.random() < self.params["aug_prob"]:
- spec = warp_spec_aug(
- spec, ann, self.return_spec_for_viz, self.params
- )
-
- if np.random.random() < self.params["aug_prob"]:
- spec = mask_time_aug(spec, self.params)
-
- if np.random.random() < self.params["aug_prob"]:
- spec = mask_freq_aug(spec, self.params)
-
- outputs = {}
- outputs["spec"] = spec
- if self.return_spec_for_viz:
- outputs["spec_for_viz"] = torch.from_numpy(spec_for_viz).unsqueeze(
- 0
- )
-
- # create ground truth heatmaps
- (
- outputs["y_2d_det"],
- outputs["y_2d_size"],
- outputs["y_2d_classes"],
- ann_aug,
- ) = generate_gt_heatmaps(spec_op_shape, sampling_rate, ann, self.params)
-
- # hack to get around requirement that all vectors are the same length in
- # the output batch
- pad_size = self.max_num_anns - len(ann_aug["individual_ids"])
- outputs["is_valid"] = pad_aray(
- np.ones(len(ann_aug["individual_ids"])), pad_size
- )
- keys = [
- "class_ids",
- "individual_ids",
- "x_inds",
- "y_inds",
- "start_times",
- "end_times",
- "low_freqs",
- "high_freqs",
- ]
- for kk in keys:
- outputs[kk] = pad_aray(ann_aug[kk], pad_size)
-
- # convert to pytorch
- for kk in outputs.keys():
- if type(outputs[kk]) != torch.Tensor:
- outputs[kk] = torch.from_numpy(outputs[kk])
-
- # scalars
- outputs["class_id_file"] = ann["class_id_file"]
- outputs["annotated"] = ann["annotated"]
- outputs["duration"] = duration
- outputs["sampling_rate"] = sampling_rate
- outputs["file_id"] = index
-
- return outputs
-
- def __len__(self):
- return len(self.data_anns)
diff --git a/batdetect2/train/losses.py b/batdetect2/train/losses.py
deleted file mode 100644
index 02bfdd66..00000000
--- a/batdetect2/train/losses.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import torch
-import torch.nn.functional as F
-
-
-def bbox_size_loss(pred_size, gt_size):
- """
- Bounding box size loss. Only compute loss where there is a bounding box.
- """
- gt_size_mask = (gt_size > 0).float()
- return F.l1_loss(pred_size * gt_size_mask, gt_size, reduction="sum") / (
- gt_size_mask.sum() + 1e-5
- )
-
-
-def focal_loss(pred, gt, weights=None, valid_mask=None):
- """
- Focal loss adapted from CornerNet: Detecting Objects as Paired Keypoints
- pred (batch x c x h x w)
- gt (batch x c x h x w)
- """
- eps = 1e-5
- beta = 4
- alpha = 2
-
- pos_inds = gt.eq(1).float()
- neg_inds = gt.lt(1).float()
-
- pos_loss = torch.log(pred + eps) * torch.pow(1 - pred, alpha) * pos_inds
- neg_loss = (
- torch.log(1 - pred + eps)
- * torch.pow(pred, alpha)
- * torch.pow(1 - gt, beta)
- * neg_inds
- )
-
- if weights is not None:
- pos_loss = pos_loss * weights
- # neg_loss = neg_loss*weights
-
- if valid_mask is not None:
- pos_loss = pos_loss * valid_mask
- neg_loss = neg_loss * valid_mask
-
- pos_loss = pos_loss.sum()
- neg_loss = neg_loss.sum()
-
- num_pos = pos_inds.float().sum()
- if num_pos == 0:
- loss = -neg_loss
- else:
- loss = -(pos_loss + neg_loss) / num_pos
- return loss
-
-
-def mse_loss(pred, gt, weights=None, valid_mask=None):
- """
- Mean squared error loss.
- """
- if valid_mask is None:
- op = ((gt - pred) ** 2).mean()
- else:
- op = (valid_mask * ((gt - pred) ** 2)).sum() / valid_mask.sum()
- return op
diff --git a/batdetect2/train/train_utils.py b/batdetect2/train/train_utils.py
deleted file mode 100644
index 9b3b4ebf..00000000
--- a/batdetect2/train/train_utils.py
+++ /dev/null
@@ -1,207 +0,0 @@
-import glob
-import json
-
-import numpy as np
-
-
-def write_notes_file(file_name, text):
- with open(file_name, "a") as da:
- da.write(text + "\n")
-
-
-def get_blank_dataset_dict(dataset_name, is_test, ann_path, wav_path):
- ddict = {
- "dataset_name": dataset_name,
- "is_test": is_test,
- "is_binary": False,
- "ann_path": ann_path,
- "wav_path": wav_path,
- }
- return ddict
-
-
-def get_short_class_names(class_names, str_len=3):
- class_names_short = []
- for cc in class_names:
- class_names_short.append(
- " ".join([sp[:str_len] for sp in cc.split(" ")])
- )
- return class_names_short
-
-
-def remove_dupes(data_train, data_test):
- test_ids = [dd["id"] for dd in data_test]
- data_train_prune = []
- for aa in data_train:
- if aa["id"] not in test_ids:
- data_train_prune.append(aa)
- diff = len(data_train) - len(data_train_prune)
- if diff != 0:
- print(diff, "items removed from train set")
- return data_train_prune
-
-
-def get_genus_mapping(class_names):
- genus_names, genus_mapping = np.unique(
- [cc.split(" ")[0] for cc in class_names], return_inverse=True
- )
- return genus_names.tolist(), genus_mapping.tolist()
-
-
-def standardize_low_freq(data, class_of_interest):
- # address the issue of highly variable low frequency annotations
- # this often happens for contstant frequency calls
- # for the class of interest sets the low and high freq to be the dataset mean
- low_freqs = []
- high_freqs = []
- for dd in data:
- for aa in dd["annotation"]:
- if aa["class"] == class_of_interest:
- low_freqs.append(aa["low_freq"])
- high_freqs.append(aa["high_freq"])
-
- low_mean = np.mean(low_freqs)
- high_mean = np.mean(high_freqs)
- assert low_mean < high_mean
-
- print("\nStandardizing low and high frequency for:")
- print(class_of_interest)
- print("low: ", round(low_mean, 2))
- print("high: ", round(high_mean, 2))
-
- # only set the low freq, high stays the same
- # assumes that low_mean < high_mean
- for dd in data:
- for aa in dd["annotation"]:
- if aa["class"] == class_of_interest:
- aa["low_freq"] = low_mean
- if aa["high_freq"] < low_mean:
- aa["high_freq"] = high_mean
-
- return data
-
-
-def load_set_of_anns(
- data,
- classes_to_ignore=[],
- events_of_interest=None,
- convert_to_genus=False,
- verbose=True,
- list_of_anns=False,
- filter_issues=False,
- name_replace=False,
-):
-
- # load the annotations
- anns = []
- if list_of_anns:
- # path to list of individual json files
- anns.extend(load_anns_from_path(data["ann_path"], data["wav_path"]))
- else:
- # dictionary of datasets
- for dd in data:
- anns.extend(load_anns(dd["ann_path"], dd["wav_path"]))
-
- # discarding unannoated files
- anns = [aa for aa in anns if aa["annotated"] is True]
-
- # filter files that have annotation issues - is the input is a dictionary of
- # datasets, this will lilely have already been done
- if filter_issues:
- anns = [aa for aa in anns if aa["issues"] is False]
-
- # check for some basic formatting errors with class names
- for ann in anns:
- for aa in ann["annotation"]:
- aa["class"] = aa["class"].strip()
-
- # only load specified events - i.e. types of calls
- if events_of_interest is not None:
- for ann in anns:
- filtered_events = []
- for aa in ann["annotation"]:
- if aa["event"] in events_of_interest:
- filtered_events.append(aa)
- ann["annotation"] = filtered_events
-
- # change class names
- # replace_names will be a dictionary mapping input name to output
- if type(name_replace) is dict:
- for ann in anns:
- for aa in ann["annotation"]:
- if aa["class"] in name_replace:
- aa["class"] = name_replace[aa["class"]]
-
- # convert everything to genus name
- if convert_to_genus:
- for ann in anns:
- for aa in ann["annotation"]:
- aa["class"] = aa["class"].split(" ")[0]
-
- # get unique class names
- class_names_all = []
- for ann in anns:
- for aa in ann["annotation"]:
- if aa["class"] not in classes_to_ignore:
- class_names_all.append(aa["class"])
-
- class_names, class_cnts = np.unique(class_names_all, return_counts=True)
- class_inv_freq = class_cnts.sum() / (
- len(class_names) * class_cnts.astype(np.float32)
- )
-
- if verbose:
- print("Class count:")
- str_len = np.max([len(cc) for cc in class_names]) + 5
- for cc in range(len(class_names)):
- print(
- str(cc).ljust(5)
- + class_names[cc].ljust(str_len)
- + str(class_cnts[cc])
- )
-
- if len(classes_to_ignore) == 0:
- return anns
- else:
- return anns, class_names.tolist(), class_inv_freq.tolist()
-
-
-def load_anns(ann_file_name, raw_audio_dir):
- with open(ann_file_name) as da:
- anns = json.load(da)
-
- for aa in anns:
- aa["file_path"] = raw_audio_dir + aa["id"]
-
- return anns
-
-
-def load_anns_from_path(ann_file_dir, raw_audio_dir):
- files = glob.glob(ann_file_dir + "*.json")
- anns = []
- for ff in files:
- with open(ff) as da:
- ann = json.load(da)
- ann["file_path"] = raw_audio_dir + ann["id"]
- anns.append(ann)
-
- return anns
-
-
-class AverageMeter(object):
- """Computes and stores the average and current value"""
-
- def __init__(self):
- self.reset()
-
- def reset(self):
- self.val = 0
- self.avg = 0
- self.sum = 0
- self.count = 0
-
- def update(self, val, n=1):
- self.val = val
- self.sum += val * n
- self.count += n
- self.avg = self.sum / self.count
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 00000000..d0c3cbf1
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 00000000..747ffb7b
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.https://www.sphinx-doc.org/
+ exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/batdetect2/detector/__init__.py b/docs/source/_static/.gitkeep
similarity index 100%
rename from batdetect2/detector/__init__.py
rename to docs/source/_static/.gitkeep
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 00000000..075dac5f
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,78 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+
+project = "batdetect2"
+copyright = "2025, Oisin Mac Aodha, Santiago Martinez Balvanera"
+author = "Oisin Mac Aodha, Santiago Martinez Balvanera"
+release = "1.1.1"
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+extensions = [
+ "sphinx.ext.autodoc",
+ "sphinx.ext.autosummary",
+ "sphinx.ext.intersphinx",
+ "sphinxcontrib.autodoc_pydantic",
+ "sphinx_click",
+ "numpydoc",
+ "myst_parser",
+ "sphinx_autodoc_typehints",
+]
+
+templates_path = ["_templates"]
+exclude_patterns = []
+
+source_suffix = {
+ ".rst": "restructuredtext",
+ ".txt": "markdown",
+ ".md": "markdown",
+}
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+html_theme = "sphinx_book_theme"
+html_static_path = ["_static"]
+html_theme_options = {
+ "home_page_in_toc": True,
+ "show_navbar_depth": 2,
+ "show_toc_level": 2,
+}
+
+intersphinx_mapping = {
+ "python": ("https://docs.python.org/3", None),
+ "click": ("https://click.palletsprojects.com/en/stable/", None),
+ "librosa": ("https://librosa.org/doc/latest/", None),
+ "lightning": ("https://lightning.ai/docs/pytorch/stable/", None),
+ "loguru": ("https://loguru.readthedocs.io/en/stable/", None),
+ "numpy": ("https://numpy.org/doc/stable/", None),
+ "omegaconf": ("https://omegaconf.readthedocs.io/en/latest/", None),
+ "pytorch": ("https://pytorch.org/docs/stable/", None),
+ "soundevent": ("https://mbsantiago.github.io/soundevent/", None),
+ "pydantic": ("https://docs.pydantic.dev/latest/", None),
+ "xarray": ("https://docs.xarray.dev/en/stable/", None),
+}
+
+# -- Options for autodoc ------------------------------------------------------
+autosummary_generate = False
+autosummary_imported_members = True
+
+autodoc_default_options = {
+ "members": True,
+ "undoc-members": False,
+ "private-members": False,
+ "special-members": False,
+ "inherited-members": False,
+ "show-inheritance": True,
+ "module-first": True,
+}
+
+numpydoc_show_class_members = False
+numpydoc_show_inherited_class_members = False
+numpydoc_class_members_toctree = False
diff --git a/docs/source/development/index.md b/docs/source/development/index.md
new file mode 100644
index 00000000..390f3af2
--- /dev/null
+++ b/docs/source/development/index.md
@@ -0,0 +1,34 @@
+# Development and contribution
+
+Thanks for your interest in improving batdetect2.
+
+## Ways to contribute
+
+- Report bugs and request features on
+ [GitHub Issues](https://github.com/macaodha/batdetect2/issues)
+- Improve docs by opening pull requests with clearer examples, fixes, or
+ missing workflows
+- Contribute code for models, data handling, evaluation, or CLI workflows
+
+## Basic contribution workflow
+
+1. Open an issue (or comment on an existing one) so work is visible.
+2. Create a branch for your change.
+3. Run checks locally before opening a PR:
+
+   ```bash
+   just check
+   just docs
+   ```
+
+4. Open a pull request with a clear summary of what changed and why.
+
+## Development environment
+
+Use `uv` for dependency and environment management.
+
+```bash
+uv sync
+```
+
+For more setup details, see {doc}`../getting_started`.
diff --git a/docs/source/explanation/evaluation-concepts-and-matching.md b/docs/source/explanation/evaluation-concepts-and-matching.md
new file mode 100644
index 00000000..96563eed
--- /dev/null
+++ b/docs/source/explanation/evaluation-concepts-and-matching.md
@@ -0,0 +1,48 @@
+# Evaluation concepts and matching
+
+Evaluation is not just "run predictions and compute one number".
+
+The reported metric depends on the evaluation task, the matching rule, and the treatment of clip boundaries and generic labels.
+
+## Task families answer different questions
+
+Built-in task families include:
+
+- sound event detection,
+- sound event classification,
+- top-class detection,
+- clip detection,
+- clip classification.
+
+Choose the task that matches the scientific or engineering question.
+
+## Matching matters
+
+For sound-event-style tasks, predictions and annotations are matched using an affinity function.
+
+Important controls include:
+
+- `affinity`,
+- `affinity_threshold`,
+- `strict_match`,
+- `ignore_start_end`.
+
+Small changes here can change the reported metric without changing the underlying predictions.
+
+## Boundary handling matters
+
+The evaluation base task can exclude events near clip boundaries through `ignore_start_end`.
+
+This is useful when clip boundaries make matches ambiguous.
+
+## Generic labels can matter in classification
+
+Classification tasks can include or exclude generic targets depending on configuration.
+
+That affects what counts as a valid class-level comparison.
+
+## Related pages
+
+- Evaluate on a test set: {doc}`../tutorials/evaluate-on-a-test-set`
+- Evaluation config reference: {doc}`../reference/evaluation-config`
+- Model output and validation: {doc}`model-output-and-validation`
diff --git a/docs/source/explanation/extracted-features-and-embeddings.md b/docs/source/explanation/extracted-features-and-embeddings.md
new file mode 100644
index 00000000..01d28379
--- /dev/null
+++ b/docs/source/explanation/extracted-features-and-embeddings.md
@@ -0,0 +1,43 @@
+# Extracted features and embeddings
+
+The current API exposes a per-detection `features` vector.
+
+Older BatDetect2 workflows also exposed concepts such as `cnn_feats`,
+`spec_features`, and `spec_slices`.
+
+## What the current feature vector is
+
+In the current stack, each retained detection can carry an internal feature
+representation produced by the model output pipeline.
+
+This is useful for downstream exploration, comparison, and custom analysis.
+
+## What these features are not
+
+They are not automatically human-interpretable ecological variables.
+
+They are also not a substitute for careful validation.
+
+## Why people refer to them as embeddings
+
+In practice, users often treat these feature vectors as embeddings because they
+can be used as dense learned representations of detections.
+
+That usage is reasonable, but you should still treat them as model-derived
+internal representations whose meaning depends on the training setup.
+
+## Legacy terminology versus current terminology
+
+- legacy `cnn_feats` referred to CNN feature outputs in the older workflow,
+- legacy `spec_features` referred to lower-level extracted call features,
+- current `features` are the per-detection vectors attached to `Detection`
+ objects.
+
+These are related ideas, but not necessarily one-to-one replacements.
+
+## Related pages
+
+- Inspect detection features in Python:
+ {doc}`../how_to/inspect-detection-features-in-python`
+- Legacy migration guide:
+ {doc}`../legacy/migration-guide`
diff --git a/docs/source/explanation/index.md b/docs/source/explanation/index.md
new file mode 100644
index 00000000..51557bb7
--- /dev/null
+++ b/docs/source/explanation/index.md
@@ -0,0 +1,19 @@
+# Understanding
+
+Understanding pages explain how BatDetect2 works, what its outputs mean, and how to reason about trade-offs.
+
+Use this section when you want help interpreting the tool, not just running it.
+
+```{toctree}
+:maxdepth: 1
+
+what-batdetect2-predicts
+interpreting-formatted-outputs
+extracted-features-and-embeddings
+model-output-and-validation
+postprocessing-and-thresholds
+pipeline-overview
+preprocessing-consistency
+target-encoding-and-decoding
+evaluation-concepts-and-matching
+```
diff --git a/docs/source/explanation/interpreting-formatted-outputs.md b/docs/source/explanation/interpreting-formatted-outputs.md
new file mode 100644
index 00000000..5bd6d98b
--- /dev/null
+++ b/docs/source/explanation/interpreting-formatted-outputs.md
@@ -0,0 +1,36 @@
+# Interpreting formatted outputs
+
+BatDetect2 can write predictions in several output formats.
+
+Those formats are different views of the same underlying detections, not different model behaviors.
+
+## Separate the underlying detection from the serialized file
+
+Internally, the current stack works with clip-level detections containing geometry, detection score, class scores, and features.
+
+Output formatters then serialize those detections in different ways.
+
+## Raw outputs are richest
+
+The `raw` format preserves the broadest structured view of detections and is a good default when you want to inspect or reload predictions later.
+
+## Tabular outputs are for analysis convenience
+
+The `parquet` format is convenient for data analysis workflows, but the tabular representation is only one projection of the underlying detection object.
+
+## Legacy-shaped outputs are mainly for compatibility
+
+The `batdetect2` formatter writes the older BatDetect2-style JSON shape.
+
+Use it when you need compatibility with older downstream tools or workflows.
+
+## The meaning does not come from the file extension
+
+Do not assume that a `.json`, `.parquet`, or `.nc` file changes what the model predicted.
+
+It changes how the prediction is packaged and how much detail is retained.
+
+## Related pages
+
+- Output formats reference: {doc}`../reference/output-formats`
+- Outputs config reference: {doc}`../reference/outputs-config`
diff --git a/docs/source/explanation/model-output-and-validation.md b/docs/source/explanation/model-output-and-validation.md
new file mode 100644
index 00000000..fc39b38b
--- /dev/null
+++ b/docs/source/explanation/model-output-and-validation.md
@@ -0,0 +1,29 @@
+# Model output and validation
+
+BatDetect2 outputs model predictions, not ground truth. The same configuration
+can behave differently across recording conditions, species compositions, and
+acoustic environments.
+
+## Why threshold choice matters
+
+- Lower detection thresholds increase sensitivity but can increase false
+ positives.
+- Higher thresholds reduce false positives but can miss faint calls.
+
+No threshold is universally correct. The right setting depends on your survey
+objectives and tolerance for false positives versus missed detections.
+
+## Why local validation is required
+
+Model performance depends on how similar your data are to training data.
+Before ecological interpretation, validate predictions on a representative,
+locally reviewed subset.
+
+Recommended validation checks:
+
+1. Compare detection counts against expert-reviewed clips.
+2. Inspect species-level predictions for plausible confusion patterns.
+3. Repeat checks across sites, seasons, and recorder setups.
+
+For practical threshold workflows, see
+{doc}`../how_to/tune-detection-threshold`.
diff --git a/docs/source/explanation/pipeline-overview.md b/docs/source/explanation/pipeline-overview.md
new file mode 100644
index 00000000..5c3f840e
--- /dev/null
+++ b/docs/source/explanation/pipeline-overview.md
@@ -0,0 +1,34 @@
+# Pipeline overview
+
+BatDetect2 processes recordings as a sequence of stages. Each stage has a
+clear role and configuration surface.
+
+## End-to-end flow
+
+1. Audio loading
+2. Preprocessing (waveform -> spectrogram)
+3. Detector forward pass
+4. Postprocessing (peaks, decoding, thresholds)
+5. Output formatting and export
+
+## Why the modular design matters
+
+The model, preprocessing, postprocessing, targets, and output formatting are
+configured separately. That makes it easier to:
+
+- swap components without rewriting the whole pipeline,
+- keep experiments reproducible,
+- adapt workflows to new datasets.
+
+## Core objects in the stack
+
+- `BatDetect2API` orchestrates training, inference, and evaluation workflows.
+- `ModelConfig` defines architecture, preprocessing, postprocessing, and
+ targets.
+- `Targets` controls event filtering, class encoding/decoding, and ROI mapping.
+
+## Related pages
+
+- Preprocessing rationale: {doc}`preprocessing-consistency`
+- Postprocessing rationale: {doc}`postprocessing-and-thresholds`
+- Target rationale: {doc}`target-encoding-and-decoding`
diff --git a/docs/source/explanation/postprocessing-and-thresholds.md b/docs/source/explanation/postprocessing-and-thresholds.md
new file mode 100644
index 00000000..3998765f
--- /dev/null
+++ b/docs/source/explanation/postprocessing-and-thresholds.md
@@ -0,0 +1,43 @@
+# Postprocessing and thresholds
+
+After the detector runs on a spectrogram, the model output is still a set of
+dense prediction tensors. Postprocessing turns that into a final list of call
+detections with positions, sizes, and class scores.
+
+## What postprocessing does
+
+In broad terms, the pipeline:
+
+1. suppresses nearby duplicate peaks,
+2. extracts candidate detections,
+3. reads size and class values at each detected location,
+4. decodes outputs into call-level predictions.
+
+This is where score thresholds and output density limits are applied.
+
+## Why thresholds matter
+
+Thresholds control the balance between sensitivity and precision.
+
+- Lower thresholds keep more detections, including weaker calls, but may add
+ false positives.
+- Higher thresholds remove low-confidence detections, but may miss faint calls.
+
+You can tune this behavior per run without retraining the model.
+
+## Two common threshold controls
+
+- `detection_threshold`: minimum score required to keep a detection.
+- `classification_threshold`: minimum class score used when assigning class
+ labels.
+
+Both settings shape the final output and should be validated on reviewed local
+data.
+
+## Practical workflow
+
+Tune thresholds on a representative subset first, then lock settings for the
+full analysis run.
+
+- How-to: {doc}`../how_to/tune-detection-threshold`
+- CLI reference: {doc}`../reference/cli/predict`
diff --git a/docs/source/explanation/preprocessing-consistency.md b/docs/source/explanation/preprocessing-consistency.md
new file mode 100644
index 00000000..b5d18aa5
--- /dev/null
+++ b/docs/source/explanation/preprocessing-consistency.md
@@ -0,0 +1,36 @@
+# Preprocessing consistency
+
+Preprocessing consistency is one of the biggest factors behind stable model
+performance.
+
+## Why consistency matters
+
+The detector is trained on spectrograms produced by a specific preprocessing
+pipeline. If inference uses different settings, the model can see a shifted
+input distribution and performance may drop.
+
+Typical mismatch sources:
+
+- sample-rate differences,
+- changed frequency crop,
+- changed STFT window/hop,
+- changed spectrogram transforms.
+
+## Practical implication
+
+When possible, keep preprocessing settings aligned between:
+
+- training,
+- evaluation,
+- deployment inference.
+
+If you intentionally change preprocessing, treat this as a new experiment and
+re-validate on reviewed local data.
+
+## Related pages
+
+- Configure audio preprocessing:
+ {doc}`../how_to/configure-audio-preprocessing`
+- Configure spectrogram preprocessing:
+ {doc}`../how_to/configure-spectrogram-preprocessing`
+- Preprocessing config reference: {doc}`../reference/preprocessing-config`
diff --git a/docs/source/explanation/target-encoding-and-decoding.md b/docs/source/explanation/target-encoding-and-decoding.md
new file mode 100644
index 00000000..4ec23670
--- /dev/null
+++ b/docs/source/explanation/target-encoding-and-decoding.md
@@ -0,0 +1,40 @@
+# Target encoding and decoding
+
+BatDetect2 turns annotated sound events into training targets, then maps model
+outputs back into interpretable predictions.
+
+## Encoding path (annotations -> model targets)
+
+At training time, the target system:
+
+1. checks whether an event belongs to the configured detection target,
+2. assigns a classification label (or none for non-specific class matches),
+3. maps event geometry into position and size targets.
+
+This behavior is configured through `TargetConfig`,
+`TargetClassConfig`, and ROI mapper settings.
+
+## Decoding path (model outputs -> tags and geometry)
+
+At inference time, class labels and ROI parameters are decoded back into
+annotation tags and geometry.
+
+This makes outputs interpretable in the same conceptual space as your original
+annotations.
+
+## Why this matters
+
+Target definitions are not just metadata. They directly shape:
+
+- what events are treated as positive examples,
+- which class names the model learns,
+- how geometry is represented and reconstructed.
+
+Small changes here can alter both training outcomes and prediction semantics.
+
+## Related pages
+
+- Configure detection target logic: {doc}`../how_to/configure-target-definitions`
+- Configure class mapping: {doc}`../how_to/define-target-classes`
+- Configure ROI mapping: {doc}`../how_to/configure-roi-mapping`
+- Target config reference: {doc}`../reference/targets-config-workflow`
diff --git a/docs/source/explanation/what-batdetect2-predicts.md b/docs/source/explanation/what-batdetect2-predicts.md
new file mode 100644
index 00000000..8ed45689
--- /dev/null
+++ b/docs/source/explanation/what-batdetect2-predicts.md
@@ -0,0 +1,45 @@
+# What BatDetect2 predicts
+
+BatDetect2 predicts call-level events, not recording-level truth.
+
+For each retained detection, the current stack can expose:
+
+- a geometry describing where the event sits in time-frequency space,
+- a detection score,
+- a class-score vector,
+- an internal feature vector.
+
+## Detection score versus class scores
+
+These are different outputs and should not be interpreted as the same thing.
+
+- The detection score is about whether the event is kept as a detection.
+- The class-score vector ranks classes for that detected event.
+
+A detection can be kept while still having uncertain class identity.
+
+## Predictions are conditional on the workflow
+
+The final output also depends on:
+
+- preprocessing,
+- postprocessing,
+- thresholds,
+- target definitions,
+- output transforms.
+
+That is why two runs can differ even when they use the same checkpoint.
+
+## What BatDetect2 does not predict
+
+BatDetect2 does not directly output ecological truth.
+
+It also does not eliminate the need for local validation.
+
+Use reviewed local data before making ecological claims.
+
+## Related pages
+
+- Model output and validation: {doc}`model-output-and-validation`
+- Postprocessing and thresholds: {doc}`postprocessing-and-thresholds`
+- Interpreting formatted outputs: {doc}`interpreting-formatted-outputs`
diff --git a/docs/source/faq.md b/docs/source/faq.md
new file mode 100644
index 00000000..7b530a25
--- /dev/null
+++ b/docs/source/faq.md
@@ -0,0 +1,81 @@
+# FAQ
+
+## Installation and setup
+
+### Do I need Python knowledge to use BatDetect2?
+
+Not much.
+If you only want to run the model on your own recordings, you can use the CLI and follow the steps in {doc}`getting_started`.
+
+Some command-line familiarity helps, but you do not need to write Python code for standard inference workflows.
+
+### Are there plans for an R version?
+
+Not currently.
+Output files are plain formats (for example CSV/JSON), so you can read and analyze them in R or other environments.
+
+### I cannot get installation working. What should I do?
+
+First, re-check {doc}`getting_started` and confirm your environment is active.
+If it still fails, open an issue with your OS, install method, and full error output: [GitHub Issues](https://github.com/macaodha/batdetect2/issues).
+
+## Model behavior and performance
+
+### The model does not perform well on my data
+
+This usually means your data distribution differs from training data.
+The best next step is to validate on reviewed local data and then fine-tune/train on your own annotations if needed.
+
+### The model confuses insects/noise with bats
+
+This can happen, especially when recording conditions differ from training conditions.
+Threshold tuning and training with local annotations can improve results.
+
+See {doc}`how_to/tune-detection-threshold`.
+
+### The model struggles with feeding buzzes or social calls
+
+This is a known limitation of available training data in some settings.
+If you have high-quality annotated examples, they are valuable for improving models.
+
+### Calls in the same sequence are predicted as different species
+
+Currently, BatDetect2 does not apply any sophisticated postprocessing across calls in a sequence.
+It returns a per-species probability for each call independently.
+You can post-process these per-call probabilities yourself, for example by smoothing predictions across a call sequence.
+
+### Can I trust model outputs for biodiversity conclusions?
+
+The models developed and shared as part of this repository should be used with caution.
+While they have been evaluated on held-out audio data, great care should be taken when using the model outputs for any form of biodiversity assessment.
+Your data may differ, and as a result it is very strongly recommended that you validate the model first using data with known species to ensure that the outputs can be trusted.
+
+### The pipeline is slow
+
+Runtime depends on hardware and recording duration.
+GPU inference is often much faster than CPU.
+
+## Training and scope
+
+### Can I train on my own species set?
+
+Yes.
+You can train/fine-tune with your own annotated data and species labels.
+
+### Does this work on frequency-division or zero-crossing recordings?
+
+Not directly.
+The workflow assumes audio can be converted to spectrograms from the raw waveform.
+
+### Can this be used for non-bat bioacoustics (for example insects or birds)?
+
+Potentially yes, but expect retraining and configuration changes.
+Open an issue if you want guidance for a specific use case.
+
+## Usage and licensing
+
+### Can I use this for commercial purposes?
+
+No.
+This project is currently for non-commercial use.
+See the repository license for details.
diff --git a/docs/source/getting_started.md b/docs/source/getting_started.md
new file mode 100644
index 00000000..9f1d8762
--- /dev/null
+++ b/docs/source/getting_started.md
@@ -0,0 +1,91 @@
+# Getting started
+
+BatDetect2 can be used in two ways: through the `batdetect2` command line interface (CLI), or as the `batdetect2` Python package.
+The CLI route does not require coding.
+You run commands in the terminal and, in some cases, write configuration files.
+The Python route gives you more flexibility and lets you integrate the model into your own workflows or experiments.
+For most common use cases, both routes give you the same results.
+
+## Try it out
+
+If you want to try BatDetect2 before installing anything locally:
+
+- [Hugging Face demo (UK species)](https://huggingface.co/spaces/macaodha/batdetect2)
+- [Google Colab notebook](https://colab.research.google.com/github/macaodha/batdetect2/blob/master/batdetect2_notebook.ipynb)
+
+## Installation
+
+To use `batdetect2` on your machine, you need to install it first.
+We recommend using `uv` for that.
+`uv` is a tool that helps manage Python software cleanly, without mixing it into the rest of your machine.
+Install `uv` first by following the [installation instructions](https://docs.astral.sh/uv/getting-started/installation/).
+
+### One-off usage
+
+If you are not ready to install `batdetect2` permanently, you can try it with:
+
+```bash
+uvx batdetect2
+```
+
+This still downloads the code and dependencies and runs them on your machine, but the environment is temporary.
+
+### Install the CLI
+
+If you want the `batdetect2` CLI to always be available in your terminal, run:
+
+```bash
+uv tool install batdetect2
+```
+
+If you need to upgrade later:
+
+```bash
+uv tool upgrade batdetect2
+```
+
+Verify the CLI is available:
+
+```bash
+batdetect2
+```
+
+You can then run your first workflow.
+See {doc}`tutorials/run-inference-on-folder` for more details.
+
+### Add it to your Python project
+
+If you are using BatDetect2 from Python code and already manage your projects with `uv`, you can add it with:
+
+```bash
+uv add batdetect2
+```
+
+If you want to upgrade it later:
+
+```bash
+uv add -U batdetect2
+```
+
+#### Alternative with `pip`
+
+If you prefer `pip`, you can use:
+
+```bash
+pip install batdetect2
+```
+
+It is a good idea to create a separate virtual environment first so this does not interfere with other Python environments.
+
+```bash
+python -m venv .venv
+source .venv/bin/activate
+```
+
+## What's next
+
+- Run your first workflow on a folder of recordings: {doc}`tutorials/run-inference-on-folder`
+- If you write code and want the Python route: {doc}`tutorials/integrate-with-a-python-pipeline`
+- For common practical tasks, go to {doc}`how_to/index`
+- For detailed command help, go to {doc}`reference/cli/index`
+- To understand the model and its outputs, go to {doc}`explanation/index`
diff --git a/docs/source/how_to/choose-a-model.md b/docs/source/how_to/choose-a-model.md
new file mode 100644
index 00000000..959deb78
--- /dev/null
+++ b/docs/source/how_to/choose-a-model.md
@@ -0,0 +1,112 @@
+# How to choose a model
+
+Use this guide when you want to choose which model checkpoint BatDetect2 loads.
+
+You can choose a model in both the CLI and the Python API.
+
+## Where you can choose the model
+
+In the CLI, use `--model` with commands that load a checkpoint, including:
+
+- `batdetect2 process`
+- `batdetect2 evaluate`
+- `batdetect2 train`
+- `batdetect2 finetune`
+
+In Python, pass the model source to `BatDetect2API.from_checkpoint(...)`.
+
+If you do not choose a model, BatDetect2 uses the built-in default UK model.
+
+## Use a local checkpoint path
+
+Use a local path when you already have a checkpoint file on disk.
+
+CLI example:
+
+```bash
+batdetect2 process directory \
+ path/to/audio \
+ path/to/outputs \
+ --model path/to/model.ckpt
+```
+
+Python example:
+
+```python
+from batdetect2.api_v2 import BatDetect2API
+
+api = BatDetect2API.from_checkpoint("path/to/model.ckpt")
+```
+
+## Use a bundled checkpoint alias
+
+BatDetect2 also supports bundled checkpoint aliases.
+
+The built-in UK model is available as `uk_same`.
+The alias `batdetect2_uk_same` also works.
+
+CLI example:
+
+```bash
+batdetect2 process directory \
+ path/to/audio \
+ path/to/outputs \
+ --model uk_same
+```
+
+Python example:
+
+```python
+from batdetect2.api_v2 import BatDetect2API
+
+api = BatDetect2API.from_checkpoint("uk_same")
+```
+
+## Use a Hugging Face URI
+
+You can also load a checkpoint from Hugging Face with a URI like:
+
+```text
+hf://owner/repo/path/to/model.ckpt
+```
+
+This needs the optional Hugging Face dependency to be installed.
+For example, install it with `pip install "batdetect2[huggingface]"` (the quotes prevent your shell from interpreting the square brackets).
+
+CLI example:
+
+```bash
+batdetect2 process directory \
+ path/to/audio \
+ path/to/outputs \
+ --model hf://owner/repo/path/to/model.ckpt
+```
+
+Python example:
+
+```python
+from batdetect2.api_v2 import BatDetect2API
+
+api = BatDetect2API.from_checkpoint(
+ "hf://owner/repo/path/to/model.ckpt"
+)
+```
+
+## Choose the right source
+
+- Use a local path when you already have a checkpoint file.
+- Use an alias when you want one of the bundled models.
+- Use a Hugging Face URI when the checkpoint lives in a Hugging Face repo.
+
+## Related pages
+
+- Run inference on a folder:
+ {doc}`../tutorials/run-inference-on-folder`
+- `BatDetect2API` reference:
+ {doc}`../reference/api`
+- Process command reference:
+ {doc}`../reference/cli/predict`
+- Train a custom model:
+ {doc}`../tutorials/train-a-custom-model`
+- Fine-tune from a checkpoint:
+ {doc}`fine-tune-from-a-checkpoint`
diff --git a/docs/source/how_to/choose-an-inference-input-mode.md b/docs/source/how_to/choose-an-inference-input-mode.md
new file mode 100644
index 00000000..38b0c5a7
--- /dev/null
+++ b/docs/source/how_to/choose-an-inference-input-mode.md
@@ -0,0 +1,71 @@
+# How to choose an inference input mode
+
+Use this guide to decide whether `process directory`, `process file_list`, or
+`process dataset` is the right entry point for your run.
+
+## Use `process directory` when the recordings already live together
+
+This is the simplest choice.
+
+Use it when:
+
+- your recordings are already organized in one directory tree,
+- you want BatDetect2 to discover audio files for you,
+- you are doing a first pass over a folder of recordings.
+
+```bash
+batdetect2 process directory \
+ path/to/audio_dir \
+ path/to/outputs \
+ --model path/to/model.ckpt
+```
+
+## Use `process file_list` when you need explicit control over the file set
+
+Use it when:
+
+- you want to run only a selected subset,
+- your files are spread across directories,
+- another tool has already produced the exact list of recordings to process.
+
+The list file should contain one path per line.
+
+```bash
+batdetect2 process file_list \
+ path/to/audio_files.txt \
+ path/to/outputs \
+ --model path/to/model.ckpt
+```
+
+## Use `process dataset` when your workflow is already annotation-set driven
+
+Use it when:
+
+- your project already has a `soundevent` annotation set,
+- you want prediction runs aligned with that annotation metadata,
+- you want BatDetect2 to resolve recording paths from the annotation set.
+
+```bash
+batdetect2 process dataset \
+ path/to/annotation_set.json \
+ path/to/outputs \
+ --model path/to/model.ckpt
+```
+
+The dataset command reads a `soundevent` annotation set and extracts unique
+recording paths before inference.
+
+## Rule of thumb
+
+- Start with `directory` for the easiest first run.
+- Use `file_list` when selection matters.
+- Use `dataset` when the rest of your workflow is already dataset-based.
+
+## Related pages
+
+- Run batch predictions:
+ {doc}`run-batch-predictions`
+- Tune inference clipping:
+ {doc}`tune-inference-clipping`
+- Process command reference:
+ {doc}`../reference/cli/predict`
diff --git a/docs/source/how_to/choose-and-configure-evaluation-tasks.md b/docs/source/how_to/choose-and-configure-evaluation-tasks.md
new file mode 100644
index 00000000..17a818e2
--- /dev/null
+++ b/docs/source/how_to/choose-and-configure-evaluation-tasks.md
@@ -0,0 +1,74 @@
+# How to choose and configure evaluation tasks
+
+Use this guide when the default evaluation tasks do not match the question you
+want to answer.
+
+## Know the default first
+
+By default, BatDetect2 evaluation starts with:
+
+- sound event detection,
+- sound event classification.
+
+Those are good defaults for many projects, but not for all of them.
+
+## Choose the task that matches the question
+
+Common built-in task families include:
+
+- `sound_event_detection`
+- `sound_event_classification`
+- `top_class_detection`
+- `clip_detection`
+- `clip_classification`
+
+Choose based on the question you care about.
+
+- Use sound-event tasks when you care about individual call events.
+- Use clip tasks when you care about clip-level presence or clip-level class
+ evidence.
+- Use top-class detection when you want matching based on the highest-scoring
+ class per detection.
+
+## Configure tasks in `EvaluationConfig`
+
+Example:
+
+```yaml
+tasks:
+ - name: sound_event_detection
+ prefix: detection
+ affinity_threshold: 0.0
+ strict_match: true
+ - name: clip_classification
+ prefix: clip_classification
+```
+
+Pass the config with:
+
+```bash
+batdetect2 evaluate \
+ path/to/test_dataset.yaml \
+ --model path/to/model.ckpt \
+ --base-dir path/to/project_root \
+ --evaluation-config path/to/evaluation.yaml
+```
+
+Include `--base-dir` when the dataset config resolves recordings through
+relative paths.
+
+## Change one thing at a time
+
+When comparing models or settings, avoid changing task definitions, thresholds,
+matching behavior, and datasets all at once.
+
+Otherwise it becomes hard to explain why the metric changed.
+
+## Related pages
+
+- Evaluation tutorial:
+ {doc}`../tutorials/evaluate-on-a-test-set`
+- Evaluation config reference:
+ {doc}`../reference/evaluation-config`
+- Evaluation concepts:
+ {doc}`../explanation/evaluation-concepts-and-matching`
diff --git a/docs/source/how_to/configure-aoef-dataset.md b/docs/source/how_to/configure-aoef-dataset.md
new file mode 100644
index 00000000..7e7a98cc
--- /dev/null
+++ b/docs/source/how_to/configure-aoef-dataset.md
@@ -0,0 +1,53 @@
+# How to configure an AOEF dataset source
+
+Use this guide when your annotations are stored in AOEF/soundevent JSON files,
+including exports from Whombat.
+
+## 1) Add an AOEF source entry
+
+In your dataset config, add a source with `format: aoef`.
+
+```yaml
+sources:
+ - name: my_aoef_source
+ format: aoef
+ audio_dir: /path/to/audio
+ annotations_path: /path/to/annotations.soundevent.json
+```
+
+## 2) Choose filtering behavior for annotation projects
+
+If `annotations_path` is an `AnnotationProject`, you can filter by task state.
+
+```yaml
+sources:
+ - name: whombat_verified
+ format: aoef
+ audio_dir: /path/to/audio
+ annotations_path: /path/to/project_export.aoef
+ filter:
+ only_completed: true
+ only_verified: true
+ exclude_issues: true
+```
+
+If you omit `filter`, default project filtering is applied.
+
+To disable filtering for project files:
+
+```yaml
+filter: null
+```
+
+## 3) Check that the source loads
+
+Run a summary on your dataset config:
+
+```bash
+batdetect2 data summary path/to/dataset.yaml
+```
+
+## 4) Continue to training or evaluation
+
+- For training: {doc}`../tutorials/train-a-custom-model`
+- For field-level reference: {doc}`../reference/data-sources`
diff --git a/docs/source/how_to/configure-audio-preprocessing.md b/docs/source/how_to/configure-audio-preprocessing.md
new file mode 100644
index 00000000..f59123cc
--- /dev/null
+++ b/docs/source/how_to/configure-audio-preprocessing.md
@@ -0,0 +1,66 @@
+# How to configure audio preprocessing
+
+Use this guide to set sample-rate and waveform-level preprocessing behavior.
+
+## 1) Set audio loader settings
+
+The audio loader config controls resampling.
+
+```yaml
+samplerate: 256000
+resample:
+ enabled: true
+ method: poly
+```
+
+If your recordings are already at the expected sample rate, you can disable
+resampling.
+
+```yaml
+samplerate: 256000
+resample:
+ enabled: false
+```
+
+## 2) Set waveform transforms in preprocessing config
+
+Waveform transforms are configured in `preprocess.audio_transforms`.
+
+```yaml
+preprocess:
+ audio_transforms:
+ - name: center_audio
+ - name: scale_audio
+ - name: fix_duration
+ duration: 0.5
+```
+
+Available built-ins:
+
+- `center_audio`
+- `scale_audio`
+- `fix_duration`
+
+## 3) Use the config in your workflow
+
+For CLI inference/evaluation, use `--audio-config`.
+
+```bash
+batdetect2 process directory \
+ path/to/audio_dir \
+ path/to/outputs \
+ --model path/to/model.ckpt \
+ --audio-config path/to/audio.yaml
+```
+
+## 4) Verify quickly on a small subset
+
+Run on a small folder first and confirm that outputs and runtime are as expected
+before full-batch runs.
+
+## Related pages
+
+- Spectrogram settings:
+ {doc}`configure-spectrogram-preprocessing`
+- Preprocessing config reference:
+ {doc}`../reference/preprocessing-config`
diff --git a/docs/source/how_to/configure-roi-mapping.md b/docs/source/how_to/configure-roi-mapping.md
new file mode 100644
index 00000000..6189923b
--- /dev/null
+++ b/docs/source/how_to/configure-roi-mapping.md
@@ -0,0 +1,57 @@
+# How to configure ROI mapping
+
+Use this guide to control how annotation geometry is encoded into training
+targets and decoded back into boxes.
+
+## 1) Set the default ROI mapper
+
+The default mapper is `anchor_bbox`.
+
+```yaml
+roi:
+ default:
+ name: anchor_bbox
+ anchor: bottom-left
+ time_scale: 1000.0
+ frequency_scale: 0.001163
+```
+
+## 2) Choose an anchor strategy
+
+Typical options include `bottom-left` and `center`.
+
+- `bottom-left` is the current default.
+- `center` can be easier to reason about in some workflows.
+
+## 3) Set scale factors intentionally
+
+- `time_scale` controls width scaling.
+- `frequency_scale` controls height scaling.
+
+Use values that are consistent with your model setup and keep them fixed when
+comparing experiments.
+
+## 4) (Optional) Override ROI mapping for specific classes
+
+Add class-specific mappers under `roi.overrides`.
+
+```yaml
+roi:
+ default:
+ name: anchor_bbox
+ anchor: bottom-left
+ time_scale: 1000.0
+ frequency_scale: 0.001163
+ overrides:
+ species_x:
+ name: anchor_bbox
+ anchor: center
+ time_scale: 1000.0
+ frequency_scale: 0.001163
+```
+
+## Related pages
+
+- Target definitions: {doc}`configure-target-definitions`
+- Class definitions: {doc}`define-target-classes`
+- Target encoding overview: {doc}`../explanation/target-encoding-and-decoding`
diff --git a/docs/source/how_to/configure-spectrogram-preprocessing.md b/docs/source/how_to/configure-spectrogram-preprocessing.md
new file mode 100644
index 00000000..b92d1ee8
--- /dev/null
+++ b/docs/source/how_to/configure-spectrogram-preprocessing.md
@@ -0,0 +1,59 @@
+# How to configure spectrogram preprocessing
+
+Use this guide to set STFT, frequency range, and spectrogram transforms.
+
+## 1) Configure STFT and frequency range
+
+```yaml
+preprocess:
+ stft:
+ window_duration: 0.002
+ window_overlap: 0.75
+ window_fn: hann
+ frequencies:
+ min_freq: 10000
+ max_freq: 120000
+```
+
+## 2) Configure spectrogram transforms
+
+`spectrogram_transforms` are applied in order.
+
+```yaml
+preprocess:
+ spectrogram_transforms:
+ - name: pcen
+ time_constant: 0.4
+ gain: 0.98
+ bias: 2.0
+ power: 0.5
+ - name: spectral_mean_subtraction
+ - name: scale_amplitude
+ scale: db
+```
+
+Common built-ins:
+
+- `pcen`
+- `spectral_mean_subtraction`
+- `scale_amplitude` (`db` or `power`)
+- `peak_normalize`
+
+## 3) Configure output size
+
+```yaml
+preprocess:
+ size:
+ height: 128
+ resize_factor: 0.5
+```
+
+## 4) Keep training and inference settings aligned
+
+Use the same preprocessing setup for training and prediction whenever possible.
+Large mismatches can degrade model performance.
+
+## Related pages
+
+- Why consistency matters: {doc}`../explanation/preprocessing-consistency`
+- Preprocessing config reference: {doc}`../reference/preprocessing-config`
diff --git a/docs/source/how_to/configure-target-definitions.md b/docs/source/how_to/configure-target-definitions.md
new file mode 100644
index 00000000..243d18e7
--- /dev/null
+++ b/docs/source/how_to/configure-target-definitions.md
@@ -0,0 +1,58 @@
+# How to configure target definitions
+
+Use this guide to define which annotated sound events are considered valid
+detection targets.
+
+## 1) Start from a targets config file
+
+```yaml
+detection_target:
+ name: bat
+ match_if:
+ name: has_tag
+ tag:
+ key: call_type
+ value: Echolocation
+ assign_tags:
+ - key: call_type
+ value: Echolocation
+ - key: order
+ value: Chiroptera
+```
+
+`match_if` decides whether an annotation is included in the detection target.
+
+## 2) Use condition combinators when needed
+
+You can combine conditions with `all_of`, `any_of`, and `not`.
+
+```yaml
+detection_target:
+ name: bat
+ match_if:
+ name: all_of
+ conditions:
+ - name: has_tag
+ tag:
+ key: call_type
+ value: Echolocation
+ - name: not
+ condition:
+ name: has_any_tag
+ tags:
+ - key: call_type
+ value: Social
+ - key: class
+ value: Not Bat
+```
+
+## 3) Verify with a small sample first
+
+Before full training, inspect a small annotation subset and confirm that the
+selection logic keeps the events you expect.
+
+## Related pages
+
+- Class mapping: {doc}`define-target-classes`
+- ROI mapping: {doc}`configure-roi-mapping`
+- Targets reference: {doc}`../reference/targets-config-workflow`
diff --git a/docs/source/how_to/define-target-classes.md b/docs/source/how_to/define-target-classes.md
new file mode 100644
index 00000000..ea0d87c5
--- /dev/null
+++ b/docs/source/how_to/define-target-classes.md
@@ -0,0 +1,59 @@
+# How to define target classes
+
+Use this guide to map annotations to classification labels used during
+training.
+
+## 1) Add classification target entries
+
+Each entry defines a class name and matching tags.
+
+```yaml
+classification_targets:
+ - name: pippip
+ tags:
+ - key: class
+ value: Pipistrellus pipistrellus
+ - name: pippyg
+ tags:
+ - key: class
+ value: Pipistrellus pygmaeus
+```
+
+## 2) Use `assign_tags` to control decoded output tags
+
+If you want prediction output tags to differ from matching tags, set
+`assign_tags` explicitly.
+
+```yaml
+classification_targets:
+ - name: pipistrelle_group
+ tags:
+ - key: class
+ value: Pipistrellus pipistrellus
+ assign_tags:
+ - key: genus
+ value: Pipistrellus
+```
+
+## 3) Use `match_if` for complex class rules
+
+For advanced conditions, use `match_if` instead of `tags`.
+
+```yaml
+classification_targets:
+ - name: long_call
+ match_if:
+ name: duration
+ operator: gt
+ seconds: 0.02
+```
+
+## 4) Confirm class names are unique
+
+`classification_targets.name` values must be unique.
+
+## Related pages
+
+- Detection-target filtering: {doc}`configure-target-definitions`
+- ROI mapping: {doc}`configure-roi-mapping`
+- Targets config reference: {doc}`../reference/targets-config-workflow`
diff --git a/docs/source/how_to/fine-tune-from-a-checkpoint.md b/docs/source/how_to/fine-tune-from-a-checkpoint.md
new file mode 100644
index 00000000..59fb2a6d
--- /dev/null
+++ b/docs/source/how_to/fine-tune-from-a-checkpoint.md
@@ -0,0 +1,45 @@
+# How to fine-tune from a checkpoint
+
+Use this guide when you want to continue from an existing checkpoint instead of training a fresh model config.
+
+## Use `--model` for checkpoint-based training
+
+Pass a checkpoint with `--model`.
+
+Do not combine `--model` with `--model-config`.
+
+```bash
+batdetect2 train \
+ path/to/train_dataset.yaml \
+ --val-dataset path/to/val_dataset.yaml \
+ --model path/to/model.ckpt \
+ --training-config path/to/training.yaml
+```
+
+## Keep targets and preprocessing aligned
+
+If you override targets or audio-related settings while fine-tuning, validate that they still match the checkpoint and your dataset.
+
+Mismatches here can produce confusing failures or invalid comparisons.
+
+## Decide what question the fine-tune should answer
+
+Common fine-tuning goals are:
+
+- adapting to local recording conditions,
+- adapting to a new label set,
+- improving performance on a narrower deployment context.
+
+Make that goal explicit before comparing results.
+
+## Evaluate after fine-tuning
+
+Always compare the fine-tuned checkpoint against a held-out dataset.
+
+Use the same evaluation setup when comparing before and after.
+
+## Related pages
+
+- Training tutorial: {doc}`../tutorials/train-a-custom-model`
+- Evaluate a test set: {doc}`../tutorials/evaluate-on-a-test-set`
+- Train command reference: {doc}`../reference/cli/train`
diff --git a/docs/source/how_to/import-legacy-batdetect2-annotations.md b/docs/source/how_to/import-legacy-batdetect2-annotations.md
new file mode 100644
index 00000000..f07e4151
--- /dev/null
+++ b/docs/source/how_to/import-legacy-batdetect2-annotations.md
@@ -0,0 +1,66 @@
+# How to import legacy batdetect2 annotations
+
+Use this guide if your annotations are in older batdetect2 JSON formats.
+
+Two legacy formats are supported:
+
+- `batdetect2`: one annotation JSON file per recording
+- `batdetect2_file`: one merged JSON file for many recordings
+
+## 1) Choose the correct source format
+
+Directory-based annotations (`format: batdetect2`):
+
+```yaml
+sources:
+ - name: legacy_per_file
+ format: batdetect2
+ audio_dir: /path/to/audio
+ annotations_dir: /path/to/annotation_json_dir
+```
+
+Merged annotation file (`format: batdetect2_file`):
+
+```yaml
+sources:
+ - name: legacy_merged
+ format: batdetect2_file
+ audio_dir: /path/to/audio
+ annotations_path: /path/to/merged_annotations.json
+```
+
+## 2) Set optional legacy filters
+
+Legacy filters are based on `annotated` and `issues` flags.
+
+```yaml
+filter:
+ only_annotated: true
+ exclude_issues: true
+```
+
+To load all entries regardless of flags:
+
+```yaml
+filter: null
+```
+
+## 3) Validate and convert if needed
+
+Check loaded records:
+
+```bash
+batdetect2 data summary path/to/dataset.yaml
+```
+
+Convert to annotation-set output for downstream tooling:
+
+```bash
+batdetect2 data convert path/to/dataset.yaml --output path/to/output.json
+```
+
+## 4) Continue with current workflows
+
+- Run predictions: {doc}`run-batch-predictions`
+- Train on imported data: {doc}`../tutorials/train-a-custom-model`
+- Field-level reference: {doc}`../reference/data-sources`
diff --git a/docs/source/how_to/index.md b/docs/source/how_to/index.md
new file mode 100644
index 00000000..40806dc9
--- /dev/null
+++ b/docs/source/how_to/index.md
@@ -0,0 +1,30 @@
+# How-to Guides
+
+How-to guides help you answer practical questions once you are past the first
+tutorial.
+
+Use this section when you already know the basic workflow and want help with one
+specific task.
+
+```{toctree}
+:maxdepth: 1
+
+choose-a-model
+choose-an-inference-input-mode
+run-batch-predictions
+tune-inference-clipping
+tune-detection-threshold
+inspect-class-scores-in-python
+inspect-detection-features-in-python
+save-predictions-in-different-output-formats
+fine-tune-from-a-checkpoint
+choose-and-configure-evaluation-tasks
+interpret-evaluation-outputs
+configure-aoef-dataset
+import-legacy-batdetect2-annotations
+configure-audio-preprocessing
+configure-spectrogram-preprocessing
+configure-target-definitions
+define-target-classes
+configure-roi-mapping
+```
diff --git a/docs/source/how_to/inspect-class-scores-in-python.md b/docs/source/how_to/inspect-class-scores-in-python.md
new file mode 100644
index 00000000..85ef6642
--- /dev/null
+++ b/docs/source/how_to/inspect-class-scores-in-python.md
@@ -0,0 +1,44 @@
+# How to inspect class scores in Python
+
+Use this guide when you need more than the top class label for each detection.
+
+## Get the ranked class scores
+
+`BatDetect2API.get_class_scores` returns `(class_name, score)` pairs for one detection.
+
+```python
+from pathlib import Path
+
+from batdetect2.api_v2 import BatDetect2API
+
+api = BatDetect2API.from_checkpoint(Path("path/to/model.ckpt"))
+prediction = api.process_file(Path("path/to/audio.wav"))
+
+for detection in prediction.detections:
+ print("detection score:", detection.detection_score)
+ for class_name, score in api.get_class_scores(detection):
+ print(class_name, score)
+```
+
+## Separate detection confidence from class ranking
+
+Keep these two ideas separate:
+
+- `detection_score` tells you how strongly the model kept the event as a detection,
+- `class_scores` tell you how the model ranked classes for that detected event.
+
+A detection can have a reasonable detection score while still having uncertain class ranking.
+
+## Hide the top class if needed
+
+If you want to inspect only the alternatives, pass `include_top_class=False`.
+
+```python
+api.get_class_scores(detection, include_top_class=False)
+```
+
+## Related pages
+
+- Python tutorial: {doc}`../tutorials/integrate-with-a-python-pipeline`
+- API reference: {doc}`../reference/api`
+- Understanding scores: {doc}`../explanation/what-batdetect2-predicts`
diff --git a/docs/source/how_to/inspect-detection-features-in-python.md b/docs/source/how_to/inspect-detection-features-in-python.md
new file mode 100644
index 00000000..72c22f53
--- /dev/null
+++ b/docs/source/how_to/inspect-detection-features-in-python.md
@@ -0,0 +1,49 @@
+# How to inspect detection features in Python
+
+Use this guide when you want the per-detection feature vectors exposed by the current API.
+
+## Get the feature vector for one detection
+
+Each detection carries a `features` vector.
+
+The API exposes it through `get_detection_features`.
+
+```python
+from pathlib import Path
+
+from batdetect2.api_v2 import BatDetect2API
+
+api = BatDetect2API.from_checkpoint(Path("path/to/model.ckpt"))
+prediction = api.process_file(Path("path/to/audio.wav"))
+
+for detection in prediction.detections:
+ features = api.get_detection_features(detection)
+ print(features.shape)
+```
+
+## Use features for exploration, not as ground truth labels
+
+These features are internal model representations attached to detections.
+
+They can be useful for:
+
+- exploratory visualization,
+- downstream clustering,
+- comparison across detections,
+- building extra analysis pipelines.
+
+They do not replace validation.
+
+They also do not automatically have a one-to-one interpretation as ecological variables.
+
+## Save predictions with features included
+
+If you need features on disk, use an output format that supports them, such as `raw` or `parquet`, and keep feature inclusion enabled.
+
+See {doc}`save-predictions-in-different-output-formats`.
+
+## Related pages
+
+- Understanding features and embeddings: {doc}`../explanation/extracted-features-and-embeddings`
+- Output formats reference: {doc}`../reference/output-formats`
+- API reference: {doc}`../reference/api`
diff --git a/docs/source/how_to/interpret-evaluation-outputs.md b/docs/source/how_to/interpret-evaluation-outputs.md
new file mode 100644
index 00000000..f5556c0b
--- /dev/null
+++ b/docs/source/how_to/interpret-evaluation-outputs.md
@@ -0,0 +1,41 @@
+# How to interpret evaluation outputs
+
+Use this guide after `batdetect2 evaluate` has written metrics and plots to disk.
+
+## Start by identifying the task
+
+Do not interpret a metric until you know which evaluation task produced it.
+
+For example, a detection score and a clip-classification score answer different questions.
+
+## Read the output directory as a bundle
+
+Treat the evaluation output directory as one package:
+
+- metrics,
+- plots,
+- saved predictions,
+- config context.
+
+Do not lift a single number out of context and treat it as the whole story.
+
+## Look for failure patterns, not just overall averages
+
+Check:
+
+- whether errors concentrate in certain taxa,
+- whether specific sites or recorder setups behave differently,
+- whether threshold choices are driving the result,
+- whether predictions are near clip boundaries or matching thresholds.
+
+## Keep validation and deployment questions separate
+
+A model can look good on one task and still be a poor fit for your deployment question.
+
+Interpret the outputs in relation to the real use case, not only the easiest metric to report.
+
+## Related pages
+
+- Evaluation tutorial: {doc}`../tutorials/evaluate-on-a-test-set`
+- Evaluation concepts: {doc}`../explanation/evaluation-concepts-and-matching`
+- Model output and validation: {doc}`../explanation/model-output-and-validation`
diff --git a/docs/source/how_to/run-batch-predictions.md b/docs/source/how_to/run-batch-predictions.md
new file mode 100644
index 00000000..515b74e7
--- /dev/null
+++ b/docs/source/how_to/run-batch-predictions.md
@@ -0,0 +1,62 @@
+# How to run batch predictions
+
+This guide shows practical command patterns for directory-based, file-list, and
+dataset-based processing runs.
+
+Use it after you already know which input mode you want and need concrete
+command templates for a repeatable batch run.
+
+## Process a directory
+
+```bash
+batdetect2 process directory \
+ path/to/model.ckpt \
+ path/to/audio_dir \
+ path/to/outputs
+```
+
+Use this when BatDetect2 should discover the audio files for you.
+
+## Process a file list
+
+```bash
+batdetect2 process file_list \
+ path/to/model.ckpt \
+ path/to/audio_files.txt \
+ path/to/outputs
+```
+
+Use this when another part of your workflow already produced the exact recording
+list to process.
+
+## Process a dataset config
+
+```bash
+batdetect2 process dataset \
+ path/to/model.ckpt \
+ path/to/annotation_set.json \
+ path/to/outputs
+```
+
+Use this when your project already has a `soundevent` annotation set and you
+want to extract unique recording paths from it.
+
+## Useful options
+
+- `--batch-size` to control throughput.
+- `--workers` to set data-loading parallelism.
+- `--format` to select output format.
+- `--inference-config` to control clipping and loader behavior.
+- `--outputs-config` to control serialization and output transforms.
+- `--detection-threshold` to override the detection threshold for a run.
+
+## Practical workflow
+
+For large runs:
+
+1. test the command on a small reviewed subset,
+2. lock the config files and command shape,
+3. write outputs to a dedicated directory per run,
+4. record the checkpoint, config paths, and thresholds used.
+
+For complete option details, see {doc}`../reference/cli/predict`.
diff --git a/docs/source/how_to/save-predictions-in-different-output-formats.md b/docs/source/how_to/save-predictions-in-different-output-formats.md
new file mode 100644
index 00000000..ee4d5c6e
--- /dev/null
+++ b/docs/source/how_to/save-predictions-in-different-output-formats.md
@@ -0,0 +1,95 @@
+# How to save predictions in different output formats
+
+Use this guide when you need BatDetect2 outputs in a specific representation for
+downstream tools.
+
+## Choose the format that matches the job
+
+Current built-in output formats include:
+
+- `raw`:
+ one NetCDF file per clip, best for rich structured outputs,
+- `parquet`:
+ tabular storage for data analysis workflows,
+- `soundevent`:
+ prediction-set JSON for soundevent-style tooling,
+- `batdetect2`:
+ legacy-compatible per-recording JSON and CSV outputs.
+
+## Select a format from the CLI
+
+Use `--format` for quick experiments.
+
+```bash
+batdetect2 process directory \
+ path/to/model.ckpt \
+ path/to/audio_dir \
+ path/to/outputs \
+ --format parquet
+```
+
+## Use an outputs config for repeatable runs
+
+Use an outputs config when you want reproducible control over format and
+transforms.
+
+Example:
+
+```yaml
+format:
+ name: raw
+ include_class_scores: true
+ include_features: true
+ include_geometry: true
+transform:
+ detection_transforms: []
+ clip_transforms: []
+```
+
+Run with:
+
+```bash
+batdetect2 process directory \
+ path/to/model.ckpt \
+ path/to/audio_dir \
+ path/to/outputs \
+ --outputs-config path/to/outputs.yaml
+```
+
+## Pick the simplest useful format
+
+- Use `raw` if you want the richest output surface and easy round-tripping.
+- Use `parquet` if you want tabular analysis in Python or data-lake workflows.
+- Use `soundevent` if you want prediction-set JSON.
+- Use `batdetect2` when you need legacy BatDetect2-style outputs.
+
+## Enable legacy CNN feature CSVs
+
+The `batdetect2` formatter can also write the legacy CNN feature sidecar CSVs.
+This is controlled through the outputs config.
+
+Example:
+
+```yaml
+format:
+ name: batdetect2
+ write_cnn_features_csv: true
+transform:
+ detection_transforms: []
+ clip_transforms: []
+```
+
+When enabled, BatDetect2 writes:
+
+- one `.json` file per recording,
+- one detection `.csv` file per recording,
+- one `_cnn_features.csv` file per recording when detections are present.
+
+## Related pages
+
+- Outputs config reference:
+ {doc}`../reference/outputs-config`
+- Output formats reference:
+ {doc}`../reference/output-formats`
+- Output transforms reference:
+ {doc}`../reference/output-transforms`
diff --git a/docs/source/how_to/tune-detection-threshold.md b/docs/source/how_to/tune-detection-threshold.md
new file mode 100644
index 00000000..8bdd4c75
--- /dev/null
+++ b/docs/source/how_to/tune-detection-threshold.md
@@ -0,0 +1,51 @@
+# How to tune detection threshold
+
+Use this guide to compare detection outputs at different threshold values.
+
+The goal is not to find a universal threshold.
+
+The goal is to choose a threshold that fits your reviewed local data and the
+project trade-off between missed calls and false positives.
+
+## 1) Start with a baseline run
+
+Run an initial prediction workflow and keep outputs in a dedicated folder.
+
+## 2) Sweep threshold values
+
+Run `process` multiple times with different thresholds (for example `0.1`,
+`0.3`, `0.5`) and compare output counts and quality on the same validation
+subset.
+
+```bash
+batdetect2 process directory \
+ path/to/model.ckpt \
+ path/to/audio_dir \
+ path/to/outputs_thr_03 \
+ --detection-threshold 0.3
+```
+
+Keep each threshold run in a separate output directory.
+
+That makes it easier to compare counts and inspect example files without mixing
+results.
+
+## 3) Validate against known calls
+
+Use files with trusted annotations or expert review to select a threshold that
+fits your project goals.
+
+Check both:
+
+- obvious false positives,
+- obvious missed calls.
+
+If class interpretation matters downstream, inspect class ranking behavior as
+well, not just detection counts.
+
+## 4) Record your chosen setting
+
+Write down the chosen threshold and rationale so analyses are reproducible.
+
+For conceptual trade-offs, see
+{doc}`../explanation/model-output-and-validation`.
diff --git a/docs/source/how_to/tune-inference-clipping.md b/docs/source/how_to/tune-inference-clipping.md
new file mode 100644
index 00000000..a12999f8
--- /dev/null
+++ b/docs/source/how_to/tune-inference-clipping.md
@@ -0,0 +1,73 @@
+# How to tune inference clipping
+
+Use this guide when long recordings need to be split into smaller clips during
+inference.
+
+## What clipping controls
+
+`InferenceConfig.clipping` controls how recordings are split before batching.
+
+Key fields are:
+
+- `duration`:
+ clip duration in seconds,
+- `overlap`:
+ overlap between adjacent clips,
+- `max_empty`:
+ how much empty padding is allowed,
+- `discard_empty`:
+ whether empty clips are dropped.
+
+## Start from the defaults
+
+Use the built-in clipping behavior first unless you already know you need
+something else.
+
+Only tune clipping when:
+
+- recordings are much longer than your normal working set,
+- you are seeing edge effects around calls,
+- you need tighter control over throughput or padding behavior.
+
+## Override clipping with an inference config
+
+Create an inference config file and pass it to `process` or `evaluate`.
+
+Example:
+
+```yaml
+clipping:
+ enabled: true
+ duration: 0.5
+ overlap: 0.1
+ max_empty: 0.0
+ discard_empty: true
+loader:
+ batch_size: 8
+```
+
+Run with:
+
+```bash
+batdetect2 process directory \
+ path/to/model.ckpt \
+ path/to/audio_dir \
+ path/to/outputs \
+ --inference-config path/to/inference.yaml
+```
+
+## Validate clipping changes on a small reviewed subset
+
+Changing clipping changes what the model sees per batch and can change how
+events near clip boundaries behave.
+
+Check a reviewed subset before applying clipping changes to a full project.
+
+## Related pages
+
+- Inference config reference:
+ {doc}`../reference/inference-config`
+- Run batch predictions:
+ {doc}`run-batch-predictions`
+- Understanding the pipeline:
+ {doc}`../explanation/pipeline-overview`
diff --git a/docs/source/index.md b/docs/source/index.md
new file mode 100644
index 00000000..3e4e6ca8
--- /dev/null
+++ b/docs/source/index.md
@@ -0,0 +1,114 @@
+# Home
+
+Welcome to the BatDetect2 documentation.
+
+## What is BatDetect2?
+
+`batdetect2` is a deep learning model and software package for detecting and
+classifying bat echolocation calls in high-frequency audio recordings.
+
+You can use it from the command line or from Python, depending on how much
+control you need.
+
+In practice, BatDetect2 scans a recording, finds sounds that look like bat
+calls, and returns one result for each detected call.
+Each result can include where the call appears in the recording, shown as a box
+with start and end time and the lowest and highest frequency, how confident the
+model is that it found a call, and how strongly it matches the available
+classes.
+
+The built-in default model is trained for 17 UK species.
+The package also supports custom training, fine-tuning, evaluation, and more
+advanced workflows from Python.
+
+For more detail on the underlying approach, see the pre-print:
+[Towards a General Approach for Bat Echolocation Detection and Classification](https://www.biorxiv.org/content/10.1101/2022.12.14.520490v1)
+
+```{warning}
+Treat outputs as model predictions, not ground truth.
+Always validate on reviewed local data before using results for ecological inference.
+```
+
+## What can I do with it?
+
+- I want to run the model on my recordings:
+ {doc}`tutorials/run-inference-on-folder`
+- I write code and want to use it from Python:
+ {doc}`tutorials/integrate-with-a-python-pipeline`
+- I want to train or fine-tune a custom model:
+ {doc}`tutorials/train-a-custom-model`
+- I want to evaluate a trained model on held-out data:
+ {doc}`tutorials/evaluate-on-a-test-set`
+
+```{note}
+Looking for the previous BatDetect2 workflow?
+See {doc}`legacy/index`.
+The legacy docs are still available, but new workflows should use `batdetect2 process` and `BatDetect2API`.
+```
+
+## How to use this site
+
+Start with {doc}`getting_started` if you are new.
+
+Then choose the section that matches what you need.
+
+If you are here mainly to run the model on recordings, start with Tutorials.
+
+| Section | Best for | Start here |
+| ------------- | --------------------------------------------- | ------------------------ |
+| Tutorials | Step-by-step routes for the most common tasks | {doc}`tutorials/index` |
+| How-to guides | Answers to specific practical questions | {doc}`how_to/index` |
+| Reference | Detailed command and settings help | {doc}`reference/index` |
+| Understanding | Concepts, interpretation, and trade-offs | {doc}`explanation/index` |
+| Legacy | Previous workflow and migration guidance | {doc}`legacy/index` |
+
+## Get in touch
+
+- GitHub repository:
+ [macaodha/batdetect2](https://github.com/macaodha/batdetect2)
+- Questions, bug reports, and feature requests:
+ [GitHub Issues](https://github.com/macaodha/batdetect2/issues)
+- Common questions:
+ {doc}`faq`
+- Want to contribute?
+ See {doc}`development/index`
+
+## Cite this work
+
+If you use BatDetect2 in research, please cite:
+
+Mac Aodha, O., Martinez Balvanera, S., Damstra, E., et al.
+(2022).
+_Towards a General Approach for Bat Echolocation Detection and Classification_.
+bioRxiv.
+
+or the bibtex entry
+
+```bibtex
+@article{batdetect2_2022,
+ title = {Towards a General Approach for Bat Echolocation Detection and Classification},
+ author = {Mac Aodha, Oisin and Mart\'{i}nez Balvanera, Santiago and Damstra, Elise and Cooke, Martyn and Eichinski, Philip and Browning, Ella and Barataud, Michel and Boughey, Katherine and Coles, Roger and Giacomini, Giada and MacSwiney G., M. Cristina and K. Obrist, Martin and Parsons, Stuart and Sattler, Thomas and Jones, Kate E.},
+ journal = {bioRxiv},
+ year = {2022}
+}
+```
+
+```{toctree}
+:maxdepth: 1
+:caption: Get Started
+
+getting_started
+faq
+tutorials/index
+how_to/index
+reference/index
+explanation/index
+legacy/index
+```
+
+```{toctree}
+:maxdepth: 1
+:caption: Contributing
+
+development/index
+```
diff --git a/docs/source/legacy/cli-detect.md b/docs/source/legacy/cli-detect.md
new file mode 100644
index 00000000..10a13dad
--- /dev/null
+++ b/docs/source/legacy/cli-detect.md
@@ -0,0 +1,53 @@
+# CLI workflow: `batdetect2 detect`
+
+This page documents the previous CLI workflow based on `batdetect2 detect`.
+
+```{warning}
+This is documentation for a previous version of batdetect2.
+For new workflows, use `batdetect2 process directory` instead.
+If you are migrating, start with {doc}`migration-guide`.
+```
+
+## Processing a folder of audio files
+
+```bash
+batdetect2 detect AUDIO_DIR ANN_DIR DETECTION_THRESHOLD
+```
+
+Example:
+
+```bash
+batdetect2 detect example_data/audio/ example_data/anns/ 0.3
+```
+
+This command scans a directory of audio files, runs the BatDetect2 detector on
+each file, and writes BatDetect2-style outputs into `ANN_DIR`.
+Those outputs usually include one JSON file and one CSV file per recording, and
+can optionally include extra feature CSVs.
+
+`AUDIO_DIR` is the folder containing the input `.wav` files.
+`ANN_DIR` is the folder where model outputs are written.
+
+`DETECTION_THRESHOLD` controls which detections are kept.
+Predictions below this score are discarded.
+Smaller values keep more detections, but usually also increase mistakes.
+
+Common options:
+
+- `--cnn_features` Write extra CNN feature CSV files for each recording.
+- `--spec_features` Extract and write traditional acoustic spectrogram feature
+ CSV files.
+ These are saved as `*_spec_features.csv` files.
+- `--time_expansion_factor` Set the time expansion factor used for all files in
+ the run.
+- `--save_preds_if_empty` Save output files even when no detections are found.
+- `--model_path` Use a specific checkpoint instead of the included default
+ model.
+ If omitted, the command uses the default model trained on UK data.
+
+## Related pages
+
+- Migration guide:
+ {doc}`migration-guide`
+- Current process docs:
+ {doc}`../reference/cli/predict`
diff --git a/docs/source/legacy/index.md b/docs/source/legacy/index.md
new file mode 100644
index 00000000..a9b2c56e
--- /dev/null
+++ b/docs/source/legacy/index.md
@@ -0,0 +1,28 @@
+# BatDetect2 v1.0 documentation
+
+This section documents the BatDetect2 workflow for version 1.
+
+Use these pages if you need to keep working with the older `batdetect2 detect` command or the older `batdetect2.api` interface.
+
+For new projects, we recommend the current workflow:
+
+- CLI:
+ `batdetect2 process`
+- Python:
+ `batdetect2.api_v2.BatDetect2API`
+
+If you are moving from the older workflow, start with {doc}`migration-guide`.
+
+```{warning}
+These pages describe the previous workflow.
+They are kept for continuity and migration support.
+New users should start with {doc}`../getting_started` and {doc}`../tutorials/index`.
+```
+
+```{toctree}
+:maxdepth: 1
+
+cli-detect
+python-api
+migration-guide
+```
diff --git a/docs/source/legacy/migration-guide.md b/docs/source/legacy/migration-guide.md
new file mode 100644
index 00000000..5c75a52d
--- /dev/null
+++ b/docs/source/legacy/migration-guide.md
@@ -0,0 +1,123 @@
+# BatDetect2 2.0 migration guide
+
+Use this guide when moving from BatDetect2 1.x workflows to the CLI and API in
+2.x.
+
+## Why migrate
+
+You get access to newer features.
+The codebase changed quite a bit and now gives you much more control over the
+workflow through config files, improved training and fine-tuning code, and a
+more flexible sound target definition system.
+
+You can also run newer or improved models.
+That includes updated versions of the UK model, plus other models trained with
+the newer codebase.
+
+We are no longer actively supporting version 1.
+No new enhancements are planned there, and only major bug fixes may still be
+considered.
+Future work is focused on version 2, including compatibility with newer Python
+versions.
+
+## Deprecation plan
+
+We have kept the `batdetect2.api` module and the `batdetect2 detect` CLI command
+in place for now.
+You can keep using them without changing your current workflow.
+However, many of the internal functions were relocated, removed or modified.
+If your code relied on anything outside of the `api` module, it may break.
+It is worth checking the new docs first, since there may already be a newer
+feature that covers your use case.
+If not, please open an issue.
+
+Because the old `api` and CLI command are now redundant with the newer stack, we
+plan to remove them in about a year.
+If you want to keep pipelines up to date and long-running, it is a good idea to
+migrate to version 2.
+
+## How to migrate
+
+If you are only using the `batdetect2 detect` CLI command or the
+`batdetect2.api` module, the migration should be fairly simple.
+This guide only covers these two entry points.
+
+### CLI mapping
+
+- `batdetect2 detect AUDIO_DIR ANN_DIR DETECTION_THRESHOLD` -> `batdetect2
+ process directory AUDIO_DIR OUTPUT_PATH --detection-threshold
+ DETECTION_THRESHOLD ...`
+
+Main changes:
+
+- outputs can be written in different formats.
+ See the output format reference for the available options.
+- the detection threshold is now an option instead of a required positional
+ argument.
+- options like saving CNN features are now controlled through config rather than
+ command flags.
+- there are separate subcommands for processing a directory, file list, or
+ dataset.
+
+### Python API mapping
+
+- old:
+ `import batdetect2.api as api`
+- current:
+ `from batdetect2 import BatDetect2API`
+
+Typical migration shape:
+
+```python
+from pathlib import Path
+
+from batdetect2 import BatDetect2API
+
+# If no checkpoint is provided, the default UK model is loaded
+api = BatDetect2API.from_checkpoint()
+prediction = api.process_file(Path("path/to/audio.wav"))
+```
+
+Useful replacements:
+
+- `batdetect2.api.process_file` -> current `BatDetect2API.process_file`
+- `batdetect2.api.process_audio` -> current `BatDetect2API.process_audio`
+- `batdetect2.api.process_spectrogram` -> current
+ `BatDetect2API.process_spectrogram`
+- one-off batch loops -> `BatDetect2API.process_files` or CLI `process`
+
+### Model changes
+
+The default checkpoint used by the new CLI `process` commands and by
+`BatDetect2API` is a newer model trained from scratch using the updated training
+code, but the same model architecture, training procedure, and data.
+Performance did not change substantially, but some differences are still
+expected.
+
+### Species names
+
+For the default UK model there are two naming changes:
+
+1. The original model had a typo and instead of `Barbastella barbastellus` it
+ used `Barbastellus barbastellus`.
+ This has now been corrected.
+2. There has been a recent change in name for `Eptesicus serotinus` to
+ `Cnephaeus serotinus`.
+
+## Stay on version 1
+
+If you prefer not to migrate to version 2 yet, you can keep using version 1.
+In that case, it is a good idea to pin your dependency:
+
+```bash
+pip install "batdetect2>=1.3.1,<2"
+```
+
+## Related pages
+
+- Getting started:
+ {doc}`../getting_started`
+- Tutorials:
+ {doc}`../tutorials/index`
+- API reference:
+ {doc}`../reference/api`
diff --git a/docs/source/legacy/python-api.md b/docs/source/legacy/python-api.md
new file mode 100644
index 00000000..9e07f043
--- /dev/null
+++ b/docs/source/legacy/python-api.md
@@ -0,0 +1,55 @@
+# Legacy Python API: `batdetect2.api`
+
+This page documents the previous Python API workflow based on `batdetect2.api`.
+
+```{warning}
+This is documentation for a previous version of batdetect2.
+For new workflows, use `batdetect2.BatDetect2API`.
+If you are migrating, start with {doc}`migration-guide`.
+```
+
+## Using BatDetect2 in Python
+
+If you prefer to process data inside a Python script, you can use the `batdetect2.api` module.
+
+This interface gives you a simple entry point for running the built-in BatDetect2 model and also exposes the default model and default configuration more directly than the current API.
+
+You can process a whole file in one step, or load audio, generate a spectrogram, and work with lower-level functions yourself.
+
+Common functions:
+
+- `process_file` Load an audio file, run the model, and return BatDetect2-style results for that recording.
+- `process_audio` Run inference on an audio array that is already loaded in memory.
+- `process_spectrogram` Run inference starting from a spectrogram tensor instead of raw audio.
+- `load_audio` Load and resample audio using the legacy preprocessing path.
+- `generate_spectrogram` Convert audio into the spectrogram representation expected by the model.
+- `postprocess` Convert raw model outputs into detections and extracted features.
+
+Typical usage:
+
+```python
+import batdetect2.api as api
+
+AUDIO_FILE = "example_data/audio/20170701_213954-MYOMYS-LR_0_0.5.wav"
+
+# Process a whole file
+results = api.process_file(AUDIO_FILE)
+annotations = results["pred_dict"]["annotation"]
+
+# Or, load audio and compute spectrograms
+audio = api.load_audio(AUDIO_FILE)
+spec = api.generate_spectrogram(audio)
+
+# And process the audio or the spectrogram with the model
+detections, features, spec = api.process_audio(audio)
+detections, features = api.process_spectrogram(spec)
+
+# Integrate the detections or extracted features into your own analysis
+```
+
+This interface is most useful when you want to work directly with detections, features, spectrograms, or intermediate arrays inside your own code.
+
+## Related pages
+
+- Migration guide: {doc}`migration-guide`
+- Current API reference: {doc}`../reference/api`
diff --git a/docs/source/reference/api.md b/docs/source/reference/api.md
new file mode 100644
index 00000000..4b931408
--- /dev/null
+++ b/docs/source/reference/api.md
@@ -0,0 +1,39 @@
+# `BatDetect2API` reference
+
+`BatDetect2API` is the main Python entry point for BatDetect2.
+
+Use it when you want to load a model, run prediction, inspect detections,
+evaluate results, or train from Python.
+
+Defined in `batdetect2.api_v2`.
+
+## Main ways to create it
+
+- `BatDetect2API.from_checkpoint(path, ...)`
+ - load a trained checkpoint, a bundled checkpoint alias, or a Hugging Face
+ checkpoint.
+- `BatDetect2API.from_config(model_config=..., targets_config=..., ...)`
+ - build a full model stack from config objects.
+
+## Common tasks
+
+- Load a checkpoint and run prediction on one file.
+- Run prediction on many files or clips.
+- Save predictions in one of the supported output formats.
+- Evaluate a model on labelled data.
+- Fine-tune an existing checkpoint on new targets.
+
+## Generated reference
+
+```{eval-rst}
+.. autoclass:: batdetect2.api_v2.BatDetect2API
+```
+
+## Related pages
+
+- Python tutorial:
+ {doc}`../tutorials/integrate-with-a-python-pipeline`
+- Outputs config reference:
+ {doc}`outputs-config`
+- Output formats reference:
+ {doc}`output-formats`
diff --git a/docs/source/reference/cli/base.rst b/docs/source/reference/cli/base.rst
new file mode 100644
index 00000000..d270aa3a
--- /dev/null
+++ b/docs/source/reference/cli/base.rst
@@ -0,0 +1,8 @@
+Base command
+============
+
+The options on this page apply to all subcommands.
+
+.. click:: batdetect2.cli:cli
+ :prog: batdetect2
+ :nested: none
diff --git a/docs/source/reference/cli/data.rst b/docs/source/reference/cli/data.rst
new file mode 100644
index 00000000..506f2540
--- /dev/null
+++ b/docs/source/reference/cli/data.rst
@@ -0,0 +1,8 @@
+Data command
+============
+
+Inspect and convert dataset config files.
+
+.. click:: batdetect2.cli.data:data
+ :prog: batdetect2 data
+ :nested: full
diff --git a/docs/source/reference/cli/detect_legacy.rst b/docs/source/reference/cli/detect_legacy.rst
new file mode 100644
index 00000000..2571a1b8
--- /dev/null
+++ b/docs/source/reference/cli/detect_legacy.rst
@@ -0,0 +1,18 @@
+Legacy detect command
+=====================
+
+.. warning::
+
+ ``batdetect2 detect`` is a legacy compatibility command.
+ Prefer ``batdetect2 process directory`` for new workflows.
+
+Migration at a glance
+---------------------
+
+- Legacy: ``batdetect2 detect AUDIO_DIR ANN_DIR DETECTION_THRESHOLD``
+- Current: ``batdetect2 process directory MODEL_PATH AUDIO_DIR OUTPUT_PATH``
+ with optional ``--detection-threshold``
+
+.. click:: batdetect2.cli.compat:detect
+ :prog: batdetect2 detect
+ :nested: none
diff --git a/docs/source/reference/cli/evaluate.rst b/docs/source/reference/cli/evaluate.rst
new file mode 100644
index 00000000..79281dbb
--- /dev/null
+++ b/docs/source/reference/cli/evaluate.rst
@@ -0,0 +1,11 @@
+Evaluate command
+================
+
+Use ``batdetect2 evaluate`` to compare a checkpoint against labelled test data.
+
+This command writes metrics and any configured artifacts to the output
+directory.
+
+.. click:: batdetect2.cli.evaluate:evaluate_command
+ :prog: batdetect2 evaluate
+ :nested: none
diff --git a/docs/source/reference/cli/finetune.rst b/docs/source/reference/cli/finetune.rst
new file mode 100644
index 00000000..637074c5
--- /dev/null
+++ b/docs/source/reference/cli/finetune.rst
@@ -0,0 +1,11 @@
+Finetune command
+================
+
+Use ``batdetect2 finetune`` to adapt an existing checkpoint to a new target
+definition.
+
+If you do not pass ``--model``, the bundled ``uk_same`` checkpoint is used.
+
+.. click:: batdetect2.cli.finetune:finetune_command
+ :prog: batdetect2 finetune
+ :nested: none
diff --git a/docs/source/reference/cli/index.md b/docs/source/reference/cli/index.md
new file mode 100644
index 00000000..5d25211e
--- /dev/null
+++ b/docs/source/reference/cli/index.md
@@ -0,0 +1,50 @@
+# CLI reference
+
+Use this section to find the right command quickly, then open the command page
+for the full option list.
+
+## Command map
+
+| Command | Use it for | Required positional args |
+| --- | --- | --- |
+| `batdetect2 process` | Run inference on audio | Depends on subcommand (`directory`, `file_list`, `dataset`) |
+| `batdetect2 data` | Inspect and convert dataset configs | Depends on subcommand (`summary`, `convert`) |
+| `batdetect2 train` | Train or fine-tune models | `TRAIN_DATASET` |
+| `batdetect2 finetune` | Fine-tune a checkpoint on new targets | `TRAIN_DATASET` plus `--targets` |
+| `batdetect2 evaluate` | Evaluate a checkpoint on a test dataset | `TEST_DATASET` |
+| `batdetect2 detect` | Legacy compatibility workflow | `AUDIO_DIR`, `ANN_DIR`, `DETECTION_THRESHOLD` |
+
+## Notes
+
+- Global CLI options are documented in {doc}`base`.
+- Paths with spaces should be wrapped in quotes.
+- Input audio is expected to be mono.
+- `process` uses the optional `--detection-threshold` override.
+- `evaluate` takes `TEST_DATASET` as a positional argument and uses `--model`
+ for the checkpoint override.
+- `finetune` defaults to the bundled `uk_same` checkpoint if `--model` is not
+ provided.
+
+```{warning}
+`batdetect2 detect` is a legacy command.
+Prefer `batdetect2 process directory` for new workflows.
+```
+
+## Related pages
+
+- {doc}`../../tutorials/run-inference-on-folder`
+- {doc}`../../how_to/run-batch-predictions`
+- {doc}`../../how_to/tune-detection-threshold`
+- {doc}`../configs`
+
+```{toctree}
+:maxdepth: 1
+
+Base command and global options <base>
+Process command group <predict>
+Data command group <data>
+Train command <train>
+Finetune command <finetune>
+Evaluate command <evaluate>
+Legacy detect command <detect_legacy>
+```
diff --git a/docs/source/reference/cli/predict.rst b/docs/source/reference/cli/predict.rst
new file mode 100644
index 00000000..080ab4a9
--- /dev/null
+++ b/docs/source/reference/cli/predict.rst
@@ -0,0 +1,17 @@
+Process command
+===============
+
+Use ``batdetect2 process`` to run inference on audio.
+
+Choose a subcommand based on how you want to provide the input:
+
+- ``directory`` for all supported audio files in one folder
+- ``file_list`` for a text file with one audio path per line
+- ``dataset`` for recordings referenced by a dataset file
+
+Use ``--detection-threshold`` when you want to override the configured
+threshold for one run.
+
+.. click:: batdetect2.cli.inference:process
+ :prog: batdetect2 process
+ :nested: full
diff --git a/docs/source/reference/cli/train.rst b/docs/source/reference/cli/train.rst
new file mode 100644
index 00000000..27661ec5
--- /dev/null
+++ b/docs/source/reference/cli/train.rst
@@ -0,0 +1,12 @@
+Train command
+=============
+
+Use ``batdetect2 train`` to start from a fresh model config or continue from an
+existing checkpoint.
+
+If you want to adapt an existing checkpoint to a new target definition, use
+``batdetect2 finetune`` instead.
+
+.. click:: batdetect2.cli.train:train_command
+ :prog: batdetect2 train
+ :nested: none
diff --git a/docs/source/reference/configs.rst b/docs/source/reference/configs.rst
new file mode 100644
index 00000000..f4c93125
--- /dev/null
+++ b/docs/source/reference/configs.rst
@@ -0,0 +1,18 @@
+Config reference
+================
+
+BatDetect2 uses separate config objects for different workflow surfaces.
+
+Use the dedicated reference pages for each config family:
+
+- model config
+- training config
+- logging config
+- inference config
+- evaluation config
+- outputs config
+- preprocessing config
+- postprocess config
+- targets config workflow
+
+Example config files live under `example_data/configs/`.
diff --git a/docs/source/reference/data-sources.md b/docs/source/reference/data-sources.md
new file mode 100644
index 00000000..f74213c6
--- /dev/null
+++ b/docs/source/reference/data-sources.md
@@ -0,0 +1,76 @@
+# Data source reference
+
+This page summarizes dataset source formats and their config fields.
+
+## Supported source formats
+
+| Format | Description |
+| --- | --- |
+| `aoef` | AOEF/soundevent annotation files (`AnnotationSet` or `AnnotationProject`) |
+| `batdetect2` | Legacy format with one JSON annotation file per recording |
+| `batdetect2_file` | Legacy format with one merged JSON annotation file |
+
+## AOEF (`format: aoef`)
+
+Required fields:
+
+- `name`
+- `format`
+- `audio_dir`
+- `annotations_path`
+
+Optional fields:
+
+- `description`
+- `filter`
+
+`filter` is only used when `annotations_path` points to an
+`AnnotationProject`.
+
+AOEF filter options:
+
+- `only_completed` (default: `true`)
+- `only_verified` (default: `false`)
+- `exclude_issues` (default: `true`)
+
+Use `filter: null` to disable project filtering.
+
+## Legacy per-file (`format: batdetect2`)
+
+Required fields:
+
+- `name`
+- `format`
+- `audio_dir`
+- `annotations_dir`
+
+Optional fields:
+
+- `description`
+- `filter`
+
+## Legacy merged file (`format: batdetect2_file`)
+
+Required fields:
+
+- `name`
+- `format`
+- `audio_dir`
+- `annotations_path`
+
+Optional fields:
+
+- `description`
+- `filter`
+
+Legacy filter options:
+
+- `only_annotated` (default: `true`)
+- `exclude_issues` (default: `true`)
+
+Use `filter: null` to disable filtering.
+
+## Related guides
+
+- {doc}`../how_to/configure-aoef-dataset`
+- {doc}`../how_to/import-legacy-batdetect2-annotations`
diff --git a/docs/source/reference/detections.md b/docs/source/reference/detections.md
new file mode 100644
index 00000000..badcec23
--- /dev/null
+++ b/docs/source/reference/detections.md
@@ -0,0 +1,42 @@
+# Detections reference
+
+These are the main prediction objects returned by BatDetect2 inference methods.
+
+Defined in `batdetect2.postprocess.types`.
+
+## `ClipDetections`
+
+`ClipDetections` represents the predictions for one clip or one full recording.
+
+Fields:
+
+- `clip`
+ - the `soundevent` clip metadata for the processed audio.
+- `detections`
+ - list of `Detection` objects for that clip.
+
+## `Detection`
+
+`Detection` represents one detected event.
+
+Fields:
+
+- `geometry`
+ - time-frequency geometry for the detected event.
+- `detection_score`
+ - confidence that there is an event at this location.
+- `class_scores`
+ - class ranking scores for the detected event.
+- `features`
+ - per-detection feature vector from the model.
+
+## Related pages
+
+- Python tutorial:
+ {doc}`../tutorials/integrate-with-a-python-pipeline`
+- API reference:
+ {doc}`api`
+- What BatDetect2 predicts:
+ {doc}`../explanation/what-batdetect2-predicts`
+- Features and embeddings:
+ {doc}`../explanation/extracted-features-and-embeddings`
diff --git a/docs/source/reference/evaluation-config.md b/docs/source/reference/evaluation-config.md
new file mode 100644
index 00000000..a79afeda
--- /dev/null
+++ b/docs/source/reference/evaluation-config.md
@@ -0,0 +1,46 @@
+# Evaluation config reference
+
+`EvaluationConfig` defines which evaluation tasks run and which plots they generate.
+
+Defined in `batdetect2.evaluate.config`.
+
+## Top-level fields
+
+- `tasks`
+ - list of task configs.
+
+## Built-in task families
+
+Current built-in tasks include:
+
+- `sound_event_detection`
+- `sound_event_classification`
+- `top_class_detection`
+- `clip_detection`
+- `clip_classification`
+
+## Shared task controls
+
+Common task-level controls include:
+
+- `prefix`
+- `ignore_start_end`
+
+Sound-event-style tasks also support:
+
+- `affinity`
+- `affinity_threshold`
+- `strict_match`
+
+## Default behavior
+
+The default evaluation config starts with:
+
+- sound event detection,
+- sound event classification.
+
+## Related pages
+
+- Choose and configure evaluation tasks: {doc}`../how_to/choose-and-configure-evaluation-tasks`
+- Evaluation concepts: {doc}`../explanation/evaluation-concepts-and-matching`
+- Evaluate CLI reference: {doc}`cli/evaluate`
diff --git a/docs/source/reference/index.md b/docs/source/reference/index.md
new file mode 100644
index 00000000..f54a8bb0
--- /dev/null
+++ b/docs/source/reference/index.md
@@ -0,0 +1,28 @@
+# Reference documentation
+
+Reference pages are the detailed lookup pages.
+
+Use this section when you need exact command options, setting names, output
+details, or Python API entries.
+
+```{toctree}
+:maxdepth: 1
+
+cli/index
+api
+detections
+model-config
+training-config
+logging-config
+inference-config
+evaluation-config
+outputs-config
+output-formats
+output-transforms
+data-sources
+preprocessing-config
+postprocess-config
+targets-config-workflow
+configs
+targets
+```
diff --git a/docs/source/reference/inference-config.md b/docs/source/reference/inference-config.md
new file mode 100644
index 00000000..1aeebbcd
--- /dev/null
+++ b/docs/source/reference/inference-config.md
@@ -0,0 +1,41 @@
+# Inference config reference
+
+`InferenceConfig` controls how files are clipped and batched during prediction-time workflows.
+
+Defined in `batdetect2.inference.config`.
+
+## Top-level fields
+
+- `loader`
+ - data-loader settings for inference.
+- `clipping`
+ - controls how recordings are split into clips before batching.
+
+## `loader`
+
+Current built-in loader field:
+
+- `batch_size` (int, default `8`)
+
+## `clipping`
+
+Fields:
+
+- `enabled` (bool)
+- `duration` (float, seconds)
+- `overlap` (float, seconds)
+- `max_empty` (float)
+- `discard_empty` (bool)
+
+## When to override this config
+
+Override `InferenceConfig` when:
+
+- long recordings need different clipping behavior,
+- you want to tune batch size for your hardware,
+- you need reproducible prediction settings across runs.
+
+## Related pages
+
+- Tune inference clipping: {doc}`../how_to/tune-inference-clipping`
+- Predict CLI reference: {doc}`cli/predict`
diff --git a/docs/source/reference/logging-config.md b/docs/source/reference/logging-config.md
new file mode 100644
index 00000000..33b36dc8
--- /dev/null
+++ b/docs/source/reference/logging-config.md
@@ -0,0 +1,46 @@
+# Logging config reference
+
+`AppLoggingConfig` controls which logger backend BatDetect2 uses for training,
+evaluation, and inference.
+
+Defined in `batdetect2.logging`.
+
+## Top-level fields
+
+- `train`
+ - logger config for training runs.
+- `evaluation`
+ - logger config for evaluation runs.
+- `inference`
+ - logger config for inference runs.
+
+## Built-in logger backends
+
+Current built-in logger backends are:
+
+- `csv`
+- `tensorboard`
+- `mlflow`
+- `dvclive`
+
+## Default behaviour
+
+By default:
+
+- training uses `csv`,
+- evaluation uses `csv`,
+- inference uses `csv`.
+
+With the CSV logger, training writes a `metrics.csv` file in the log folder.
+
+Example files live under `example_data/configs/`, including
+`example_data/configs/logging.yaml`.
+
+## Related pages
+
+- Train command reference:
+ {doc}`cli/train`
+- Evaluate command reference:
+ {doc}`cli/evaluate`
+- Run inference on a folder:
+ {doc}`../tutorials/run-inference-on-folder`
diff --git a/docs/source/reference/model-config.md b/docs/source/reference/model-config.md
new file mode 100644
index 00000000..1fce1bb2
--- /dev/null
+++ b/docs/source/reference/model-config.md
@@ -0,0 +1,37 @@
+# Model config reference
+
+`ModelConfig` defines the model stack used for training or fresh model
+construction.
+
+Defined in `batdetect2.models`.
+
+## Top-level fields
+
+- `samplerate`
+ - expected input sample rate.
+- `architecture`
+ - backbone network settings.
+- `preprocess`
+ - spectrogram preprocessing settings.
+- `postprocess`
+ - decoding and output filtering settings.
+
+## What this config controls
+
+Use `ModelConfig` when you want to change things like:
+
+- the backbone architecture,
+- the spectrogram settings used by the model,
+- postprocessing settings stored with the model.
+
+Example files live under `example_data/configs/`, including
+`example_data/configs/model.yaml`.
+
+## Related pages
+
+- Preprocessing config:
+ {doc}`preprocessing-config`
+- Postprocess config:
+ {doc}`postprocess-config`
+- Train command reference:
+ {doc}`cli/train`
diff --git a/docs/source/reference/output-formats.md b/docs/source/reference/output-formats.md
new file mode 100644
index 00000000..cb92b1ec
--- /dev/null
+++ b/docs/source/reference/output-formats.md
@@ -0,0 +1,75 @@
+# Output formats reference
+
+BatDetect2 currently supports several built-in output formatters.
+
+## `raw`
+
+Defined by `RawOutputConfig`.
+
+Best for rich structured outputs and round-tripping.
+
+Key fields:
+
+- `include_class_scores`
+- `include_features`
+- `include_geometry`
+
+Writes one NetCDF `.nc` file per clip.
+
+## `parquet`
+
+Defined by `ParquetOutputConfig`.
+
+Best for tabular analysis workflows.
+
+Key fields:
+
+- `include_class_scores`
+- `include_features`
+- `include_geometry`
+
+Writes a parquet table, typically `predictions.parquet`.
+
+## `soundevent`
+
+Defined by `SoundEventOutputConfig`.
+
+Best when you want a `PredictionSet` JSON workflow.
+
+Key fields:
+
+- `top_k`
+- `min_score`
+
+Writes a prediction-set JSON file.
+
+## `batdetect2`
+
+Defined by `BatDetect2OutputConfig`.
+
+This is the legacy-compatible BatDetect2 formatter.
+
+Key fields:
+
+- `event_name`
+- `annotation_note`
+- `write_detection_csv`
+- `write_cnn_features_csv`
+- `save_if_empty`
+- `preserve_audio_tree`
+- `include_file_path`
+
+By default it writes one `.json` file and one detection `.csv` file per
+recording, preserving the input audio directory layout under the output root.
+
+It can also write legacy `_cnn_features.csv` sidecars when
+`write_cnn_features_csv` is enabled.
+
+## Related pages
+
+- Outputs config:
+ {doc}`outputs-config`
+- Save predictions in different output formats:
+ {doc}`../how_to/save-predictions-in-different-output-formats`
+- Understanding formatted outputs:
+ {doc}`../explanation/interpreting-formatted-outputs`
diff --git a/docs/source/reference/output-transforms.md b/docs/source/reference/output-transforms.md
new file mode 100644
index 00000000..b1320656
--- /dev/null
+++ b/docs/source/reference/output-transforms.md
@@ -0,0 +1,37 @@
+# Output transforms reference
+
+Output transforms operate after decoding and before formatting.
+
+Defined in `batdetect2.outputs.transforms`.
+
+## Top-level config
+
+`OutputTransformConfig` contains:
+
+- `detection_transforms`
+- `clip_transforms`
+
+## Detection transforms
+
+Detection transforms operate on one detection at a time.
+
+Built-in examples include:
+
+- filtering by frequency,
+- filtering by duration.
+
+These can remove detections entirely if they fail the transform.
+
+## Clip transforms
+
+Clip transforms operate on the list of detections for one clip.
+
+Built-in examples include:
+
+- removing detections above Nyquist,
+- removing detections at clip edges.
+
+## Related pages
+
+- Outputs config: {doc}`outputs-config`
+- Understanding outputs: {doc}`../explanation/interpreting-formatted-outputs`
diff --git a/docs/source/reference/outputs-config.md b/docs/source/reference/outputs-config.md
new file mode 100644
index 00000000..6726d2c5
--- /dev/null
+++ b/docs/source/reference/outputs-config.md
@@ -0,0 +1,41 @@
+# Outputs config reference
+
+`OutputsConfig` controls two layers of prediction handling:
+
+- how detections are transformed before formatting,
+- how formatted outputs are written to disk.
+
+Defined in `batdetect2.outputs.config`.
+
+## Fields
+
+- `format`
+ - output format config.
+- `transform`
+ - output transform config.
+
+## Mental model
+
+The output workflow is:
+
+1. model outputs are decoded into detections,
+2. optional output transforms filter or adjust those detections,
+3. a formatter serializes them to disk.
+
+## Default behavior
+
+By default, the current stack uses the raw output formatter unless you override
+it.
+
+For CLI processing commands, omitting `--format` now leaves format selection to
+the loaded outputs config.
+If no outputs config is provided, the CLI still uses its command defaults.
+
+## Related pages
+
+- Output formats:
+ {doc}`output-formats`
+- Output transforms:
+ {doc}`output-transforms`
+- Save predictions in different output formats:
+ {doc}`../how_to/save-predictions-in-different-output-formats`
diff --git a/docs/source/reference/postprocess-config.md b/docs/source/reference/postprocess-config.md
new file mode 100644
index 00000000..fbff4982
--- /dev/null
+++ b/docs/source/reference/postprocess-config.md
@@ -0,0 +1,31 @@
+# Postprocess config reference
+
+`PostprocessConfig` controls how raw detector outputs are converted into final
+detections.
+
+Defined in `batdetect2.postprocess.config`.
+
+## Fields
+
+- `nms_kernel_size` (int > 0)
+ - neighborhood size for non-maximum suppression.
+- `detection_threshold` (float >= 0)
+ - minimum detection score to keep a candidate event.
+- `classification_threshold` (float >= 0)
+ - minimum class score used when assigning class tags.
+- `top_k_per_sec` (int > 0)
+ - maximum detection density per second.
+
+## Defaults
+
+- `detection_threshold`: `0.01`
+- `classification_threshold`: `0.1`
+- `top_k_per_sec`: `100`
+
+`nms_kernel_size` defaults to the library constant used by the NMS module.
+
+## Related pages
+
+- Threshold behaviour: {doc}`../explanation/postprocessing-and-thresholds`
+- Threshold tuning workflow: {doc}`../how_to/tune-detection-threshold`
+- CLI predict options: {doc}`cli/predict`
diff --git a/docs/source/reference/preprocessing-config.md b/docs/source/reference/preprocessing-config.md
new file mode 100644
index 00000000..599cfd34
--- /dev/null
+++ b/docs/source/reference/preprocessing-config.md
@@ -0,0 +1,61 @@
+# Preprocessing config reference
+
+This page summarizes preprocessing-related config objects used by batdetect2.
+
+## Audio loader config (`AudioConfig`)
+
+Defined in `batdetect2.audio.loader`.
+
+Fields:
+
+- `samplerate` (int): target audio sample rate in Hz.
+- `resample.enabled` (bool): whether to resample loaded audio.
+- `resample.method` (`poly` or `fourier`): resampling method.
+
+## Model preprocessing config (`PreprocessingConfig`)
+
+Defined in `batdetect2.preprocess.config`.
+
+Top-level fields:
+
+- `audio_transforms`: ordered waveform transforms.
+- `stft`: STFT parameters.
+- `frequencies`: spectrogram frequency range.
+- `spectrogram_transforms`: ordered spectrogram transforms.
+- `size`: final resize settings.
+
+### `audio_transforms` built-ins
+
+- `center_audio`
+- `scale_audio`
+- `fix_duration` (`duration` in seconds)
+
+### `stft` fields
+
+- `window_duration`
+- `window_overlap`
+- `window_fn`
+
+### `frequencies` fields
+
+- `min_freq`
+- `max_freq`
+
+### `spectrogram_transforms` built-ins
+
+- `pcen`
+- `scale_amplitude` (`scale: db|power`)
+- `spectral_mean_subtraction`
+- `peak_normalize`
+
+### `size` fields
+
+- `height`
+- `resize_factor`
+
+## Related pages
+
+- Audio preprocessing how-to: {doc}`../how_to/configure-audio-preprocessing`
+- Spectrogram preprocessing how-to:
+ {doc}`../how_to/configure-spectrogram-preprocessing`
+- Why consistency matters: {doc}`../explanation/preprocessing-consistency`
diff --git a/docs/source/reference/targets-config-workflow.md b/docs/source/reference/targets-config-workflow.md
new file mode 100644
index 00000000..9715a8ea
--- /dev/null
+++ b/docs/source/reference/targets-config-workflow.md
@@ -0,0 +1,67 @@
+# Targets config workflow reference
+
+This page summarizes the target-definition configuration used by batdetect2.
+
+## `TargetConfig`
+
+Defined in `batdetect2.targets.config`.
+
+Fields:
+
+- `detection_target`: one `TargetClassConfig` defining detection eligibility.
+- `classification_targets`: list of `TargetClassConfig` entries for class
+ encoding/decoding.
+- `roi`: ROI mapping config with `default` mapper and optional per-class
+ `overrides`.
+
+## `TargetClassConfig`
+
+Defined in `batdetect2.targets.classes`.
+
+Fields:
+
+- `name`: class label name.
+- `tags`: tag list used for matching (shortcut for `match_if`).
+- `match_if`: explicit condition config (alternative to the `tags` shortcut).
+- `assign_tags`: tags used when decoding this class.
+
+`tags` and `match_if` are mutually exclusive.
+
+## Supported condition config types
+
+Built from `batdetect2.data.conditions`.
+
+- `has_tag`
+- `has_all_tags`
+- `has_any_tag`
+- `duration`
+- `frequency`
+- `all_of`
+- `any_of`
+- `not`
+
+## ROI mapper config
+
+`roi.default` and each entry under `roi.overrides` support built-in
+mappers including:
+
+- `anchor_bbox`
+- `peak_energy_bbox`
+
+Key `anchor_bbox` fields:
+
+- `anchor`
+- `time_scale`
+- `frequency_scale`
+
+Top-level ROI mapping shape:
+
+- `default`: fallback mapper used for all classes.
+- `overrides`: optional mapping from class name to mapper config.
+
+## Related pages
+
+- Detection target setup: {doc}`../how_to/configure-target-definitions`
+- Class setup: {doc}`../how_to/define-target-classes`
+- ROI setup: {doc}`../how_to/configure-roi-mapping`
+- Concept overview: {doc}`../explanation/target-encoding-and-decoding`
diff --git a/docs/source/reference/targets.rst b/docs/source/reference/targets.rst
new file mode 100644
index 00000000..eeb1baec
--- /dev/null
+++ b/docs/source/reference/targets.rst
@@ -0,0 +1,5 @@
+Targets reference
+=================
+
+.. automodule:: batdetect2.targets
+ :members:
diff --git a/docs/source/reference/training-config.md b/docs/source/reference/training-config.md
new file mode 100644
index 00000000..86264463
--- /dev/null
+++ b/docs/source/reference/training-config.md
@@ -0,0 +1,50 @@
+# Training config reference
+
+`TrainingConfig` controls the training loop, optimisation, data loading, losses,
+and validation tasks.
+
+Defined in `batdetect2.train.config`.
+
+## Top-level fields
+
+- `train_loader`
+ - training data loading and clipping settings.
+- `val_loader`
+ - validation data loading and clipping settings.
+- `optimizer`
+ - optimiser type and learning rate settings.
+- `scheduler`
+ - learning-rate schedule settings.
+- `loss`
+ - detection, classification, and size loss settings.
+- `trainer`
+ - PyTorch Lightning trainer settings such as `max_epochs`.
+- `labels`
+ - target label generation settings.
+- `validation`
+ - evaluation tasks used during validation.
+- `checkpoints`
+ - checkpoint saving settings.
+
+## What this config controls
+
+Use `TrainingConfig` when you want to change things like:
+
+- batch size,
+- augmentation,
+- optimiser and scheduler settings,
+- number of epochs,
+- validation frequency,
+- checkpoint behaviour.
+
+Example files live under `example_data/configs/`, including
+`example_data/configs/training.yaml`.
+
+## Related pages
+
+- Evaluation config:
+ {doc}`evaluation-config`
+- Train command reference:
+ {doc}`cli/train`
+- Fine-tune from a checkpoint:
+ {doc}`../how_to/fine-tune-from-a-checkpoint`
diff --git a/docs/source/tutorials/evaluate-on-a-test-set.md b/docs/source/tutorials/evaluate-on-a-test-set.md
new file mode 100644
index 00000000..d1d512ed
--- /dev/null
+++ b/docs/source/tutorials/evaluate-on-a-test-set.md
@@ -0,0 +1,138 @@
+# Evaluate on a test set
+
+This tutorial shows how to evaluate a trained checkpoint on a held-out dataset
+and inspect the output metrics.
+
+Use it when you want to measure how a model performs on labelled data that was
+kept aside for testing.
+
+## Before you start
+
+You need:
+
+- a test dataset config,
+- a trained checkpoint or model alias.
+
+```{note}
+This page is for model evaluation.
+If you only want to run BatDetect2 on recordings, start with
+{doc}`run-inference-on-folder` instead.
+```
+
+## What you will do
+
+By the end of this tutorial you will have:
+
+- prepared a test dataset config,
+- run `batdetect2 evaluate`,
+- written evaluation metrics and result files,
+- identified the next pages for model choice and evaluation configuration.
+
+## 1. Create a test dataset config
+
+Evaluation needs a dataset config that points to the labelled data you want to
+use for testing.
+
+This is the same kind of dataset config used for training.
+It explicitly declares which data sources BatDetect2 should read, including the
+audio files and their annotations.
+
+For an example, see `example_data/dataset.yaml`.
+
+If you need help creating the dataset config, follow the dataset section in
+{doc}`train-a-custom-model`.
+For more detail on dataset source formats, see {doc}`../reference/data-sources`.
+
+Use a dataset that was not used for training or tuning.
+
+## 2. Run evaluation
+
+For a simple run, use:
+
+```bash
+batdetect2 evaluate \
+ path/to/test_dataset.yaml
+```
+
+If you do not pass `--model`, BatDetect2 uses the built-in default UK model.
+If you want to choose a different checkpoint, alias, or Hugging Face model, see
+{doc}`../how_to/choose-a-model`.
+
+If you want to save the results somewhere else, add `--output-dir`:
+
+```bash
+batdetect2 evaluate \
+ path/to/test_dataset.yaml \
+ --model path/to/model.ckpt \
+ --output-dir path/to/eval_outputs
+```
+
+This command loads the model, runs prediction on the test dataset, applies the
+evaluation tasks, and writes the results to the output directory.
+
+## 3. Check the output files
+
+By default, the CLI writes evaluation outputs to `outputs/evaluation`.
+
+With the default evaluation config, a run will usually create a folder like
+this:
+
+```text
+outputs/evaluation/
+ version_0/
+ metrics.csv
+ hparams.yaml
+```
+
+The most important file is `metrics.csv`.
+It contains the metric values computed for the evaluation run.
+
+A file like this might start like:
+
+```csv
+classification/average_precision/barbar,classification/average_precision/cneser,...,detection/average_precision
+0.898695170879364,0.9408193826675415,...,0.851219117641449
+```
+
+The exact columns depend on the evaluation tasks you run.
+
+The `hparams.yaml` file records the config used for the evaluation run.
+
+## 4. Expect extra plots and files when configs enable them
+
+You may also see extra outputs such as plots and saved predictions.
+
+For example, if you run evaluation with `example_data/configs/evaluation.yaml`,
+you should expect a richer output folder with:
+
+- `metrics.csv`
+- `hparams.yaml`
+- a `plots/` directory
+- a `predictions/` directory
+
+That config enables more evaluation tasks and plots than the default setup.
+
+So, depending on your evaluation config, you may see files such as:
+
+- precision-recall plots,
+- ROC curves,
+- confusion matrices,
+- example detection plots,
+- saved prediction files.
+
+If you want to control which tasks run and which plots are generated, see
+{doc}`../reference/evaluation-config` and
+{doc}`../how_to/choose-and-configure-evaluation-tasks`.
+
+## Common next steps
+
+- Choose a different model:
+ {doc}`../how_to/choose-a-model`
+- Configure evaluation tasks:
+ {doc}`../how_to/choose-and-configure-evaluation-tasks`
+- Interpret evaluation artifacts:
+ {doc}`../how_to/interpret-evaluation-outputs`
+- Learn the evaluation concepts:
+ {doc}`../explanation/evaluation-concepts-and-matching`
+- Check full evaluate options:
+ {doc}`../reference/cli/evaluate`
diff --git a/docs/source/tutorials/index.md b/docs/source/tutorials/index.md
new file mode 100644
index 00000000..6ad6582b
--- /dev/null
+++ b/docs/source/tutorials/index.md
@@ -0,0 +1,20 @@
+# Tutorials
+
+Welcome to the `batdetect2` tutorials.
+
+These tutorials walk you step by step through the most common use cases and
+workflows.
+They follow the simplest route and are a good place to start with `batdetect2`.
+
+Use {doc}`../how_to/index` for focused guides on specific tasks, or
+{doc}`../explanation/index` if you want to understand the concepts in more
+depth.
+
+```{toctree}
+:maxdepth: 1
+
+run-inference-on-folder
+train-a-custom-model
+evaluate-on-a-test-set
+integrate-with-a-python-pipeline
+```
diff --git a/docs/source/tutorials/integrate-with-a-python-pipeline.md b/docs/source/tutorials/integrate-with-a-python-pipeline.md
new file mode 100644
index 00000000..d31bb04b
--- /dev/null
+++ b/docs/source/tutorials/integrate-with-a-python-pipeline.md
@@ -0,0 +1,160 @@
+# Integrate with a Python pipeline
+
+This tutorial shows a simple Python workflow for loading audio, running BatDetect2, and inspecting the detections.
+
+Use it when you want to work directly in Python rather than through the CLI.
+
+If you mainly want to run the model on recordings, start with {doc}`run-inference-on-folder` instead.
+
+## Before you start
+
+You need:
+
+- BatDetect2 installed in your Python environment,
+- at least one input audio file.
+
+## What you will do
+
+By the end of this tutorial you will have:
+
+- created a `BatDetect2API` object,
+- run inference on one file,
+- inspected detections, scores, and features,
+- used lower-level audio and spectrogram methods for more control,
+- identified the next API workflows for batch processing, training, fine-tuning, and evaluation.
+
+## 1. Create the API instance
+
+For a first run, use the built-in default UK model:
+
+```python
+from batdetect2 import BatDetect2API
+
+# If you don't specify a checkpoint the default model will be loaded
+api = BatDetect2API.from_checkpoint()
+```
+
+If you want to use a different checkpoint later, see {doc}`../how_to/choose-a-model`.
+
+## 2. Run inference on one file
+
+`process_file` is the simplest Python entry point when you want one prediction object per recording.
+
+```python
+from batdetect2 import BatDetect2API
+
+api = BatDetect2API.from_checkpoint()
+prediction = api.process_file("path/to/audio.wav")
+
+for detection in prediction.detections:
+ top_class = api.get_top_class_name(detection)
+ score = detection.detection_score
+ print(top_class, score)
+```
+
+## 3. Understand the prediction objects
+
+`prediction` is a `ClipDetections` object.
+See {doc}`../reference/detections` for the full reference.
+
+Very briefly, `ClipDetections` represents all detections for one processed clip or recording.
+It includes:
+
+- the clip metadata,
+- the list of detections for that clip.
+
+Each item in `prediction.detections` is a `Detection` object.
+
+Each `Detection` includes:
+
+- the time-frequency geometry of the event,
+- a detection score,
+- the class scores,
+- a feature vector.
+
+## 4. Inspect detection score and class scores
+
+The detection score and the class scores answer different questions.
+
+- `detection_score` is about whether the model thinks there is a call at that time-frequency location.
+- `class_scores` are about which class the model prefers for that detected event.
+
+So a detection can have a fairly strong detection score, but still have a more uncertain class ranking.
+
+```python
+for detection in prediction.detections:
+ print("top class:", api.get_top_class_name(detection))
+ print("detection score:", detection.detection_score)
+ print("class scores:")
+ for class_name, score in api.get_class_scores(detection):
+ print(f" {class_name}: {score:.3f}")
+```
+
+If you want more detail on class-score inspection, see {doc}`../how_to/inspect-class-scores-in-python`.
+
+## 5. Inspect the detection features
+
+Each detection also carries a `features` vector.
+
+These are internal model features attached to the detection.
+They can be useful for things like:
+
+- exploratory visualisation,
+- clustering similar detections,
+- comparing detections across files,
+- building downstream analysis pipelines.
+
+They are useful descriptors, but they are not direct ecological labels by themselves.
+
+For more detail, see {doc}`../how_to/inspect-detection-features-in-python` and {doc}`../explanation/extracted-features-and-embeddings`.
+
+## 6. Use lower-level audio and spectrogram methods for more control
+
+If you want finer control over what gets processed and when, the API also lets you work step by step.
+
+For example, you can load the audio yourself, inspect the waveform length, generate the spectrogram, and then run detection on that spectrogram:
+
+```python
+from batdetect2 import BatDetect2API
+
+api = BatDetect2API.from_checkpoint()
+
+audio = api.load_audio("path/to/audio.wav")
+print(audio.shape)
+
+spec = api.generate_spectrogram(audio)
+print(spec.shape)
+
+detections = api.process_spectrogram(spec)
+print(len(detections))
+```
+
+This is helpful when you want to:
+
+- inspect the loaded audio before inference,
+- inspect the generated spectrogram,
+- control which audio segment is processed,
+- run only part of the pipeline in custom code.
+
+You can also call `process_audio(audio)` directly if you already have the waveform array in memory.
+
+## 7. Use the wider API workflows
+
+The Python API is not only for single-file inference.
+It also exposes methods for batch processing, training, evaluation, and fine-tuning.
+
+Examples:
+
+- `process_files(...)` for batch processing from Python,
+- `train(...)` for training,
+- `evaluate(...)` for evaluation,
+- `finetune(...)` for fine-tuning.
+
+Useful next pages:
+
+- Choose a different model: {doc}`../how_to/choose-a-model`
+- Run batch predictions: {doc}`../how_to/run-batch-predictions`
+- Train a custom model: {doc}`train-a-custom-model`
+- Evaluate on a test set: {doc}`evaluate-on-a-test-set`
+- Fine-tune from a checkpoint: {doc}`../how_to/fine-tune-from-a-checkpoint`
+- API reference: {doc}`../reference/api`
diff --git a/docs/source/tutorials/run-inference-on-folder.md b/docs/source/tutorials/run-inference-on-folder.md
new file mode 100644
index 00000000..c4cf5e1e
--- /dev/null
+++ b/docs/source/tutorials/run-inference-on-folder.md
@@ -0,0 +1,217 @@
+# Run BatDetect2 on a folder of audio files
+
+This tutorial shows how to run BatDetect2 on a folder of recordings from the command line.
+
+Use it when you want a first pass over a folder of audio recordings and want to see what BatDetect2 finds.
+
+If you want to follow the tutorial exactly, you can use the example recordings that come with the repository.
+
+## Before you start
+
+You need:
+
+- BatDetect2 installed.
+- A folder containing supported audio files.
+- A place to save the results.
+
+If you have not installed BatDetect2 yet, start with {doc}`../getting_started`.
+
+## Optional: use the repository example files
+
+If you want to follow the steps with the same paths shown here, clone the repository and move into it:
+
+```bash
+git clone https://github.com/macaodha/batdetect2.git
+cd batdetect2
+```
+
+Then you can use these example paths from the repository root.
+
+## What you will do
+
+By the end of this tutorial you will have:
+
+- run `batdetect2 process directory`,
+- saved predictions to disk,
+- checked that BatDetect2 wrote the files you expected,
+- tried a second run with a higher detection threshold,
+- identified the next pages to use if you want to customise the run.
+
+## 1. Choose your input and output folders
+
+Pick:
+
+- the folder containing your audio files,
+- an output folder where BatDetect2 should save results.
+
+Example layout:
+
+```text
+project/
+ audio/
+ file_001.wav
+ file_002.wav
+ outputs/
+```
+
+If `outputs/` does not exist yet, that is fine.
+BatDetect2 can create it.
+
+If you are using the repository example files, your layout already looks like this:
+
+```text
+batdetect2/
+ example_data/
+ audio/
+ 20170701_213954-MYOMYS-LR_0_0.5.wav
+ 20180530_213516-EPTSER-LR_0_0.5.wav
+ 20180627_215323-RHIFER-LR_0_0.5.wav
+```
+
+## 2. Run BatDetect2 on the folder
+
+For a first run, use the built-in default UK model:
+
+```bash
+batdetect2 process directory \
+ path/to/audio \
+ path/to/outputs
+```
+
+If you are using the repository example files, run:
+
+```bash
+batdetect2 process directory \
+ example_data/audio \
+ example_outputs/first_run
+```
+
+What this does:
+
+- looks for supported audio files in `path/to/audio`,
+- runs the model on each recording,
+- saves the results in `path/to/outputs`.
+
+You do not need to choose a model for this first run.
+If you do nothing, BatDetect2 uses the built-in default UK model.
+
+If you want to use a different model later, see {doc}`../how_to/choose-a-model`.
+
+## 3. Check the output files
+
+After the command finishes, look in your output folder.
+
+By default, the CLI writes predictions in the `batdetect2` output format.
+This is a JSON-based format used for BatDetect2-style outputs.
+
+With the default settings, you will usually see one `.json` file and one `_detections.csv` file per recording.
+
+For the repository example run, that means files like:
+
+```text
+example_outputs/first_run/
+ 20170701_213954-MYOMYS-LR_0_0.5.wav.json
+ 20170701_213954-MYOMYS-LR_0_0.5.wav_detections.csv
+ 20180530_213516-EPTSER-LR_0_0.5.wav.json
+ 20180530_213516-EPTSER-LR_0_0.5.wav_detections.csv
+ 20180627_215323-RHIFER-LR_0_0.5.wav.json
+ 20180627_215323-RHIFER-LR_0_0.5.wav_detections.csv
+```
+
+One of the JSON files will look roughly like this:
+
+```json
+{
+ "annotated": false,
+ "annotation": [
+ {
+ "class": "Rhinolophus ferrumequinum",
+ "class_prob": 0.889,
+ "det_prob": 0.889,
+ "end_time": 0.0668,
+ "event": "Echolocation",
+ "high_freq": 84857,
+ "individual": "-1",
+ "low_freq": 67578,
+ "start_time": 0.0
+ }
+ ]
+}
+```
+
+Very briefly:
+
+- `annotated: false` means this is a prediction file, not a reviewed annotation file.
+- `annotation` holds the list of detections.
+- Each detection includes a predicted class, detection score, class score, time bounds, and frequency bounds.
+
+For more detail, see {doc}`../explanation/interpreting-formatted-outputs`.
+If you want to save results in another format, see {doc}`../how_to/save-predictions-in-different-output-formats`.
+
+## 4. Run the same folder with a higher threshold
+
+If you want, you can also run the same folder again with a higher detection threshold and save that run in a separate output folder.
+
+```bash
+batdetect2 process directory \
+ path/to/audio \
+ path/to/outputs_threshold_05 \
+ --detection-threshold 0.5
+```
+
+Concrete example:
+
+```bash
+batdetect2 process directory \
+ example_data/audio \
+ example_outputs/threshold_05 \
+ --detection-threshold 0.5
+```
+
+Keeping this in a separate folder makes it easy to compare runs later.
+
+## 5. Run the model on a list of recordings
+
+If you only want to process selected recordings, use `file_list`.
+The list file should contain one recording path per line.
+
+Example `audio_files.txt`:
+
+```text
+path/to/audio/file_001.wav
+path/to/audio/file_002.wav
+path/to/audio/file_010.wav
+```
+
+Repository example:
+
+```text
+example_data/audio/20170701_213954-MYOMYS-LR_0_0.5.wav
+example_data/audio/20180530_213516-EPTSER-LR_0_0.5.wav
+```
+
+Then run:
+
+```bash
+batdetect2 process file_list \
+ path/to/audio_files.txt \
+ path/to/selected_outputs
+```
+
+Concrete example:
+
+```bash
+batdetect2 process file_list \
+ example_data/audio_files.txt \
+ example_outputs/selected_outputs
+```
+
+This is useful when your recordings are spread across folders, or when you only want to run a chosen subset.
+
+## Common next steps
+
+- If your recordings are not all in one folder, or you want to compare input modes, see {doc}`../how_to/choose-an-inference-input-mode`.
+- If you want to save results in another format, see {doc}`../how_to/save-predictions-in-different-output-formats`.
+- If you want to choose a different model, see {doc}`../how_to/choose-a-model`.
+- If you already write code and want more control from Python, see {doc}`integrate-with-a-python-pipeline`.
+- If you want the full command reference, including `--model`, see {doc}`../reference/cli/predict`.
diff --git a/docs/source/tutorials/train-a-custom-model.md b/docs/source/tutorials/train-a-custom-model.md
new file mode 100644
index 00000000..c1f2a34d
--- /dev/null
+++ b/docs/source/tutorials/train-a-custom-model.md
@@ -0,0 +1,208 @@
+# Train a custom model
+
+This tutorial walks through a first custom training run using your own annotations.
+
+Use it when you already have labelled recordings and want to train a model for your own data.
+
+## Before you start
+
+You need:
+
+- BatDetect2 installed.
+- labelled recordings and annotations.
+
+```{note}
+This is not the first page to start with if you only want to run the existing
+model on recordings.
+Use {doc}`run-inference-on-folder` for that.
+```
+
+## Optional: use the repository example files
+
+If you want to follow the steps with the same files shown here, clone the repository and move into it:
+
+```bash
+git clone https://github.com/macaodha/batdetect2.git
+cd batdetect2
+```
+
+## What you will do
+
+By the end of this tutorial you will have:
+
+- created a dataset config,
+- defined a targets config,
+- started a training run,
+- checked the checkpoint and log outputs,
+- identified the next pages for evaluation and customisation.
+
+## 1. Create a dataset config
+
+The dataset config explicitly declares what data you want to use for training.
+It is a YAML file.
+If YAML is new to you, see [Learn YAML in Y Minutes](https://learnxinyminutes.com/yaml/).
+
+In the dataset config, you list one or more data sources.
+Each source tells `batdetect2` where the audio recordings live and where the matching annotations are stored.
+
+BatDetect2 can read annotations from different source formats.
+In this example, we use the example data in the `batdetect2` format.
+
+Use `example_data/dataset.yaml` as a reference:
+
+```yaml
+name: example dataset
+description: Only for demonstration purposes
+sources:
+ - format: batdetect2
+ name: Example Data
+ description: Examples included for testing batdetect2
+ annotations_dir: example_data/anns
+ audio_dir: example_data/audio
+```
+
+For your own project, the main thing to change is the file paths.
+If you have several collections of recordings, you can add more than one source to the same dataset config.
+That lets you describe the full training data you want to use in one place.
+
+If you need more detail on dataset source formats, see {doc}`../reference/data-sources`.
+
+## 2. Define a targets config
+
+The targets config tells BatDetect2 how to turn your annotations into training targets.
+
+It defines two main things:
+
+- what should count as a detection,
+- which classes the model should learn to predict.
+
+In practice, this means the targets config maps the labels in your annotations to the detection and classification outputs used during training.
+
+Use `example_data/targets.yaml` as a reference:
+
+```yaml
+detection_target:
+ name: bat
+ match_if:
+ name: all_of
+ conditions:
+ - name: has_tag
+ tag: { key: event, value: Echolocation }
+ - name: not
+ condition:
+ name: has_tag
+ tag: { key: class, value: Unknown }
+ assign_tags:
+ - key: class
+ value: Bat
+
+classification_targets:
+ - name: myomys
+ tags:
+ - key: class
+ value: Myotis mystacinus
+ - name: pippip
+ tags:
+ - key: class
+ value: Pipistrellus pipistrellus
+```
+
+For your own project, update the matching rules and class definitions so they fit your labels.
+
+In this example:
+
+- `detection_target` says that echolocation calls should be treated as detections,
+- `classification_targets` define the classes the model should predict.
+
+It is worth taking a bit of time over this file, because your targets config decides what the model is actually being asked to learn.
+
+If you need help with that, see {doc}`../how_to/configure-target-definitions` and {doc}`../reference/targets-config-workflow`.
+
+## 3. Run a first training command
+
+For a first run, keep the command simple:
+
+```bash
+batdetect2 train \
+ path/to/train_dataset.yaml \
+ --val-dataset path/to/val_dataset.yaml \
+ --targets path/to/targets.yaml
+```
+
+If you are using the repository example files, run:
+
+```bash
+batdetect2 train \
+ example_data/dataset.yaml \
+ --val-dataset example_data/dataset.yaml \
+ --targets example_data/targets.yaml
+```
+
+This example uses the same dataset for both training and validation, purely to keep things simple.
+For real training runs, you usually want separate training and validation datasets.
+
+This uses the built-in default model and training settings.
+If you want to change the model architecture later, see {doc}`../reference/model-config`.
+If you want to change optimiser settings, batch size, epochs, or checkpoint behaviour, see {doc}`../reference/training-config`.
+
+## 4. Check the training outputs
+
+After the run starts, `batdetect2` should write checkpoints and logs.
+
+By default, training logs are written with the CSV logger.
+That means you should see a log folder with a `metrics.csv` file.
+
+A typical layout looks like this:
+
+```text
+outputs/
+ checkpoints/
+ epoch=19-step=20.ckpt
+ logs/
+ version_0/
+ metrics.csv
+ hparams.yaml
+ training_artifacts/
+ train_dataset.yaml
+ val_dataset.yaml
+ targets.yaml
+ train_class_summary.csv
+ val_class_summary.csv
+```
+
+The checkpoint is the trained model you can use later for inference, evaluation, or sharing with someone else.
+
+The files in `training_artifacts/` record which datasets and targets were used for the run.
+The `hparams.yaml` file records the full training setup, including the configs used for the model, training, and other parts of the run.
+
+The `metrics.csv` file stores one row per validation epoch.
+It includes training losses as well as validation losses and metrics such as:
+
+```csv
+classification/mean_average_precision,detection/average_precision,epoch,total_loss/val
+0.10041624307632446,0.3697187900543213,0,4070.3515625
+0.11328697204589844,0.346899151802063,1,3941.6455078125
+0.1388484090566635,0.36171725392341614,2,3776.323974609375
+```
+
+You may also see class-specific metrics in extra columns.
+
+The more detailed metrics are computed from the validation set.
+If you do not provide `--val-dataset`, those validation metrics will not appear.
+
+Other logger backends are also supported, including TensorBoard, MLflow, and DVCLive.
+See {doc}`../reference/logging-config` if you want to change that.
+
+## Use the trained model
+
+You can now use the trained checkpoint in BatDetect2, or share it with someone else to use in their own runs.
+If you want to load it for inference or evaluation, see {doc}`../how_to/choose-a-model`.
+
+## Common next steps
+
+- Evaluate the trained checkpoint: {doc}`evaluate-on-a-test-set`
+- Fine-tune from a checkpoint: {doc}`../how_to/fine-tune-from-a-checkpoint`
+- Configure targets in more detail: {doc}`../how_to/configure-target-definitions`
+- Configure audio preprocessing: {doc}`../how_to/configure-audio-preprocessing`
+- Configure spectrogram preprocessing: {doc}`../how_to/configure-spectrogram-preprocessing`
+- Check full train options: {doc}`../reference/cli/train`
diff --git a/environment.yml b/environment.yml
deleted file mode 100644
index a21373e4..00000000
--- a/environment.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-name: batdetect2
-channels:
- - defaults
- - conda-forge
- - pytorch
- - nvidia
-dependencies:
- - python==3.10
- - matplotlib
- - pandas
- - scikit-learn
- - numpy
- - pytorch
- - scipy
- - torchvision
- - librosa
- - torchaudio
diff --git a/example_data/anns/20170701_213954-MYOMYS-LR_0_0.5.wav.json b/example_data/anns/20170701_213954-MYOMYS-LR_0_0.5.wav.json
new file mode 100644
index 00000000..d613ff1b
--- /dev/null
+++ b/example_data/anns/20170701_213954-MYOMYS-LR_0_0.5.wav.json
@@ -0,0 +1,177 @@
+{
+ "annotated": true,
+ "annotation": [
+ {
+ "class": "Myotis mystacinus",
+ "class_prob": 0.55,
+ "det_prob": 0.658,
+ "end_time": 0.028,
+ "event": "Echolocation",
+ "high_freq": 107492,
+ "individual": "-1",
+ "low_freq": 33203,
+ "start_time": 0.0225
+ },
+ {
+ "class": "Myotis mystacinus",
+ "class_prob": 0.679,
+ "det_prob": 0.742,
+ "end_time": 0.0583,
+ "event": "Echolocation",
+ "high_freq": 113192,
+ "individual": "-1",
+ "low_freq": 28046,
+ "start_time": 0.0525
+ },
+ {
+ "class": "Myotis mystacinus",
+ "class_prob": 0.488,
+ "det_prob": 0.585,
+ "end_time": 0.1211,
+ "event": "Echolocation",
+ "high_freq": 107008,
+ "individual": "-1",
+ "low_freq": 33203,
+ "start_time": 0.1155
+ },
+ {
+ "class": "Pipistrellus pipistrellus",
+ "class_prob": 0.46,
+ "det_prob": 0.503,
+ "end_time": 0.145,
+ "event": "Echolocation",
+ "high_freq": 59621,
+ "individual": "-1",
+ "low_freq": 48671,
+ "start_time": 0.1385
+ },
+ {
+ "class": "Myotis mystacinus",
+ "class_prob": 0.656,
+ "det_prob": 0.704,
+ "end_time": 0.1513,
+ "event": "Echolocation",
+ "high_freq": 113493,
+ "individual": "-1",
+ "low_freq": 27187,
+ "start_time": 0.1445
+ },
+ {
+ "class": "Myotis mystacinus",
+ "class_prob": 0.549,
+ "det_prob": 0.63,
+ "end_time": 0.2076,
+ "event": "Echolocation",
+ "high_freq": 108573,
+ "individual": "-1",
+ "low_freq": 34062,
+ "start_time": 0.2025
+ },
+ {
+ "class": "Pipistrellus pipistrellus",
+ "class_prob": 0.503,
+ "det_prob": 0.528,
+ "end_time": 0.224,
+ "event": "Echolocation",
+ "high_freq": 57361,
+ "individual": "-1",
+ "low_freq": 48671,
+ "start_time": 0.2195
+ },
+ {
+ "class": "Myotis mystacinus",
+ "class_prob": 0.672,
+ "det_prob": 0.737,
+ "end_time": 0.2374,
+ "event": "Echolocation",
+ "high_freq": 116415,
+ "individual": "-1",
+ "low_freq": 27187,
+ "start_time": 0.2315
+ },
+ {
+ "class": "Pipistrellus pipistrellus",
+ "class_prob": 0.65,
+ "det_prob": 0.736,
+ "end_time": 0.3058,
+ "event": "Echolocation",
+ "high_freq": 56624,
+ "individual": "-1",
+ "low_freq": 48671,
+ "start_time": 0.2995
+ },
+ {
+ "class": "Myotis mystacinus",
+ "class_prob": 0.687,
+ "det_prob": 0.724,
+ "end_time": 0.3312,
+ "event": "Echolocation",
+ "high_freq": 116522,
+ "individual": "-1",
+ "low_freq": 27187,
+ "start_time": 0.3245
+ },
+ {
+ "class": "Myotis mystacinus",
+ "class_prob": 0.547,
+ "det_prob": 0.599,
+ "end_time": 0.3762,
+ "event": "Echolocation",
+ "high_freq": 108530,
+ "individual": "-1",
+ "low_freq": 34062,
+ "start_time": 0.3705
+ },
+ {
+ "class": "Myotis mystacinus",
+ "class_prob": 0.664,
+ "det_prob": 0.711,
+ "end_time": 0.4184,
+ "event": "Echolocation",
+ "high_freq": 115775,
+ "individual": "-1",
+ "low_freq": 28906,
+ "start_time": 0.4125
+ },
+ {
+ "class": "Myotis mystacinus",
+ "class_prob": 0.544,
+ "det_prob": 0.598,
+ "end_time": 0.4423,
+ "event": "Echolocation",
+ "high_freq": 104197,
+ "individual": "-1",
+ "low_freq": 36640,
+ "start_time": 0.4365
+ },
+ {
+ "class": "Pipistrellus pipistrellus",
+ "class_prob": 0.73,
+ "det_prob": 0.78,
+ "end_time": 0.4803,
+ "event": "Echolocation",
+ "high_freq": 58290,
+ "individual": "-1",
+ "low_freq": 48671,
+ "start_time": 0.4745
+ },
+ {
+ "class": "Myotis mystacinus",
+ "class_prob": 0.404,
+ "det_prob": 0.449,
+ "end_time": 0.4947,
+ "event": "Echolocation",
+ "high_freq": 111336,
+ "individual": "-1",
+ "low_freq": 36640,
+ "start_time": 0.4895
+ }
+ ],
+ "class_name": "Myotis mystacinus",
+ "duration": 0.5,
+ "id": "20170701_213954-MYOMYS-LR_0_0.5.wav",
+ "issues": false,
+ "notes": "Automatically generated. Example data do not assume correct!",
+ "time_exp": 1
+}
+
diff --git a/example_data/anns/20180530_213516-EPTSER-LR_0_0.5.wav.json b/example_data/anns/20180530_213516-EPTSER-LR_0_0.5.wav.json
new file mode 100644
index 00000000..08a83eff
--- /dev/null
+++ b/example_data/anns/20180530_213516-EPTSER-LR_0_0.5.wav.json
@@ -0,0 +1,231 @@
+{
+ "annotated": true,
+ "annotation": [
+ {
+ "class": "Eptesicus serotinus",
+ "class_prob": 0.744,
+ "det_prob": 0.77,
+ "end_time": 0.0162,
+ "event": "Echolocation",
+ "high_freq": 65592,
+ "individual": "-1",
+ "low_freq": 27187,
+ "start_time": 0.0085
+ },
+ {
+ "class": "Pipistrellus pipistrellus",
+ "class_prob": 0.453,
+ "det_prob": 0.459,
+ "end_time": 0.0255,
+ "event": "Echolocation",
+ "high_freq": 59730,
+ "individual": "-1",
+ "low_freq": 46093,
+ "start_time": 0.0205
+ },
+ {
+ "class": "Pipistrellus pipistrellus",
+ "class_prob": 0.668,
+ "det_prob": 0.68,
+ "end_time": 0.0499,
+ "event": "Echolocation",
+ "high_freq": 57080,
+ "individual": "-1",
+ "low_freq": 46953,
+ "start_time": 0.0445
+ },
+ {
+ "class": "Pipistrellus pipistrellus",
+ "class_prob": 0.729,
+ "det_prob": 0.739,
+ "end_time": 0.109,
+ "event": "Echolocation",
+ "high_freq": 62808,
+ "individual": "-1",
+ "low_freq": 44375,
+ "start_time": 0.1025
+ },
+ {
+ "class": "Pipistrellus pipistrellus",
+ "class_prob": 0.591,
+ "det_prob": 0.602,
+ "end_time": 0.1311,
+ "event": "Echolocation",
+ "high_freq": 56848,
+ "individual": "-1",
+ "low_freq": 46953,
+ "start_time": 0.1255
+ },
+ {
+ "class": "Eptesicus serotinus",
+ "class_prob": 0.696,
+ "det_prob": 0.735,
+ "end_time": 0.1694,
+ "event": "Echolocation",
+ "high_freq": 67238,
+ "individual": "-1",
+ "low_freq": 28046,
+ "start_time": 0.1625
+ },
+ {
+ "class": "Pipistrellus pipistrellus",
+ "class_prob": 0.617,
+ "det_prob": 0.643,
+ "end_time": 0.2031,
+ "event": "Echolocation",
+ "high_freq": 57047,
+ "individual": "-1",
+ "low_freq": 46093,
+ "start_time": 0.1975
+ },
+ {
+ "class": "Pipistrellus pipistrellus",
+ "class_prob": 0.507,
+ "det_prob": 0.515,
+ "end_time": 0.2222,
+ "event": "Echolocation",
+ "high_freq": 58214,
+ "individual": "-1",
+ "low_freq": 47812,
+ "start_time": 0.2175
+ },
+ {
+ "class": "Eptesicus serotinus",
+ "class_prob": 0.201,
+ "det_prob": 0.372,
+ "end_time": 0.2839,
+ "event": "Echolocation",
+ "high_freq": 55667,
+ "individual": "-1",
+ "low_freq": 33203,
+ "start_time": 0.2775
+ },
+ {
+ "class": "Pipistrellus pipistrellus",
+ "class_prob": 0.749,
+ "det_prob": 0.78,
+ "end_time": 0.2918,
+ "event": "Echolocation",
+ "high_freq": 60611,
+ "individual": "-1",
+ "low_freq": 45234,
+ "start_time": 0.2855
+ },
+ {
+ "class": "Eptesicus serotinus",
+ "class_prob": 0.239,
+ "det_prob": 0.325,
+ "end_time": 0.3148,
+ "event": "Echolocation",
+ "high_freq": 54100,
+ "individual": "-1",
+ "low_freq": 30625,
+ "start_time": 0.3085
+ },
+ {
+ "class": "Eptesicus serotinus",
+ "class_prob": 0.621,
+ "det_prob": 0.652,
+ "end_time": 0.3227,
+ "event": "Echolocation",
+ "high_freq": 63504,
+ "individual": "-1",
+ "low_freq": 27187,
+ "start_time": 0.3155
+ },
+ {
+ "class": "Eptesicus serotinus",
+ "class_prob": 0.32,
+ "det_prob": 0.414,
+ "end_time": 0.3546,
+ "event": "Echolocation",
+ "high_freq": 37589,
+ "individual": "-1",
+ "low_freq": 27187,
+ "start_time": 0.3455
+ },
+ {
+ "class": "Pipistrellus pipistrellus",
+ "class_prob": 0.69,
+ "det_prob": 0.697,
+ "end_time": 0.3776,
+ "event": "Echolocation",
+ "high_freq": 57262,
+ "individual": "-1",
+ "low_freq": 46093,
+ "start_time": 0.3735
+ },
+ {
+ "class": "Eptesicus serotinus",
+ "class_prob": 0.34,
+ "det_prob": 0.415,
+ "end_time": 0.4069,
+ "event": "Echolocation",
+ "high_freq": 52025,
+ "individual": "-1",
+ "low_freq": 31484,
+ "start_time": 0.4005
+ },
+ {
+ "class": "Eptesicus serotinus",
+ "class_prob": 0.386,
+ "det_prob": 0.445,
+ "end_time": 0.4178,
+ "event": "Echolocation",
+ "high_freq": 53951,
+ "individual": "-1",
+ "low_freq": 27187,
+ "start_time": 0.4115
+ },
+ {
+ "class": "Eptesicus serotinus",
+ "class_prob": 0.393,
+ "det_prob": 0.517,
+ "end_time": 0.4359,
+ "event": "Echolocation",
+ "high_freq": 51724,
+ "individual": "-1",
+ "low_freq": 30625,
+ "start_time": 0.4305
+ },
+ {
+ "class": "Eptesicus serotinus",
+ "class_prob": 0.332,
+ "det_prob": 0.396,
+ "end_time": 0.4502,
+ "event": "Echolocation",
+ "high_freq": 58310,
+ "individual": "-1",
+ "low_freq": 27187,
+ "start_time": 0.4435
+ },
+ {
+ "class": "Pipistrellus pipistrellus",
+ "class_prob": 0.45,
+ "det_prob": 0.456,
+ "end_time": 0.4638,
+ "event": "Echolocation",
+ "high_freq": 55714,
+ "individual": "-1",
+ "low_freq": 46093,
+ "start_time": 0.4575
+ },
+ {
+ "class": "Eptesicus serotinus",
+ "class_prob": 0.719,
+ "det_prob": 0.766,
+ "end_time": 0.4824,
+ "event": "Echolocation",
+ "high_freq": 66101,
+ "individual": "-1",
+ "low_freq": 28046,
+ "start_time": 0.4755
+ }
+ ],
+ "class_name": "Pipistrellus pipistrellus",
+ "duration": 0.5,
+ "id": "20180530_213516-EPTSER-LR_0_0.5.wav",
+ "issues": false,
+ "notes": "Automatically generated. Example data do not assume correct!",
+ "time_exp": 1
+}
diff --git a/example_data/anns/20180627_215323-RHIFER-LR_0_0.5.wav.json b/example_data/anns/20180627_215323-RHIFER-LR_0_0.5.wav.json
new file mode 100644
index 00000000..670b2fae
--- /dev/null
+++ b/example_data/anns/20180627_215323-RHIFER-LR_0_0.5.wav.json
@@ -0,0 +1,111 @@
+{
+ "annotated": true,
+ "annotation": [
+ {
+ "class": "Rhinolophus ferrumequinum",
+ "class_prob": 0.407,
+ "det_prob": 0.407,
+ "end_time": 0.066,
+ "event": "Echolocation",
+ "high_freq": 84254,
+ "individual": "-1",
+ "low_freq": 68437,
+ "start_time": 0.0245
+ },
+ {
+ "class": "Rhinolophus ferrumequinum",
+ "class_prob": 0.759,
+ "det_prob": 0.76,
+ "end_time": 0.1576,
+ "event": "Echolocation",
+ "high_freq": 84048,
+ "individual": "-1",
+ "low_freq": 68437,
+ "start_time": 0.0955
+ },
+ {
+ "class": "Rhinolophus ferrumequinum",
+ "class_prob": 0.754,
+ "det_prob": 0.755,
+ "end_time": 0.269,
+ "event": "Echolocation",
+ "high_freq": 83768,
+ "individual": "-1",
+ "low_freq": 68437,
+ "start_time": 0.2095
+ },
+ {
+ "class": "Rhinolophus ferrumequinum",
+ "class_prob": 0.495,
+ "det_prob": 0.495,
+ "end_time": 0.2869,
+ "event": "Echolocation",
+ "high_freq": 84055,
+ "individual": "-1",
+ "low_freq": 68437,
+ "start_time": 0.2425
+ },
+ {
+ "class": "Rhinolophus ferrumequinum",
+ "class_prob": 0.73,
+ "det_prob": 0.73,
+ "end_time": 0.3631,
+ "event": "Echolocation",
+ "high_freq": 84280,
+ "individual": "-1",
+ "low_freq": 68437,
+ "start_time": 0.3055
+ },
+ {
+ "class": "Rhinolophus ferrumequinum",
+ "class_prob": 0.648,
+ "det_prob": 0.649,
+ "end_time": 0.3798,
+ "event": "Echolocation",
+ "high_freq": 83030,
+ "individual": "-1",
+ "low_freq": 68437,
+ "start_time": 0.3215
+ },
+ {
+ "class": "Rhinolophus ferrumequinum",
+ "class_prob": 0.678,
+ "det_prob": 0.678,
+ "end_time": 0.4611,
+ "event": "Echolocation",
+ "high_freq": 84020,
+ "individual": "-1",
+ "low_freq": 68437,
+ "start_time": 0.4065
+ },
+ {
+ "class": "Rhinolophus ferrumequinum",
+ "class_prob": 0.717,
+ "det_prob": 0.718,
+ "end_time": 0.4987,
+ "event": "Echolocation",
+ "high_freq": 83603,
+ "individual": "-1",
+ "low_freq": 68437,
+ "start_time": 0.4365
+ },
+ {
+ "class": "Rhinolophus ferrumequinum",
+ "class_prob": 0.662,
+ "det_prob": 0.662,
+ "end_time": 0.5503,
+ "event": "Echolocation",
+ "high_freq": 83710,
+ "individual": "-1",
+ "low_freq": 68437,
+ "start_time": 0.4975
+ }
+ ],
+ "class_name": "Rhinolophus ferrumequinum",
+ "duration": 0.5,
+ "id": "20180627_215323-RHIFER-LR_0_0.5.wav",
+ "issues": false,
+ "notes": "Automatically generated. Example data do not assume correct!",
+ "time_exp": 1
+}
+
diff --git a/example_data/audio_files.txt b/example_data/audio_files.txt
new file mode 100644
index 00000000..c53ad8a8
--- /dev/null
+++ b/example_data/audio_files.txt
@@ -0,0 +1,2 @@
+example_data/audio/20170701_213954-MYOMYS-LR_0_0.5.wav
+example_data/audio/20180530_213516-EPTSER-LR_0_0.5.wav
diff --git a/example_data/configs/audio.yaml b/example_data/configs/audio.yaml
new file mode 100644
index 00000000..01005728
--- /dev/null
+++ b/example_data/configs/audio.yaml
@@ -0,0 +1,4 @@
+samplerate: 256000
+resample:
+ enabled: true
+ method: poly
diff --git a/example_data/configs/evaluation.yaml b/example_data/configs/evaluation.yaml
new file mode 100644
index 00000000..f2577ae7
--- /dev/null
+++ b/example_data/configs/evaluation.yaml
@@ -0,0 +1,37 @@
+tasks:
+ - name: sound_event_detection
+ metrics:
+ - name: average_precision
+ - name: roc_auc
+ plots:
+ - name: pr_curve
+ - name: score_distribution
+ - name: example_detection
+ - name: sound_event_classification
+ metrics:
+ - name: average_precision
+ - name: roc_auc
+ plots:
+ - name: pr_curve
+ - name: top_class_detection
+ metrics:
+ - name: average_precision
+ plots:
+ - name: pr_curve
+ - name: confusion_matrix
+ - name: example_classification
+ - name: clip_detection
+ metrics:
+ - name: average_precision
+ - name: roc_auc
+ plots:
+ - name: pr_curve
+ - name: roc_curve
+ - name: score_distribution
+ - name: clip_classification
+ metrics:
+ - name: average_precision
+ - name: roc_auc
+ plots:
+ - name: pr_curve
+ - name: roc_curve
diff --git a/example_data/configs/inference.yaml b/example_data/configs/inference.yaml
new file mode 100644
index 00000000..f9a3078c
--- /dev/null
+++ b/example_data/configs/inference.yaml
@@ -0,0 +1,9 @@
+loader:
+ batch_size: 8
+
+clipping:
+ enabled: true
+ duration: 0.5
+ overlap: 0.0
+ max_empty: 0.0
+ discard_empty: true
diff --git a/example_data/configs/logging.yaml b/example_data/configs/logging.yaml
new file mode 100644
index 00000000..bb5f366f
--- /dev/null
+++ b/example_data/configs/logging.yaml
@@ -0,0 +1,2 @@
+train:
+ name: csv
diff --git a/example_data/configs/model.yaml b/example_data/configs/model.yaml
new file mode 100644
index 00000000..b03a525e
--- /dev/null
+++ b/example_data/configs/model.yaml
@@ -0,0 +1,59 @@
+samplerate: 256000
+
+preprocess:
+ stft:
+ window_duration: 0.002
+ window_overlap: 0.75
+ window_fn: hann
+ frequencies:
+ max_freq: 120000
+ min_freq: 10000
+ size:
+ height: 128
+ resize_factor: 0.5
+ spectrogram_transforms:
+ - name: pcen
+ time_constant: 0.1
+ gain: 0.98
+ bias: 2
+ power: 0.5
+ - name: spectral_mean_subtraction
+
+architecture:
+ name: UNetBackbone
+ input_height: 128
+ in_channels: 1
+ encoder:
+ layers:
+ - name: FreqCoordConvDown
+ out_channels: 32
+ - name: FreqCoordConvDown
+ out_channels: 64
+ - name: LayerGroup
+ layers:
+ - name: FreqCoordConvDown
+ out_channels: 128
+ - name: ConvBlock
+ out_channels: 256
+ bottleneck:
+ channels: 256
+ layers:
+ - name: SelfAttention
+ attention_channels: 256
+ decoder:
+ layers:
+ - name: FreqCoordConvUp
+ out_channels: 64
+ - name: FreqCoordConvUp
+ out_channels: 32
+ - name: LayerGroup
+ layers:
+ - name: FreqCoordConvUp
+ out_channels: 32
+ - name: ConvBlock
+ out_channels: 32
+
+postprocess:
+ nms_kernel_size: 9
+ detection_threshold: 0.01
+ top_k_per_sec: 200
diff --git a/example_data/configs/outputs.yaml b/example_data/configs/outputs.yaml
new file mode 100644
index 00000000..458093ae
--- /dev/null
+++ b/example_data/configs/outputs.yaml
@@ -0,0 +1,9 @@
+format:
+ name: raw
+ include_class_scores: true
+ include_features: true
+ include_geometry: true
+
+transform:
+ detection_transforms: []
+ clip_transforms: []
diff --git a/example_data/configs/training.yaml b/example_data/configs/training.yaml
new file mode 100644
index 00000000..b99899e8
--- /dev/null
+++ b/example_data/configs/training.yaml
@@ -0,0 +1,79 @@
+optimizer:
+ name: adam
+ learning_rate: 0.001
+
+scheduler:
+ name: cosine_annealing
+ t_max: 100
+
+labels:
+ sigma: 3
+
+trainer:
+ max_epochs: 10
+ check_val_every_n_epoch: 5
+
+train_loader:
+ batch_size: 8
+ shuffle: true
+
+ clipping_strategy:
+ name: random_subclip
+ duration: 0.256
+
+ augmentations:
+ enabled: true
+ audio:
+ - name: mix_audio
+ probability: 0.2
+ min_weight: 0.3
+ max_weight: 0.7
+ - name: add_echo
+ probability: 0.2
+ max_delay: 0.005
+ min_weight: 0.0
+ max_weight: 1.0
+ spectrogram:
+ - name: scale_volume
+ probability: 0.2
+ min_scaling: 0.0
+ max_scaling: 2.0
+ - name: warp
+ probability: 0.2
+ delta: 0.04
+ - name: mask_time
+ probability: 0.2
+ max_perc: 0.05
+ max_masks: 3
+ - name: mask_freq
+ probability: 0.2
+ max_perc: 0.10
+ max_masks: 3
+
+val_loader:
+ clipping_strategy:
+ name: whole_audio_padded
+ chunk_size: 0.256
+
+loss:
+ detection:
+ weight: 1.0
+ focal:
+ beta: 4
+ alpha: 2
+ classification:
+ weight: 2.0
+ focal:
+ beta: 4
+ alpha: 2
+ size:
+ weight: 0.1
+
+validation:
+ tasks:
+ - name: sound_event_detection
+ metrics:
+ - name: average_precision
+ - name: sound_event_classification
+ metrics:
+ - name: average_precision
diff --git a/example_data/dataset.yaml b/example_data/dataset.yaml
new file mode 100644
index 00000000..790da8cc
--- /dev/null
+++ b/example_data/dataset.yaml
@@ -0,0 +1,8 @@
+name: example dataset
+description: Only for demonstration purposes
+sources:
+ - format: batdetect2
+ name: Example Data
+ description: Examples included for testing batdetect2
+ annotations_dir: example_data/anns
+ audio_dir: example_data/audio
diff --git a/example_data/targets.yaml b/example_data/targets.yaml
new file mode 100644
index 00000000..f630dc67
--- /dev/null
+++ b/example_data/targets.yaml
@@ -0,0 +1,37 @@
+detection_target:
+ name: bat
+ match_if:
+ name: all_of
+ conditions:
+ - name: has_tag
+ tag: { key: event, value: Echolocation }
+ - name: not
+ condition:
+ name: has_tag
+ tag: { key: class, value: Unknown }
+ assign_tags:
+ - key: class
+ value: Bat
+
+classification_targets:
+ - name: myomys
+ tags:
+ - key: class
+ value: Myotis mystacinus
+ - name: pippip
+ tags:
+ - key: class
+ value: Pipistrellus pipistrellus
+ - name: eptser
+ tags:
+ - key: class
+ value: Eptesicus serotinus
+ - name: rhifer
+ tags:
+ - key: class
+ value: Rhinolophus ferrumequinum
+
+roi:
+ default:
+ name: anchor_bbox
+ anchor: top-left
diff --git a/faq.md b/faq.md
deleted file mode 100644
index 60fff93b..00000000
--- a/faq.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# BatDetect2 - FAQ
-
-## Installation
-
-#### Do I need to know Python to be able to use this?
-No. To simply run the code on your own data you do not need any knowledge of Python. However, a small bit of familiarity with the terminal (i.e. command line) in Windows/Linux/OSX may make things easier.
-
-
-#### Are there any plans for an R version?
-Currently no. All the scripts export simple `.csv` files that can be read using any programming language of choice.
-
-
-#### How do I install the code?
-The codebase has been tested under Windows 10, Ubuntu, and OSX. Read the instructions in the main readme to get started. If you are having problems getting it working and you feel like you have tried everything (e.g. confirming that your Anaconda Python distribution is correctly installed) feel free to open an issue on GitHub.
-
-
-## Performance
-
-#### The model does not work very well on my data?
-Our model is based on a machine learning approach and as such if your data is very different from our training set it may not work as well. Feel free to use our annotation tools to label some of your own data and retrain the model. Even better, if you have large quantities of audio data with reliable species data that you are willing to share with the community please get in touch.
-
-
-#### The model is incorrectly classifying insects/noise/... as bats?
-Fine-tuning the model on your data can make a big difference. See previous answer.
-
-
-#### The model fails to correctly detect feeding buzzes and social calls?
-This is a limitation of our current training data. If you have such data or would be willing to label some for us please get in touch.
-
-
-#### Calls that are clearly belonging to the same call sequence are being predicted as coming from different species?
-Currently we do not do any sophisticated post processing on the results output by the model. We return a probability associated with each species for each call. You can use these predictions to clean up the noisy predictions for sequences of calls.
-
-
-#### Can I trust the model outputs?
-The models developed and shared as part of this repository should be used with caution. While they have been evaluated on held out audio data, great care should be taken when using the model outputs for any form of biodiversity assessment. Your data may differ, and as a result it is very strongly recommended that you validate the model first using data with known species to ensure that the outputs can be trusted.
-
-
-#### The code works well but it is slow?
-Try a different/faster computer. On a reasonably recent desktop it takes about 13 seconds (on the GPU) or 1.3 minutes (on the CPU) to process 7.5 minutes of audio. In general, we observe a factor of ~5-10 speed up using recent Nvidia GPUs compared to CPU only systems.
-
-
-#### My audio files are very big and as a result the code is slow.
-If your audio files are very long in duration (i.e. multiple minutes) it might be better to split them up into several smaller files. Have a look at the instructions and scripts in our annotation GUI codebase for how to crop your files into shorter ones - see [here](https://github.com/macaodha/batdetect2_GUI).
-
-
-## Training a new model
-
-#### Can I train a model on my own bat data with different species?
-Yes. You just need to provide annotations in the correct format.
-
-
-#### Will this work for frequency-division or zero-crossing recordings?
-No. The code assumes that we can convert the input audio into a spectrogram.
-
-
-#### Will this code work for non-bat audio data e.g. insects or birds?
-In principle yes, however you may need to change some of the training hyper-parameters to ignore high frequency information when you re-train. Please open an issue on GitHub if you have a specific request.
-
-
-
-## Usage
-
-#### Can I use the code for commercial purposes or incorporate raw source code or trained models into my commercial system?
-No. This codebase is currently only for non-commercial use. See the license.
diff --git a/justfile b/justfile
new file mode 100644
index 00000000..8e889f12
--- /dev/null
+++ b/justfile
@@ -0,0 +1,149 @@
+# Default command; runs if no recipe is specified.
+default:
+ just --list
+
+# Variables
+SOURCE_DIR := "src"
+TESTS_DIR := "tests"
+PYTHON_DIRS := "src tests"
+DOCS_SOURCE := "docs/source"
+DOCS_BUILD := "docs/build"
+HTML_COVERAGE_DIR := "htmlcov"
+
+# Show available commands
+help:
+ @just --list
+
+install:
+ uv sync
+
+# Install full development dependencies for CI and docs builds.
+install-dev:
+ uv sync --all-extras --dev
+
+# Testing & Coverage
+# Run tests using pytest.
+test:
+ uv run pytest {{TESTS_DIR}}
+
+# Run the fast subset of tests (excludes @pytest.mark.slow).
+test-quick:
+ uv run pytest --durations=10 -m "not slow" {{TESTS_DIR}}
+
+# Run only long-running tests marked with @pytest.mark.slow.
+test-slow:
+ uv run pytest -m "slow" {{TESTS_DIR}}
+
+# Run tests and generate coverage data.
+coverage:
+ uv run pytest --cov=batdetect2 --cov-report=term-missing --cov-report=xml {{TESTS_DIR}}
+
+# Generate an HTML coverage report.
+coverage-html: coverage
+ @echo "Generating HTML coverage report..."
+ uv run coverage html -d {{HTML_COVERAGE_DIR}}
+ @echo "HTML coverage report generated in {{HTML_COVERAGE_DIR}}/"
+
+# Serve the HTML coverage report locally.
+coverage-serve: coverage-html
+ @echo "Serving report at http://localhost:8000/ ..."
+ uv run python -m http.server --directory {{HTML_COVERAGE_DIR}} 8000
+
+# Documentation
+# Build documentation using Sphinx.
+docs:
+ uv run sphinx-build -b html {{DOCS_SOURCE}} {{DOCS_BUILD}}
+
+# Check that documentation builds successfully.
+check-docs: docs
+
+# Serve documentation with live reload.
+docs-serve:
+ uv run sphinx-autobuild {{DOCS_SOURCE}} {{DOCS_BUILD}} --watch {{SOURCE_DIR}} --open-browser
+
+# Formatting & Linting
+# Format code using ruff.
+fix-format:
+ uv run ruff format {{PYTHON_DIRS}}
+
+# Lint code using ruff and apply automatic fixes.
+fix-lint:
+ uv run ruff check --fix {{PYTHON_DIRS}}
+
+# Combined Formatting & Linting
+fix: fix-format fix-lint
+
+# Checking tasks
+# Check code formatting using ruff.
+check-format:
+ uv run ruff format --check {{PYTHON_DIRS}}
+
+# Lint code using ruff.
+check-lint:
+ uv run ruff check {{PYTHON_DIRS}}
+
+# Type Checking
+# Type check code using ty.
+check-types:
+ uv run ty check {{PYTHON_DIRS}}
+
+# Combined Checks
+# Run all checks (format-check, lint, typecheck).
+check: check-format check-lint check-types
+
+# Run the standard CI validation sequence.
+ci: check test
+
+# Build source and wheel distributions.
+build-dist:
+ uv run --with build python -m build
+
+# Bump the patch version, commit, and tag.
+bump-patch:
+ uvx bump2version patch
+
+# Bump the minor version, commit, and tag.
+bump-minor:
+ uvx bump2version minor
+
+# Bump the major version, commit, and tag.
+bump-major:
+ uvx bump2version major
+
+# Cleaning tasks
+# Remove Python bytecode and cache.
+clean-pyc:
+ find . -type f -name "*.py[co]" -delete
+ find . -type d -name "__pycache__" -exec rm -rf {} +
+
+# Remove test and coverage artifacts.
+clean-test:
+ rm -f .coverage coverage.xml
+ rm -rf .pytest_cache htmlcov/
+
+# Remove built documentation.
+clean-docs:
+ rm -rf {{DOCS_BUILD}}
+
+# Remove package build artifacts.
+clean-build:
+ rm -rf build/ dist/ *.egg-info/
+
+# Remove all build, test, documentation, and Python artifacts.
+clean: clean-build clean-pyc clean-test clean-docs
+
+# Examples
+
+# Train on example data.
+example-train OPTIONS="":
+ uv run batdetect2 train \
+ --val-dataset example_data/dataset.yaml \
+ --base-dir . \
+ --targets example_data/targets.yaml \
+ --model-config example_data/configs/model.yaml \
+ --training-config example_data/configs/training.yaml \
+ --audio-config example_data/configs/audio.yaml \
+ --evaluation-config example_data/configs/evaluation.yaml \
+ --logging-config example_data/configs/logging.yaml \
+ {{OPTIONS}} \
+ example_data/dataset.yaml
diff --git a/pyproject.toml b/pyproject.toml
index 35d7a5a7..bf2fc5f0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "batdetect2"
-version = "1.3.1"
+version = "1.1.1"
description = "Deep learning model for detecting and classifying bat echolocation calls in high frequency audio recordings."
authors = [
{ "name" = "Oisin Mac Aodha", "email" = "oisin.macaodha@ed.ac.uk" },
@@ -8,18 +8,28 @@ authors = [
]
dependencies = [
"click>=8.1.7",
+ "deepmerge>=2.0",
+ "hydra-core>=1.3.2",
"librosa>=0.10.1",
+ "lightning[extra]==2.5.0",
+ "loguru>=0.7.3",
"matplotlib>=3.7.1",
+ "netcdf4>=1.6.5",
"numpy>=1.23.5",
"pandas>=1.5.3",
+ "pydantic>=2.0.0",
+ "pyyaml>=6.0.2",
"scikit-learn>=1.2.2",
"scipy>=1.10.1",
+ "seaborn>=0.13.2",
+ "soundevent[audio,geometry,plot]>=2.10.0",
+ "soundfile>=0.12.1",
+ "tensorboard>=2.16.2",
"torch>=1.13.1",
"torchaudio>=1.13.1",
- "torchvision>=0.14.0",
- "setuptools>=70.1.0; python_version == '3.14'",
+ "xarray>=2024.0.0",
]
-requires-python = ">=3.9,<3.15"
+requires-python = ">=3.10,<3.14"
readme = "README.md"
license = { text = "CC-by-nc-4" }
classifiers = [
@@ -27,12 +37,10 @@ classifiers = [
"Intended Audience :: Science/Research",
"Natural Language :: English",
"Operating System :: OS Independent",
- "Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
- "Programming Language :: Python :: 3.14",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Multimedia :: Sound/Audio :: Analysis",
@@ -55,41 +63,50 @@ build-backend = "hatchling.build"
batdetect2 = "batdetect2.cli:cli"
[dependency-groups]
+huggingface = ["huggingface-hub>=0.32.0"]
+jupyter = ["ipywidgets>=8.1.5", "jupyter>=1.1.1"]
+marimo = ["marimo>=0.12.2", "pyarrow>=20.0.0"]
dev = [
"debugpy>=1.8.8",
"hypothesis>=6.118.7",
- "pyright>=1.1.388",
"pytest>=7.2.2",
"ruff>=0.7.3",
+ "ipykernel>=6.29.4",
+ "setuptools>=69.5.1",
+ "myst-parser>=3.0.1",
+ "sphinx-autobuild>=2024.10.3",
+ "numpydoc>=1.8.0",
+ "sphinx-autodoc-typehints>=2.3.0",
+ "sphinx-book-theme>=1.1.4",
+ "sphinx-click>=6.1.0",
+ "autodoc-pydantic>=2.2.0",
+ "pytest-cov>=6.1.1",
+ "ty>=0.0.1a12",
+ "rust-just>=1.40.0",
+ "pandas-stubs>=2.2.2.240807",
+ "python-lsp-server>=1.13.0",
+ "deepdiff>=8.6.1",
+ "pytest-xdist[psutil]>=3.8.0",
]
-
-[[tool.uv.index]]
-name = "pytorch-cu126"
-url = "https://download.pytorch.org/whl/cu126"
-explicit = true
-
-[[tool.uv.index]]
-name = "pytorch-cu118"
-url = "https://download.pytorch.org/whl/cu118"
-explicit = true
-
-[tool.uv.sources]
-torch = [
- { index = "pytorch-cu118", marker = "python_version == '3.9' and (sys_platform == 'linux' or sys_platform == 'win32')" },
- { index = "pytorch-cu126", marker = "python_version != '3.9' and (sys_platform == 'linux' or sys_platform == 'win32')" },
-]
-torchvision = [
- { index = "pytorch-cu118", marker = "python_version == '3.9' and (sys_platform == 'linux' or sys_platform == 'win32')" },
- { index = "pytorch-cu126", marker = "python_version != '3.9' and (sys_platform == 'linux' or sys_platform == 'win32')" },
+dvclive = ["dvclive>=3.48.2"]
+mlflow = ["mlflow>=3.1.1"]
+gradio = [
+ "gradio>=6.9.0",
]
-torchaudio = [
- { index = "pytorch-cu118", marker = "python_version == '3.9' and (sys_platform == 'linux' or sys_platform == 'win32')" },
- { index = "pytorch-cu126", marker = "python_version != '3.9' and (sys_platform == 'linux' or sys_platform == 'win32')" },
+dvc = [
+ "dvclive>=3.49.0",
]
[tool.ruff]
line-length = 79
-target-version = "py39"
+target-version = "py310"
+exclude = [
+ "src/batdetect2/train/legacy",
+ "src/batdetect2/plotting/legacy",
+ "src/batdetect2/evaluate/legacy",
+ "src/batdetect2/finetune",
+ "src/batdetect2/utils",
+]
[tool.ruff.format]
docstring-code-format = true
@@ -101,9 +118,17 @@ select = ["E4", "E7", "E9", "F", "B", "Q", "I", "NPY201"]
[tool.ruff.lint.pydocstyle]
convention = "numpy"
-[tool.pyright]
-include = ["batdetect2", "tests"]
-venvPath = "."
-venv = ".venv"
-pythonVersion = "3.9"
-pythonPlatform = "All"
+[tool.ty.src]
+include = ["src", "tests"]
+exclude = [
+ "src/batdetect2/train/legacy",
+ "src/batdetect2/plotting/legacy",
+ "src/batdetect2/evaluate/legacy",
+ "src/batdetect2/finetune",
+ "src/batdetect2/utils",
+]
+
+[tool.pytest.ini_options]
+markers = [
+ "slow: marks long-running tests that are skipped in quick runs",
+]
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index cac4479d..00000000
--- a/requirements.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-librosa==0.9.2
-matplotlib==3.6.2
-numpy==1.23.4
-pandas==1.5.2
-scikit_learn==1.2.0
-scipy==1.9.3
-torch==1.13.0
-torchaudio==0.13.0
-torchvision==0.14.0
-click
diff --git a/run_batdetect.py b/run_batdetect.py
deleted file mode 100644
index 3079eca2..00000000
--- a/run_batdetect.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""Run batdetect2.command.main() from the command line."""
-from batdetect2.cli import detect
-
-if __name__ == "__main__":
- detect()
diff --git a/scripts/README.md b/scripts/README.md
deleted file mode 100644
index bcc46929..00000000
--- a/scripts/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-This directory contains some scripts for visualizing the raw data and model outputs.
-
-
-`gen_spec_image.py`: saves the model predictions on a spectrogram of the input audio file.
-e.g.
-`python gen_spec_image.py ../example_data/audio/20170701_213954-MYOMYS-LR_0_0.5.wav ../models/Net2DFast_UK_same.pth.tar`
-
-
-`gen_spec_video.py`: generates a video showing the model predictions for a file.
-e.g.
-`python gen_spec_video.py ../example_data/audio/20170701_213954-MYOMYS-LR_0_0.5.wav ../models/Net2DFast_UK_same.pth.tar`
-
-
-
-`gen_dataset_summary_image.py`: generates an image displaying the mean spectrogram for each class in a specified dataset.
-e.g.
-`python gen_dataset_summary_image.py --ann_file PATH_TO_ANN/australia_TRAIN.json PATH_TO_AUDIO/audio/ ../plots/australia/`
diff --git a/scripts/gen_dataset_summary_image.py b/scripts/gen_dataset_summary_image.py
deleted file mode 100644
index a9169003..00000000
--- a/scripts/gen_dataset_summary_image.py
+++ /dev/null
@@ -1,95 +0,0 @@
-"""
-Loads a set of annotations corresponding to a dataset and saves an image which
-is the mean spectrogram for each class.
-"""
-
-import argparse
-import os
-
-import matplotlib.pyplot as plt
-import numpy as np
-import viz_helpers as vz
-
-import batdetect2.detector.parameters as parameters
-import batdetect2.train.train_split as ts
-import batdetect2.train.train_utils as tu
-import batdetect2.utils.audio_utils as au
-
-if __name__ == "__main__":
-
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "audio_path", type=str, help="Input directory for audio"
- )
- parser.add_argument(
- "op_dir",
- type=str,
- help="Path to where single annotation json file is stored",
- )
- parser.add_argument(
- "--ann_file",
- type=str,
- help="Path to where single annotation json file is stored",
- )
- parser.add_argument(
- "--uk_split", type=str, default="", help="Set as: diff or same"
- )
- parser.add_argument(
- "--file_type",
- type=str,
- default="png",
- help="Type of image to save png or pdf",
- )
- args = vars(parser.parse_args())
-
- if not os.path.isdir(args["op_dir"]):
- os.makedirs(args["op_dir"])
-
- params = parameters.get_params(False)
- params["smooth_spec"] = False
- params["spec_width"] = 48
- params["norm_type"] = "log" # log, pcen
- params["aud_pad"] = 0.005
- classes_to_ignore = params["classes_to_ignore"] + params["generic_class"]
-
- # load train annotations
- if args["uk_split"] == "":
- print("\nLoading:", args["ann_file"], "\n")
- dataset_name = os.path.basename(args["ann_file"]).replace(".json", "")
- datasets = []
- datasets.append(
- tu.get_blank_dataset_dict(
- dataset_name, False, args["ann_file"], args["audio_path"]
- )
- )
- else:
- # load uk data - special case
- print("\nLoading:", args["uk_split"], "\n")
- dataset_name = "uk_" + args["uk_split"] # should be uk_diff, or uk_same
- datasets, _ = ts.get_train_test_data(
- args["ann_file"],
- args["audio_path"],
- args["uk_split"],
- load_extra=False,
- )
-
- anns, class_names, _ = tu.load_set_of_anns(
- datasets, classes_to_ignore, params["events_of_interest"]
- )
- class_names_order = range(len(class_names))
-
- x_train, y_train = vz.load_data(
- anns,
- params,
- class_names,
- smooth_spec=params["smooth_spec"],
- norm_type=params["norm_type"],
- )
-
- op_file_name = os.path.join(
- args["op_dir"], dataset_name + "." + args["file_type"]
- )
- vz.save_summary_image(
- x_train, y_train, class_names, params, op_file_name, class_names_order
- )
- print("\nImage saved to:", op_file_name)
diff --git a/scripts/gen_spec_image.py b/scripts/gen_spec_image.py
deleted file mode 100644
index 2249b58b..00000000
--- a/scripts/gen_spec_image.py
+++ /dev/null
@@ -1,210 +0,0 @@
-"""
-Visualize predctions on top of spectrogram.
-
-Will save images with:
-1) raw spectrogram
-2) spectrogram with GT boxes
-3) spectrogram with predicted boxes
-"""
-
-import argparse
-import json
-import os
-import sys
-
-import torch
-import matplotlib.pyplot as plt
-import numpy as np
-
-import batdetect2.evaluate.evaluate_models as evlm
-import batdetect2.utils.audio_utils as au
-import batdetect2.utils.detector_utils as du
-import batdetect2.utils.plot_utils as viz
-
-
-def filter_anns(anns, start_time, stop_time):
- anns_op = []
- for aa in anns:
- if (aa["start_time"] >= start_time) and (
- aa["start_time"] < stop_time - 0.02
- ):
- anns_op.append(aa)
- return anns_op
-
-
-if __name__ == "__main__":
-
- parser = argparse.ArgumentParser()
- parser.add_argument("audio_file", type=str, help="Path to audio file")
- parser.add_argument("model_path", type=str, help="Path to BatDetect model")
- parser.add_argument(
- "--ann_file", type=str, default="", help="Path to annotation file"
- )
- parser.add_argument(
- "--op_dir",
- type=str,
- default="plots/",
- help="Output directory for plots",
- )
- parser.add_argument(
- "--file_type",
- type=str,
- default="png",
- help="Type of image to save png or pdf",
- )
- parser.add_argument(
- "--title_text",
- type=str,
- default="",
- help="Text to add as title of plots",
- )
- parser.add_argument(
- "--detection_threshold",
- type=float,
- default=0.2,
- help="Threshold for output detections",
- )
- parser.add_argument(
- "--start_time",
- type=float,
- default=0.0,
- help="Start time for cropped file",
- )
- parser.add_argument(
- "--stop_time",
- type=float,
- default=0.5,
- help="End time for cropped file",
- )
- parser.add_argument(
- "--time_expansion_factor",
- type=int,
- default=1,
- help="Time expansion factor",
- )
-
- args_cmd = vars(parser.parse_args())
-
- # load the model
- bd_args = du.get_default_bd_args()
- model, params_bd = du.load_model(args_cmd["model_path"])
- bd_args["detection_threshold"] = args_cmd["detection_threshold"]
- bd_args["time_expansion_factor"] = args_cmd["time_expansion_factor"]
-
- # load the annotation if it exists
- gt_present = False
- if args_cmd["ann_file"] != "":
- if os.path.isfile(args_cmd["ann_file"]):
- with open(args_cmd["ann_file"]) as da:
- gt_anns = json.load(da)
- gt_anns = filter_anns(
- gt_anns["annotation"],
- args_cmd["start_time"],
- args_cmd["stop_time"],
- )
- gt_present = True
- else:
- print("Annotation file not found: ", args_cmd["ann_file"])
-
- # load the audio file
- if not os.path.isfile(args_cmd["audio_file"]):
- print("Audio file not found: ", args_cmd["audio_file"])
- sys.exit()
-
- # load audio and crop
- print("\nProcessing: " + os.path.basename(args_cmd["audio_file"]))
- print("\nOutput directory: " + args_cmd["op_dir"])
- sampling_rate, audio = au.load_audio(
- args_cmd["audio_file"],
- args_cmd["time_exp"],
- params_bd["target_samp_rate"],
- params_bd["scale_raw_audio"],
- )
- st_samp = int(sampling_rate * args_cmd["start_time"])
- en_samp = int(sampling_rate * args_cmd["stop_time"])
- if en_samp > audio.shape[0]:
- audio = np.hstack(
- (audio, np.zeros((en_samp) - audio.shape[0], dtype=audio.dtype))
- )
- audio = audio[st_samp:en_samp]
-
- duration = audio.shape[0] / sampling_rate
- print("File duration: {} seconds".format(duration))
-
- # create spec for viz
- spec, _ = au.generate_spectrogram(
- audio, sampling_rate, params_bd, True, False
- )
-
- run_config = {
- **params_bd,
- **bd_args,
- }
-
- # run model and filter detections so only keep ones in relevant time range
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- results = du.process_file(args_cmd["audio_file"], model, run_config, device)
- pred_anns = filter_anns(
- results["pred_dict"]["annotation"],
- args_cmd["start_time"],
- args_cmd["stop_time"],
- )
- print(len(pred_anns), "Detections")
-
- # save output
- if not os.path.isdir(args_cmd["op_dir"]):
- os.makedirs(args_cmd["op_dir"])
-
- # create output file names
- op_path_clean = (
- os.path.basename(args_cmd["audio_file"])[:-4]
- + "_clean."
- + args_cmd["file_type"]
- )
- op_path_clean = os.path.join(args_cmd["op_dir"], op_path_clean)
- op_path_pred = (
- os.path.basename(args_cmd["audio_file"])[:-4]
- + "_pred."
- + args_cmd["file_type"]
- )
- op_path_pred = os.path.join(args_cmd["op_dir"], op_path_pred)
-
- # create and save iamges
- viz.save_ann_spec(
- op_path_clean,
- spec,
- params_bd["min_freq"],
- params_bd["max_freq"],
- duration,
- args_cmd["start_time"],
- "",
- None,
- )
- viz.save_ann_spec(
- op_path_pred,
- spec,
- params_bd["min_freq"],
- params_bd["max_freq"],
- duration,
- args_cmd["start_time"],
- "",
- pred_anns,
- )
-
- if gt_present:
- op_path_gt = (
- os.path.basename(args_cmd["audio_file"])[:-4]
- + "_gt."
- + args_cmd["file_type"]
- )
- op_path_gt = os.path.join(args_cmd["op_dir"], op_path_gt)
- viz.save_ann_spec(
- op_path_gt,
- spec,
- params_bd["min_freq"],
- params_bd["max_freq"],
- duration,
- args_cmd["start_time"],
- "",
- gt_anns,
- )
diff --git a/scripts/gen_spec_video.py b/scripts/gen_spec_video.py
deleted file mode 100644
index 9636cae2..00000000
--- a/scripts/gen_spec_video.py
+++ /dev/null
@@ -1,276 +0,0 @@
-"""
-This script takes an audio file as input, runs the detector, and makes a video output
-
-Notes:
- It needs ffmpeg installed to make the videos
- Sometimes conda can overwrite the default ffmpeg path set this to use system one.
- Check which one is being used with `which ffmpeg`. If conda version, can thow an error.
- Best to use system one - see ffmpeg_path.
-"""
-
-import argparse
-import os
-import shutil
-import sys
-
-import matplotlib.pyplot as plt
-import numpy as np
-import torch
-from scipy.io import wavfile
-
-import batdetect2.detector.parameters as parameters
-import batdetect2.utils.audio_utils as au
-import batdetect2.utils.detector_utils as du
-import batdetect2.utils.plot_utils as viz
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("audio_file", type=str, help="Path to input audio file")
- parser.add_argument(
- "model_path", type=str, help="Path to trained BatDetect model"
- )
- parser.add_argument(
- "--op_dir",
- type=str,
- default="generated_vids/",
- help="Path to output directory",
- )
- parser.add_argument(
- "--no_detector", action="store_true", help="Do not run detector"
- )
- parser.add_argument(
- "--plot_class_names_off",
- action="store_true",
- help="Do not plot class names",
- )
- parser.add_argument(
- "--disable_axis", action="store_true", help="Do not plot axis"
- )
- parser.add_argument(
- "--detection_threshold",
- type=float,
- default=0.2,
- help="Cut-off probability for detector",
- )
- parser.add_argument(
- "--time_expansion_factor",
- type=int,
- default=1,
- dest="time_expansion_factor",
- help="The time expansion factor used for all files (default is 1)",
- )
- args_cmd = vars(parser.parse_args())
-
- # file of interest
- audio_file = args_cmd["audio_file"]
- op_dir = args_cmd["op_dir"]
- op_str = "_output"
- ffmpeg_path = "/usr/bin/"
-
- if not os.path.isfile(audio_file):
- print("Audio file not found: ", audio_file)
- sys.exit()
-
- if not os.path.isfile(args_cmd["model_path"]):
- print("Model not found: ", args_cmd["model_path"])
- sys.exit()
-
- start_time = 0.0
- duration = 0.5
- reveal_boxes = True # makes the boxes appear one at a time
- fps = 24
- dpi = 100
-
- op_dir_tmp = os.path.join(op_dir, "op_tmp_vids", "")
- if not os.path.isdir(op_dir_tmp):
- os.makedirs(op_dir_tmp)
- if not os.path.isdir(op_dir):
- os.makedirs(op_dir)
-
- params = parameters.get_params(False)
- args = du.get_default_bd_args()
- args["time_expansion_factor"] = args_cmd["time_expansion_factor"]
- args["detection_threshold"] = args_cmd["detection_threshold"]
-
- # load audio file
- print("\nProcessing: " + os.path.basename(audio_file))
- print("\nOutput directory: " + op_dir)
- sampling_rate, audio = au.load_audio(
- audio_file, args["time_expansion_factor"], params["target_samp_rate"]
- )
- audio = audio[
- int(sampling_rate * start_time) : int(
- sampling_rate * start_time + sampling_rate * duration
- )
- ]
- audio_orig = audio.copy()
- audio = au.pad_audio(
- audio,
- sampling_rate,
- params["fft_win_length"],
- params["fft_overlap"],
- params["resize_factor"],
- params["spec_divide_factor"],
- )
-
- # generate spectrogram
- spec, _ = au.generate_spectrogram(audio, sampling_rate, params, True)
- max_val = spec.max() * 1.1
-
- if not args_cmd["no_detector"]:
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
- print(" Loading model and running detector on entire file ...")
- model, det_params = du.load_model(args_cmd["model_path"])
- det_params["detection_threshold"] = args["detection_threshold"]
-
- run_config = {
- **det_params,
- **args,
- }
- results = du.process_file(
- audio_file,
- model,
- run_config,
- device,
- )
-
- print(" Processing detections and plotting ...")
- detections = []
- for bb in results["pred_dict"]["annotation"]:
- if (bb["start_time"] >= start_time) and (
- bb["end_time"] < start_time + duration
- ):
- detections.append(bb)
-
- # plot boxes
- fig = plt.figure(
- 1, figsize=(spec.shape[1] / dpi, spec.shape[0] / dpi), dpi=dpi
- )
- duration = au.x_coords_to_time(
- spec.shape[1],
- sampling_rate,
- params["fft_win_length"],
- params["fft_overlap"],
- )
- viz.create_box_image(
- spec,
- fig,
- detections,
- start_time,
- start_time + duration,
- duration,
- params,
- max_val,
- plot_class_names=not args_cmd["plot_class_names_off"],
- )
- op_im_file_boxes = os.path.join(
- op_dir, os.path.basename(audio_file)[:-4] + op_str + "_boxes.png"
- )
- fig.savefig(op_im_file_boxes, dpi=dpi)
- plt.close(1)
- spec_with_boxes = plt.imread(op_im_file_boxes)
-
- print(" Saving audio file ...")
- if args["time_expansion_factor"] == 1:
- sampling_rate_op = int(sampling_rate / 10.0)
- else:
- sampling_rate_op = sampling_rate
- op_audio_file = os.path.join(
- op_dir, os.path.basename(audio_file)[:-4] + op_str + ".wav"
- )
- wavfile.write(op_audio_file, sampling_rate_op, audio_orig)
-
- print(" Saving image ...")
- op_im_file = os.path.join(
- op_dir, os.path.basename(audio_file)[:-4] + op_str + ".png"
- )
- plt.imsave(op_im_file, spec, vmin=0, vmax=max_val, cmap="plasma")
- spec_blank = plt.imread(op_im_file)
-
- # create figure
- freq_scale = 1000 # turn Hz to kHz
- min_freq = params["min_freq"] // freq_scale
- max_freq = params["max_freq"] // freq_scale
- y_extent = [0, duration, min_freq, max_freq]
-
- print(" Saving video frames ...")
- # save images that will be combined into video
- # will either plot with or without boxes
- for ii, col in enumerate(
- np.linspace(0, spec.shape[1] - 1, int(fps * duration * 10))
- ):
- if not args_cmd["no_detector"]:
- spec_op = spec_with_boxes.copy()
- if ii > 0:
- spec_op[:, int(col), :] = 1.0
- if reveal_boxes:
- spec_op[:, int(col) + 1 :, :] = spec_blank[
- :, int(col) + 1 :, :
- ]
- elif ii == 0 and reveal_boxes:
- spec_op = spec_blank
-
- if not args_cmd["disable_axis"]:
- plt.close("all")
- fig = plt.figure(
- ii,
- figsize=(
- 1.2 * (spec_op.shape[1] / dpi),
- 1.5 * (spec_op.shape[0] / dpi),
- ),
- dpi=dpi,
- )
- plt.xlabel("Time - seconds")
- plt.ylabel("Frequency - kHz")
- plt.imshow(
- spec_op,
- vmin=0,
- vmax=1.0,
- cmap="plasma",
- extent=y_extent,
- aspect="auto",
- )
- plt.tight_layout()
- fig.savefig(op_dir_tmp + str(ii).zfill(4) + ".png", dpi=dpi)
- else:
- plt.imsave(
- op_dir_tmp + str(ii).zfill(4) + ".png",
- spec_op,
- vmin=0,
- vmax=1.0,
- cmap="plasma",
- )
- else:
- spec_op = spec.copy()
- if ii > 0:
- spec_op[:, int(col)] = max_val
- plt.imsave(
- op_dir_tmp + str(ii).zfill(4) + ".png",
- spec_op,
- vmin=0,
- vmax=max_val,
- cmap="plasma",
- )
-
- print(" Creating video ...")
- op_vid_file = os.path.join(
- op_dir, os.path.basename(audio_file)[:-4] + op_str + ".avi"
- )
- ffmpeg_cmd = (
- "ffmpeg -hide_banner -loglevel panic -y -r {} -f image2 -s {}x{} -i {}%04d.png -i {} -vcodec libx264 "
- "-crf 25 -pix_fmt yuv420p -acodec copy {}".format(
- fps,
- spec.shape[1],
- spec.shape[0],
- op_dir_tmp,
- op_audio_file,
- op_vid_file,
- )
- )
- ffmpeg_cmd = ffmpeg_path + ffmpeg_cmd
- os.system(ffmpeg_cmd)
-
- print(" Deleting temporary files ...")
- if os.path.isdir(op_dir_tmp):
- shutil.rmtree(op_dir_tmp)
diff --git a/scripts/viz_helpers.py b/scripts/viz_helpers.py
deleted file mode 100644
index 13d09b61..00000000
--- a/scripts/viz_helpers.py
+++ /dev/null
@@ -1,225 +0,0 @@
-import os
-import sys
-
-import matplotlib.pyplot as plt
-import numpy as np
-from scipy import ndimage
-
-sys.path.append(os.path.join(".."))
-
-import batdetect2.utils.audio_utils as au
-
-
-def generate_spectrogram_data(
- audio, sampling_rate, params, norm_type="log", smooth_spec=False
-):
- max_freq = round(params["max_freq"] * params["fft_win_length"])
- min_freq = round(params["min_freq"] * params["fft_win_length"])
-
- # create spectrogram - numpy
- spec = au.gen_mag_spectrogram(
- audio, sampling_rate, params["fft_win_length"], params["fft_overlap"]
- )
- # spec = au.gen_mag_spectrogram_pt(audio, sampling_rate, params['fft_win_length'], params['fft_overlap']).numpy()
- if spec.shape[0] < max_freq:
- freq_pad = max_freq - spec.shape[0]
- spec = np.vstack(
- (np.zeros((freq_pad, spec.shape[1]), dtype=np.float32), spec)
- )
- spec = spec[-max_freq : spec.shape[0] - min_freq, :]
-
- if norm_type == "log":
- log_scaling = (
- 2.0
- * (1.0 / sampling_rate)
- * (
- 1.0
- / (
- np.abs(
- np.hanning(
- int(params["fft_win_length"] * sampling_rate)
- )
- )
- ** 2
- ).sum()
- )
- )
- ##log_scaling = 0.01
- spec = np.log(1.0 + log_scaling * spec).astype(np.float32)
- elif norm_type == "pcen":
- spec = au.pcen(spec, sampling_rate)
- else:
- pass
-
- if smooth_spec:
- spec = ndimage.gaussian_filter(spec, 1)
-
- return spec
-
-
-def load_data(
- anns,
- params,
- class_names,
- smooth_spec=False,
- norm_type="log",
- extract_bg=False,
-):
- specs = []
- labels = []
- coords = []
- audios = []
- sampling_rates = []
- file_names = []
- for cur_file in anns:
- sampling_rate, audio_orig = au.load_audio(
- cur_file["file_path"],
- cur_file["time_exp"],
- params["target_samp_rate"],
- params["scale_raw_audio"],
- )
-
- for ann in cur_file["annotation"]:
- if (
- ann["class"] not in params["classes_to_ignore"]
- and ann["class"] in class_names
- ):
- # clip out of bounds
- if ann["low_freq"] < params["min_freq"]:
- ann["low_freq"] = params["min_freq"]
- if ann["high_freq"] > params["max_freq"]:
- ann["high_freq"] = params["max_freq"]
-
- # load cropped audio
- start_samp_diff = int(sampling_rate * ann["start_time"]) - int(
- sampling_rate * params["aud_pad"]
- )
- start_samp = np.maximum(0, start_samp_diff)
- end_samp = np.minimum(
- audio_orig.shape[0],
- int(sampling_rate * ann["end_time"]) * 2
- + int(sampling_rate * params["aud_pad"]),
- )
- audio = audio_orig[start_samp:end_samp]
- if start_samp_diff < 0:
- # need to pad at start if the call is at the very begining
- audio = np.hstack(
- (np.zeros(-start_samp_diff, dtype=np.float32), audio)
- )
-
- nfft = int(params["fft_win_length"] * sampling_rate)
- noverlap = int(params["fft_overlap"] * nfft)
- max_samps = params["spec_width"] * (nfft - noverlap) + noverlap
-
- if max_samps > audio.shape[0]:
- audio = np.hstack(
- (audio, np.zeros(max_samps - audio.shape[0]))
- )
- audio = audio[:max_samps].astype(np.float32)
-
- audio = au.pad_audio(
- audio,
- sampling_rate,
- params["fft_win_length"],
- params["fft_overlap"],
- params["resize_factor"],
- params["spec_divide_factor"],
- )
-
- # generate spectrogram
- spec = generate_spectrogram_data(
- audio, sampling_rate, params, norm_type, smooth_spec
- )[:, : params["spec_width"]]
-
- specs.append(spec[np.newaxis, ...])
- labels.append(ann["class"])
-
- audios.append(audio)
- sampling_rates.append(sampling_rate)
- file_names.append(cur_file["file_path"])
-
- # position in crop
- x1 = int(
- au.time_to_x_coords(
- np.array(params["aud_pad"]),
- sampling_rate,
- params["fft_win_length"],
- params["fft_overlap"],
- )
- )
- y1 = (ann["low_freq"] - params["min_freq"]) * params[
- "fft_win_length"
- ]
- coords.append((y1, x1))
-
- _, file_ids = np.unique(file_names, return_inverse=True)
- labels = np.array([class_names.index(ll) for ll in labels])
-
- # return np.vstack(specs), labels, coords, audios, sampling_rates, file_ids, file_names
- return np.vstack(specs), labels
-
-
-def save_summary_image(
- specs,
- labels,
- species_names,
- params,
- op_file_name="plots/all_species.png",
- order=None,
-):
- # takes the mean for each class and plots it on a grid
- mean_specs = []
- max_band = []
- for ii in range(len(species_names)):
- inds = np.where(labels == ii)[0]
- mu = specs[inds, :].mean(0)
- max_band.append(np.argmax(mu.sum(1)))
- mean_specs.append(mu)
-
- # control the order in which classes are printed
- if order is None:
- order = np.arange(len(species_names))
-
- max_cols = 6
- nrows = int(np.ceil(len(species_names) / max_cols))
- ncols = np.minimum(len(species_names), max_cols)
-
- fig, ax = plt.subplots(
- nrows=nrows,
- ncols=ncols,
- figsize=(ncols * 3.3, nrows * 6),
- gridspec_kw={"wspace": 0, "hspace": 0.2},
- )
- spec_min_max = (
- 0,
- mean_specs[0].shape[1],
- params["min_freq"] / 1000,
- params["max_freq"] / 1000,
- )
- ii = 0
- for row in ax:
-
- if type(row) != np.ndarray:
- row = np.array([row])
-
- for col in row:
- if ii >= len(species_names):
- col.axis("off")
- else:
- inds = np.where(labels == order[ii])[0]
- col.imshow(
- mean_specs[order[ii]],
- extent=spec_min_max,
- cmap="plasma",
- aspect="equal",
- )
- col.grid(color="w", alpha=0.3, linewidth=0.3)
- col.set_xticks([])
- col.title.set_text(str(ii + 1) + " " + species_names[order[ii]])
- col.tick_params(axis="both", which="major", labelsize=7)
- ii += 1
-
- # plt.tight_layout()
- # plt.show()
- plt.savefig(op_file_name)
- plt.close("all")
diff --git a/src/batdetect2/__init__.py b/src/batdetect2/__init__.py
new file mode 100644
index 00000000..42fd4bd6
--- /dev/null
+++ b/src/batdetect2/__init__.py
@@ -0,0 +1,29 @@
+import logging
+import warnings
+from typing import TYPE_CHECKING
+
+from loguru import logger
+
+if TYPE_CHECKING:
+ from batdetect2.api_v2 import BatDetect2API
+
+__all__ = ["BatDetect2API", "__version__"]
+__version__ = "1.1.1"
+
+logger.disable("batdetect2")
+
+# Silences the irrelevant warning
+warnings.filterwarnings("ignore", message="The pynvml package is deprecated")
+warnings.filterwarnings("ignore", message=".*isinstance(treespec, LeafSpec).*")
+
+numba_logger = logging.getLogger("numba")
+numba_logger.setLevel(logging.WARNING)
+
+
+def __getattr__(name: str):
+ if name == "BatDetect2API":
+ from batdetect2.api_v2 import BatDetect2API
+
+ return BatDetect2API
+
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
diff --git a/batdetect2/api.py b/src/batdetect2/api.py
similarity index 83%
rename from batdetect2/api.py
rename to src/batdetect2/api.py
index 86bf55bc..f27b951a 100644
--- a/batdetect2/api.py
+++ b/src/batdetect2/api.py
@@ -32,7 +32,7 @@
for each detection. The CNN features are the output of the CNN before
the final classification layer. You can use these features to train
your own classifier, or to do other processing on the detections.
- They are in the same order as the detections in
+ They are in the same order as the detections in
`results['pred_dict']['annotation']`. Will only be returned if the
`cnn_feats` parameter in the config is set to `True`.
- `spec_slices`: Optional. A list of `numpy` arrays containing the spectrogram
@@ -96,10 +96,9 @@
consult the API documentation in the code.
"""
+
import warnings
-from typing import List, Optional, Tuple, BinaryIO, Any, Union
-from .types import AudioPath
import numpy as np
import torch
@@ -121,12 +120,6 @@
)
from batdetect2.utils.detector_utils import list_audio_files, load_model
-import audioread
-import os
-import soundfile as sf
-import requests
-import io
-
# Remove warnings from torch
warnings.filterwarnings("ignore", category=UserWarning, module="torch")
@@ -171,7 +164,7 @@ def load_audio(
time_exp_fact: float = 1,
target_samp_rate: int = TARGET_SAMPLERATE_HZ,
scale: bool = False,
- max_duration: Optional[float] = None,
+ max_duration: float | None = None,
) -> np.ndarray:
"""Load audio from file.
@@ -209,7 +202,7 @@ def load_audio(
def generate_spectrogram(
audio: np.ndarray,
samp_rate: int = TARGET_SAMPLERATE_HZ,
- config: Optional[SpectrogramParameters] = None,
+ config: SpectrogramParameters | None = None,
device: torch.device = DEVICE,
) -> torch.Tensor:
"""Generate spectrogram from audio array.
@@ -233,11 +226,10 @@ def generate_spectrogram(
if config is None:
config = DEFAULT_SPECTROGRAM_PARAMETERS
- _, spec, _ = du.compute_spectrogram(
+ _, spec = du.compute_spectrogram(
audio,
samp_rate,
config,
- return_np=False,
device=device,
)
@@ -245,89 +237,41 @@ def generate_spectrogram(
def process_file(
- path: AudioPath,
+ audio_file: str,
model: DetectionModel = MODEL,
- config: Optional[ProcessingConfiguration] = None,
+ config: ProcessingConfiguration | None = None,
device: torch.device = DEVICE,
- file_id: Optional[str] = None
) -> du.RunResults:
"""Process audio file with model.
Parameters
----------
- path : AudioPath
- Path to audio data.
+ audio_file : str
+ Path to audio file.
model : DetectionModel, optional
Detection model. Uses default model if not specified.
config : Optional[ProcessingConfiguration], optional
Processing configuration, by default None (uses default parameters).
device : torch.device, optional
Device to use, by default tries to use GPU if available.
- file_id: Optional[str],
- Give the data an id. If path is a string path to a file this can be ignored and
- the file_id will be the basename of the file.
"""
if config is None:
config = CONFIG
return du.process_file(
- path,
+ audio_file,
model,
config,
device,
- file_id
)
-def process_url(
- url: str,
- model: DetectionModel = MODEL,
- config: Optional[ProcessingConfiguration] = None,
- device: torch.device = DEVICE,
- file_id: Optional[str] = None
-) -> du.RunResults:
- """Process audio file with model.
-
- Parameters
- ----------
- url : str
- HTTP URL to load the audio data from
- model : DetectionModel, optional
- Detection model. Uses default model if not specified.
- config : Optional[ProcessingConfiguration], optional
- Processing configuration, by default None (uses default parameters).
- device : torch.device, optional
- Device to use, by default tries to use GPU if available.
- file_id: Optional[str],
- Give the data an id. Defaults to the URL
- """
- if config is None:
- config = CONFIG
-
- if file_id is None:
- file_id = url
-
- response = requests.get(url)
-
- # Raise exception on HTTP error
- response.raise_for_status()
-
- # Retrieve body as raw bytes
- raw_audio_data = response.content
-
- return du.process_file(
- io.BytesIO(raw_audio_data),
- model,
- config,
- device,
- file_id
- )
def process_spectrogram(
spec: torch.Tensor,
samp_rate: int = TARGET_SAMPLERATE_HZ,
model: DetectionModel = MODEL,
- config: Optional[ProcessingConfiguration] = None,
-) -> Tuple[List[Annotation], np.ndarray]:
+ config: ProcessingConfiguration | None = None,
+) -> tuple[list[Annotation], np.ndarray]:
"""Process spectrogram with model.
Parameters
@@ -367,9 +311,9 @@ def process_audio(
audio: np.ndarray,
samp_rate: int = TARGET_SAMPLERATE_HZ,
model: DetectionModel = MODEL,
- config: Optional[ProcessingConfiguration] = None,
+ config: ProcessingConfiguration | None = None,
device: torch.device = DEVICE,
-) -> Tuple[List[Annotation], np.ndarray, torch.Tensor]:
+) -> tuple[list[Annotation], np.ndarray, torch.Tensor]:
"""Process audio array with model.
Parameters
@@ -411,8 +355,8 @@ def process_audio(
def postprocess(
outputs: ModelOutput,
samp_rate: int = TARGET_SAMPLERATE_HZ,
- config: Optional[ProcessingConfiguration] = None,
-) -> Tuple[List[Annotation], np.ndarray]:
+ config: ProcessingConfiguration | None = None,
+) -> tuple[list[Annotation], np.ndarray]:
"""Postprocess model outputs.
Convert model tensor outputs to predicted bounding boxes and
@@ -466,7 +410,9 @@ def print_summary(results: RunResults) -> None:
Detection result.
"""
print("Results for " + results["pred_dict"]["id"])
- print("{} calls detected\n".format(len(results["pred_dict"]["annotation"])))
+ print(
+ "{} calls detected\n".format(len(results["pred_dict"]["annotation"]))
+ )
print("time\tprob\tlfreq\tspecies_name")
for ann in results["pred_dict"]["annotation"]:
diff --git a/src/batdetect2/api_v2.py b/src/batdetect2/api_v2.py
new file mode 100644
index 00000000..d2c0a8fe
--- /dev/null
+++ b/src/batdetect2/api_v2.py
@@ -0,0 +1,1260 @@
+from __future__ import annotations
+
+from pathlib import Path
+from typing import TYPE_CHECKING, Literal
+
+if TYPE_CHECKING:
+ from collections.abc import Sequence
+
+ import numpy as np
+ import torch
+ from soundevent import data
+
+ from batdetect2.audio import AudioConfig, AudioLoader
+ from batdetect2.data import Dataset
+ from batdetect2.evaluate import EvaluationConfig, EvaluatorProtocol
+ from batdetect2.inference import InferenceConfig
+ from batdetect2.logging import (
+ AppLoggingConfig,
+ LoggerConfig,
+ LoggingCallback,
+ )
+ from batdetect2.models import ModelConfig
+ from batdetect2.models.types import ModelProtocol
+ from batdetect2.outputs import (
+ OutputFormatConfig,
+ OutputFormatterProtocol,
+ OutputsConfig,
+ OutputTransformProtocol,
+ )
+ from batdetect2.postprocess import (
+ ClipDetections,
+ Detection,
+ PostprocessorProtocol,
+ )
+ from batdetect2.preprocess import PreprocessorProtocol
+ from batdetect2.targets import (
+ ROIMapperProtocol,
+ TargetConfig,
+ TargetProtocol,
+ )
+ from batdetect2.train import TrainingConfig
+ from batdetect2.train.logging import TrainLoggingContext
+
+
+DEFAULT_CHECKPOINT_DIR: Path = Path("outputs") / "checkpoints"
+DEFAULT_LOGS_DIR: Path = Path("outputs") / "logs"
+DEFAULT_EVAL_DIR: Path = Path("outputs") / "evaluations"
+
+
+class BatDetect2API:
+ """High-level interface for the BatDetect2 workflow.
+
+ Use this to load a model, run inference, inspect detections,
+ evaluate predictions, and train or fine-tune models.
+
+ In most cases, start with :meth:`from_checkpoint` to load a trained model.
+ Use :meth:`from_config` when you want to build a new model with custom
+ configs.
+
+ Examples
+ --------
+ Load the default checkpoint and run prediction on one file.
+
+ >>> from batdetect2.api_v2 import BatDetect2API
+ >>> api = BatDetect2API.from_checkpoint()
+ >>> prediction = api.process_file("recording.wav")
+
+ Load a checkpoint and save predictions for a folder of audio.
+
+ >>> from pathlib import Path
+ >>> api = BatDetect2API.from_checkpoint("uk_same")
+ >>> predictions = api.process_directory("audio")
+ >>> api.save_predictions(predictions, "outputs/")
+ """
+
+ def __init__(
+ self,
+ model_config: ModelConfig,
+ audio_config: AudioConfig,
+ train_config: TrainingConfig,
+ evaluation_config: EvaluationConfig,
+ inference_config: InferenceConfig,
+ outputs_config: OutputsConfig,
+ logging_config: AppLoggingConfig,
+ targets: TargetProtocol,
+ roi_mapper: ROIMapperProtocol,
+ audio_loader: AudioLoader,
+ preprocessor: PreprocessorProtocol,
+ postprocessor: PostprocessorProtocol,
+ evaluator: EvaluatorProtocol,
+ formatter: OutputFormatterProtocol,
+ output_transform: OutputTransformProtocol,
+ model: ModelProtocol,
+ ):
+ """Create a fully configured API instance.
+
+ This initializer is mainly for internal use.
+ In most cases, users should create the API with
+ :meth:`from_checkpoint` or :meth:`from_config`.
+
+ Parameters
+ ----------
+ model_config : ModelConfig
+ Model configuration.
+ audio_config : AudioConfig
+ Audio loading configuration.
+ train_config : TrainingConfig
+ Training configuration.
+ evaluation_config : EvaluationConfig
+ Evaluation configuration.
+ inference_config : InferenceConfig
+ Inference configuration.
+ outputs_config : OutputsConfig
+ Output formatting configuration.
+ logging_config : AppLoggingConfig
+ Logging configuration.
+ targets : TargetProtocol
+ Target definition used by the model.
+ roi_mapper : ROIMapperProtocol
+ ROI mapping used for size targets.
+ audio_loader : AudioLoader
+ Audio loader.
+ preprocessor : PreprocessorProtocol
+ Preprocessor used before the detector.
+ postprocessor : PostprocessorProtocol
+ Postprocessor used after the detector.
+ evaluator : EvaluatorProtocol
+ Evaluator used for metrics.
+ formatter : OutputFormatterProtocol
+ Default formatter used to save predictions.
+ output_transform : OutputTransformProtocol
+ Transform that converts model outputs into detections.
+ model : ModelProtocol
+ Model instance.
+ """
+ self.model_config = model_config
+ self.audio_config = audio_config
+ self.train_config = train_config
+ self.evaluation_config = evaluation_config
+ self.inference_config = inference_config
+ self.outputs_config = outputs_config
+ self.logging_config = logging_config
+ self.targets = targets
+ self.roi_mapper = roi_mapper
+ self.audio_loader = audio_loader
+ self.preprocessor = preprocessor
+ self.postprocessor = postprocessor
+ self.evaluator = evaluator
+ self.model = model
+ self.formatter = formatter
+ self.output_transform = output_transform
+
+ self.model.eval()
+
+ def load_annotations(
+ self,
+ path: data.PathLike,
+ base_dir: data.PathLike | None = None,
+ ) -> Dataset:
+ """Load a set of annotations from a dataset config file.
+
+ Parameters
+ ----------
+ path : data.PathLike
+ Path to the dataset config file.
+ base_dir : data.PathLike | None, optional
+ Base directory used to resolve relative paths in the dataset
+ config.
+
+ Returns
+ -------
+ Dataset
+ Loaded dataset of annotations.
+ """
+ from batdetect2.data import load_dataset_from_config
+
+ return load_dataset_from_config(path, base_dir=base_dir)
+
+ def train(
+ self,
+ train_annotations: Sequence[data.ClipAnnotation],
+ val_annotations: Sequence[data.ClipAnnotation] | None = None,
+ train_workers: int = 0,
+ val_workers: int = 0,
+ checkpoint_dir: Path | None = DEFAULT_CHECKPOINT_DIR,
+ log_dir: Path | None = DEFAULT_LOGS_DIR,
+ experiment_name: str | None = None,
+ num_epochs: int | None = None,
+ run_name: str | None = None,
+ seed: int | None = None,
+ audio_config: AudioConfig | None = None,
+ train_config: TrainingConfig | None = None,
+ logger_config: LoggerConfig | None = None,
+ logging_callbacks: Sequence[LoggingCallback[TrainLoggingContext]] = (),
+ ):
+ """Train the current model on a set of annotations.
+
+ Parameters
+ ----------
+ train_annotations : Sequence[data.ClipAnnotation]
+ Training annotations.
+ val_annotations : Sequence[data.ClipAnnotation] | None, optional
+ Validation annotations. If omitted, training runs without a
+ validation set.
+ train_workers : int, optional
+ Number of worker processes for training data loading.
+ val_workers : int, optional
+ Number of worker processes for validation data loading.
+ checkpoint_dir : Path | None, optional
+ Directory where checkpoints are saved.
+ log_dir : Path | None, optional
+ Directory where logs are written.
+ experiment_name : str | None, optional
+ Experiment name used by the configured logger.
+ num_epochs : int | None, optional
+ Maximum number of training epochs.
+ run_name : str | None, optional
+ Run name used by the configured logger.
+ seed : int | None, optional
+ Random seed for reproducibility.
+ audio_config : AudioConfig | None, optional
+ Audio config override.
+ train_config : TrainingConfig | None, optional
+ Training config override.
+ logger_config : LoggerConfig | None, optional
+ Training logger config override.
+ logging_callbacks : Sequence[LoggingCallback[TrainLoggingContext]], optional
+ Extra logging callbacks to run during training setup.
+
+ Returns
+ -------
+ BatDetect2API
+ This API instance with the trained model.
+ """
+ from batdetect2.train import run_train
+
+ self.model.train()
+ run_train(
+ train_annotations=train_annotations,
+ val_annotations=val_annotations,
+ model=self.model,
+ targets=self.targets,
+ roi_mapper=self.roi_mapper,
+ audio_loader=self.audio_loader,
+ preprocessor=self.preprocessor,
+ train_workers=train_workers,
+ val_workers=val_workers,
+ checkpoint_dir=checkpoint_dir,
+ log_dir=log_dir,
+ num_epochs=num_epochs,
+ experiment_name=experiment_name,
+ run_name=run_name,
+ seed=seed,
+ train_config=train_config or self.train_config,
+ audio_config=audio_config or self.audio_config,
+ logger_config=logger_config or self.logging_config.train,
+ logging_callbacks=logging_callbacks,
+ )
+ self.model.eval()
+ return self
+
+ def finetune(
+ self,
+ train_annotations: Sequence[data.ClipAnnotation],
+ targets_config: TargetConfig,
+ val_annotations: Sequence[data.ClipAnnotation] | None = None,
+ trainable: Literal[
+ "all", "heads", "classifier_head", "size_head"
+ ] = "heads",
+ train_workers: int = 0,
+ val_workers: int = 0,
+ checkpoint_dir: Path | None = DEFAULT_CHECKPOINT_DIR,
+ log_dir: Path | None = DEFAULT_LOGS_DIR,
+ experiment_name: str | None = None,
+ num_epochs: int | None = None,
+ run_name: str | None = None,
+ seed: int | None = None,
+ audio_config: AudioConfig | None = None,
+ train_config: TrainingConfig | None = None,
+ logger_config: LoggerConfig | None = None,
+ logging_callbacks: Sequence[LoggingCallback[TrainLoggingContext]] = (),
+ ) -> "BatDetect2API":
+ """Fine-tune the current model for new target sounds.
+
+ Use this when you want to keep the existing model weights but change
+ the target sounds. You can fine-tune the whole model or just the
+ heads.
+
+ Parameters
+ ----------
+ train_annotations : Sequence[data.ClipAnnotation]
+ Training annotations.
+ targets_config : TargetConfig
+ Target definition to train against.
+ val_annotations : Sequence[data.ClipAnnotation] | None, optional
+ Validation annotations.
+ trainable : {"all", "heads", "classifier_head", "size_head"}, optional
+ Which model parameters remain trainable.
+ train_workers : int, optional
+ Number of worker processes for training data loading.
+ val_workers : int, optional
+ Number of worker processes for validation data loading.
+ checkpoint_dir : Path | None, optional
+ Directory where checkpoints are saved.
+ log_dir : Path | None, optional
+ Directory where logs are written.
+ experiment_name : str | None, optional
+ Experiment name used by the configured logger.
+ num_epochs : int | None, optional
+ Maximum number of training epochs.
+ run_name : str | None, optional
+ Run name used by the configured logger.
+ seed : int | None, optional
+ Random seed for reproducibility.
+ audio_config : AudioConfig | None, optional
+ Audio config override.
+ train_config : TrainingConfig | None, optional
+ Training config override.
+ logger_config : LoggerConfig | None, optional
+ Training logger config override.
+ logging_callbacks : Sequence[LoggingCallback[TrainLoggingContext]], optional
+ Extra logging callbacks to run during training setup.
+
+ Returns
+ -------
+ BatDetect2API
+ A new API instance configured for the new targets.
+ """
+ from batdetect2.evaluate import build_evaluator
+ from batdetect2.models import build_model_with_new_targets
+ from batdetect2.outputs import (
+ build_output_formatter,
+ build_output_transform,
+ )
+ from batdetect2.targets import (
+ TargetConfig,
+ build_roi_mapping,
+ build_targets,
+ )
+ from batdetect2.train import run_train
+
+ target_config = TargetConfig.model_validate(targets_config)
+ targets = build_targets(config=target_config)
+ roi_mapper = build_roi_mapping(config=target_config.roi)
+ model = build_model_with_new_targets(
+ model=self.model,
+ targets=targets,
+ roi_mapper=roi_mapper,
+ )
+ output_transform = build_output_transform(
+ config=self.outputs_config.transform,
+ targets=targets,
+ roi_mapper=roi_mapper,
+ )
+ api = BatDetect2API(
+ model_config=self.model_config,
+ audio_config=audio_config or self.audio_config,
+ train_config=train_config or self.train_config,
+ evaluation_config=self.evaluation_config,
+ inference_config=self.inference_config,
+ outputs_config=self.outputs_config,
+ logging_config=self.logging_config,
+ targets=targets,
+ roi_mapper=roi_mapper,
+ audio_loader=self.audio_loader,
+ preprocessor=self.preprocessor,
+ postprocessor=self.postprocessor,
+ evaluator=build_evaluator(
+ config=self.evaluation_config,
+ targets=targets,
+ roi_mapper=roi_mapper,
+ transform=output_transform,
+ ),
+ formatter=build_output_formatter(
+ targets,
+ config=self.outputs_config.format,
+ ),
+ output_transform=output_transform,
+ model=model,
+ )
+
+ api._set_trainable_parameters(trainable)
+ api.model.train()
+
+ run_train(
+ train_annotations=train_annotations,
+ val_annotations=val_annotations,
+ model=api.model,
+ targets=api.targets,
+ roi_mapper=api.roi_mapper,
+ preprocessor=api.preprocessor,
+ audio_loader=api.audio_loader,
+ train_workers=train_workers,
+ val_workers=val_workers,
+ checkpoint_dir=checkpoint_dir,
+ log_dir=log_dir,
+ experiment_name=experiment_name,
+ num_epochs=num_epochs,
+ run_name=run_name,
+ seed=seed,
+ audio_config=api.audio_config,
+ train_config=api.train_config,
+ logger_config=logger_config or api.logging_config.train,
+ logging_callbacks=logging_callbacks,
+ )
+ api.model.eval()
+ return api
+
+ def evaluate(
+ self,
+ test_annotations: Sequence[data.ClipAnnotation],
+ num_workers: int = 0,
+ output_dir: data.PathLike = DEFAULT_EVAL_DIR,
+ experiment_name: str | None = None,
+ run_name: str | None = None,
+ save_predictions: bool = True,
+ audio_config: AudioConfig | None = None,
+ evaluation_config: EvaluationConfig | None = None,
+ outputs_config: OutputsConfig | None = None,
+ logger_config: LoggerConfig | None = None,
+ ) -> tuple[dict[str, float], list[ClipDetections]]:
+ """Evaluate the current model on a labelled dataset.
+
+ Parameters
+ ----------
+ test_annotations : Sequence[data.ClipAnnotation]
+ Labelled clips used for evaluation.
+ num_workers : int, optional
+ Number of worker processes for dataset loading.
+ output_dir : data.PathLike, optional
+ Directory where metrics and plots are written.
+ experiment_name : str | None, optional
+ Experiment name used by the configured logger.
+ run_name : str | None, optional
+ Run name used by the configured logger.
+ save_predictions : bool, optional
+ If ``True``, save formatted predictions alongside metrics.
+ audio_config : AudioConfig | None, optional
+ Audio config override.
+ evaluation_config : EvaluationConfig | None, optional
+ Evaluation config override.
+ outputs_config : OutputsConfig | None, optional
+ Output config override.
+ logger_config : LoggerConfig | None, optional
+ Evaluation logger config override.
+
+ Returns
+ -------
+ tuple[dict[str, float], list[ClipDetections]]
+ Evaluation metrics and per-clip predictions.
+ """
+ from batdetect2.evaluate import run_evaluate
+
+ return run_evaluate(
+ self.model,
+ test_annotations,
+ targets=self.targets,
+ roi_mapper=self.roi_mapper,
+ audio_loader=self.audio_loader,
+ preprocessor=self.preprocessor,
+ audio_config=audio_config or self.audio_config,
+ evaluation_config=evaluation_config or self.evaluation_config,
+ output_config=outputs_config or self.outputs_config,
+ logger_config=logger_config or self.logging_config.evaluation,
+ num_workers=num_workers,
+ output_dir=output_dir,
+ experiment_name=experiment_name,
+ run_name=run_name,
+ formatter=self.formatter if save_predictions else None,
+ )
+
+ def evaluate_predictions(
+ self,
+ annotations: Sequence[data.ClipAnnotation],
+ predictions: Sequence[ClipDetections],
+ output_dir: data.PathLike | None = None,
+ ):
+ """Evaluate an existing set of predictions.
+
+ Parameters
+ ----------
+ annotations : Sequence[data.ClipAnnotation]
+ Reference annotations.
+ predictions : Sequence[ClipDetections]
+ Predictions to compare against the annotations.
+ output_dir : data.PathLike | None, optional
+ Directory where metrics and plots are written.
+
+ Returns
+ -------
+ dict[str, float]
+ Computed evaluation metrics.
+ """
+ from batdetect2.evaluate import save_evaluation_results
+
+ clip_evals = self.evaluator.evaluate(
+ annotations,
+ predictions,
+ )
+
+ metrics = self.evaluator.compute_metrics(clip_evals)
+
+ if output_dir is not None:
+ save_evaluation_results(
+ metrics=metrics,
+ plots=self.evaluator.generate_plots(clip_evals),
+ output_dir=output_dir,
+ )
+
+ return metrics
+
+ def load_audio(self, path: data.PathLike) -> np.ndarray:
+ """Load one audio file into a waveform array.
+
+ Parameters
+ ----------
+ path : data.PathLike
+ Path to the audio file.
+
+ Returns
+ -------
+ np.ndarray
+ Audio waveform loaded from disk.
+ """
+ return self.audio_loader.load_file(path)
+
+ def load_recording(self, recording: data.Recording) -> np.ndarray:
+ """Load one recording object into a waveform array.
+
+ Parameters
+ ----------
+ recording : data.Recording
+ Recording object describing the audio to load.
+
+ Returns
+ -------
+ np.ndarray
+ Audio waveform for the requested recording.
+ """
+ return self.audio_loader.load_recording(recording)
+
+ def load_clip(self, clip: data.Clip) -> np.ndarray:
+ """Load one clip object into a waveform array.
+
+ Parameters
+ ----------
+ clip : data.Clip
+ Clip object describing the section of audio to load.
+
+ Returns
+ -------
+ np.ndarray
+ Audio waveform for the requested clip.
+ """
+ return self.audio_loader.load_clip(clip)
+
+ def get_top_class_name(self, detection: Detection) -> str:
+ """Get the name of the highest-confidence class for one detection.
+
+ Parameters
+ ----------
+ detection : Detection
+ Detection whose class scores will be inspected.
+
+ Returns
+ -------
+ str
+ Class name with the highest score.
+ """
+
+ import numpy as np
+
+ top_index = int(np.argmax(detection.class_scores))
+ return self.targets.class_names[top_index]
+
+ def get_class_scores(
+ self,
+ detection: Detection,
+ *,
+ include_top_class: bool = True,
+ sort_descending: bool = True,
+ ) -> list[tuple[str, float]]:
+ """Get class scores as ``(class_name, score)`` pairs.
+
+ Parameters
+ ----------
+ detection : Detection
+ Detection whose class scores will be returned.
+ include_top_class : bool, optional
+ If ``False``, omit the highest-scoring class from the result.
+ sort_descending : bool, optional
+ If ``True``, sort scores from highest to lowest.
+
+ Returns
+ -------
+ list[tuple[str, float]]
+ Class-score pairs for the detection.
+ """
+
+ scores = [
+ (class_name, float(score))
+ for class_name, score in zip(
+ self.targets.class_names,
+ detection.class_scores,
+ strict=True,
+ )
+ ]
+
+ if sort_descending:
+ scores.sort(key=lambda item: item[1], reverse=True)
+
+ if include_top_class:
+ return scores
+
+ top_class_name = self.get_top_class_name(detection)
+ return [
+ (class_name, score)
+ for class_name, score in scores
+ if class_name != top_class_name
+ ]
+
+ def generate_spectrogram(
+ self,
+ audio: np.ndarray,
+ ) -> torch.Tensor:
+ """Convert a waveform array into a spectrogram tensor.
+
+ Parameters
+ ----------
+ audio : np.ndarray
+ Audio waveform.
+
+ Returns
+ -------
+ torch.Tensor
+ Spectrogram tensor ready for model inference.
+ """
+ import torch
+
+ tensor = torch.tensor(audio).unsqueeze(0)
+ return self.preprocessor(tensor)
+
+ def process_file(
+ self,
+ audio_file: data.PathLike,
+ batch_size: int | None = None,
+ detection_threshold: float | None = None,
+ ) -> ClipDetections:
+ """Run inference on one audio file.
+
+ Parameters
+ ----------
+ audio_file : data.PathLike
+ Path to the audio file.
+ batch_size : int | None, optional
+ Batch size override. If omitted, the inference config value is
+ used.
+ detection_threshold : float | None, optional
+ Detection score threshold override.
+
+ Returns
+ -------
+ ClipDetections
+ Predictions for the full recording.
+ """
+ from soundevent import data
+
+ from batdetect2.postprocess import ClipDetections
+
+ recording = data.Recording.from_file(audio_file, compute_hash=False)
+
+ predictions = self.process_files(
+ [audio_file],
+ batch_size=(
+ batch_size
+ if batch_size is not None
+ else self.inference_config.loader.batch_size
+ ),
+ detection_threshold=detection_threshold,
+ )
+ detections = [
+ detection
+ for prediction in predictions
+ for detection in prediction.detections
+ ]
+
+ return ClipDetections(
+ clip=data.Clip(
+ uuid=recording.uuid,
+ recording=recording,
+ start_time=0,
+ end_time=recording.duration,
+ ),
+ detections=detections,
+ )
+
+    def process_audio(
+        self,
+        audio: np.ndarray,
+        detection_threshold: float | None = None,
+    ) -> list[Detection]:
+        """Detect calls in an in-memory waveform.
+
+        Parameters
+        ----------
+        audio : np.ndarray
+            Audio waveform samples.
+        detection_threshold : float | None, optional
+            Detection score threshold override.
+
+        Returns
+        -------
+        list[Detection]
+            Detected calls.
+        """
+        # Compute the spectrogram, then reuse the spectrogram pipeline.
+        spectrogram = self.generate_spectrogram(audio)
+        return self.process_spectrogram(
+            spectrogram,
+            detection_threshold=detection_threshold,
+        )
+
+    def process_spectrogram(
+        self,
+        spec: torch.Tensor,
+        start_time: float = 0,
+        detection_threshold: float | None = None,
+    ) -> list[Detection]:
+        """Run inference on one spectrogram tensor.
+
+        Parameters
+        ----------
+        spec : torch.Tensor
+            Spectrogram tensor for one recording or clip.
+        start_time : float, optional
+            Start time in seconds used when creating detections.
+        detection_threshold : float | None, optional
+            Detection score threshold override.
+
+        Returns
+        -------
+        list[Detection]
+            Detected calls.
+
+        Raises
+        ------
+        ValueError
+            If a batched spectrogram with more than one item is provided.
+        """
+        if spec.ndim == 4 and spec.shape[0] > 1:
+            raise ValueError("Batched spectrograms not supported.")
+
+        # Add a batch dimension for unbatched (channel, freq, time) input.
+        # NOTE(review): 2-dim input passes through unchanged — confirm the
+        # detector accepts it, or tighten this check.
+        if spec.ndim == 3:
+            spec = spec.unsqueeze(0)
+
+        outputs = self.model.detector(spec)
+
+        # Postprocessor yields one result per batch item; take the only one.
+        detections = self.postprocessor(
+            outputs,
+            detection_threshold=detection_threshold,
+        )[0]
+        return self.output_transform.to_detections(
+            detections=detections,
+            start_time=start_time,
+        )
+
+    def process_directory(
+        self,
+        audio_dir: data.PathLike,
+        detection_threshold: float | None = None,
+    ) -> list[ClipDetections]:
+        """Detect calls in every supported audio file under a directory.
+
+        Parameters
+        ----------
+        audio_dir : data.PathLike
+            Directory containing audio files.
+        detection_threshold : float | None, optional
+            Detection score threshold override.
+
+        Returns
+        -------
+        list[ClipDetections]
+            Predictions for all supported audio files found in the directory.
+        """
+        from soundevent.audio.files import get_audio_files
+
+        # Materialize the listing before dispatching to batch inference.
+        discovered = list(get_audio_files(audio_dir))
+        return self.process_files(
+            discovered,
+            detection_threshold=detection_threshold,
+        )
+
+    def process_files(
+        self,
+        audio_files: Sequence[data.PathLike],
+        batch_size: int | None = None,
+        num_workers: int = 0,
+        audio_config: AudioConfig | None = None,
+        inference_config: InferenceConfig | None = None,
+        output_config: OutputsConfig | None = None,
+        detection_threshold: float | None = None,
+    ) -> list[ClipDetections]:
+        """Run inference on multiple audio files.
+
+        Parameters
+        ----------
+        audio_files : Sequence[data.PathLike]
+            Audio file paths.
+        batch_size : int | None, optional
+            Batch size override.
+        num_workers : int, optional
+            Number of worker processes for audio loading.
+        audio_config : AudioConfig | None, optional
+            Audio config override.
+        inference_config : InferenceConfig | None, optional
+            Inference config override.
+        output_config : OutputsConfig | None, optional
+            Output config override.
+        detection_threshold : float | None, optional
+            Detection score threshold override.
+
+        Returns
+        -------
+        list[ClipDetections]
+            Predictions for each input file.
+        """
+        from batdetect2.inference import process_file_list
+
+        # Any override left as None falls back to this instance's configs.
+        return process_file_list(
+            self.model,
+            audio_files,
+            targets=self.targets,
+            roi_mapper=self.roi_mapper,
+            audio_loader=self.audio_loader,
+            preprocessor=self.preprocessor,
+            output_transform=self.output_transform,
+            batch_size=batch_size,
+            num_workers=num_workers,
+            audio_config=audio_config or self.audio_config,
+            inference_config=inference_config or self.inference_config,
+            output_config=output_config or self.outputs_config,
+            detection_threshold=detection_threshold,
+        )
+
+    def process_clips(
+        self,
+        clips: Sequence[data.Clip],
+        batch_size: int | None = None,
+        num_workers: int = 0,
+        audio_config: AudioConfig | None = None,
+        inference_config: InferenceConfig | None = None,
+        output_config: OutputsConfig | None = None,
+        detection_threshold: float | None = None,
+    ) -> list[ClipDetections]:
+        """Run inference on multiple clip objects.
+
+        Parameters
+        ----------
+        clips : Sequence[data.Clip]
+            Clips to process.
+        batch_size : int | None, optional
+            Batch size override.
+        num_workers : int, optional
+            Number of worker processes for audio loading.
+        audio_config : AudioConfig | None, optional
+            Audio config override.
+        inference_config : InferenceConfig | None, optional
+            Inference config override.
+        output_config : OutputsConfig | None, optional
+            Output config override.
+        detection_threshold : float | None, optional
+            Detection score threshold override.
+
+        Returns
+        -------
+        list[ClipDetections]
+            Predictions for each input clip.
+        """
+        from batdetect2.inference import run_batch_inference
+
+        # Any override left as None falls back to this instance's configs.
+        return run_batch_inference(
+            self.model,
+            clips,
+            targets=self.targets,
+            roi_mapper=self.roi_mapper,
+            audio_loader=self.audio_loader,
+            preprocessor=self.preprocessor,
+            output_transform=self.output_transform,
+            batch_size=batch_size,
+            num_workers=num_workers,
+            audio_config=audio_config or self.audio_config,
+            inference_config=inference_config or self.inference_config,
+            output_config=output_config or self.outputs_config,
+            detection_threshold=detection_threshold,
+        )
+
+    def save_predictions(
+        self,
+        predictions: Sequence[ClipDetections],
+        path: data.PathLike,
+        audio_dir: data.PathLike | None = None,
+        format: str | None = None,
+        config: OutputFormatConfig | None = None,
+    ):
+        """Save predictions to disk in one of the supported output formats.
+
+        Parameters
+        ----------
+        predictions : Sequence[ClipDetections]
+            Predictions to save.
+        path : data.PathLike
+            Output file or directory path, depending on the selected format.
+        audio_dir : data.PathLike | None, optional
+            Audio root directory used when writing relative paths.
+        format : str | None, optional
+            Output format name override.
+        config : OutputFormatConfig | None, optional
+            Output format config override.
+        """
+        from batdetect2.outputs import get_output_formatter
+
+        formatter = self.formatter
+
+        # Build a one-off formatter when either override is provided.
+        if format is not None or config is not None:
+            # Derive the format name from the config when only a config is
+            # given. NOTE(review): if only a name is given, config stays
+            # None — get_output_formatter presumably supplies defaults then;
+            # confirm.
+            format = format or config.name  # type: ignore
+            formatter = get_output_formatter(
+                name=format,
+                targets=self.targets,
+                config=config,
+            )
+
+        outs = formatter.format(predictions)
+        formatter.save(outs, audio_dir=audio_dir, path=path)
+
+    def load_predictions(
+        self,
+        path: data.PathLike,
+        format: str | None = None,
+        config: OutputFormatConfig | None = None,
+    ) -> list[object]:
+        """Load predictions from disk.
+
+        Parameters
+        ----------
+        path : data.PathLike
+            Path to a saved prediction file or directory.
+        format : str | None, optional
+            Output format name override.
+        config : OutputFormatConfig | None, optional
+            Output format config override.
+
+        Returns
+        -------
+        list[object]
+            Loaded prediction objects returned by the selected formatter.
+        """
+        from batdetect2.outputs import get_output_formatter
+
+        formatter = self.formatter
+
+        # Same formatter-override resolution as save_predictions: derive
+        # the name from the config when only a config is supplied.
+        if format is not None or config is not None:
+            format = format or config.name  # type: ignore
+            formatter = get_output_formatter(
+                name=format,
+                targets=self.targets,
+                config=config,
+            )
+
+        return formatter.load(path)
+
+    @classmethod
+    def from_config(
+        cls,
+        model_config: ModelConfig | None = None,
+        targets_config: TargetConfig | None = None,
+        audio_config: AudioConfig | None = None,
+        train_config: TrainingConfig | None = None,
+        evaluation_config: EvaluationConfig | None = None,
+        inference_config: InferenceConfig | None = None,
+        outputs_config: OutputsConfig | None = None,
+        logging_config: AppLoggingConfig | None = None,
+    ) -> "BatDetect2API":
+        """Build an API instance from config objects.
+
+        Use this when you want to create a new model without loading a saved
+        checkpoint.
+
+        Parameters
+        ----------
+        model_config : ModelConfig | None, optional
+            Model config. If omitted, the default model config is used.
+        targets_config : TargetConfig | None, optional
+            Target config. If omitted, the default target config is used.
+        audio_config : AudioConfig | None, optional
+            Audio config. If omitted, the default audio config is used.
+        train_config : TrainingConfig | None, optional
+            Training config. If omitted, the default training config is used.
+        evaluation_config : EvaluationConfig | None, optional
+            Evaluation config. If omitted, the default evaluation config is
+            used.
+        inference_config : InferenceConfig | None, optional
+            Inference config. If omitted, the default inference config is used.
+        outputs_config : OutputsConfig | None, optional
+            Output config. If omitted, the default outputs config is used.
+        logging_config : AppLoggingConfig | None, optional
+            Logging config. If omitted, the default logging config is used.
+
+        Returns
+        -------
+        BatDetect2API
+            Configured API instance.
+        """
+        from batdetect2.audio import AudioConfig, build_audio_loader
+        from batdetect2.evaluate import EvaluationConfig, build_evaluator
+        from batdetect2.inference import InferenceConfig
+        from batdetect2.logging import AppLoggingConfig
+        from batdetect2.models import ModelConfig, build_model
+        from batdetect2.outputs import (
+            OutputsConfig,
+            build_output_formatter,
+            build_output_transform,
+        )
+        from batdetect2.postprocess import build_postprocessor
+        from batdetect2.preprocess import build_preprocessor
+        from batdetect2.targets import (
+            TargetConfig,
+            build_roi_mapping,
+            build_targets,
+        )
+        from batdetect2.train import TrainingConfig
+
+        # Fill in defaults for any config that was not provided.
+        model_config = model_config or ModelConfig()
+        targets_config = targets_config or TargetConfig()
+        audio_config = audio_config or AudioConfig()
+        train_config = train_config or TrainingConfig()
+        evaluation_config = evaluation_config or EvaluationConfig()
+        inference_config = inference_config or InferenceConfig()
+        outputs_config = outputs_config or OutputsConfig()
+        logging_config = logging_config or AppLoggingConfig()
+
+        # Build the target definitions and the ROI geometry mapping.
+        targets = build_targets(config=targets_config)
+        roi_mapper = build_roi_mapping(config=targets_config.roi)
+
+        audio_loader = build_audio_loader(config=audio_config)
+
+        # Shared pre/post-processing pipeline used by the API itself.
+        preprocessor = build_preprocessor(
+            input_samplerate=audio_loader.samplerate,
+            config=model_config.preprocess,
+        )
+
+        postprocessor = build_postprocessor(
+            preprocessor,
+            config=model_config.postprocess,
+        )
+
+        formatter = build_output_formatter(
+            targets,
+            config=outputs_config.format,
+        )
+        output_transform = build_output_transform(
+            config=outputs_config.transform,
+            targets=targets,
+            roi_mapper=roi_mapper,
+        )
+
+        evaluator = build_evaluator(
+            config=evaluation_config,
+            targets=targets,
+            roi_mapper=roi_mapper,
+            transform=output_transform,
+        )
+
+        # NOTE: Build separate instances of preprocessor and postprocessor
+        # to avoid device mismatch errors
+        model = build_model(
+            config=model_config,
+            class_names=targets.class_names,
+            dimension_names=roi_mapper.dimension_names,
+            preprocessor=build_preprocessor(
+                input_samplerate=audio_loader.samplerate,
+                config=model_config.preprocess,
+            ),
+            postprocessor=build_postprocessor(
+                preprocessor,
+                config=model_config.postprocess,
+            ),
+        )
+
+        return cls(
+            model_config=model_config,
+            audio_config=audio_config,
+            train_config=train_config,
+            evaluation_config=evaluation_config,
+            inference_config=inference_config,
+            outputs_config=outputs_config,
+            logging_config=logging_config,
+            targets=targets,
+            roi_mapper=roi_mapper,
+            audio_loader=audio_loader,
+            preprocessor=preprocessor,
+            postprocessor=postprocessor,
+            evaluator=evaluator,
+            model=model,
+            formatter=formatter,
+            output_transform=output_transform,
+        )
+
+    @classmethod
+    def from_checkpoint(
+        cls,
+        path: data.PathLike | str | None = None,
+        audio_config: AudioConfig | None = None,
+        train_config: TrainingConfig | None = None,
+        evaluation_config: EvaluationConfig | None = None,
+        inference_config: InferenceConfig | None = None,
+        outputs_config: OutputsConfig | None = None,
+        logging_config: AppLoggingConfig | None = None,
+    ) -> "BatDetect2API":
+        """Build an API instance from a saved checkpoint.
+
+        Parameters
+        ----------
+        path : data.PathLike | str | None, optional
+            Checkpoint path, bundled checkpoint alias, or Hugging Face URI.
+            If omitted, the default bundled checkpoint is used.
+        audio_config : AudioConfig | None, optional
+            Audio config override.
+        train_config : TrainingConfig | None, optional
+            Training config override.
+        evaluation_config : EvaluationConfig | None, optional
+            Evaluation config override.
+        inference_config : InferenceConfig | None, optional
+            Inference config override.
+        outputs_config : OutputsConfig | None, optional
+            Output config override.
+        logging_config : AppLoggingConfig | None, optional
+            Logging config override.
+
+        Returns
+        -------
+        BatDetect2API
+            Configured API instance.
+        """
+        from batdetect2.audio import AudioConfig, build_audio_loader
+        from batdetect2.evaluate import EvaluationConfig, build_evaluator
+        from batdetect2.inference import InferenceConfig
+        from batdetect2.logging import AppLoggingConfig
+        from batdetect2.outputs import (
+            OutputsConfig,
+            build_output_formatter,
+            build_output_transform,
+        )
+        from batdetect2.postprocess import build_postprocessor
+        from batdetect2.preprocess import build_preprocessor
+        from batdetect2.targets import (
+            build_roi_mapping,
+            build_targets,
+            check_target_compatibility,
+        )
+        from batdetect2.train import load_model_from_checkpoint
+
+        # The checkpoint bundles model weights with the configs used to
+        # train it; model and targets configs always come from there.
+        model, configs = load_model_from_checkpoint(path)
+
+        model_config = configs.model
+        train_config = train_config or configs.train
+
+        # Default the audio config to the sample rate the model expects.
+        audio_config = audio_config or AudioConfig(
+            samplerate=model_config.samplerate,
+        )
+        evaluation_config = evaluation_config or EvaluationConfig()
+        inference_config = inference_config or InferenceConfig()
+        outputs_config = outputs_config or OutputsConfig()
+        logging_config = logging_config or AppLoggingConfig()
+        targets_config = configs.targets
+
+        targets = build_targets(config=targets_config)
+        roi_mapper = build_roi_mapping(config=targets_config.roi)
+
+        # Fail fast if the targets rebuilt from the stored config no longer
+        # match what the checkpointed model was trained with.
+        if not check_target_compatibility(targets, model.class_names):
+            raise ValueError(
+                "Provided targets_config is incompatible with the "
+                "checkpoint model: missing one or more model classes."
+            )
+
+        if model.dimension_names != roi_mapper.dimension_names:
+            raise ValueError(
+                "Provided targets_config is incompatible with the "
+                "checkpoint model: mismatched dimension names."
+            )
+
+        audio_loader = build_audio_loader(config=audio_config)
+
+        preprocessor = build_preprocessor(
+            input_samplerate=audio_loader.samplerate,
+            config=model_config.preprocess,
+        )
+
+        postprocessor = build_postprocessor(
+            preprocessor,
+            config=model_config.postprocess,
+        )
+
+        formatter = build_output_formatter(
+            targets,
+            config=outputs_config.format,
+        )
+
+        output_transform = build_output_transform(
+            config=outputs_config.transform,
+            targets=targets,
+            roi_mapper=roi_mapper,
+        )
+
+        evaluator = build_evaluator(
+            config=evaluation_config,
+            targets=targets,
+            roi_mapper=roi_mapper,
+            transform=output_transform,
+        )
+
+        return cls(
+            model_config=model_config,
+            audio_config=audio_config,
+            train_config=train_config,
+            evaluation_config=evaluation_config,
+            inference_config=inference_config,
+            outputs_config=outputs_config,
+            logging_config=logging_config,
+            targets=targets,
+            roi_mapper=roi_mapper,
+            audio_loader=audio_loader,
+            preprocessor=preprocessor,
+            postprocessor=postprocessor,
+            evaluator=evaluator,
+            model=model,
+            formatter=formatter,
+            output_transform=output_transform,
+        )
+
+    def _set_trainable_parameters(
+        self,
+        trainable: Literal["all", "heads", "classifier_head", "size_head"],
+    ) -> None:
+        """Freeze the detector, then unfreeze the selected parameter group."""
+        detector = self.model.detector
+
+        if trainable == "all":
+            for parameter in detector.parameters():
+                parameter.requires_grad = True
+            return
+
+        # Freeze everything, then selectively re-enable the chosen head(s).
+        for parameter in detector.parameters():
+            parameter.requires_grad = False
+
+        if trainable in {"heads", "classifier_head"}:
+            for parameter in detector.classifier_head.parameters():
+                parameter.requires_grad = True
+
+        if trainable in {"heads", "size_head"}:
+            for parameter in detector.size_head.parameters():
+                parameter.requires_grad = True
diff --git a/src/batdetect2/audio/__init__.py b/src/batdetect2/audio/__init__.py
new file mode 100644
index 00000000..9c4c7e84
--- /dev/null
+++ b/src/batdetect2/audio/__init__.py
@@ -0,0 +1,19 @@
+from batdetect2.audio.clips import ClipConfig, build_clipper
+from batdetect2.audio.loader import (
+ TARGET_SAMPLERATE_HZ,
+ AudioConfig,
+ SoundEventAudioLoader,
+ build_audio_loader,
+)
+from batdetect2.audio.types import AudioLoader, ClipperProtocol
+
+__all__ = [
+ "AudioLoader",
+ "ClipperProtocol",
+ "TARGET_SAMPLERATE_HZ",
+ "AudioConfig",
+ "SoundEventAudioLoader",
+ "build_audio_loader",
+ "ClipConfig",
+ "build_clipper",
+]
diff --git a/src/batdetect2/audio/clips.py b/src/batdetect2/audio/clips.py
new file mode 100644
index 00000000..21390c67
--- /dev/null
+++ b/src/batdetect2/audio/clips.py
@@ -0,0 +1,277 @@
+from typing import Annotated, List, Literal
+
+import numpy as np
+from loguru import logger
+from pydantic import Field
+from soundevent import data
+from soundevent.geometry import compute_bounds, intervals_overlap
+
+from batdetect2.audio.types import ClipperProtocol
+from batdetect2.core import (
+ BaseConfig,
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+
+DEFAULT_TRAIN_CLIP_DURATION = 0.256
+DEFAULT_MAX_EMPTY_CLIP = 0.1
+
+
+__all__ = [
+ "build_clipper",
+ "ClipConfig",
+ "ClipperImportConfig",
+]
+
+
+clipper_registry: Registry[ClipperProtocol, []] = Registry("clipper")
+
+
+@add_import_config(clipper_registry)
+class ClipperImportConfig(ImportConfig):
+    """Use any callable as a clipper.
+
+    Set ``name="import"`` and provide a ``target`` pointing to any
+    callable to use it instead of a built-in option.
+    """
+
+    # Discriminator value distinguishing this variant in config unions.
+    name: Literal["import"] = "import"
+
+
+class RandomClipConfig(BaseConfig):
+    """Configuration for the random sub-clip clipper."""
+
+    name: Literal["random_subclip"] = "random_subclip"
+    # Sub-clip duration in seconds.
+    duration: float = DEFAULT_TRAIN_CLIP_DURATION
+    # If False, sub-clips always start at the clip's start time.
+    random: bool = True
+    # Max seconds a sub-clip may extend past the clip end (empty padding).
+    max_empty: float = DEFAULT_MAX_EMPTY_CLIP
+    # Minimum absolute time overlap required to keep a sound event.
+    min_sound_event_overlap: float = 0
+
+
+class RandomClip:
+    """Clipper that extracts a (possibly random) fixed-duration sub-clip.
+
+    Parameters
+    ----------
+    duration : float, default=0.5
+        Duration of the extracted sub-clip in seconds.
+    max_empty : float, default=0.2
+        Maximum empty (padded) duration allowed past the clip end.
+    random : bool, default=True
+        If False, the sub-clip always starts at the clip's start time.
+    min_sound_event_overlap : float, default=0
+        Minimum absolute time overlap for a sound event annotation to be
+        kept in the sub-clip.
+    """
+
+    def __init__(
+        self,
+        duration: float = 0.5,
+        max_empty: float = 0.2,
+        random: bool = True,
+        min_sound_event_overlap: float = 0,
+    ):
+        super().__init__()
+        self.duration = duration
+        self.random = random
+        self.max_empty = max_empty
+        self.min_sound_event_overlap = min_sound_event_overlap
+
+    def __call__(
+        self,
+        clip_annotation: data.ClipAnnotation,
+    ) -> data.ClipAnnotation:
+        """Return a copy of the annotation restricted to a sub-clip."""
+        subclip = self.get_subclip(clip_annotation.clip)
+        sound_events = select_sound_event_annotations(
+            clip_annotation,
+            subclip,
+            min_overlap=self.min_sound_event_overlap,
+        )
+        return clip_annotation.model_copy(
+            update=dict(
+                clip=subclip,
+                sound_events=sound_events,
+            )
+        )
+
+    def get_subclip(self, clip: data.Clip) -> data.Clip:
+        """Select the sub-clip to extract from ``clip``."""
+        return select_random_subclip(
+            clip,
+            random=self.random,
+            duration=self.duration,
+            max_empty=self.max_empty,
+        )
+
+    @clipper_registry.register(RandomClipConfig)
+    @staticmethod
+    def from_config(config: RandomClipConfig):
+        """Build a RandomClip from its config.
+
+        Fix: forward ``config.random`` so a ``random: false`` config is
+        honored — it was previously dropped and the default (True) used.
+        """
+        return RandomClip(
+            duration=config.duration,
+            random=config.random,
+            max_empty=config.max_empty,
+            min_sound_event_overlap=config.min_sound_event_overlap,
+        )
+
+
+def get_subclip_annotation(
+    clip_annotation: data.ClipAnnotation,
+    random: bool = True,
+    duration: float = 0.5,
+    max_empty: float = 0.2,
+    min_sound_event_overlap: float = 0,
+) -> data.ClipAnnotation:
+    """Restrict a clip annotation to a (possibly random) sub-clip.
+
+    Picks a sub-clip of ``clip_annotation.clip`` and keeps only the
+    sound events that overlap it by at least ``min_sound_event_overlap``
+    seconds.
+    """
+    subclip = select_random_subclip(
+        clip_annotation.clip,
+        random=random,
+        duration=duration,
+        max_empty=max_empty,
+    )
+
+    kept_events = select_sound_event_annotations(
+        clip_annotation,
+        subclip,
+        min_overlap=min_sound_event_overlap,
+    )
+
+    return clip_annotation.model_copy(
+        update=dict(
+            clip=subclip,
+            sound_events=kept_events,
+        )
+    )
+
+
+def select_random_subclip(
+    clip: data.Clip,
+    random: bool = True,
+    duration: float = 0.5,
+    max_empty: float = 0.2,
+) -> data.Clip:
+    """Pick a fixed-duration sub-clip of ``clip``.
+
+    If ``random`` is False, or ``duration`` cannot fit within the clip
+    plus ``max_empty`` seconds of padding, the sub-clip starts at the
+    clip's start time. Otherwise the start is drawn uniformly so the
+    sub-clip ends at most ``max_empty`` seconds past the clip end.
+
+    NOTE(review): the returned end_time may exceed the recording's
+    duration; downstream loading presumably pads — confirm.
+    """
+    start_time = clip.start_time
+    end_time = clip.end_time
+
+    if duration > clip.duration + max_empty or not random:
+        return clip.model_copy(
+            update=dict(
+                start_time=start_time,
+                end_time=start_time + duration,
+            )
+        )
+
+    # Guarded above: high >= low because duration <= clip.duration + max_empty.
+    random_start_time = np.random.uniform(
+        low=start_time,
+        high=end_time + max_empty - duration,
+    )
+
+    return clip.model_copy(
+        update=dict(
+            start_time=random_start_time,
+            end_time=random_start_time + duration,
+        )
+    )
+
+
+def select_sound_event_annotations(
+    clip_annotation: data.ClipAnnotation,
+    subclip: data.Clip,
+    min_overlap: float = 0,
+) -> List[data.SoundEventAnnotation]:
+    """Keep only annotations whose geometry overlaps the sub-clip in time.
+
+    Annotations without geometry are dropped. Overlap is measured on the
+    time axis only and must be at least ``min_overlap`` seconds.
+    """
+    window = (subclip.start_time, subclip.end_time)
+
+    kept: List[data.SoundEventAnnotation] = []
+    for annotation in clip_annotation.sound_events:
+        geometry = annotation.sound_event.geometry
+
+        if geometry is None:
+            continue
+
+        onset, _, offset, _ = compute_bounds(geometry)
+
+        if intervals_overlap(
+            window,
+            (onset, offset),
+            min_absolute_overlap=min_overlap,
+        ):
+            kept.append(annotation)
+
+    return kept
+
+
+class PaddedClipConfig(BaseConfig):
+    """Configuration for the whole-audio padded clipper."""
+
+    name: Literal["whole_audio_padded"] = "whole_audio_padded"
+    # Chunk duration in seconds; clips are padded to a whole multiple of it.
+    chunk_size: float = DEFAULT_TRAIN_CLIP_DURATION
+
+
+class PaddedClip:
+    """Clipper that pads a clip so its duration is a whole number of
+    ``chunk_size`` chunks, keeping all sound events unchanged."""
+
+    def __init__(self, chunk_size: float = DEFAULT_TRAIN_CLIP_DURATION):
+        self.chunk_size = chunk_size
+
+    def __call__(
+        self,
+        clip_annotation: data.ClipAnnotation,
+    ) -> data.ClipAnnotation:
+        """Return a copy of the annotation with the padded clip."""
+        padded = self.get_subclip(clip_annotation.clip)
+        return clip_annotation.model_copy(update=dict(clip=padded))
+
+    def get_subclip(self, clip: data.Clip) -> data.Clip:
+        """Extend the clip end to the next multiple of ``chunk_size``."""
+        chunks = np.ceil(clip.duration / self.chunk_size)
+        padded_duration = float(self.chunk_size * chunks)
+        return clip.model_copy(
+            update=dict(
+                end_time=clip.start_time + padded_duration,
+            )
+        )
+
+    @clipper_registry.register(PaddedClipConfig)
+    @staticmethod
+    def from_config(config: PaddedClipConfig):
+        """Build a PaddedClip from its config."""
+        return PaddedClip(chunk_size=config.chunk_size)
+
+
+class FixedDurationClipConfig(BaseConfig):
+    """Configuration for the fixed-duration clipper."""
+
+    name: Literal["fixed_duration"] = "fixed_duration"
+    # Clip duration in seconds, measured from the clip's start time.
+    duration: float = DEFAULT_TRAIN_CLIP_DURATION
+
+
+class FixedDurationClip:
+    """Clipper that forces every clip to a fixed duration from its start,
+    keeping only the sound events overlapping the resulting window."""
+
+    def __init__(self, duration: float = DEFAULT_TRAIN_CLIP_DURATION):
+        self.duration = duration
+
+    def __call__(
+        self,
+        clip_annotation: data.ClipAnnotation,
+    ) -> data.ClipAnnotation:
+        """Return a copy of the annotation restricted to the fixed window."""
+        fixed = self.get_subclip(clip_annotation.clip)
+        kept = select_sound_event_annotations(
+            clip_annotation,
+            fixed,
+            min_overlap=0,
+        )
+        return clip_annotation.model_copy(
+            update=dict(
+                clip=fixed,
+                sound_events=kept,
+            )
+        )
+
+    def get_subclip(self, clip: data.Clip) -> data.Clip:
+        """Return ``clip`` with end_time set to start_time + duration."""
+        return clip.model_copy(
+            update=dict(
+                end_time=clip.start_time + self.duration,
+            )
+        )
+
+    @clipper_registry.register(FixedDurationClipConfig)
+    @staticmethod
+    def from_config(config: FixedDurationClipConfig):
+        """Build a FixedDurationClip from its config."""
+        return FixedDurationClip(duration=config.duration)
+
+
+ClipConfig = Annotated[
+ RandomClipConfig | PaddedClipConfig | FixedDurationClipConfig,
+ Field(discriminator="name"),
+]
+
+
+def build_clipper(config: ClipConfig | None = None) -> ClipperProtocol:
+    """Build a clipper from config, defaulting to random sub-clipping."""
+    if config is None:
+        config = RandomClipConfig()
+
+    logger.opt(lazy=True).debug(
+        "Building clipper with config: \n{}",
+        lambda: config.to_yaml_string(),
+    )
+    return clipper_registry.build(config)
diff --git a/src/batdetect2/audio/loader.py b/src/batdetect2/audio/loader.py
new file mode 100644
index 00000000..60dc8a9f
--- /dev/null
+++ b/src/batdetect2/audio/loader.py
@@ -0,0 +1,316 @@
+import numpy as np
+from numpy.typing import DTypeLike
+from pydantic import Field
+from scipy.signal import resample, resample_poly
+from soundevent import audio, data
+from soundfile import LibsndfileError
+
+from batdetect2.audio.types import AudioLoader
+from batdetect2.core import BaseConfig
+
+__all__ = [
+ "SoundEventAudioLoader",
+ "build_audio_loader",
+ "load_file_audio",
+ "load_recording_audio",
+ "load_clip_audio",
+ "resample_audio",
+]
+
+TARGET_SAMPLERATE_HZ = 256_000
+"""Default target sample rate in Hz used if resampling is enabled."""
+
+
+class ResampleConfig(BaseConfig):
+    """Configuration for audio resampling.
+
+    Attributes
+    ----------
+    enabled : bool, default=True
+        Whether to resample the audio to the target sample rate. If
+        ``False``, the audio is returned at its original sample rate.
+    method : str, default="poly"
+        The resampling algorithm to use. Options:
+
+        - ``"poly"``: Polyphase resampling via
+          ``scipy.signal.resample_poly``. Generally fast and accurate.
+        - ``"fourier"``: FFT-based resampling via
+          ``scipy.signal.resample``. May be preferred for non-integer
+          resampling ratios.
+    """
+
+    enabled: bool = True
+    # NOTE(review): not validated here; an unknown value only surfaces as
+    # NotImplementedError when resample_audio is called.
+    method: str = "poly"
+
+
+class AudioConfig(BaseConfig):
+    """Configuration for loading and initial audio preprocessing."""
+
+    # Target sample rate in Hz; must be positive.
+    samplerate: int = Field(default=TARGET_SAMPLERATE_HZ, gt=0)
+    # Resampling behavior applied when loading audio.
+    resample: ResampleConfig = Field(default_factory=ResampleConfig)
+
+
+def build_audio_loader(config: AudioConfig | None = None) -> AudioLoader:
+    """Factory function to create an AudioLoader based on configuration."""
+    if config is None:
+        config = AudioConfig()
+
+    return SoundEventAudioLoader(
+        samplerate=config.samplerate,
+        config=config.resample,
+    )
+
+
+class SoundEventAudioLoader(AudioLoader):
+    """Concrete implementation of the `AudioLoader`."""
+
+    def __init__(
+        self,
+        samplerate: int = TARGET_SAMPLERATE_HZ,
+        config: ResampleConfig | None = None,
+    ):
+        # Target sample rate forwarded to every load_* call.
+        self.samplerate = samplerate
+        # Resampling behavior; defaults to an enabled ResampleConfig.
+        self.config = config or ResampleConfig()
+
+    def load_file(
+        self,
+        path: data.PathLike,
+        audio_dir: data.PathLike | None = None,
+    ) -> np.ndarray:
+        """Load and preprocess audio directly from a file path."""
+        return load_file_audio(
+            path,
+            samplerate=self.samplerate,
+            config=self.config,
+            audio_dir=audio_dir,
+        )
+
+    def load_recording(
+        self,
+        recording: data.Recording,
+        audio_dir: data.PathLike | None = None,
+    ) -> np.ndarray:
+        """Load and preprocess the entire audio for a Recording object."""
+        return load_recording_audio(
+            recording,
+            samplerate=self.samplerate,
+            config=self.config,
+            audio_dir=audio_dir,
+        )
+
+    def load_clip(
+        self,
+        clip: data.Clip,
+        audio_dir: data.PathLike | None = None,
+    ) -> np.ndarray:
+        """Load and preprocess the audio segment defined by a Clip object."""
+        return load_clip_audio(
+            clip,
+            samplerate=self.samplerate,
+            config=self.config,
+            audio_dir=audio_dir,
+        )
+
+
+def load_file_audio(
+    path: data.PathLike,
+    samplerate: int | None = None,
+    config: ResampleConfig | None = None,
+    audio_dir: data.PathLike | None = None,
+    dtype: DTypeLike = np.float32,
+) -> np.ndarray:
+    """Load and preprocess audio from a file path using specified config."""
+    try:
+        recording = data.Recording.from_file(path)
+    except LibsndfileError as e:
+        # Re-raise as FileNotFoundError so callers get a uniform error
+        # type for missing or unreadable audio files.
+        raise FileNotFoundError(
+            f"Could not load the recording at path: {path}. Error: {e}"
+        ) from e
+
+    return load_recording_audio(
+        recording,
+        samplerate=samplerate,
+        config=config,
+        dtype=dtype,
+        audio_dir=audio_dir,
+    )
+
+
+def load_recording_audio(
+    recording: data.Recording,
+    samplerate: int | None = None,
+    config: ResampleConfig | None = None,
+    audio_dir: data.PathLike | None = None,
+    dtype: DTypeLike = np.float32,
+) -> np.ndarray:
+    """Load the full audio of a recording by delegating to load_clip_audio.
+
+    A clip spanning the whole recording is constructed and then loaded
+    with the given resampling configuration.
+    """
+    whole_recording = data.Clip(
+        recording=recording,
+        start_time=0,
+        end_time=recording.duration,
+    )
+    return load_clip_audio(
+        whole_recording,
+        samplerate=samplerate,
+        config=config,
+        dtype=dtype,
+        audio_dir=audio_dir,
+    )
+
+
+def load_clip_audio(
+    clip: data.Clip,
+    samplerate: int | None = None,
+    config: ResampleConfig | None = None,
+    audio_dir: data.PathLike | None = None,
+    dtype: DTypeLike = np.float32,
+) -> np.ndarray:
+    """Load and preprocess a specific audio clip segment based on config.
+
+    Only the first channel is loaded; the result is cast to ``dtype``
+    and resampled to ``samplerate`` when resampling is enabled.
+
+    Raises
+    ------
+    FileNotFoundError
+        If the underlying recording cannot be read.
+    """
+    try:
+        wav = (
+            audio.load_clip(clip, audio_dir=audio_dir)
+            .sel(channel=0)
+            .astype(dtype)
+        )
+    except LibsndfileError as e:
+        raise FileNotFoundError(
+            f"Could not load the recording at path: {clip.recording.path}. "
+            f"Error: {e}"
+        ) from e
+
+    if not config or not config.enabled or samplerate is None:
+        return wav.data.astype(dtype)
+
+    # Fix: round instead of truncating. The time step is a float, so
+    # 1 / step can land fractionally below the true rate and int() would
+    # yield e.g. 44099 instead of 44100, silently skewing resampling.
+    sr = int(round(1 / wav.time.attrs["step"]))
+    return resample_audio(
+        wav.data,
+        sr=sr,
+        samplerate=samplerate,
+        method=config.method,
+    )
+
+
+def resample_audio(
+    wav: np.ndarray,
+    sr: int,
+    samplerate: int = TARGET_SAMPLERATE_HZ,
+    method: str = "poly",
+) -> np.ndarray:
+    """Resample an audio waveform to a target sample rate.
+
+    Parameters
+    ----------
+    wav : np.ndarray
+        Input waveform array; the last axis is assumed to be time.
+    sr : int
+        Original sample rate of ``wav`` in Hz.
+    samplerate : int, default=256000
+        Target sample rate in Hz.
+    method : str, default="poly"
+        Resampling algorithm: ``"poly"`` (polyphase) or ``"fourier"``
+        (FFT-based).
+
+    Returns
+    -------
+    np.ndarray
+        Resampled waveform; the input is returned unchanged when
+        ``sr == samplerate``.
+
+    Raises
+    ------
+    NotImplementedError
+        If ``method`` is not ``"poly"`` or ``"fourier"``.
+    """
+    if sr == samplerate:
+        return wav
+
+    if method == "poly":
+        return resample_audio_poly(
+            wav,
+            sr_orig=sr,
+            sr_new=samplerate,
+        )
+
+    if method == "fourier":
+        return resample_audio_fourier(
+            wav,
+            sr_orig=sr,
+            sr_new=samplerate,
+        )
+
+    raise NotImplementedError(
+        f"Resampling method '{method}' not implemented"
+    )
+
+
+def resample_audio_poly(
+    array: np.ndarray,
+    sr_orig: int,
+    sr_new: int,
+    axis: int = -1,
+) -> np.ndarray:
+    """Resample a numpy array using `scipy.signal.resample_poly`.
+
+    Polyphase filtering; well suited when the rate ratio reduces to a
+    small rational number.
+
+    Parameters
+    ----------
+    array : np.ndarray
+        The input array to resample.
+    sr_orig : int
+        The original sample rate in Hz.
+    sr_new : int
+        The target sample rate in Hz.
+    axis : int, default=-1
+        The axis of ``array`` along which to resample.
+
+    Returns
+    -------
+    np.ndarray
+        The array resampled to the target sample rate.
+    """
+    # Reduce the up/down factors by their greatest common divisor so the
+    # polyphase filter works with the smallest equivalent ratio.
+    common = np.gcd(sr_orig, sr_new)
+    up = sr_new // common
+    down = sr_orig // common
+    return resample_poly(array, up, down, axis=axis)
+
+
+def resample_audio_fourier(
+    array: np.ndarray,
+    sr_orig: int,
+    sr_new: int,
+    axis: int = -1,
+) -> np.ndarray:
+    """Resample a numpy array using ``scipy.signal.resample``.
+
+    This method uses FFTs to resample the signal.
+
+    Parameters
+    ----------
+    array : np.ndarray
+        The input array to resample.
+    sr_orig : int
+        The original sample rate in Hz.
+    sr_new : int
+        The target sample rate in Hz.
+    axis : int, default=-1
+        The axis of ``array`` along which to resample.
+
+    Returns
+    -------
+    np.ndarray
+        The array resampled to the target sample rate.
+    """
+    # Fix: round instead of truncating with int(). Floating-point
+    # representation of the ratio can make n * ratio land just below the
+    # exact integer (e.g. 99.99999...), and int() would drop a sample.
+    num_samples = round(array.shape[axis] * sr_new / sr_orig)
+    return resample(
+        array,
+        num_samples,
+        axis=axis,
+    )
diff --git a/src/batdetect2/audio/types.py b/src/batdetect2/audio/types.py
new file mode 100644
index 00000000..87d46f2a
--- /dev/null
+++ b/src/batdetect2/audio/types.py
@@ -0,0 +1,40 @@
+from typing import Protocol
+
+import numpy as np
+from soundevent import data
+
+__all__ = [
+ "AudioLoader",
+ "ClipperProtocol",
+]
+
+
+class AudioLoader(Protocol):
+    """Interface for loading audio from files, recordings, or clips."""
+
+    # Sample rate, in Hz, that loaded audio is returned at.
+    samplerate: int
+
+    def load_file(
+        self,
+        path: data.PathLike,
+        audio_dir: data.PathLike | None = None,
+    ) -> np.ndarray: ...
+
+    def load_recording(
+        self,
+        recording: data.Recording,
+        audio_dir: data.PathLike | None = None,
+    ) -> np.ndarray: ...
+
+    def load_clip(
+        self,
+        clip: data.Clip,
+        audio_dir: data.PathLike | None = None,
+    ) -> np.ndarray: ...
+
+
+class ClipperProtocol(Protocol):
+    """Interface for objects that restrict clip annotations to sub-clips."""
+
+    def __call__(
+        self,
+        clip_annotation: data.ClipAnnotation,
+    ) -> data.ClipAnnotation: ...
+
+    def get_subclip(self, clip: data.Clip) -> data.Clip: ...
diff --git a/src/batdetect2/cli/__init__.py b/src/batdetect2/cli/__init__.py
new file mode 100644
index 00000000..69d75d7c
--- /dev/null
+++ b/src/batdetect2/cli/__init__.py
@@ -0,0 +1,21 @@
+from batdetect2.cli.base import cli
+from batdetect2.cli.compat import detect
+from batdetect2.cli.data import data
+from batdetect2.cli.evaluate import evaluate_command
+from batdetect2.cli.finetune import finetune_command
+from batdetect2.cli.inference import process
+from batdetect2.cli.train import train_command
+
+__all__ = [
+ "cli",
+ "detect",
+ "data",
+ "train_command",
+ "finetune_command",
+ "evaluate_command",
+ "process",
+]
+
+
+if __name__ == "__main__":
+ cli()
diff --git a/src/batdetect2/cli/ascii.py b/src/batdetect2/cli/ascii.py
new file mode 100644
index 00000000..09c7a694
--- /dev/null
+++ b/src/batdetect2/cli/ascii.py
@@ -0,0 +1,22 @@
+BATDETECT_ASCII_ART = """ .
+ =#%: .%%#
+ :%%%: .%%%%.
+ %%%%.-===::%%%%*
+ =%%%%+++++++%%%#.
+ -: .%%%#====+++#%%# .-
+ .+***= . =++. : .=*+#%*= :***.
+ =+****+++==:%+#=+% *##%%%%*=##*#**-=
+ ++***+**+=: ##.. +##%%########**++
+ .++*****#*+- :*:++ ##%#%%%%%####**++
+ .++***+**++++- :#%%%%%####*##***+=
+ .+++***+#+++*########%%%##%#+*****++:
+ .=++++++*+++##%##%%####%%##*:+****+=
+ =++++++====*#%%#%###%%###- +***+++.
+ .+*++++= =+==##########= :****++.
+ =++*+:. .:=#####= .++**++-
+ .****: . -+**++=
+ *###= .****==
+ .#*#- **#*:
+ -### -*##.
+ +*= *#*
+"""
diff --git a/src/batdetect2/cli/base.py b/src/batdetect2/cli/base.py
new file mode 100644
index 00000000..9a991e53
--- /dev/null
+++ b/src/batdetect2/cli/base.py
@@ -0,0 +1,40 @@
+"""BatDetect2 command line interface."""
+
+import click
+
+from batdetect2.cli.ascii import BATDETECT_ASCII_ART
+
+__all__ = [
+ "cli",
+]
+
+
+INFO_STR = """
+BatDetect2
+ Wrap paths that contain spaces in quotes.
+"""
+
+
+@click.group(invoke_without_command=True)
+@click.option(
+ "-v",
+ "--verbose",
+ count=True,
+ help="Increase verbosity. -v for INFO, -vv for DEBUG.",
+)
+@click.pass_context
+def cli(ctx: click.Context, verbose: int = 0):
+ """Run the BatDetect2 CLI.
+
+ Use subcommands to run processing, training, evaluation, and dataset
+ utilities.
+ """
+
+ if ctx.invoked_subcommand is None:
+ click.echo(BATDETECT_ASCII_ART)
+ click.echo(ctx.get_help())
+ ctx.exit()
+
+ from batdetect2.logging import enable_logging
+
+ enable_logging(verbose)
diff --git a/batdetect2/cli.py b/src/batdetect2/cli/compat.py
similarity index 70%
rename from batdetect2/cli.py
rename to src/batdetect2/cli/compat.py
index 8379a004..238e775c 100644
--- a/batdetect2/cli.py
+++ b/src/batdetect2/cli/compat.py
@@ -1,32 +1,24 @@
-"""BatDetect2 command line interface."""
-
import os
import click
-from batdetect2 import api
-from batdetect2.detector.parameters import DEFAULT_MODEL_PATH
-from batdetect2.types import ProcessingConfiguration
-from batdetect2.utils.detector_utils import save_results_to_file
-
-CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
-
-
-INFO_STR = """
-BatDetect2 - Detection and Classification
- Assumes audio files are mono, not stereo.
- Spaces in the input paths will throw an error. Wrap in quotes.
- Input files should be short in duration e.g. < 30 seconds.
-"""
-
+from batdetect2.cli.base import cli
-@click.group()
-def cli():
- """BatDetect2 - Bat Call Detection and Classification."""
- click.echo(INFO_STR)
+DEFAULT_MODEL_PATH = os.path.join(
+ os.path.dirname(os.path.dirname(__file__)),
+ "models",
+ "checkpoints",
+ "Net2DFast_UK_same.pth.tar",
+)
-@cli.command()
+@cli.command(
+ short_help="Legacy detection command.",
+ epilog=(
+ "Deprecated workflow. Prefer `batdetect2 process directory` for "
+ "new analyses."
+ ),
+)
@click.argument(
"audio_dir",
type=click.Path(exists=True),
@@ -45,12 +37,6 @@ def cli():
default=False,
help="Extracts CNN call features",
)
-@click.option(
- "--chunk_size",
- type=float,
- default=2,
- help="Specifies the duration of chunks in seconds. BatDetect2 will divide longer files into smaller chunks and process them independently. Larger chunks increase computation time and memory usage but may provide more contextual information for inference.",
-)
@click.option(
"--spec_features",
is_flag=True,
@@ -86,10 +72,12 @@ def detect(
ann_dir: str,
detection_threshold: float,
time_expansion_factor: int,
- chunk_size: float,
**args,
):
- """Detect bat calls in files in AUDIO_DIR and save predictions to ANN_DIR.
+ """Legacy detection command for directory-based inference.
+
+ Detect bat calls in files in `AUDIO_DIR` and save predictions to
+ `ANN_DIR`.
DETECTION_THRESHOLD is the detection threshold. All predictions with a
score below this threshold will be discarded. Values between 0 and 1.
@@ -99,7 +87,21 @@ def detect(
Spaces in the input paths will throw an error. Wrap in quotes.
Input files should be short in duration e.g. < 30 seconds.
+
+ Note
+ ----
+ This command is kept for backwards compatibility. Prefer
+ `batdetect2 process directory` for new workflows.
"""
+ from batdetect2 import api
+ from batdetect2.utils.detector_utils import save_results_to_file
+
+ message = (
+ "The `batdetect2 detect` command is deprecated. Prefer "
+ "`batdetect2 process directory` for new analyses."
+ )
+ click.secho(f"WARNING: {message}", fg="yellow", err=True)
+
click.echo(f"Loading model: {args['model_path']}")
model, params = api.load_model(args["model_path"])
@@ -115,7 +117,7 @@ def detect(
**args,
"time_expansion": time_expansion_factor,
"spec_slices": False,
- "chunk_size": chunk_size,
+ "chunk_size": 2,
"detection_threshold": detection_threshold,
}
)
@@ -149,13 +151,8 @@ def detect(
click.echo(f" {err}")
-def print_config(config: ProcessingConfiguration):
- """Print the processing configuration."""
+def print_config(config):
+ """Print the processing configuration values."""
click.echo("\nProcessing Configuration:")
click.echo(f"Time Expansion Factor: {config.get('time_expansion')}")
click.echo(f"Detection Threshold: {config.get('detection_threshold')}")
- click.echo(f"Chunk Size: {config.get('chunk_size')}s")
-
-
-if __name__ == "__main__":
- cli()
diff --git a/src/batdetect2/cli/data.py b/src/batdetect2/cli/data.py
new file mode 100644
index 00000000..d488883b
--- /dev/null
+++ b/src/batdetect2/cli/data.py
@@ -0,0 +1,205 @@
+from pathlib import Path
+
+import click
+
+from batdetect2.cli.base import cli
+
+__all__ = ["data"]
+
+
+@cli.group(short_help="Inspect and manage datasets.")
+def data():
+ """Inspect and manage dataset configuration files."""
+
+
+@data.command(short_help="Print dataset summary information.")
+@click.argument(
+ "dataset_config",
+ type=click.Path(exists=True),
+)
+@click.option(
+ "--field",
+ type=str,
+ help=(
+ "Nested field name that contains dataset configuration. "
+ "Use this when the config is wrapped in a larger file."
+ ),
+)
+@click.option(
+ "--targets",
+ "targets_path",
+ type=click.Path(exists=True),
+ help=(
+ "Path to targets config file. If provided, a per-class summary "
+ "table is printed."
+ ),
+)
+@click.option(
+ "--base-dir",
+ type=click.Path(exists=True),
+ help=(
+ "Base directory used to resolve relative recording and annotation "
+ "paths in the dataset config."
+ ),
+)
+def summary(
+ dataset_config: Path,
+ field: str | None = None,
+ targets_path: Path | None = None,
+ base_dir: Path | None = None,
+):
+ """Show dataset size and optional class summary.
+
+ Prints the number of annotated clips. If `--targets` is provided, it also
+ prints a per-class summary table based on the configured targets.
+ """
+ from batdetect2.data import compute_class_summary, load_dataset_from_config
+ from batdetect2.targets import load_targets
+
+ base_dir = base_dir or Path.cwd()
+
+ dataset = load_dataset_from_config(
+ dataset_config,
+ field=field,
+ base_dir=base_dir,
+ )
+
+ click.echo(f"Number of annotated clips: {len(dataset)}")
+
+ if targets_path is None:
+ return
+
+ targets = load_targets(targets_path)
+
+ summary = compute_class_summary(dataset, targets)
+
+ click.echo(summary.sort_values("class_name").to_markdown())
+
+
+@data.command(short_help="Convert dataset config to annotation set.")
+@click.argument(
+ "dataset_config",
+ type=click.Path(exists=True),
+)
+@click.option(
+ "--field",
+ type=str,
+ help=(
+ "Nested field name that contains dataset configuration. "
+ "Use this when the config is wrapped in a larger file."
+ ),
+)
+@click.option(
+ "--output",
+ type=click.Path(exists=False),
+ default="annotations.json",
+)
+@click.option(
+ "--base-dir",
+ type=click.Path(exists=True),
+ help=(
+ "Base directory used to resolve relative recording and annotation "
+ "paths in the dataset config."
+ ),
+)
+@click.option(
+ "--audio-dir",
+ type=click.Path(exists=True),
+ help=(
+ "Directory containing audio files. Output annotation paths are "
+ "made relative to this directory."
+ ),
+)
+@click.option(
+ "--add-source-tag",
+ is_flag=True,
+ help=(
+ "Add a source tag to each clip annotation. This is useful for "
+ "downstream tools that need to know which source the annotations "
+ "came from."
+ ),
+)
+@click.option(
+ "--include-sources",
+ type=str,
+ multiple=True,
+ help=(
+ "Only include sources with the specified names. If provided, only "
+ "sources with matching names will be included in the output."
+ ),
+)
+@click.option(
+ "--exclude-sources",
+ type=str,
+ multiple=True,
+ help=(
+ "Exclude sources with the specified names. If provided, sources with "
+ "matching names will be excluded from the output."
+ ),
+)
+@click.option(
+ "--apply-transforms/--no-apply-transforms",
+ default=True,
+ help=(
+ "Apply any configured sound event transforms to the annotations. "
+ "Defaults to True."
+ ),
+)
+@click.option(
+ "--apply-filters/--no-apply-filters",
+ default=True,
+ help=(
+ "Apply any configured sound event filters to the annotations. "
+ "Defaults to True."
+ ),
+)
+def convert(
+ dataset_config: Path,
+ field: str | None = None,
+ output: Path = Path("annotations.json"),
+ base_dir: Path | None = None,
+ audio_dir: Path | None = None,
+ add_source_tag: bool = True,
+ include_sources: list[str] | None = None,
+ exclude_sources: list[str] | None = None,
+ apply_transforms: bool = True,
+ apply_filters: bool = True,
+):
+ """Convert a dataset config into soundevent annotation-set format.
+
+ Writes a single annotation-set file that can be used by downstream tools.
+ Use `--audio-dir` to control relative audio path handling in the output.
+ """
+ from soundevent import data, io
+
+ from batdetect2.data import load_dataset, load_dataset_config
+
+ base_dir = base_dir or Path.cwd()
+
+ config = load_dataset_config(dataset_config, field=field)
+
+ dataset = load_dataset(
+ config,
+ base_dir=base_dir,
+ add_source_tag=add_source_tag,
+ include_sources=include_sources,
+ exclude_sources=exclude_sources,
+ apply_transforms=apply_transforms,
+ apply_filters=apply_filters,
+ )
+
+ annotation_set = data.AnnotationSet(
+ clip_annotations=list(dataset),
+ name=config.name,
+ description=config.description,
+ )
+
+ if audio_dir:
+ audio_dir = Path(audio_dir)
+
+ if not audio_dir.is_absolute():
+ audio_dir = audio_dir.resolve()
+
+ click.echo(f"Using audio directory: {audio_dir}")
+
+ io.save(annotation_set, output, audio_dir=audio_dir)
diff --git a/src/batdetect2/cli/evaluate.py b/src/batdetect2/cli/evaluate.py
new file mode 100644
index 00000000..c2db7e9f
--- /dev/null
+++ b/src/batdetect2/cli/evaluate.py
@@ -0,0 +1,162 @@
+from pathlib import Path
+
+import click
+from loguru import logger
+
+from batdetect2.cli.base import cli
+
+__all__ = ["evaluate_command"]
+
+
+DEFAULT_OUTPUT_DIR = Path("outputs") / "evaluation"
+
+
+@cli.command(name="evaluate", short_help="Evaluate a model checkpoint.")
+@click.argument("test_dataset", type=click.Path(exists=True))
+@click.option(
+ "--model",
+ "model_path",
+ type=str,
+ help=(
+ "Path to a checkpoint, checkpoint alias, or a Hugging Face "
+ "URI to fine-tune from. Defaults to uk_same"
+ ),
+)
+@click.option(
+ "--audio-config",
+ type=click.Path(exists=True),
+ help="Path to an audio config file.",
+)
+@click.option(
+ "--evaluation-config",
+ type=click.Path(exists=True),
+ help="Path to an evaluation config file.",
+)
+@click.option(
+ "--inference-config",
+ type=click.Path(exists=True),
+ help="Path to an inference config file.",
+)
+@click.option(
+ "--outputs-config",
+ type=click.Path(exists=True),
+ help="Path to an outputs config file.",
+)
+@click.option(
+ "--logging-config",
+ type=click.Path(exists=True),
+ help="Path to a logging config file.",
+)
+@click.option(
+ "--base-dir",
+ type=click.Path(),
+ default=Path.cwd(),
+ show_default=True,
+ help=(
+ "Base directory used to resolve relative paths in the dataset "
+ "configuration."
+ ),
+)
+@click.option(
+ "--output-dir",
+ type=click.Path(),
+ default=DEFAULT_OUTPUT_DIR,
+ show_default=True,
+ help="Directory where evaluation outputs are written.",
+)
+@click.option(
+ "--experiment-name",
+ type=str,
+ help="Experiment name used for logging backends.",
+)
+@click.option(
+ "--run-name",
+ type=str,
+ help="Run name used for logging backends.",
+)
+@click.option(
+ "--workers",
+ "num_workers",
+ type=int,
+ help="Number of worker processes for dataset loading.",
+ default=0,
+)
+def evaluate_command(
+ test_dataset: Path,
+ model_path: str | None = None,
+ base_dir: Path | None = None,
+ audio_config: Path | None = None,
+ evaluation_config: Path | None = None,
+ inference_config: Path | None = None,
+ outputs_config: Path | None = None,
+ logging_config: Path | None = None,
+ output_dir: Path = DEFAULT_OUTPUT_DIR,
+ num_workers: int = 0,
+ experiment_name: str | None = None,
+ run_name: str | None = None,
+):
+ """Evaluate a checkpoint on a labelled test dataset.
+
+ This command loads a checkpoint, runs evaluation on ``test_dataset``, and
+ writes metrics to ``output_dir``.
+ """
+ from batdetect2.api_v2 import BatDetect2API
+ from batdetect2.audio import AudioConfig
+ from batdetect2.data import load_dataset_from_config
+ from batdetect2.evaluate import EvaluationConfig
+ from batdetect2.inference import InferenceConfig
+ from batdetect2.logging import AppLoggingConfig
+ from batdetect2.outputs import OutputsConfig
+
+ logger.info("Initiating evaluation process...")
+
+ test_annotations = load_dataset_from_config(
+ test_dataset,
+ base_dir=base_dir,
+ )
+
+ logger.debug(
+ "Loaded {num_annotations} test examples",
+ num_annotations=len(test_annotations),
+ )
+
+ audio_conf = (
+ AudioConfig.load(audio_config) if audio_config is not None else None
+ )
+ eval_conf = (
+ EvaluationConfig.load(evaluation_config)
+ if evaluation_config is not None
+ else None
+ )
+ inference_conf = (
+ InferenceConfig.load(inference_config)
+ if inference_config is not None
+ else None
+ )
+ outputs_conf = (
+ OutputsConfig.load(outputs_config)
+ if outputs_config is not None
+ else None
+ )
+ logging_conf = (
+ AppLoggingConfig.load(logging_config)
+ if logging_config is not None
+ else None
+ )
+
+ api = BatDetect2API.from_checkpoint(
+ model_path,
+ audio_config=audio_conf,
+ evaluation_config=eval_conf,
+ inference_config=inference_conf,
+ outputs_config=outputs_conf,
+ logging_config=logging_conf,
+ )
+
+ api.evaluate(
+ test_annotations,
+ num_workers=num_workers,
+ output_dir=output_dir,
+ experiment_name=experiment_name,
+ run_name=run_name,
+ )
diff --git a/src/batdetect2/cli/finetune.py b/src/batdetect2/cli/finetune.py
new file mode 100644
index 00000000..c98f23ab
--- /dev/null
+++ b/src/batdetect2/cli/finetune.py
@@ -0,0 +1,212 @@
+from pathlib import Path
+from typing import Literal
+
+import click
+from loguru import logger
+
+from batdetect2.cli.base import cli
+
+__all__ = ["finetune_command"]
+
+
+@cli.command(
+ name="finetune", short_help="Fine-tune a checkpoint on new targets."
+)
+@click.argument("train_dataset", type=click.Path(exists=True))
+@click.option(
+ "--targets",
+ "targets_config",
+ required=True,
+ type=click.Path(exists=True),
+ help="Path to the new targets config file.",
+)
+@click.option(
+ "--model",
+ "model_path",
+ type=str,
+ help=(
+ "Path to a checkpoint, checkpoint alias, or a Hugging Face "
+ "URI to fine-tune from. Defaults to uk_same"
+ ),
+)
+@click.option(
+ "--val-dataset",
+ type=click.Path(exists=True),
+ help="Path to validation dataset config file.",
+)
+@click.option(
+ "--base-dir",
+ type=click.Path(exists=True),
+ help=(
+ "Base directory used to resolve relative paths inside the training "
+ "and validation dataset configs."
+ ),
+)
+@click.option(
+ "--training-config",
+ type=click.Path(exists=True),
+ help="Path to training config file.",
+)
+@click.option(
+ "--audio-config",
+ type=click.Path(exists=True),
+ help="Path to audio config file.",
+)
+@click.option(
+ "--logging-config",
+ type=click.Path(exists=True),
+ help="Path to logging config file.",
+)
+@click.option(
+ "--trainable",
+ type=click.Choice(["all", "heads", "classifier_head", "size_head"]),
+ default="heads",
+ show_default=True,
+ help="Which model parameters remain trainable during fine-tuning.",
+)
+@click.option(
+ "--ckpt-dir",
+ type=click.Path(exists=True),
+ help="Directory where checkpoints are saved.",
+)
+@click.option(
+ "--log-dir",
+ type=click.Path(exists=True),
+ help="Directory where logs are written.",
+)
+@click.option(
+ "--train-workers",
+ type=int,
+ default=0,
+ help="Number of worker processes for training data loading.",
+)
+@click.option(
+ "--val-workers",
+ type=int,
+ default=0,
+ help="Number of worker processes for validation data loading.",
+)
+@click.option(
+ "--num-epochs",
+ type=int,
+ help="Maximum number of training epochs.",
+)
+@click.option(
+ "--experiment-name",
+ type=str,
+ help="Experiment name used for logging backends.",
+)
+@click.option(
+ "--run-name",
+ type=str,
+ help="Run name used for logging backends.",
+)
+@click.option(
+ "--seed",
+ type=int,
+ help="Random seed used for reproducibility.",
+)
+def finetune_command(
+ train_dataset: Path,
+ targets_config: Path,
+ model_path: str | None = None,
+ val_dataset: Path | None = None,
+ ckpt_dir: Path | None = None,
+ log_dir: Path | None = None,
+ base_dir: Path | None = None,
+ training_config: Path | None = None,
+ audio_config: Path | None = None,
+ logging_config: Path | None = None,
+ trainable: Literal[
+ "all", "heads", "classifier_head", "size_head"
+ ] = "heads",
+ seed: int | None = None,
+ num_epochs: int | None = None,
+ train_workers: int = 0,
+ val_workers: int = 0,
+ experiment_name: str | None = None,
+ run_name: str | None = None,
+):
+ """Fine-tune a BatDetect2 checkpoint on a new target definition."""
+ from batdetect2.api_v2 import BatDetect2API
+ from batdetect2.audio import AudioConfig
+ from batdetect2.data import load_dataset, load_dataset_config
+ from batdetect2.logging import AppLoggingConfig
+ from batdetect2.targets import TargetConfig
+ from batdetect2.train import TrainingConfig
+ from batdetect2.train.logging import (
+ DatasetConfigArtifact,
+ DatasetConfigArtifactLogging,
+ )
+
+ logger.info("Initiating fine-tuning process...")
+
+ target_conf = TargetConfig.load(targets_config)
+ train_conf = (
+ TrainingConfig.load(training_config)
+ if training_config is not None
+ else None
+ )
+ audio_conf = (
+ AudioConfig.load(audio_config) if audio_config is not None else None
+ )
+ logging_conf = (
+ AppLoggingConfig.load(logging_config)
+ if logging_config is not None
+ else None
+ )
+
+ train_dataset_conf = load_dataset_config(train_dataset)
+ train_annotations = load_dataset(train_dataset_conf, base_dir=base_dir)
+
+ val_dataset_conf = (
+ load_dataset_config(val_dataset) if val_dataset else None
+ )
+ val_annotations = (
+ load_dataset(val_dataset_conf, base_dir=base_dir)
+ if val_dataset_conf
+ else None
+ )
+
+ logging_callbacks = [
+ DatasetConfigArtifactLogging(
+ train_dataset_config=DatasetConfigArtifact(
+ filename="train_dataset.yaml",
+ config=train_dataset_conf,
+ ),
+ val_dataset_config=(
+ DatasetConfigArtifact(
+ filename="val_dataset.yaml",
+ config=val_dataset_conf,
+ )
+ if val_dataset_conf
+ else None
+ ),
+ )
+ ]
+
+ api = BatDetect2API.from_checkpoint(
+ model_path,
+ train_config=train_conf,
+ audio_config=audio_conf,
+ logging_config=logging_conf,
+ )
+
+ return api.finetune(
+ train_annotations=train_annotations,
+ val_annotations=val_annotations,
+ targets_config=target_conf,
+ trainable=trainable,
+ train_workers=train_workers,
+ val_workers=val_workers,
+ checkpoint_dir=ckpt_dir,
+ log_dir=log_dir,
+ experiment_name=experiment_name,
+ num_epochs=num_epochs,
+ run_name=run_name,
+ seed=seed,
+ train_config=train_conf,
+ audio_config=audio_conf,
+ logger_config=logging_conf.train if logging_conf is not None else None,
+ logging_callbacks=logging_callbacks,
+ )
diff --git a/src/batdetect2/cli/inference.py b/src/batdetect2/cli/inference.py
new file mode 100644
index 00000000..c5e189bd
--- /dev/null
+++ b/src/batdetect2/cli/inference.py
@@ -0,0 +1,344 @@
+from functools import wraps
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+import click
+from loguru import logger
+
+from batdetect2.cli.base import cli
+
+if TYPE_CHECKING:
+ from batdetect2.api_v2 import BatDetect2API
+ from batdetect2.audio import AudioConfig
+ from batdetect2.inference import InferenceConfig
+ from batdetect2.outputs import OutputsConfig
+
+__all__ = ["process"]
+
+
+@cli.group(name="process", short_help="Run processing workflows.")
+def process() -> None:
+ """Run model inference on audio.
+
+ Choose a subcommand based on how you want to provide input audio.
+ """
+
+
+def common_predict_options(func):
+ """Attach options shared by all ``process`` subcommands."""
+
+ @click.option(
+ "--model",
+ "model_path",
+ type=str,
+ help=(
+ "Path to a checkpoint, checkpoint alias, or a Hugging Face "
+ "URI to fine-tune from. Defaults to uk_same"
+ ),
+ )
+ @click.option(
+ "--audio-config",
+ type=click.Path(exists=True),
+ help=(
+ "Path to an audio config file. Use this to override audio "
+ "loading settings."
+ ),
+ )
+ @click.option(
+ "--inference-config",
+ type=click.Path(exists=True),
+ help=(
+ "Path to an inference config file. Use this to override "
+ "prediction settings."
+ ),
+ )
+ @click.option(
+ "--outputs-config",
+ type=click.Path(exists=True),
+ help=(
+ "Path to an outputs config file. Use this to control the "
+ "saved output format and fields."
+ ),
+ )
+ @click.option(
+ "--logging-config",
+ type=click.Path(exists=True),
+ help=("Path to a logging config file. Use this to change log output."),
+ )
+ @click.option(
+ "--batch-size",
+ type=int,
+ help=(
+ "Batch size for inference. If omitted, the config value is used."
+ ),
+ )
+ @click.option(
+ "--workers",
+ "num_workers",
+ type=int,
+ default=0,
+ show_default=True,
+ help="Number of worker processes for audio loading.",
+ )
+ @click.option(
+ "--format",
+ "format_name",
+ type=str,
+ help=(
+ "Output format name used by the prediction writer. If omitted, "
+ "the loaded outputs config is used, or batdetect2 when no "
+ "outputs config is provided."
+ ),
+ )
+ @click.option(
+ "--detection-threshold",
+ type=click.FloatRange(min=0.0, max=1.0),
+ default=None,
+ help=(
+ "Optional detection score threshold override. If omitted, "
+ "the configured threshold is used."
+ ),
+ )
+ @wraps(func)
+ def wrapped(*args, **kwargs):
+ return func(*args, **kwargs)
+
+ return wrapped
+
+
+def _build_api(
+ model_path: str | None,
+ audio_config: Path | None,
+ inference_config: Path | None,
+ outputs_config: Path | None,
+ logging_config: Path | None,
+) -> "tuple[BatDetect2API, AudioConfig | None, InferenceConfig | None, OutputsConfig | None]":
+ from batdetect2.api_v2 import BatDetect2API
+ from batdetect2.audio import AudioConfig
+ from batdetect2.inference import InferenceConfig
+ from batdetect2.logging import AppLoggingConfig
+ from batdetect2.outputs import OutputsConfig
+
+ audio_conf = (
+ AudioConfig.load(audio_config) if audio_config is not None else None
+ )
+ inference_conf = (
+ InferenceConfig.load(inference_config)
+ if inference_config is not None
+ else None
+ )
+ outputs_conf = (
+ OutputsConfig.load(outputs_config)
+ if outputs_config is not None
+ else None
+ )
+ logging_conf = (
+ AppLoggingConfig.load(logging_config)
+ if logging_config is not None
+ else None
+ )
+
+ api = BatDetect2API.from_checkpoint(
+ path=model_path,
+ audio_config=audio_conf,
+ inference_config=inference_conf,
+ outputs_config=outputs_conf,
+ logging_config=logging_conf,
+ )
+ return api, audio_conf, inference_conf, outputs_conf
+
+
+def _run_prediction(
+ model_path: str | None,
+ audio_files: list[Path],
+ output_path: Path,
+ audio_config: Path | None,
+ inference_config: Path | None,
+ outputs_config: Path | None,
+ logging_config: Path | None,
+ batch_size: int | None,
+ num_workers: int,
+ format_name: str | None,
+ detection_threshold: float | None,
+ audio_dir: Path | None = None,
+) -> None:
+ logger.info("Initiating prediction process...")
+
+ api, audio_conf, inference_conf, outputs_conf = _build_api(
+ model_path,
+ audio_config,
+ inference_config,
+ outputs_config,
+ logging_config,
+ )
+
+ logger.info("Found {num_files} audio files", num_files=len(audio_files))
+
+ predictions = api.process_files(
+ audio_files,
+ batch_size=batch_size,
+ num_workers=num_workers,
+ audio_config=audio_conf,
+ inference_config=inference_conf,
+ output_config=outputs_conf,
+ detection_threshold=detection_threshold,
+ )
+
+ if audio_dir is None:
+ audio_dir = audio_files[0].parent if audio_files else None
+
+ if format_name is None and outputs_conf is None:
+ format_name = "batdetect2"
+
+ api.save_predictions(
+ predictions,
+ path=output_path,
+ audio_dir=audio_dir,
+ format=format_name,
+ )
+
+ logger.info(
+ "Prediction complete. Results saved to {path}", path=output_path
+ )
+
+
+@process.command(
+ name="directory",
+ short_help="Process audio files in a directory.",
+)
+@click.argument("audio_dir", type=click.Path(exists=True))
+@click.argument("output_path", type=click.Path())
+@common_predict_options
+def predict_directory_command(
+ model_path: str | None,
+ audio_dir: Path,
+ output_path: Path,
+ audio_config: Path | None,
+ inference_config: Path | None,
+ outputs_config: Path | None,
+ logging_config: Path | None,
+ batch_size: int | None,
+ num_workers: int,
+ format_name: str | None,
+ detection_threshold: float | None,
+) -> None:
+ """Run processing on all supported audio files in a directory.
+
+ This command scans ``audio_dir`` for audio files, runs processing, and
+ saves the results to ``output_path``.
+ """
+ from soundevent.audio.files import get_audio_files
+
+ audio_files = list(get_audio_files(audio_dir))
+ _run_prediction(
+ model_path=model_path,
+ audio_files=audio_files,
+ output_path=output_path,
+ audio_config=audio_config,
+ inference_config=inference_config,
+ outputs_config=outputs_config,
+ logging_config=logging_config,
+ batch_size=batch_size,
+ num_workers=num_workers,
+ format_name=format_name,
+ detection_threshold=detection_threshold,
+ audio_dir=audio_dir,
+ )
+
+
+@process.command(
+ name="file_list",
+ short_help="Process paths listed in a text file.",
+)
+@click.argument("file_list", type=click.Path(exists=True))
+@click.argument("output_path", type=click.Path())
+@common_predict_options
+def predict_file_list_command(
+ file_list: Path,
+ output_path: Path,
+ model_path: str | None,
+ audio_config: Path | None,
+ inference_config: Path | None,
+ outputs_config: Path | None,
+ logging_config: Path | None,
+ batch_size: int | None,
+ num_workers: int,
+ format_name: str | None,
+ detection_threshold: float | None,
+) -> None:
+ """Run processing on audio files listed in a text file.
+
+ The text file should contain one audio path per line. Empty lines are
+ ignored.
+ """
+ file_list = Path(file_list)
+ audio_files = [
+ Path(line.strip())
+ for line in file_list.read_text().splitlines()
+ if line.strip()
+ ]
+
+ _run_prediction(
+ model_path=model_path,
+ audio_files=audio_files,
+ output_path=output_path,
+ audio_config=audio_config,
+ inference_config=inference_config,
+ outputs_config=outputs_config,
+ logging_config=logging_config,
+ batch_size=batch_size,
+ num_workers=num_workers,
+ format_name=format_name,
+ detection_threshold=detection_threshold,
+ )
+
+
+@process.command(
+ name="dataset",
+ short_help="Process recordings from a dataset config.",
+)
+@click.argument("dataset_path", type=click.Path(exists=True))
+@click.argument("output_path", type=click.Path())
+@common_predict_options
+def predict_dataset_command(
+ dataset_path: Path,
+ output_path: Path,
+ model_path: str | None,
+ audio_config: Path | None,
+ inference_config: Path | None,
+ outputs_config: Path | None,
+ logging_config: Path | None,
+ batch_size: int | None,
+ num_workers: int,
+ format_name: str | None,
+ detection_threshold: float | None,
+) -> None:
+ """Run processing on recordings referenced in a dataset file.
+
+ Recording paths are read from the dataset and each recording is processed
+ once.
+ """
+ from soundevent import io
+
+ dataset_path = Path(dataset_path)
+ dataset = io.load(dataset_path, type="annotation_set")
+ audio_files = sorted(
+ {
+ Path(clip_annotation.clip.recording.path)
+ for clip_annotation in dataset.clip_annotations
+ }
+ )
+
+ _run_prediction(
+ model_path=model_path,
+ audio_files=audio_files,
+ output_path=output_path,
+ audio_config=audio_config,
+ inference_config=inference_config,
+ outputs_config=outputs_config,
+ logging_config=logging_config,
+ batch_size=batch_size,
+ num_workers=num_workers,
+ format_name=format_name,
+ detection_threshold=detection_threshold,
+ )
diff --git a/src/batdetect2/cli/train.py b/src/batdetect2/cli/train.py
new file mode 100644
index 00000000..c577512e
--- /dev/null
+++ b/src/batdetect2/cli/train.py
@@ -0,0 +1,291 @@
+from pathlib import Path
+
+import click
+from loguru import logger
+
+from batdetect2.cli.base import cli
+
+__all__ = ["train_command"]
+
+
+@cli.command(name="train", short_help="Train or fine-tune a model.")
+@click.argument("train_dataset", type=click.Path(exists=True))
+@click.option(
+ "--val-dataset",
+ type=click.Path(exists=True),
+ help="Path to a validation dataset config file.",
+)
+@click.option(
+ "--model",
+ "model_path",
+ type=str,
+ help=(
+ "Path to a checkpoint, bundled checkpoint alias, or Hugging Face "
+ "URI. If omitted, training starts from a fresh model config."
+ ),
+)
+@click.option(
+ "--base-dir",
+ type=click.Path(exists=True),
+ help=(
+ "Base directory used to resolve relative paths inside the training "
+ "and validation dataset configs."
+ ),
+)
+@click.option(
+ "--targets",
+ "targets_config",
+ type=click.Path(exists=True),
+ help="Path to a targets config file.",
+)
+@click.option(
+ "--model-config",
+ type=click.Path(exists=True),
+ help=("Path to model config file. Cannot be used together with --model."),
+)
+@click.option(
+ "--training-config",
+ type=click.Path(exists=True),
+ help="Path to a training config file.",
+)
+@click.option(
+ "--audio-config",
+ type=click.Path(exists=True),
+ help="Path to an audio config file.",
+)
+@click.option(
+ "--evaluation-config",
+ type=click.Path(exists=True),
+ help="Path to an evaluation config file.",
+)
+@click.option(
+ "--inference-config",
+ type=click.Path(exists=True),
+ help="Path to an inference config file.",
+)
+@click.option(
+ "--outputs-config",
+ type=click.Path(exists=True),
+ help="Path to an outputs config file.",
+)
+@click.option(
+ "--logging-config",
+ type=click.Path(exists=True),
+ help="Path to a logging config file.",
+)
+@click.option(
+ "--ckpt-dir",
+ type=click.Path(exists=True),
+ help="Directory where checkpoints are saved.",
+)
+@click.option(
+ "--log-dir",
+ type=click.Path(exists=True),
+ help="Directory where logs are written.",
+)
+@click.option(
+ "--train-workers",
+ type=int,
+ default=0,
+ help="Number of worker processes for training data loading.",
+)
+@click.option(
+ "--val-workers",
+ type=int,
+ default=0,
+ help="Number of worker processes for validation data loading.",
+)
+@click.option(
+ "--num-epochs",
+ type=int,
+ help="Maximum number of training epochs.",
+)
+@click.option(
+ "--experiment-name",
+ type=str,
+ help="Experiment name used for logging backends.",
+)
+@click.option(
+ "--run-name",
+ type=str,
+ help="Run name used for logging backends.",
+)
+@click.option(
+ "--seed",
+ type=int,
+ help="Random seed used for reproducibility.",
+)
+def train_command(
+ train_dataset: Path,
+ val_dataset: Path | None = None,
+ model_path: str | None = None,
+ ckpt_dir: Path | None = None,
+ log_dir: Path | None = None,
+ base_dir: Path | None = None,
+ targets_config: Path | None = None,
+ model_config: Path | None = None,
+ training_config: Path | None = None,
+ audio_config: Path | None = None,
+ evaluation_config: Path | None = None,
+ inference_config: Path | None = None,
+ outputs_config: Path | None = None,
+ logging_config: Path | None = None,
+ seed: int | None = None,
+ num_epochs: int | None = None,
+ train_workers: int = 0,
+ val_workers: int = 0,
+ experiment_name: str | None = None,
+ run_name: str | None = None,
+):
+ """Train a BatDetect2 model.
+
+ Start from a fresh model config or continue from an existing checkpoint.
+ Training data are loaded from ``train_dataset``.
+ """
+ from batdetect2.api_v2 import BatDetect2API
+ from batdetect2.audio import AudioConfig
+ from batdetect2.data import load_dataset_config, load_dataset_from_config
+ from batdetect2.evaluate import EvaluationConfig
+ from batdetect2.inference import InferenceConfig
+ from batdetect2.logging import AppLoggingConfig
+ from batdetect2.models import ModelConfig
+ from batdetect2.outputs import OutputsConfig
+ from batdetect2.targets import TargetConfig
+ from batdetect2.train import TrainingConfig
+ from batdetect2.train.logging import (
+ DatasetConfigArtifact,
+ DatasetConfigArtifactLogging,
+ )
+
+ logger.info("Initiating training process...")
+
+ logger.info("Loading configuration...")
+ target_conf = (
+ TargetConfig.load(targets_config)
+ if targets_config is not None
+ else None
+ )
+ model_conf = (
+ ModelConfig.load(model_config) if model_config is not None else None
+ )
+ train_conf = (
+ TrainingConfig.load(training_config)
+ if training_config is not None
+ else None
+ )
+ audio_conf = (
+ AudioConfig.load(audio_config) if audio_config is not None else None
+ )
+ eval_conf = (
+ EvaluationConfig.load(evaluation_config)
+ if evaluation_config is not None
+ else None
+ )
+ inference_conf = (
+ InferenceConfig.load(inference_config)
+ if inference_config is not None
+ else None
+ )
+ outputs_conf = (
+ OutputsConfig.load(outputs_config)
+ if outputs_config is not None
+ else None
+ )
+ logging_conf = (
+ AppLoggingConfig.load(logging_config)
+ if logging_config is not None
+ else None
+ )
+
+ if target_conf is not None:
+ logger.info("Loaded targets configuration.")
+
+ logger.info("Loading training dataset...")
+ train_annotations = load_dataset_from_config(
+ train_dataset,
+ base_dir=base_dir,
+ )
+ logger.debug(
+ "Loaded {num_annotations} training examples",
+ num_annotations=len(train_annotations),
+ )
+
+ val_annotations = None
+ if val_dataset is not None:
+ val_annotations = load_dataset_from_config(
+ val_dataset,
+ base_dir=base_dir,
+ )
+ logger.debug(
+ "Loaded {num_annotations} validation examples",
+ num_annotations=len(val_annotations),
+ )
+ else:
+ logger.debug("No validation directory provided.")
+
+ logger.info("Configuration and data loaded. Starting training...")
+
+ logging_callbacks = [
+ DatasetConfigArtifactLogging(
+ train_dataset_config=DatasetConfigArtifact(
+ filename="train_dataset.yaml",
+ config=load_dataset_config(train_dataset),
+ ),
+ val_dataset_config=(
+ DatasetConfigArtifact(
+ filename="val_dataset.yaml",
+ config=load_dataset_config(val_dataset),
+ )
+ if val_dataset is not None
+ else None
+ ),
+ )
+ ]
+
+ if model_path is not None and model_conf is not None:
+ raise click.UsageError(
+ "--model-config cannot be used with --model. "
+ "Checkpoint model configuration is loaded from the checkpoint."
+ )
+
+ if model_path is not None and target_conf is not None:
+ raise click.UsageError(
+ "--targets cannot be used with --model. "
+ "Checkpoint target configuration is loaded from the checkpoint."
+ )
+
+ if model_path is None:
+ api = BatDetect2API.from_config(
+ model_config=model_conf,
+ targets_config=target_conf,
+ train_config=train_conf,
+ audio_config=audio_conf,
+ evaluation_config=eval_conf,
+ inference_config=inference_conf,
+ outputs_config=outputs_conf,
+ logging_config=logging_conf,
+ )
+ else:
+ api = BatDetect2API.from_checkpoint(
+ model_path,
+ train_config=train_conf,
+ audio_config=audio_conf,
+ evaluation_config=eval_conf,
+ inference_config=inference_conf,
+ outputs_config=outputs_conf,
+ logging_config=logging_conf,
+ )
+
+ return api.train(
+ train_annotations=train_annotations,
+ val_annotations=val_annotations,
+ train_workers=train_workers,
+ val_workers=val_workers,
+ checkpoint_dir=ckpt_dir,
+ log_dir=log_dir,
+ num_epochs=num_epochs,
+ experiment_name=experiment_name,
+ run_name=run_name,
+ seed=seed,
+ logging_callbacks=logging_callbacks,
+ )
diff --git a/src/batdetect2/compat/__init__.py b/src/batdetect2/compat/__init__.py
new file mode 100644
index 00000000..d95a8962
--- /dev/null
+++ b/src/batdetect2/compat/__init__.py
@@ -0,0 +1,15 @@
+from batdetect2.compat.data import (
+ annotation_to_sound_event_annotation,
+ annotation_to_sound_event_prediction,
+ convert_to_annotation_group,
+ file_annotation_to_clip_annotation,
+ load_file_annotation,
+)
+
+__all__ = [
+ "annotation_to_sound_event_annotation",
+ "annotation_to_sound_event_prediction",
+ "convert_to_annotation_group",
+ "file_annotation_to_clip_annotation",
+ "load_file_annotation",
+]
diff --git a/src/batdetect2/compat/data.py b/src/batdetect2/compat/data.py
new file mode 100644
index 00000000..48c15ca0
--- /dev/null
+++ b/src/batdetect2/compat/data.py
@@ -0,0 +1,314 @@
+"""Compatibility functions between old and new data structures."""
+
+import json
+import os
+import uuid
+from pathlib import Path
+from typing import Callable, List
+
+import numpy as np
+from soundevent import data
+from soundevent.geometry import compute_bounds
+from soundevent.types import ClassMapper
+
+from batdetect2.types import (
+ Annotation,
+ AudioLoaderAnnotationGroup,
+ FileAnnotation,
+)
+
+PathLike = Path | str | os.PathLike
+
+__all__ = [
+ "convert_to_annotation_group",
+ "load_file_annotation",
+ "annotation_to_sound_event_annotation",
+ "annotation_to_sound_event_prediction",
+]
+
+SPECIES_TAG_KEY = "species"
+ECHOLOCATION_EVENT = "Echolocation"
+UNKNOWN_CLASS = "__UNKNOWN__"
+
+NAMESPACE = uuid.UUID("97a9776b-c0fd-4c68-accb-0b0ecd719242")
+
+
+EventFn = Callable[[data.SoundEventAnnotation], str | None]
+
+ClassFn = Callable[[data.Recording], int]
+
+IndividualFn = Callable[[data.SoundEventAnnotation], int]
+
+
+def get_recording_class_name(recording: data.Recording) -> str:
+ """Get the class name for a recording."""
+ tag = data.find_tag(recording.tags, label=SPECIES_TAG_KEY)
+ if tag is None:
+ return UNKNOWN_CLASS
+ return tag.value
+
+
+def get_annotation_notes(annotation: data.ClipAnnotation) -> str:
+ """Get the notes for a ClipAnnotation."""
+ all_notes = [
+ *annotation.notes,
+ *annotation.clip.recording.notes,
+ ]
+ messages = [note.message for note in all_notes if note.message is not None]
+ return "\n".join(messages)
+
+
+def convert_to_annotation_group(
+ annotation: data.ClipAnnotation,
+ class_mapper: ClassMapper,
+ event_fn: EventFn = lambda _: ECHOLOCATION_EVENT,
+ class_fn: ClassFn = lambda _: 0,
+ individual_fn: IndividualFn = lambda _: 0,
+) -> AudioLoaderAnnotationGroup:
+ """Convert a ClipAnnotation to an AudioLoaderAnnotationGroup."""
+ recording = annotation.clip.recording
+
+ start_times = []
+ end_times = []
+ low_freqs = []
+ high_freqs = []
+ class_ids = []
+ x_inds = []
+ y_inds = []
+ individual_ids = []
+ annotations: List[Annotation] = []
+ class_id_file = class_fn(recording)
+
+ for sound_event in annotation.sound_events:
+ geometry = sound_event.sound_event.geometry
+
+ if geometry is None:
+ continue
+
+ start_time, low_freq, end_time, high_freq = compute_bounds(geometry)
+ class_id = class_mapper.transform(sound_event) or -1
+ event = event_fn(sound_event) or ""
+ individual_id = individual_fn(sound_event) or -1
+
+ start_times.append(start_time)
+ end_times.append(end_time)
+ low_freqs.append(low_freq)
+ high_freqs.append(high_freq)
+ class_ids.append(class_id)
+ individual_ids.append(individual_id)
+
+ # NOTE: This will be computed later so we just put a placeholder
+ # here for now.
+ x_inds.append(0)
+ y_inds.append(0)
+
+ annotation_entry: Annotation = {
+ "start_time": start_time,
+ "end_time": end_time,
+ "low_freq": low_freq,
+ "high_freq": high_freq,
+ "class_prob": 1.0,
+ "det_prob": 1.0,
+ "individual": "0",
+ "event": event,
+ "class": get_recording_class_name(recording),
+ "class_id": class_id,
+ }
+ annotations.append(annotation_entry)
+
+ return {
+ "id": str(recording.path),
+ "duration": recording.duration,
+ "issues": False,
+ "file_path": str(recording.path),
+ "time_exp": recording.time_expansion,
+ "class_name": get_recording_class_name(recording),
+ "notes": get_annotation_notes(annotation),
+ "annotated": True,
+ "start_times": np.array(start_times),
+ "end_times": np.array(end_times),
+ "low_freqs": np.array(low_freqs),
+ "high_freqs": np.array(high_freqs),
+ "class_ids": np.array(class_ids),
+ "x_inds": np.array(x_inds),
+ "y_inds": np.array(y_inds),
+ "individual_ids": np.array(individual_ids),
+ "annotation": annotations,
+ "class_id_file": class_id_file,
+ }
+
+
+def load_file_annotation(path: PathLike) -> FileAnnotation:
+ """Load annotation from batdetect format."""
+ path = Path(path)
+ return json.loads(path.read_text())
+
+
+def annotation_to_sound_event_annotation(
+ annotation: Annotation,
+ recording: data.Recording,
+ label_key: str = "class",
+ event_key: str = "event",
+ individual_key: str = "individual",
+) -> data.SoundEventAnnotation:
+ """Convert annotation to sound event annotation."""
+ sound_event = data.SoundEvent(
+ uuid=uuid.uuid5(
+ NAMESPACE,
+ f"{recording.hash}_{annotation['start_time']}_{annotation['end_time']}",
+ ),
+ recording=recording,
+ geometry=data.BoundingBox(
+ coordinates=[
+ annotation["start_time"],
+ annotation["low_freq"],
+ annotation["end_time"],
+ annotation["high_freq"],
+ ],
+ ),
+ )
+
+ return data.SoundEventAnnotation(
+ uuid=uuid.uuid5(NAMESPACE, f"{sound_event.uuid}_annotation"),
+ sound_event=sound_event,
+ tags=[
+ data.Tag(key=label_key, value=annotation["class"]),
+ data.Tag(key=event_key, value=annotation["event"]),
+ data.Tag(key=individual_key, value=str(annotation["individual"])),
+ ],
+ )
+
+
+def annotation_to_sound_event_prediction(
+ annotation: Annotation,
+ recording: data.Recording,
+ label_key: str = "class",
+ event_key: str = "event",
+) -> data.SoundEventPrediction:
+ """Convert annotation to sound event annotation."""
+ sound_event = data.SoundEvent(
+ uuid=uuid.uuid5(
+ NAMESPACE,
+ f"{recording.hash}_{annotation['start_time']}_{annotation['end_time']}",
+ ),
+ recording=recording,
+ geometry=data.BoundingBox(
+ coordinates=[
+ annotation["start_time"],
+ annotation["low_freq"],
+ annotation["end_time"],
+ annotation["high_freq"],
+ ],
+ ),
+ )
+
+ return data.SoundEventPrediction(
+ uuid=uuid.uuid5(NAMESPACE, f"{sound_event.uuid}_annotation"),
+ sound_event=sound_event,
+ score=annotation["det_prob"],
+ tags=[
+ data.PredictedTag(
+ score=annotation["class_prob"],
+ tag=data.Tag(key=label_key, value=annotation["class"]),
+ ),
+ data.PredictedTag(
+ score=annotation["det_prob"],
+ tag=data.Tag(key=event_key, value=annotation["event"]),
+ ),
+ ],
+ )
+
+
+def file_annotation_to_clip(
+ file_annotation: FileAnnotation,
+ audio_dir: PathLike | None = None,
+ label_key: str = "class",
+) -> data.Clip:
+ """Convert file annotation to recording."""
+ audio_dir = audio_dir or Path.cwd()
+
+ full_path = Path(audio_dir) / file_annotation["id"]
+
+ if not full_path.exists():
+ raise FileNotFoundError(f"File {full_path} not found.")
+
+ recording = data.Recording.from_file(
+ full_path,
+ time_expansion=file_annotation["time_exp"],
+ tags=[
+ data.Tag(
+ term=data.term_from_key(label_key),
+ value=file_annotation["class_name"],
+ )
+ ],
+ )
+
+ return data.Clip(
+ uuid=uuid.uuid5(NAMESPACE, f"{file_annotation['id']}_clip"),
+ recording=recording,
+ start_time=0,
+ end_time=recording.duration,
+ )
+
+
+def file_annotation_to_clip_annotation(
+ file_annotation: FileAnnotation,
+ clip: data.Clip,
+ label_key: str = "class",
+ event_key: str = "event",
+ individual_key: str = "individual",
+) -> data.ClipAnnotation:
+ """Convert file annotation to clip annotation."""
+ notes = []
+ if file_annotation["notes"]:
+ notes.append(data.Note(message=file_annotation["notes"]))
+
+ return data.ClipAnnotation(
+ uuid=uuid.uuid5(NAMESPACE, f"{file_annotation['id']}_clip_annotation"),
+ clip=clip,
+ notes=notes,
+ tags=[
+ data.Tag(
+ term=data.term_from_key(label_key),
+ value=file_annotation["class_name"],
+ )
+ ],
+ sound_events=[
+ annotation_to_sound_event_annotation(
+ annotation,
+ clip.recording,
+ label_key=label_key,
+ event_key=event_key,
+ individual_key=individual_key,
+ )
+ for annotation in file_annotation["annotation"]
+ ],
+ )
+
+
+def file_annotation_to_annotation_task(
+ file_annotation: FileAnnotation,
+ clip: data.Clip,
+) -> data.AnnotationTask:
+ status_badges = []
+
+ if file_annotation["issues"]:
+ status_badges.append(
+ data.StatusBadge(state=data.AnnotationState.rejected)
+ )
+ elif file_annotation["annotated"]:
+ status_badges.append(
+ data.StatusBadge(state=data.AnnotationState.completed)
+ )
+
+ return data.AnnotationTask(
+ uuid=uuid.uuid5(uuid.NAMESPACE_URL, f"{file_annotation['id']}_task"),
+ clip=clip,
+ status_badges=status_badges,
+ )
+
+
+def list_file_annotations(path: PathLike) -> List[Path]:
+ """List all annotations in a directory."""
+ path = Path(path)
+ return [file for file in path.glob("*.json")]
diff --git a/src/batdetect2/compat/params.py b/src/batdetect2/compat/params.py
new file mode 100644
index 00000000..95d1c31e
--- /dev/null
+++ b/src/batdetect2/compat/params.py
@@ -0,0 +1,152 @@
+# from batdetect2.preprocess import (
+# AmplitudeScaleConfig,
+# AudioConfig,
+# FrequencyConfig,
+# LogScaleConfig,
+# PcenConfig,
+# PreprocessingConfig,
+# ResampleConfig,
+# Scales,
+# SpecSizeConfig,
+# SpectrogramConfig,
+# STFTConfig,
+# )
+# from batdetect2.preprocess.spectrogram import get_spectrogram_resolution
+# from batdetect2.targets import (
+# LabelConfig,
+# TagInfo,
+# TargetConfig,
+# )
+# from batdetect2.train.preprocess import (
+# TrainPreprocessingConfig,
+# )
+#
+#
+# def get_spectrogram_scale(scale: str) -> Scales:
+# if scale == "pcen":
+# return PcenConfig()
+# if scale == "log":
+# return LogScaleConfig()
+# return AmplitudeScaleConfig()
+#
+#
+# def get_preprocessing_config(params: dict) -> PreprocessingConfig:
+# return PreprocessingConfig(
+# audio=AudioConfig(
+# resample=ResampleConfig(
+# samplerate=params["target_samp_rate"],
+# method="poly",
+# ),
+# scale=params["scale_raw_audio"],
+# center=params["scale_raw_audio"],
+# duration=None,
+# ),
+# spectrogram=SpectrogramConfig(
+# stft=STFTConfig(
+# window_duration=params["fft_win_length"],
+# window_overlap=params["fft_overlap"],
+# window_fn="hann",
+# ),
+# frequencies=FrequencyConfig(
+# min_freq=params["min_freq"],
+# max_freq=params["max_freq"],
+# ),
+# scale=get_spectrogram_scale(params["spec_scale"]),
+# spectral_mean_substraction=params["denoise_spec_avg"],
+# size=SpecSizeConfig(
+# height=params["spec_height"],
+# resize_factor=params["resize_factor"],
+# ),
+# peak_normalize=params["max_scale_spec"],
+# ),
+# )
+#
+#
+# def get_training_preprocessing_config(
+# params: dict,
+# ) -> TrainPreprocessingConfig:
+# generic = params["generic_class"][0]
+# preprocessing = get_preprocessing_config(params)
+#
+# freq_bin_width, time_bin_width = get_spectrogram_resolution(
+# preprocessing.spectrogram
+# )
+#
+# return TrainPreprocessingConfig(
+# preprocessing=preprocessing,
+# target=TargetConfig(
+# classes=[
+# TagInfo(key="class", value=class_name)
+# for class_name in params["class_names"]
+# ],
+# generic_class=TagInfo(
+# key="class",
+# value=generic,
+# ),
+# include=[
+# TagInfo(key="event", value=event)
+# for event in params["events_of_interest"]
+# ],
+# exclude=[
+# TagInfo(key="class", value=value)
+# for value in params["classes_to_ignore"]
+# ],
+# ),
+# labels=LabelConfig(
+# position="bottom-left",
+# time_scale=1 / time_bin_width,
+# frequency_scale=1 / freq_bin_width,
+# sigma=params["target_sigma"],
+# ),
+# )
+#
+#
+# # 'standardize_classs_names_ip',
+# # 'convert_to_genus',
+# # 'genus_mapping',
+# # 'standardize_classs_names',
+# # 'genus_names',
+#
+# # ['data_dir',
+# # 'ann_dir',
+# # 'train_split',
+# # 'model_name',
+# # 'num_filters',
+# # 'experiment',
+# # 'model_file_name',
+# # 'op_im_dir',
+# # 'op_im_dir_test',
+# # 'notes',
+# # 'spec_divide_factor',
+# # 'detection_overlap',
+# # 'ignore_start_end',
+# # 'detection_threshold',
+# # 'nms_kernel_size',
+# # 'nms_top_k_per_sec',
+# # 'aug_prob',
+# # 'augment_at_train',
+# # 'augment_at_train_combine',
+# # 'echo_max_delay',
+# # 'stretch_squeeze_delta',
+# # 'mask_max_time_perc',
+# # 'mask_max_freq_perc',
+# # 'spec_amp_scaling',
+# # 'aug_sampling_rates',
+# # 'train_loss',
+# # 'det_loss_weight',
+# # 'size_loss_weight',
+# # 'class_loss_weight',
+# # 'individual_loss_weight',
+# # 'emb_dim',
+# # 'lr',
+# # 'batch_size',
+# # 'num_workers',
+# # 'num_epochs',
+# # 'num_eval_epochs',
+# # 'device',
+# # 'save_test_image_during_train',
+# # 'save_test_image_after_train',
+# # 'train_sets',
+# # 'test_sets',
+# # 'class_inv_freq',
+# # 'ip_height']
diff --git a/src/batdetect2/core/__init__.py b/src/batdetect2/core/__init__.py
new file mode 100644
index 00000000..23b77c57
--- /dev/null
+++ b/src/batdetect2/core/__init__.py
@@ -0,0 +1,15 @@
+from batdetect2.core.configs import BaseConfig, load_config, merge_configs
+from batdetect2.core.registries import (
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+
+__all__ = [
+ "add_import_config",
+ "BaseConfig",
+ "ImportConfig",
+ "load_config",
+ "Registry",
+ "merge_configs",
+]
diff --git a/src/batdetect2/core/arrays.py b/src/batdetect2/core/arrays.py
new file mode 100644
index 00000000..8469b088
--- /dev/null
+++ b/src/batdetect2/core/arrays.py
@@ -0,0 +1,93 @@
+import numpy as np
+import torch
+import xarray as xr
+
+
+def spec_to_xarray(
+ spec: np.ndarray,
+ start_time: float,
+ end_time: float,
+ min_freq: float,
+ max_freq: float,
+) -> xr.DataArray:
+ if spec.ndim != 2:
+ raise ValueError(
+ "Input numpy spectrogram array should be 2-dimensional"
+ )
+
+ height, width = spec.shape
+ return xr.DataArray(
+ data=spec,
+ dims=["frequency", "time"],
+ coords={
+ "frequency": np.linspace(
+ min_freq,
+ max_freq,
+ height,
+ endpoint=False,
+ ),
+ "time": np.linspace(
+ start_time,
+ end_time,
+ width,
+ endpoint=False,
+ ),
+ },
+ )
+
+
+def extend_width(
+ tensor: torch.Tensor,
+ extra: int,
+ axis: int = -1,
+ value: float = 0,
+) -> torch.Tensor:
+ dims = len(tensor.shape)
+ axis = dims - axis % dims - 1
+ pad = [0 for _ in range(2 * dims)]
+ pad[2 * axis + 1] = extra
+ return torch.nn.functional.pad(
+ tensor,
+ pad,
+ mode="constant",
+ value=value,
+ )
+
+
+def adjust_width(
+ tensor: torch.Tensor,
+ width: int,
+ axis: int = -1,
+ value: float = 0,
+) -> torch.Tensor:
+ dims = len(tensor.shape)
+ axis = axis % dims
+ current_width = tensor.shape[axis]
+
+ if current_width == width:
+ return tensor
+
+ if current_width < width:
+ return extend_width(
+ tensor,
+ extra=width - current_width,
+ axis=axis,
+ value=value,
+ )
+
+ slices = [
+ slice(None, None) if index != axis else slice(None, width)
+ for index in range(dims)
+ ]
+ return tensor[tuple(slices)]
+
+
+def slice_tensor(
+ tensor: torch.Tensor,
+ start: int | None = None,
+ end: int | None = None,
+ dim: int = -1,
+) -> torch.Tensor:
+ slices = [slice(None)] * tensor.ndim
+ slices[dim] = slice(start, end)
+ return tensor[tuple(slices)]
diff --git a/src/batdetect2/core/configs.py b/src/batdetect2/core/configs.py
new file mode 100644
index 00000000..0fd7dbab
--- /dev/null
+++ b/src/batdetect2/core/configs.py
@@ -0,0 +1,259 @@
+"""Provides base classes and utilities for loading configurations in BatDetect2.
+
+This module leverages Pydantic for robust configuration handling, ensuring
+that configuration files (typically YAML) adhere to predefined schemas. It
+defines a base configuration class (`BaseConfig`) with shared validation
+settings and a utility function (`load_config`) to load and validate
+configuration data from files, with optional support for accessing nested
+configuration sections.
+"""
+
+from typing import Any, Literal, Type, TypeVar, overload
+
+import yaml
+from deepmerge.merger import Merger
+from pydantic import BaseModel, ConfigDict, TypeAdapter
+from soundevent.data import PathLike
+
+__all__ = [
+ "BaseConfig",
+ "load_config",
+ "merge_configs",
+]
+
+C = TypeVar("C", bound="BaseConfig")
+
+
+class BaseConfig(BaseModel):
+ """Base class for all configuration models in BatDetect2.
+
+ Inherits from Pydantic's `BaseModel` to provide data validation, parsing,
+ and serialization capabilities.
+ """
+
+ model_config = ConfigDict(extra="ignore")
+
+ def to_yaml_string(
+ self,
+ exclude_none: bool = False,
+ exclude_unset: bool = False,
+ exclude_defaults: bool = False,
+ ) -> str:
+ """Converts the Pydantic model instance to a YAML string.
+
+ Parameters
+ ----------
+ exclude_none : bool, default=False
+ Whether to exclude fields whose value is `None`.
+ exclude_unset : bool, default=False
+ Whether to exclude fields that were not explicitly set.
+ exclude_defaults : bool, default=False
+ Whether to exclude fields whose value is the default value.
+
+ Returns
+ -------
+ str
+ A YAML string representation of the model.
+ """
+ return yaml.dump(
+ self.model_dump(
+ mode="json",
+ exclude_none=exclude_none,
+ exclude_unset=exclude_unset,
+ exclude_defaults=exclude_defaults,
+ )
+ )
+
+ @classmethod
+ def from_yaml(cls, yaml_str: str):
+ return cls.model_validate(yaml.safe_load(yaml_str))
+
+ @classmethod
+ def load(
+ cls: Type[C],
+ path: PathLike,
+ field: str | None = None,
+ extra: Literal["ignore", "allow", "forbid"] | None = None,
+ strict: bool | None = None,
+ ) -> C:
+ return load_config(
+ path,
+ schema=cls,
+ field=field,
+ extra=extra,
+ strict=strict,
+ )
+
+
+T = TypeVar("T")
+T_Model = TypeVar("T_Model", bound=BaseModel)
+Schema = Type[T_Model] | TypeAdapter[T]
+
+
+def get_object_field(obj: dict, current_key: str) -> Any:
+ """Access a potentially nested field within a dictionary using dot notation.
+
+ Parameters
+ ----------
+ obj : dict
+ The dictionary (or nested dictionaries) to access.
+    current_key : str
+ The field name to retrieve. Nested fields are specified using dots
+ (e.g., "parent_key.child_key.target_field").
+
+ Returns
+ -------
+ Any
+ The value found at the specified field path.
+
+ Raises
+ ------
+ KeyError
+ If any part of the field path does not exist in the dictionary
+ structure.
+ TypeError
+ If an intermediate part of the path exists but is not a dictionary,
+ preventing further nesting.
+
+ Examples
+ --------
+ >>> data = {"a": {"b": {"c": 10}}}
+ >>> get_object_field(data, "a.b.c")
+ 10
+ >>> get_object_field(data, "a.b")
+ {'c': 10}
+ >>> get_object_field(data, "a")
+ {'b': {'c': 10}}
+ >>> get_object_field(data, "x")
+ Traceback (most recent call last):
+ ...
+ KeyError: 'x'
+ >>> get_object_field(data, "a.x.c")
+ Traceback (most recent call last):
+ ...
+ KeyError: 'x'
+ """
+ if "." not in current_key:
+ return obj.get(current_key, {})
+
+ current_key, rest = current_key.split(".", 1)
+ subobj = obj[current_key]
+
+ if not isinstance(subobj, dict):
+ raise TypeError(
+ f"Intermediate key '{current_key}' in path '{current_key}' "
+ f"does not lead to a dictionary (found type: {type(subobj)}). "
+ "Cannot access further nested field."
+ )
+
+ return get_object_field(subobj, rest)
+
+
+@overload
+def load_config(
+ path: PathLike,
+ schema: Type[T_Model],
+ field: str | None = None,
+ extra: Literal["ignore", "allow", "forbid"] | None = None,
+ strict: bool | None = None,
+) -> T_Model: ...
+
+
+@overload
+def load_config(
+ path: PathLike,
+ schema: TypeAdapter[T],
+ field: str | None = None,
+ extra: Literal["ignore", "allow", "forbid"] | None = None,
+ strict: bool | None = None,
+) -> T: ...
+
+
+def load_config(
+ path: PathLike,
+ schema: Type[T_Model] | TypeAdapter[T],
+ field: str | None = None,
+ extra: Literal["ignore", "allow", "forbid"] | None = None,
+ strict: bool | None = None,
+) -> T_Model | T:
+ """Load and validate configuration data from a file against a schema.
+
+ Reads a YAML file, optionally extracts a specific section using dot
+ notation, and then validates the resulting data against the provided
+ Pydantic schema.
+
+ Parameters
+ ----------
+ path : PathLike
+ The path to the configuration file (typically `.yaml`).
+ schema : Type[T_Model] | TypeAdapter[T]
+ Either a Pydantic `BaseModel` subclass or a `TypeAdapter` instance
+ that defines the expected structure and types for the configuration
+ data.
+ field : str, optional
+ A dot-separated string indicating a nested section within the YAML
+ file to extract before validation. If None (default), the entire
+ file content is validated against the schema.
+ Example: `"training.optimizer"` would extract the `optimizer` section
+ within the `training` section.
+ extra : Literal["ignore", "allow", "forbid"], optional
+ How to handle extra keys in the configuration data. If None (default),
+ the default behaviour of the schema is used. If "ignore", extra keys
+ are ignored. If "allow", extra keys are allowed and will be accessible
+ as attributes on the resulting model instance. If "forbid", extra
+ keys are forbidden and an exception is raised. See pydantic
+ documentation for more details.
+ strict : bool, optional
+ Whether to enforce types strictly. If None (default), the default
+ behaviour of the schema is used. See pydantic documentation for more
+ details.
+
+ Returns
+ -------
+ T_Model | T
+ An instance of the schema type, populated and validated with
+ data from the configuration file.
+
+ Raises
+ ------
+ FileNotFoundError
+ If the file specified by `path` does not exist.
+ yaml.YAMLError
+ If the file content is not valid YAML.
+ pydantic.ValidationError
+ If the loaded configuration data (after optionally extracting the
+ `field`) does not conform to the provided `schema` (e.g., missing
+        required fields, incorrect types, or extra fields when forbidden).
+ KeyError
+ If `field` is provided and specifies a path where intermediate keys
+ do not exist in the loaded YAML data.
+ TypeError
+ If `field` is provided and specifies a path where an intermediate
+ value is not a dictionary, preventing access to nested fields.
+ """
+ with open(path, "r") as file:
+ config = yaml.safe_load(file)
+
+ if field:
+ config = get_object_field(config, field)
+
+ if isinstance(schema, TypeAdapter):
+ return schema.validate_python(config or {}, extra=extra, strict=strict)
+
+ return schema.model_validate(config or {}, extra=extra, strict=strict)
+
+
+default_merger = Merger(
+ [],
+ ["override"],
+ ["override"],
+)
+
+
+def merge_configs(config1: T_Model, config2: T_Model) -> T_Model:
+ """Merge two configuration objects."""
+ model = type(config1)
+ dict1 = config1.model_dump()
+ dict2 = config2.model_dump()
+ merged = default_merger.merge(dict1, dict2)
+ return model.model_validate(merged)
diff --git a/src/batdetect2/core/registries.py b/src/batdetect2/core/registries.py
new file mode 100644
index 00000000..a998c1ac
--- /dev/null
+++ b/src/batdetect2/core/registries.py
@@ -0,0 +1,211 @@
+from typing import (
+ Any,
+ Callable,
+ Concatenate,
+ Generic,
+ ParamSpec,
+ Sequence,
+ Type,
+ TypeVar,
+)
+
+from hydra.utils import instantiate
+from pydantic import BaseModel, Field
+
+from batdetect2.core.configs import BaseConfig
+
+__all__ = [
+ "add_import_config",
+ "ImportConfig",
+ "Registry",
+ "SimpleRegistry",
+]
+
+
+# Config type accepted by a registered builder (contravariant: a
+# builder declared for a base config also accepts its subclasses).
+T_Config = TypeVar("T_Config", bound=BaseModel, contravariant=True)
+# Type produced by a registry's builders (covariant).
+T_Type = TypeVar("T_Type", covariant=True)
+# Extra (non-config) parameters forwarded to builders by `build()`.
+P_Type = ParamSpec("P_Type")
+# Item type stored in a SimpleRegistry.
+T = TypeVar("T")
+
+
+class SimpleRegistry(Generic[T]):
+    """A minimal name -> object registry.
+
+    Unlike `Registry`, items are stored directly under an explicit
+    string key and no Pydantic config-class machinery is involved.
+    """
+
+    def __init__(self, name: str):
+        # Human-readable registry name (kept for context, not lookups).
+        self._name = name
+        self._registry: dict[str, T] = {}
+
+    def register(self, name: str):
+        """Return a decorator that stores the decorated object under `name`."""
+
+        def decorator(obj: T) -> T:
+            self._registry[name] = obj
+            return obj
+
+        return decorator
+
+    def get(self, name: str) -> T:
+        """Return the object registered under `name`.
+
+        Raises
+        ------
+        KeyError
+            If no object was registered under `name`.
+        """
+        return self._registry[name]
+
+    def has(self, name: str) -> bool:
+        """Return True if an object is registered under `name`."""
+        return name in self._registry
+
+
+class Registry(Generic[T_Type, P_Type]):
+    """A generic class to create and manage a registry of items.
+
+    Builder functions are registered against a Pydantic config class
+    whose discriminator field (``"name"`` by default) must declare a
+    string-literal default; that default becomes the registry key used
+    by `build()` to dispatch.
+    """
+
+    def __init__(self, name: str, discriminator: str = "name"):
+        self._name = name
+        self._registry: dict[
+            str, Callable[Concatenate[Any, P_Type], T_Type]
+        ] = {}
+        self._discriminator = discriminator
+        self._config_types: dict[str, Type[BaseModel]] = {}
+
+    def register(
+        self,
+        config_cls: Type[T_Config],
+    ):
+        """Return a decorator that registers a builder for `config_cls`.
+
+        Raises
+        ------
+        ValueError
+            If `config_cls` lacks the discriminator field, or if that
+            field's default is not a string.
+        """
+        fields = config_cls.model_fields
+
+        if self._discriminator not in fields:
+            raise ValueError(
+                "Configuration object must have "
+                f"a '{self._discriminator}' field."
+            )
+
+        name = fields[self._discriminator].default
+
+        # Validate *before* touching registry state: previously the
+        # config type was stored first, so an invalid config class
+        # left a partial entry (e.g. keyed by None) behind after the
+        # exception was raised.
+        if not isinstance(name, str):
+            raise ValueError(
+                f"'{self._discriminator}' field must be a string literal."
+            )
+
+        self._config_types[name] = config_cls
+
+        def decorator(
+            func: Callable[..., T_Type],
+        ):
+            self._registry[name] = func
+            return func
+
+        return decorator
+
+    def get_config_types(self) -> tuple[Type[BaseModel], ...]:
+        """Return all registered config classes."""
+        return tuple(self._config_types.values())
+
+    def get_config_type(self, name: str) -> Type[BaseModel]:
+        """Return the config class registered under `name`.
+
+        Raises
+        ------
+        ValueError
+            If no config type with that name is registered.
+        """
+        try:
+            return self._config_types[name]
+        except KeyError as err:
+            raise ValueError(
+                f"No config type with name '{name}' is registered. "
+                f"Existing config types: {list(self._config_types.keys())}"
+            ) from err
+
+    def build(
+        self,
+        config: BaseModel,
+        *args: Any,
+        **kwargs: Any,
+    ) -> T_Type:
+        """Builds a logic instance from a config object.
+
+        Raises
+        ------
+        ValueError
+            If the config's discriminator attribute is None.
+        NotImplementedError
+            If no builder is registered under the discriminator value.
+        """
+
+        name = getattr(config, self._discriminator)  # noqa: B009
+
+        if name is None:
+            raise ValueError(
+                f"Config does not have a '{self._discriminator}' field"
+            )
+
+        if name not in self._registry:
+            raise NotImplementedError(
+                f"No {self._name} with name '{name}' is registered."
+            )
+
+        return self._registry[name](config, *args, **kwargs)
+
+
+class ImportConfig(BaseConfig):
+    """Base config for dynamic instantiation via Hydra.
+
+    Subclass this to create a registry-specific import escape hatch.
+    The subclass must add a discriminator field whose name matches the
+    registry's own discriminator key, with its value fixed to
+    ``Literal["import"]``.
+
+    Attributes
+    ----------
+    target : str
+        Fully-qualified dotted path to the callable to instantiate,
+        e.g. ``"mypackage.module.MyClass"``.
+    arguments : dict[str, Any]
+        Base keyword arguments forwarded to the callable. When the
+        same key also appears in ``kwargs`` passed to ``build()``,
+        the ``kwargs`` value takes priority.
+    """
+
+    target: str
+    # default_factory gives each instance its own dict (no shared
+    # mutable default).
+    arguments: dict[str, Any] = Field(default_factory=dict)
+
+
+# ImportConfig subclass handled by `add_import_config`.
+T_Import = TypeVar("T_Import", bound=ImportConfig)
+
+
+def add_import_config(
+    registry: Registry[T_Type, P_Type],
+    arg_names: Sequence[str] | None = None,
+) -> Callable[[Type[T_Import]], Type[T_Import]]:
+    """Decorator that registers an ImportConfig subclass as an escape hatch.
+
+    Wraps the decorated class in a builder that calls
+    ``hydra.utils.instantiate`` using ``config.target`` and
+    ``config.arguments``. The builder is registered on *registry*
+    under the discriminator value ``"import"``.
+
+    Parameters
+    ----------
+    registry : Registry
+        The registry instance on which the config should be registered.
+    arg_names : Sequence[str], optional
+        Names used to map positional arguments passed to the builder
+        onto keyword arguments of the instantiated target. When None
+        (default), the builder rejects any positional arguments.
+
+    Returns
+    -------
+    Callable[[type[ImportConfig]], type[ImportConfig]]
+        A class decorator that registers the class and returns it
+        unchanged.
+
+    Examples
+    --------
+    Define a per-registry import escape hatch::
+
+        @add_import_config(my_registry)
+        class MyRegistryImportConfig(ImportConfig):
+            name: Literal["import"] = "import"
+    """
+
+    def decorator(config_cls: Type[T_Import]) -> Type[T_Import]:
+        def builder(
+            config: T_Import,
+            *args: P_Type.args,
+            **kwargs: P_Type.kwargs,
+        ) -> T_Type:
+            _arg_names = arg_names or []
+
+            if len(args) != len(_arg_names):
+                # Distinguish "positional args not configured at all"
+                # from "configured but wrong count" — the original
+                # message claimed the former in both cases.
+                if not _arg_names:
+                    raise ValueError(
+                        "Positional arguments are not supported "
+                        "for the import escape hatch unless you "
+                        "specify `arg_names` to map them to keyword "
+                        "arguments."
+                    )
+                raise ValueError(
+                    f"Expected {len(_arg_names)} positional "
+                    f"argument(s) {list(_arg_names)}, "
+                    f"but got {len(args)}."
+                )
+
+            # Later entries win on key collisions: explicit kwargs
+            # override mapped positional args, which override the
+            # config-declared base arguments.
+            hydra_cfg = {
+                "_target_": config.target,
+                **config.arguments,
+                **dict(zip(_arg_names, args)),
+                **kwargs,
+            }
+            return instantiate(hydra_cfg)
+
+        registry.register(config_cls)(builder)
+        return config_cls
+
+    return decorator
diff --git a/src/batdetect2/data/__init__.py b/src/batdetect2/data/__init__.py
new file mode 100644
index 00000000..29c72701
--- /dev/null
+++ b/src/batdetect2/data/__init__.py
@@ -0,0 +1,45 @@
+from batdetect2.data.annotations import (
+ AnnotatedDataset,
+ AnnotationFormats,
+ AOEFAnnotations,
+ BatDetect2FilesAnnotations,
+ BatDetect2MergedAnnotations,
+ load_annotated_dataset,
+)
+from batdetect2.data.datasets import (
+ Dataset,
+ DatasetConfig,
+ load_dataset,
+ load_dataset_config,
+ load_dataset_from_config,
+)
+from batdetect2.data.summary import (
+ compute_class_summary,
+ extract_recordings_df,
+ extract_sound_events_df,
+)
+
+# NOTE: only names actually imported above are exported. The previous
+# list also named output-formatting symbols (BatDetect2OutputConfig,
+# OutputFormatConfig, ParquetOutputConfig, RawOutputConfig,
+# SoundEventOutputConfig, build_output_formatter, get_output_formatter,
+# load_predictions) that are never imported in this module, which
+# would make `from batdetect2.data import *` raise AttributeError.
+__all__ = [
+    "AOEFAnnotations",
+    "AnnotatedDataset",
+    "AnnotationFormats",
+    "BatDetect2FilesAnnotations",
+    "BatDetect2MergedAnnotations",
+    "Dataset",
+    "DatasetConfig",
+    "compute_class_summary",
+    "extract_recordings_df",
+    "extract_sound_events_df",
+    "load_annotated_dataset",
+    "load_dataset",
+    "load_dataset_config",
+    "load_dataset_from_config",
+]
diff --git a/src/batdetect2/data/annotations/__init__.py b/src/batdetect2/data/annotations/__init__.py
new file mode 100644
index 00000000..5735a534
--- /dev/null
+++ b/src/batdetect2/data/annotations/__init__.py
@@ -0,0 +1,91 @@
+"""Handles loading of annotation data from various formats.
+
+This module serves as the central dispatcher for parsing annotation data
+associated with BatDetect2 datasets. Datasets can be composed of multiple
+sources, each potentially using a different annotation format (e.g., the
+standard AOEF/soundevent format, or legacy BatDetect2 formats).
+
+This module defines the `AnnotationFormats` type, which represents the union
+of possible configuration models for these different formats (each identified
+by a unique `format` field). The primary function, `load_annotated_dataset`,
+inspects the configuration for a single data source and calls the appropriate
+format-specific loading function to retrieve the annotations as a standard
+`soundevent.data.AnnotationSet`.
+"""
+
+from typing import Annotated
+
+from pydantic import Field
+from soundevent import data
+
+from batdetect2.data.annotations.aoef import AOEFAnnotations
+from batdetect2.data.annotations.batdetect2 import (
+ AnnotationFilter,
+ BatDetect2FilesAnnotations,
+ BatDetect2MergedAnnotations,
+)
+from batdetect2.data.annotations.registry import annotation_format_registry
+from batdetect2.data.annotations.types import AnnotatedDataset
+
+__all__ = [
+ "AOEFAnnotations",
+ "AnnotatedDataset",
+ "AnnotationFilter",
+ "AnnotationFormats",
+ "BatDetect2FilesAnnotations",
+ "BatDetect2MergedAnnotations",
+ "load_annotated_dataset",
+]
+
+
+AnnotationFormats = Annotated[
+    BatDetect2MergedAnnotations | BatDetect2FilesAnnotations | AOEFAnnotations,
+    Field(discriminator="format"),
+]
+"""Type Alias representing all supported data source configurations.
+
+Each specific configuration model within this union (e.g., `AOEFAnnotations`,
+`BatDetect2FilesAnnotations`) corresponds to a different annotation format
+or storage structure. These models are typically discriminated by a `format`
+field (e.g., `format="aoef"`, `format="batdetect2_files"`), allowing Pydantic
+and functions like `load_annotated_dataset` to determine which format a given
+source configuration represents.
+
+New annotation formats must both be added to this union and registered
+on `annotation_format_registry` to be usable.
+"""
+
+
+def load_annotated_dataset(
+    dataset: AnnotatedDataset,
+    base_dir: data.PathLike | None = None,
+) -> data.AnnotationSet:
+    """Load annotations for one data source, dispatching on its format.
+
+    The source configuration's ``format`` field selects a loader from
+    ``annotation_format_registry``; that loader reads the annotations
+    and returns them as a standard `soundevent.data.AnnotationSet`.
+
+    Parameters
+    ----------
+    dataset : AnnotatedDataset
+        Source configuration (one of the models in the
+        `AnnotationFormats` union), specifying the format and any
+        necessary paths.
+    base_dir : Path, optional
+        Base directory against which the loader resolves relative
+        paths in `dataset`. Defaults to None.
+
+    Returns
+    -------
+    soundevent.data.AnnotationSet
+        The `ClipAnnotation` objects parsed from the data source.
+
+    Raises
+    ------
+    NotImplementedError
+        If no loader is registered for the source's ``format``.
+    """
+    return annotation_format_registry.build(dataset).load(base_dir=base_dir)
diff --git a/src/batdetect2/data/annotations/aoef.py b/src/batdetect2/data/annotations/aoef.py
new file mode 100644
index 00000000..4e0d5ca9
--- /dev/null
+++ b/src/batdetect2/data/annotations/aoef.py
@@ -0,0 +1,291 @@
+"""Loads annotation data specifically from the AOEF / soundevent format.
+
+This module provides the necessary configuration model and loading function
+to handle data sources where annotations are stored in the standard format
+used by the `soundevent` library (often as `.json` or `.aoef` files),
+which includes outputs from annotation tools like Whombat.
+
+It supports loading both simple `AnnotationSet` files and more complex
+`AnnotationProject` files. For `AnnotationProject` files, it offers optional
+filtering capabilities to select only annotations associated with tasks
+that meet specific status criteria (e.g., completed, verified, without issues).
+"""
+
+from pathlib import Path
+from typing import Literal
+from uuid import uuid5
+
+from pydantic import Field
+from soundevent import data, io
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.data.annotations.registry import annotation_format_registry
+from batdetect2.data.annotations.types import (
+ AnnotatedDataset,
+ AnnotationLoader,
+)
+
+__all__ = [
+ "AOEFAnnotations",
+ "AOEFLoader",
+ "load_aoef_annotated_dataset",
+ "AnnotationTaskFilter",
+]
+
+
+class AnnotationTaskFilter(BaseConfig):
+    """Configuration for filtering Annotation Tasks within an AnnotationProject.
+
+    Specifies criteria based on task status badges to select relevant
+    annotations, typically used when loading data from annotation projects
+    that might contain work-in-progress.
+
+    Attributes
+    ----------
+    only_completed : bool, default=True
+        If True, only include annotations from tasks marked as 'completed'.
+    only_verified : bool, default=False
+        If True, only include annotations from tasks marked as 'verified'.
+    exclude_issues : bool, default=True
+        If True, exclude annotations from tasks marked as 'rejected' (indicating
+        issues).
+    """
+
+    # Defaults: completed-only, verification not required, issues excluded.
+    only_completed: bool = True
+    only_verified: bool = False
+    exclude_issues: bool = True
+
+
+class AOEFAnnotations(AnnotatedDataset):
+    """Configuration defining a data source stored in AOEF format.
+
+    This model specifies how to load annotations from an AOEF (JSON file) file
+    compatible with the `soundevent` library. It inherits `name`,
+    `description`, and `audio_dir` from `AnnotatedDataset`.
+
+    Attributes
+    ----------
+    format : Literal["aoef"]
+        The fixed format identifier for this configuration type.
+    annotations_path : Path
+        The file system path to the `.aoef` or `.json` file containing the
+        `AnnotationSet` or `AnnotationProject`.
+    filter : AnnotationTaskFilter, optional
+        Configuration for filtering tasks if the `annotations_path` points to
+        an `AnnotationProject`. If omitted, default filtering
+        (only completed, exclude issues, verification not required) is applied
+        to projects. Set explicitly to `None` in config (e.g., `filter: null`)
+        to disable filtering for projects entirely.
+    """
+
+    format: Literal["aoef"] = "aoef"
+
+    annotations_path: Path
+
+    # default_factory yields an AnnotationTaskFilter() with its default
+    # flags; only an explicit `filter: null` disables filtering.
+    filter: AnnotationTaskFilter | None = Field(
+        default_factory=AnnotationTaskFilter
+    )
+
+
+class AOEFLoader(AnnotationLoader):
+    """Loader for AOEF/soundevent annotation files (`AOEFAnnotations`)."""
+
+    def __init__(self, config: AOEFAnnotations):
+        self.config = config
+
+    def load(
+        self,
+        base_dir: data.PathLike | None = None,
+    ) -> data.AnnotationSet:
+        """Load the configured annotations, resolving paths via `base_dir`."""
+        return load_aoef_annotated_dataset(self.config, base_dir=base_dir)
+
+    # `staticmethod` must be the outermost decorator so the registry
+    # stores the plain function. The previous order registered the
+    # staticmethod object itself, which is only callable directly on
+    # Python 3.10+ and is fragile.
+    @staticmethod
+    @annotation_format_registry.register(AOEFAnnotations)
+    def from_config(config: AOEFAnnotations):
+        return AOEFLoader(config)
+
+
+def load_aoef_annotated_dataset(
+    dataset: AOEFAnnotations,
+    base_dir: data.PathLike | None = None,
+) -> data.AnnotationSet:
+    """Load annotations from an AnnotationSet or AnnotationProject file.
+
+    Reads the file specified in the `dataset` configuration using
+    `soundevent.io.load`. If the loaded file contains an `AnnotationProject`
+    and filtering is enabled via `dataset.filter`, it applies the filter
+    criteria based on task status and returns a new `AnnotationSet` containing
+    only the selected annotations. If the file contains an `AnnotationSet`,
+    or if it's a project and filtering is disabled, all annotations are
+    returned.
+
+    Parameters
+    ----------
+    dataset : AOEFAnnotations
+        The configuration object describing the AOEF data source, including
+        the path to the annotation file and optional filtering settings.
+    base_dir : Path, optional
+        An optional base directory. If provided, `dataset.annotations_path`
+        and `dataset.audio_dir` will be resolved relative to this
+        directory. Defaults to None.
+
+    Returns
+    -------
+    soundevent.data.AnnotationSet
+        An AnnotationSet containing the loaded (and potentially filtered)
+        `ClipAnnotation` objects.
+
+    Raises
+    ------
+    FileNotFoundError
+        If the specified `annotations_path` (after resolving `base_dir`)
+        does not exist.
+    ValueError
+        If the loaded file does not contain a valid `AnnotationSet` or
+        `AnnotationProject`.
+    Exception
+        May re-raise errors from `soundevent.io.load` related to parsing
+        or file format issues.
+
+    Notes
+    -----
+    - The `soundevent` library handles parsing of `.json` or `.aoef` formats.
+    - If an `AnnotationProject` is loaded and `dataset.filter` is *not* None,
+      a *new* `AnnotationSet` instance is created containing only the filtered
+      clip annotations.
+    """
+    audio_dir = dataset.audio_dir
+    path = dataset.annotations_path
+
+    # Resolve both paths relative to base_dir when one is given.
+    if base_dir:
+        audio_dir = base_dir / audio_dir
+        path = base_dir / path
+
+    loaded = io.load(path, audio_dir=audio_dir)
+
+    if not isinstance(loaded, (data.AnnotationSet, data.AnnotationProject)):
+        raise ValueError(
+            f"The file at {path} loaded successfully but does not "
+            "contain a soundevent AnnotationSet or AnnotationProject "
+            f"(loaded type: {type(loaded).__name__})."
+        )
+
+    # Projects carry task status; filter them down to "ready" clips
+    # unless filtering was explicitly disabled (`filter: null`).
+    if isinstance(loaded, data.AnnotationProject) and dataset.filter:
+        loaded = filter_ready_clips(
+            loaded,
+            only_completed=dataset.filter.only_completed,
+            only_verified=dataset.filter.only_verified,
+            exclude_issues=dataset.filter.exclude_issues,
+        )
+
+    return loaded
+
+
+def select_task(
+    annotation_task: data.AnnotationTask,
+    only_completed: bool = True,
+    only_verified: bool = False,
+    exclude_issues: bool = True,
+) -> bool:
+    """Decide whether an AnnotationTask passes the status filters.
+
+    The task's `status_badges` are inspected and compared against the
+    active filter flags.
+
+    Parameters
+    ----------
+    annotation_task : data.AnnotationTask
+        The annotation task to check.
+    only_completed : bool, default=True
+        Task must be marked 'completed' to pass.
+    only_verified : bool, default=False
+        Task must be marked 'verified' to pass.
+    exclude_issues : bool, default=True
+        Task must *not* be marked 'rejected' (have issues) to pass.
+
+    Returns
+    -------
+    bool
+        True if the task meets all active filter criteria, False otherwise.
+    """
+    badge_states = {badge.state for badge in annotation_task.status_badges}
+
+    if exclude_issues and data.AnnotationState.rejected in badge_states:
+        return False
+
+    if only_verified and data.AnnotationState.verified not in badge_states:
+        return False
+
+    if only_completed and data.AnnotationState.completed not in badge_states:
+        return False
+
+    return True
+
+
+def filter_ready_clips(
+    annotation_project: data.AnnotationProject,
+    only_completed: bool = True,
+    only_verified: bool = False,
+    exclude_issues: bool = True,
+) -> data.AnnotationSet:
+    """Filter AnnotationProject to create an AnnotationSet of 'ready' clips.
+
+    Iterates through tasks in the project, selects tasks meeting the status
+    criteria using `select_task`, and creates a new `AnnotationSet` containing
+    only the `ClipAnnotation` objects associated with those selected tasks.
+
+    Parameters
+    ----------
+    annotation_project : data.AnnotationProject
+        The input annotation project.
+    only_completed : bool, default=True
+        Filter flag passed to `select_task`.
+    only_verified : bool, default=False
+        Filter flag passed to `select_task`.
+    exclude_issues : bool, default=True
+        Filter flag passed to `select_task`.
+
+    Returns
+    -------
+    data.AnnotationSet
+        A new annotation set containing only the clip annotations linked to
+        tasks that satisfied the filtering criteria. The returned set has a
+        deterministic UUID based on the project UUID and filter settings.
+    """
+    # Collect the clip UUIDs of all tasks that pass the status filters.
+    ready_clip_uuids = set()
+
+    for annotation_task in annotation_project.tasks:
+        if not select_task(
+            annotation_task,
+            only_completed=only_completed,
+            only_verified=only_verified,
+            exclude_issues=exclude_issues,
+        ):
+            continue
+
+        ready_clip_uuids.add(annotation_task.clip.uuid)
+
+    return data.AnnotationSet(
+        # uuid5 keyed on project UUID + filter flags: the same project
+        # filtered the same way always yields the same set UUID.
+        uuid=uuid5(
+            annotation_project.uuid,
+            f"{only_completed}_{only_verified}_{exclude_issues}",
+        ),
+        name=annotation_project.name,
+        description=annotation_project.description,
+        clip_annotations=[
+            annotation
+            for annotation in annotation_project.clip_annotations
+            if annotation.clip.uuid in ready_clip_uuids
+        ],
+    )
diff --git a/src/batdetect2/data/annotations/batdetect2.py b/src/batdetect2/data/annotations/batdetect2.py
new file mode 100644
index 00000000..892fd4f1
--- /dev/null
+++ b/src/batdetect2/data/annotations/batdetect2.py
@@ -0,0 +1,380 @@
+"""Loads annotation data from legacy BatDetect2 JSON formats.
+
+This module provides backward compatibility for loading annotation data stored
+in two related formats used by older BatDetect2 tools:
+
+1. **`batdetect2` format** (Directory-based): Annotations are stored in
+ individual JSON files (one per audio recording) within a specified
+ directory.
+ Each JSON file contains a `FileAnnotation` structure. Loaded via
+ `load_batdetect2_files_annotated_dataset` defined by
+ `BatDetect2FilesAnnotations`.
+2. **`batdetect2_file` format** (Single-file): Annotations for multiple
+ recordings are merged into a single JSON file, containing a list of
+ `FileAnnotation` objects. Loaded via
+ `load_batdetect2_merged_annotated_dataset` defined by
+ `BatDetect2MergedAnnotations`.
+
+Both formats use the same internal structure for annotations per file and
+support filtering based on `annotated` and `issues` flags within that
+structure.
+
+The loading functions convert data from these legacy formats into the modern
+`soundevent` data model (primarily `ClipAnnotation`) and return the results
+aggregated into a `soundevent.data.AnnotationSet`.
+"""
+
+import json
+import os
+from pathlib import Path
+from typing import Literal
+
+from loguru import logger
+from pydantic import Field, ValidationError
+from soundevent import data
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.data.annotations.legacy import (
+ FileAnnotation,
+ file_annotation_to_clip,
+ file_annotation_to_clip_annotation,
+ list_file_annotations,
+ load_file_annotation,
+)
+from batdetect2.data.annotations.registry import annotation_format_registry
+from batdetect2.data.annotations.types import (
+ AnnotatedDataset,
+ AnnotationLoader,
+)
+
+PathLike = Path | str | os.PathLike
+
+
+__all__ = [
+ "load_batdetect2_files_annotated_dataset",
+ "load_batdetect2_merged_annotated_dataset",
+ "BatDetect2FilesAnnotations",
+ "BatDetect2MergedAnnotations",
+ "AnnotationFilter",
+]
+
+
+class AnnotationFilter(BaseConfig):
+    """Configuration for filtering legacy FileAnnotations based on flags.
+
+    Specifies criteria based on boolean flags (`annotated` and `issues`)
+    present within the legacy `FileAnnotation` JSON structure to select which
+    entries (either files or records within a merged file) should be loaded and
+    converted.
+
+    Attributes
+    ----------
+    only_annotated : bool, default=True
+        If True, only process entries where the `annotated` flag in the JSON
+        is set to `True`.
+    exclude_issues : bool, default=True
+        If True, skip processing entries where the `issues` flag in the JSON
+        is set to `True`.
+    """
+
+    # Defaults keep only fully-annotated, issue-free entries.
+    only_annotated: bool = True
+    exclude_issues: bool = True
+
+
+class BatDetect2FilesAnnotations(AnnotatedDataset):
+    """Configuration for the legacy 'batdetect2' format (directory-based).
+
+    Defines a data source where annotations are stored as individual JSON files
+    (one per recording, containing a `FileAnnotation` structure) within the
+    `annotations_dir`. Requires a corresponding `audio_dir`. Assumes a naming
+    convention links audio files to JSON files
+    (e.g., `rec.wav` -> `rec.wav.json`).
+
+    Attributes
+    ----------
+    format : Literal["batdetect2"]
+        The fixed format identifier for this configuration type.
+    annotations_dir : Path
+        Path to the directory containing the individual JSON annotation files.
+    filter : AnnotationFilter, optional
+        Configuration for filtering which files to process based on their
+        `annotated` and `issues` flags. Defaults to requiring `annotated=True`
+        and `issues=False`. Set explicitly to `None` in config (e.g.,
+        `filter: null`) to disable filtering.
+    """
+
+    format: Literal["batdetect2"] = "batdetect2"
+    annotations_dir: Path
+
+    # default_factory supplies AnnotationFilter() with default flags;
+    # only an explicit `filter: null` disables filtering.
+    filter: AnnotationFilter | None = Field(
+        default_factory=AnnotationFilter,
+    )
+
+
+class BatDetect2MergedAnnotations(AnnotatedDataset):
+    """Configuration for the legacy 'batdetect2_file' format (merged file).
+
+    Defines a data source where annotations for multiple recordings (each as a
+    `FileAnnotation` structure) are stored within a single JSON file specified
+    by `annotations_path`. Audio files are expected in `audio_dir`.
+
+    Inherits `name`, `description`, and `audio_dir` from `AnnotatedDataset`.
+
+    Attributes
+    ----------
+    format : Literal["batdetect2_file"]
+        The fixed format identifier for this configuration type.
+    annotations_path : Path
+        Path to the single JSON file containing a list of `FileAnnotation`
+        objects.
+    filter : AnnotationFilter, optional
+        Configuration for filtering which `FileAnnotation` entries within the
+        merged file to process based on their `annotated` and `issues` flags.
+        Defaults to requiring `annotated=True` and `issues=False`. Set to `None`
+        in config (e.g., `filter: null`) to disable filtering.
+    """
+
+    format: Literal["batdetect2_file"] = "batdetect2_file"
+    annotations_path: Path
+
+    # default_factory supplies AnnotationFilter() with default flags;
+    # only an explicit `filter: null` disables filtering.
+    filter: AnnotationFilter | None = Field(
+        default_factory=AnnotationFilter,
+    )
+
+
+def load_batdetect2_files_annotated_dataset(
+    dataset: BatDetect2FilesAnnotations,
+    base_dir: PathLike | None = None,
+) -> data.AnnotationSet:
+    """Load and convert 'batdetect2' directory annotations into an AnnotationSet.
+
+    Scans the specified `annotations_dir` for individual JSON annotation files.
+    For each file: loads the legacy `FileAnnotation`, applies filtering based
+    on `dataset.filter` (`annotated`/`issues` flags), attempts to find the
+    corresponding audio file, converts valid entries to `ClipAnnotation`, and
+    collects them into a single `soundevent.data.AnnotationSet`.
+
+    Parameters
+    ----------
+    dataset : BatDetect2FilesAnnotations
+        Configuration describing the 'batdetect2' (directory) data source.
+    base_dir : PathLike, optional
+        Optional base directory to resolve relative paths in `dataset.audio_dir`
+        and `dataset.annotations_dir`. Defaults to None.
+
+    Returns
+    -------
+    soundevent.data.AnnotationSet
+        An AnnotationSet containing all successfully loaded, filtered, and
+        converted `ClipAnnotation` objects.
+
+    Raises
+    ------
+    FileNotFoundError
+        If the `annotations_dir` or `audio_dir` does not exist. Errors finding
+        individual JSON or audio files during iteration are logged and skipped.
+    """
+    audio_dir = dataset.audio_dir
+    path = dataset.annotations_dir
+
+    if base_dir:
+        audio_dir = base_dir / audio_dir
+        path = base_dir / path
+
+    paths = list_file_annotations(path)
+    logger.debug(
+        "Found {num_files} files in the annotations directory {path}",
+        num_files=len(paths),
+        path=path,
+    )
+
+    annotations = []
+
+    for p in paths:
+        # Unreadable or malformed files are skipped, not fatal.
+        try:
+            file_annotation = load_file_annotation(p)
+        except (FileNotFoundError, ValidationError):
+            logger.warning("Could not load annotations in file {path}", path=p)
+            continue
+
+        if (
+            dataset.filter
+            and dataset.filter.only_annotated
+            and not file_annotation.annotated
+        ):
+            logger.debug(
+                "Annotation in file {path} omitted: not annotated",
+                path=p,
+            )
+            continue
+
+        if (
+            dataset.filter
+            and dataset.filter.exclude_issues
+            and file_annotation.issues
+        ):
+            logger.debug(
+                "Annotation in file {path} omitted: has issues",
+                path=p,
+            )
+            continue
+
+        # Missing audio files are skipped, not fatal.
+        try:
+            clip = file_annotation_to_clip(
+                file_annotation,
+                audio_dir=audio_dir,
+            )
+        except FileNotFoundError as err:
+            logger.warning(
+                "Did not find the audio related to the annotation file {path}. Error: {err}",
+                path=p,
+                err=err,
+            )
+            continue
+
+        annotations.append(
+            file_annotation_to_clip_annotation(
+                file_annotation,
+                clip,
+            )
+        )
+
+    return data.AnnotationSet(
+        name=dataset.name,
+        description=dataset.description,
+        clip_annotations=annotations,
+    )
+
+
+def load_batdetect2_merged_annotated_dataset(
+    dataset: BatDetect2MergedAnnotations,
+    base_dir: PathLike | None = None,
+) -> data.AnnotationSet:
+    """Load and convert 'batdetect2_file' (merged) annotations into an AnnotationSet.
+
+    Loads a single JSON file containing a list of legacy `FileAnnotation`
+    objects. For each entry in the list: applies filtering based on
+    `dataset.filter` (`annotated`/`issues` flags), attempts to find the
+    corresponding audio file, converts valid entries to `ClipAnnotation`, and
+    collects them into a single `soundevent.data.AnnotationSet`.
+
+    Parameters
+    ----------
+    dataset : BatDetect2MergedAnnotations
+        Configuration describing the 'batdetect2_file' (merged) data source.
+    base_dir : PathLike, optional
+        Optional base directory to resolve relative paths in `dataset.audio_dir`
+        and `dataset.annotations_path`. Defaults to None.
+
+    Returns
+    -------
+    soundevent.data.AnnotationSet
+        An AnnotationSet containing all successfully loaded, filtered, and
+        converted `ClipAnnotation` objects from the merged file.
+
+    Raises
+    ------
+    FileNotFoundError
+        If the `annotations_path` or `audio_dir` does not exist. Errors
+        finding individual audio files referenced within the JSON are logged
+        and skipped.
+    json.JSONDecodeError
+        If the annotations file is not valid JSON.
+    TypeError
+        If the root JSON structure is not a list.
+    pydantic.ValidationError
+        If entries within the JSON list do not conform to the legacy
+        `FileAnnotation` structure.
+    """
+    audio_dir = dataset.audio_dir
+    path = dataset.annotations_path
+
+    if base_dir:
+        audio_dir = base_dir / audio_dir
+        path = base_dir / path
+
+    content = json.loads(Path(path).read_text())
+
+    if not isinstance(content, list):
+        raise TypeError(
+            f"Expected a list of FileAnnotations, but got {type(content)}",
+        )
+
+    annotations = []
+
+    for ann in content:
+        # ValueError also covers pydantic's ValidationError (a subclass).
+        try:
+            ann = FileAnnotation.model_validate(ann)
+        except ValueError as err:
+            logger.warning("Invalid annotation file: {err}", err=err)
+            continue
+
+        if (
+            dataset.filter
+            and dataset.filter.only_annotated
+            and not ann.annotated
+        ):
+            logger.debug(
+                "Skipping incomplete annotation {ann_id}",
+                ann_id=ann.id,
+            )
+            continue
+
+        if dataset.filter and dataset.filter.exclude_issues and ann.issues:
+            logger.debug(
+                "Skipping annotation with issues {ann_id}",
+                ann_id=ann.id,
+            )
+            continue
+
+        # Missing audio files are skipped, not fatal.
+        try:
+            clip = file_annotation_to_clip(ann, audio_dir=audio_dir)
+        except FileNotFoundError as err:
+            logger.warning("Error loading annotations: {err}", err=err)
+            continue
+
+        annotations.append(file_annotation_to_clip_annotation(ann, clip))
+
+    return data.AnnotationSet(
+        name=dataset.name,
+        description=dataset.description,
+        clip_annotations=annotations,
+    )
+
+
+class BatDetect2MergedLoader(AnnotationLoader):
+    """Loader for the legacy merged-file ('batdetect2_file') format."""
+
+    def __init__(self, config: BatDetect2MergedAnnotations):
+        self.config = config
+
+    def load(
+        self,
+        base_dir: PathLike | None = None,
+    ) -> data.AnnotationSet:
+        """Load the configured annotations, resolving paths via `base_dir`."""
+        return load_batdetect2_merged_annotated_dataset(
+            self.config,
+            base_dir=base_dir,
+        )
+
+    # `staticmethod` must be the outermost decorator so the registry
+    # stores the plain function. The previous order registered the
+    # staticmethod object itself, which is only callable directly on
+    # Python 3.10+ and is fragile.
+    @staticmethod
+    @annotation_format_registry.register(BatDetect2MergedAnnotations)
+    def from_config(config: BatDetect2MergedAnnotations):
+        return BatDetect2MergedLoader(config)
+
+
+class BatDetect2FilesLoader(AnnotationLoader):
+    """Loader for the legacy directory-based ('batdetect2') format."""
+
+    def __init__(self, config: BatDetect2FilesAnnotations):
+        self.config = config
+
+    def load(
+        self,
+        base_dir: PathLike | None = None,
+    ) -> data.AnnotationSet:
+        """Load the configured annotations, resolving paths via `base_dir`."""
+        return load_batdetect2_files_annotated_dataset(
+            self.config,
+            base_dir=base_dir,
+        )
+
+    # `staticmethod` must be the outermost decorator so the registry
+    # stores the plain function. The previous order registered the
+    # staticmethod object itself, which is only callable directly on
+    # Python 3.10+ and is fragile.
+    @staticmethod
+    @annotation_format_registry.register(BatDetect2FilesAnnotations)
+    def from_config(config: BatDetect2FilesAnnotations):
+        return BatDetect2FilesLoader(config)
diff --git a/src/batdetect2/data/annotations/legacy.py b/src/batdetect2/data/annotations/legacy.py
new file mode 100644
index 00000000..ad946fbd
--- /dev/null
+++ b/src/batdetect2/data/annotations/legacy.py
@@ -0,0 +1,227 @@
+"""Compatibility functions between old and new data structures."""
+
+import os
+import uuid
+from pathlib import Path
+from typing import Callable, List
+
+from pydantic import BaseModel, Field
+from soundevent import data
+
+PathLike = Path | str | os.PathLike
+
+__all__ = []  # NOTE(review): intentionally empty? module defines several public helpers — confirm
+
+SPECIES_TAG_KEY = "species"
+ECHOLOCATION_EVENT = "Echolocation"
+UNKNOWN_CLASS = "__UNKNOWN__"
+
+NAMESPACE = uuid.UUID("97a9776b-c0fd-4c68-accb-0b0ecd719242")
+
+CLIP_NAMESPACE = uuid.uuid5(NAMESPACE, "clip")
+CLIP_ANNOTATION_NAMESPACE = uuid.uuid5(NAMESPACE, "clip_annotation")
+RECORDING_NAMESPACE = uuid.uuid5(NAMESPACE, "recording")
+SOUND_EVENT_NAMESPACE = uuid.uuid5(NAMESPACE, "sound_event")
+SOUND_EVENT_ANNOTATION_NAMESPACE = uuid.uuid5(
+ NAMESPACE, "sound_event_annotation"
+)
+
+
+EventFn = Callable[[data.SoundEventAnnotation], str | None]
+
+ClassFn = Callable[[data.Recording], int]
+
+IndividualFn = Callable[[data.SoundEventAnnotation], int]
+
+
+class Annotation(BaseModel):
+ """Annotation class to hold batdetect annotations."""
+
+ label: str = Field(alias="class")
+ event: str
+ individual: int = 0
+
+ start_time: float
+ end_time: float
+ low_freq: float
+ high_freq: float
+
+
+class FileAnnotation(BaseModel):
+ """FileAnnotation class to hold batdetect annotations for a file."""
+
+ id: str
+ duration: float
+ time_exp: float = 1
+
+ label: str = Field(alias="class_name")
+
+ annotation: List[Annotation]
+
+ annotated: bool = False
+ issues: bool = False
+ notes: str = ""
+
+
+def load_file_annotation(path: PathLike) -> FileAnnotation:
+ """Load annotation from batdetect format."""
+ path = Path(path)
+ return FileAnnotation.model_validate_json(path.read_text())
+
+
+def annotation_to_sound_event(
+ annotation: Annotation,
+ recording: data.Recording,
+ label_key: str = "class",
+ event_key: str = "event",
+ individual_key: str = "individual",
+) -> data.SoundEventAnnotation:
+ """Convert annotation to sound event annotation."""
+ sound_event = data.SoundEvent(
+ uuid=uuid.uuid5(
+ SOUND_EVENT_NAMESPACE,
+ f"{recording.uuid}_{annotation.start_time}_{annotation.end_time}",
+ ),
+ recording=recording,
+ geometry=data.BoundingBox(
+ coordinates=[
+ annotation.start_time,
+ annotation.low_freq,
+ annotation.end_time,
+ annotation.high_freq,
+ ],
+ ),
+ )
+
+ return data.SoundEventAnnotation(
+ uuid=uuid.uuid5(
+ SOUND_EVENT_ANNOTATION_NAMESPACE,
+ f"{sound_event.uuid}",
+ ),
+ sound_event=sound_event,
+ tags=get_sound_event_tags(
+ annotation, label_key, event_key, individual_key
+ ),
+ )
+
+
+def get_sound_event_tags(
+ annotation: Annotation,
+ label_key: str = "class",
+ event_key: str = "event",
+ individual_key: str = "individual",
+) -> List[data.Tag]:
+ """Get the tags for a sound event annotation."""
+ tags = []
+
+ if annotation.label:
+ tags.append(data.Tag(key=label_key, value=annotation.label))
+
+ if annotation.event:
+ tags.append(data.Tag(key=event_key, value=annotation.event))
+
+ if annotation.individual is not None:
+ tags.append(
+ data.Tag(key=individual_key, value=str(annotation.individual))
+ )
+
+ return tags
+
+
+def file_annotation_to_clip(
+ file_annotation: FileAnnotation,
+ audio_dir: PathLike | None = None,
+ label_key: str = "class",
+) -> data.Clip:
+ """Convert file annotation to recording."""
+ audio_dir = audio_dir or Path.cwd()
+
+ full_path = Path(audio_dir) / file_annotation.id
+
+ if not full_path.exists():
+ raise FileNotFoundError(f"File {full_path} not found.")
+
+ tags = []
+ if file_annotation.label:
+ tags.append(data.Tag(key=label_key, value=file_annotation.label))
+
+ recording = data.Recording.from_file(
+ full_path,
+ time_expansion=file_annotation.time_exp,
+ tags=tags,
+ )
+ recording.uuid = uuid.uuid5(RECORDING_NAMESPACE, f"{recording.hash}")
+
+ start_time = 0
+ end_time = recording.duration
+ return data.Clip(
+ uuid=uuid.uuid5(
+ CLIP_NAMESPACE,
+ f"{recording.uuid}_{start_time}_{end_time}",
+ ),
+ recording=recording,
+ start_time=start_time,
+ end_time=end_time,
+ )
+
+
+def file_annotation_to_clip_annotation(
+ file_annotation: FileAnnotation,
+ clip: data.Clip,
+ label_key: str = "class",
+ event_key: str = "event",
+ individual_key: str = "individual",
+) -> data.ClipAnnotation:
+ """Convert file annotation to clip annotation."""
+ notes = []
+ if file_annotation.notes:
+ notes.append(data.Note(message=file_annotation.notes))
+
+ tags = []
+ if file_annotation.label:
+ tags.append(data.Tag(key=label_key, value=file_annotation.label))
+
+ return data.ClipAnnotation(
+ uuid=uuid.uuid5(CLIP_ANNOTATION_NAMESPACE, f"{clip.uuid}"),
+ clip=clip,
+ notes=notes,
+ tags=tags,
+ sound_events=[
+ annotation_to_sound_event(
+ annotation,
+ clip.recording,
+ label_key=label_key,
+ event_key=event_key,
+ individual_key=individual_key,
+ )
+ for annotation in file_annotation.annotation
+ ],
+ )
+
+
+def file_annotation_to_annotation_task(
+ file_annotation: FileAnnotation,
+ clip: data.Clip,
+) -> data.AnnotationTask:
+ status_badges = []
+
+ if file_annotation.issues:
+ status_badges.append(
+ data.StatusBadge(state=data.AnnotationState.rejected)
+ )
+ elif file_annotation.annotated:
+ status_badges.append(
+ data.StatusBadge(state=data.AnnotationState.completed)
+ )
+
+ return data.AnnotationTask(
+ uuid=uuid.uuid5(uuid.NAMESPACE_URL, f"{file_annotation.id}_task"),  # NOTE(review): other IDs derive from the module NAMESPACE constants — confirm NAMESPACE_URL is intended here
+ clip=clip,
+ status_badges=status_badges,
+ )
+
+
+def list_file_annotations(path: PathLike) -> List[Path]:
+ """List all annotations in a directory."""
+ path = Path(path)
+ return sorted(path.glob("*.json"))
diff --git a/src/batdetect2/data/annotations/registry.py b/src/batdetect2/data/annotations/registry.py
new file mode 100644
index 00000000..5f3fa778
--- /dev/null
+++ b/src/batdetect2/data/annotations/registry.py
@@ -0,0 +1,35 @@
+from typing import Literal
+
+from batdetect2.core import ImportConfig, Registry, add_import_config
+from batdetect2.data.annotations.types import AnnotationLoader
+
+__all__ = [
+ "AnnotationFormatImportConfig",
+ "annotation_format_registry",
+]
+
+annotation_format_registry: Registry[AnnotationLoader, []] = Registry(
+ "annotation_format",
+ discriminator="format",
+)
+
+
+@add_import_config(annotation_format_registry)
+class AnnotationFormatImportConfig(ImportConfig):
+ """Import escape hatch for the annotation format registry.
+
+ Use this config to dynamically instantiate any callable as an
+ annotation loader without registering it in
+ ``annotation_format_registry`` ahead of time.
+
+ Parameters
+ ----------
+ format : Literal["import"]
+ Discriminator value; must always be ``"import"``.
+ target : str
+ Fully-qualified dotted path to the callable to instantiate.
+ arguments : dict[str, Any]
+ Keyword arguments forwarded to the callable.
+ """
+
+ format: Literal["import"] = "import"
diff --git a/src/batdetect2/data/annotations/types.py b/src/batdetect2/data/annotations/types.py
new file mode 100644
index 00000000..d7478d2f
--- /dev/null
+++ b/src/batdetect2/data/annotations/types.py
@@ -0,0 +1,47 @@
+from pathlib import Path
+from typing import Protocol
+
+from soundevent import data
+
+from batdetect2.core.configs import BaseConfig
+
+__all__ = [
+ "AnnotatedDataset",
+ "AnnotationLoader",
+]
+
+
+class AnnotatedDataset(BaseConfig):
+ """Represents a single, cohesive source of audio recordings and annotations.
+
+ A source typically groups recordings originating from a specific context,
+ such as a single project, site, deployment, or recordist. All audio files
+ belonging to a source should be located within a single directory,
+ specified by `audio_dir`.
+
+ Annotations associated with these recordings are defined by the
+ `annotations` field, which supports various formats (e.g., AOEF files,
+ specific CSV structures). Crucially, file paths referenced within the
+ annotation data *must* be relative to the `audio_dir`. This ensures that
+ the dataset definition remains portable across different systems and base
+ directories.
+
+ Attributes:
+ name: A unique identifier for this data source.
+ description: Detailed information about the source, including recording
+ methods, annotation procedures, equipment used, potential biases,
+ or any important caveats for users.
+ audio_dir: The file system path to the directory containing the audio
+ recordings for this source.
+ """
+
+ name: str
+ audio_dir: Path
+ description: str = ""
+
+
+class AnnotationLoader(Protocol):
+ def load(
+ self,
+ base_dir: data.PathLike | None = None,
+ ) -> data.AnnotationSet: ...
diff --git a/src/batdetect2/data/conditions/__init__.py b/src/batdetect2/data/conditions/__init__.py
new file mode 100644
index 00000000..1d451ba3
--- /dev/null
+++ b/src/batdetect2/data/conditions/__init__.py
@@ -0,0 +1,83 @@
+from batdetect2.data.conditions.clips import (
+ ClipAllOfConfig,
+ ClipAnnotationCondition,
+ ClipAnnotationConditionConfig,
+ ClipAnnotationConditionImportConfig,
+ ClipAnyOfConfig,
+ ClipNotConfig,
+ RecordingSatisfiesConfig,
+ build_clip_annotation_condition,
+)
+from batdetect2.data.conditions.common import (
+ CsvList,
+ HasAllTagsConfig,
+ HasAnyTagConfig,
+ HasTagConfig,
+ IdInListConfig,
+ JsonList,
+ ListFormatConfig,
+ TagInfo,
+ TxtList,
+)
+from batdetect2.data.conditions.recordings import (
+ PathInListConfig,
+ RecordingAllOfConfig,
+ RecordingAnyOfConfig,
+ RecordingCondition,
+ RecordingConditionConfig,
+ RecordingConditionImportConfig,
+ RecordingNotConfig,
+ build_recording_condition,
+)
+from batdetect2.data.conditions.sound_events import (
+ AllOfConfig,
+ AnyOfConfig,
+ DurationConfig,
+ FrequencyConfig,
+ NotConfig,
+ Operator,
+ SoundEventCondition,
+ SoundEventConditionConfig,
+ SoundEventConditionImportConfig,
+ build_sound_event_condition,
+ filter_clip_annotation,
+)
+
+__all__ = [
+ "AllOfConfig",
+ "AnyOfConfig",
+ "ClipAllOfConfig",
+ "ClipAnnotationCondition",
+ "ClipAnnotationConditionConfig",
+ "ClipAnnotationConditionImportConfig",
+ "ClipAnyOfConfig",
+ "ClipNotConfig",
+ "CsvList",
+ "DurationConfig",
+ "FrequencyConfig",
+ "HasAllTagsConfig",
+ "HasAnyTagConfig",
+ "HasTagConfig",
+ "IdInListConfig",
+ "JsonList",
+ "ListFormatConfig",
+ "NotConfig",
+ "Operator",
+ "PathInListConfig",
+ "RecordingAllOfConfig",
+ "RecordingAnyOfConfig",
+ "RecordingCondition",
+ "RecordingConditionConfig",
+ "RecordingConditionImportConfig",
+ "RecordingNotConfig",
+ "RecordingSatisfiesConfig",
+ "SoundEventCondition",
+ "SoundEventConditionConfig",
+ "SoundEventConditionImportConfig",
+ "TagInfo",
+ "TxtList",
+ "build_clip_annotation_condition",
+ "build_recording_condition",
+ "build_sound_event_condition",
+ "filter_clip_annotation",
+]
diff --git a/src/batdetect2/data/conditions/clips.py b/src/batdetect2/data/conditions/clips.py
new file mode 100644
index 00000000..93dcbbca
--- /dev/null
+++ b/src/batdetect2/data/conditions/clips.py
@@ -0,0 +1,138 @@
+from collections.abc import Callable, Sequence
+from typing import Annotated, Literal
+
+from pydantic import Field
+from soundevent import data
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.core.registries import (
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.data.conditions.common import (
+ HasAllTagsConfig,
+ HasAnyTagConfig,
+ HasTagConfig,
+ IdInListConfig,
+ MultiConditionConfigBase,
+ NotConditionConfigBase,
+ register_all_of_condition,
+ register_any_of_condition,
+ register_has_all_tags_condition,
+ register_has_any_tag_condition,
+ register_has_tag_condition,
+ register_id_in_list_condition,
+ register_not_condition,
+)
+from batdetect2.data.conditions.recordings import (
+ RecordingCondition,
+ RecordingConditionConfig,
+ build_recording_condition,
+)
+
+__all__ = [
+ "ClipAllOfConfig",
+ "ClipAnnotationCondition",
+ "ClipAnnotationConditionConfig",
+ "ClipAnnotationConditionImportConfig",
+ "ClipAnyOfConfig",
+ "ClipNotConfig",
+ "RecordingSatisfiesConfig",
+ "build_clip_annotation_condition",
+]
+
+ClipAnnotationCondition = Callable[[data.ClipAnnotation], bool]
+
+clip_annotation_conditions: Registry[
+ ClipAnnotationCondition,
+ [data.PathLike | None],
+] = Registry("clip_condition")
+
+
+@add_import_config(clip_annotation_conditions, arg_names=["base_dir"])
+class ClipAnnotationConditionImportConfig(ImportConfig):
+ """Use any callable as a clip annotation condition.
+
+ Set ``name="import"`` and provide a ``target`` pointing to any callable
+ to use it instead of a built-in option.
+ """
+
+ name: Literal["import"] = "import"
+
+
+class RecordingSatisfiesConfig(BaseConfig):
+ name: Literal["recording_satisfies"] = "recording_satisfies"
+ condition: RecordingConditionConfig
+
+
+class RecordingSatisfies:
+ def __init__(self, condition: RecordingCondition):
+ self.condition = condition
+
+ def __call__(self, clip_annotation: data.ClipAnnotation) -> bool:
+ recording = clip_annotation.clip.recording
+ return self.condition(recording)
+
+ @clip_annotation_conditions.register(RecordingSatisfiesConfig)
+ @staticmethod
+ def from_config(
+ config: RecordingSatisfiesConfig,
+ base_dir: data.PathLike | None = None,
+ ) -> "RecordingSatisfies":
+ condition = build_recording_condition(
+ config.condition,
+ base_dir=base_dir,
+ )
+ return RecordingSatisfies(condition)
+
+
+register_has_tag_condition(clip_annotation_conditions, HasTagConfig)
+register_has_all_tags_condition(
+ clip_annotation_conditions,
+ HasAllTagsConfig,
+)
+register_has_any_tag_condition(
+ clip_annotation_conditions,
+ HasAnyTagConfig,
+)
+register_id_in_list_condition(clip_annotation_conditions, IdInListConfig)
+
+
+@register_all_of_condition(clip_annotation_conditions)
+class ClipAllOfConfig(MultiConditionConfigBase):
+ name: Literal["all_of"] = "all_of"
+ conditions: Sequence["ClipAnnotationConditionConfig"]
+
+
+@register_any_of_condition(clip_annotation_conditions)
+class ClipAnyOfConfig(MultiConditionConfigBase):
+ name: Literal["any_of"] = "any_of"
+ conditions: Sequence["ClipAnnotationConditionConfig"]
+
+
+@register_not_condition(clip_annotation_conditions)
+class ClipNotConfig(NotConditionConfigBase):
+ name: Literal["not"] = "not"
+ condition: "ClipAnnotationConditionConfig"
+
+
+ClipAnnotationConditionConfig = Annotated[
+ RecordingSatisfiesConfig
+ | IdInListConfig
+ | HasTagConfig
+ | HasAllTagsConfig
+ | HasAnyTagConfig
+ | ClipAllOfConfig
+ | ClipAnyOfConfig
+ | ClipNotConfig
+ | ClipAnnotationConditionImportConfig,
+ Field(discriminator="name"),
+]
+
+
+def build_clip_annotation_condition(
+ config: ClipAnnotationConditionConfig,
+ base_dir: data.PathLike | None = None,
+) -> ClipAnnotationCondition:
+ return clip_annotation_conditions.build(config, base_dir)
diff --git a/src/batdetect2/data/conditions/common.py b/src/batdetect2/data/conditions/common.py
new file mode 100644
index 00000000..b2fdf219
--- /dev/null
+++ b/src/batdetect2/data/conditions/common.py
@@ -0,0 +1,437 @@
+import csv
+import json
+from collections.abc import Callable, Sequence
+from pathlib import Path
+from typing import (
+ Annotated,
+ Any,
+ Generic,
+ Literal,
+ ParamSpec,
+ Protocol,
+ TypeVar,
+)
+from uuid import UUID
+
+from pydantic import (
+ BaseModel,
+ Field,
+ PlainSerializer,
+ model_validator,
+)
+from soundevent import data
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.core.registries import Registry
+
+__all__ = [
+ "AllOf",
+ "AnyOf",
+ "Condition",
+ "CsvList",
+ "HasAllTags",
+ "HasAllTagsConfig",
+ "HasAnyTag",
+ "HasAnyTagConfig",
+ "HasTag",
+ "HasTagConfig",
+ "IdInList",
+ "IdInListConfig",
+ "JsonList",
+ "ListLoader",
+ "ListFormatConfig",
+ "MultiConditionConfigBase",
+ "Not",
+ "NotConditionConfigBase",
+ "ObjectWithTags",
+ "ObjectWithUUID",
+ "TxtList",
+ "build_list_loader",
+ "register_all_of_condition",
+ "register_any_of_condition",
+ "register_has_all_tags_condition",
+ "register_has_any_tag_condition",
+ "register_has_tag_condition",
+ "register_id_in_list_condition",
+ "register_not_condition",
+]
+
+
+class ObjectWithTags(Protocol):
+ tags: list[data.Tag]
+
+
+class ObjectWithUUID(Protocol):
+ uuid: UUID
+
+
+ConditionObject = TypeVar("ConditionObject")
+TaggedObject = TypeVar("TaggedObject", bound="ObjectWithTags")
+UUIDObject = TypeVar("UUIDObject", bound="ObjectWithUUID")
+P = ParamSpec("P")
+NotConfigType = TypeVar("NotConfigType", bound="NotConditionConfigBase")
+MultiConfigType = TypeVar(
+ "MultiConfigType",
+ bound="MultiConditionConfigBase",
+)
+Condition = Callable[[ConditionObject], bool]
+
+
+class NotConditionConfigBase(BaseConfig):
+ condition: BaseModel
+
+
+class MultiConditionConfigBase(BaseConfig):
+ conditions: Sequence[BaseModel]
+
+
+class Not(Generic[ConditionObject]):
+ def __init__(self, condition: Condition[ConditionObject]):
+ self.condition = condition
+
+ def __call__(self, obj: ConditionObject) -> bool:
+ return not self.condition(obj)
+
+
+class AllOf(Generic[ConditionObject]):
+ def __init__(self, conditions: Sequence[Condition[ConditionObject]]):
+ self.conditions = list(conditions)
+
+ def __call__(self, obj: ConditionObject) -> bool:
+ return all(condition(obj) for condition in self.conditions)
+
+
+class AnyOf(Generic[ConditionObject]):
+ def __init__(self, conditions: Sequence[Condition[ConditionObject]]):
+ self.conditions = list(conditions)
+
+ def __call__(self, obj: ConditionObject) -> bool:
+ return any(condition(obj) for condition in self.conditions)
+
+
+class HasTag(Generic[TaggedObject]):
+ def __init__(self, tag: data.Tag):
+ self.tag_key = (tag.term.name, tag.value)
+
+ def __call__(self, obj: TaggedObject) -> bool:
+ return any(
+ (tag.term.name, tag.value) == self.tag_key for tag in obj.tags
+ )
+
+
+class HasAllTags(Generic[TaggedObject]):
+ def __init__(self, tags: list[data.Tag]):
+ if not tags:
+ raise ValueError("Need to specify at least one tag")
+
+ self.required_keys = {(tag.term.name, tag.value) for tag in tags}
+
+ def __call__(self, obj: TaggedObject) -> bool:
+ tag_keys = {(tag.term.name, tag.value) for tag in obj.tags}
+ return self.required_keys.issubset(tag_keys)
+
+
+class HasAnyTag(Generic[TaggedObject]):
+ def __init__(self, tags: list[data.Tag]):
+ if not tags:
+ raise ValueError("Need to specify at least one tag")
+
+ self.required_keys = {(tag.term.name, tag.value) for tag in tags}
+
+ def __call__(self, obj: TaggedObject) -> bool:
+ tag_keys = {(tag.term.name, tag.value) for tag in obj.tags}
+ return bool(self.required_keys.intersection(tag_keys))
+
+
+class IdInList(Generic[UUIDObject]):
+ def __init__(self, ids: set[UUID]):
+ self.ids = ids
+
+ def __call__(self, obj: UUIDObject) -> bool:
+ return obj.uuid in self.ids
+
+
+def dump_tag(tag: data.Tag) -> dict[str, Any]:
+ return {"key": tag.term.name, "value": tag.value}
+
+
+TagInfo = Annotated[data.Tag, PlainSerializer(dump_tag)]
+
+
+class HasTagConfig(BaseConfig):
+ name: Literal["has_tag"] = "has_tag"
+ tag: TagInfo
+
+
+class HasAllTagsConfig(BaseConfig):
+ name: Literal["has_all_tags"] = "has_all_tags"
+ tags: list[TagInfo]
+
+
+class HasAnyTagConfig(BaseConfig):
+ name: Literal["has_any_tag"] = "has_any_tag"
+ tags: list[TagInfo]
+
+
+class JsonList(BaseConfig):
+ name: Literal["json"] = "json"
+ field: str | None = None
+
+
+class TxtList(BaseConfig):
+ name: Literal["txt"] = "txt"
+
+
+class CsvList(BaseConfig):
+ name: Literal["csv"] = "csv"
+ column: str
+
+
+ListFormatConfig = Annotated[
+ JsonList | TxtList | CsvList,
+ Field(discriminator="name"),
+]
+
+
+ListLoader = Callable[[Path], list[str]]
+
+list_loaders: Registry[ListLoader, []] = Registry("list_loader")
+
+
+class IdInListConfig(BaseConfig):
+ name: Literal["id_in_list"] = "id_in_list"
+ path: Path
+ format: ListFormatConfig = JsonList()
+
+ @model_validator(mode="before")
+ @classmethod
+ def _normalize_format(cls, values):
+ if not isinstance(values, dict):
+ return values
+
+ format_config = values.get("format")
+
+ if isinstance(format_config, str):
+ values = values.copy()
+ config_class = list_loaders.get_config_type(format_config)
+ values["format"] = config_class().model_dump()
+
+ return values
+
+
+class JsonListLoader:
+ def __init__(self, field: str | None):
+ self.field = field
+
+ def __call__(self, path: Path) -> list[str]:
+ content = json.loads(path.read_text())
+
+ if self.field is not None:
+ if not isinstance(content, dict):
+ raise TypeError(
+ f"Expected a JSON object containing field '{self.field}'."
+ )
+
+ if self.field not in content:
+ raise KeyError(f"Field '{self.field}' not found in '{path}'.")
+
+ content = content[self.field]
+
+ if not isinstance(content, list):
+ raise TypeError(f"Expected a JSON list of strings in '{path}'.")
+
+ return [str(value) for value in content]
+
+ @list_loaders.register(JsonList)
+ @staticmethod
+ def from_config(config: JsonList) -> ListLoader:
+ return JsonListLoader(field=config.field)
+
+
+class TxtListLoader:
+ def __call__(self, path: Path) -> list[str]:
+ return [
+ line.strip()
+ for line in path.read_text().splitlines()
+ if line.strip()
+ ]
+
+ @list_loaders.register(TxtList)
+ @staticmethod
+ def from_config(config: TxtList) -> ListLoader:
+ return TxtListLoader()
+
+
+class CsvListLoader:
+ def __init__(self, column: str):
+ self.column = column
+
+ def __call__(self, path: Path) -> list[str]:
+ with path.open("r", newline="") as csv_file:
+ reader = csv.DictReader(csv_file)
+
+ if reader.fieldnames is None:
+ raise ValueError(
+ f"Expected a CSV header row in '{path}'."
+ )
+
+ if self.column not in reader.fieldnames:
+ raise ValueError(
+ f"Column '{self.column}' not found in '{path}'."
+ )
+
+ values = []
+ for row in reader:
+ value = row.get(self.column)
+
+ if value is None:
+ continue
+
+ value = value.strip()
+
+ if not value:
+ continue
+
+ values.append(value)
+
+ return values
+
+ @list_loaders.register(CsvList)
+ @staticmethod
+ def from_config(config: CsvList) -> ListLoader:
+ return CsvListLoader(column=config.column)
+
+
+def build_list_loader(config: ListFormatConfig) -> ListLoader:
+ return list_loaders.build(config)
+
+
+def register_id_in_list_condition(
+ registry: Registry[Condition[UUIDObject], [data.PathLike | None]],
+ config_cls: type[IdInListConfig],
+) -> None:
+ def builder(
+ config: IdInListConfig,
+ base_dir: data.PathLike | None = None,
+ ) -> Condition[UUIDObject]:
+ path = config.path
+
+ if base_dir is not None and not path.is_absolute():
+ path = Path(base_dir) / path
+
+ ids = set()
+ loader = build_list_loader(config.format)
+ values = loader(path)
+ for index, value in enumerate(values):
+ try:
+ ids.add(UUID(value))
+ except ValueError as err:
+ raise ValueError(
+ f"Invalid ID at index {index} in '{path}': {value!r}."
+ ) from err
+
+ return IdInList(ids)
+
+ registry.register(config_cls)(builder)
+
+
+def register_has_tag_condition(
+ registry: Registry[Condition[TaggedObject], P],
+ config_cls: type[HasTagConfig],
+) -> None:
+ def builder(
+ config: HasTagConfig,
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> Condition[TaggedObject]:
+ return HasTag(config.tag)
+
+ registry.register(config_cls)(builder)
+
+
+def register_has_all_tags_condition(
+ registry: Registry[Condition[TaggedObject], P],
+ config_cls: type[HasAllTagsConfig],
+) -> None:
+ def builder(
+ config: HasAllTagsConfig,
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> Condition[TaggedObject]:
+ return HasAllTags(config.tags)
+
+ registry.register(config_cls)(builder)
+
+
+def register_has_any_tag_condition(
+ registry: Registry[Condition[TaggedObject], P],
+ config_cls: type[HasAnyTagConfig],
+) -> None:
+ def builder(
+ config: HasAnyTagConfig,
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> Condition[TaggedObject]:
+ return HasAnyTag(config.tags)
+
+ registry.register(config_cls)(builder)
+
+
+def register_not_condition(
+ registry: Registry[Condition[ConditionObject], P],
+) -> Callable[[type[NotConfigType]], type[NotConfigType]]:
+ def decorator(config_cls: type[NotConfigType]) -> type[NotConfigType]:
+ @registry.register(config_cls)
+ def builder(
+ config: NotConfigType,
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> Condition[ConditionObject]:
+ condition = registry.build(config.condition, *args, **kwargs)
+ return Not(condition)
+
+ return config_cls
+
+ return decorator
+
+
+def register_all_of_condition(
+ registry: Registry[Condition[ConditionObject], P],
+) -> Callable[[type[MultiConfigType]], type[MultiConfigType]]:
+ def decorator(config_cls: type[MultiConfigType]) -> type[MultiConfigType]:
+ @registry.register(config_cls)
+ def builder(
+ config: MultiConfigType,
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> Condition[ConditionObject]:
+ conditions = [
+ registry.build(condition, *args, **kwargs)
+ for condition in config.conditions
+ ]
+ return AllOf(conditions)
+
+ return config_cls
+
+ return decorator
+
+
+def register_any_of_condition(
+ registry: Registry[Condition[ConditionObject], P],
+) -> Callable[[type[MultiConfigType]], type[MultiConfigType]]:
+ def decorator(config_cls: type[MultiConfigType]) -> type[MultiConfigType]:
+ @registry.register(config_cls)
+ def builder(
+ config: MultiConfigType,
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> Condition[ConditionObject]:
+ conditions = [
+ registry.build(condition, *args, **kwargs)
+ for condition in config.conditions
+ ]
+ return AnyOf(conditions)
+
+ return config_cls
+
+ return decorator
diff --git a/src/batdetect2/data/conditions/recordings.py b/src/batdetect2/data/conditions/recordings.py
new file mode 100644
index 00000000..754cf4fb
--- /dev/null
+++ b/src/batdetect2/data/conditions/recordings.py
@@ -0,0 +1,217 @@
+from collections.abc import Callable, Sequence
+from pathlib import Path
+from typing import Annotated, Literal
+
+from loguru import logger
+from pydantic import Field, model_validator
+from soundevent import data
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.core.registries import (
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.data.conditions.common import (
+ HasAllTagsConfig,
+ HasAnyTagConfig,
+ HasTagConfig,
+ IdInListConfig,
+ JsonList,
+ ListFormatConfig,
+ MultiConditionConfigBase,
+ NotConditionConfigBase,
+ build_list_loader,
+ list_loaders,
+ register_all_of_condition,
+ register_any_of_condition,
+ register_has_all_tags_condition,
+ register_has_any_tag_condition,
+ register_has_tag_condition,
+ register_id_in_list_condition,
+ register_not_condition,
+)
+
+__all__ = [
+ "IdInListConfig",
+ "PathInListConfig",
+ "RecordingAllOfConfig",
+ "RecordingAnyOfConfig",
+ "RecordingCondition",
+ "RecordingConditionConfig",
+ "RecordingConditionImportConfig",
+ "RecordingNotConfig",
+ "build_recording_condition",
+]
+
+RecordingCondition = Callable[[data.Recording], bool]
+
+recording_conditions: Registry[RecordingCondition, [data.PathLike | None]] = (
+ Registry("recording_condition")
+)
+
+
+@add_import_config(recording_conditions, arg_names=["base_dir"])
+class RecordingConditionImportConfig(ImportConfig):
+ """Use any callable as a recording condition.
+
+ Set ``name="import"`` and provide a ``target`` pointing to any callable
+ to use it instead of a built-in option.
+ """
+
+ name: Literal["import"] = "import"
+
+
+register_id_in_list_condition(recording_conditions, IdInListConfig)
+register_has_tag_condition(recording_conditions, HasTagConfig)
+register_has_all_tags_condition(recording_conditions, HasAllTagsConfig)
+register_has_any_tag_condition(recording_conditions, HasAnyTagConfig)
+
+
+class PathInListConfig(BaseConfig):
+ name: Literal["path_in_list"] = "path_in_list"
+ path: Path
+ format: ListFormatConfig = JsonList()
+ base_dir: Path | None = None
+ on_outside: Literal["allow", "warn", "error"] = "allow"
+
+ @model_validator(mode="before")
+ @classmethod
+ def _normalize_format(cls, values):
+ if not isinstance(values, dict):
+ return values
+
+ format_config = values.get("format")
+
+ if isinstance(format_config, str):
+ values = values.copy()
+ config_class = list_loaders.get_config_type(format_config)
+ values["format"] = config_class().model_dump()
+
+ return values
+
+
+class PathInList:
+ def __init__(
+ self,
+ paths: set[Path],
+ base_dir: Path | None,
+ on_outside: Literal["allow", "warn", "error"],
+ ):
+ self.paths = paths
+ self.base_dir = base_dir
+ self.on_outside = on_outside
+
+ def __call__(self, recording: data.Recording) -> bool:
+ normalized_path = self._normalize_recording_path(recording.path)
+
+ if normalized_path is None:
+ return True
+
+ return normalized_path in self.paths
+
+ def _normalize_recording_path(self, path: data.PathLike) -> Path | None:
+ recording_path = Path(path)
+
+ if self.base_dir is None:
+ return recording_path
+
+ if not recording_path.is_absolute():
+ return recording_path
+
+ try:
+ return recording_path.relative_to(self.base_dir)
+ except ValueError as err:
+ if self.on_outside == "allow":
+ return None
+
+ if self.on_outside == "warn":
+ logger.warning(
+ "Recording path '{}' is outside '{}' in path_in_list; "
+ "allowing.",
+ recording_path,
+ self.base_dir,
+ )
+ return None
+
+ raise ValueError(
+ f"Recording path '{recording_path}' is outside "
+ f"'{self.base_dir}' for 'path_in_list'."
+ ) from err
+
+ @recording_conditions.register(PathInListConfig)
+ @staticmethod
+ def from_config(
+ config: PathInListConfig,
+ base_dir: data.PathLike | None = None,
+ ) -> "PathInList":
+ list_path = config.path
+
+ if base_dir is not None and not list_path.is_absolute():
+ list_path = Path(base_dir) / list_path
+
+ match_base_dir = config.base_dir
+ if (
+ match_base_dir is not None
+ and base_dir is not None
+ and not match_base_dir.is_absolute()
+ ):
+ match_base_dir = Path(base_dir) / match_base_dir
+
+ loader = build_list_loader(config.format)
+
+ paths = {
+ Path(value).relative_to(match_base_dir)
+ if (
+ match_base_dir is not None
+ and Path(value).is_absolute()
+ and Path(value).is_relative_to(match_base_dir)
+ )
+ else Path(value)
+ for value in loader(list_path)
+ }
+
+ return PathInList(
+ paths=paths,
+ base_dir=match_base_dir,
+ on_outside=config.on_outside,
+ )
+
+
+@register_all_of_condition(recording_conditions)
+class RecordingAllOfConfig(MultiConditionConfigBase):
+ name: Literal["all_of"] = "all_of"
+ conditions: Sequence["RecordingConditionConfig"]
+
+
+@register_any_of_condition(recording_conditions)
+class RecordingAnyOfConfig(MultiConditionConfigBase):
+ name: Literal["any_of"] = "any_of"
+ conditions: Sequence["RecordingConditionConfig"]
+
+
+@register_not_condition(recording_conditions)
+class RecordingNotConfig(NotConditionConfigBase):
+ name: Literal["not"] = "not"
+ condition: "RecordingConditionConfig"
+
+
+RecordingConditionConfig = Annotated[
+ IdInListConfig
+ | PathInListConfig
+ | HasTagConfig
+ | HasAllTagsConfig
+ | HasAnyTagConfig
+ | RecordingAllOfConfig
+ | RecordingAnyOfConfig
+ | RecordingNotConfig
+ | RecordingConditionImportConfig,
+ Field(discriminator="name"),
+]
+
+
+def build_recording_condition(
+ config: RecordingConditionConfig,
+ base_dir: data.PathLike | None = None,
+) -> RecordingCondition:
+ return recording_conditions.build(config, base_dir)
diff --git a/src/batdetect2/data/conditions/sound_events.py b/src/batdetect2/data/conditions/sound_events.py
new file mode 100644
index 00000000..fe477100
--- /dev/null
+++ b/src/batdetect2/data/conditions/sound_events.py
@@ -0,0 +1,236 @@
+import operator
+from collections.abc import Callable, Sequence
+from functools import partial
+from typing import Annotated, Literal
+
+from pydantic import Field
+from soundevent import data
+from soundevent.geometry import compute_bounds
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.core.registries import (
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.data.conditions.common import (
+ HasAllTagsConfig,
+ HasAnyTagConfig,
+ HasTagConfig,
+ IdInListConfig,
+ MultiConditionConfigBase,
+ NotConditionConfigBase,
+ register_all_of_condition,
+ register_any_of_condition,
+ register_has_all_tags_condition,
+ register_has_any_tag_condition,
+ register_has_tag_condition,
+ register_id_in_list_condition,
+ register_not_condition,
+)
+
+__all__ = [
+ "AllOfConfig",
+ "AnyOfConfig",
+ "DurationConfig",
+ "FrequencyConfig",
+ "HasAllTagsConfig",
+ "HasAnyTagConfig",
+ "HasTagConfig",
+ "NotConfig",
+ "Operator",
+ "SoundEventCondition",
+ "SoundEventConditionConfig",
+ "SoundEventConditionImportConfig",
+ "build_sound_event_condition",
+ "filter_clip_annotation",
+]
+
+SoundEventCondition = Callable[[data.SoundEventAnnotation], bool]
+
+sound_event_conditions: Registry[
+ SoundEventCondition,
+ [data.PathLike | None],
+] = Registry("sound_event_condition")
+
+
+@add_import_config(sound_event_conditions, arg_names=["base_dir"])
+class SoundEventConditionImportConfig(ImportConfig):
+ """Use any callable as a sound event condition.
+
+ Set ``name="import"`` and provide a ``target`` pointing to any
+ callable to use it instead of a built-in option.
+ """
+
+ name: Literal["import"] = "import"
+
+
+register_has_tag_condition(sound_event_conditions, HasTagConfig)
+register_has_all_tags_condition(sound_event_conditions, HasAllTagsConfig)
+register_has_any_tag_condition(sound_event_conditions, HasAnyTagConfig)
+register_id_in_list_condition(sound_event_conditions, IdInListConfig)
+
+
+Operator = Literal["gt", "gte", "lt", "lte", "eq"]
+
+
+class DurationConfig(BaseConfig):
+ name: Literal["duration"] = "duration"
+ operator: Operator
+ seconds: float
+
+
+def _build_comparator(op: Operator, value: float) -> Callable[[float], bool]:
+ if op == "gt":
+ return partial(operator.lt, value)
+
+ if op == "gte":
+ return partial(operator.le, value)
+
+ if op == "lt":
+ return partial(operator.gt, value)
+
+ if op == "lte":
+ return partial(operator.ge, value)
+
+ if op == "eq":
+ return partial(operator.eq, value)
+
+ raise ValueError(f"Invalid operator {op}")
+
+
+class Duration:
+ def __init__(self, operator: Operator, seconds: float):
+ self.operator = operator
+ self.seconds = seconds
+ self._comparator = _build_comparator(self.operator, self.seconds)
+
+ def __call__(
+ self,
+ sound_event_annotation: data.SoundEventAnnotation,
+ ) -> bool:
+ geometry = sound_event_annotation.sound_event.geometry
+
+ if geometry is None:
+ return False
+
+ start_time, _, end_time, _ = compute_bounds(geometry)
+ duration = end_time - start_time
+
+ return self._comparator(duration)
+
+ @sound_event_conditions.register(DurationConfig)
+ @staticmethod
+ def from_config(
+ config: DurationConfig,
+ base_dir: data.PathLike | None = None,
+ ):
+ _ = base_dir
+ return Duration(operator=config.operator, seconds=config.seconds)
+
+
+class FrequencyConfig(BaseConfig):
+ name: Literal["frequency"] = "frequency"
+ boundary: Literal["low", "high"]
+ operator: Operator
+ hertz: float
+
+
+class Frequency:
+ def __init__(
+ self,
+ operator: Operator,
+ boundary: Literal["low", "high"],
+ hertz: float,
+ ):
+ self.operator = operator
+ self.hertz = hertz
+ self.boundary = boundary
+ self._comparator = _build_comparator(self.operator, self.hertz)
+
+ def __call__(
+ self,
+ sound_event_annotation: data.SoundEventAnnotation,
+ ) -> bool:
+ geometry = sound_event_annotation.sound_event.geometry
+
+ if geometry is None:
+ return False
+
+ if isinstance(geometry, (data.TimeInterval, data.TimeStamp)):
+ return False
+
+ _, low_freq, _, high_freq = compute_bounds(geometry)
+
+ if self.boundary == "low":
+ return self._comparator(low_freq)
+
+ return self._comparator(high_freq)
+
+ @sound_event_conditions.register(FrequencyConfig)
+ @staticmethod
+ def from_config(
+ config: FrequencyConfig,
+ base_dir: data.PathLike | None = None,
+ ):
+ _ = base_dir
+ return Frequency(
+ operator=config.operator,
+ boundary=config.boundary,
+ hertz=config.hertz,
+ )
+
+
+@register_all_of_condition(sound_event_conditions)
+class AllOfConfig(MultiConditionConfigBase):
+ name: Literal["all_of"] = "all_of"
+ conditions: Sequence["SoundEventConditionConfig"]
+
+
+@register_any_of_condition(sound_event_conditions)
+class AnyOfConfig(MultiConditionConfigBase):
+ name: Literal["any_of"] = "any_of"
+ conditions: list["SoundEventConditionConfig"]
+
+
+@register_not_condition(sound_event_conditions)
+class NotConfig(NotConditionConfigBase):
+ name: Literal["not"] = "not"
+ condition: "SoundEventConditionConfig"
+
+
+SoundEventConditionConfig = Annotated[
+ IdInListConfig
+ | HasTagConfig
+ | HasAllTagsConfig
+ | HasAnyTagConfig
+ | DurationConfig
+ | FrequencyConfig
+ | AllOfConfig
+ | AnyOfConfig
+ | NotConfig
+ | SoundEventConditionImportConfig,
+ Field(discriminator="name"),
+]
+
+
+def build_sound_event_condition(
+ config: SoundEventConditionConfig,
+ base_dir: data.PathLike | None = None,
+) -> SoundEventCondition:
+ return sound_event_conditions.build(config, base_dir)
+
+
+def filter_clip_annotation(
+ clip_annotation: data.ClipAnnotation,
+ condition: SoundEventCondition,
+) -> data.ClipAnnotation:
+ return clip_annotation.model_copy(
+ update=dict(
+ sound_events=[
+ sound_event
+ for sound_event in clip_annotation.sound_events
+ if condition(sound_event)
+ ]
+ )
+ )
diff --git a/src/batdetect2/data/datasets.py b/src/batdetect2/data/datasets.py
new file mode 100644
index 00000000..af13f3c4
--- /dev/null
+++ b/src/batdetect2/data/datasets.py
@@ -0,0 +1,335 @@
+"""Defines the overall dataset structure and provides loading/saving utilities.
+
+This module focuses on defining what constitutes a BatDetect2 dataset,
+potentially composed of multiple distinct data sources with varying annotation
+formats. It provides mechanisms to load the annotation metadata from these
+sources into a unified representation.
+
+The core components are:
+- `DatasetConfig`: A configuration class (typically loaded from YAML) that
+ describes the dataset's name, description, and constituent sources.
+- `Dataset`: A type alias representing the loaded dataset as a list of
+ `soundevent.data.ClipAnnotation` objects. Note that this implies all
+ annotation metadata is loaded into memory.
+- Loading functions (`load_dataset`, `load_dataset_from_config`): To parse
+ a `DatasetConfig` and load the corresponding annotation metadata.
+- Saving function (`save_dataset`): To save a loaded list of annotations
+ into a standard `soundevent` format.
+
+"""
+
+from pathlib import Path
+from typing import Sequence
+
+from loguru import logger
+from pydantic import Field
+from soundevent import data, io
+
+from batdetect2.core.configs import BaseConfig, load_config
+from batdetect2.data.annotations import (
+ AnnotatedDataset,
+ AnnotationFormats,
+ load_annotated_dataset,
+)
+from batdetect2.data.conditions import (
+ ClipAnnotationConditionConfig,
+ SoundEventConditionConfig,
+ build_clip_annotation_condition,
+ build_sound_event_condition,
+ filter_clip_annotation,
+)
+from batdetect2.data.transforms import (
+ ApplyAll,
+ SoundEventTransformConfig,
+ build_sound_event_transform,
+ transform_clip_annotation,
+)
+from batdetect2.targets.terms import data_source
+
+__all__ = [
+ "load_dataset",
+ "load_dataset_from_config",
+ "save_dataset",
+ "Dataset",
+ "DatasetConfig",
+]
+
+
+Dataset = Sequence[data.ClipAnnotation]
+"""Type alias for a loaded dataset representation.
+
+Represents an entire dataset *after loading* as a flat Python list containing
+all `soundevent.data.ClipAnnotation` objects gathered from all configured data
+sources.
+"""
+
+
+class DatasetConfig(BaseConfig):
+ """Configuration model defining the structure of a BatDetect2 dataset."""
+
+ name: str
+ description: str
+ sources: list[AnnotationFormats]
+
+ clip_filter: ClipAnnotationConditionConfig | None = None
+ sound_event_filter: SoundEventConditionConfig | None = None
+ sound_event_transforms: list[SoundEventTransformConfig] = Field(
+ default_factory=list
+ )
+
+
+def load_dataset(
+ config: DatasetConfig,
+ base_dir: data.PathLike | None = None,
+ add_source_tag: bool = True,
+ include_sources: list[str] | None = None,
+ exclude_sources: list[str] | None = None,
+ apply_transforms: bool = True,
+ apply_filters: bool = True,
+) -> Dataset:
+ """Load and merge clip annotations from configured dataset sources.
+
+ Loads each source listed in ``config.sources`` and returns a flat
+ collection of ``soundevent.data.ClipAnnotation`` objects. Source tags,
+ dataset-level filters, and dataset-level transforms can be enabled or
+ disabled with flags.
+
+ Parameters
+ ----------
+ config : DatasetConfig
+ Dataset definition containing source configurations, optional
+ clip-level filter, sound-event filter, and optional sound-event
+ transform pipeline.
+ base_dir : data.PathLike, optional
+ Base directory used to resolve relative paths in source
+ configurations.
+ add_source_tag : bool, default=True
+ If True, append a ``data_source`` tag to each clip annotation with
+ the source name.
+ include_sources : list[str], optional
+ Source names to include. If None, all sources are eligible.
+ exclude_sources : list[str], optional
+ Source names to skip after include filtering. If a source appears in
+ both include and exclude lists, it is skipped.
+ apply_transforms : bool, default=True
+ If True, apply transforms defined in
+ ``config.sound_event_transforms``.
+ apply_filters : bool, default=True
+ If True, apply filters defined in ``config.clip_filter`` and
+ ``config.sound_event_filter``.
+
+ Returns
+ -------
+ Dataset
+ Flat collection of clip annotations loaded from the selected sources.
+ """
+ clip_annotations = []
+
+ clip_condition = (
+ build_clip_annotation_condition(
+ config.clip_filter,
+ base_dir=base_dir,
+ )
+ if config.clip_filter is not None
+ else None
+ )
+
+ sound_event_condition = (
+ build_sound_event_condition(
+ config.sound_event_filter,
+ base_dir=base_dir,
+ )
+ if config.sound_event_filter is not None
+ else None
+ )
+
+ transform = (
+ ApplyAll(
+ [
+ build_sound_event_transform(step)
+ for step in config.sound_event_transforms
+ ]
+ )
+ if config.sound_event_transforms
+ else None
+ )
+
+ for source in config.sources:
+ annotated_source = load_annotated_dataset(source, base_dir=base_dir)
+
+ if include_sources and source.name not in include_sources:
+ continue
+
+ if exclude_sources and source.name in exclude_sources:
+ continue
+
+ logger.debug(
+ "Loaded {num_examples} from dataset source '{source_name}'",
+ num_examples=len(annotated_source.clip_annotations),
+ source_name=source.name,
+ )
+
+ for clip_annotation in annotated_source.clip_annotations:
+ if add_source_tag:
+ clip_annotation = insert_source_tag(clip_annotation, source)
+
+ if (
+ clip_condition is not None
+ and apply_filters
+ and not clip_condition(clip_annotation)
+ ):
+ continue
+
+ if sound_event_condition is not None and apply_filters:
+ clip_annotation = filter_clip_annotation(
+ clip_annotation,
+ sound_event_condition,
+ )
+
+ if transform is not None and apply_transforms:
+ clip_annotation = transform_clip_annotation(
+ clip_annotation,
+ transform,
+ )
+
+ clip_annotations.append(clip_annotation)
+
+ return clip_annotations
+
+
+def insert_source_tag(
+    clip_annotation: data.ClipAnnotation,
+    source: AnnotatedDataset,
+) -> data.ClipAnnotation:
+    """Insert the source tag into a ClipAnnotation.
+
+    This function adds a tag to the `ClipAnnotation` object, indicating the
+    source from which it was loaded. The tag uses the ``data_source`` term
+    with the source's name as its value.
+
+    Parameters
+    ----------
+    clip_annotation : data.ClipAnnotation
+        The `ClipAnnotation` object to which the source tag will be added.
+    source : AnnotatedDataset
+        The dataset source the annotation was loaded from; its ``name``
+        becomes the tag value.
+
+    Returns
+    -------
+    data.ClipAnnotation
+        A copy of the `ClipAnnotation` with the source tag appended; the
+        input annotation is not mutated.
+    """
+    return clip_annotation.model_copy(
+        update=dict(
+            tags=[
+                *clip_annotation.tags,
+                data.Tag(
+                    term=data_source,
+                    value=source.name,
+                ),
+            ]
+        ),
+    )
+
+
+def load_dataset_config(path: data.PathLike, field: str | None = None):
+ return load_config(path=path, schema=DatasetConfig, field=field)
+
+
+def load_dataset_from_config(
+ path: data.PathLike,
+ field: str | None = None,
+ base_dir: data.PathLike | None = None,
+ add_source_tag: bool = True,
+ include_sources: list[str] | None = None,
+ exclude_sources: list[str] | None = None,
+ apply_transforms: bool = True,
+ apply_filters: bool = True,
+) -> Dataset:
+ """Load a dataset by reading a ``DatasetConfig`` from disk.
+
+ This convenience wrapper first loads a ``DatasetConfig`` from ``path``
+ and optional ``field``, then delegates to :func:`load_dataset`.
+
+ Parameters
+ ----------
+ path : data.PathLike
+ Path to a configuration file containing a ``DatasetConfig``.
+ field : str, optional
+ Dot-separated field path to a nested config section. If None, the
+ full file is parsed as ``DatasetConfig``.
+ base_dir : data.PathLike, optional
+ Base directory used to resolve relative paths in source
+ configurations.
+ add_source_tag : bool, default=True
+ If True, append a ``data_source`` tag to each clip annotation.
+ include_sources : list[str], optional
+ Source names to include. If None, all sources are eligible.
+ exclude_sources : list[str], optional
+ Source names to skip after include filtering.
+ apply_transforms : bool, default=True
+ If True, apply transforms defined in the loaded config.
+ apply_filters : bool, default=True
+ If True, apply clip and sound-event filters defined in the loaded
+ config.
+
+ Returns
+ -------
+ Dataset
+ Flat collection of clip annotations loaded from the selected sources.
+ """
+ config = load_config(
+ path=path,
+ schema=DatasetConfig,
+ field=field,
+ )
+ return load_dataset(
+ config,
+ base_dir=base_dir,
+ add_source_tag=add_source_tag,
+ include_sources=include_sources,
+ exclude_sources=exclude_sources,
+ apply_transforms=apply_transforms,
+ apply_filters=apply_filters,
+ )
+
+
+def save_dataset(
+ dataset: Dataset,
+ path: data.PathLike,
+ name: str | None = None,
+ description: str | None = None,
+ audio_dir: Path | None = None,
+) -> None:
+ """Save a loaded dataset (list of ClipAnnotations) to a file.
+
+ Wraps the provided list of `ClipAnnotation` objects into a
+ `soundevent.data.AnnotationSet` and saves it using `soundevent.io.save`.
+ This saves the aggregated annotation metadata in the standard soundevent
+ format.
+
+ Note: This function saves the *loaded annotation data*, not the original
+ `DatasetConfig` structure that defined how the data was assembled from
+ various sources.
+
+ Parameters
+ ----------
+ dataset : Dataset (List[data.ClipAnnotation])
+ The list of clip annotations to save (typically the result of
+ `load_dataset` or a split thereof).
+ path : data.PathLike
+ The output file path (e.g., 'train_annotations.json',
+ 'val_annotations.json'). The format is determined by `soundevent.io`.
+ name : str, optional
+ An optional name to assign to the saved `AnnotationSet`.
+ description : str, optional
+ An optional description to assign to the saved `AnnotationSet`.
+ audio_dir : Path, optional
+ Passed to `soundevent.io.save`. May be used to relativize audio file
+ paths within the saved annotations if applicable to the save format.
+ """
+
+ annotation_set = data.AnnotationSet(
+ name=name,
+ description=description,
+ clip_annotations=list(dataset),
+ )
+ io.save(annotation_set, path, audio_dir=audio_dir)
diff --git a/src/batdetect2/data/iterators.py b/src/batdetect2/data/iterators.py
new file mode 100644
index 00000000..92164502
--- /dev/null
+++ b/src/batdetect2/data/iterators.py
@@ -0,0 +1,50 @@
+from collections.abc import Generator
+
+from soundevent import data
+
+from batdetect2.data.datasets import Dataset
+from batdetect2.targets.types import TargetProtocol
+
+
+def iterate_over_sound_events(
+    dataset: Dataset,
+    targets: TargetProtocol,
+) -> Generator[tuple[str | None, data.SoundEventAnnotation], None, None]:
+    """Iterate over sound events in a dataset.
+
+    Sound events that do not pass ``targets.filter`` are skipped; every
+    remaining event is yielded together with its encoded class name.
+
+    Parameters
+    ----------
+    dataset : Dataset
+        The dataset containing clip annotations, each of which may contain
+        multiple sound event annotations.
+    targets : TargetProtocol
+        An object implementing the `TargetProtocol`, which provides methods
+        for filtering and encoding sound events.
+
+    Yields
+    ------
+    tuple[str | None, data.SoundEventAnnotation]
+        A tuple containing:
+        - The encoded class name (str) for the sound event, or None if it
+          cannot be encoded to a specific class. Events with a ``None``
+          class name are still yielded; callers decide how to handle them.
+        - The sound event annotation itself.
+
+    Notes
+    -----
+    The processing order for each sound event is:
+    1. Filtering via ``targets.filter``. Events failing the filter are
+       skipped.
+    2. Encoding via ``targets.encode_class`` to determine the class name
+       (which may be None for generic events).
+    """
+    for clip_annotation in dataset:
+        for sound_event_annotation in clip_annotation.sound_events:
+            if not targets.filter(sound_event_annotation):
+                continue
+
+            class_name = targets.encode_class(sound_event_annotation)
+
+            yield class_name, sound_event_annotation
diff --git a/src/batdetect2/data/split.py b/src/batdetect2/data/split.py
new file mode 100644
index 00000000..2a0f9518
--- /dev/null
+++ b/src/batdetect2/data/split.py
@@ -0,0 +1,75 @@
+from sklearn.model_selection import train_test_split
+
+from batdetect2.data.datasets import Dataset
+from batdetect2.data.summary import (
+ extract_recordings_df,
+ extract_sound_events_df,
+)
+from batdetect2.targets.types import TargetProtocol
+
+
+def split_dataset_by_recordings(
+ dataset: Dataset,
+ targets: TargetProtocol,
+ train_size: float = 0.75,
+ random_state: int | None = None,
+) -> tuple[Dataset, Dataset]:
+ recordings = extract_recordings_df(dataset)
+
+ sound_events = extract_sound_events_df(
+ dataset,
+ targets,
+ exclude_non_target=True,
+ exclude_generic=True,
+ )
+
+ majority_class = (
+ sound_events.groupby("recording_id") # type: ignore
+ .apply(
+ lambda group: (
+ group["class_name"]
+ .value_counts()
+ .sort_values(ascending=False)
+ .index[0]
+ ),
+ include_groups=False,
+ )
+ .rename("class_name")
+ .to_frame()
+ .reset_index()
+ )
+
+ train, test = train_test_split(
+ majority_class["recording_id"],
+ stratify=majority_class["class_name"],
+ train_size=train_size,
+ random_state=random_state,
+ )
+
+ train_ids_set = set(train.values)
+ test_ids_set = set(test.values)
+
+ extra = set(recordings["recording_id"]) - train_ids_set - test_ids_set
+
+ if extra:
+ train_extra, test_extra = train_test_split(
+ list(extra),
+ train_size=train_size,
+ random_state=random_state,
+ )
+ train_ids_set.update(train_extra)
+ test_ids_set.update(test_extra)
+
+ train_dataset = [
+ clip_annotation
+ for clip_annotation in dataset
+ if str(clip_annotation.clip.recording.uuid) in train_ids_set
+ ]
+
+ test_dataset = [
+ clip_annotation
+ for clip_annotation in dataset
+ if str(clip_annotation.clip.recording.uuid) in test_ids_set
+ ]
+
+ return train_dataset, test_dataset
diff --git a/src/batdetect2/data/summary.py b/src/batdetect2/data/summary.py
new file mode 100644
index 00000000..7d81ef63
--- /dev/null
+++ b/src/batdetect2/data/summary.py
@@ -0,0 +1,196 @@
+import pandas as pd
+from soundevent.geometry import compute_bounds
+
+from batdetect2.data.datasets import Dataset
+from batdetect2.targets.types import TargetProtocol
+
+__all__ = [
+ "extract_recordings_df",
+ "extract_sound_events_df",
+ "compute_class_summary",
+]
+
+
+def extract_recordings_df(dataset: Dataset) -> pd.DataFrame:
+ """Extract recording metadata into a pandas DataFrame.
+
+ Parameters
+ ----------
+ dataset : List[data.ClipAnnotation]
+ A list of clip annotations from which to extract recording information.
+
+ Returns
+ -------
+ pd.DataFrame
+ A DataFrame where each row corresponds to a recording, containing
+ metadata such as duration, path, sample rate, and other properties.
+ """
+ recordings = []
+
+ for clip_annotation in dataset:
+ clip = clip_annotation.clip
+ recording = clip.recording
+ recordings.append(
+ {
+ "clip_annotation_id": str(clip_annotation.uuid),
+ "recording_id": str(recording.uuid),
+ "duration": clip.duration,
+ "filename": recording.path.name,
+ **recording.model_dump(
+ mode="json",
+ include={
+ "samplerate",
+ "hash",
+ "path",
+ "date",
+ "time",
+ "latitude",
+ "longitude",
+ },
+ ),
+ }
+ )
+
+ return pd.DataFrame(recordings)
+
+
+def extract_sound_events_df(
+ dataset: Dataset,
+ targets: TargetProtocol,
+ exclude_non_target: bool = True,
+ exclude_generic: bool = True,
+) -> pd.DataFrame:
+ """Extract sound event data into a pandas DataFrame.
+
+ This function iterates through all sound events in the provided dataset,
+ applies filtering and classification logic based on the `targets`
+ protocol, and compiles the results into a structured DataFrame.
+
+ Parameters
+ ----------
+ dataset : List[data.ClipAnnotation]
+ The dataset containing clip annotations with sound events.
+ targets : TargetProtocol
+ An object that provides methods to filter (`filter`) and classify
+ (`encode_class`) sound events.
+ exclude_non_target : bool, default=True
+ If True, sound events that do not pass the `targets.filter()` check
+ are excluded from the output.
+ exclude_generic : bool, default=True
+ If True, sound events that are classified with a `None` class name
+ by `targets.encode_class()` are excluded.
+
+ Returns
+ -------
+ pd.DataFrame
+ A DataFrame where each row represents a single sound event, including
+ its bounding box, class name, and other relevant attributes.
+ """
+ sound_events = []
+
+ for clip_annotation in dataset:
+ for sound_event in clip_annotation.sound_events:
+ is_target = targets.filter(sound_event)
+
+ if not is_target and exclude_non_target:
+ continue
+
+ if sound_event.sound_event.geometry is None:
+ continue
+
+ class_name = targets.encode_class(sound_event)
+
+ if class_name is None:
+ if exclude_generic:
+ continue
+ else:
+ class_name = targets.detection_class_name
+
+ start_time, low_freq, end_time, high_freq = compute_bounds(
+ sound_event.sound_event.geometry
+ )
+
+ sound_events.append(
+ {
+ "clip_annotation_id": str(clip_annotation.uuid),
+ "sound_event_id": str(sound_event.uuid),
+ "recording_id": str(
+ sound_event.sound_event.recording.uuid
+ ),
+ "start_time": start_time,
+ "end_time": end_time,
+ "low_freq": low_freq,
+ "high_freq": high_freq,
+ "is_target": is_target,
+ "class_name": class_name,
+ }
+ )
+
+ return pd.DataFrame(sound_events)
+
+
+def compute_class_summary(
+    dataset: Dataset,
+    targets: TargetProtocol,
+) -> pd.DataFrame:
+    """Compute a summary of sound event statistics grouped by class.
+
+    This function generates a high-level summary DataFrame that provides
+    key metrics for each class identified in the dataset. It calculates
+    the total number of calls, the number of unique clip annotations
+    containing each class, the total duration of those clips, and the
+    call rate (calls per unit duration).
+
+    Parameters
+    ----------
+    dataset : List[data.ClipAnnotation]
+        The dataset to be summarized.
+    targets : TargetProtocol
+        An object providing the classification logic for sound events.
+
+    Returns
+    -------
+    pd.DataFrame
+        A DataFrame indexed by class name, with columns for 'num calls',
+        'num recordings', 'duration', and 'call_rate'.
+
+    Notes
+    -----
+    'num recordings' is computed from unique ``clip_annotation_id`` values,
+    so a recording with several clips is counted once per clip.
+    NOTE(review): confirm whether counting by recording_id was intended.
+    """
+    # Generic (None-class) events are kept and attributed to the fallback
+    # detection class; non-target events are dropped.
+    sound_events = extract_sound_events_df(
+        dataset,
+        targets,
+        exclude_generic=False,
+        exclude_non_target=True,
+    )
+
+    recordings = extract_recordings_df(dataset)
+
+    num_calls = (
+        sound_events.groupby("class_name")
+        .size()
+        .sort_values(ascending=False)
+        .rename("num calls")
+    )
+    num_recs = (
+        sound_events.groupby("class_name")["clip_annotation_id"]
+        .nunique()
+        .sort_values(ascending=False)
+        .rename("num recordings")
+    )
+    # Total duration of all clips that contain at least one event of the
+    # class; used as the denominator for call_rate below.
+    durations = (
+        sound_events.groupby("class_name")  # ty: ignore[no-matching-overload]
+        .apply(
+            lambda group: recordings[
+                recordings["clip_annotation_id"].isin(
+                    group["clip_annotation_id"]
+                )
+            ]["duration"].sum(),
+            include_groups=False,
+        )
+        .sort_values(ascending=False)
+        .rename("duration")
+    )
+    return (
+        num_calls.to_frame()
+        .join(num_recs)
+        .join(durations)
+        .sort_values("num calls", ascending=False)
+        .assign(call_rate=lambda df: df["num calls"] / df["duration"])
+    )
diff --git a/src/batdetect2/data/transforms.py b/src/batdetect2/data/transforms.py
new file mode 100644
index 00000000..63bede73
--- /dev/null
+++ b/src/batdetect2/data/transforms.py
@@ -0,0 +1,260 @@
+from collections.abc import Callable
+from typing import Annotated, Dict, List, Literal
+
+from pydantic import Field
+from soundevent import data
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.core.registries import (
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.data.conditions import (
+ SoundEventCondition,
+ SoundEventConditionConfig,
+ build_sound_event_condition,
+)
+
+SoundEventTransform = Callable[
+ [data.SoundEventAnnotation],
+ data.SoundEventAnnotation,
+]
+
+transforms: Registry[SoundEventTransform, []] = Registry("transform")
+
+
+@add_import_config(transforms)
+class SoundEventTransformImportConfig(ImportConfig):
+ """Use any callable as a sound event transform.
+
+ Set ``name="import"`` and provide a ``target`` pointing to any
+ callable to use it instead of a built-in option.
+ """
+
+ name: Literal["import"] = "import"
+
+
+class SetFrequencyBoundConfig(BaseConfig):
+ name: Literal["set_frequency"] = "set_frequency"
+ boundary: Literal["low", "high"] = "low"
+ hertz: float
+
+
+class SetFrequencyBound:
+ def __init__(self, hertz: float, boundary: Literal["low", "high"] = "low"):
+ self.hertz = hertz
+ self.boundary = boundary
+
+ def __call__(
+ self,
+ sound_event_annotation: data.SoundEventAnnotation,
+ ) -> data.SoundEventAnnotation:
+ sound_event = sound_event_annotation.sound_event
+ geometry = sound_event.geometry
+
+ if geometry is None:
+ return sound_event_annotation
+
+ if not isinstance(geometry, data.BoundingBox):
+ return sound_event_annotation
+
+ start_time, low_freq, end_time, high_freq = geometry.coordinates
+
+ if self.boundary == "low":
+ low_freq = self.hertz
+ high_freq = max(high_freq, low_freq)
+
+ elif self.boundary == "high":
+ high_freq = self.hertz
+ low_freq = min(high_freq, low_freq)
+
+ geometry = data.BoundingBox(
+ coordinates=[start_time, low_freq, end_time, high_freq],
+ )
+
+ sound_event = sound_event.model_copy(update=dict(geometry=geometry))
+ return sound_event_annotation.model_copy(
+ update=dict(sound_event=sound_event)
+ )
+
+ @transforms.register(SetFrequencyBoundConfig)
+ @staticmethod
+ def from_config(config: SetFrequencyBoundConfig):
+ return SetFrequencyBound(hertz=config.hertz, boundary=config.boundary)
+
+
+class ApplyIfConfig(BaseConfig):
+ name: Literal["apply_if"] = "apply_if"
+ transform: "SoundEventTransformConfig"
+ condition: SoundEventConditionConfig
+
+
+class ApplyIf:
+ def __init__(
+ self,
+ condition: SoundEventCondition,
+ transform: SoundEventTransform,
+ ):
+ self.condition = condition
+ self.transform = transform
+
+ def __call__(
+ self,
+ sound_event_annotation: data.SoundEventAnnotation,
+ ) -> data.SoundEventAnnotation:
+ if not self.condition(sound_event_annotation):
+ return sound_event_annotation
+
+ return self.transform(sound_event_annotation)
+
+ @transforms.register(ApplyIfConfig)
+ @staticmethod
+ def from_config(config: ApplyIfConfig):
+ transform = build_sound_event_transform(config.transform)
+ condition = build_sound_event_condition(config.condition)
+ return ApplyIf(condition=condition, transform=transform)
+
+
+class ReplaceTagConfig(BaseConfig):
+ name: Literal["replace_tag"] = "replace_tag"
+ original: data.Tag
+ replacement: data.Tag
+
+
+class ReplaceTag:
+ def __init__(
+ self,
+ original: data.Tag,
+ replacement: data.Tag,
+ ):
+ self.original = original
+ self.replacement = replacement
+
+ def __call__(
+ self,
+ sound_event_annotation: data.SoundEventAnnotation,
+ ) -> data.SoundEventAnnotation:
+ tags = []
+
+ for tag in sound_event_annotation.tags:
+ if tag == self.original:
+ tags.append(self.replacement)
+ else:
+ tags.append(tag)
+
+ return sound_event_annotation.model_copy(update=dict(tags=tags))
+
+ @transforms.register(ReplaceTagConfig)
+ @staticmethod
+ def from_config(config: ReplaceTagConfig):
+ return ReplaceTag(
+ original=config.original, replacement=config.replacement
+ )
+
+
+class MapTagValueConfig(BaseConfig):
+ name: Literal["map_tag_value"] = "map_tag_value"
+ tag_key: str
+ value_mapping: Dict[str, str]
+ target_key: str | None = None
+
+
+class MapTagValue:
+ def __init__(
+ self,
+ tag_key: str,
+ value_mapping: Dict[str, str],
+ target_key: str | None = None,
+ ):
+ self.tag_key = tag_key
+ self.value_mapping = value_mapping
+ self.target_key = target_key
+
+ def __call__(
+ self,
+ sound_event_annotation: data.SoundEventAnnotation,
+ ) -> data.SoundEventAnnotation:
+ tags = []
+
+ for tag in sound_event_annotation.tags:
+ if tag.key != self.tag_key:
+ tags.append(tag)
+ continue
+
+ value = self.value_mapping.get(tag.value)
+
+ if value is None:
+ tags.append(tag)
+ continue
+
+ if self.target_key is None:
+ tags.append(tag.model_copy(update=dict(value=value)))
+ else:
+ tags.append(data.Tag(key=self.target_key, value=value))
+
+ return sound_event_annotation.model_copy(update=dict(tags=tags))
+
+ @transforms.register(MapTagValueConfig)
+ @staticmethod
+ def from_config(config: MapTagValueConfig):
+ return MapTagValue(
+ tag_key=config.tag_key,
+ value_mapping=config.value_mapping,
+ target_key=config.target_key,
+ )
+
+
+class ApplyAllConfig(BaseConfig):
+ name: Literal["apply_all"] = "apply_all"
+ steps: List["SoundEventTransformConfig"] = Field(default_factory=list)
+
+
+class ApplyAll:
+ def __init__(self, steps: List[SoundEventTransform]):
+ self.steps = steps
+
+ def __call__(
+ self,
+ sound_event_annotation: data.SoundEventAnnotation,
+ ) -> data.SoundEventAnnotation:
+ for step in self.steps:
+ sound_event_annotation = step(sound_event_annotation)
+
+ return sound_event_annotation
+
+ @transforms.register(ApplyAllConfig)
+ @staticmethod
+ def from_config(config: ApplyAllConfig):
+ steps = [build_sound_event_transform(step) for step in config.steps]
+ return ApplyAll(steps)
+
+
+SoundEventTransformConfig = Annotated[
+ SetFrequencyBoundConfig
+ | ReplaceTagConfig
+ | MapTagValueConfig
+ | ApplyIfConfig
+ | ApplyAllConfig,
+ Field(discriminator="name"),
+]
+
+
+def build_sound_event_transform(
+ config: SoundEventTransformConfig,
+) -> SoundEventTransform:
+ return transforms.build(config)
+
+
+def transform_clip_annotation(
+ clip_annotation: data.ClipAnnotation,
+ transform: SoundEventTransform,
+) -> data.ClipAnnotation:
+ return clip_annotation.model_copy(
+ update=dict(
+ sound_events=[
+ transform(sound_event)
+ for sound_event in clip_annotation.sound_events
+ ]
+ )
+ )
diff --git a/batdetect2/evaluate/__init__.py b/src/batdetect2/detector/__init__.py
similarity index 100%
rename from batdetect2/evaluate/__init__.py
rename to src/batdetect2/detector/__init__.py
diff --git a/batdetect2/detector/compute_features.py b/src/batdetect2/detector/compute_features.py
similarity index 89%
rename from batdetect2/detector/compute_features.py
rename to src/batdetect2/detector/compute_features.py
index b53b0cbf..1ee90972 100644
--- a/batdetect2/detector/compute_features.py
+++ b/src/batdetect2/detector/compute_features.py
@@ -1,5 +1,6 @@
"""Functions to compute features from predictions."""
-from typing import Dict, Optional
+
+from typing import Dict, List
import numpy as np
@@ -7,15 +8,26 @@
from batdetect2.detector.parameters import MAX_FREQ_HZ, MIN_FREQ_HZ
-def convert_int_to_freq(spec_ind, spec_height, min_freq, max_freq):
+def convert_int_to_freq(
+ spec_ind: int,
+ spec_height: int,
+ min_freq: float,
+ max_freq: float,
+) -> int:
"""Convert spectrogram index to frequency in Hz.""" ""
spec_ind = spec_height - spec_ind
- return round(
- (spec_ind / float(spec_height)) * (max_freq - min_freq) + min_freq, 2
+ return int(
+ round(
+ (spec_ind / float(spec_height)) * (max_freq - min_freq) + min_freq,
+ 2,
+ )
)
-def extract_spec_slices(spec, pred_nms):
+def extract_spec_slices(
+ spec: np.ndarray,
+ pred_nms: types.PredictionResults,
+) -> List[np.ndarray]:
"""Extract spectrogram slices from spectrogram.
The slices are extracted based on detected call locations.
@@ -74,7 +86,7 @@ def compute_bandwidth(
def compute_max_power_bb(
prediction: types.Prediction,
- spec: Optional[np.ndarray] = None,
+ spec: np.ndarray | None = None,
min_freq: int = MIN_FREQ_HZ,
max_freq: int = MAX_FREQ_HZ,
**_,
@@ -109,7 +121,7 @@ def compute_max_power_bb(
return int(
convert_int_to_freq(
- y_high + max_power_ind,
+ int(y_high + max_power_ind),
spec.shape[0],
min_freq,
max_freq,
@@ -119,7 +131,7 @@ def compute_max_power_bb(
def compute_max_power(
prediction: types.Prediction,
- spec: Optional[np.ndarray] = None,
+ spec: np.ndarray | None = None,
min_freq: int = MIN_FREQ_HZ,
max_freq: int = MAX_FREQ_HZ,
**_,
@@ -135,19 +147,17 @@ def compute_max_power(
spec_call = spec[:, x_start:x_end]
power_per_freq_band = np.sum(spec_call, axis=1)
max_power_ind = np.argmax(power_per_freq_band)
- return int(
- convert_int_to_freq(
- max_power_ind,
- spec.shape[0],
- min_freq,
- max_freq,
- )
+ return convert_int_to_freq(
+ int(max_power_ind),
+ spec.shape[0],
+ min_freq,
+ max_freq,
)
def compute_max_power_first(
prediction: types.Prediction,
- spec: Optional[np.ndarray] = None,
+ spec: np.ndarray | None = None,
min_freq: int = MIN_FREQ_HZ,
max_freq: int = MAX_FREQ_HZ,
**_,
@@ -164,19 +174,17 @@ def compute_max_power_first(
first_half = spec_call[:, : int(spec_call.shape[1] / 2)]
power_per_freq_band = np.sum(first_half, axis=1)
max_power_ind = np.argmax(power_per_freq_band)
- return int(
- convert_int_to_freq(
- max_power_ind,
- spec.shape[0],
- min_freq,
- max_freq,
- )
+ return convert_int_to_freq(
+ int(max_power_ind),
+ spec.shape[0],
+ min_freq,
+ max_freq,
)
def compute_max_power_second(
prediction: types.Prediction,
- spec: Optional[np.ndarray] = None,
+ spec: np.ndarray | None = None,
min_freq: int = MIN_FREQ_HZ,
max_freq: int = MAX_FREQ_HZ,
**_,
@@ -193,19 +201,17 @@ def compute_max_power_second(
second_half = spec_call[:, int(spec_call.shape[1] / 2) :]
power_per_freq_band = np.sum(second_half, axis=1)
max_power_ind = np.argmax(power_per_freq_band)
- return int(
- convert_int_to_freq(
- max_power_ind,
- spec.shape[0],
- min_freq,
- max_freq,
- )
+ return convert_int_to_freq(
+ int(max_power_ind),
+ spec.shape[0],
+ min_freq,
+ max_freq,
)
def compute_call_interval(
prediction: types.Prediction,
- previous: Optional[types.Prediction] = None,
+ previous: types.Prediction | None = None,
**_,
) -> float:
"""Compute time between this call and the previous call in seconds."""
@@ -236,7 +242,7 @@ def get_feats(
spec: np.ndarray,
pred_nms: types.PredictionResults,
params: types.FeatureExtractionParameters,
-):
+) -> np.ndarray:
"""Extract features from spectrogram based on detected call locations.
The features extracted are:
diff --git a/batdetect2/detector/model_helpers.py b/src/batdetect2/detector/model_helpers.py
similarity index 95%
rename from batdetect2/detector/model_helpers.py
rename to src/batdetect2/detector/model_helpers.py
index 789bdb62..01ef3da0 100644
--- a/batdetect2/detector/model_helpers.py
+++ b/src/batdetect2/detector/model_helpers.py
@@ -53,7 +53,13 @@ def forward(self, x):
class ConvBlockDownCoordF(nn.Module):
def __init__(
- self, in_chn, out_chn, ip_height, k_size=3, pad_size=1, stride=1
+ self,
+ in_chn,
+ out_chn,
+ ip_height,
+ k_size=3,
+ pad_size=1,
+ stride=1,
):
super(ConvBlockDownCoordF, self).__init__()
self.coords = nn.Parameter(
@@ -79,7 +85,13 @@ def forward(self, x):
class ConvBlockDownStandard(nn.Module):
def __init__(
- self, in_chn, out_chn, ip_height=None, k_size=3, pad_size=1, stride=1
+ self,
+ in_chn,
+ out_chn,
+ ip_height=None,
+ k_size=3,
+ pad_size=1,
+ stride=1,
):
super(ConvBlockDownStandard, self).__init__()
self.conv = nn.Conv2d(
diff --git a/batdetect2/detector/models.py b/src/batdetect2/detector/models.py
similarity index 87%
rename from batdetect2/detector/models.py
rename to src/batdetect2/detector/models.py
index 56e63f36..2bffeab3 100644
--- a/batdetect2/detector/models.py
+++ b/src/batdetect2/detector/models.py
@@ -1,5 +1,4 @@
import torch
-import torch.fft
import torch.nn.functional as F
from torch import nn
@@ -95,7 +94,10 @@ def __init__(
num_filts // 4, 2, kernel_size=1, padding=0
)
self.conv_classes_op = nn.Conv2d(
- num_filts // 4, self.num_classes + 1, kernel_size=1, padding=0
+ num_filts // 4,
+ self.num_classes + 1,
+ kernel_size=1,
+ padding=0,
)
if self.emb_dim > 0:
@@ -103,15 +105,15 @@ def __init__(
num_filts, self.emb_dim, kernel_size=1, padding=0
)
- def forward(self, ip, return_feats=False) -> ModelOutput:
+ def forward(self, spec: torch.Tensor) -> ModelOutput:
# encoder
- x1 = self.conv_dn_0(ip)
+ x1 = self.conv_dn_0(spec)
x2 = self.conv_dn_1(x1)
x3 = self.conv_dn_2(x2)
- x3 = F.relu(self.conv_dn_3_bn(self.conv_dn_3(x3)), inplace=True)
+ x3 = F.relu_(self.conv_dn_3_bn(self.conv_dn_3(x3)))
# bottleneck
- x = F.relu(self.conv_1d_bn(self.conv_1d(x3)), inplace=True)
+ x = F.relu_(self.conv_1d_bn(self.conv_1d(x3)))
x = self.att(x)
x = x.repeat([1, 1, self.bneck_height * 4, 1])
@@ -121,13 +123,13 @@ def forward(self, ip, return_feats=False) -> ModelOutput:
x = self.conv_up_4(x + x1)
# output
- x = F.relu(self.conv_op_bn(self.conv_op(x)), inplace=True)
+ x = F.relu_(self.conv_op_bn(self.conv_op(x)))
cls = self.conv_classes_op(x)
comb = torch.softmax(cls, 1)
return ModelOutput(
pred_det=comb[:, :-1, :, :].sum(1).unsqueeze(1),
- pred_size=F.relu(self.conv_size_op(x), inplace=True),
+ pred_size=F.relu(self.conv_size_op(x)),
pred_class=comb,
pred_class_un_norm=cls,
features=x,
@@ -207,7 +209,10 @@ def __init__(
num_filts // 4, 2, kernel_size=1, padding=0
)
self.conv_classes_op = nn.Conv2d(
- num_filts // 4, self.num_classes + 1, kernel_size=1, padding=0
+ num_filts // 4,
+ self.num_classes + 1,
+ kernel_size=1,
+ padding=0,
)
if self.emb_dim > 0:
@@ -215,26 +220,26 @@ def __init__(
num_filts, self.emb_dim, kernel_size=1, padding=0
)
- def forward(self, ip, return_feats=False) -> ModelOutput:
- x1 = self.conv_dn_0(ip)
+ def forward(self, spec: torch.Tensor) -> ModelOutput:
+ x1 = self.conv_dn_0(spec)
x2 = self.conv_dn_1(x1)
x3 = self.conv_dn_2(x2)
- x3 = F.relu(self.conv_dn_3_bn(self.conv_dn_3(x3)), inplace=True)
+ x3 = F.relu_(self.conv_dn_3_bn(self.conv_dn_3(x3)))
- x = F.relu(self.conv_1d_bn(self.conv_1d(x3)), inplace=True)
+ x = F.relu_(self.conv_1d_bn(self.conv_1d(x3)))
x = x.repeat([1, 1, self.bneck_height * 4, 1])
x = self.conv_up_2(x + x3)
x = self.conv_up_3(x + x2)
x = self.conv_up_4(x + x1)
- x = F.relu(self.conv_op_bn(self.conv_op(x)), inplace=True)
+ x = F.relu_(self.conv_op_bn(self.conv_op(x)))
cls = self.conv_classes_op(x)
comb = torch.softmax(cls, 1)
return ModelOutput(
pred_det=comb[:, :-1, :, :].sum(1).unsqueeze(1),
- pred_size=F.relu(self.conv_size_op(x), inplace=True),
+ pred_size=F.relu_(self.conv_size_op(x)),
pred_class=comb,
pred_class_un_norm=cls,
features=x,
@@ -324,13 +329,13 @@ def __init__(
num_filts, self.emb_dim, kernel_size=1, padding=0
)
- def forward(self, ip, return_feats=False) -> ModelOutput:
- x1 = self.conv_dn_0(ip)
+ def forward(self, spec: torch.Tensor) -> ModelOutput:
+ x1 = self.conv_dn_0(spec)
x2 = self.conv_dn_1(x1)
x3 = self.conv_dn_2(x2)
- x3 = F.relu(self.conv_dn_3_bn(self.conv_dn_3(x3)), inplace=True)
+ x3 = F.relu_(self.conv_dn_3_bn(self.conv_dn_3(x3)))
- x = F.relu(self.conv_1d_bn(self.conv_1d(x3)), inplace=True)
+ x = F.relu_(self.conv_1d_bn(self.conv_1d(x3)))
x = self.att(x)
x = x.repeat([1, 1, self.bneck_height * 4, 1])
@@ -338,15 +343,13 @@ def forward(self, ip, return_feats=False) -> ModelOutput:
x = self.conv_up_3(x + x2)
x = self.conv_up_4(x + x1)
- x = F.relu(self.conv_op_bn(self.conv_op(x)), inplace=True)
+ x = F.relu_(self.conv_op_bn(self.conv_op(x)))
cls = self.conv_classes_op(x)
comb = torch.softmax(cls, 1)
- pred_emb = (self.conv_emb(x) if self.emb_dim > 0 else None,)
-
return ModelOutput(
pred_det=comb[:, :-1, :, :].sum(1).unsqueeze(1),
- pred_size=F.relu(self.conv_size_op(x), inplace=True),
+ pred_size=F.relu_(self.conv_size_op(x)),
pred_class=comb,
pred_class_un_norm=cls,
features=x,
diff --git a/src/batdetect2/detector/parameters.py b/src/batdetect2/detector/parameters.py
new file mode 100644
index 00000000..b440f5cd
--- /dev/null
+++ b/src/batdetect2/detector/parameters.py
@@ -0,0 +1,237 @@
+import datetime
+import os
+from pathlib import Path
+from typing import List
+
+from pydantic import BaseModel, Field, computed_field
+
+from batdetect2.train.legacy.train_utils import (
+ get_genus_mapping,
+ get_short_class_names,
+)
+from batdetect2.types import ProcessingConfiguration, SpectrogramParameters
+
+TARGET_SAMPLERATE_HZ = 256000
+FFT_WIN_LENGTH_S = 512 / 256000.0
+FFT_OVERLAP = 0.75
+MAX_FREQ_HZ = 120000
+MIN_FREQ_HZ = 10000
+RESIZE_FACTOR = 0.5
+SPEC_DIVIDE_FACTOR = 32
+SPEC_HEIGHT = 256
+SCALE_RAW_AUDIO = False
+DETECTION_THRESHOLD = 0.01
+NMS_KERNEL_SIZE = 9
+NMS_TOP_K_PER_SEC = 200
+SPEC_SCALE = "pcen"
+DENOISE_SPEC_AVG = True
+MAX_SCALE_SPEC = False
+
+
+DEFAULT_MODEL_PATH = os.path.join(
+ os.path.dirname(os.path.dirname(__file__)),
+ "models",
+ "checkpoints",
+ "Net2DFast_UK_same.pth.tar",
+)
+
+
+DEFAULT_SPECTROGRAM_PARAMETERS: SpectrogramParameters = {
+ "fft_win_length": FFT_WIN_LENGTH_S,
+ "fft_overlap": FFT_OVERLAP,
+ "spec_height": SPEC_HEIGHT,
+ "resize_factor": RESIZE_FACTOR,
+ "spec_divide_factor": SPEC_DIVIDE_FACTOR,
+ "max_freq": MAX_FREQ_HZ,
+ "min_freq": MIN_FREQ_HZ,
+ "spec_scale": SPEC_SCALE,
+ "denoise_spec_avg": DENOISE_SPEC_AVG,
+ "max_scale_spec": MAX_SCALE_SPEC,
+}
+
+
+DEFAULT_PROCESSING_CONFIGURATIONS: ProcessingConfiguration = {
+ "detection_threshold": DETECTION_THRESHOLD,
+ "spec_slices": False,
+ "chunk_size": 3,
+ "spec_features": False,
+ "cnn_features": False,
+ "quiet": True,
+ "target_samp_rate": TARGET_SAMPLERATE_HZ,
+ "fft_win_length": FFT_WIN_LENGTH_S,
+ "fft_overlap": FFT_OVERLAP,
+ "resize_factor": RESIZE_FACTOR,
+ "spec_divide_factor": SPEC_DIVIDE_FACTOR,
+ "spec_height": SPEC_HEIGHT,
+ "scale_raw_audio": SCALE_RAW_AUDIO,
+ "class_names": [],
+ "time_expansion": 1,
+ "top_n": 3,
+ "return_raw_preds": False,
+ "max_duration": None,
+ "nms_kernel_size": NMS_KERNEL_SIZE,
+ "max_freq": MAX_FREQ_HZ,
+ "min_freq": MIN_FREQ_HZ,
+ "nms_top_k_per_sec": NMS_TOP_K_PER_SEC,
+ "spec_scale": SPEC_SCALE,
+ "denoise_spec_avg": DENOISE_SPEC_AVG,
+ "max_scale_spec": MAX_SCALE_SPEC,
+}
+
+
+def mk_dir(path):
+ if not os.path.isdir(path):
+ os.makedirs(path)
+
+
+AUG_SAMPLING_RATES = [
+ 220500,
+ 256000,
+ 300000,
+ 312500,
+ 384000,
+ 441000,
+ 500000,
+]
+CLASSES_TO_IGNORE = ["", " ", "Unknown", "Not Bat"]
+GENERIC_CLASSES = ["Bat"]
+EVENTS_OF_INTEREST = ["Echolocation"]
+
+
+class TrainingParameters(BaseModel):
+ # Net2DFast, Net2DSkip, Net2DSimple, Net2DSkipDS, Net2DRN
+ model_name: str = "Net2DFast"
+ num_filters: int = 128
+
+ experiment: Path
+ model_file_name: Path
+
+ op_im_dir: Path
+ op_im_dir_test: Path
+
+ notes: str = ""
+
+ target_samp_rate: int = TARGET_SAMPLERATE_HZ
+ fft_win_length: float = FFT_WIN_LENGTH_S
+ fft_overlap: float = FFT_OVERLAP
+
+ max_freq: int = MAX_FREQ_HZ
+ min_freq: int = MIN_FREQ_HZ
+
+ resize_factor: float = RESIZE_FACTOR
+ spec_height: int = SPEC_HEIGHT
+ spec_train_width: int = 512
+ spec_divide_factor: int = SPEC_DIVIDE_FACTOR
+
+ denoise_spec_avg: bool = DENOISE_SPEC_AVG
+ scale_raw_audio: bool = SCALE_RAW_AUDIO
+ max_scale_spec: bool = MAX_SCALE_SPEC
+ spec_scale: str = SPEC_SCALE
+
+ detection_overlap: float = 0.01
+ ignore_start_end: float = 0.01
+ detection_threshold: float = DETECTION_THRESHOLD
+ nms_kernel_size: int = NMS_KERNEL_SIZE
+ nms_top_k_per_sec: int = NMS_TOP_K_PER_SEC
+
+ aug_prob: float = 0.20
+ augment_at_train: bool = True
+ augment_at_train_combine: bool = True
+ echo_max_delay: float = 0.005
+ stretch_squeeze_delta: float = 0.04
+ mask_max_time_perc: float = 0.05
+ mask_max_freq_perc: float = 0.10
+ spec_amp_scaling: float = 2.0
+ aug_sampling_rates: List[int] = AUG_SAMPLING_RATES
+
+ train_loss: str = "focal"
+ det_loss_weight: float = 1.0
+ size_loss_weight: float = 0.1
+ class_loss_weight: float = 2.0
+ individual_loss_weight: float = 0.0
+
+ lr: float = 0.001
+ batch_size: int = 8
+ num_workers: int = 4
+ num_epochs: int = 200
+ num_eval_epochs: int = 5
+ device: str = "cuda"
+ save_test_image_during_train: bool = False
+ save_test_image_after_train: bool = True
+
+ convert_to_genus: bool = False
+ class_names: List[str] = Field(default_factory=list)
+ classes_to_ignore: List[str] = Field(
+ default_factory=lambda: CLASSES_TO_IGNORE
+ )
+ generic_class: List[str] = Field(default_factory=lambda: GENERIC_CLASSES)
+ events_of_interest: List[str] = Field(
+ default_factory=lambda: EVENTS_OF_INTEREST
+ )
+ standardize_classs_names: List[str] = Field(default_factory=list)
+
+ @computed_field
+ @property
+ def emb_dim(self) -> int:
+ if self.individual_loss_weight == 0.0:
+ return 0
+ return 3
+
+ @computed_field
+ @property
+ def genus_mapping(self) -> List[int]:
+ _, mapping = get_genus_mapping(self.class_names)
+ return mapping
+
+ @computed_field
+ @property
+ def genus_classes(self) -> List[str]:
+ names, _ = get_genus_mapping(self.class_names)
+ return names
+
+ @computed_field
+ @property
+ def class_names_short(self) -> List[str]:
+ return get_short_class_names(self.class_names)
+
+
+def get_params(
+ make_dirs: bool = False,
+ exps_dir: str = "../../experiments/",
+ model_name: str | None = None,
+ experiment: Path | str | None = None,
+ **kwargs,
+) -> TrainingParameters:
+ experiments_dir = Path(exps_dir)
+
+ now_str = datetime.datetime.now().strftime("%Y_%m_%d__%H_%M_%S")
+
+ if model_name is None:
+ model_name = f"{now_str}.pth.tar"
+
+ if experiment is None:
+ experiment = experiments_dir / now_str
+ experiment = Path(experiment)
+
+ model_file_name = experiment / model_name
+ op_ims_dir = experiment / "op_ims"
+ op_ims_test_dir = experiment / "op_ims_test"
+
+ params = TrainingParameters(
+ model_name=model_name,
+ experiment=experiment,
+ model_file_name=model_file_name,
+ op_im_dir=op_ims_dir,
+ op_im_dir_test=op_ims_test_dir,
+ **kwargs,
+ )
+
+ if make_dirs:
+ mk_dir(experiment)
+ mk_dir(params.model_file_name.parent)
+ if params.save_test_image_during_train:
+ mk_dir(params.op_im_dir)
+ if params.save_test_image_after_train:
+ mk_dir(params.op_im_dir_test)
+
+ return params
diff --git a/batdetect2/detector/post_process.py b/src/batdetect2/detector/post_process.py
similarity index 96%
rename from batdetect2/detector/post_process.py
rename to src/batdetect2/detector/post_process.py
index b47eec69..ff2a6b5b 100644
--- a/batdetect2/detector/post_process.py
+++ b/src/batdetect2/detector/post_process.py
@@ -1,5 +1,4 @@
"""Post-processing of the output of the model."""
-from typing import List, Tuple, Union
import numpy as np
import torch
@@ -44,7 +43,7 @@ def run_nms(
outputs: ModelOutput,
params: NonMaximumSuppressionConfig,
sampling_rate: np.ndarray,
-) -> Tuple[List[PredictionResults], List[np.ndarray]]:
+) -> tuple[list[PredictionResults], list[np.ndarray]]:
"""Run non-maximum suppression on the output of the model.
Model outputs processed are expected to have a batch dimension.
@@ -72,8 +71,8 @@ def run_nms(
scores, y_pos, x_pos = get_topk_scores(pred_det_nms, top_k)
# loop over batch to save outputs
- preds: List[PredictionResults] = []
- feats: List[np.ndarray] = []
+ preds: list[PredictionResults] = []
+ feats: list[np.ndarray] = []
for num_detection in range(pred_det_nms.shape[0]):
# get valid indices
inds_ord = torch.argsort(x_pos[num_detection, :])
@@ -150,7 +149,7 @@ def run_nms(
def non_max_suppression(
heat: torch.Tensor,
- kernel_size: Union[int, Tuple[int, int]],
+ kernel_size: int | tuple[int, int],
):
# kernel can be an int or list/tuple
if isinstance(kernel_size, int):
diff --git a/src/batdetect2/evaluate/__init__.py b/src/batdetect2/evaluate/__init__.py
new file mode 100644
index 00000000..09e7bb7e
--- /dev/null
+++ b/src/batdetect2/evaluate/__init__.py
@@ -0,0 +1,30 @@
+from batdetect2.evaluate.config import EvaluationConfig
+from batdetect2.evaluate.evaluate import DEFAULT_EVAL_DIR, run_evaluate
+from batdetect2.evaluate.evaluator import Evaluator, build_evaluator
+from batdetect2.evaluate.results import save_evaluation_results
+from batdetect2.evaluate.tasks import TaskConfig, build_task
+from batdetect2.evaluate.types import (
+    AffinityFunction,
+    ClipMatches,
+    EvaluationTaskProtocol,
+    EvaluatorProtocol,
+    MetricsProtocol,
+    PlotterProtocol,
+)
+
+__all__ = [
+    "AffinityFunction",
+    "ClipMatches",
+    "DEFAULT_EVAL_DIR",
+    "EvaluationConfig",
+    "EvaluationTaskProtocol",
+    "Evaluator",
+    "EvaluatorProtocol",
+ "MetricsProtocol",
+ "PlotterProtocol",
+ "TaskConfig",
+ "build_evaluator",
+ "build_task",
+ "run_evaluate",
+ "save_evaluation_results",
+]
diff --git a/src/batdetect2/evaluate/affinity.py b/src/batdetect2/evaluate/affinity.py
new file mode 100644
index 00000000..89284eb9
--- /dev/null
+++ b/src/batdetect2/evaluate/affinity.py
@@ -0,0 +1,227 @@
+from typing import Annotated, Literal
+
+from pydantic import Field
+from soundevent import data
+from soundevent.geometry import (
+ buffer_geometry,
+ compute_bbox_iou,
+ compute_geometric_iou,
+ compute_temporal_closeness,
+ compute_temporal_iou,
+)
+
+from batdetect2.core import (
+ BaseConfig,
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.evaluate.types import AffinityFunction
+from batdetect2.postprocess.types import Detection
+
+affinity_functions: Registry[AffinityFunction, []] = Registry(
+ "affinity_function"
+)
+
+
+@add_import_config(affinity_functions)
+class AffinityFunctionImportConfig(ImportConfig):
+ """Use any callable as an affinity function.
+
+ Set ``name="import"`` and provide a ``target`` pointing to any
+ callable to use it instead of a built-in option.
+ """
+
+ name: Literal["import"] = "import"
+
+
+class TimeAffinityConfig(BaseConfig):
+ name: Literal["time_affinity"] = "time_affinity"
+ position: Literal["start", "end", "center"] | float = "start"
+ max_distance: float = 0.01
+
+
+class TimeAffinity(AffinityFunction):
+ def __init__(
+ self,
+ max_distance: float = 0.01,
+ position: Literal["start", "end", "center"] | float = "start",
+ ):
+ if position == "start":
+ position = 0
+ elif position == "end":
+ position = 1
+ elif position == "center":
+ position = 0.5
+
+ self.position = position
+ self.max_distance = max_distance
+
+ def __call__(
+ self,
+ detection: Detection,
+ ground_truth: data.SoundEventAnnotation,
+ ) -> float:
+ target_geometry = ground_truth.sound_event.geometry
+ source_geometry = detection.geometry
+ return compute_temporal_closeness(
+ target_geometry,
+ source_geometry,
+ ratio=self.position,
+ max_distance=self.max_distance,
+ )
+
+ @affinity_functions.register(TimeAffinityConfig)
+ @staticmethod
+ def from_config(config: TimeAffinityConfig):
+ return TimeAffinity(
+ max_distance=config.max_distance,
+ position=config.position,
+ )
+
+
+class IntervalIOUConfig(BaseConfig):
+ name: Literal["interval_iou"] = "interval_iou"
+ time_buffer: float = 0.0
+
+
+class IntervalIOU(AffinityFunction):
+ def __init__(self, time_buffer: float):
+ if time_buffer < 0:
+ raise ValueError("time_buffer must be non-negative")
+
+ self.time_buffer = time_buffer
+
+ def __call__(
+ self,
+ detection: Detection,
+ ground_truth: data.SoundEventAnnotation,
+ ) -> float:
+ target_geometry = ground_truth.sound_event.geometry
+ source_geometry = detection.geometry
+
+ if self.time_buffer > 0:
+ target_geometry = buffer_geometry(
+ target_geometry,
+ time=self.time_buffer,
+ )
+ source_geometry = buffer_geometry(
+ source_geometry,
+ time=self.time_buffer,
+ )
+
+ return compute_temporal_iou(target_geometry, source_geometry)
+
+ @affinity_functions.register(IntervalIOUConfig)
+ @staticmethod
+ def from_config(config: IntervalIOUConfig):
+ return IntervalIOU(time_buffer=config.time_buffer)
+
+
+class BBoxIOUConfig(BaseConfig):
+ name: Literal["bbox_iou"] = "bbox_iou"
+ time_buffer: float = 0.0
+ freq_buffer: float = 0.0
+
+
+class BBoxIOU(AffinityFunction):
+ def __init__(self, time_buffer: float, freq_buffer: float):
+ if time_buffer < 0:
+ raise ValueError("time_buffer must be non-negative")
+
+ if freq_buffer < 0:
+ raise ValueError("freq_buffer must be non-negative")
+
+ self.time_buffer = time_buffer
+ self.freq_buffer = freq_buffer
+
+ def __call__(
+ self,
+ detection: Detection,
+ ground_truth: data.SoundEventAnnotation,
+ ):
+ target_geometry = ground_truth.sound_event.geometry
+ source_geometry = detection.geometry
+
+ if self.time_buffer > 0 or self.freq_buffer > 0:
+ target_geometry = buffer_geometry(
+ target_geometry,
+ time=self.time_buffer,
+ freq=self.freq_buffer,
+ )
+ source_geometry = buffer_geometry(
+ source_geometry,
+ time=self.time_buffer,
+ freq=self.freq_buffer,
+ )
+
+ return compute_bbox_iou(target_geometry, source_geometry)
+
+ @affinity_functions.register(BBoxIOUConfig)
+ @staticmethod
+ def from_config(config: BBoxIOUConfig):
+ return BBoxIOU(
+ time_buffer=config.time_buffer,
+ freq_buffer=config.freq_buffer,
+ )
+
+
+class GeometricIOUConfig(BaseConfig):
+ name: Literal["geometric_iou"] = "geometric_iou"
+ time_buffer: float = 0.0
+ freq_buffer: float = 0.0
+
+
+class GeometricIOU(AffinityFunction):
+ def __init__(self, time_buffer: float = 0, freq_buffer: float = 0):
+ if time_buffer < 0:
+ raise ValueError("time_buffer must be non-negative")
+
+ if freq_buffer < 0:
+ raise ValueError("freq_buffer must be non-negative")
+
+ self.time_buffer = time_buffer
+ self.freq_buffer = freq_buffer
+
+ def __call__(
+ self,
+ detection: Detection,
+ ground_truth: data.SoundEventAnnotation,
+ ):
+ target_geometry = ground_truth.sound_event.geometry
+ source_geometry = detection.geometry
+
+ if self.time_buffer > 0 or self.freq_buffer > 0:
+ target_geometry = buffer_geometry(
+ target_geometry,
+ time=self.time_buffer,
+ freq=self.freq_buffer,
+ )
+ source_geometry = buffer_geometry(
+ source_geometry,
+ time=self.time_buffer,
+ freq=self.freq_buffer,
+ )
+
+ return compute_geometric_iou(target_geometry, source_geometry)
+
+ @affinity_functions.register(GeometricIOUConfig)
+ @staticmethod
+ def from_config(config: GeometricIOUConfig):
+        return GeometricIOU(time_buffer=config.time_buffer, freq_buffer=config.freq_buffer)
+
+
+AffinityConfig = Annotated[
+ TimeAffinityConfig
+ | IntervalIOUConfig
+ | BBoxIOUConfig
+ | GeometricIOUConfig,
+ Field(discriminator="name"),
+]
+
+
+def build_affinity_function(
+ config: AffinityConfig | None = None,
+) -> AffinityFunction:
+ config = config or GeometricIOUConfig()
+ return affinity_functions.build(config)
diff --git a/src/batdetect2/evaluate/config.py b/src/batdetect2/evaluate/config.py
new file mode 100644
index 00000000..ebeb8a79
--- /dev/null
+++ b/src/batdetect2/evaluate/config.py
@@ -0,0 +1,45 @@
+from typing import List
+
+from pydantic import Field
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.evaluate.tasks import TaskConfig
+from batdetect2.evaluate.tasks.classification import ClassificationTaskConfig
+from batdetect2.evaluate.tasks.detection import DetectionTaskConfig
+
+__all__ = [
+ "EvaluationConfig",
+]
+
+
+def _default_tasks() -> list[TaskConfig]:
+ return [
+ DetectionTaskConfig(),
+ ClassificationTaskConfig(),
+ ]
+
+
+class EvaluationConfig(BaseConfig):
+ tasks: List[TaskConfig] = Field(default_factory=_default_tasks)
+
+
+def get_default_eval_config() -> EvaluationConfig:
+ return EvaluationConfig.model_validate(
+ {
+ "tasks": [
+ {
+ "name": "sound_event_detection",
+ "plots": [
+ {"name": "pr_curve"},
+ {"name": "score_distribution"},
+ ],
+ },
+ {
+ "name": "sound_event_classification",
+ "plots": [
+ {"name": "pr_curve"},
+ ],
+ },
+ ]
+ }
+ )
diff --git a/src/batdetect2/evaluate/dataset.py b/src/batdetect2/evaluate/dataset.py
new file mode 100644
index 00000000..5fe43eb5
--- /dev/null
+++ b/src/batdetect2/evaluate/dataset.py
@@ -0,0 +1,139 @@
+from typing import List, NamedTuple, Sequence
+
+import torch
+from loguru import logger
+from pydantic import Field
+from soundevent import data
+from torch.utils.data import DataLoader, Dataset
+
+from batdetect2.audio import ClipConfig, build_audio_loader, build_clipper
+from batdetect2.audio.clips import PaddedClipConfig
+from batdetect2.audio.types import AudioLoader, ClipperProtocol
+from batdetect2.core import BaseConfig
+from batdetect2.core.arrays import adjust_width
+from batdetect2.preprocess import build_preprocessor
+from batdetect2.preprocess.types import PreprocessorProtocol
+
+__all__ = [
+ "TestDataset",
+ "build_test_dataset",
+ "build_test_loader",
+]
+
+
+class TestExample(NamedTuple):
+ spec: torch.Tensor
+ idx: torch.Tensor
+ start_time: torch.Tensor
+ end_time: torch.Tensor
+
+
+class TestDataset(Dataset[TestExample]):
+ clip_annotations: List[data.ClipAnnotation]
+
+ def __init__(
+ self,
+ clip_annotations: Sequence[data.ClipAnnotation],
+ audio_loader: AudioLoader,
+ preprocessor: PreprocessorProtocol,
+ clipper: ClipperProtocol | None = None,
+ audio_dir: data.PathLike | None = None,
+ ):
+ self.clip_annotations = list(clip_annotations)
+ self.clipper = clipper
+ self.preprocessor = preprocessor
+ self.audio_loader = audio_loader
+ self.audio_dir = audio_dir
+
+ def __len__(self):
+ return len(self.clip_annotations)
+
+ def __getitem__(self, index: int) -> TestExample:
+ clip_annotation = self.clip_annotations[index]
+
+ if self.clipper is not None:
+ clip_annotation = self.clipper(clip_annotation)
+
+ clip = clip_annotation.clip
+ wav = self.audio_loader.load_clip(clip, audio_dir=self.audio_dir)
+ wav_tensor = torch.tensor(wav).unsqueeze(0)
+ spectrogram = self.preprocessor(wav_tensor)
+ return TestExample(
+ spec=spectrogram,
+ idx=torch.tensor(index),
+ start_time=torch.tensor(clip.start_time),
+ end_time=torch.tensor(clip.end_time),
+ )
+
+
+class TestLoaderConfig(BaseConfig):
+ clipping_strategy: ClipConfig = Field(
+ default_factory=lambda: PaddedClipConfig()
+ )
+
+
+def build_test_loader(
+ clip_annotations: Sequence[data.ClipAnnotation],
+ audio_loader: AudioLoader | None = None,
+ preprocessor: PreprocessorProtocol | None = None,
+ config: TestLoaderConfig | None = None,
+ num_workers: int = 0,
+) -> DataLoader[TestExample]:
+ logger.info("Building test data loader...")
+ config = config or TestLoaderConfig()
+ logger.opt(lazy=True).debug(
+ "Test data loader config: \n{config}",
+ config=lambda: config.to_yaml_string(exclude_none=True),
+ )
+
+ test_dataset = build_test_dataset(
+ clip_annotations,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ config=config,
+ )
+
+ return DataLoader(
+ test_dataset,
+ batch_size=1,
+ shuffle=False,
+ num_workers=num_workers,
+ collate_fn=_collate_fn,
+ )
+
+
+def build_test_dataset(
+ clip_annotations: Sequence[data.ClipAnnotation],
+ audio_loader: AudioLoader | None = None,
+ preprocessor: PreprocessorProtocol | None = None,
+ config: TestLoaderConfig | None = None,
+) -> TestDataset:
+    logger.info("Building test dataset...")
+ config = config or TestLoaderConfig()
+
+ clipper = build_clipper(config=config.clipping_strategy)
+
+ if audio_loader is None:
+ audio_loader = build_audio_loader()
+
+ if preprocessor is None:
+ preprocessor = build_preprocessor()
+
+ return TestDataset(
+ clip_annotations,
+ audio_loader=audio_loader,
+ clipper=clipper,
+ preprocessor=preprocessor,
+ )
+
+
+def _collate_fn(batch: List[TestExample]) -> TestExample:
+ max_width = max(item.spec.shape[-1] for item in batch)
+ return TestExample(
+ spec=torch.stack(
+ [adjust_width(item.spec, max_width) for item in batch]
+ ),
+ idx=torch.stack([item.idx for item in batch]),
+ start_time=torch.stack([item.start_time for item in batch]),
+ end_time=torch.stack([item.end_time for item in batch]),
+ )
diff --git a/src/batdetect2/evaluate/evaluate.py b/src/batdetect2/evaluate/evaluate.py
new file mode 100644
index 00000000..ddaa4597
--- /dev/null
+++ b/src/batdetect2/evaluate/evaluate.py
@@ -0,0 +1,87 @@
+from pathlib import Path
+from typing import Sequence
+
+from lightning import Trainer
+from soundevent import data
+
+from batdetect2.audio import AudioConfig, build_audio_loader
+from batdetect2.audio.types import AudioLoader
+from batdetect2.evaluate import EvaluationConfig
+from batdetect2.evaluate.dataset import build_test_loader
+from batdetect2.evaluate.evaluator import build_evaluator
+from batdetect2.evaluate.lightning import EvaluationModule
+from batdetect2.logging import CSVLoggerConfig, LoggerConfig, build_logger
+from batdetect2.models.types import ModelProtocol
+from batdetect2.outputs import OutputsConfig, build_output_transform
+from batdetect2.outputs.types import OutputFormatterProtocol
+from batdetect2.postprocess.types import ClipDetections
+from batdetect2.preprocess.types import PreprocessorProtocol
+from batdetect2.targets.types import ROIMapperProtocol, TargetProtocol
+
+DEFAULT_EVAL_DIR: Path = Path("outputs") / "evaluations"
+
+
+def run_evaluate(
+ model: ModelProtocol,
+ test_annotations: Sequence[data.ClipAnnotation],
+ targets: TargetProtocol,
+ roi_mapper: ROIMapperProtocol,
+ audio_loader: AudioLoader | None = None,
+ preprocessor: PreprocessorProtocol | None = None,
+ audio_config: AudioConfig | None = None,
+ evaluation_config: EvaluationConfig | None = None,
+ output_config: OutputsConfig | None = None,
+ logger_config: LoggerConfig | None = None,
+ formatter: OutputFormatterProtocol | None = None,
+ num_workers: int = 0,
+ output_dir: data.PathLike = DEFAULT_EVAL_DIR,
+ experiment_name: str | None = None,
+ run_name: str | None = None,
+) -> tuple[dict[str, float], list[ClipDetections]]:
+
+ audio_config = audio_config or AudioConfig()
+ evaluation_config = evaluation_config or EvaluationConfig()
+ output_config = output_config or OutputsConfig()
+
+ audio_loader = audio_loader or build_audio_loader(config=audio_config)
+
+ preprocessor = preprocessor or model.preprocessor
+
+ loader = build_test_loader(
+ test_annotations,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ num_workers=num_workers,
+ )
+
+ output_transform = build_output_transform(
+ config=output_config.transform,
+ targets=targets,
+ roi_mapper=roi_mapper,
+ )
+ evaluator = build_evaluator(
+ config=evaluation_config,
+ targets=targets,
+ transform=output_transform,
+ )
+
+ logger = build_logger(
+ logger_config or CSVLoggerConfig(),
+ log_dir=Path(output_dir),
+ experiment_name=experiment_name,
+ run_name=run_name,
+ )
+ module = EvaluationModule(
+ model,
+ evaluator,
+ )
+ trainer = Trainer(logger=logger, enable_checkpointing=False)
+ metrics = trainer.test(module, loader)
+
+ if formatter is not None and logger.log_dir is not None:
+ formatter.save(
+ module.predictions,
+ path=Path(logger.log_dir) / "predictions",
+ )
+
+ return metrics, module.predictions # type: ignore
diff --git a/src/batdetect2/evaluate/evaluator.py b/src/batdetect2/evaluate/evaluator.py
new file mode 100644
index 00000000..6481fa9d
--- /dev/null
+++ b/src/batdetect2/evaluate/evaluator.py
@@ -0,0 +1,92 @@
+from typing import Any, Dict, Iterable, List, Sequence, Tuple
+
+from matplotlib.figure import Figure
+from soundevent import data
+
+from batdetect2.evaluate.config import EvaluationConfig
+from batdetect2.evaluate.tasks import build_task
+from batdetect2.evaluate.types import EvaluationTaskProtocol, EvaluatorProtocol
+from batdetect2.outputs import OutputTransformProtocol, build_output_transform
+from batdetect2.postprocess.types import ClipDetections, ClipDetectionsTensor
+from batdetect2.targets import build_roi_mapping, build_targets
+from batdetect2.targets.types import ROIMapperProtocol, TargetProtocol
+
+__all__ = [
+ "Evaluator",
+ "build_evaluator",
+]
+
+
+class Evaluator:
+ def __init__(
+ self,
+ targets: TargetProtocol,
+ transform: OutputTransformProtocol,
+ tasks: Sequence[EvaluationTaskProtocol],
+ ):
+ self.targets = targets
+ self.transform = transform
+ self.tasks = tasks
+
+ def to_clip_detections_batch(
+ self,
+ clip_detections: Sequence[ClipDetectionsTensor],
+ clips: Sequence[data.Clip],
+ ) -> list[ClipDetections]:
+ return [
+ self.transform.to_clip_detections(detections=dets, clip=clip)
+ for dets, clip in zip(clip_detections, clips, strict=False)
+ ]
+
+ def evaluate(
+ self,
+ clip_annotations: Sequence[data.ClipAnnotation],
+ predictions: Sequence[ClipDetections],
+ ) -> List[Any]:
+ return [
+ task.evaluate(clip_annotations, predictions) for task in self.tasks
+ ]
+
+ def compute_metrics(self, eval_outputs: List[Any]) -> Dict[str, float]:
+ results = {}
+
+ for task, outputs in zip(self.tasks, eval_outputs, strict=False):
+ results.update(task.compute_metrics(outputs))
+
+ return results
+
+ def generate_plots(
+ self,
+ eval_outputs: List[Any],
+ ) -> Iterable[Tuple[str, Figure]]:
+ for task, outputs in zip(self.tasks, eval_outputs, strict=False):
+ for name, fig in task.generate_plots(outputs):
+ yield name, fig
+
+
+def build_evaluator(
+ config: EvaluationConfig | dict | None = None,
+ targets: TargetProtocol | None = None,
+ roi_mapper: ROIMapperProtocol | None = None,
+ transform: OutputTransformProtocol | None = None,
+) -> EvaluatorProtocol:
+ targets = targets or build_targets()
+
+ roi_mapper = roi_mapper or build_roi_mapping()
+
+ if config is None:
+ config = EvaluationConfig()
+
+ if not isinstance(config, EvaluationConfig):
+ config = EvaluationConfig.model_validate(config)
+
+ transform = transform or build_output_transform(
+ targets=targets,
+ roi_mapper=roi_mapper,
+ )
+
+ return Evaluator(
+ targets=targets,
+ transform=transform,
+ tasks=[build_task(task, targets=targets) for task in config.tasks],
+ )
diff --git a/batdetect2/train/evaluate.py b/src/batdetect2/evaluate/legacy/evaluate.py
similarity index 96%
rename from batdetect2/train/evaluate.py
rename to src/batdetect2/evaluate/legacy/evaluate.py
index a926fbb5..1ec2c9f7 100755
--- a/batdetect2/train/evaluate.py
+++ b/src/batdetect2/evaluate/legacy/evaluate.py
@@ -1,20 +1,14 @@
import numpy as np
-from sklearn.metrics import (
- accuracy_score,
- auc,
- balanced_accuracy_score,
- roc_curve,
-)
+from sklearn.metrics import auc, roc_curve
def compute_error_auc(op_str, gt, pred, prob):
-
# classification error
- pred_int = (pred > prob).astype(np.int)
+ pred_int = (pred > prob).astype(np.int32)
class_acc = (pred_int == gt).mean() * 100.0
# ROC - area under curve
- fpr, tpr, thresholds = roc_curve(gt, pred)
+ fpr, tpr, _ = roc_curve(gt, pred)
roc_auc = auc(fpr, tpr)
print(
@@ -25,7 +19,6 @@ def compute_error_auc(op_str, gt, pred, prob):
def calc_average_precision(recall, precision):
-
precision[np.isnan(precision)] = 0
recall[np.isnan(recall)] = 0
@@ -91,7 +84,6 @@ def compute_pre_rec(
pred_class = []
file_ids = []
for pid, pp in enumerate(preds):
-
# filter predicted calls that are too near the start or end of the file
file_dur = gts[pid]["duration"]
valid_inds = (pp["start_times"] >= ignore_start_end) & (
@@ -129,7 +121,7 @@ def compute_pre_rec(
file_ids.append([pid] * valid_inds.sum())
confidence = np.hstack(confidence)
- file_ids = np.hstack(file_ids).astype(np.int)
+ file_ids = np.hstack(file_ids).astype(int)
pred_boxes = np.vstack(pred_boxes)
if len(pred_class) > 0:
pred_class = np.hstack(pred_class)
@@ -141,7 +133,6 @@ def compute_pre_rec(
gt_generic_class = []
num_positives = 0
for gg in gts:
-
# filter ground truth calls that are too near the start or end of the file
file_dur = gg["duration"]
valid_inds = (gg["start_times"] >= ignore_start_end) & (
@@ -150,8 +141,7 @@ def compute_pre_rec(
# note, files with the incorrect duration will cause a problem
if (gg["start_times"] > file_dur).sum() > 0:
- print("Error: file duration incorrect for", gg["id"])
- assert False
+ raise ValueError(f"Error: file duration incorrect for {gg['id']}")
boxes = np.vstack(
(
@@ -197,6 +187,8 @@ def compute_pre_rec(
gt_id = file_ids[ind]
valid_det = False
+ det_ind = 0
+
if gt_boxes[gt_id].shape[0] > 0:
# compute overlap
valid_det, det_ind = compute_affinity_1d(
@@ -205,7 +197,6 @@ def compute_pre_rec(
# valid detection that has not already been assigned
if valid_det and (gt_assigned[gt_id][det_ind] == 0):
-
count_as_true_pos = True
if eval_mode == "top_class" and (
gt_class[gt_id][det_ind] != pred_class[ind]
@@ -227,7 +218,7 @@ def compute_pre_rec(
# store threshold values - used for plotting
conf_sorted = np.sort(confidence)[::-1][valid_inds]
thresholds = np.linspace(0.1, 0.9, 9)
- thresholds_inds = np.zeros(len(thresholds), dtype=np.int)
+ thresholds_inds = np.zeros(len(thresholds), dtype=int)
for ii, tt in enumerate(thresholds):
thresholds_inds[ii] = np.argmin(conf_sorted > tt)
thresholds_inds[thresholds_inds == 0] = -1
@@ -339,7 +330,7 @@ def compute_file_accuracy(gts, preds, num_classes):
).mean(0)
best_thresh = np.argmax(acc_per_thresh)
best_acc = acc_per_thresh[best_thresh]
- pred_valid = pred_valid_all[:, best_thresh].astype(np.int).tolist()
+ pred_valid = pred_valid_all[:, best_thresh].astype(int).tolist()
res = {}
res["num_valid_files"] = len(gt_valid)
diff --git a/batdetect2/evaluate/evaluate_models.py b/src/batdetect2/evaluate/legacy/evaluate_models.py
similarity index 98%
rename from batdetect2/evaluate/evaluate_models.py
rename to src/batdetect2/evaluate/legacy/evaluate_models.py
index 3303c92b..d67cfad8 100644
--- a/batdetect2/evaluate/evaluate_models.py
+++ b/src/batdetect2/evaluate/legacy/evaluate_models.py
@@ -7,20 +7,19 @@
import json
import os
-import torch
import numpy as np
import pandas as pd
+import torch
from sklearn.ensemble import RandomForestClassifier
-from batdetect2.detector import parameters
-import batdetect2.train.evaluate as evl
-import batdetect2.train.train_utils as tu
+import batdetect2.evaluate.legacy.evaluate as evl
+import batdetect2.train.legacy.train_utils as tu
import batdetect2.utils.detector_utils as du
import batdetect2.utils.plot_utils as pu
+from batdetect2.detector import parameters
def get_blank_annotation(ip_str):
-
res = {}
res["class_name"] = ""
res["duration"] = -1
@@ -77,7 +76,6 @@ def create_genus_mapping(gt_test, preds, class_names):
def load_tadarida_pred(ip_dir, dataset, file_of_interest):
-
res, ann = get_blank_annotation("Generated by Tadarida")
# create the annotations in the correct format
@@ -120,7 +118,6 @@ def load_sonobat_meta(
class_names,
only_accepted_species=True,
):
-
sp_dict = {}
for ss in class_names:
sp_key = ss.split(" ")[0][:3] + ss.split(" ")[1][:3]
@@ -182,7 +179,6 @@ def load_sonobat_meta(
def load_sonobat_preds(dataset, id, sb_meta, set_class_name=None):
-
# create the annotations in the correct format
res, ann = get_blank_annotation("Generated by Sonobat")
res_c = copy.deepcopy(res)
@@ -221,7 +217,6 @@ def load_sonobat_preds(dataset, id, sb_meta, set_class_name=None):
def bb_overlap(bb_g_in, bb_p_in):
-
freq_scale = 10000000.0 # ensure that both axis are roughly the same range
bb_g = [
bb_g_in["start_time"],
@@ -330,7 +325,8 @@ def load_gt_data(datasets, events_of_interest, class_names, classes_to_ignore):
for dd in datasets:
print("\n" + dd["dataset_name"])
gt_dataset = tu.load_set_of_anns(
- [dd], events_of_interest=events_of_interest, verbose=True
+ [dd],
+ events_of_interest=events_of_interest,
)
gt_dataset = [
parse_data(gg, class_names, classes_to_ignore, False)
@@ -361,7 +357,7 @@ def train_rf_model(x_train, y_train, num_classes, seed=2001):
clf = RandomForestClassifier(random_state=seed, n_jobs=-1)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_train)
- tr_acc = (y_pred == y_train).mean()
+ (y_pred == y_train).mean()
# print('Train acc', round(tr_acc*100, 2))
return clf, un_train_class
@@ -454,7 +450,7 @@ def add_root_path_back(data_sets, ann_path, wav_path):
def check_classes_in_train(gt_list, class_names):
- num_gt_total = np.sum([gg["start_times"].shape[0] for gg in gt_list])
+ np.sum([gg["start_times"].shape[0] for gg in gt_list])
num_with_no_class = 0
for gt in gt_list:
for cc in gt["class_names"]:
@@ -464,7 +460,6 @@ def check_classes_in_train(gt_list, class_names):
if __name__ == "__main__":
-
parser = argparse.ArgumentParser()
parser.add_argument(
"op_dir",
@@ -553,7 +548,9 @@ def check_classes_in_train(gt_list, class_names):
test_dict["dataset_name"] = args["test_file"].replace(".json", "")
test_dict["is_test"] = True
test_dict["is_binary"] = True
- test_dict["ann_path"] = os.path.join(args["ann_dir"], args["test_file"])
+ test_dict["ann_path"] = os.path.join(
+ args["ann_dir"], args["test_file"]
+ )
test_dict["wav_path"] = args["data_dir"]
test_sets = [test_dict]
@@ -572,7 +569,7 @@ def check_classes_in_train(gt_list, class_names):
num_with_no_class = check_classes_in_train(gt_test, class_names)
if total_num_calls == num_with_no_class:
print("Classes from the test set are not in the train set.")
- assert False
+ raise AssertionError()
# only need the train data if evaluating Sonobat or Tadarida
if args["sb_ip_dir"] != "" or args["td_ip_dir"] != "":
@@ -746,7 +743,7 @@ def check_classes_in_train(gt_list, class_names):
# check if the class names are the same
if params_bd["class_names"] != class_names:
print("Warning: Class names are not the same as the trained model")
- assert False
+ raise AssertionError()
run_config = {
**bd_args,
@@ -756,7 +753,7 @@ def check_classes_in_train(gt_list, class_names):
preds_bd = []
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- for ii, gg in enumerate(gt_test):
+ for gg in gt_test:
pred = du.process_file(
gg["file_path"],
model,
diff --git a/batdetect2/evaluate/readme.md b/src/batdetect2/evaluate/legacy/readme.md
similarity index 100%
rename from batdetect2/evaluate/readme.md
rename to src/batdetect2/evaluate/legacy/readme.md
diff --git a/src/batdetect2/evaluate/lightning.py b/src/batdetect2/evaluate/lightning.py
new file mode 100644
index 00000000..881b6920
--- /dev/null
+++ b/src/batdetect2/evaluate/lightning.py
@@ -0,0 +1,75 @@
+from typing import Any, List
+
+from lightning import LightningModule
+from soundevent import data
+from torch.utils.data import DataLoader
+
+from batdetect2.evaluate.dataset import TestDataset, TestExample
+from batdetect2.evaluate.types import EvaluatorProtocol
+from batdetect2.logging import get_image_logger
+from batdetect2.models.types import ModelProtocol
+from batdetect2.postprocess.types import ClipDetections
+
+
+class EvaluationModule(LightningModule):
+ def __init__(
+ self,
+ model: ModelProtocol,
+ evaluator: EvaluatorProtocol,
+ ):
+ super().__init__()
+
+ self.model = model
+ self.evaluator = evaluator
+
+ self.clip_annotations: List[data.ClipAnnotation] = []
+ self.predictions: List[ClipDetections] = []
+
+ def test_step(self, batch: TestExample, batch_idx: int):
+ dataset = self.get_dataset()
+ clip_annotations = [
+ dataset.clip_annotations[int(example_idx)]
+ for example_idx in batch.idx
+ ]
+
+ outputs = self.model.detector(batch.spec)
+ clip_detections = self.model.postprocessor(outputs)
+ predictions = self.evaluator.to_clip_detections_batch(
+ clip_detections,
+ [clip_annotation.clip for clip_annotation in clip_annotations],
+ )
+
+ self.clip_annotations.extend(clip_annotations)
+ self.predictions.extend(predictions)
+
+ def on_test_epoch_start(self):
+ self.clip_annotations = []
+ self.predictions = []
+
+ def on_test_epoch_end(self):
+ clip_evals = self.evaluator.evaluate(
+ self.clip_annotations,
+ self.predictions,
+ )
+ self.log_metrics(clip_evals)
+ self.generate_plots(clip_evals)
+
+ def generate_plots(self, evaluated_clips: Any):
+ plotter = get_image_logger(self.logger) # type: ignore
+
+ if plotter is None:
+ return
+
+ for figure_name, fig in self.evaluator.generate_plots(evaluated_clips):
+ plotter(figure_name, fig, self.global_step)
+
+ def log_metrics(self, evaluated_clips: Any):
+ metrics = self.evaluator.compute_metrics(evaluated_clips)
+ self.log_dict(metrics)
+
+ def get_dataset(self) -> TestDataset:
+ dataloaders = self.trainer.test_dataloaders
+ assert isinstance(dataloaders, DataLoader)
+ dataset = dataloaders.dataset
+ assert isinstance(dataset, TestDataset)
+ return dataset
diff --git a/batdetect2/finetune/__init__.py b/src/batdetect2/evaluate/metrics/__init__.py
similarity index 100%
rename from batdetect2/finetune/__init__.py
rename to src/batdetect2/evaluate/metrics/__init__.py
diff --git a/src/batdetect2/evaluate/metrics/classification.py b/src/batdetect2/evaluate/metrics/classification.py
new file mode 100644
index 00000000..8be33a51
--- /dev/null
+++ b/src/batdetect2/evaluate/metrics/classification.py
@@ -0,0 +1,310 @@
+from collections import defaultdict
+from dataclasses import dataclass
+from typing import (
+ Annotated,
+ Callable,
+ Dict,
+ List,
+ Literal,
+ Mapping,
+ Sequence,
+ Tuple,
+)
+
+import numpy as np
+from pydantic import Field
+from sklearn import metrics
+from soundevent import data
+
+from batdetect2.core import (
+ BaseConfig,
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.evaluate.metrics.common import (
+ average_precision,
+ compute_precision_recall,
+)
+from batdetect2.postprocess.types import Detection
+from batdetect2.targets.types import TargetProtocol
+
+__all__ = [
+ "ClassificationMetric",
+ "ClassificationMetricConfig",
+ "ClassificationMetricImportConfig",
+ "build_classification_metric",
+ "compute_precision_recall_curves",
+]
+
+
+@dataclass
+class MatchEval:
+ clip: data.Clip
+ gt: data.SoundEventAnnotation | None
+ pred: Detection | None
+
+ is_prediction: bool
+ is_ground_truth: bool
+ is_generic: bool
+ true_class: str | None
+ score: float
+
+
+@dataclass
+class ClipEval:
+ clip: data.Clip
+ matches: Mapping[str, List[MatchEval]]
+
+
+ClassificationMetric = Callable[[Sequence[ClipEval]], Dict[str, float]]
+
+
+classification_metrics: Registry[ClassificationMetric, [TargetProtocol]] = (
+ Registry("classification_metric")
+)
+
+
+@add_import_config(classification_metrics)
+class ClassificationMetricImportConfig(ImportConfig):
+ """Use any callable as a classification metric.
+
+ Set ``name="import"`` and provide a ``target`` pointing to any
+ callable to use it instead of a built-in option.
+ """
+
+ name: Literal["import"] = "import"
+
+
+class BaseClassificationConfig(BaseConfig):
+ include: List[str] | None = None
+ exclude: List[str] | None = None
+
+
+class BaseClassificationMetric:
+ def __init__(
+ self,
+ targets: TargetProtocol,
+ include: List[str] | None = None,
+ exclude: List[str] | None = None,
+ ):
+ self.targets = targets
+ self.include = include
+ self.exclude = exclude
+
+ def include_class(self, class_name: str) -> bool:
+ if self.include is not None:
+ return class_name in self.include
+
+ if self.exclude is not None:
+ return class_name not in self.exclude
+
+ return True
+
+
+class ClassificationAveragePrecisionConfig(BaseClassificationConfig):
+ name: Literal["average_precision"] = "average_precision"
+ ignore_non_predictions: bool = True
+ ignore_generic: bool = True
+ label: str = "average_precision"
+
+
+class ClassificationAveragePrecision(BaseClassificationMetric):
+ def __init__(
+ self,
+ targets: TargetProtocol,
+ ignore_non_predictions: bool = True,
+ ignore_generic: bool = True,
+ label: str = "average_precision",
+ include: List[str] | None = None,
+ exclude: List[str] | None = None,
+ ):
+ super().__init__(include=include, exclude=exclude, targets=targets)
+ self.ignore_non_predictions = ignore_non_predictions
+ self.ignore_generic = ignore_generic
+ self.label = label
+
+ def __call__(
+ self, clip_evaluations: Sequence[ClipEval]
+ ) -> Dict[str, float]:
+ y_true, y_score, num_positives = _extract_per_class_metric_data(
+ clip_evaluations,
+ ignore_non_predictions=self.ignore_non_predictions,
+ ignore_generic=self.ignore_generic,
+ )
+
+ class_scores = {
+ class_name: average_precision(
+ y_true[class_name],
+ y_score[class_name],
+ num_positives=num_positives[class_name],
+ )
+ for class_name in self.targets.class_names
+ }
+
+ mean_score = float(
+ np.mean([v for v in class_scores.values() if not np.isnan(v)])
+ )
+
+ return {
+ f"mean_{self.label}": mean_score,
+ **{
+ f"{self.label}/{class_name}": score
+ for class_name, score in class_scores.items()
+ if self.include_class(class_name)
+ },
+ }
+
+ @classification_metrics.register(ClassificationAveragePrecisionConfig)
+ @staticmethod
+ def from_config(
+ config: ClassificationAveragePrecisionConfig,
+ targets: TargetProtocol,
+ ):
+ return ClassificationAveragePrecision(
+ targets=targets,
+ ignore_non_predictions=config.ignore_non_predictions,
+ ignore_generic=config.ignore_generic,
+ label=config.label,
+ include=config.include,
+ exclude=config.exclude,
+ )
+
+
+class ClassificationROCAUCConfig(BaseClassificationConfig):
+ name: Literal["roc_auc"] = "roc_auc"
+ label: str = "roc_auc"
+ ignore_non_predictions: bool = True
+ ignore_generic: bool = True
+
+
+class ClassificationROCAUC(BaseClassificationMetric):
+ def __init__(
+ self,
+ targets: TargetProtocol,
+ ignore_non_predictions: bool = True,
+ ignore_generic: bool = True,
+ label: str = "roc_auc",
+ include: List[str] | None = None,
+ exclude: List[str] | None = None,
+ ):
+ self.targets = targets
+ self.ignore_non_predictions = ignore_non_predictions
+ self.ignore_generic = ignore_generic
+ self.label = label
+ self.include = include
+ self.exclude = exclude
+
+ def __call__(
+ self, clip_evaluations: Sequence[ClipEval]
+ ) -> Dict[str, float]:
+ y_true, y_score, _ = _extract_per_class_metric_data(
+ clip_evaluations,
+ ignore_non_predictions=self.ignore_non_predictions,
+ ignore_generic=self.ignore_generic,
+ )
+
+ class_scores = {}
+
+ for class_name in self.targets.class_names:
+ if len(y_true[class_name]) == 0:
+ class_scores[class_name] = np.nan
+ continue
+
+ class_scores[class_name] = float(
+ metrics.roc_auc_score(
+ y_true[class_name],
+ y_score[class_name],
+ )
+ )
+
+        mean_score = float(
+            np.mean([v for v in class_scores.values() if not np.isnan(v)])
+        )
+
+ return {
+ f"mean_{self.label}": mean_score,
+ **{
+ f"{self.label}/{class_name}": score
+ for class_name, score in class_scores.items()
+ if self.include_class(class_name)
+ },
+ }
+
+ @classification_metrics.register(ClassificationROCAUCConfig)
+ @staticmethod
+ def from_config(
+ config: ClassificationROCAUCConfig, targets: TargetProtocol
+ ):
+        return ClassificationROCAUC(
+            targets=targets,
+            ignore_non_predictions=config.ignore_non_predictions,
+            ignore_generic=config.ignore_generic,
+            label=config.label, include=config.include, exclude=config.exclude,
+        )
+
+
+ClassificationMetricConfig = Annotated[
+ ClassificationAveragePrecisionConfig | ClassificationROCAUCConfig,
+ Field(discriminator="name"),
+]
+
+
+def build_classification_metric(
+ config: ClassificationMetricConfig,
+ targets: TargetProtocol,
+) -> ClassificationMetric:
+ return classification_metrics.build(config, targets)
+
+
+def _extract_per_class_metric_data(
+ clip_evaluations: Sequence[ClipEval],
+ ignore_non_predictions: bool = True,
+ ignore_generic: bool = True,
+):
+ y_true = defaultdict(list)
+ y_score = defaultdict(list)
+ num_positives = defaultdict(lambda: 0)
+
+ for clip_eval in clip_evaluations:
+ for class_name, matches in clip_eval.matches.items():
+ for m in matches:
+ # Exclude matches with ground truth sounds where the class
+ # is unknown
+ if m.is_generic and ignore_generic:
+ continue
+
+ is_class = m.true_class == class_name
+
+ if is_class:
+ num_positives[class_name] += 1
+
+ # Ignore matches that don't correspond to a prediction
+ if not m.is_prediction and ignore_non_predictions:
+ continue
+
+ y_true[class_name].append(is_class)
+ y_score[class_name].append(m.score)
+
+ return y_true, y_score, num_positives
+
+
+def compute_precision_recall_curves(
+ clip_evaluations: Sequence[ClipEval],
+ ignore_non_predictions: bool = True,
+ ignore_generic: bool = True,
+) -> Dict[str, Tuple[np.ndarray, np.ndarray, np.ndarray]]:
+ y_true, y_score, num_positives = _extract_per_class_metric_data(
+ clip_evaluations,
+ ignore_non_predictions=ignore_non_predictions,
+ ignore_generic=ignore_generic,
+ )
+
+ return {
+ class_name: compute_precision_recall(
+ y_true[class_name],
+ y_score[class_name],
+ num_positives=num_positives[class_name],
+ )
+ for class_name in y_true
+ }
diff --git a/src/batdetect2/evaluate/metrics/clip_classification.py b/src/batdetect2/evaluate/metrics/clip_classification.py
new file mode 100644
index 00000000..8595a68b
--- /dev/null
+++ b/src/batdetect2/evaluate/metrics/clip_classification.py
@@ -0,0 +1,147 @@
+from collections import defaultdict
+from dataclasses import dataclass
+from typing import Annotated, Callable, Dict, Literal, Sequence, Set
+
+import numpy as np
+from pydantic import Field
+from sklearn import metrics
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.core.registries import (
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.evaluate.metrics.common import average_precision
+
+
+@dataclass
+class ClipEval:
+ true_classes: Set[str]
+ class_scores: Dict[str, float]
+
+
+ClipClassificationMetric = Callable[[Sequence[ClipEval]], Dict[str, float]]
+
+clip_classification_metrics: Registry[ClipClassificationMetric, []] = Registry(
+ "clip_classification_metric"
+)
+
+
+@add_import_config(clip_classification_metrics)
+class ClipClassificationMetricImportConfig(ImportConfig):
+ """Use any callable as a clip classification metric.
+
+ Set ``name="import"`` and provide a ``target`` pointing to any
+ callable to use it instead of a built-in option.
+ """
+
+ name: Literal["import"] = "import"
+
+
+class ClipClassificationAveragePrecisionConfig(BaseConfig):
+ name: Literal["average_precision"] = "average_precision"
+ label: str = "average_precision"
+
+
+class ClipClassificationAveragePrecision:
+ def __init__(self, label: str = "average_precision"):
+ self.label = label
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Dict[str, float]:
+ y_true = defaultdict(list)
+ y_score = defaultdict(list)
+
+ for clip_eval in clip_evaluations:
+ for class_name, score in clip_eval.class_scores.items():
+ y_true[class_name].append(class_name in clip_eval.true_classes)
+ y_score[class_name].append(score)
+
+ class_scores = {
+ class_name: float(
+ average_precision(
+ y_true=y_true[class_name],
+ y_score=y_score[class_name],
+ )
+ )
+ for class_name in y_true
+ }
+
+ mean = np.mean([v for v in class_scores.values() if not np.isnan(v)])
+
+ return {
+ f"mean_{self.label}": float(mean),
+ **{
+ f"{self.label}/{class_name}": score
+ for class_name, score in class_scores.items()
+ if not np.isnan(score)
+ },
+ }
+
+ @clip_classification_metrics.register(
+ ClipClassificationAveragePrecisionConfig
+ )
+ @staticmethod
+ def from_config(config: ClipClassificationAveragePrecisionConfig):
+ return ClipClassificationAveragePrecision(label=config.label)
+
+
+class ClipClassificationROCAUCConfig(BaseConfig):
+ name: Literal["roc_auc"] = "roc_auc"
+ label: str = "roc_auc"
+
+
+class ClipClassificationROCAUC:
+ def __init__(self, label: str = "roc_auc"):
+ self.label = label
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Dict[str, float]:
+ y_true = defaultdict(list)
+ y_score = defaultdict(list)
+
+ for clip_eval in clip_evaluations:
+ for class_name, score in clip_eval.class_scores.items():
+ y_true[class_name].append(class_name in clip_eval.true_classes)
+ y_score[class_name].append(score)
+
+ class_scores = {
+ class_name: float(
+ metrics.roc_auc_score(
+ y_true=y_true[class_name],
+ y_score=y_score[class_name],
+ )
+ )
+ for class_name in y_true
+ }
+
+ mean = np.mean([v for v in class_scores.values() if not np.isnan(v)])
+
+ return {
+ f"mean_{self.label}": float(mean),
+ **{
+ f"{self.label}/{class_name}": score
+ for class_name, score in class_scores.items()
+ if not np.isnan(score)
+ },
+ }
+
+ @clip_classification_metrics.register(ClipClassificationROCAUCConfig)
+ @staticmethod
+ def from_config(config: ClipClassificationROCAUCConfig):
+ return ClipClassificationROCAUC(label=config.label)
+
+
+ClipClassificationMetricConfig = Annotated[
+ ClipClassificationAveragePrecisionConfig | ClipClassificationROCAUCConfig,
+ Field(discriminator="name"),
+]
+
+
+def build_clip_metric(config: ClipClassificationMetricConfig):
+ return clip_classification_metrics.build(config)
diff --git a/src/batdetect2/evaluate/metrics/clip_detection.py b/src/batdetect2/evaluate/metrics/clip_detection.py
new file mode 100644
index 00000000..f21bebf4
--- /dev/null
+++ b/src/batdetect2/evaluate/metrics/clip_detection.py
@@ -0,0 +1,186 @@
+from dataclasses import dataclass
+from typing import Annotated, Callable, Dict, Literal, Sequence
+
+import numpy as np
+from pydantic import Field
+from sklearn import metrics
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.core.registries import (
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.evaluate.metrics.common import average_precision
+
+
+@dataclass
+class ClipEval:
+ gt_det: bool
+ score: float
+
+
+ClipDetectionMetric = Callable[[Sequence[ClipEval]], Dict[str, float]]
+
+clip_detection_metrics: Registry[ClipDetectionMetric, []] = Registry(
+ "clip_detection_metric"
+)
+
+
+@add_import_config(clip_detection_metrics)
+class ClipDetectionMetricImportConfig(ImportConfig):
+ """Use any callable as a clip detection metric.
+
+ Set ``name="import"`` and provide a ``target`` pointing to any
+ callable to use it instead of a built-in option.
+ """
+
+ name: Literal["import"] = "import"
+
+
+class ClipDetectionAveragePrecisionConfig(BaseConfig):
+ name: Literal["average_precision"] = "average_precision"
+ label: str = "average_precision"
+
+
+class ClipDetectionAveragePrecision:
+ def __init__(self, label: str = "average_precision"):
+ self.label = label
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Dict[str, float]:
+ y_true = []
+ y_score = []
+
+ for clip_eval in clip_evaluations:
+ y_true.append(clip_eval.gt_det)
+ y_score.append(clip_eval.score)
+
+ score = average_precision(y_true=y_true, y_score=y_score)
+ return {self.label: score}
+
+ @clip_detection_metrics.register(ClipDetectionAveragePrecisionConfig)
+ @staticmethod
+ def from_config(config: ClipDetectionAveragePrecisionConfig):
+ return ClipDetectionAveragePrecision(label=config.label)
+
+
+class ClipDetectionROCAUCConfig(BaseConfig):
+ name: Literal["roc_auc"] = "roc_auc"
+ label: str = "roc_auc"
+
+
+class ClipDetectionROCAUC:
+ def __init__(self, label: str = "roc_auc"):
+ self.label = label
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Dict[str, float]:
+ y_true = []
+ y_score = []
+
+ for clip_eval in clip_evaluations:
+ y_true.append(clip_eval.gt_det)
+ y_score.append(clip_eval.score)
+
+ score = float(metrics.roc_auc_score(y_true=y_true, y_score=y_score))
+ return {self.label: score}
+
+ @clip_detection_metrics.register(ClipDetectionROCAUCConfig)
+ @staticmethod
+ def from_config(config: ClipDetectionROCAUCConfig):
+ return ClipDetectionROCAUC(label=config.label)
+
+
+class ClipDetectionRecallConfig(BaseConfig):
+ name: Literal["recall"] = "recall"
+ threshold: float = 0.5
+ label: str = "recall"
+
+
+class ClipDetectionRecall:
+ def __init__(self, threshold: float, label: str = "recall"):
+ self.threshold = threshold
+ self.label = label
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Dict[str, float]:
+ num_positives = 0
+ true_positives = 0
+
+ for clip_eval in clip_evaluations:
+ if clip_eval.gt_det:
+ num_positives += 1
+
+ if clip_eval.score >= self.threshold and clip_eval.gt_det:
+ true_positives += 1
+
+ if num_positives == 0:
+ return {self.label: np.nan}
+
+ score = true_positives / num_positives
+ return {self.label: score}
+
+ @clip_detection_metrics.register(ClipDetectionRecallConfig)
+ @staticmethod
+ def from_config(config: ClipDetectionRecallConfig):
+ return ClipDetectionRecall(
+ threshold=config.threshold, label=config.label
+ )
+
+
+class ClipDetectionPrecisionConfig(BaseConfig):
+ name: Literal["precision"] = "precision"
+ threshold: float = 0.5
+ label: str = "precision"
+
+
+class ClipDetectionPrecision:
+ def __init__(self, threshold: float, label: str = "precision"):
+ self.threshold = threshold
+ self.label = label
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Dict[str, float]:
+ num_detections = 0
+ true_positives = 0
+ for clip_eval in clip_evaluations:
+ if clip_eval.score >= self.threshold:
+ num_detections += 1
+
+ if clip_eval.score >= self.threshold and clip_eval.gt_det:
+ true_positives += 1
+
+ if num_detections == 0:
+ return {self.label: np.nan}
+
+ score = true_positives / num_detections
+ return {self.label: score}
+
+ @clip_detection_metrics.register(ClipDetectionPrecisionConfig)
+ @staticmethod
+ def from_config(config: ClipDetectionPrecisionConfig):
+ return ClipDetectionPrecision(
+ threshold=config.threshold, label=config.label
+ )
+
+
+ClipDetectionMetricConfig = Annotated[
+ ClipDetectionAveragePrecisionConfig
+ | ClipDetectionROCAUCConfig
+ | ClipDetectionRecallConfig
+ | ClipDetectionPrecisionConfig,
+ Field(discriminator="name"),
+]
+
+
+def build_clip_metric(config: ClipDetectionMetricConfig):
+ return clip_detection_metrics.build(config)
diff --git a/src/batdetect2/evaluate/metrics/common.py b/src/batdetect2/evaluate/metrics/common.py
new file mode 100644
index 00000000..9a4da37e
--- /dev/null
+++ b/src/batdetect2/evaluate/metrics/common.py
@@ -0,0 +1,70 @@
+from typing import Tuple
+
+import numpy as np
+
+__all__ = [
+ "compute_precision_recall",
+ "average_precision",
+]
+
+
+def compute_precision_recall(
+ y_true,
+ y_score,
+ num_positives: int | None = None,
+) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+ y_true = np.array(y_true)
+ y_score = np.array(y_score)
+
+ if num_positives is None:
+ num_positives = y_true.sum()
+
+ # Sort by score
+ sort_ind = np.argsort(y_score)[::-1]
+ y_true_sorted = y_true[sort_ind]
+ y_score_sorted = y_score[sort_ind]
+
+ false_pos_c = np.cumsum(1 - y_true_sorted)
+ true_pos_c = np.cumsum(y_true_sorted)
+
+ recall = true_pos_c / num_positives
+ precision = true_pos_c / np.maximum(
+ true_pos_c + false_pos_c,
+ np.finfo(np.float64).eps,
+ )
+
+ precision[np.isnan(precision)] = 0
+ recall[np.isnan(recall)] = 0
+ return precision, recall, y_score_sorted
+
+
+def average_precision(
+ y_true,
+ y_score,
+ num_positives: int | None = None,
+) -> float:
+ if num_positives == 0:
+ return np.nan
+
+ precision, recall, _ = compute_precision_recall(
+ y_true,
+ y_score,
+ num_positives=num_positives,
+ )
+
+ # pascal 12 way
+ return _average_precision(recall, precision)
+
+
+def _average_precision(
+ recall: np.ndarray,
+ precision: np.ndarray,
+) -> float:
+ # pascal 12 way
+ mprec = np.hstack((0, precision, 0))
+ mrec = np.hstack((0, recall, 1))
+ for ii in range(mprec.shape[0] - 2, -1, -1):
+ mprec[ii] = np.maximum(mprec[ii], mprec[ii + 1])
+ inds = np.where(np.not_equal(mrec[1:], mrec[:-1]))[0] + 1
+ ave_prec = ((mrec[inds] - mrec[inds - 1]) * mprec[inds]).sum()
+ return ave_prec
diff --git a/src/batdetect2/evaluate/metrics/detection.py b/src/batdetect2/evaluate/metrics/detection.py
new file mode 100644
index 00000000..c0ae0c94
--- /dev/null
+++ b/src/batdetect2/evaluate/metrics/detection.py
@@ -0,0 +1,242 @@
+from dataclasses import dataclass
+from typing import (
+ Annotated,
+ Callable,
+ Dict,
+ List,
+ Literal,
+ Sequence,
+)
+
+import numpy as np
+from pydantic import Field
+from sklearn import metrics
+from soundevent import data
+
+from batdetect2.core import (
+ BaseConfig,
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.evaluate.metrics.common import average_precision
+from batdetect2.postprocess.types import Detection
+
+__all__ = [
+ "DetectionMetricConfig",
+ "DetectionMetric",
+ "DetectionMetricImportConfig",
+ "build_detection_metric",
+]
+
+
+@dataclass
+class MatchEval:
+ gt: data.SoundEventAnnotation | None
+ pred: Detection | None
+
+ is_prediction: bool
+ is_ground_truth: bool
+ score: float
+
+
+@dataclass
+class ClipEval:
+ clip: data.Clip
+ matches: List[MatchEval]
+
+
+DetectionMetric = Callable[[Sequence[ClipEval]], Dict[str, float]]
+
+
+detection_metrics: Registry[DetectionMetric, []] = Registry("detection_metric")
+
+
+@add_import_config(detection_metrics)
+class DetectionMetricImportConfig(ImportConfig):
+ """Use any callable as a detection metric.
+
+ Set ``name="import"`` and provide a ``target`` pointing to any
+ callable to use it instead of a built-in option.
+ """
+
+ name: Literal["import"] = "import"
+
+
+class DetectionAveragePrecisionConfig(BaseConfig):
+ name: Literal["average_precision"] = "average_precision"
+ label: str = "average_precision"
+ ignore_non_predictions: bool = True
+
+
+class DetectionAveragePrecision:
+    def __init__(self, label: str, ignore_non_predictions: bool = True):
+        self.ignore_non_predictions = ignore_non_predictions
+        self.label = label
+
+    def __call__(
+        self,
+        clip_evals: Sequence[ClipEval],
+    ) -> Dict[str, float]:
+        y_true = []
+        y_score = []
+        num_positives = 0
+
+        for clip_eval in clip_evals:
+            for m in clip_eval.matches:
+                num_positives += int(m.is_ground_truth)  # count every gt
+
+                # Ignore matches that don't correspond to a prediction
+                if not m.is_prediction and self.ignore_non_predictions:
+                    continue  # gt was already counted, so recall stays honest
+
+                y_true.append(m.is_ground_truth)
+                y_score.append(m.score)
+
+        ap = average_precision(y_true, y_score, num_positives=num_positives)
+        return {self.label: float(ap)}
+
+    @detection_metrics.register(DetectionAveragePrecisionConfig)
+    @staticmethod
+    def from_config(config: DetectionAveragePrecisionConfig):
+        return DetectionAveragePrecision(
+            label=config.label,
+            ignore_non_predictions=config.ignore_non_predictions,
+        )
+
+
+class DetectionROCAUCConfig(BaseConfig):
+ name: Literal["roc_auc"] = "roc_auc"
+ label: str = "roc_auc"
+ ignore_non_predictions: bool = True
+
+
+class DetectionROCAUC:
+ def __init__(
+ self,
+ label: str = "roc_auc",
+ ignore_non_predictions: bool = True,
+ ):
+ self.label = label
+ self.ignore_non_predictions = ignore_non_predictions
+
+ def __call__(self, clip_evals: Sequence[ClipEval]) -> Dict[str, float]:
+ y_true: List[bool] = []
+ y_score: List[float] = []
+
+ for clip_eval in clip_evals:
+ for m in clip_eval.matches:
+ if not m.is_prediction and self.ignore_non_predictions:
+ # Ignore matches that don't correspond to a prediction
+ continue
+
+ y_true.append(m.is_ground_truth)
+ y_score.append(m.score)
+
+ if len(y_true) == 0:
+ return {self.label: np.nan}
+
+ score = float(metrics.roc_auc_score(y_true, y_score))
+ return {self.label: score}
+
+ @detection_metrics.register(DetectionROCAUCConfig)
+ @staticmethod
+ def from_config(config: DetectionROCAUCConfig):
+ return DetectionROCAUC(
+ label=config.label,
+ ignore_non_predictions=config.ignore_non_predictions,
+ )
+
+
+class DetectionRecallConfig(BaseConfig):
+ name: Literal["recall"] = "recall"
+ label: str = "recall"
+ threshold: float = 0.5
+
+
+class DetectionRecall:
+ def __init__(self, threshold: float, label: str = "recall"):
+ self.label = label
+ self.threshold = threshold
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Dict[str, float]:
+ num_positives = 0
+ true_positives = 0
+
+ for clip_eval in clip_evaluations:
+ for m in clip_eval.matches:
+ if m.is_ground_truth:
+ num_positives += 1
+
+ if m.score >= self.threshold and m.is_ground_truth:
+ true_positives += 1
+
+ if num_positives == 0:
+ return {self.label: np.nan}
+
+ score = true_positives / num_positives
+ return {self.label: score}
+
+ @detection_metrics.register(DetectionRecallConfig)
+ @staticmethod
+ def from_config(config: DetectionRecallConfig):
+ return DetectionRecall(threshold=config.threshold, label=config.label)
+
+
+class DetectionPrecisionConfig(BaseConfig):
+ name: Literal["precision"] = "precision"
+ label: str = "precision"
+ threshold: float = 0.5
+
+
+class DetectionPrecision:
+ def __init__(self, threshold: float, label: str = "precision"):
+ self.threshold = threshold
+ self.label = label
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Dict[str, float]:
+ num_detections = 0
+ true_positives = 0
+
+ for clip_eval in clip_evaluations:
+ for m in clip_eval.matches:
+ is_detection = m.score >= self.threshold
+
+ if is_detection:
+ num_detections += 1
+
+ if is_detection and m.is_ground_truth:
+ true_positives += 1
+
+ if num_detections == 0:
+ return {self.label: np.nan}
+
+ score = true_positives / num_detections
+ return {self.label: score}
+
+ @detection_metrics.register(DetectionPrecisionConfig)
+ @staticmethod
+ def from_config(config: DetectionPrecisionConfig):
+ return DetectionPrecision(
+ threshold=config.threshold,
+ label=config.label,
+ )
+
+
+DetectionMetricConfig = Annotated[
+ DetectionAveragePrecisionConfig
+ | DetectionROCAUCConfig
+ | DetectionRecallConfig
+ | DetectionPrecisionConfig,
+ Field(discriminator="name"),
+]
+
+
+def build_detection_metric(config: DetectionMetricConfig):
+ return detection_metrics.build(config)
diff --git a/src/batdetect2/evaluate/metrics/top_class.py b/src/batdetect2/evaluate/metrics/top_class.py
new file mode 100644
index 00000000..e131e6f6
--- /dev/null
+++ b/src/batdetect2/evaluate/metrics/top_class.py
@@ -0,0 +1,386 @@
+from dataclasses import dataclass
+from typing import (
+ Annotated,
+ Callable,
+ Dict,
+ List,
+ Literal,
+ Sequence,
+)
+
+import numpy as np
+from pydantic import Field
+from sklearn import metrics, preprocessing
+from soundevent import data
+
+from batdetect2.core import (
+ BaseConfig,
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.evaluate.metrics.common import average_precision
+from batdetect2.postprocess.types import Detection
+from batdetect2.targets.types import TargetProtocol
+
+__all__ = [
+ "TopClassMetricConfig",
+ "TopClassMetric",
+ "TopClassMetricImportConfig",
+ "build_top_class_metric",
+]
+
+
+@dataclass
+class MatchEval:
+ clip: data.Clip
+ gt: data.SoundEventAnnotation | None
+ pred: Detection | None
+
+ is_ground_truth: bool
+ is_generic: bool
+ is_prediction: bool
+ pred_class: str | None
+ true_class: str | None
+ score: float
+
+
+@dataclass
+class ClipEval:
+ clip: data.Clip
+ matches: List[MatchEval]
+
+
+TopClassMetric = Callable[[Sequence[ClipEval]], Dict[str, float]]
+
+
+top_class_metrics: Registry[TopClassMetric, []] = Registry("top_class_metric")
+
+
+@add_import_config(top_class_metrics)
+class TopClassMetricImportConfig(ImportConfig):
+ """Use any callable as a top-class metric.
+
+ Set ``name="import"`` and provide a ``target`` pointing to any
+ callable to use it instead of a built-in option.
+ """
+
+ name: Literal["import"] = "import"
+
+
+class TopClassAveragePrecisionConfig(BaseConfig):
+ name: Literal["average_precision"] = "average_precision"
+ label: str = "average_precision"
+ ignore_generic: bool = True
+ ignore_non_predictions: bool = True
+
+
+class TopClassAveragePrecision:
+    def __init__(
+        self,
+        ignore_generic: bool = True,
+        ignore_non_predictions: bool = True,
+        label: str = "average_precision",
+    ):
+        self.ignore_generic = ignore_generic
+        self.ignore_non_predictions = ignore_non_predictions
+        self.label = label
+
+    def __call__(
+        self,
+        clip_evals: Sequence[ClipEval],
+    ) -> Dict[str, float]:
+        y_true = []
+        y_score = []
+        num_positives = 0
+
+        for clip_eval in clip_evals:
+            for m in clip_eval.matches:
+                if m.is_generic and self.ignore_generic:
+                    # Ignore gt sounds with unknown class
+                    continue
+
+                num_positives += int(m.is_ground_truth)
+
+                if not m.is_prediction and self.ignore_non_predictions:
+                    # Ignore non predictions
+                    continue
+
+                y_true.append(m.pred_class == m.true_class)
+                y_score.append(m.score)
+
+        score = average_precision(y_true, y_score, num_positives=num_positives)
+        return {self.label: float(score)}  # cast, matching the Detection AP
+
+    @top_class_metrics.register(TopClassAveragePrecisionConfig)
+    @staticmethod
+    def from_config(config: TopClassAveragePrecisionConfig):
+        return TopClassAveragePrecision(
+            ignore_generic=config.ignore_generic,
+            ignore_non_predictions=config.ignore_non_predictions,
+            label=config.label)
+
+
+class TopClassROCAUCConfig(BaseConfig):
+ name: Literal["roc_auc"] = "roc_auc"
+ ignore_generic: bool = True
+ ignore_non_predictions: bool = True
+ label: str = "roc_auc"
+
+
+class TopClassROCAUC:
+    def __init__(
+        self,
+        ignore_generic: bool = True,
+        ignore_non_predictions: bool = True,
+        label: str = "roc_auc",
+    ):
+        self.ignore_generic = ignore_generic
+        self.ignore_non_predictions = ignore_non_predictions
+        self.label = label
+
+    def __call__(self, clip_evals: Sequence[ClipEval]) -> Dict[str, float]:
+        y_true: List[bool] = []
+        y_score: List[float] = []
+
+        for clip_eval in clip_evals:
+            for m in clip_eval.matches:
+                if m.is_generic and self.ignore_generic:
+                    continue  # Ignore gt sounds with unknown class
+                if not m.is_prediction and self.ignore_non_predictions:
+                    continue  # Ignore non predictions
+                y_true.append(m.pred_class == m.true_class)
+                y_score.append(m.score)
+
+        if len(y_true) == 0:
+            # roc_auc_score raises on empty input; mirror DetectionROCAUC
+            return {self.label: np.nan}
+
+        score = float(metrics.roc_auc_score(y_true, y_score))
+        return {self.label: score}
+
+    @top_class_metrics.register(TopClassROCAUCConfig)
+    @staticmethod
+    def from_config(config: TopClassROCAUCConfig):
+        return TopClassROCAUC(
+            ignore_generic=config.ignore_generic,
+            ignore_non_predictions=config.ignore_non_predictions,
+            label=config.label)
+
+
+class TopClassRecallConfig(BaseConfig):
+ name: Literal["recall"] = "recall"
+ threshold: float = 0.5
+ label: str = "recall"
+
+
+class TopClassRecall:
+ def __init__(self, threshold: float, label: str = "recall"):
+ self.threshold = threshold
+ self.label = label
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Dict[str, float]:
+ num_positives = 0
+ true_positives = 0
+
+ for clip_eval in clip_evaluations:
+ for m in clip_eval.matches:
+ if m.is_ground_truth:
+ num_positives += 1
+
+ if m.score >= self.threshold and m.pred_class == m.true_class:
+ true_positives += 1
+
+ if num_positives == 0:
+ return {self.label: np.nan}
+
+ score = true_positives / num_positives
+ return {self.label: score}
+
+ @top_class_metrics.register(TopClassRecallConfig)
+ @staticmethod
+ def from_config(config: TopClassRecallConfig):
+ return TopClassRecall(
+ threshold=config.threshold,
+ label=config.label,
+ )
+
+
+class TopClassPrecisionConfig(BaseConfig):
+ name: Literal["precision"] = "precision"
+ threshold: float = 0.5
+ label: str = "precision"
+
+
+class TopClassPrecision:
+ def __init__(self, threshold: float, label: str = "precision"):
+ self.threshold = threshold
+ self.label = label
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Dict[str, float]:
+ num_detections = 0
+ true_positives = 0
+
+ for clip_eval in clip_evaluations:
+ for m in clip_eval.matches:
+ is_detection = m.score >= self.threshold
+
+ if is_detection:
+ num_detections += 1
+
+ if is_detection and m.pred_class == m.true_class:
+ true_positives += 1
+
+ if num_detections == 0:
+ return {self.label: np.nan}
+
+ score = true_positives / num_detections
+ return {self.label: score}
+
+ @top_class_metrics.register(TopClassPrecisionConfig)
+ @staticmethod
+ def from_config(config: TopClassPrecisionConfig):
+ return TopClassPrecision(
+ threshold=config.threshold,
+ label=config.label,
+ )
+
+
+class BalancedAccuracyConfig(BaseConfig):
+ name: Literal["balanced_accuracy"] = "balanced_accuracy"
+ label: str = "balanced_accuracy"
+ exclude_noise: bool = False
+ noise_class: str = "noise"
+
+
+class BalancedAccuracy:
+    def __init__(
+        self,
+        exclude_noise: bool = True,  # NOTE(review): config default is False
+        noise_class: str = "noise",
+        label: str = "balanced_accuracy",
+    ):
+        self.exclude_noise = exclude_noise
+        self.noise_class = noise_class
+        self.label = label
+
+    def __call__(
+        self,
+        clip_evaluations: Sequence[ClipEval],
+    ) -> Dict[str, float]:
+        y_true: List[str] = []
+        y_pred: List[str] = []
+
+        for clip_eval in clip_evaluations:
+            for m in clip_eval.matches:
+                if m.is_generic:
+                    # Ignore matches that correspond to a sound event
+                    # with unknown class
+                    continue
+                if not m.is_ground_truth and self.exclude_noise:
+                    # Ignore predictions that were not matched to a
+                    # ground truth
+                    continue
+                if m.pred_class is None and self.exclude_noise:
+                    # Ignore non-predictions
+                    continue
+                y_true.append(m.true_class or self.noise_class)
+                y_pred.append(m.pred_class or self.noise_class)
+
+        if len(y_true) == 0:
+            # balanced_accuracy_score raises on empty input
+            return {self.label: np.nan}
+
+        encoder = preprocessing.LabelEncoder()
+        encoder.fit(list(set(y_true) | set(y_pred)))
+        y_true_enc = encoder.transform(y_true)
+        y_pred_enc = encoder.transform(y_pred)
+        score = metrics.balanced_accuracy_score(y_true_enc, y_pred_enc)
+        return {self.label: float(score)}
+
+    @top_class_metrics.register(BalancedAccuracyConfig)
+    @staticmethod
+    def from_config(config: BalancedAccuracyConfig):
+        return BalancedAccuracy(
+            exclude_noise=config.exclude_noise,
+            noise_class=config.noise_class,
+            label=config.label,
+        )
+
+
+TopClassMetricConfig = Annotated[
+ TopClassAveragePrecisionConfig
+ | TopClassROCAUCConfig
+ | TopClassRecallConfig
+ | TopClassPrecisionConfig
+ | BalancedAccuracyConfig,
+ Field(discriminator="name"),
+]
+
+
+def build_top_class_metric(config: TopClassMetricConfig):
+ return top_class_metrics.build(config)
+
+
+def compute_confusion_matrix(
+    clip_evaluations: Sequence[ClipEval],
+    targets: TargetProtocol,
+    threshold: float = 0.2,
+    normalize: Literal["true", "pred", "all", "none"] = "true",
+    exclude_generic: bool = True,
+    exclude_false_positives: bool = True,
+    exclude_false_negatives: bool = True,
+    noise_class: str = "noise",
+):
+    y_true: List[str] = []
+    y_pred: List[str] = []
+
+    for clip_eval in clip_evaluations:
+        for m in clip_eval.matches:
+            true_class = m.true_class
+            pred_class = m.pred_class
+
+            if not m.is_prediction and exclude_false_negatives:
+                # Ignore matches that don't correspond to a prediction
+                continue
+
+            if not m.is_ground_truth and exclude_false_positives:
+                # Ignore matches that don't correspond to a ground truth
+                continue
+
+            if m.score < threshold:
+                if exclude_false_negatives:
+                    continue
+
+                pred_class = noise_class
+
+            if m.is_generic:
+                if exclude_generic:
+                    # Ignore gt sounds with unknown class
+                    continue
+
+                true_class = targets.detection_class_name
+
+            y_true.append(true_class or noise_class)
+            y_pred.append(pred_class or noise_class)
+
+    labels = sorted(targets.class_names)
+
+    if not exclude_generic:
+        labels.append(targets.detection_class_name)
+
+    if not exclude_false_positives or not exclude_false_negatives:
+        labels.append(noise_class)
+
+    # sklearn expects None (not the string "none") to skip normalization
+    return metrics.confusion_matrix(
+        y_true,
+        y_pred,
+        labels=labels,
+        normalize=None if normalize == "none" else normalize,
+    ), labels
diff --git a/batdetect2/train/__init__.py b/src/batdetect2/evaluate/plots/__init__.py
similarity index 100%
rename from batdetect2/train/__init__.py
rename to src/batdetect2/evaluate/plots/__init__.py
diff --git a/src/batdetect2/evaluate/plots/base.py b/src/batdetect2/evaluate/plots/base.py
new file mode 100644
index 00000000..ee4028c5
--- /dev/null
+++ b/src/batdetect2/evaluate/plots/base.py
@@ -0,0 +1,52 @@
+import matplotlib.pyplot as plt
+from matplotlib.figure import Figure
+
+from batdetect2.core import BaseConfig
+from batdetect2.targets.types import TargetProtocol
+
+
+class BasePlotConfig(BaseConfig):
+ label: str = "plot"
+ theme: str = "default"
+ title: str | None = None
+ figsize: tuple[int, int] = (10, 10)
+ dpi: int = 100
+
+
+class BasePlot:
+    def __init__(
+        self,
+        targets: TargetProtocol,
+        label: str = "plot",
+        figsize: tuple[int, int] = (10, 10),
+        title: str | None = None,
+        dpi: int = 100,
+        theme: str = "default",
+    ):
+        self.targets = targets
+        self.label = label
+        self.figsize = figsize
+        self.dpi = dpi
+        self.theme = theme
+        self.title = title
+
+    def create_figure(self) -> Figure:
+        plt.style.use(self.theme)  # NOTE(review): mutates global mpl style
+        fig = plt.figure(figsize=self.figsize, dpi=self.dpi)
+
+        if self.title is not None:
+            fig.suptitle(self.title)
+
+        return fig
+
+    @classmethod
+    def build(cls, config: BasePlotConfig, targets: TargetProtocol, **kwargs):
+        return cls(  # kwargs forward subclass-specific constructor options
+            targets=targets,
+            figsize=config.figsize,
+            dpi=config.dpi,
+            theme=config.theme,
+            label=config.label,
+            title=config.title,
+            **kwargs,
+        )
diff --git a/src/batdetect2/evaluate/plots/classification.py b/src/batdetect2/evaluate/plots/classification.py
new file mode 100644
index 00000000..400dbbcc
--- /dev/null
+++ b/src/batdetect2/evaluate/plots/classification.py
@@ -0,0 +1,348 @@
+from typing import (
+ Annotated,
+ Callable,
+ Iterable,
+ Literal,
+ Sequence,
+ Tuple,
+)
+
+import matplotlib.pyplot as plt
+from matplotlib.figure import Figure
+from pydantic import Field
+from sklearn import metrics
+
+from batdetect2.core import ImportConfig, Registry, add_import_config
+from batdetect2.evaluate.metrics.classification import (
+ ClipEval,
+ _extract_per_class_metric_data,
+ compute_precision_recall_curves,
+)
+from batdetect2.evaluate.plots.base import BasePlot, BasePlotConfig
+from batdetect2.plotting.metrics import (
+ plot_pr_curve,
+ plot_pr_curves,
+ plot_roc_curve,
+ plot_roc_curves,
+ plot_threshold_precision_curve,
+ plot_threshold_precision_curves,
+ plot_threshold_recall_curve,
+ plot_threshold_recall_curves,
+)
+from batdetect2.targets.types import TargetProtocol
+
+ClassificationPlotter = Callable[
+ [Sequence[ClipEval]], Iterable[Tuple[str, Figure]]
+]
+
+classification_plots: Registry[ClassificationPlotter, [TargetProtocol]] = (
+ Registry("classification_plot")
+)
+
+
+@add_import_config(classification_plots)
+class ClassificationPlotImportConfig(ImportConfig):
+ """Use any callable as a classification plot.
+
+ Set ``name="import"`` and provide a ``target`` pointing to any
+ callable to use it instead of a built-in option.
+ """
+
+ name: Literal["import"] = "import"
+
+
+class PRCurveConfig(BasePlotConfig):
+ name: Literal["pr_curve"] = "pr_curve"
+ label: str = "pr_curve"
+ title: str | None = "Classification Precision-Recall Curve"
+ ignore_non_predictions: bool = True
+ ignore_generic: bool = True
+ separate_figures: bool = False
+
+
+class PRCurve(BasePlot):
+ def __init__(
+ self,
+ *args,
+ ignore_non_predictions: bool = True,
+ ignore_generic: bool = True,
+ separate_figures: bool = False,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.ignore_non_predictions = ignore_non_predictions
+ self.ignore_generic = ignore_generic
+ self.separate_figures = separate_figures
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Iterable[Tuple[str, Figure]]:
+ data = compute_precision_recall_curves(
+ clip_evaluations,
+ ignore_non_predictions=self.ignore_non_predictions,
+ ignore_generic=self.ignore_generic,
+ )
+
+ if not self.separate_figures:
+ fig = self.create_figure()
+ ax = fig.subplots()
+ plot_pr_curves(data, ax=ax)
+ yield self.label, fig
+ return
+
+ for class_name, (precision, recall, thresholds) in data.items():
+ fig = self.create_figure()
+ ax = fig.subplots()
+
+ ax = plot_pr_curve(precision, recall, thresholds, ax=ax)
+ ax.set_title(class_name)
+ yield f"{self.label}/{class_name}", fig
+ plt.close(fig)
+
+ @classification_plots.register(PRCurveConfig)
+ @staticmethod
+ def from_config(config: PRCurveConfig, targets: TargetProtocol):
+ return PRCurve.build(
+ config=config,
+ targets=targets,
+ ignore_non_predictions=config.ignore_non_predictions,
+ ignore_generic=config.ignore_generic,
+ separate_figures=config.separate_figures,
+ )
+
+
+class ThresholdPrecisionCurveConfig(BasePlotConfig):
+ name: Literal["threshold_precision_curve"] = "threshold_precision_curve"
+ label: str = "threshold_precision_curve"
+ title: str | None = "Classification Threshold-Precision Curve"
+ ignore_non_predictions: bool = True
+ ignore_generic: bool = True
+ separate_figures: bool = False
+
+
+class ThresholdPrecisionCurve(BasePlot):
+ def __init__(
+ self,
+ *args,
+ ignore_non_predictions: bool = True,
+ ignore_generic: bool = True,
+ separate_figures: bool = False,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.ignore_non_predictions = ignore_non_predictions
+ self.ignore_generic = ignore_generic
+ self.separate_figures = separate_figures
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Iterable[Tuple[str, Figure]]:
+ data = compute_precision_recall_curves(
+ clip_evaluations,
+ ignore_non_predictions=self.ignore_non_predictions,
+ ignore_generic=self.ignore_generic,
+ )
+
+ if not self.separate_figures:
+ fig = self.create_figure()
+ ax = fig.subplots()
+
+ plot_threshold_precision_curves(data, ax=ax)
+
+ yield self.label, fig
+
+ return
+
+ for class_name, (precision, _, thresholds) in data.items():
+ fig = self.create_figure()
+ ax = fig.subplots()
+
+ ax = plot_threshold_precision_curve(
+ thresholds,
+ precision,
+ ax=ax,
+ )
+
+ ax.set_title(class_name)
+
+ yield f"{self.label}/{class_name}", fig
+
+ plt.close(fig)
+
+ @classification_plots.register(ThresholdPrecisionCurveConfig)
+ @staticmethod
+ def from_config(
+ config: ThresholdPrecisionCurveConfig, targets: TargetProtocol
+ ):
+ return ThresholdPrecisionCurve.build(
+ config=config,
+ targets=targets,
+ ignore_non_predictions=config.ignore_non_predictions,
+ ignore_generic=config.ignore_generic,
+ separate_figures=config.separate_figures,
+ )
+
+
+class ThresholdRecallCurveConfig(BasePlotConfig):
+ name: Literal["threshold_recall_curve"] = "threshold_recall_curve"
+ label: str = "threshold_recall_curve"
+ title: str | None = "Classification Threshold-Recall Curve"
+ ignore_non_predictions: bool = True
+ ignore_generic: bool = True
+ separate_figures: bool = False
+
+
+class ThresholdRecallCurve(BasePlot):
+ def __init__(
+ self,
+ *args,
+ ignore_non_predictions: bool = True,
+ ignore_generic: bool = True,
+ separate_figures: bool = False,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.ignore_non_predictions = ignore_non_predictions
+ self.ignore_generic = ignore_generic
+ self.separate_figures = separate_figures
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Iterable[Tuple[str, Figure]]:
+ data = compute_precision_recall_curves(
+ clip_evaluations,
+ ignore_non_predictions=self.ignore_non_predictions,
+ ignore_generic=self.ignore_generic,
+ )
+
+ if not self.separate_figures:
+ fig = self.create_figure()
+ ax = fig.subplots()
+
+ plot_threshold_recall_curves(data, ax=ax, add_legend=True)
+
+ yield self.label, fig
+
+ return
+
+ for class_name, (_, recall, thresholds) in data.items():
+ fig = self.create_figure()
+ ax = fig.subplots()
+
+ ax = plot_threshold_recall_curve(
+ thresholds,
+ recall,
+ ax=ax,
+ )
+
+ ax.set_title(class_name)
+
+ yield f"{self.label}/{class_name}", fig
+
+ plt.close(fig)
+
+ @classification_plots.register(ThresholdRecallCurveConfig)
+ @staticmethod
+ def from_config(
+ config: ThresholdRecallCurveConfig, targets: TargetProtocol
+ ):
+ return ThresholdRecallCurve.build(
+ config=config,
+ targets=targets,
+ ignore_non_predictions=config.ignore_non_predictions,
+ ignore_generic=config.ignore_generic,
+ separate_figures=config.separate_figures,
+ )
+
+
+class ROCCurveConfig(BasePlotConfig):
+ name: Literal["roc_curve"] = "roc_curve"
+ label: str = "roc_curve"
+ title: str | None = "Classification ROC Curve"
+ ignore_non_predictions: bool = True
+ ignore_generic: bool = True
+ separate_figures: bool = False
+
+
+class ROCCurve(BasePlot):
+ def __init__(
+ self,
+ *args,
+ ignore_non_predictions: bool = True,
+ ignore_generic: bool = True,
+ separate_figures: bool = False,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.ignore_non_predictions = ignore_non_predictions
+ self.ignore_generic = ignore_generic
+ self.separate_figures = separate_figures
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Iterable[Tuple[str, Figure]]:
+ y_true, y_score, _ = _extract_per_class_metric_data(
+ clip_evaluations,
+ ignore_non_predictions=self.ignore_non_predictions,
+ ignore_generic=self.ignore_generic,
+ )
+
+ data = {
+ class_name: metrics.roc_curve(
+ y_true[class_name],
+ y_score[class_name],
+ )
+ for class_name in self.targets.class_names
+ }
+
+ if not self.separate_figures:
+ fig = self.create_figure()
+ ax = fig.subplots()
+
+ plot_roc_curves(data, ax=ax)
+
+ yield self.label, fig
+
+ return
+
+ for class_name, (fpr, tpr, thresholds) in data.items():
+ fig = self.create_figure()
+ ax = fig.subplots()
+
+ ax = plot_roc_curve(fpr, tpr, thresholds, ax=ax)
+ ax.set_title(class_name)
+
+ yield f"{self.label}/{class_name}", fig
+
+ plt.close(fig)
+
+ @classification_plots.register(ROCCurveConfig)
+ @staticmethod
+ def from_config(config: ROCCurveConfig, targets: TargetProtocol):
+ return ROCCurve.build(
+ config=config,
+ targets=targets,
+ ignore_non_predictions=config.ignore_non_predictions,
+ ignore_generic=config.ignore_generic,
+ separate_figures=config.separate_figures,
+ )
+
+
+ClassificationPlotConfig = Annotated[
+ PRCurveConfig
+ | ROCCurveConfig
+ | ThresholdPrecisionCurveConfig
+ | ThresholdRecallCurveConfig,
+ Field(discriminator="name"),
+]
+
+
+def build_classification_plotter(
+ config: ClassificationPlotConfig,
+ targets: TargetProtocol,
+) -> ClassificationPlotter:
+ return classification_plots.build(config, targets)
diff --git a/src/batdetect2/evaluate/plots/clip_classification.py b/src/batdetect2/evaluate/plots/clip_classification.py
new file mode 100644
index 00000000..481b9a18
--- /dev/null
+++ b/src/batdetect2/evaluate/plots/clip_classification.py
@@ -0,0 +1,196 @@
+from typing import (
+ Annotated,
+ Callable,
+ Iterable,
+ Literal,
+ Sequence,
+ Tuple,
+)
+
+import matplotlib.pyplot as plt
+from matplotlib.figure import Figure
+from pydantic import Field
+from sklearn import metrics
+
+from batdetect2.core import ImportConfig, Registry, add_import_config
+from batdetect2.evaluate.metrics.clip_classification import ClipEval
+from batdetect2.evaluate.metrics.common import compute_precision_recall
+from batdetect2.evaluate.plots.base import BasePlot, BasePlotConfig
+from batdetect2.plotting.metrics import (
+ plot_pr_curve,
+ plot_pr_curves,
+ plot_roc_curve,
+ plot_roc_curves,
+)
+from batdetect2.targets.types import TargetProtocol
+
+__all__ = [
+ "ClipClassificationPlotConfig",
+ "ClipClassificationPlotImportConfig",
+ "ClipClassificationPlotter",
+ "build_clip_classification_plotter",
+]
+
+ClipClassificationPlotter = Callable[
+ [Sequence[ClipEval]], Iterable[Tuple[str, Figure]]
+]
+
+clip_classification_plots: Registry[
+ ClipClassificationPlotter, [TargetProtocol]
+] = Registry("clip_classification_plot")
+
+
+@add_import_config(clip_classification_plots)
+class ClipClassificationPlotImportConfig(ImportConfig):
+ """Use any callable as a clip classification plot.
+
+ Set ``name="import"`` and provide a ``target`` pointing to any
+ callable to use it instead of a built-in option.
+ """
+
+ name: Literal["import"] = "import"
+
+
+class PRCurveConfig(BasePlotConfig):
+ name: Literal["pr_curve"] = "pr_curve"
+ label: str = "pr_curve"
+ title: str | None = "Clip Classification Precision-Recall Curve"
+ separate_figures: bool = False
+
+
+class PRCurve(BasePlot):
+ def __init__(
+ self,
+ *args,
+ separate_figures: bool = False,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.separate_figures = separate_figures
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Iterable[Tuple[str, Figure]]:
+ data = {}
+
+ for class_name in self.targets.class_names:
+ y_true = [class_name in c.true_classes for c in clip_evaluations]
+ y_score = [
+ c.class_scores.get(class_name, 0) for c in clip_evaluations
+ ]
+
+ precision, recall, thresholds = compute_precision_recall(
+ y_true,
+ y_score,
+ )
+
+ data[class_name] = (precision, recall, thresholds)
+
+ if not self.separate_figures:
+ fig = self.create_figure()
+ ax = fig.subplots()
+
+ plot_pr_curves(data, ax=ax)
+
+ yield self.label, fig
+
+ return
+
+ for class_name, (precision, recall, thresholds) in data.items():
+ fig = self.create_figure()
+ ax = fig.subplots()
+
+ ax = plot_pr_curve(precision, recall, thresholds, ax=ax)
+ ax.set_title(class_name)
+
+ yield f"{self.label}/{class_name}", fig
+
+ plt.close(fig)
+
+ @clip_classification_plots.register(PRCurveConfig)
+ @staticmethod
+ def from_config(config: PRCurveConfig, targets: TargetProtocol):
+ return PRCurve.build(
+ config=config,
+ targets=targets,
+ separate_figures=config.separate_figures,
+ )
+
+
+class ROCCurveConfig(BasePlotConfig):
+ name: Literal["roc_curve"] = "roc_curve"
+ label: str = "roc_curve"
+ title: str | None = "Clip Classification ROC Curve"
+ separate_figures: bool = False
+
+
+class ROCCurve(BasePlot):
+ def __init__(
+ self,
+ *args,
+ separate_figures: bool = False,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.separate_figures = separate_figures
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Iterable[Tuple[str, Figure]]:
+ data = {}
+
+ for class_name in self.targets.class_names:
+ y_true = [class_name in c.true_classes for c in clip_evaluations]
+ y_score = [
+ c.class_scores.get(class_name, 0) for c in clip_evaluations
+ ]
+
+ fpr, tpr, thresholds = metrics.roc_curve(
+ y_true,
+ y_score,
+ )
+
+ data[class_name] = (fpr, tpr, thresholds)
+
+ if not self.separate_figures:
+ fig = self.create_figure()
+ ax = fig.subplots()
+ plot_roc_curves(data, ax=ax)
+ yield self.label, fig
+
+ return
+
+ for class_name, (fpr, tpr, thresholds) in data.items():
+ fig = self.create_figure()
+ ax = fig.subplots()
+
+ ax = plot_roc_curve(fpr, tpr, thresholds, ax=ax)
+ ax.set_title(class_name)
+
+ yield f"{self.label}/{class_name}", fig
+
+ plt.close(fig)
+
+ @clip_classification_plots.register(ROCCurveConfig)
+ @staticmethod
+ def from_config(config: ROCCurveConfig, targets: TargetProtocol):
+ return ROCCurve.build(
+ config=config,
+ targets=targets,
+ separate_figures=config.separate_figures,
+ )
+
+
+ClipClassificationPlotConfig = Annotated[
+ PRCurveConfig | ROCCurveConfig,
+ Field(discriminator="name"),
+]
+
+
+def build_clip_classification_plotter(
+ config: ClipClassificationPlotConfig,
+ targets: TargetProtocol,
+) -> ClipClassificationPlotter:
+ return clip_classification_plots.build(config, targets)
diff --git a/src/batdetect2/evaluate/plots/clip_detection.py b/src/batdetect2/evaluate/plots/clip_detection.py
new file mode 100644
index 00000000..fc8a6b89
--- /dev/null
+++ b/src/batdetect2/evaluate/plots/clip_detection.py
@@ -0,0 +1,169 @@
+from typing import (
+ Annotated,
+ Callable,
+ Iterable,
+ Literal,
+ Sequence,
+ Tuple,
+)
+
+import pandas as pd
+import seaborn as sns
+from matplotlib.figure import Figure
+from pydantic import Field
+from sklearn import metrics
+
+from batdetect2.core import ImportConfig, Registry, add_import_config
+from batdetect2.evaluate.metrics.clip_detection import ClipEval
+from batdetect2.evaluate.metrics.common import compute_precision_recall
+from batdetect2.evaluate.plots.base import BasePlot, BasePlotConfig
+from batdetect2.plotting.metrics import plot_pr_curve, plot_roc_curve
+from batdetect2.targets.types import TargetProtocol
+
+__all__ = [
+ "ClipDetectionPlotConfig",
+ "ClipDetectionPlotImportConfig",
+ "ClipDetectionPlotter",
+ "build_clip_detection_plotter",
+]
+
+ClipDetectionPlotter = Callable[
+ [Sequence[ClipEval]], Iterable[Tuple[str, Figure]]
+]
+
+
+# Registry mapping plot config types to their builder functions.
+clip_detection_plots: Registry[ClipDetectionPlotter, [TargetProtocol]] = (
+    Registry("clip_detection_plot")
+)
+
+
+@add_import_config(clip_detection_plots)
+class ClipDetectionPlotImportConfig(ImportConfig):
+    """Use any callable as a clip detection plot.
+
+    Set ``name="import"`` and provide a ``target`` pointing to any
+    callable to use it instead of a built-in option.
+    """
+
+    # Discriminator value selecting this config in the union.
+    name: Literal["import"] = "import"
+
+
+class PRCurveConfig(BasePlotConfig):
+    """Configuration for the clip-detection precision-recall plot."""
+
+    name: Literal["pr_curve"] = "pr_curve"
+    label: str = "pr_curve"
+    title: str | None = "Clip Detection Precision-Recall Curve"
+
+
+class PRCurve(BasePlot):
+    """Plot a precision-recall curve for clip-level detection."""
+
+    def __call__(
+        self,
+        clip_evaluations: Sequence[ClipEval],
+    ) -> Iterable[Tuple[str, Figure]]:
+        # One label/score pair per clip: gt_det marks whether the clip
+        # truly contains a sound, score is the clip-level confidence.
+        y_true = [c.gt_det for c in clip_evaluations]
+        y_score = [c.score for c in clip_evaluations]
+
+        precision, recall, thresholds = compute_precision_recall(
+            y_true,
+            y_score,
+        )
+
+        fig = self.create_figure()
+        ax = fig.subplots()
+        plot_pr_curve(precision, recall, thresholds, ax=ax)
+        yield self.label, fig
+
+    @clip_detection_plots.register(PRCurveConfig)
+    @staticmethod
+    def from_config(config: PRCurveConfig, targets: TargetProtocol):
+        return PRCurve.build(
+            config=config,
+            targets=targets,
+        )
+
+
+class ROCCurveConfig(BasePlotConfig):
+    """Configuration for the clip-detection ROC curve plot."""
+
+    name: Literal["roc_curve"] = "roc_curve"
+    label: str = "roc_curve"
+    title: str | None = "Clip Detection ROC Curve"
+
+
+class ROCCurve(BasePlot):
+    """Plot a ROC curve for clip-level detection."""
+
+    def __call__(
+        self,
+        clip_evaluations: Sequence[ClipEval],
+    ) -> Iterable[Tuple[str, Figure]]:
+        # One label/score pair per clip (see PRCurve above).
+        y_true = [c.gt_det for c in clip_evaluations]
+        y_score = [c.score for c in clip_evaluations]
+
+        fpr, tpr, thresholds = metrics.roc_curve(
+            y_true,
+            y_score,
+        )
+
+        fig = self.create_figure()
+        ax = fig.subplots()
+        plot_roc_curve(fpr, tpr, thresholds, ax=ax)
+        yield self.label, fig
+
+    @clip_detection_plots.register(ROCCurveConfig)
+    @staticmethod
+    def from_config(config: ROCCurveConfig, targets: TargetProtocol):
+        return ROCCurve.build(
+            config=config,
+            targets=targets,
+        )
+
+
+class ScoreDistributionPlotConfig(BasePlotConfig):
+    """Configuration for the clip-detection score histogram."""
+
+    name: Literal["score_distribution"] = "score_distribution"
+    label: str = "score_distribution"
+    title: str | None = "Clip Detection Score Distribution"
+
+
+class ScoreDistributionPlot(BasePlot):
+    """Histogram of clip scores, split by ground-truth presence."""
+
+    def __call__(
+        self,
+        clip_evaluations: Sequence[ClipEval],
+    ) -> Iterable[Tuple[str, Figure]]:
+        y_true = [c.gt_det for c in clip_evaluations]
+        y_score = [c.score for c in clip_evaluations]
+
+        fig = self.create_figure()
+        ax = fig.subplots()
+
+        # Fixed bins over [0, 1]; each hue is normalized independently
+        # (common_norm=False) so class imbalance doesn't hide a group.
+        df = pd.DataFrame({"is_true": y_true, "score": y_score})
+        sns.histplot(
+            data=df,
+            x="score",
+            binwidth=0.025,
+            binrange=(0, 1),
+            hue="is_true",
+            ax=ax,
+            stat="probability",
+            common_norm=False,
+        )
+
+        yield self.label, fig
+
+    @clip_detection_plots.register(ScoreDistributionPlotConfig)
+    @staticmethod
+    def from_config(
+        config: ScoreDistributionPlotConfig, targets: TargetProtocol
+    ):
+        return ScoreDistributionPlot.build(
+            config=config,
+            targets=targets,
+        )
+
+
+# Discriminated union of all built-in clip-detection plot configs.
+ClipDetectionPlotConfig = Annotated[
+    PRCurveConfig | ROCCurveConfig | ScoreDistributionPlotConfig,
+    Field(discriminator="name"),
+]
+
+
+def build_clip_detection_plotter(
+    config: ClipDetectionPlotConfig,
+    targets: TargetProtocol,
+) -> ClipDetectionPlotter:
+    """Build the plotter registered for ``config.name``."""
+    return clip_detection_plots.build(config, targets)
diff --git a/src/batdetect2/evaluate/plots/detection.py b/src/batdetect2/evaluate/plots/detection.py
new file mode 100644
index 00000000..a99f8c40
--- /dev/null
+++ b/src/batdetect2/evaluate/plots/detection.py
@@ -0,0 +1,318 @@
+import random
+from typing import (
+ Annotated,
+ Callable,
+ Iterable,
+ Literal,
+ Sequence,
+ Tuple,
+)
+
+import matplotlib.pyplot as plt
+import pandas as pd
+import seaborn as sns
+from matplotlib.figure import Figure
+from pydantic import Field
+from sklearn import metrics
+
+from batdetect2.audio import AudioConfig, build_audio_loader
+from batdetect2.audio.types import AudioLoader
+from batdetect2.core import ImportConfig, Registry, add_import_config
+from batdetect2.evaluate.metrics.common import compute_precision_recall
+from batdetect2.evaluate.metrics.detection import ClipEval
+from batdetect2.evaluate.plots.base import BasePlot, BasePlotConfig
+from batdetect2.plotting.detections import plot_clip_detections
+from batdetect2.plotting.metrics import plot_pr_curve, plot_roc_curve
+from batdetect2.preprocess import PreprocessingConfig, build_preprocessor
+from batdetect2.preprocess.types import PreprocessorProtocol
+from batdetect2.targets.types import TargetProtocol
+
+# A detection plotter maps clip evaluations to named figures.
+DetectionPlotter = Callable[[Sequence[ClipEval]], Iterable[Tuple[str, Figure]]]
+
+# Registry mapping plot config types to their builder functions.
+detection_plots: Registry[DetectionPlotter, [TargetProtocol]] = Registry(
+    name="detection_plot"
+)
+
+
+@add_import_config(detection_plots)
+class DetectionPlotImportConfig(ImportConfig):
+    """Use any callable as a detection plot.
+
+    Set ``name="import"`` and provide a ``target`` pointing to any
+    callable to use it instead of a built-in option.
+    """
+
+    # Discriminator value selecting this config in the union.
+    name: Literal["import"] = "import"
+
+
+class PRCurveConfig(BasePlotConfig):
+    """Configuration for the sound-event detection PR plot."""
+
+    name: Literal["pr_curve"] = "pr_curve"
+    label: str = "pr_curve"
+    title: str | None = "Detection Precision-Recall Curve"
+    ignore_non_predictions: bool = True
+    ignore_generic: bool = True
+
+
+class PRCurve(BasePlot):
+    """Precision-recall curve over per-match detection scores."""
+
+    def __init__(
+        self,
+        *args,
+        ignore_non_predictions: bool = True,
+        ignore_generic: bool = True,
+        **kwargs,
+    ):
+        super().__init__(*args, **kwargs)
+        self.ignore_non_predictions = ignore_non_predictions
+        # NOTE(review): ignore_generic is stored but never consulted in
+        # __call__, unlike the top_class variant — confirm intent.
+        self.ignore_generic = ignore_generic
+
+    def __call__(
+        self,
+        clip_evals: Sequence[ClipEval],
+    ) -> Iterable[Tuple[str, Figure]]:
+        y_true = []
+        y_score = []
+        num_positives = 0
+
+        for clip_eval in clip_evals:
+            for m in clip_eval.matches:
+                # Count every ground truth, including unmatched ones,
+                # so recall is measured against all positives.
+                num_positives += int(m.is_ground_truth)
+
+                # Ignore matches that don't correspond to a prediction
+                if not m.is_prediction and self.ignore_non_predictions:
+                    continue
+
+                y_true.append(m.is_ground_truth)
+                y_score.append(m.score)
+
+        precision, recall, thresholds = compute_precision_recall(
+            y_true,
+            y_score,
+            num_positives=num_positives,
+        )
+
+        fig = self.create_figure()
+        ax = fig.subplots()
+
+        plot_pr_curve(precision, recall, thresholds, ax=ax)
+
+        yield self.label, fig
+
+    @detection_plots.register(PRCurveConfig)
+    @staticmethod
+    def from_config(config: PRCurveConfig, targets: TargetProtocol):
+        return PRCurve.build(
+            config=config,
+            targets=targets,
+            ignore_non_predictions=config.ignore_non_predictions,
+            ignore_generic=config.ignore_generic,
+        )
+
+
+class ROCCurveConfig(BasePlotConfig):
+    """Configuration for the sound-event detection ROC plot."""
+
+    name: Literal["roc_curve"] = "roc_curve"
+    label: str = "roc_curve"
+    title: str | None = "Detection ROC Curve"
+    ignore_non_predictions: bool = True
+    ignore_generic: bool = True
+
+
+class ROCCurve(BasePlot):
+    """ROC curve over per-match detection scores."""
+
+    def __init__(
+        self,
+        *args,
+        ignore_non_predictions: bool = True,
+        ignore_generic: bool = True,
+        **kwargs,
+    ):
+        super().__init__(*args, **kwargs)
+        self.ignore_non_predictions = ignore_non_predictions
+        # NOTE(review): ignore_generic is stored but never consulted in
+        # __call__, unlike the top_class variant — confirm intent.
+        self.ignore_generic = ignore_generic
+
+    def __call__(
+        self,
+        clip_evaluations: Sequence[ClipEval],
+    ) -> Iterable[Tuple[str, Figure]]:
+        y_true = []
+        y_score = []
+
+        for clip_eval in clip_evaluations:
+            for m in clip_eval.matches:
+                if not m.is_prediction and self.ignore_non_predictions:
+                    # Ignore matches that don't correspond to a prediction
+                    continue
+
+                y_true.append(m.is_ground_truth)
+                y_score.append(m.score)
+
+        fpr, tpr, thresholds = metrics.roc_curve(
+            y_true,
+            y_score,
+        )
+
+        fig = self.create_figure()
+        ax = fig.subplots()
+
+        plot_roc_curve(fpr, tpr, thresholds, ax=ax)
+
+        yield self.label, fig
+
+    @detection_plots.register(ROCCurveConfig)
+    @staticmethod
+    def from_config(config: ROCCurveConfig, targets: TargetProtocol):
+        return ROCCurve.build(
+            config=config,
+            targets=targets,
+            ignore_non_predictions=config.ignore_non_predictions,
+            ignore_generic=config.ignore_generic,
+        )
+
+
+class ScoreDistributionPlotConfig(BasePlotConfig):
+    """Configuration for the detection score histogram."""
+
+    name: Literal["score_distribution"] = "score_distribution"
+    label: str = "score_distribution"
+    title: str | None = "Detection Score Distribution"
+    ignore_non_predictions: bool = True
+    ignore_generic: bool = True
+
+
+class ScoreDistributionPlot(BasePlot):
+    """Histogram of match scores, split by ground-truth status."""
+
+    def __init__(
+        self,
+        *args,
+        ignore_non_predictions: bool = True,
+        ignore_generic: bool = True,
+        **kwargs,
+    ):
+        super().__init__(*args, **kwargs)
+        self.ignore_non_predictions = ignore_non_predictions
+        # NOTE(review): ignore_generic is stored but never consulted in
+        # __call__, unlike the top_class variant — confirm intent.
+        self.ignore_generic = ignore_generic
+
+    def __call__(
+        self,
+        clip_evaluations: Sequence[ClipEval],
+    ) -> Iterable[Tuple[str, Figure]]:
+        y_true = []
+        y_score = []
+
+        for clip_eval in clip_evaluations:
+            for m in clip_eval.matches:
+                if not m.is_prediction and self.ignore_non_predictions:
+                    # Ignore matches that don't correspond to a prediction
+                    continue
+
+                y_true.append(m.is_ground_truth)
+                y_score.append(m.score)
+
+        df = pd.DataFrame({"is_true": y_true, "score": y_score})
+
+        fig = self.create_figure()
+        ax = fig.subplots()
+
+        # Fixed bins over [0, 1]; hues normalized independently.
+        sns.histplot(
+            data=df,
+            x="score",
+            binwidth=0.025,
+            binrange=(0, 1),
+            hue="is_true",
+            ax=ax,
+            stat="probability",
+            common_norm=False,
+        )
+
+        yield self.label, fig
+
+    @detection_plots.register(ScoreDistributionPlotConfig)
+    @staticmethod
+    def from_config(
+        config: ScoreDistributionPlotConfig, targets: TargetProtocol
+    ):
+        return ScoreDistributionPlot.build(
+            config=config,
+            targets=targets,
+            ignore_non_predictions=config.ignore_non_predictions,
+            ignore_generic=config.ignore_generic,
+        )
+
+
+class ExampleDetectionPlotConfig(BasePlotConfig):
+ name: Literal["example_detection"] = "example_detection"
+ label: str = "example_detection"
+ title: str | None = "Example Detection"
+ figsize: tuple[int, int] = (10, 4)
+ num_examples: int = 5
+ threshold: float = 0.2
+ audio: AudioConfig = Field(default_factory=AudioConfig)
+ preprocessing: PreprocessingConfig = Field(
+ default_factory=PreprocessingConfig
+ )
+
+
+class ExampleDetectionPlot(BasePlot):
+ def __init__(
+ self,
+ *args,
+ num_examples: int = 5,
+ threshold: float = 0.2,
+ audio_loader: AudioLoader,
+ preprocessor: PreprocessorProtocol,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.num_examples = num_examples
+ self.audio_loader = audio_loader
+ self.threshold = threshold
+ self.preprocessor = preprocessor
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Iterable[Tuple[str, Figure]]:
+ sample = clip_evaluations
+
+ if self.num_examples < len(sample):
+ sample = random.sample(sample, self.num_examples)
+
+ for num_example, clip_eval in enumerate(sample):
+ fig = self.create_figure()
+ ax = fig.subplots()
+
+ plot_clip_detections(
+ clip_eval,
+ ax=ax,
+ audio_loader=self.audio_loader,
+ preprocessor=self.preprocessor,
+ )
+
+ yield f"{self.label}/example_{num_example}", fig
+
+ plt.close(fig)
+
+ @detection_plots.register(ExampleDetectionPlotConfig)
+ @staticmethod
+ def from_config(
+ config: ExampleDetectionPlotConfig,
+ targets: TargetProtocol,
+ ):
+ return ExampleDetectionPlot.build(
+ config=config,
+ targets=targets,
+ num_examples=config.num_examples,
+ audio_loader=build_audio_loader(config.audio),
+ preprocessor=build_preprocessor(config.preprocessing),
+ )
+
+
+# Discriminated union of all built-in detection plot configs.
+DetectionPlotConfig = Annotated[
+    PRCurveConfig
+    | ROCCurveConfig
+    | ScoreDistributionPlotConfig
+    | ExampleDetectionPlotConfig,
+    Field(discriminator="name"),
+]
+
+
+def build_detection_plotter(
+    config: DetectionPlotConfig,
+    targets: TargetProtocol,
+) -> DetectionPlotter:
+    """Build the plotter registered for ``config.name``."""
+    return detection_plots.build(config, targets)
diff --git a/src/batdetect2/evaluate/plots/top_class.py b/src/batdetect2/evaluate/plots/top_class.py
new file mode 100644
index 00000000..d48ce878
--- /dev/null
+++ b/src/batdetect2/evaluate/plots/top_class.py
@@ -0,0 +1,426 @@
+import random
+from collections import defaultdict
+from dataclasses import dataclass, field
+from typing import (
+ Annotated,
+ Callable,
+ Iterable,
+ Literal,
+ Sequence,
+)
+
+import matplotlib.pyplot as plt
+import pandas as pd
+from matplotlib.figure import Figure
+from pydantic import Field
+from sklearn import metrics
+
+from batdetect2.audio import AudioConfig, build_audio_loader
+from batdetect2.audio.types import AudioLoader
+from batdetect2.core import ImportConfig, Registry, add_import_config
+from batdetect2.evaluate.metrics.common import compute_precision_recall
+from batdetect2.evaluate.metrics.top_class import (
+ ClipEval,
+ MatchEval,
+ compute_confusion_matrix,
+)
+from batdetect2.evaluate.plots.base import BasePlot, BasePlotConfig
+from batdetect2.plotting.gallery import plot_match_gallery
+from batdetect2.plotting.metrics import plot_pr_curve, plot_roc_curve
+from batdetect2.preprocess import PreprocessingConfig, build_preprocessor
+from batdetect2.preprocess.types import PreprocessorProtocol
+from batdetect2.targets.types import TargetProtocol
+
+# A top-class plotter maps clip evaluations to named figures.
+TopClassPlotter = Callable[[Sequence[ClipEval]], Iterable[tuple[str, Figure]]]
+
+# Registry mapping plot config types to their builder functions.
+top_class_plots: Registry[TopClassPlotter, [TargetProtocol]] = Registry(
+    name="top_class_plot"
+)
+
+
+@add_import_config(top_class_plots)
+class TopClassPlotImportConfig(ImportConfig):
+    """Use any callable as a top-class plot.
+
+    Set ``name="import"`` and provide a ``target`` pointing to any
+    callable to use it instead of a built-in option.
+    """
+
+    # Discriminator value selecting this config in the union.
+    name: Literal["import"] = "import"
+
+
+class PRCurveConfig(BasePlotConfig):
+    """Configuration for the top-class precision-recall plot."""
+
+    name: Literal["pr_curve"] = "pr_curve"
+    label: str = "pr_curve"
+    title: str | None = "Top Class Precision-Recall Curve"
+    ignore_non_predictions: bool = True
+    ignore_generic: bool = True
+
+
+class PRCurve(BasePlot):
+    """Precision-recall over matches, scoring top-class correctness."""
+
+    def __init__(
+        self,
+        *args,
+        ignore_non_predictions: bool = True,
+        ignore_generic: bool = True,
+        **kwargs,
+    ):
+        super().__init__(*args, **kwargs)
+        self.ignore_non_predictions = ignore_non_predictions
+        self.ignore_generic = ignore_generic
+
+    def __call__(
+        self,
+        clip_evaluations: Sequence[ClipEval],
+    ) -> Iterable[tuple[str, Figure]]:
+        y_true = []
+        y_score = []
+        num_positives = 0
+
+        for clip_eval in clip_evaluations:
+            for m in clip_eval.matches:
+                if m.is_generic and self.ignore_generic:
+                    # Ignore gt sounds with unknown class
+                    continue
+
+                # Count all remaining ground truths for recall.
+                num_positives += int(m.is_ground_truth)
+
+                if not m.is_prediction and self.ignore_non_predictions:
+                    # Ignore non predictions
+                    continue
+
+                # Positive only when the top predicted class matches
+                # the true class.
+                y_true.append(m.pred_class == m.true_class)
+                y_score.append(m.score)
+
+        precision, recall, thresholds = compute_precision_recall(
+            y_true,
+            y_score,
+            num_positives=num_positives,
+        )
+
+        fig = self.create_figure()
+        ax = fig.subplots()
+
+        plot_pr_curve(precision, recall, thresholds, ax=ax)
+
+        yield self.label, fig
+
+    @top_class_plots.register(PRCurveConfig)
+    @staticmethod
+    def from_config(config: PRCurveConfig, targets: TargetProtocol):
+        return PRCurve.build(
+            config=config,
+            targets=targets,
+            ignore_non_predictions=config.ignore_non_predictions,
+            ignore_generic=config.ignore_generic,
+        )
+
+
+class ROCCurveConfig(BasePlotConfig):
+    """Configuration for the top-class ROC plot."""
+
+    name: Literal["roc_curve"] = "roc_curve"
+    label: str = "roc_curve"
+    title: str | None = "Top Class ROC Curve"
+    ignore_non_predictions: bool = True
+    ignore_generic: bool = True
+
+
+class ROCCurve(BasePlot):
+    """ROC curve over matches, scoring top-class correctness."""
+
+    def __init__(
+        self,
+        *args,
+        ignore_non_predictions: bool = True,
+        ignore_generic: bool = True,
+        **kwargs,
+    ):
+        super().__init__(*args, **kwargs)
+        self.ignore_non_predictions = ignore_non_predictions
+        self.ignore_generic = ignore_generic
+
+    def __call__(
+        self,
+        clip_evaluations: Sequence[ClipEval],
+    ) -> Iterable[tuple[str, Figure]]:
+        y_true = []
+        y_score = []
+
+        for clip_eval in clip_evaluations:
+            for m in clip_eval.matches:
+                if m.is_generic and self.ignore_generic:
+                    # Ignore gt sounds with unknown class
+                    continue
+
+                if not m.is_prediction and self.ignore_non_predictions:
+                    # Ignore non predictions
+                    continue
+
+                # Positive only when the top predicted class matches
+                # the true class.
+                y_true.append(m.pred_class == m.true_class)
+                y_score.append(m.score)
+
+        fpr, tpr, thresholds = metrics.roc_curve(
+            y_true,
+            y_score,
+        )
+
+        fig = self.create_figure()
+        ax = fig.subplots()
+
+        plot_roc_curve(fpr, tpr, thresholds, ax=ax)
+
+        yield self.label, fig
+
+    @top_class_plots.register(ROCCurveConfig)
+    @staticmethod
+    def from_config(config: ROCCurveConfig, targets: TargetProtocol):
+        return ROCCurve.build(
+            config=config,
+            targets=targets,
+            ignore_non_predictions=config.ignore_non_predictions,
+            ignore_generic=config.ignore_generic,
+        )
+
+
+class ConfusionMatrixConfig(BasePlotConfig):
+ name: Literal["confusion_matrix"] = "confusion_matrix"
+ title: str | None = "Top Class Confusion Matrix"
+ figsize: tuple[int, int] = (10, 10)
+ label: str = "confusion_matrix"
+ exclude_generic: bool = True
+ exclude_noise: bool = False
+ noise_class: str = "noise"
+ normalize: Literal["true", "pred", "all", "none"] = "true"
+ threshold: float = 0.2
+ add_colorbar: bool = True
+ cmap: str = "Blues"
+
+
+class ConfusionMatrix(BasePlot):
+ def __init__(
+ self,
+ *args,
+ exclude_generic: bool = True,
+ exclude_false_positives: bool = True,
+ exclude_false_negatives: bool = True,
+ exclude_noise: bool = False,
+ noise_class: str = "noise",
+ add_colorbar: bool = True,
+ normalize: Literal["true", "pred", "all", "none"] = "true",
+ cmap: str = "Blues",
+ threshold: float = 0.2,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.exclude_generic = exclude_generic
+ self.exclude_false_positives = exclude_false_positives
+ self.exclude_false_negatives = exclude_false_negatives
+ self.exclude_noise = exclude_noise
+ self.noise_class = noise_class
+ self.normalize: Literal["true", "pred", "all", "none"] = normalize
+ self.add_colorbar = add_colorbar
+ self.threshold = threshold
+ self.cmap = cmap
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Iterable[tuple[str, Figure]]:
+ cm, labels = compute_confusion_matrix(
+ clip_evaluations,
+ self.targets,
+ threshold=self.threshold,
+ normalize=self.normalize,
+ exclude_generic=self.exclude_generic,
+ exclude_false_positives=self.exclude_false_positives,
+ exclude_false_negatives=self.exclude_false_negatives,
+ noise_class=self.noise_class,
+ )
+
+ fig = self.create_figure()
+ ax = fig.subplots()
+
+ metrics.ConfusionMatrixDisplay(cm, display_labels=labels).plot(
+ ax=ax,
+ xticks_rotation="vertical",
+ cmap=self.cmap,
+ colorbar=self.add_colorbar,
+ values_format=".2f",
+ )
+
+ yield self.label, fig
+
+ @top_class_plots.register(ConfusionMatrixConfig)
+ @staticmethod
+ def from_config(config: ConfusionMatrixConfig, targets: TargetProtocol):
+ return ConfusionMatrix.build(
+ config=config,
+ targets=targets,
+ exclude_generic=config.exclude_generic,
+ exclude_noise=config.exclude_noise,
+ noise_class=config.noise_class,
+ add_colorbar=config.add_colorbar,
+ normalize=config.normalize,
+ cmap=config.cmap,
+ )
+
+
+class ExampleClassificationPlotConfig(BasePlotConfig):
+ name: Literal["example_classification"] = "example_classification"
+ label: str = "example_classification"
+ title: str | None = "Example Classification"
+ num_examples: int = 4
+ threshold: float = 0.2
+ audio: AudioConfig = Field(default_factory=AudioConfig)
+ preprocessing: PreprocessingConfig = Field(
+ default_factory=PreprocessingConfig
+ )
+
+
+class ExampleClassificationPlot(BasePlot):
+ def __init__(
+ self,
+ *args,
+ num_examples: int = 4,
+ threshold: float = 0.2,
+ audio_loader: AudioLoader,
+ preprocessor: PreprocessorProtocol,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.num_examples = num_examples
+ self.audio_loader = audio_loader
+ self.threshold = threshold
+ self.preprocessor = preprocessor
+ self.num_examples = num_examples
+
+ def __call__(
+ self,
+ clip_evaluations: Sequence[ClipEval],
+ ) -> Iterable[tuple[str, Figure]]:
+ grouped = group_matches(clip_evaluations, threshold=self.threshold)
+
+ for class_name, matches in grouped.items():
+ true_positives: list[MatchEval] = get_binned_sample(
+ matches.true_positives,
+ n_examples=self.num_examples,
+ )
+
+ false_positives: list[MatchEval] = get_binned_sample(
+ matches.false_positives,
+ n_examples=self.num_examples,
+ )
+
+ false_negatives: list[MatchEval] = random.sample(
+ matches.false_negatives,
+ k=min(self.num_examples, len(matches.false_negatives)),
+ )
+
+ cross_triggers: list[MatchEval] = get_binned_sample(
+ matches.cross_triggers, n_examples=self.num_examples
+ )
+
+ fig = self.create_figure()
+
+ fig = plot_match_gallery(
+ true_positives,
+ false_positives,
+ false_negatives,
+ cross_triggers,
+ preprocessor=self.preprocessor,
+ audio_loader=self.audio_loader,
+ n_examples=self.num_examples,
+ fig=fig,
+ )
+
+ if self.title is not None:
+ fig.suptitle(f"{self.title}: {class_name}")
+ else:
+ fig.suptitle(class_name)
+
+ yield f"{self.label}/{class_name}", fig
+
+ plt.close(fig)
+
+ @top_class_plots.register(ExampleClassificationPlotConfig)
+ @staticmethod
+ def from_config(
+ config: ExampleClassificationPlotConfig,
+ targets: TargetProtocol,
+ ):
+ return ExampleClassificationPlot.build(
+ config=config,
+ targets=targets,
+ num_examples=config.num_examples,
+ threshold=config.threshold,
+ audio_loader=build_audio_loader(config.audio),
+ preprocessor=build_preprocessor(config.preprocessing),
+ )
+
+
+# Discriminated union of all built-in top-class plot configs.
+TopClassPlotConfig = Annotated[
+    PRCurveConfig
+    | ROCCurveConfig
+    | ConfusionMatrixConfig
+    | ExampleClassificationPlotConfig,
+    Field(discriminator="name"),
+]
+
+
+def build_top_class_plotter(
+    config: TopClassPlotConfig,
+    targets: TargetProtocol,
+) -> TopClassPlotter:
+    """Build the plotter registered for ``config.name``."""
+    return top_class_plots.build(config, targets)
+
+
+@dataclass
+class ClassMatches:
+    """Matches for a single class, bucketed by outcome."""
+
+    false_positives: list[MatchEval] = field(default_factory=list)
+    false_negatives: list[MatchEval] = field(default_factory=list)
+    true_positives: list[MatchEval] = field(default_factory=list)
+    cross_triggers: list[MatchEval] = field(default_factory=list)
+
+
+def group_matches(
+    clip_evals: Sequence[ClipEval],
+    threshold: float = 0.2,
+) -> dict[str, ClassMatches]:
+    """Bucket every match by class and outcome at the given threshold."""
+    class_examples = defaultdict(ClassMatches)
+
+    for clip_eval in clip_evals:
+        for match in clip_eval.matches:
+            gt_class = match.true_class
+            pred_class = match.pred_class
+            is_pred = match.score >= threshold
+
+            # Below threshold with a ground truth: false negative.
+            if not is_pred and gt_class is not None:
+                class_examples[gt_class].false_negatives.append(match)
+                continue
+
+            # Below threshold with no ground truth: ignored entirely.
+            if not is_pred:
+                continue
+
+            if gt_class is None:
+                class_examples[pred_class].false_positives.append(match)
+                continue
+
+            # Cross-triggers are keyed by the *predicted* class.
+            if gt_class != pred_class:
+                class_examples[pred_class].cross_triggers.append(match)
+                continue
+
+            class_examples[gt_class].true_positives.append(match)
+
+    return class_examples
+
+
+def get_binned_sample(matches: list[MatchEval], n_examples: int = 5):
+    """Sample up to ``n_examples`` matches spread across score quantiles.
+
+    Returns ``matches`` unchanged when fewer than ``n_examples`` exist.
+    """
+    if len(matches) < n_examples:
+        return matches
+
+    indices, pred_scores = zip(
+        *[(index, match.score) for index, match in enumerate(matches)],
+        strict=False,
+    )
+
+    # Quantile-bin scores, then draw one example per non-empty bin.
+    # NOTE(review): sampling uses pandas' global RNG (no random_state),
+    # so output is not reproducible across runs — confirm acceptable.
+    bins = pd.qcut(pred_scores, q=n_examples, labels=False, duplicates="drop")
+    df = pd.DataFrame({"indices": indices, "bins": bins})
+    sample = df.groupby("bins").sample(1)
+    return [matches[ind] for ind in sample["indices"]]
diff --git a/src/batdetect2/evaluate/results.py b/src/batdetect2/evaluate/results.py
new file mode 100644
index 00000000..81af6534
--- /dev/null
+++ b/src/batdetect2/evaluate/results.py
@@ -0,0 +1,27 @@
+import json
+from pathlib import Path
+from typing import Iterable
+
+from matplotlib.figure import Figure
+from soundevent import data
+
+__all__ = ["save_evaluation_results"]
+
+
+def save_evaluation_results(
+    metrics: dict[str, float],
+    plots: Iterable[tuple[str, Figure]],
+    output_dir: data.PathLike,
+) -> None:
+    """Save evaluation metrics and plots to disk."""
+
+    output_path = Path(output_dir)
+    output_path.mkdir(parents=True, exist_ok=True)
+
+    # Metrics are written as a flat JSON object at metrics.json.
+    metrics_path = output_path / "metrics.json"
+    metrics_path.write_text(json.dumps(metrics))
+
+    # Figure names may contain "/" and become nested directories.
+    # NOTE(review): figures are not closed here — confirm the caller
+    # closes them, otherwise long runs will leak open figures.
+    for figure_name, figure in plots:
+        figure_path = output_path / figure_name
+        figure_path.parent.mkdir(parents=True, exist_ok=True)
+        figure.savefig(figure_path)
diff --git a/src/batdetect2/evaluate/tasks/__init__.py b/src/batdetect2/evaluate/tasks/__init__.py
new file mode 100644
index 00000000..08a173f6
--- /dev/null
+++ b/src/batdetect2/evaluate/tasks/__init__.py
@@ -0,0 +1,63 @@
+from typing import Annotated, Sequence
+
+from pydantic import Field
+from soundevent import data
+
+from batdetect2.evaluate.tasks.base import BaseTaskConfig, tasks_registry
+from batdetect2.evaluate.tasks.classification import ClassificationTaskConfig
+from batdetect2.evaluate.tasks.clip_classification import (
+ ClipClassificationTaskConfig,
+)
+from batdetect2.evaluate.tasks.clip_detection import ClipDetectionTaskConfig
+from batdetect2.evaluate.tasks.detection import DetectionTaskConfig
+from batdetect2.evaluate.tasks.top_class import TopClassDetectionTaskConfig
+from batdetect2.evaluate.types import EvaluationTaskProtocol
+from batdetect2.postprocess.types import ClipDetections
+from batdetect2.targets import build_targets
+from batdetect2.targets.types import TargetProtocol
+
+__all__ = [
+ "TaskConfig",
+ "build_task",
+ "evaluate_task",
+]
+
+
+# Discriminated union of all built-in evaluation task configs.
+TaskConfig = Annotated[
+    ClassificationTaskConfig
+    | DetectionTaskConfig
+    | ClipDetectionTaskConfig
+    | ClipClassificationTaskConfig
+    | TopClassDetectionTaskConfig,
+    Field(discriminator="name"),
+]
+
+
+def build_task(
+    config: TaskConfig,
+    targets: TargetProtocol | None = None,
+) -> EvaluationTaskProtocol:
+    """Build an evaluation task, using default targets when none given."""
+    targets = targets or build_targets()
+    return tasks_registry.build(config, targets)
+
+
+def evaluate_task(
+    clip_annotations: Sequence[data.ClipAnnotation],
+    predictions: Sequence[ClipDetections],
+    task: str | None = None,
+    targets: TargetProtocol | None = None,
+    config: TaskConfig | dict | None = None,
+):
+    """Evaluate predictions against annotations for a named task.
+
+    Either pass an already-built ``config`` object, or a ``task`` name
+    (optionally with a config dict) to resolve it from the registry.
+    """
+    # A validated config object carries its own task identity.
+    if isinstance(config, BaseTaskConfig):
+        task_obj = build_task(config, targets)
+        return task_obj.evaluate(clip_annotations, predictions)
+
+    if task is None:
+        raise ValueError(
+            "Task must be specified if a full config is not provided.",
+        )
+
+    config_class = tasks_registry.get_config_type(task)
+    config = config_class.model_validate(config or {}) # type: ignore
+    task_obj = build_task(config, targets) # type: ignore
+    return task_obj.evaluate(clip_annotations, predictions)
diff --git a/src/batdetect2/evaluate/tasks/base.py b/src/batdetect2/evaluate/tasks/base.py
new file mode 100644
index 00000000..f734c4a8
--- /dev/null
+++ b/src/batdetect2/evaluate/tasks/base.py
@@ -0,0 +1,224 @@
+from typing import (
+ Callable,
+ Dict,
+ Generic,
+ Iterable,
+ List,
+ Literal,
+ Sequence,
+ Tuple,
+ TypeVar,
+)
+
+from loguru import logger
+from matplotlib.figure import Figure
+from pydantic import Field
+from soundevent import data
+from soundevent.geometry import compute_bounds
+
+from batdetect2.core import (
+ BaseConfig,
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.evaluate.affinity import (
+ AffinityConfig,
+ TimeAffinityConfig,
+ build_affinity_function,
+)
+from batdetect2.evaluate.types import (
+ AffinityFunction,
+ EvaluationTaskProtocol,
+)
+from batdetect2.postprocess.types import ClipDetections, Detection
+from batdetect2.targets.types import TargetProtocol
+
+__all__ = [
+ "BaseTaskConfig",
+ "BaseTask",
+ "TaskImportConfig",
+]
+
+# Registry of evaluation task builders keyed by config type.
+tasks_registry: Registry[EvaluationTaskProtocol, [TargetProtocol]] = Registry(
+    "tasks"
+)
+
+
+@add_import_config(tasks_registry)
+class TaskImportConfig(ImportConfig):
+    """Use any callable as an evaluation task.
+
+    Set ``name="import"`` and provide a ``target`` pointing to any
+    callable to use it instead of a built-in option.
+    """
+
+    # Discriminator value selecting this config in the union.
+    name: Literal["import"] = "import"
+
+
+# Per-clip evaluation result type produced by a task.
+T_Output = TypeVar("T_Output")
+
+
+class BaseTaskConfig(BaseConfig):
+    """Common settings shared by every evaluation task config."""
+
+    # Namespace prepended to metric and plot names.
+    prefix: str
+
+    # Seconds at each clip edge excluded from evaluation.
+    ignore_start_end: float = 0.01
+
+class BaseTask(EvaluationTaskProtocol, Generic[T_Output]):
+    """Shared machinery for evaluation tasks.
+
+    Subclasses implement ``evaluate_clip``; this base handles metric
+    aggregation, plot generation, and clip-boundary filtering.
+    """
+
+    targets: TargetProtocol
+
+    metrics: List[Callable[[Sequence[T_Output]], Dict[str, float]]]
+
+    plots: List[Callable[[Sequence[T_Output]], Iterable[Tuple[str, Figure]]]]
+
+    prefix: str
+
+    ignore_start_end: float
+
+    def __init__(
+        self,
+        targets: TargetProtocol,
+        metrics: List[Callable[[Sequence[T_Output]], Dict[str, float]]],
+        prefix: str,
+        plots: List[
+            Callable[[Sequence[T_Output]], Iterable[Tuple[str, Figure]]]
+        ]
+        | None = None,
+        ignore_start_end: float = 0.01,
+    ):
+        self.prefix = prefix
+        self.targets = targets
+        self.metrics = metrics
+        self.plots = plots or []
+        self.ignore_start_end = ignore_start_end
+
+    def compute_metrics(
+        self,
+        eval_outputs: List[T_Output],
+    ) -> Dict[str, float]:
+        """Run all metrics and namespace their names with the prefix."""
+        scores = [metric(eval_outputs) for metric in self.metrics]
+        return {
+            f"{self.prefix}/{name}": score
+            for metric_output in scores
+            for name, score in metric_output.items()
+        }
+
+    def generate_plots(
+        self, eval_outputs: List[T_Output]
+    ) -> Iterable[Tuple[str, Figure]]:
+        """Yield prefixed figures; plot failures are logged and skipped."""
+        for plot in self.plots:
+            try:
+                for name, fig in plot(eval_outputs):
+                    yield f"{self.prefix}/{name}", fig
+            except Exception as e:
+                # Plots are best-effort: a broken plot must not abort
+                # the whole evaluation run.
+                logger.error(f"Error plotting {self.prefix}: {e}")
+                continue
+
+    def evaluate(
+        self,
+        clip_annotations: Sequence[data.ClipAnnotation],
+        predictions: Sequence[ClipDetections],
+    ) -> List[T_Output]:
+        """Evaluate each (annotation, prediction) pair positionally.
+
+        NOTE(review): strict=False silently truncates when lengths
+        differ — confirm inputs are always aligned upstream.
+        """
+        return [
+            self.evaluate_clip(clip_annotation, preds)
+            for clip_annotation, preds in zip(
+                clip_annotations, predictions, strict=False
+            )
+        ]
+
+    def evaluate_clip(
+        self,
+        clip_annotation: data.ClipAnnotation,
+        prediction: ClipDetections,
+    ) -> T_Output: ... # ty: ignore[empty-body]
+
+    def include_sound_event_annotation(
+        self,
+        sound_event_annotation: data.SoundEventAnnotation,
+        clip: data.Clip,
+    ) -> bool:
+        """Keep annotations passing the target filter and clip bounds."""
+        if not self.targets.filter(sound_event_annotation):
+            return False
+
+        geometry = sound_event_annotation.sound_event.geometry
+        return is_in_bounds(
+            geometry,
+            clip,
+            self.ignore_start_end,
+        )
+
+    def include_prediction(
+        self,
+        prediction: Detection,
+        clip: data.Clip,
+    ) -> bool:
+        """Keep predictions whose geometry falls within clip bounds."""
+        return is_in_bounds(
+            prediction.geometry,
+            clip,
+            self.ignore_start_end,
+        )
+
+
+class BaseSEDTaskConfig(BaseTaskConfig):
+    """Settings shared by sound-event-detection style tasks."""
+
+    # How predicted and ground-truth geometries are compared.
+    affinity: AffinityConfig = Field(default_factory=TimeAffinityConfig)
+    # Minimum affinity for a pair to count as a match.
+    affinity_threshold: float = 0
+    strict_match: bool = True
+
+
+class BaseSEDTask(BaseTask[T_Output]):
+    """Base for tasks that match detections to ground-truth events."""
+
+    affinity: AffinityFunction
+
+    def __init__(
+        self,
+        prefix: str,
+        targets: TargetProtocol,
+        metrics: List[Callable[[Sequence[T_Output]], Dict[str, float]]],
+        affinity: AffinityFunction,
+        plots: List[
+            Callable[[Sequence[T_Output]], Iterable[Tuple[str, Figure]]]
+        ]
+        | None = None,
+        affinity_threshold: float = 0,
+        ignore_start_end: float = 0.01,
+        strict_match: bool = True,
+    ):
+        super().__init__(
+            prefix=prefix,
+            metrics=metrics,
+            plots=plots,
+            targets=targets,
+            ignore_start_end=ignore_start_end,
+        )
+        self.affinity = affinity
+        self.affinity_threshold = affinity_threshold
+        self.strict_match = strict_match
+
+    @classmethod
+    def build(
+        cls,
+        config: BaseSEDTaskConfig,
+        targets: TargetProtocol,
+        **kwargs,
+    ):
+        """Construct the task from its config, plus subclass kwargs."""
+        affinity = build_affinity_function(config.affinity)
+        return cls(
+            affinity=affinity,
+            affinity_threshold=config.affinity_threshold,
+            prefix=config.prefix,
+            ignore_start_end=config.ignore_start_end,
+            strict_match=config.strict_match,
+            targets=targets,
+            **kwargs,
+        )
+
+
+def is_in_bounds(
+    geometry: data.Geometry,
+    clip: data.Clip,
+    buffer: float,
+) -> bool:
+    """Check whether a geometry's onset lies inside the buffered clip.
+
+    Only the start time is tested; events may extend past the clip end.
+    """
+    start_time = compute_bounds(geometry)[0]
+    return (start_time >= clip.start_time + buffer) and (
+        start_time <= clip.end_time - buffer
+    )
diff --git a/src/batdetect2/evaluate/tasks/classification.py b/src/batdetect2/evaluate/tasks/classification.py
new file mode 100644
index 00000000..5dbbeaa9
--- /dev/null
+++ b/src/batdetect2/evaluate/tasks/classification.py
@@ -0,0 +1,161 @@
+from functools import partial
+from typing import Literal
+
+from pydantic import Field
+from soundevent import data
+from soundevent.evaluation import match_detections_and_gts
+
+from batdetect2.evaluate.metrics.classification import (
+ ClassificationAveragePrecisionConfig,
+ ClassificationMetricConfig,
+ ClipEval,
+ MatchEval,
+ build_classification_metric,
+)
+from batdetect2.evaluate.plots.classification import (
+ ClassificationPlotConfig,
+ build_classification_plotter,
+)
+from batdetect2.evaluate.tasks.base import (
+ BaseSEDTask,
+ BaseSEDTaskConfig,
+ tasks_registry,
+)
+from batdetect2.postprocess.types import ClipDetections, Detection
+from batdetect2.targets.types import TargetProtocol
+
+
+def _default_metrics() -> list[ClassificationMetricConfig]:
+ return [ClassificationAveragePrecisionConfig()]
+
+
+class ClassificationTaskConfig(BaseSEDTaskConfig):
+ name: Literal["sound_event_classification"] = "sound_event_classification"
+ prefix: str = "classification"
+ metrics: list[ClassificationMetricConfig] = Field(
+ default_factory=_default_metrics
+ )
+ plots: list[ClassificationPlotConfig] = Field(default_factory=list)
+ include_generics: bool = True
+
+
+class ClassificationTask(BaseSEDTask[ClipEval]):
+ def __init__(
+ self,
+ *args,
+ include_generics: bool = True,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.include_generics = include_generics
+
+ def evaluate_clip(
+ self,
+ clip_annotation: data.ClipAnnotation,
+ prediction: ClipDetections,
+ ) -> ClipEval:
+ clip = clip_annotation.clip
+
+ preds = [
+ pred
+ for pred in prediction.detections
+ if self.include_prediction(pred, clip)
+ ]
+
+ all_gts = [
+ sound_event
+ for sound_event in clip_annotation.sound_events
+ if self.include_sound_event_annotation(sound_event, clip)
+ ]
+
+ per_class_matches = {}
+
+ for class_name in self.targets.class_names:
+ class_idx = self.targets.class_names.index(class_name)
+
+ # Only match to targets of the given class
+ gts = [
+ sound_event
+ for sound_event in all_gts
+ if is_target_class(
+ sound_event,
+ class_name,
+ self.targets,
+ include_generics=self.include_generics,
+ )
+ ]
+
+ matches = []
+
+ for match in match_detections_and_gts(
+ detections=preds,
+ ground_truths=gts,
+ affinity=self.affinity,
+ score=partial(get_class_score, class_idx=class_idx),
+ strict_match=self.strict_match,
+ affinity_threshold=self.affinity_threshold,
+ ):
+ true_class = (
+ self.targets.encode_class(match.annotation)
+ if match.annotation is not None
+ else None
+ )
+ matches.append(
+ MatchEval(
+ clip=clip,
+ gt=match.annotation,
+ pred=match.prediction,
+ is_prediction=match.prediction is not None,
+ is_ground_truth=match.annotation is not None,
+ is_generic=match.annotation is not None
+ and true_class is None,
+ true_class=true_class,
+ score=match.prediction_score,
+ )
+ )
+
+ per_class_matches[class_name] = matches
+
+ return ClipEval(clip=clip, matches=per_class_matches)
+
+ @tasks_registry.register(ClassificationTaskConfig)
+ @staticmethod
+ def from_config(
+ config: ClassificationTaskConfig,
+ targets: TargetProtocol,
+ ):
+ metrics = [
+ build_classification_metric(metric, targets)
+ for metric in config.metrics
+ ]
+ plots = [
+ build_classification_plotter(plot, targets)
+ for plot in config.plots
+ ]
+ return ClassificationTask.build(
+ config=config,
+ plots=plots,
+ targets=targets,
+ metrics=metrics,
+ include_generics=config.include_generics,
+ )
+
+
+def get_class_score(pred: Detection, class_idx: int) -> float:
+ return pred.class_scores[class_idx]
+
+
+def is_target_class(
+ sound_event: data.SoundEventAnnotation,
+ class_name: str,
+ targets: TargetProtocol,
+ include_generics: bool = True,
+) -> bool:
+ sound_event_class = targets.encode_class(sound_event)
+
+ if sound_event_class is None and include_generics:
+ # Sound events that are generic could be of the given
+ # class
+ return True
+
+ return sound_event_class == class_name
diff --git a/src/batdetect2/evaluate/tasks/clip_classification.py b/src/batdetect2/evaluate/tasks/clip_classification.py
new file mode 100644
index 00000000..de76cdec
--- /dev/null
+++ b/src/batdetect2/evaluate/tasks/clip_classification.py
@@ -0,0 +1,88 @@
+from collections import defaultdict
+from typing import Literal
+
+from pydantic import Field
+from soundevent import data
+
+from batdetect2.evaluate.metrics.clip_classification import (
+ ClipClassificationAveragePrecisionConfig,
+ ClipClassificationMetricConfig,
+ ClipEval,
+ build_clip_metric,
+)
+from batdetect2.evaluate.plots.clip_classification import (
+ ClipClassificationPlotConfig,
+ build_clip_classification_plotter,
+)
+from batdetect2.evaluate.tasks.base import (
+ BaseTask,
+ BaseTaskConfig,
+ tasks_registry,
+)
+from batdetect2.postprocess.types import ClipDetections
+from batdetect2.targets.types import TargetProtocol
+
+
+def _default_metrics() -> list[ClipClassificationMetricConfig]:
+ return [ClipClassificationAveragePrecisionConfig()]
+
+
+class ClipClassificationTaskConfig(BaseTaskConfig):
+ name: Literal["clip_classification"] = "clip_classification"
+ prefix: str = "clip_classification"
+ metrics: list[ClipClassificationMetricConfig] = Field(
+ default_factory=_default_metrics
+ )
+ plots: list[ClipClassificationPlotConfig] = Field(default_factory=list)
+
+
+class ClipClassificationTask(BaseTask[ClipEval]):
+ def evaluate_clip(
+ self,
+ clip_annotation: data.ClipAnnotation,
+ prediction: ClipDetections,
+ ) -> ClipEval:
+ clip = clip_annotation.clip
+
+ gt_classes = set()
+ for sound_event in clip_annotation.sound_events:
+ if not self.include_sound_event_annotation(sound_event, clip):
+ continue
+
+ class_name = self.targets.encode_class(sound_event)
+
+ if class_name is None:
+ continue
+
+ gt_classes.add(class_name)
+
+ pred_scores = defaultdict(float)
+ for pred in prediction.detections:
+ if not self.include_prediction(pred, clip):
+ continue
+
+ for class_idx, class_name in enumerate(self.targets.class_names):
+ pred_scores[class_name] = max(
+ float(pred.class_scores[class_idx]),
+ pred_scores[class_name],
+ )
+
+ return ClipEval(true_classes=gt_classes, class_scores=pred_scores)
+
+ @tasks_registry.register(ClipClassificationTaskConfig)
+ @staticmethod
+ def from_config(
+ config: ClipClassificationTaskConfig,
+ targets: TargetProtocol,
+ ):
+ metrics = [build_clip_metric(metric) for metric in config.metrics]
+ plots = [
+ build_clip_classification_plotter(plot, targets)
+ for plot in config.plots
+ ]
+ return ClipClassificationTask(
+ prefix=config.prefix,
+ plots=plots,
+ metrics=metrics,
+ targets=targets,
+ )
diff --git a/src/batdetect2/evaluate/tasks/clip_detection.py b/src/batdetect2/evaluate/tasks/clip_detection.py
new file mode 100644
index 00000000..09ddf44d
--- /dev/null
+++ b/src/batdetect2/evaluate/tasks/clip_detection.py
@@ -0,0 +1,79 @@
+from typing import Literal
+
+from pydantic import Field
+from soundevent import data
+
+from batdetect2.evaluate.metrics.clip_detection import (
+ ClipDetectionAveragePrecisionConfig,
+ ClipDetectionMetricConfig,
+ ClipEval,
+ build_clip_metric,
+)
+from batdetect2.evaluate.plots.clip_detection import (
+ ClipDetectionPlotConfig,
+ build_clip_detection_plotter,
+)
+from batdetect2.evaluate.tasks.base import (
+ BaseTask,
+ BaseTaskConfig,
+ tasks_registry,
+)
+from batdetect2.postprocess.types import ClipDetections
+from batdetect2.targets.types import TargetProtocol
+
+
+def _default_metrics() -> list[ClipDetectionMetricConfig]:
+ return [ClipDetectionAveragePrecisionConfig()]
+
+
+class ClipDetectionTaskConfig(BaseTaskConfig):
+ name: Literal["clip_detection"] = "clip_detection"
+ prefix: str = "clip_detection"
+ metrics: list[ClipDetectionMetricConfig] = Field(
+ default_factory=_default_metrics
+ )
+ plots: list[ClipDetectionPlotConfig] = Field(default_factory=list)
+
+
+class ClipDetectionTask(BaseTask[ClipEval]):
+ def evaluate_clip(
+ self,
+ clip_annotation: data.ClipAnnotation,
+ prediction: ClipDetections,
+ ) -> ClipEval:
+ clip = clip_annotation.clip
+
+ gt_det = any(
+ self.include_sound_event_annotation(sound_event, clip)
+ for sound_event in clip_annotation.sound_events
+ )
+
+ pred_score = 0
+ for pred in prediction.detections:
+ if not self.include_prediction(pred, clip):
+ continue
+
+ pred_score = max(pred_score, pred.detection_score)
+
+ return ClipEval(
+ gt_det=gt_det,
+ score=pred_score,
+ )
+
+ @tasks_registry.register(ClipDetectionTaskConfig)
+ @staticmethod
+ def from_config(
+ config: ClipDetectionTaskConfig,
+ targets: TargetProtocol,
+ ):
+ metrics = [build_clip_metric(metric) for metric in config.metrics]
+ plots = [
+ build_clip_detection_plotter(plot, targets)
+ for plot in config.plots
+ ]
+ return ClipDetectionTask(
+ prefix=config.prefix,
+ metrics=metrics,
+ targets=targets,
+ plots=plots,
+ )
diff --git a/src/batdetect2/evaluate/tasks/detection.py b/src/batdetect2/evaluate/tasks/detection.py
new file mode 100644
index 00000000..480abd67
--- /dev/null
+++ b/src/batdetect2/evaluate/tasks/detection.py
@@ -0,0 +1,95 @@
+from typing import Literal
+
+from pydantic import Field
+from soundevent import data
+from soundevent.evaluation import match_detections_and_gts
+
+from batdetect2.evaluate.metrics.detection import (
+ ClipEval,
+ DetectionAveragePrecisionConfig,
+ DetectionMetricConfig,
+ MatchEval,
+ build_detection_metric,
+)
+from batdetect2.evaluate.plots.detection import (
+ DetectionPlotConfig,
+ build_detection_plotter,
+)
+from batdetect2.evaluate.tasks.base import (
+ BaseSEDTask,
+ BaseSEDTaskConfig,
+ tasks_registry,
+)
+from batdetect2.postprocess.types import ClipDetections
+from batdetect2.targets.types import TargetProtocol
+
+
+def _default_metrics() -> list[DetectionMetricConfig]:
+ return [DetectionAveragePrecisionConfig()]
+
+
+class DetectionTaskConfig(BaseSEDTaskConfig):
+ name: Literal["sound_event_detection"] = "sound_event_detection"
+ prefix: str = "detection"
+ metrics: list[DetectionMetricConfig] = Field(
+ default_factory=_default_metrics
+ )
+ plots: list[DetectionPlotConfig] = Field(default_factory=list)
+
+
+class DetectionTask(BaseSEDTask[ClipEval]):
+ def evaluate_clip(
+ self,
+ clip_annotation: data.ClipAnnotation,
+ prediction: ClipDetections,
+ ) -> ClipEval:
+ clip = clip_annotation.clip
+
+ gts = [
+ sound_event
+ for sound_event in clip_annotation.sound_events
+ if self.include_sound_event_annotation(sound_event, clip)
+ ]
+ preds = [
+ pred
+ for pred in prediction.detections
+ if self.include_prediction(pred, clip)
+ ]
+
+ matches = []
+ for match in match_detections_and_gts(
+ detections=preds,
+ ground_truths=gts,
+ affinity=self.affinity,
+ score=lambda pred: pred.detection_score,
+ strict_match=self.strict_match,
+ affinity_threshold=self.affinity_threshold,
+ ):
+ matches.append(
+ MatchEval(
+ gt=match.annotation,
+ pred=match.prediction,
+ is_prediction=match.prediction is not None,
+ is_ground_truth=match.annotation is not None,
+ score=match.prediction_score,
+ )
+ )
+
+ return ClipEval(clip=clip, matches=matches)
+
+ @tasks_registry.register(DetectionTaskConfig)
+ @staticmethod
+ def from_config(
+ config: DetectionTaskConfig,
+ targets: TargetProtocol,
+ ):
+ metrics = [build_detection_metric(metric) for metric in config.metrics]
+ plots = [
+ build_detection_plotter(plot, targets) for plot in config.plots
+ ]
+ return DetectionTask.build(
+ config=config,
+ metrics=metrics,
+ targets=targets,
+ plots=plots,
+ )
diff --git a/src/batdetect2/evaluate/tasks/top_class.py b/src/batdetect2/evaluate/tasks/top_class.py
new file mode 100644
index 00000000..d37d9a9d
--- /dev/null
+++ b/src/batdetect2/evaluate/tasks/top_class.py
@@ -0,0 +1,114 @@
+from typing import Literal
+
+from pydantic import Field
+from soundevent import data
+from soundevent.evaluation import match_detections_and_gts
+
+from batdetect2.evaluate.metrics.top_class import (
+ ClipEval,
+ MatchEval,
+ TopClassAveragePrecisionConfig,
+ TopClassMetricConfig,
+ build_top_class_metric,
+)
+from batdetect2.evaluate.plots.top_class import (
+ TopClassPlotConfig,
+ build_top_class_plotter,
+)
+from batdetect2.evaluate.tasks.base import (
+ BaseSEDTask,
+ BaseSEDTaskConfig,
+ tasks_registry,
+)
+from batdetect2.postprocess.types import ClipDetections
+from batdetect2.targets.types import TargetProtocol
+
+
+def _default_metrics() -> list[TopClassMetricConfig]:
+ return [TopClassAveragePrecisionConfig()]
+
+
+class TopClassDetectionTaskConfig(BaseSEDTaskConfig):
+ name: Literal["top_class_detection"] = "top_class_detection"
+ prefix: str = "top_class"
+ metrics: list[TopClassMetricConfig] = Field(
+ default_factory=_default_metrics
+ )
+ plots: list[TopClassPlotConfig] = Field(default_factory=list)
+
+
+class TopClassDetectionTask(BaseSEDTask[ClipEval]):
+ def evaluate_clip(
+ self,
+ clip_annotation: data.ClipAnnotation,
+ prediction: ClipDetections,
+ ) -> ClipEval:
+ clip = clip_annotation.clip
+
+ gts = [
+ sound_event
+ for sound_event in clip_annotation.sound_events
+ if self.include_sound_event_annotation(sound_event, clip)
+ ]
+ preds = [
+ pred
+ for pred in prediction.detections
+ if self.include_prediction(pred, clip)
+ ]
+
+ matches = []
+ for match in match_detections_and_gts(
+ ground_truths=gts,
+ detections=preds,
+ affinity=self.affinity,
+ score=lambda pred: pred.class_scores.max(),
+ strict_match=self.strict_match,
+ affinity_threshold=self.affinity_threshold,
+ ):
+ gt = match.annotation
+ pred = match.prediction
+ true_class = (
+ self.targets.encode_class(gt) if gt is not None else None
+ )
+
+ class_idx = (
+ pred.class_scores.argmax() if pred is not None else None
+ )
+ pred_class = (
+ self.targets.class_names[class_idx]
+ if class_idx is not None
+ else None
+ )
+
+ matches.append(
+ MatchEval(
+ clip=clip,
+ gt=gt,
+ pred=pred,
+ is_ground_truth=gt is not None,
+ is_prediction=pred is not None,
+ true_class=true_class,
+ is_generic=gt is not None and true_class is None,
+ pred_class=pred_class,
+ score=match.prediction_score,
+ )
+ )
+
+ return ClipEval(clip=clip, matches=matches)
+
+ @tasks_registry.register(TopClassDetectionTaskConfig)
+ @staticmethod
+ def from_config(
+ config: TopClassDetectionTaskConfig,
+ targets: TargetProtocol,
+ ):
+ metrics = [build_top_class_metric(metric) for metric in config.metrics]
+ plots = [
+ build_top_class_plotter(plot, targets) for plot in config.plots
+ ]
+ return TopClassDetectionTask.build(
+ config=config,
+ plots=plots,
+ metrics=metrics,
+ targets=targets,
+ )
diff --git a/src/batdetect2/evaluate/types.py b/src/batdetect2/evaluate/types.py
new file mode 100644
index 00000000..7fbe3d9b
--- /dev/null
+++ b/src/batdetect2/evaluate/types.py
@@ -0,0 +1,147 @@
+from dataclasses import dataclass
+from typing import Generic, Iterable, Protocol, Sequence, TypeVar
+
+from matplotlib.figure import Figure
+from soundevent import data
+
+from batdetect2.outputs.types import OutputTransformProtocol
+from batdetect2.postprocess.types import (
+ ClipDetections,
+ ClipDetectionsTensor,
+ Detection,
+)
+from batdetect2.targets.types import TargetProtocol
+
+__all__ = [
+ "AffinityFunction",
+ "ClipMatches",
+ "EvaluationTaskProtocol",
+ "EvaluatorProtocol",
+ "MatchEvaluation",
+ "MatcherProtocol",
+ "MetricsProtocol",
+ "PlotterProtocol",
+]
+
+
+@dataclass
+class MatchEvaluation:
+ clip: data.Clip
+ sound_event_annotation: data.SoundEventAnnotation | None
+ gt_det: bool
+ gt_class: str | None
+ gt_geometry: data.Geometry | None
+ pred_score: float
+ pred_class_scores: dict[str, float]
+ pred_geometry: data.Geometry | None
+ affinity: float
+
+ @property
+ def top_class(self) -> str | None:
+ if not self.pred_class_scores:
+ return None
+ return max(self.pred_class_scores, key=self.pred_class_scores.get) # type: ignore
+
+ @property
+ def is_prediction(self) -> bool:
+ return self.pred_geometry is not None
+
+ @property
+ def is_generic(self) -> bool:
+ return self.gt_det and self.gt_class is None
+
+ @property
+ def top_class_score(self) -> float:
+ pred_class = self.top_class
+ if pred_class is None:
+ return 0
+ return self.pred_class_scores[pred_class]
+
+
+@dataclass
+class ClipMatches:
+ clip: data.Clip
+ matches: list[MatchEvaluation]
+
+
+class MatcherProtocol(Protocol):
+ def __call__(
+ self,
+ ground_truth: Sequence[data.Geometry],
+ predictions: Sequence[data.Geometry],
+ scores: Sequence[float],
+ ) -> Iterable[tuple[int | None, int | None, float]]: ...
+
+
+class AffinityFunction(Protocol):
+ def __call__(
+ self,
+ detection: Detection,
+ ground_truth: data.SoundEventAnnotation,
+ ) -> float: ...
+
+
+class MetricsProtocol(Protocol):
+ def __call__(
+ self,
+ clip_annotations: Sequence[data.ClipAnnotation],
+ predictions: Sequence[Sequence[Detection]],
+ ) -> dict[str, float]: ...
+
+
+class PlotterProtocol(Protocol):
+ def __call__(
+ self,
+ clip_annotations: Sequence[data.ClipAnnotation],
+ predictions: Sequence[Sequence[Detection]],
+ ) -> Iterable[tuple[str, Figure]]: ...
+
+
+EvaluationOutput = TypeVar("EvaluationOutput")
+
+
+class EvaluationTaskProtocol(Protocol, Generic[EvaluationOutput]):
+ targets: TargetProtocol
+
+ def evaluate(
+ self,
+ clip_annotations: Sequence[data.ClipAnnotation],
+ predictions: Sequence[ClipDetections],
+ ) -> EvaluationOutput: ...
+
+ def compute_metrics(
+ self,
+ eval_outputs: EvaluationOutput,
+ ) -> dict[str, float]: ...
+
+ def generate_plots(
+ self,
+ eval_outputs: EvaluationOutput,
+ ) -> Iterable[tuple[str, Figure]]: ...
+
+
+class EvaluatorProtocol(Protocol, Generic[EvaluationOutput]):
+ targets: TargetProtocol
+ transform: OutputTransformProtocol
+
+ def to_clip_detections_batch(
+ self,
+ clip_detections: Sequence[ClipDetectionsTensor],
+ clips: Sequence[data.Clip],
+ ) -> list[ClipDetections]: ...
+
+ def evaluate(
+ self,
+ clip_annotations: Sequence[data.ClipAnnotation],
+ predictions: Sequence[ClipDetections],
+ ) -> EvaluationOutput: ...
+
+ def compute_metrics(
+ self,
+ eval_outputs: EvaluationOutput,
+ ) -> dict[str, float]: ...
+
+ def generate_plots(
+ self,
+ eval_outputs: EvaluationOutput,
+ ) -> Iterable[tuple[str, Figure]]: ...
diff --git a/batdetect2/utils/__init__.py b/src/batdetect2/finetune/__init__.py
similarity index 100%
rename from batdetect2/utils/__init__.py
rename to src/batdetect2/finetune/__init__.py
diff --git a/batdetect2/finetune/finetune_model.py b/src/batdetect2/finetune/finetune_model.py
similarity index 50%
rename from batdetect2/finetune/finetune_model.py
rename to src/batdetect2/finetune/finetune_model.py
index 77a2711c..5e4db807 100644
--- a/batdetect2/finetune/finetune_model.py
+++ b/src/batdetect2/finetune/finetune_model.py
@@ -1,33 +1,31 @@
import argparse
-import glob
-import json
import os
-import sys
+import warnings
+from typing import List
-import matplotlib.pyplot as plt
-import numpy as np
import torch
-import torch.nn.functional as F
+import torch.utils.data
from torch.optim.lr_scheduler import CosineAnnealingLR
-import batdetect2.detector.models as models
import batdetect2.detector.parameters as parameters
-import batdetect2.detector.post_process as pp
-import batdetect2.train.audio_dataloader as adl
-import batdetect2.train.evaluate as evl
+import batdetect2.train.legacy.audio_dataloader as adl
+import batdetect2.train.legacy.train_model as tm
+import batdetect2.train.legacy.train_utils as tu
import batdetect2.train.losses as losses
-import batdetect2.train.train_model as tm
-import batdetect2.train.train_utils as tu
import batdetect2.utils.detector_utils as du
import batdetect2.utils.plot_utils as pu
+from batdetect2 import types
+from batdetect2.detector.models import Net2DFast
-if __name__ == "__main__":
- info_str = "\nBatDetect - Finetune Model\n"
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
- print(info_str)
+
+def parse_arugments():
parser = argparse.ArgumentParser()
parser.add_argument(
- "audio_path", type=str, help="Input directory for audio"
+ "audio_path",
+ type=str,
+ help="Input directory for audio",
)
parser.add_argument(
"train_ann_path",
@@ -39,7 +37,15 @@
type=str,
help="Path to where test annotation file is stored",
)
- parser.add_argument("model_path", type=str, help="Path to pretrained model")
+ parser.add_argument(
+ "model_path", type=str, help="Path to pretrained model"
+ )
+ parser.add_argument(
+ "--experiment_dir",
+ type=str,
+ default=os.path.join(BASE_DIR, "experiments"),
+ help="Path to where experiment files are stored",
+ )
parser.add_argument(
"--op_model_name",
type=str,
@@ -71,107 +77,64 @@
parser.add_argument(
"--notes", type=str, default="", help="Notes to save in text file"
)
- args = vars(parser.parse_args())
+ args = parser.parse_args()
+ return args
- params = parameters.get_params(True, "../../experiments/")
- if torch.cuda.is_available():
- params["device"] = "cuda"
- else:
- params["device"] = "cpu"
- print(
- "\nNote, this will be a lot faster if you use computer with a GPU.\n"
- )
- print("\nAudio directory: " + args["audio_path"])
- print("Train file: " + args["train_ann_path"])
- print("Test file: " + args["test_ann_path"])
- print("Loading model: " + args["model_path"])
+def select_device(warn=True) -> str:
+ if torch.cuda.is_available():
+ return "cuda"
- dataset_name = (
- os.path.basename(args["train_ann_path"])
- .replace(".json", "")
- .replace("_TRAIN", "")
- )
+ if warn:
+ warnings.warn(
+ "No GPU available, using the CPU instead. Please consider using a GPU "
+ "to speed up training.",
+ stacklevel=2,
+ )
- if args["train_from_scratch"]:
- print("\nTraining model from scratch i.e. not using pretrained weights")
- model, params_train = du.load_model(args["model_path"], False)
- else:
- model, params_train = du.load_model(args["model_path"], True)
- model.to(params["device"])
+ return "cpu"
- params["num_epochs"] = args["num_epochs"]
- if args["op_model_name"] != "":
- params["model_file_name"] = args["op_model_name"]
- classes_to_ignore = params["classes_to_ignore"] + params["generic_class"]
- # save notes file
- params["notes"] = args["notes"]
- if args["notes"] != "":
- tu.write_notes_file(params["experiment"] + "notes.txt", args["notes"])
-
- # load train annotations
- train_sets = []
+def load_annotations(
+ dataset_name: str,
+ ann_path: str,
+ audio_path: str,
+ classes_to_ignore: List[str] | None = None,
+ events_of_interest: List[str] | None = None,
+) -> List[types.FileAnnotation]:
+ train_sets: List[types.DatasetDict] = []
train_sets.append(
- tu.get_blank_dataset_dict(
- dataset_name, False, args["train_ann_path"], args["audio_path"]
- )
- )
- params["train_sets"] = [
tu.get_blank_dataset_dict(
dataset_name,
- False,
- os.path.basename(args["train_ann_path"]),
- args["audio_path"],
+ is_test=False,
+ ann_path=ann_path,
+ wav_path=audio_path,
)
- ]
-
- print("\nTrain set:")
- (
- data_train,
- params["class_names"],
- params["class_inv_freq"],
- ) = tu.load_set_of_anns(
- train_sets, classes_to_ignore, params["events_of_interest"]
)
- print("Number of files", len(data_train))
- params["genus_names"], params["genus_mapping"] = tu.get_genus_mapping(
- params["class_names"]
- )
- params["class_names_short"] = tu.get_short_class_names(
- params["class_names"]
+ return tu.load_set_of_anns(
+ train_sets,
+ events_of_interest=events_of_interest,
+ classes_to_ignore=classes_to_ignore,
)
- # load test annotations
- test_sets = []
- test_sets.append(
- tu.get_blank_dataset_dict(
- dataset_name, True, args["test_ann_path"], args["audio_path"]
- )
- )
- params["test_sets"] = [
- tu.get_blank_dataset_dict(
- dataset_name,
- True,
- os.path.basename(args["test_ann_path"]),
- args["audio_path"],
- )
- ]
-
- print("\nTest set:")
- data_test, _, _ = tu.load_set_of_anns(
- test_sets, classes_to_ignore, params["events_of_interest"]
- )
- print("Number of files", len(data_test))
+def finetune_model(
+ model: types.DetectionModel,
+ data_train: List[types.FileAnnotation],
+ data_test: List[types.FileAnnotation],
+ params: parameters.TrainingParameters,
+ model_params: types.ModelParameters,
+ finetune_only_last_layer: bool = False,
+ save_images: bool = True,
+):
# train loader
train_dataset = adl.AudioLoader(data_train, params, is_train=True)
train_loader = torch.utils.data.DataLoader(
train_dataset,
- batch_size=params["batch_size"],
+ batch_size=params.batch_size,
shuffle=True,
- num_workers=params["num_workers"],
+ num_workers=params.num_workers,
pin_memory=True,
)
@@ -181,32 +144,36 @@
test_dataset,
batch_size=1,
shuffle=False,
- num_workers=params["num_workers"],
+ num_workers=params.num_workers,
pin_memory=True,
)
inputs_train = next(iter(train_loader))
- params["ip_height"] = inputs_train["spec"].shape[2]
+ params.ip_height = inputs_train["spec"].shape[2]
print("\ntrain batch size :", inputs_train["spec"].shape)
- assert params_train["model_name"] == "Net2DFast"
+ # Check that the model is the same as the one used to train the pretrained
+ # weights
+ assert model_params["model_name"] == "Net2DFast"
+ assert isinstance(model, Net2DFast)
print(
- "\n\nSOME hyperparams need to be the same as the loaded model (e.g. FFT) - currently they are getting overwritten.\n\n"
+ "\n\nSOME hyperparams need to be the same as the loaded model "
+ "(e.g. FFT) - currently they are getting overwritten.\n\n"
)
# set the number of output classes
num_filts = model.conv_classes_op.in_channels
- k_size = model.conv_classes_op.kernel_size
- pad = model.conv_classes_op.padding
+ (k_size,) = model.conv_classes_op.kernel_size
+ (pad,) = model.conv_classes_op.padding
model.conv_classes_op = torch.nn.Conv2d(
num_filts,
- len(params["class_names"]) + 1,
+ len(params.class_names) + 1,
kernel_size=k_size,
padding=pad,
)
- model.conv_classes_op.to(params["device"])
+ model.conv_classes_op.to(params.device)
- if args["finetune_only_last_layer"]:
+ if finetune_only_last_layer:
print("\nOnly finetuning the final layers.\n")
train_layers_i = [
"conv_classes",
@@ -223,19 +190,26 @@
else:
param.requires_grad = False
- optimizer = torch.optim.Adam(model.parameters(), lr=params["lr"])
+ optimizer = torch.optim.Adam(
+ model.parameters(),
+ lr=params.lr,
+ )
scheduler = CosineAnnealingLR(
- optimizer, params["num_epochs"] * len(train_loader)
+ optimizer,
+ params.num_epochs * len(train_loader),
)
- if params["train_loss"] == "mse":
+
+ if params.train_loss == "mse":
det_criterion = losses.mse_loss
- elif params["train_loss"] == "focal":
+ elif params.train_loss == "focal":
det_criterion = losses.focal_loss
+ else:
+ raise ValueError("Unknown loss function")
# plotting
train_plt_ls = pu.LossPlotter(
- params["experiment"] + "train_loss.png",
- params["num_epochs"] + 1,
+ params.experiment / "train_loss.png",
+ params.num_epochs + 1,
["train_loss"],
None,
None,
@@ -243,8 +217,8 @@
logy=True,
)
test_plt_ls = pu.LossPlotter(
- params["experiment"] + "test_loss.png",
- params["num_epochs"] + 1,
+ params.experiment / "test_loss.png",
+ params.num_epochs + 1,
["test_loss"],
None,
None,
@@ -252,24 +226,24 @@
logy=True,
)
test_plt = pu.LossPlotter(
- params["experiment"] + "test.png",
- params["num_epochs"] + 1,
+ params.experiment / "test.png",
+ params.num_epochs + 1,
["avg_prec", "rec_at_x", "avg_prec_class", "file_acc", "top_class"],
[0, 1],
None,
["epoch", ""],
)
test_plt_class = pu.LossPlotter(
- params["experiment"] + "test_avg_prec.png",
- params["num_epochs"] + 1,
- params["class_names_short"],
+ params.experiment / "test_avg_prec.png",
+ params.num_epochs + 1,
+ params.class_names_short,
[0, 1],
- params["class_names_short"],
+ params.class_names_short,
["epoch", "avg_prec"],
)
# main train loop
- for epoch in range(0, params["num_epochs"] + 1):
+ for epoch in range(0, params.num_epochs + 1):
train_loss = tm.train(
model,
epoch,
@@ -281,10 +255,14 @@
)
train_plt_ls.update_and_save(epoch, [train_loss["train_loss"]])
- if epoch % params["num_eval_epochs"] == 0:
+ if epoch % params.num_eval_epochs == 0:
# detection accuracy on test set
test_res, test_loss = tm.test(
- model, epoch, test_loader, det_criterion, params
+ model,
+ epoch,
+ test_loader,
+ det_criterion,
+ params,
)
test_plt_ls.update_and_save(epoch, [test_loss["test_loss"]])
test_plt.update_and_save(
@@ -301,18 +279,106 @@
epoch, [rs["avg_prec"] for rs in test_res["class_pr"]]
)
pu.plot_pr_curve_class(
- params["experiment"], "test_pr", "test_pr", test_res
+ params.experiment, "test_pr", "test_pr", test_res
)
# save finetuned model
- print("saving model to: " + params["model_file_name"])
+ print(f"saving model to: {params.model_file_name}")
op_state = {
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"params": params,
}
- torch.save(op_state, params["model_file_name"])
+ torch.save(op_state, params.model_file_name)
# save an image with associated prediction for each batch in the test set
- if not args["do_not_save_images"]:
+ if save_images:
tm.save_images_batch(model, test_loader, params)
+
+
+def main():
+ info_str = "\nBatDetect - Finetune Model\n"
+ print(info_str)
+
+ args = parse_arugments()
+
+ # Load experiment parameters
+ params = parameters.get_params(
+ make_dirs=True,
+ exps_dir=args.experiment_dir,
+ device=select_device(),
+ num_epochs=args.num_epochs,
+ notes=args.notes,
+ )
+
+ print("\nAudio directory: " + args.audio_path)
+ print("Train file: " + args.train_ann_path)
+ print("Test file: " + args.test_ann_path)
+ print("Loading model: " + args.model_path)
+
+ if args.train_from_scratch:
+ print(
+ "\nTraining model from scratch i.e. not using pretrained weights"
+ )
+
+ model, model_params = du.load_model(
+ args.model_path,
+ load_weights=not args.train_from_scratch,
+ device=params.device,
+ )
+
+ if args.op_model_name != "":
+ params.model_file_name = args.op_model_name
+
+ classes_to_ignore = params.classes_to_ignore + params.generic_class
+
+ # save notes file
+ if params.notes:
+ tu.write_notes_file(
+ params.experiment / "notes.txt",
+ args.notes,
+ )
+
+ # Derive the dataset name from the train annotation filename
+ dataset_name = (
+ os.path.basename(args.train_ann_path)
+ .replace(".json", "")
+ .replace("_TRAIN", "")
+ )
+
+ # ==== LOAD DATA ====
+
+ # load train annotations
+ data_train = load_annotations(
+ dataset_name,
+ args.train_ann_path,
+ args.audio_path,
+ classes_to_ignore, params.events_of_interest,
+ )
+ print("\nTrain set:")
+ print("Number of files", len(data_train))
+
+ # load test annotations
+ data_test = load_annotations(
+ dataset_name,
+ args.test_ann_path,
+ args.audio_path,
+ classes_to_ignore,
+ params.events_of_interest,
+ )
+ print("\nTest set:")
+ print("Number of files", len(data_test))
+
+ finetune_model(
+ model,
+ data_train,
+ data_test,
+ params,
+ model_params,
+ finetune_only_last_layer=args.finetune_only_last_layer,
+ save_images=not args.do_not_save_images,
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/src/batdetect2/finetune/prep_data_finetune.py b/src/batdetect2/finetune/prep_data_finetune.py
new file mode 100644
index 00000000..87d7ba37
--- /dev/null
+++ b/src/batdetect2/finetune/prep_data_finetune.py
@@ -0,0 +1,234 @@
+import argparse
+import json
+import os
+from collections import Counter
+
+import numpy as np
+from sklearn.model_selection import StratifiedGroupKFold
+
+import batdetect2.train.train_utils as tu
+from batdetect2 import types
+
+
+def print_dataset_stats(
+ data: list[types.FileAnnotation],
+ classes_to_ignore: list[str] | None = None,
+) -> Counter[str]:
+ print("Num files:", len(data))
+ counts, _ = tu.get_class_names(data, classes_to_ignore)
+ if len(counts) > 0:
+ tu.report_class_counts(counts)
+ return counts
+
+
+def load_file_names(file_name: str) -> list[str]:
+    if not os.path.isfile(file_name):
+        raise FileNotFoundError(f"Input file not found - {file_name}")
+
+    with open(file_name) as da:
+        files = [line.rstrip() for line in da.readlines()]
+
+    for path in files:
+        if not path.lower().endswith(".wav"):  # require the dot: the old "wav" suffix check accepted e.g. "foowav"
+            raise ValueError(
+                f"Invalid file name - {path}. Must be a .wav file"
+            )
+
+    return files
+
+
+def parse_args():
+ info_str = "\nBatDetect - Prepare Data for Finetuning\n"
+ print(info_str)
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "dataset_name", type=str, help="Name to call your dataset"
+ )
+ parser.add_argument(
+ "audio_dir", type=str, help="Input directory for audio"
+ )
+ parser.add_argument(
+ "ann_dir",
+ type=str,
+ help="Input directory for where the audio annotations are stored",
+ )
+ parser.add_argument(
+ "op_dir",
+ type=str,
+ help="Path where the train and test splits will be stored",
+ )
+ parser.add_argument(
+ "--percent_val",
+ type=float,
+ default=0.20,
+ help="Hold out this much data for validation. Should be number between 0 and 1",
+ )
+ parser.add_argument(
+ "--rand_seed",
+ type=int,
+ default=2001,
+ help="Random seed used for creating the validation split",
+ )
+ parser.add_argument(
+ "--train_file",
+ type=str,
+ default="",
+ help="Text file where each line is a wav file in train split",
+ )
+ parser.add_argument(
+ "--test_file",
+ type=str,
+ default="",
+ help="Text file where each line is a wav file in test split",
+ )
+ parser.add_argument(
+ "--input_class_names",
+ type=str,
+ default="",
+ help='Specify names of classes that you want to change. Separate with ";"',
+ )
+ parser.add_argument(
+ "--output_class_names",
+ type=str,
+ default="",
+ help='New class names to use instead. One to one mapping with "--input_class_names". \
+ Separate with ";"',
+ )
+ return parser.parse_args()
+
+
+def split_data(
+    data: list[types.FileAnnotation],
+    train_file: str,
+    test_file: str,
+    n_splits: int = 5,
+    random_state: int = 0,
+) -> tuple[list[types.FileAnnotation], list[types.FileAnnotation]]:
+    if train_file != "" and test_file != "":
+        # user has specified the train / test split
+        mapping = {
+            file_annotation["id"]: file_annotation for file_annotation in data
+        }
+        train_files = load_file_names(train_file)  # NOTE(review): assumes each listed wav name equals an annotation "id" — confirm
+        test_files = load_file_names(test_file)
+        data_train = [
+            mapping[file_id] for file_id in train_files if file_id in mapping
+        ]
+        data_test = [
+            mapping[file_id] for file_id in test_files if file_id in mapping
+        ]
+        return data_train, data_test
+
+    # NOTE: Using StratifiedGroupKFold to ensure that the same file does not
+    # appear in both the training and test sets and trying to keep the
+    # distribution of classes the same in both sets.
+    splitter = StratifiedGroupKFold(
+        n_splits=n_splits,
+        shuffle=True,
+        random_state=random_state,
+    )
+    anns = np.array(
+        [
+            [dd["id"], ann["class"], ann["event"]]
+            for dd in data
+            for ann in dd["annotation"]
+        ]
+    )
+    y = anns[:, 1]  # stratify on class label
+    group = anns[:, 0]  # group by file id so a file never straddles the split
+
+    train_idx, test_idx = next(splitter.split(X=anns, y=y, groups=group))  # only the first fold is used
+    train_ids = set(anns[train_idx, 0])
+    test_ids = set(anns[test_idx, 0])
+
+    assert not (train_ids & test_ids)
+    data_train = [dd for dd in data if dd["id"] in train_ids]
+    data_test = [dd for dd in data if dd["id"] in test_ids]
+    return data_train, data_test
+
+
+def main():
+    args = parse_args()
+
+    np.random.seed(args.rand_seed)
+
+    classes_to_ignore = ["", " ", "Unknown", "Not Bat"]
+    events_of_interest = ["Echolocation"]
+
+    name_dict = None
+    if args.input_class_names != "" and args.output_class_names != "":
+        # change the names of the classes
+        ip_names = args.input_class_names.split(";")
+        op_names = args.output_class_names.split(";")
+        name_dict = dict(zip(ip_names, op_names, strict=False))  # NOTE(review): strict=True would catch mismatched name lists
+
+    # load annotations
+    data_all = tu.load_set_of_anns(
+        [
+            {
+                "dataset_name": args.dataset_name,
+                "ann_path": args.ann_dir,
+                "wav_path": args.audio_dir,
+                "is_test": False,
+                "is_binary": False,
+            }
+        ],
+        classes_to_ignore=classes_to_ignore,
+        events_of_interest=events_of_interest,
+        convert_to_genus=False,
+        filter_issues=True,
+        name_replace=name_dict,
+    )
+
+    print("Dataset name: " + args.dataset_name)
+    print("Audio directory: " + args.audio_dir)
+    print("Annotation directory: " + args.ann_dir)
+    print("Output directory: " + args.op_dir)
+    print("Num annotated files: " + str(len(data_all)))
+
+    data_train, data_test = split_data(
+        data=data_all,
+        train_file=args.train_file,
+        test_file=args.test_file,
+        n_splits=5,
+        random_state=args.rand_seed,
+    )
+
+    if not os.path.isdir(args.op_dir):
+        os.makedirs(args.op_dir)
+    op_name = os.path.join(args.op_dir, args.dataset_name)
+    op_name_train = op_name + "_TRAIN.json"
+    op_name_test = op_name + "_TEST.json"
+
+    print("\nSplit: Train")
+    class_un_train = print_dataset_stats(data_train, classes_to_ignore)
+
+    print("\nSplit: Test")
+    class_un_test = print_dataset_stats(data_test, classes_to_ignore)
+
+    if len(data_train) > 0 and len(data_test) > 0:
+        if set(class_un_train.keys()) != set(class_un_test.keys()):
+            raise RuntimeError(
+                "Error: some classes are not in both the training and test sets. "
+                'Try a different random seed "--rand_seed".'
+            )
+
+    print("\n")
+    if len(data_train) == 0:
+        print("No train annotations to save")
+    else:
+        print("Saving: ", op_name_train)
+        with open(op_name_train, "w") as da:
+            json.dump(data_train, da, indent=2)
+
+    if len(data_test) == 0:
+        print("No test annotations to save")
+    else:
+        print("Saving: ", op_name_test)
+        with open(op_name_test, "w") as da:
+            json.dump(data_test, da, indent=2)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/batdetect2/finetune/readme.md b/src/batdetect2/finetune/readme.md
similarity index 100%
rename from batdetect2/finetune/readme.md
rename to src/batdetect2/finetune/readme.md
diff --git a/src/batdetect2/inference/__init__.py b/src/batdetect2/inference/__init__.py
new file mode 100644
index 00000000..5cd37bc1
--- /dev/null
+++ b/src/batdetect2/inference/__init__.py
@@ -0,0 +1,10 @@
+from batdetect2.inference.batch import process_file_list, run_batch_inference
+from batdetect2.inference.clips import get_clips_from_files
+from batdetect2.inference.config import InferenceConfig
+
+__all__ = [
+ "process_file_list",
+ "run_batch_inference",
+ "InferenceConfig",
+ "get_clips_from_files",
+]
diff --git a/src/batdetect2/inference/batch.py b/src/batdetect2/inference/batch.py
new file mode 100644
index 00000000..d91df296
--- /dev/null
+++ b/src/batdetect2/inference/batch.py
@@ -0,0 +1,126 @@
+from typing import Sequence
+
+from lightning import Trainer
+from soundevent import data
+
+from batdetect2.audio import AudioConfig
+from batdetect2.audio.loader import build_audio_loader
+from batdetect2.audio.types import AudioLoader
+from batdetect2.inference.clips import get_clips_from_files
+from batdetect2.inference.config import InferenceConfig
+from batdetect2.inference.dataset import build_inference_loader
+from batdetect2.inference.lightning import InferenceModule
+from batdetect2.models.types import ModelProtocol
+from batdetect2.outputs import (
+ OutputsConfig,
+ OutputTransformProtocol,
+ build_output_transform,
+)
+from batdetect2.postprocess.types import ClipDetections
+from batdetect2.preprocess.types import PreprocessorProtocol
+from batdetect2.targets.types import ROIMapperProtocol, TargetProtocol
+
+
+def run_batch_inference(
+    model: ModelProtocol,
+    clips: Sequence[data.Clip],
+    targets: TargetProtocol | None = None,
+    roi_mapper: ROIMapperProtocol | None = None,
+    audio_loader: AudioLoader | None = None,
+    preprocessor: PreprocessorProtocol | None = None,
+    audio_config: AudioConfig | None = None,
+    output_transform: OutputTransformProtocol | None = None,
+    output_config: OutputsConfig | None = None,
+    inference_config: InferenceConfig | None = None,
+    detection_threshold: float | None = None,
+    num_workers: int = 1,
+    batch_size: int | None = None,
+) -> list[ClipDetections]:
+    audio_config = audio_config or AudioConfig(
+        samplerate=model.preprocessor.input_samplerate,
+    )
+    output_config = output_config or OutputsConfig()
+    inference_config = inference_config or InferenceConfig()
+
+    audio_loader = audio_loader or build_audio_loader(config=audio_config)
+
+    preprocessor = preprocessor or model.preprocessor
+
+    if output_transform is None and targets is None:  # targets only needed to build a transform (mirrors InferenceModule)
+        raise ValueError(
+            "targets must be provided when no output_transform is given."
+        )
+
+    if output_transform is None and roi_mapper is None:
+        raise ValueError(
+            "roi_mapper must be provided when no output_transform is given."
+        )
+
+    output_transform = output_transform or build_output_transform(
+        config=output_config.transform,
+        targets=targets,
+        roi_mapper=roi_mapper,
+    )
+
+    loader = build_inference_loader(
+        clips,
+        audio_loader=audio_loader,
+        preprocessor=preprocessor,
+        config=inference_config.loader,
+        num_workers=num_workers,
+        batch_size=batch_size,
+    )
+
+    module = InferenceModule(
+        model,
+        output_transform=output_transform,
+        detection_threshold=detection_threshold,
+    )
+    trainer = Trainer(enable_checkpointing=False, logger=False)
+    outputs = trainer.predict(module, loader)
+    return [
+        clip_prediction
+        for clip_predictions in outputs  # type: ignore
+        for clip_prediction in clip_predictions
+    ]
+
+
+def process_file_list(
+ model: ModelProtocol,
+ paths: Sequence[data.PathLike],
+ targets: TargetProtocol | None = None,
+ roi_mapper: ROIMapperProtocol | None = None,
+ audio_loader: AudioLoader | None = None,
+ audio_config: AudioConfig | None = None,
+ preprocessor: PreprocessorProtocol | None = None,
+ inference_config: InferenceConfig | None = None,
+ output_config: OutputsConfig | None = None,
+ output_transform: OutputTransformProtocol | None = None,
+ detection_threshold: float | None = None,
+ batch_size: int | None = None,
+ num_workers: int = 0,
+) -> list[ClipDetections]:
+ inference_config = inference_config or InferenceConfig()
+ clip_config = inference_config.clipping
+ clips = get_clips_from_files(
+ paths,
+ duration=clip_config.duration,
+ overlap=clip_config.overlap,
+ max_empty=clip_config.max_empty,
+ discard_empty=clip_config.discard_empty,
+ )
+ return run_batch_inference(
+ model,
+ clips,
+ targets=targets,
+ roi_mapper=roi_mapper,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ batch_size=batch_size,
+ num_workers=num_workers,
+ output_config=output_config,
+ audio_config=audio_config,
+ output_transform=output_transform,
+ inference_config=inference_config,
+ detection_threshold=detection_threshold,
+ )
diff --git a/src/batdetect2/inference/clips.py b/src/batdetect2/inference/clips.py
new file mode 100644
index 00000000..0f486d2c
--- /dev/null
+++ b/src/batdetect2/inference/clips.py
@@ -0,0 +1,75 @@
+from typing import List, Sequence
+from uuid import uuid5
+
+import numpy as np
+from soundevent import data
+
+
+def get_clips_from_files(
+ paths: Sequence[data.PathLike],
+ duration: float,
+ overlap: float = 0.0,
+ max_empty: float = 0.0,
+ discard_empty: bool = True,
+ compute_hash: bool = False,
+) -> List[data.Clip]:
+ clips: List[data.Clip] = []
+
+ for path in paths:
+ recording = data.Recording.from_file(path, compute_hash=compute_hash)
+ clips.extend(
+ get_recording_clips(
+ recording,
+ duration,
+ overlap=overlap,
+ max_empty=max_empty,
+ discard_empty=discard_empty,
+ )
+ )
+
+ return clips
+
+
+def get_recording_clips(
+    recording: data.Recording,
+    duration: float,
+    overlap: float = 0.0,
+    max_empty: float = 0.0,
+    discard_empty: bool = True,
+) -> Sequence[data.Clip]:
+    start_time = 0  # offset (seconds) of the first clip within the recording
+    recording_duration = recording.duration
+    hop = duration * (1 - overlap)  # stride between consecutive clip starts
+
+    num_clips = int(np.ceil(recording_duration / hop))
+
+    if num_clips == 0:
+        # This should only happen if the clip's duration is zero,
+        # which should never happen in practice, but just in case...
+        return []
+
+    clips = []
+    for i in range(num_clips):
+        start = start_time + i * hop
+        end = start + duration
+
+        if end > recording_duration:  # clip extends past the end of the audio
+            empty_duration = end - recording_duration
+
+            if empty_duration > max_empty and discard_empty:
+                # Discard clips that contain too much empty space
+                continue
+
+        clips.append(
+            data.Clip(
+                uuid=uuid5(recording.uuid, f"{start}_{end}"),
+                recording=recording,
+                start_time=start,
+                end_time=end,
+            )
+        )
+
+    if discard_empty:
+        clips = [clip for clip in clips if clip.duration > max_empty]  # NOTE(review): every clip has duration == `duration`, so this filter is all-or-nothing — confirm intent
+
+    return clips
diff --git a/src/batdetect2/inference/config.py b/src/batdetect2/inference/config.py
new file mode 100644
index 00000000..1db715da
--- /dev/null
+++ b/src/batdetect2/inference/config.py
@@ -0,0 +1,21 @@
+from pydantic import Field
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.inference.dataset import InferenceLoaderConfig
+
+__all__ = ["InferenceConfig"]
+
+
+class ClipingConfig(BaseConfig):  # NOTE(review): typo — should be "ClippingConfig"; rename before it becomes public API
+    enabled: bool = True  # NOTE(review): not consulted by the code shown (batch.py always clips) — confirm
+    duration: float = 0.5  # clip length in seconds (NOTE(review): dataset.py defaults to 0.512 — confirm intended value)
+    overlap: float = 0.0  # fraction of `duration` shared by consecutive clips
+    max_empty: float = 0.0  # max seconds a clip may extend past the recording end
+    discard_empty: bool = True  # drop clips that exceed max_empty
+
+
+class InferenceConfig(BaseConfig):
+ loader: InferenceLoaderConfig = Field(
+ default_factory=InferenceLoaderConfig
+ )
+ clipping: ClipingConfig = Field(default_factory=ClipingConfig)
diff --git a/src/batdetect2/inference/dataset.py b/src/batdetect2/inference/dataset.py
new file mode 100644
index 00000000..aaeae7f3
--- /dev/null
+++ b/src/batdetect2/inference/dataset.py
@@ -0,0 +1,122 @@
+from typing import NamedTuple, Sequence
+
+import torch
+from loguru import logger
+from soundevent import data
+from torch.utils.data import DataLoader, Dataset
+
+from batdetect2.audio import build_audio_loader
+from batdetect2.audio.types import AudioLoader
+from batdetect2.core import BaseConfig
+from batdetect2.core.arrays import adjust_width
+from batdetect2.preprocess import build_preprocessor
+from batdetect2.preprocess.types import PreprocessorProtocol
+
+__all__ = [
+ "InferenceDataset",
+ "build_inference_dataset",
+ "build_inference_loader",
+]
+
+
+DEFAULT_INFERENCE_CLIP_DURATION = 0.512
+
+
+class DatasetItem(NamedTuple):
+ spec: torch.Tensor
+ idx: torch.Tensor
+ start_time: torch.Tensor
+ end_time: torch.Tensor
+
+
+class InferenceDataset(Dataset[DatasetItem]):
+ clips: list[data.Clip]
+
+ def __init__(
+ self,
+ clips: Sequence[data.Clip],
+ audio_loader: AudioLoader,
+ preprocessor: PreprocessorProtocol,
+ audio_dir: data.PathLike | None = None,
+ ):
+ self.clips = list(clips)
+ self.preprocessor = preprocessor
+ self.audio_loader = audio_loader
+ self.audio_dir = audio_dir
+
+ def __len__(self):
+ return len(self.clips)
+
+ def __getitem__(self, index: int) -> DatasetItem:
+ clip = self.clips[index]
+ wav = self.audio_loader.load_clip(clip, audio_dir=self.audio_dir)
+ wav_tensor = torch.tensor(wav).unsqueeze(0)
+ spectrogram = self.preprocessor(wav_tensor)
+ return DatasetItem(
+ spec=spectrogram,
+ idx=torch.tensor(index),
+ start_time=torch.tensor(clip.start_time),
+ end_time=torch.tensor(clip.end_time),
+ )
+
+
+class InferenceLoaderConfig(BaseConfig):
+ batch_size: int = 8
+
+
+def build_inference_loader(
+ clips: Sequence[data.Clip],
+ audio_loader: AudioLoader | None = None,
+ preprocessor: PreprocessorProtocol | None = None,
+ config: InferenceLoaderConfig | None = None,
+ num_workers: int = 0,
+ batch_size: int | None = None,
+) -> DataLoader[DatasetItem]:
+ logger.info("Building inference data loader...")
+ config = config or InferenceLoaderConfig()
+
+ inference_dataset = build_inference_dataset(
+ clips,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ )
+
+ batch_size = batch_size or config.batch_size
+
+ return DataLoader(
+ inference_dataset,
+ batch_size=batch_size,
+ shuffle=False,
+ num_workers=num_workers,
+ collate_fn=_collate_fn,
+ )
+
+
+def build_inference_dataset(
+ clips: Sequence[data.Clip],
+ audio_loader: AudioLoader | None = None,
+ preprocessor: PreprocessorProtocol | None = None,
+) -> InferenceDataset:
+ if audio_loader is None:
+ audio_loader = build_audio_loader()
+
+ if preprocessor is None:
+ preprocessor = build_preprocessor()
+
+ return InferenceDataset(
+ clips,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ )
+
+
+def _collate_fn(batch: list[DatasetItem]) -> DatasetItem:
+ max_width = max(item.spec.shape[-1] for item in batch)
+ return DatasetItem(
+ spec=torch.stack(
+ [adjust_width(item.spec, max_width) for item in batch]
+ ),
+ idx=torch.stack([item.idx for item in batch]),
+ start_time=torch.stack([item.start_time for item in batch]),
+ end_time=torch.stack([item.end_time for item in batch]),
+ )
diff --git a/src/batdetect2/inference/lightning.py b/src/batdetect2/inference/lightning.py
new file mode 100644
index 00000000..d879d837
--- /dev/null
+++ b/src/batdetect2/inference/lightning.py
@@ -0,0 +1,73 @@
+from typing import Sequence
+
+from lightning import LightningModule
+from torch.utils.data import DataLoader
+
+from batdetect2.inference.dataset import DatasetItem, InferenceDataset
+from batdetect2.models.types import ModelProtocol
+from batdetect2.outputs import OutputTransformProtocol, build_output_transform
+from batdetect2.postprocess.types import ClipDetections
+from batdetect2.targets.types import ROIMapperProtocol, TargetProtocol
+
+
+class InferenceModule(LightningModule):
+ def __init__(
+ self,
+ model: ModelProtocol,
+ targets: TargetProtocol | None = None,
+ roi_mapper: ROIMapperProtocol | None = None,
+ output_transform: OutputTransformProtocol | None = None,
+ detection_threshold: float | None = None,
+ ):
+ super().__init__()
+ self.model = model
+ self.detection_threshold = detection_threshold
+
+ if output_transform is None and targets is None:
+ raise ValueError(
+ "targets must be provided when building inference output "
+ "transforms."
+ )
+
+ if output_transform is None and roi_mapper is None:
+ raise ValueError(
+ "roi_mapper must be provided when building inference output "
+ "transforms."
+ )
+
+ self.output_transform = output_transform or build_output_transform(
+ targets=targets,
+ roi_mapper=roi_mapper,
+ )
+
+ def predict_step(
+ self,
+ batch: DatasetItem,
+ batch_idx: int,
+ dataloader_idx: int = 0,
+ ) -> Sequence[ClipDetections]:
+ dataset = self.get_dataset()
+
+ clips = [dataset.clips[int(example_idx)] for example_idx in batch.idx]
+
+ outputs = self.model.detector(batch.spec)
+
+ clip_detections = self.model.postprocessor(
+ outputs,
+ detection_threshold=self.detection_threshold,
+ )
+
+ return [
+ self.output_transform.to_clip_detections(
+ detections=clip_dets,
+ clip=clip,
+ )
+ for clip, clip_dets in zip(clips, clip_detections, strict=True)
+ ]
+
+ def get_dataset(self) -> InferenceDataset:
+ dataloaders = self.trainer.predict_dataloaders
+ assert isinstance(dataloaders, DataLoader)
+ dataset = dataloaders.dataset
+ assert isinstance(dataset, InferenceDataset)
+ return dataset
diff --git a/src/batdetect2/logging.py b/src/batdetect2/logging.py
new file mode 100644
index 00000000..58374070
--- /dev/null
+++ b/src/batdetect2/logging.py
@@ -0,0 +1,441 @@
+from __future__ import annotations
+
+import io
+import sys
+from collections.abc import Callable
+from functools import partial
+from pathlib import Path
+from typing import (
+ TYPE_CHECKING,
+ Annotated,
+ Any,
+ Dict,
+ Generic,
+ Literal,
+ Protocol,
+ TypeVar,
+)
+
+from loguru import logger
+from pydantic import Field
+
+from batdetect2.core.configs import BaseConfig
+
+if TYPE_CHECKING:
+ import numpy as np
+ import pandas as pd
+ from lightning.pytorch.loggers import Logger
+ from matplotlib.figure import Figure
+ from soundevent import data
+
+DEFAULT_LOGS_DIR: Path = Path("outputs") / "logs"
+
+__all__ = [
+ "AppLoggingConfig",
+ "BaseLoggerConfig",
+ "CSVLoggerConfig",
+ "DEFAULT_LOGS_DIR",
+ "DVCLiveConfig",
+ "LoggerConfig",
+ "MLFlowLoggerConfig",
+ "LoggingCallback",
+ "TensorBoardLoggerConfig",
+ "build_logger",
+ "enable_logging",
+ "get_image_logger",
+ "get_table_logger",
+ "log_artifact_file",
+ "log_config_artifact",
+ "log_csv_artifact",
+]
+
+
+def enable_logging(level: int):
+ logger.remove()
+
+ if level == 0:
+ log_level = "WARNING"
+ elif level == 1:
+ log_level = "INFO"
+ else:
+ log_level = "DEBUG"
+
+ logger.add(sys.stderr, level=log_level)
+ logger.enable("batdetect2")
+
+
+class BaseLoggerConfig(BaseConfig):
+ log_dir: Path = DEFAULT_LOGS_DIR
+ experiment_name: str | None = None
+ run_name: str | None = None
+
+
+class DVCLiveConfig(BaseLoggerConfig):
+ name: Literal["dvclive"] = "dvclive"
+ prefix: str = ""
+ log_model: bool | Literal["all"] = False
+ monitor_system: bool = False
+
+
+class CSVLoggerConfig(BaseLoggerConfig):
+ name: Literal["csv"] = "csv"
+ flush_logs_every_n_steps: int = 100
+
+
+class TensorBoardLoggerConfig(BaseLoggerConfig):
+ name: Literal["tensorboard"] = "tensorboard"
+ log_graph: bool = False
+
+
+class MLFlowLoggerConfig(BaseLoggerConfig):
+ name: Literal["mlflow"] = "mlflow"
+ tracking_uri: str | None = "http://localhost:5000"
+ tags: dict[str, Any] | None = None
+ log_model: bool = False
+
+
+LoggerConfig = Annotated[
+ DVCLiveConfig
+ | CSVLoggerConfig
+ | TensorBoardLoggerConfig
+ | MLFlowLoggerConfig,
+ Field(discriminator="name"),
+]
+
+
+class AppLoggingConfig(BaseConfig):
+ train: LoggerConfig = Field(default_factory=CSVLoggerConfig)
+ evaluation: LoggerConfig = Field(default_factory=CSVLoggerConfig)
+ inference: LoggerConfig = Field(default_factory=CSVLoggerConfig)
+
+
+T = TypeVar("T", bound=LoggerConfig, contravariant=True)
+
+
+class LoggerBuilder(Protocol, Generic[T]):
+ def __call__(
+ self,
+ config: T,
+ log_dir: Path | None = None,
+ experiment_name: str | None = None,
+ run_name: str | None = None,
+ ) -> Logger: ...
+
+
+LoggingContext = TypeVar("LoggingContext", contravariant=True)
+
+
+class LoggingCallback(Protocol, Generic[LoggingContext]):
+ def run(
+ self,
+ logger: Logger,
+ artifact_path: Path,
+ context: LoggingContext,
+ ) -> None: ...
+
+
+def create_dvclive_logger(
+ config: DVCLiveConfig,
+ log_dir: Path | None = None,
+ experiment_name: str | None = None,
+ run_name: str | None = None,
+) -> Logger:
+ try:
+ from dvclive.lightning import DVCLiveLogger
+ except ImportError as error:
+ raise ValueError(
+ "DVCLive is not installed and cannot be used for logging"
+ "Make sure you have it installed by running `pip install dvclive`"
+ "or `uv add dvclive`"
+ ) from error
+
+ return DVCLiveLogger(
+ dir=log_dir if log_dir is not None else config.log_dir,
+ run_name=run_name if run_name is not None else config.run_name,
+ experiment=experiment_name
+ if experiment_name is not None
+ else config.experiment_name,
+ prefix=config.prefix,
+ log_model=config.log_model,
+ monitor_system=config.monitor_system,
+ )
+
+
+def create_csv_logger(
+ config: CSVLoggerConfig,
+ log_dir: Path | None = None,
+ experiment_name: str | None = None,
+ run_name: str | None = None,
+) -> Logger:
+ from lightning.pytorch.loggers import CSVLogger
+
+ if log_dir is None:
+ log_dir = Path(config.log_dir)
+
+ if run_name is None:
+ run_name = config.run_name
+
+ if experiment_name is None:
+ experiment_name = config.experiment_name
+
+ name = run_name
+
+ if run_name is not None and experiment_name is not None:
+ name = str(Path(experiment_name) / run_name)
+
+ return CSVLogger(
+ save_dir=str(log_dir),
+ name=name,
+ flush_logs_every_n_steps=config.flush_logs_every_n_steps,
+ )
+
+
+def create_tensorboard_logger(
+ config: TensorBoardLoggerConfig,
+ log_dir: Path | None = None,
+ experiment_name: str | None = None,
+ run_name: str | None = None,
+) -> Logger:
+ from lightning.pytorch.loggers import TensorBoardLogger
+
+ if log_dir is None:
+ log_dir = Path(config.log_dir)
+
+ if run_name is None:
+ run_name = config.run_name
+
+ if experiment_name is None:
+ experiment_name = config.experiment_name
+
+ name = run_name
+
+ if name is None:
+ name = experiment_name
+
+ if run_name is not None and experiment_name is not None:
+ name = str(Path(experiment_name) / run_name)
+
+ return TensorBoardLogger(
+ save_dir=str(log_dir),
+ name=name,
+ log_graph=config.log_graph,
+ )
+
+
+def create_mlflow_logger(
+ config: MLFlowLoggerConfig,
+ log_dir: data.PathLike | None = None,
+ experiment_name: str | None = None,
+ run_name: str | None = None,
+) -> Logger:
+ try:
+ from lightning.pytorch.loggers import MLFlowLogger
+ except ImportError as error:
+ raise ValueError(
+ "MLFlow is not installed and cannot be used for logging. "
+ "Make sure you have it installed by running `pip install mlflow` "
+ "or `uv add mlflow`"
+ ) from error
+
+ if experiment_name is None:
+ experiment_name = config.experiment_name or "Default"
+
+ if log_dir is None:
+ log_dir = config.log_dir
+
+ return MLFlowLogger(
+ experiment_name=experiment_name
+ if experiment_name is not None
+ else config.experiment_name,
+ run_name=run_name if run_name is not None else config.run_name,
+ save_dir=str(log_dir),
+ tracking_uri=config.tracking_uri,
+ tags=config.tags,
+ log_model=config.log_model,
+ )
+
+
+LOGGER_FACTORY: Dict[str, LoggerBuilder] = {
+ "dvclive": create_dvclive_logger,
+ "csv": create_csv_logger,
+ "tensorboard": create_tensorboard_logger,
+ "mlflow": create_mlflow_logger,
+}
+
+
+def build_logger(
+ config: LoggerConfig,
+ log_dir: Path | None = None,
+ experiment_name: str | None = None,
+ run_name: str | None = None,
+) -> Logger:
+ logger.opt(lazy=True).debug(
+ "Building logger with config: \n{}",
+ lambda: config.to_yaml_string(),
+ )
+
+ logger_type = config.name
+ if logger_type not in LOGGER_FACTORY:
+ raise ValueError(f"Unknown logger type: {logger_type}")
+
+ creation_func = LOGGER_FACTORY[logger_type]
+ return creation_func(
+ config,
+ log_dir=log_dir,
+ experiment_name=experiment_name,
+ run_name=run_name,
+ )
+
+
+def log_artifact_file(
+ runtime_logger: Logger,
+ path: Path,
+ artifact_path: str = "artifacts",
+) -> None:
+ from lightning.pytorch.loggers import (
+ CSVLogger,
+ MLFlowLogger,
+ TensorBoardLogger,
+ )
+
+ if isinstance(runtime_logger, MLFlowLogger):
+ runtime_logger.experiment.log_artifact( # type: ignore[call-arg]
+ local_path=str(path),
+ artifact_path=artifact_path,
+ run_id=runtime_logger.run_id,
+ )
+ return
+
+ experiment = getattr(runtime_logger, "experiment", None)
+ if experiment is not None and hasattr(experiment, "log_artifact"):
+ experiment.log_artifact(path=path, name=path.name, copy=True)
+ return
+
+ if isinstance(runtime_logger, (CSVLogger, TensorBoardLogger)):
+ return
+
+ logger.warning(
+ "Skipping artifact logging for unsupported logger type {logger_type}",
+ logger_type=type(runtime_logger).__name__,
+ )
+
+
+def log_config_artifact(
+ logger: Logger,
+ config: BaseConfig,
+ filename: str,
+ artifact_path: Path,
+) -> None:
+ artifact_path.mkdir(parents=True, exist_ok=True)
+ path = artifact_path / filename
+ path.write_text(config.to_yaml_string())
+ log_artifact_file(
+ logger,
+ path,
+ artifact_path=artifact_path.name,
+ )
+
+
+def log_csv_artifact(
+ logger: Logger,
+ df: pd.DataFrame,
+ filename: str,
+ artifact_path: Path,
+) -> None:
+ artifact_path.mkdir(parents=True, exist_ok=True)
+ path = artifact_path / filename
+ df.to_csv(path, index=False)
+ log_artifact_file(
+ logger,
+ path,
+ artifact_path=artifact_path.name,
+ )
+
+
+PlotLogger = Callable[[str, "Figure", int], None]
+
+
+def get_image_logger(logger: Logger) -> PlotLogger | None:
+ from lightning.pytorch.loggers import (
+ CSVLogger,
+ MLFlowLogger,
+ TensorBoardLogger,
+ )
+
+ if isinstance(logger, TensorBoardLogger):
+ return logger.experiment.add_figure
+
+ if isinstance(logger, MLFlowLogger):
+
+ def plot_figure(name, figure, step):
+ image = _convert_figure_to_array(figure)
+ name = name.replace("/", "_")
+ return logger.experiment.log_image(
+ logger.run_id,
+ image,
+ key=name,
+ step=step,
+ )
+
+ return plot_figure
+
+ if isinstance(logger, CSVLogger):
+ return partial(save_figure, dir=Path(logger.log_dir))
+
+
+TableLogger = Callable[[str, "pd.DataFrame", int], None]
+
+
+def get_table_logger(logger: Logger) -> TableLogger | None:
+    from lightning.pytorch.loggers import (
+        CSVLogger,
+        MLFlowLogger,
+        TensorBoardLogger,
+    )
+
+    if isinstance(logger, TensorBoardLogger):
+        return partial(save_table, dir=Path(logger.log_dir))
+
+    if isinstance(logger, MLFlowLogger):
+
+        def log_table(name: str, df: pd.DataFrame, step: int):  # renamed: was "plot_figure", copy-pasted from get_image_logger
+            return logger.experiment.log_table(
+                logger.run_id,
+                data=df,
+                artifact_file=f"{name}_step_{step}.json",
+            )
+
+        return log_table
+
+    if isinstance(logger, CSVLogger):
+        return partial(save_table, dir=Path(logger.log_dir))
+
+
+def save_table(name: str, df: pd.DataFrame, step: int, dir: Path) -> None:
+ path = dir / "tables" / f"{name}_step_{step}.csv"
+
+ if not path.parent.exists():
+ path.parent.mkdir(parents=True)
+
+ df.to_csv(path, index=False)
+
+
+def save_figure(name: str, fig: Figure, step: int, dir: Path) -> None:
+ path = dir / "plots" / f"{name}_step_{step}.png"
+
+ if not path.parent.exists():
+ path.parent.mkdir(parents=True)
+
+ fig.savefig(path, transparent=True, bbox_inches="tight")
+
+
+def _convert_figure_to_array(figure: Figure) -> np.ndarray:
+ import numpy as np
+
+ with io.BytesIO() as buff:
+ figure.savefig(buff, format="raw")
+ buff.seek(0)
+ data = np.frombuffer(buff.getvalue(), dtype=np.uint8)
+ w, h = figure.canvas.get_width_height()
+ im = data.reshape((int(h), int(w), -1))
+ return im
diff --git a/src/batdetect2/models/__init__.py b/src/batdetect2/models/__init__.py
new file mode 100644
index 00000000..ee96d939
--- /dev/null
+++ b/src/batdetect2/models/__init__.py
@@ -0,0 +1,321 @@
+"""Neural network model definitions and builders for BatDetect2.
+
+This package contains the PyTorch implementations of the deep neural network
+architectures used to detect and classify bat echolocation calls in
+spectrograms. Components are designed to be combined through configuration
+objects, making it easy to experiment with different architectures.
+
+Key submodules
+--------------
+- ``blocks``: Reusable convolutional building blocks (downsampling,
+ upsampling, attention, coord-conv variants).
+- ``encoder``: The downsampling path; reduces spatial resolution whilst
+ extracting increasingly abstract features.
+- ``bottleneck``: The central component connecting encoder to decoder;
+ optionally applies self-attention along the time axis.
+- ``decoder``: The upsampling path; reconstructs high-resolution feature
+ maps using bottleneck output and skip connections from the encoder.
+- ``backbones``: Assembles encoder, bottleneck, and decoder into a complete
+ U-Net-style feature extraction backbone.
+- ``heads``: Lightweight 1×1 convolutional heads that produce detection,
+ classification, and bounding-box size predictions from backbone features.
+- ``detectors``: Combines a backbone with prediction heads into the final
+ end-to-end ``Detector`` model.
+
+The primary entry point for building a full, ready-to-use BatDetect2 model
+is the ``build_model`` factory function exported from this module.
+"""
+
+import torch
+from pydantic import Field
+
+from batdetect2.audio.loader import TARGET_SAMPLERATE_HZ
+from batdetect2.core.configs import BaseConfig
+from batdetect2.models.backbones import (
+ BackboneConfig,
+ UNetBackbone,
+ UNetBackboneConfig,
+ build_backbone,
+ load_backbone_config,
+)
+from batdetect2.models.blocks import (
+ ConvConfig,
+ FreqCoordConvDownConfig,
+ FreqCoordConvUpConfig,
+ StandardConvDownConfig,
+ StandardConvUpConfig,
+)
+from batdetect2.models.bottleneck import (
+ Bottleneck,
+ BottleneckConfig,
+ build_bottleneck,
+)
+from batdetect2.models.decoder import (
+ DEFAULT_DECODER_CONFIG,
+ DecoderConfig,
+ build_decoder,
+)
+from batdetect2.models.detectors import Detector, build_detector
+from batdetect2.models.encoder import (
+ DEFAULT_ENCODER_CONFIG,
+ EncoderConfig,
+ build_encoder,
+)
+from batdetect2.models.heads import BBoxHead, ClassifierHead, DetectorHead
+from batdetect2.models.types import DetectorProtocol, ModelProtocol
+from batdetect2.postprocess.config import PostprocessConfig
+from batdetect2.postprocess.types import (
+ ClipDetectionsTensor,
+ PostprocessorProtocol,
+)
+from batdetect2.preprocess.config import PreprocessingConfig
+from batdetect2.preprocess.types import PreprocessorProtocol
+from batdetect2.targets.types import ROIMapperProtocol, TargetProtocol
+
# Sorted alphabetically; `UNetBackboneConfig` was imported above but
# previously missing from the public export list.
__all__ = [
    "BBoxHead",
    "BackboneConfig",
    "Bottleneck",
    "BottleneckConfig",
    "ClassifierHead",
    "ConvConfig",
    "DEFAULT_DECODER_CONFIG",
    "DEFAULT_ENCODER_CONFIG",
    "DecoderConfig",
    "Detector",
    "DetectorHead",
    "EncoderConfig",
    "FreqCoordConvDownConfig",
    "FreqCoordConvUpConfig",
    "Model",
    "ModelConfig",
    "StandardConvDownConfig",
    "StandardConvUpConfig",
    "UNetBackbone",
    "UNetBackboneConfig",
    "build_backbone",
    "build_bottleneck",
    "build_decoder",
    "build_detector",
    "build_encoder",
    "build_model",
    "build_model_with_new_targets",
    "load_backbone_config",
]
+
+
class ModelConfig(BaseConfig):
    """Top-level configuration for a BatDetect2 model.

    Gathers everything needed to reconstruct a model: the audio sample
    rate it expects, the backbone architecture, and the preprocessing
    and postprocessing pipelines. Every field defaults to the standard
    BatDetect2 setup.

    Attributes
    ----------
    samplerate : int
        Input audio sample rate in Hz; audio must be resampled to this
        rate before inference. Defaults to ``TARGET_SAMPLERATE_HZ``.
    architecture : BackboneConfig
        Encoder-decoder backbone configuration. Defaults to
        ``UNetBackboneConfig()``.
    preprocess : PreprocessingConfig
        Audio-to-spectrogram pipeline parameters (STFT, frequency crop,
        transforms, resize). Defaults to ``PreprocessingConfig()``.
    postprocess : PostprocessConfig
        Parameters for turning raw model outputs into detections (NMS
        kernel, thresholds, top-k limit). Defaults to
        ``PostprocessConfig()``.
    """

    samplerate: int = Field(default=TARGET_SAMPLERATE_HZ, gt=0)
    architecture: BackboneConfig = Field(default_factory=UNetBackboneConfig)
    preprocess: PreprocessingConfig = Field(
        default_factory=PreprocessingConfig
    )
    postprocess: PostprocessConfig = Field(default_factory=PostprocessConfig)
+
+
class Model(torch.nn.Module):
    """Full BatDetect2 pipeline: preprocess -> detect -> postprocess.

    Wraps a spectrogram preprocessor, a detector network, and a
    postprocessor in one ``torch.nn.Module`` so that a raw waveform
    tensor can be turned into per-clip detection tensors with a single
    call. Instances are normally produced by ``build_model`` rather than
    constructed directly.

    Attributes
    ----------
    detector : DetectorProtocol
        Network mapping spectrograms to raw detection, classification,
        and bounding-box size outputs.
    preprocessor : PreprocessorProtocol
        Turns raw waveforms into spectrograms for ``detector``.
    postprocessor : PostprocessorProtocol
        Turns the raw detector output into per-clip detection tensors.
    class_names : list[str]
        Names of the classes predicted by the classification output.
    dimension_names : list[str]
        Names of the size dimensions predicted by the bbox output.
    """

    detector: DetectorProtocol
    preprocessor: PreprocessorProtocol
    postprocessor: PostprocessorProtocol
    class_names: list[str]
    dimension_names: list[str]
    _config: dict[str, object]

    def __init__(
        self,
        detector: DetectorProtocol,
        preprocessor: PreprocessorProtocol,
        postprocessor: PostprocessorProtocol,
        class_names: list[str],
        dimension_names: list[str],
        config: dict[str, object],
    ):
        super().__init__()
        self.detector = detector
        self.preprocessor = preprocessor
        self.postprocessor = postprocessor
        self.class_names = class_names
        self.dimension_names = dimension_names
        self._config = config

    def get_config(self) -> dict[str, object]:
        """Return a shallow copy of the stored configuration mapping."""
        return {**self._config}

    def forward(self, wav: torch.Tensor) -> list[ClipDetectionsTensor]:
        """Detect events in a raw waveform tensor.

        Parameters
        ----------
        wav : torch.Tensor
            Raw audio waveform. The exact expected shape is determined by
            the preprocessor, typically ``(batch, samples)`` or
            ``(batch, channels, samples)``.

        Returns
        -------
        list[ClipDetectionsTensor]
            One tensor of detected events (locations, class scores,
            sizes) per clip in the batch.
        """
        spectrogram = self.preprocessor(wav)
        raw_outputs = self.detector(spectrogram)
        return self.postprocessor(raw_outputs)
+
+
def build_model(
    config: ModelConfig | dict | None = None,
    class_names: list[str] | None = None,
    dimension_names: list[str] | None = None,
    preprocessor: PreprocessorProtocol | None = None,
    postprocessor: PostprocessorProtocol | None = None,
) -> ModelProtocol:
    """Assemble a complete, ready-to-use BatDetect2 ``Model``.

    Components that are not supplied explicitly are built from
    ``config``; a supplied component takes precedence over the
    corresponding configuration section.

    Parameters
    ----------
    config : ModelConfig or dict, optional
        Full model configuration (samplerate, architecture,
        preprocessing, postprocessing). A plain dict is validated into a
        ``ModelConfig``. Defaults to ``ModelConfig()``.
    class_names : list[str]
        Class names used to size the classifier head. Required.
    dimension_names : list[str]
        Size-dimension names used to size the bbox head. Required.
    preprocessor : PreprocessorProtocol, optional
        Pre-built preprocessor; overrides ``config.preprocess`` and
        ``config.samplerate``.
    postprocessor : PostprocessorProtocol, optional
        Pre-built postprocessor; overrides ``config.postprocess``. When
        omitted, it is built from the (possibly custom) preprocessor so
        that time and frequency scaling stay consistent.

    Returns
    -------
    ModelProtocol
        A fully assembled ``Model`` ready for inference or training.

    Raises
    ------
    ValueError
        If ``class_names`` or ``dimension_names`` is not provided.
    """
    from batdetect2.postprocess import build_postprocessor
    from batdetect2.preprocess import build_preprocessor

    config = config or ModelConfig()
    if isinstance(config, dict):
        config = ModelConfig.model_validate(config)

    if class_names is None:
        raise ValueError("class_names must be provided when building a model.")

    if dimension_names is None:
        raise ValueError(
            "dimension_names must be provided when building a model."
        )

    preprocessor = preprocessor or build_preprocessor(
        config=config.preprocess,
        input_samplerate=config.samplerate,
    )
    postprocessor = postprocessor or build_postprocessor(
        preprocessor=preprocessor,
        config=config.postprocess,
    )
    detector = build_detector(
        class_names=class_names,
        dimension_names=dimension_names,
        config=config.architecture,
    )

    return Model(
        detector=detector,
        preprocessor=preprocessor,
        postprocessor=postprocessor,
        class_names=class_names,
        dimension_names=dimension_names,
        config=config.model_dump(mode="json"),
    )
+
+
def build_model_with_new_targets(
    model: ModelProtocol,
    targets: TargetProtocol,
    roi_mapper: ROIMapperProtocol,
) -> ModelProtocol:
    """Clone ``model`` with prediction heads resized for new targets.

    Reuses the existing backbone, preprocessor, postprocessor, and stored
    configuration, but builds fresh detection heads sized for the class
    names in ``targets`` and the size dimensions in ``roi_mapper``.

    Parameters
    ----------
    model : ModelProtocol
        The source model whose backbone and pipelines are reused.
    targets : TargetProtocol
        Provides the new ``class_names``.
    roi_mapper : ROIMapperProtocol
        Provides the new ``dimension_names``.

    Returns
    -------
    ModelProtocol
        A new ``Model`` sharing the original backbone weights.
    """
    class_names = targets.class_names
    dimension_names = roi_mapper.dimension_names

    new_detector = build_detector(
        class_names=class_names,
        dimension_names=dimension_names,
        backbone=model.detector.backbone,
    )

    return Model(
        detector=new_detector,
        preprocessor=model.preprocessor,
        postprocessor=model.postprocessor,
        class_names=class_names,
        dimension_names=dimension_names,
        config=model.get_config(),
    )
diff --git a/src/batdetect2/models/backbones.py b/src/batdetect2/models/backbones.py
new file mode 100644
index 00000000..03a62d3d
--- /dev/null
+++ b/src/batdetect2/models/backbones.py
@@ -0,0 +1,401 @@
+"""Assembles a complete encoder-decoder backbone network.
+
+This module defines ``UNetBackboneConfig`` and the ``UNetBackbone``
+``nn.Module``, together with the ``build_backbone`` and
+``load_backbone_config`` helpers.
+
+A backbone combines three components built from the sibling modules:
+
+1. **Encoder** (``batdetect2.models.encoder``) – reduces spatial resolution
+ while extracting hierarchical features and storing skip-connection tensors.
+2. **Bottleneck** (``batdetect2.models.bottleneck``) – processes the
+ lowest-resolution features, optionally applying self-attention.
+3. **Decoder** (``batdetect2.models.decoder``) – restores spatial resolution
+ using bottleneck features and skip connections from the encoder.
+
+The resulting ``UNetBackbone`` takes a spectrogram tensor as input and returns
+a high-resolution feature map consumed by the prediction heads in
+``batdetect2.models.detectors``.
+
+Input padding is handled automatically: the backbone pads the input to be
+divisible by the total downsampling factor and strips the padding from the
+output so that the output spatial dimensions always match the input spatial
+dimensions.
+"""
+
+from typing import Annotated, Literal
+
+import torch
+import torch.nn.functional as F
+from loguru import logger
+from pydantic import Field, TypeAdapter
+from soundevent import data
+
+from batdetect2.core.configs import BaseConfig, load_config
+from batdetect2.core.registries import (
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.models.bottleneck import (
+ DEFAULT_BOTTLENECK_CONFIG,
+ BottleneckConfig,
+ build_bottleneck,
+)
+from batdetect2.models.decoder import (
+ DEFAULT_DECODER_CONFIG,
+ DecoderConfig,
+ build_decoder,
+)
+from batdetect2.models.encoder import (
+ DEFAULT_ENCODER_CONFIG,
+ EncoderConfig,
+ build_encoder,
+)
+from batdetect2.models.types import (
+ BackboneProtocol,
+ BottleneckProtocol,
+ DecoderProtocol,
+ EncoderProtocol,
+)
+
# Sorted alphabetically; `UNetBackboneConfig` was defined in this module
# (and imported by the package __init__) but missing from the exports.
__all__ = [
    "BackboneConfig",
    "BackboneImportConfig",
    "UNetBackbone",
    "UNetBackboneConfig",
    "build_backbone",
    "load_backbone_config",
]
+
+
class UNetBackboneConfig(BaseConfig):
    """Configuration for a U-Net-style encoder-decoder backbone.

    All fields have sensible defaults that reproduce the standard BatDetect2
    architecture, so you can start with ``UNetBackboneConfig()`` and override
    only the fields you want to change.

    Attributes
    ----------
    name : str
        Discriminator field used by the backbone registry; always
        ``"UNetBackbone"``.
    input_height : int
        Number of frequency bins in the input spectrogram. Defaults to
        ``128``.
    in_channels : int
        Number of channels in the input spectrogram (e.g. ``1`` for a
        standard mel-spectrogram). Defaults to ``1``.
    encoder : EncoderConfig
        Configuration for the downsampling path. Defaults to
        ``DEFAULT_ENCODER_CONFIG``.
    bottleneck : BottleneckConfig
        Configuration for the bottleneck. Defaults to
        ``DEFAULT_BOTTLENECK_CONFIG``.
    decoder : DecoderConfig
        Configuration for the upsampling path. Defaults to
        ``DEFAULT_DECODER_CONFIG``.
    """

    name: Literal["UNetBackbone"] = "UNetBackbone"
    input_height: int = 128
    in_channels: int = 1
    # NOTE(review): these defaults are shared module-level config instances.
    # Pydantic v2 deep-copies mutable defaults per model instance, so this
    # should be safe — confirm, or switch to Field(default_factory=...) to
    # make the per-instance isolation explicit.
    encoder: EncoderConfig = DEFAULT_ENCODER_CONFIG
    bottleneck: BottleneckConfig = DEFAULT_BOTTLENECK_CONFIG
    decoder: DecoderConfig = DEFAULT_DECODER_CONFIG
+
+
backbone_registry: Registry[BackboneProtocol, []] = Registry("backbone")


@add_import_config(backbone_registry)
class BackboneImportConfig(ImportConfig):
    """Configuration that plugs an arbitrary callable in as a backbone.

    Select it with ``name="import"`` and point ``target`` at any callable
    that returns a backbone-compatible module, instead of using one of
    the built-in options.
    """

    name: Literal["import"] = "import"
+
+
class UNetBackbone(torch.nn.Module):
    """U-Net-style encoder-decoder backbone network.

    Combines an encoder, a bottleneck, and a decoder into a single module
    that produces a high-resolution feature map from an input spectrogram.
    Skip connections from each encoder stage are consumed by the
    corresponding decoder stage.

    Input spectrograms of arbitrary width are handled automatically: the
    backbone pads the input so that its dimensions are divisible by
    ``divide_factor`` and removes the padding from the output.

    Instances are typically created via ``build_backbone``.

    Attributes
    ----------
    input_height : int
        Expected height (frequency bins) of the input spectrogram.
    out_channels : int
        Number of channels in the output feature map (taken from the
        decoder's output channel count).
    encoder : EncoderProtocol
        The instantiated encoder module.
    decoder : DecoderProtocol
        The instantiated decoder module.
    bottleneck : BottleneckProtocol
        The instantiated bottleneck module.
    divide_factor : int
        The total spatial downsampling factor applied by the encoder
        (``input_height // encoder.output_height``). The input is padded
        to a multiple of this value before processing.
    """

    def __init__(
        self,
        input_height: int,
        encoder: EncoderProtocol,
        decoder: DecoderProtocol,
        bottleneck: BottleneckProtocol,
    ):
        """Initialise the backbone network.

        Parameters
        ----------
        input_height : int
            Expected height (frequency bins) of the input spectrogram.
        encoder : EncoderProtocol
            An initialised encoder module.
        decoder : DecoderProtocol
            An initialised decoder module. Its ``output_height`` must
            equal ``input_height``.
        bottleneck : BottleneckProtocol
            An initialised bottleneck module.

        Raises
        ------
        ValueError
            If ``decoder.output_height`` differs from ``input_height``.
            The original docstring promised this check but it was only
            performed in ``from_config``; it is now enforced here so that
            every construction path is validated.
        """
        super().__init__()

        if decoder.output_height != input_height:
            raise ValueError(
                "Invalid configuration: Decoder output height "
                f"({decoder.output_height}) must match the Backbone input "
                f"height ({input_height}). Check encoder/decoder layer "
                "configurations and input/bottleneck heights."
            )

        self.input_height = input_height
        self.encoder = encoder
        self.decoder = decoder
        self.bottleneck = bottleneck
        self.out_channels = decoder.out_channels

        # Down/Up scaling factor. Inputs must be divisible by this factor
        # so the down/up scaling layers can recover the original shape.
        self.divide_factor = input_height // self.encoder.output_height

    def forward(self, spec: torch.Tensor) -> torch.Tensor:
        """Produce a feature map from an input spectrogram.

        Pads the input if necessary, runs it through the encoder, the
        bottleneck, and the decoder (incorporating encoder skip
        connections), then removes any padding added earlier.

        Parameters
        ----------
        spec : torch.Tensor
            Input spectrogram tensor, shape ``(B, C_in, H_in, W_in)``.
            ``H_in`` must equal ``self.input_height``; ``W_in`` can be
            any positive integer.

        Returns
        -------
        torch.Tensor
            Feature map tensor, shape ``(B, C_out, H_in, W_in)``, where
            ``C_out`` is ``self.out_channels``. The spatial dimensions
            always match those of the input.
        """
        spec, h_pad, w_pad = _pad_adjust(spec, factor=self.divide_factor)

        # Encoder: returns one skip tensor per stage; the last entry is
        # the lowest-resolution feature map.
        residuals = self.encoder(spec)

        # Bottleneck operates on the lowest-resolution features.
        x = self.bottleneck(residuals[-1])

        # Decoder consumes bottleneck output plus encoder skips.
        x = self.decoder(x, residuals=residuals)

        # Strip the padding so output matches the input spatial size.
        return _restore_pad(x, h_pad=h_pad, w_pad=w_pad)

    @backbone_registry.register(UNetBackboneConfig)
    @staticmethod
    def from_config(config: UNetBackboneConfig) -> BackboneProtocol:
        """Build a ``UNetBackbone`` from its configuration.

        Encoder, bottleneck, and decoder are built in dependency order;
        the height-consistency check is performed by ``__init__``.
        """
        encoder = build_encoder(
            in_channels=config.in_channels,
            input_height=config.input_height,
            config=config.encoder,
        )

        bottleneck = build_bottleneck(
            input_height=encoder.output_height,
            in_channels=encoder.out_channels,
            config=config.bottleneck,
        )

        decoder = build_decoder(
            in_channels=bottleneck.out_channels,
            input_height=encoder.output_height,
            config=config.decoder,
        )

        return UNetBackbone(
            input_height=config.input_height,
            encoder=encoder,
            decoder=decoder,
            bottleneck=bottleneck,
        )
+
+
# Discriminated union of all supported backbone configurations; the
# ``name`` field selects the concrete config type during validation.
BackboneConfig = Annotated[
    UNetBackboneConfig | BackboneImportConfig,
    Field(discriminator="name"),
]
+
+
def build_backbone(config: BackboneConfig | None = None) -> BackboneProtocol:
    """Construct a backbone module from its configuration.

    Dispatches on ``config.name`` through the backbone registry. When no
    configuration is given, the default ``UNetBackbone`` is built.

    Parameters
    ----------
    config : BackboneConfig, optional
        Configuration describing the desired backbone. Defaults to
        ``UNetBackboneConfig()`` when not provided.

    Returns
    -------
    BackboneProtocol
        An initialised backbone module.
    """
    if config is None:
        config = UNetBackboneConfig()

    logger.opt(lazy=True).debug(
        "Building model backbone with config: \n{}",
        lambda: config.to_yaml_string(),
    )

    return backbone_registry.build(config)
+
+
+def _pad_adjust(
+ spec: torch.Tensor,
+ factor: int = 32,
+) -> tuple[torch.Tensor, int, int]:
+ """Pad a tensor's height and width to be divisible by ``factor``.
+
+ Adds zero-padding to the bottom and right edges of the tensor so that
+ both dimensions are exact multiples of ``factor``. If both dimensions
+ are already divisible, the tensor is returned unchanged.
+
+ Parameters
+ ----------
+ spec : torch.Tensor
+ Input tensor, typically shape ``(B, C, H, W)``.
+ factor : int, default=32
+ The factor that both H and W should be divisible by after padding.
+
+ Returns
+ -------
+ tuple[torch.Tensor, int, int]
+ - Padded tensor.
+ - Number of rows added to the height (``h_pad``).
+ - Number of columns added to the width (``w_pad``).
+ """
+ h, w = spec.shape[-2:]
+ h_pad = -h % factor
+ w_pad = -w % factor
+
+ if h_pad == 0 and w_pad == 0:
+ return spec, 0, 0
+
+ return F.pad(spec, (0, w_pad, 0, h_pad)), h_pad, w_pad
+
+
+def _restore_pad(
+ x: torch.Tensor, h_pad: int = 0, w_pad: int = 0
+) -> torch.Tensor:
+ """Remove padding previously added by ``_pad_adjust``.
+
+ Trims ``h_pad`` rows from the bottom and ``w_pad`` columns from the
+ right of the tensor, restoring its original spatial dimensions.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Padded tensor, typically shape ``(B, C, H_padded, W_padded)``.
+ h_pad : int, default=0
+ Number of rows to remove from the bottom.
+ w_pad : int, default=0
+ Number of columns to remove from the right.
+
+ Returns
+ -------
+ torch.Tensor
+ Tensor with padding removed, shape
+ ``(B, C, H_padded - h_pad, W_padded - w_pad)``.
+ """
+ if h_pad > 0:
+ x = x[..., :-h_pad, :]
+
+ if w_pad > 0:
+ x = x[..., :-w_pad]
+
+ return x
+
+
def load_backbone_config(
    path: data.PathLike,
    field: str | None = None,
) -> BackboneConfig:
    """Read and validate a backbone configuration from a YAML/JSON file.

    Optionally descends into a named sub-field before validating the data
    against the ``BackboneConfig`` discriminated union.

    Parameters
    ----------
    path : PathLike
        Location of the configuration file (YAML or JSON).
    field : str, optional
        Dot-separated key path to the sub-section holding the backbone
        configuration (e.g. ``"model"``). The file root is used when
        ``None``.

    Returns
    -------
    BackboneConfig
        The validated backbone configuration object.

    Raises
    ------
    FileNotFoundError
        If ``path`` does not exist.
    ValidationError
        If the loaded data does not match any known backbone schema.
    """
    adapter = TypeAdapter(BackboneConfig)
    return load_config(path, schema=adapter, field=field)
diff --git a/src/batdetect2/models/blocks.py b/src/batdetect2/models/blocks.py
new file mode 100644
index 00000000..ccce61cf
--- /dev/null
+++ b/src/batdetect2/models/blocks.py
@@ -0,0 +1,1099 @@
+"""Reusable convolutional building blocks for BatDetect2 models.
+
+This module provides a collection of ``torch.nn.Module`` subclasses that form
+the fundamental building blocks for the encoder-decoder backbone used in
+BatDetect2. All blocks follow a consistent interface: they store
+``in_channels`` and ``out_channels`` as attributes and implement a
+``get_output_height`` method that reports how a given input height maps to an
+output height (e.g., halved by downsampling blocks, doubled by upsampling
+blocks).
+
+Available block families
+------------------------
+Standard blocks
+ ``ConvBlock`` – convolution + batch normalisation + ReLU, no change in
+ spatial resolution.
+
+Downsampling blocks
+ ``StandardConvDownBlock`` – convolution then 2×2 max-pooling, halves H
+ and W.
+ ``FreqCoordConvDownBlock`` – like ``StandardConvDownBlock`` but prepends
+ a normalised frequency-coordinate channel before the convolution
+ (CoordConv concept), helping filters learn frequency-position-dependent
+ patterns.
+
+Upsampling blocks
+ ``StandardConvUpBlock`` – bilinear interpolation then convolution,
+ doubles H and W.
+ ``FreqCoordConvUpBlock`` – like ``StandardConvUpBlock`` but prepends a
+ frequency-coordinate channel after upsampling.
+
+Bottleneck blocks
+ ``VerticalConv`` – 1-D convolution whose kernel spans the entire
+ frequency axis, collapsing H to 1 whilst preserving W.
+ ``SelfAttention`` – scaled dot-product self-attention along the time
+ axis; typically follows a ``VerticalConv``.
+
+Group block
+ ``LayerGroup`` – chains several blocks sequentially into one unit,
+ useful when a single encoder or decoder "stage" requires more than one
+ operation.
+
+Factory function
+----------------
+``build_layer`` creates any of the above blocks from the matching
+configuration object (one of the ``*Config`` classes exported here), using
+a discriminated-union ``name`` field to dispatch to the correct class.
+"""
+
+from typing import Annotated, Literal
+
+import torch
+import torch.nn.functional as F
+from pydantic import Field
+from torch import nn
+
+from batdetect2.core import (
+ BaseConfig,
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+
# Sorted alphabetically; `SelfAttentionConfig` and `VerticalConvConfig`
# are defined in this module but were missing from the exports while
# every other block config was listed.
__all__ = [
    "BlockImportConfig",
    "ConvBlock",
    "ConvConfig",
    "FreqCoordConvDownBlock",
    "FreqCoordConvDownConfig",
    "FreqCoordConvUpBlock",
    "FreqCoordConvUpConfig",
    "LayerConfig",
    "LayerGroupConfig",
    "SelfAttention",
    "SelfAttentionConfig",
    "StandardConvDownBlock",
    "StandardConvDownConfig",
    "StandardConvUpBlock",
    "StandardConvUpConfig",
    "VerticalConv",
    "VerticalConvConfig",
    "build_layer",
]
+
+
class Block(nn.Module):
    """Common base class for all BatDetect2 building blocks.

    Every concrete block records the channel counts it consumes and
    produces (``in_channels`` / ``out_channels``) so that factory
    functions can chain blocks together without inspecting configuration
    objects, and reports how it transforms the frequency axis via
    ``get_output_height``.

    Attributes
    ----------
    in_channels : int
        Number of channels expected in the input tensor.
    out_channels : int
        Number of channels produced in the output tensor.
    """

    in_channels: int
    out_channels: int

    def get_output_height(self, input_height: int) -> int:
        """Map an input height to this block's output height.

        The base implementation is the identity, which is correct for
        blocks that leave spatial resolution untouched. Downsampling
        blocks override this to return ``input_height // 2`` and
        upsampling blocks to return ``input_height * 2``.

        Parameters
        ----------
        input_height : int
            Height (number of frequency bins) of the input feature map.

        Returns
        -------
        int
            Height of the output feature map.
        """
        return input_height
+
+
block_registry: Registry[Block, [int, int]] = Registry("block")


@add_import_config(block_registry)
class BlockImportConfig(ImportConfig):
    """Configuration that plugs an arbitrary callable in as a model block.

    Select it with ``name="import"`` and point ``target`` at the callable
    to use in place of one of the built-in block types.
    """

    name: Literal["import"] = "import"
+
+
class SelfAttentionConfig(BaseConfig):
    """Configuration for a ``SelfAttention`` block.

    Attributes
    ----------
    name : str
        Discriminator field; always ``"SelfAttention"``.
    attention_channels : int
        Width of the query/key/value projection space.
    temperature : float
        Softening divisor applied together with ``attention_channels``
        when scaling the attention scores. Defaults to ``1``.
    """

    name: Literal["SelfAttention"] = "SelfAttention"
    attention_channels: int
    temperature: float = 1
+
+
class SelfAttention(Block):
    """Self-attention block operating along the time axis.

    Applies scaled dot-product self-attention across the time steps of an
    input feature map whose frequency axis has already been collapsed to
    height 1 (e.g. by a preceding ``VerticalConv``). Queries, keys, and
    values are produced by learned linear projections; attention scores
    are the key/query dot products divided by
    ``temperature * attention_channels``; the attended values are
    projected back to ``in_channels`` and the height dimension restored,
    so the output shape matches the input shape.

    Parameters
    ----------
    in_channels : int
        Number of input channels (features per time step). The output
        also has ``in_channels`` channels.
    attention_channels : int
        Dimensionality of the query, key, and value projections.
    temperature : float, default=1.0
        Extra divisor applied with ``attention_channels`` when scaling
        dot-product scores; larger values yield softer (more uniform)
        attention distributions.

    Attributes
    ----------
    key_fun, query_fun, value_fun : nn.Linear
        Linear projections for keys, queries, and values.
    pro_fun : nn.Linear
        Final linear projection applied to the attended values.
    temperature : float
        Scaling divisor used when computing attention scores.
    att_dim : int
        Dimensionality of the attention space (``attention_channels``).
    """

    def __init__(
        self,
        in_channels: int,
        attention_channels: int,
        temperature: float = 1.0,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = in_channels

        # Note, does not encode position information (absolute or relative)
        self.temperature = temperature
        self.att_dim = attention_channels
        self.output_channels = in_channels

        self.key_fun = nn.Linear(in_channels, attention_channels)
        self.value_fun = nn.Linear(in_channels, attention_channels)
        self.query_fun = nn.Linear(in_channels, attention_channels)
        self.pro_fun = nn.Linear(attention_channels, in_channels)

    def _attention_weights(self, x: torch.Tensor) -> torch.Tensor:
        """Compute the softmax score matrix for ``x`` of shape (B, W, C).

        The previous implementation re-derived ``x @ W.T + b`` by hand
        for each projection; calling the ``nn.Linear`` modules directly
        is numerically identical (``F.linear`` computes exactly that) and
        removes the duplication between ``forward`` and
        ``compute_attention_weights``.
        """
        key = self.key_fun(x)
        query = self.query_fun(x)

        scores = torch.bmm(key, query.permute(0, 2, 1)) / (
            self.temperature * self.att_dim
        )
        # Softmax over dim 1, matching the original implementation
        # (columns of the score matrix are normalised).
        return F.softmax(scores, 1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply self-attention along the time dimension.

        Parameters
        ----------
        x : torch.Tensor
            Input tensor with shape ``(B, C, 1, W)``. The height
            dimension must be 1 (the frequency axis should already have
            been collapsed, e.g. by a ``VerticalConv``).

        Returns
        -------
        torch.Tensor
            Output tensor with the same shape ``(B, C, 1, W)``, with each
            time step updated by attended context from all time steps.
        """
        x = x.squeeze(2).permute(0, 2, 1)

        att_weights = self._attention_weights(x)
        value = self.value_fun(x)
        att = torch.bmm(value.permute(0, 2, 1), att_weights)

        op = self.pro_fun(att.permute(0, 2, 1))
        return op.permute(0, 2, 1).unsqueeze(2)

    def compute_attention_weights(self, x: torch.Tensor) -> torch.Tensor:
        """Return the softmax attention weight matrix.

        Useful for visualising which time steps attend to which others.

        Parameters
        ----------
        x : torch.Tensor
            Input tensor with shape ``(B, C, 1, W)``.

        Returns
        -------
        torch.Tensor
            Attention weight matrix with shape ``(B, W, W)``. Entry
            ``[b, i, j]`` is the attention weight that time step ``i``
            assigns to time step ``j`` in batch item ``b``.
        """
        x = x.squeeze(2).permute(0, 2, 1)
        return self._attention_weights(x)

    @block_registry.register(SelfAttentionConfig)
    @staticmethod
    def from_config(
        config: SelfAttentionConfig,
        input_channels: int,
        input_height: int,
    ) -> "SelfAttention":
        return SelfAttention(
            in_channels=input_channels,
            attention_channels=config.attention_channels,
            temperature=config.temperature,
        )
+
+
class ConvConfig(BaseConfig):
    """Configuration for a basic ``ConvBlock`` (Conv2d -> BatchNorm -> ReLU).

    The ``name`` literal discriminates this config inside the block
    registry's tagged union; the remaining fields map directly onto the
    ``ConvBlock`` constructor arguments.
    """

    name: Literal["ConvBlock"] = "ConvBlock"
    """Discriminator field indicating the block type."""

    out_channels: int
    """Number of output channels."""

    kernel_size: int = 3
    """Size of the square convolutional kernel."""

    pad_size: int = 1
    """Padding size."""
+
+
+class ConvBlock(Block):
+ """Basic Convolutional Block.
+
+ A standard building block consisting of a 2D convolution, followed by
+ batch normalization and a ReLU activation function.
+
+ Sequence: Conv2d -> BatchNorm2d -> ReLU.
+
+ Parameters
+ ----------
+ in_channels : int
+ Number of channels in the input tensor.
+ out_channels : int
+ Number of channels produced by the convolution.
+ kernel_size : int, default=3
+ Size of the square convolutional kernel.
+ pad_size : int, default=1
+ Amount of padding added to preserve spatial dimensions.
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ kernel_size: int = 3,
+ pad_size: int = 1,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.conv = nn.Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ padding=pad_size,
+ )
+ self.batch_norm = nn.BatchNorm2d(out_channels)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Apply Conv -> BN -> ReLU.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor, shape `(B, C_in, H, W)`.
+
+ Returns
+ -------
+ torch.Tensor
+ Output tensor, shape `(B, C_out, H, W)`.
+ """
+ return F.relu_(self.batch_norm(self.conv(x)))
+
+ @block_registry.register(ConvConfig)
+ @staticmethod
+ def from_config(
+ config: ConvConfig,
+ input_channels: int,
+ input_height: int,
+ ):
+ return ConvBlock(
+ in_channels=input_channels,
+ out_channels=config.out_channels,
+ kernel_size=config.kernel_size,
+ pad_size=config.pad_size,
+ )
+
+
+class VerticalConvConfig(BaseConfig):
+    """Configuration for a ``VerticalConv`` block.
+
+    Selected by ``name == "VerticalConv"`` when deserialising layer
+    configurations.
+
+    Attributes
+    ----------
+    name : str
+        Discriminator field; always ``"VerticalConv"``.
+    channels : int
+        Number of output channels produced by the vertical convolution.
+    """
+
+    name: Literal["VerticalConv"] = "VerticalConv"
+    channels: int
+
+
+class VerticalConv(Block):
+ """Convolutional layer that aggregates features across the entire height.
+
+ Applies a 2D convolution using a kernel with shape `(input_height, 1)`.
+ This collapses the height dimension (H) to 1 while preserving the width (W),
+ effectively summarizing features across the full vertical extent (e.g.,
+ frequency axis) at each time step. Followed by BatchNorm and ReLU.
+
+ Useful for summarizing frequency information before applying operations
+ along the time axis (like SelfAttention).
+
+ Parameters
+ ----------
+ in_channels : int
+ Number of channels in the input tensor.
+ out_channels : int
+ Number of channels produced by the convolution.
+ input_height : int
+ The height (H dimension) of the input tensor. The convolutional kernel
+ will be sized `(input_height, 1)`.
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ input_height: int,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.conv = nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=(input_height, 1),
+ padding=0,
+ )
+ self.bn = nn.BatchNorm2d(out_channels)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Apply Vertical Conv -> BN -> ReLU.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor, shape `(B, C_in, H, W)`, where H must match the
+ `input_height` provided during initialization.
+
+ Returns
+ -------
+ torch.Tensor
+ Output tensor, shape `(B, C_out, 1, W)`.
+ """
+ return F.relu_(self.bn(self.conv(x)))
+
+ @block_registry.register(VerticalConvConfig)
+ @staticmethod
+ def from_config(
+ config: VerticalConvConfig,
+ input_channels: int,
+ input_height: int,
+ ):
+ return VerticalConv(
+ in_channels=input_channels,
+ out_channels=config.channels,
+ input_height=input_height,
+ )
+
+
+class FreqCoordConvDownConfig(BaseConfig):
+    """Configuration for a FreqCoordConvDownBlock.
+
+    Selected by ``name == "FreqCoordConvDown"`` in the layer-config union;
+    the block appends a frequency-coordinate channel, convolves, then
+    halves the height and width via 2x2 max pooling.
+    """
+
+    name: Literal["FreqCoordConvDown"] = "FreqCoordConvDown"
+    """Discriminator field indicating the block type."""
+
+    out_channels: int
+    """Number of output channels."""
+
+    kernel_size: int = 3
+    """Size of the square convolutional kernel."""
+
+    pad_size: int = 1
+    """Padding size."""
+
+
+class FreqCoordConvDownBlock(Block):
+ """Downsampling Conv Block incorporating Frequency Coordinate features.
+
+ This block implements a downsampling step (Conv2d + MaxPool2d) commonly
+ used in CNN encoders. Before the convolution, it concatenates an extra
+ channel representing the normalized vertical coordinate (frequency) to the
+ input tensor.
+
+ The purpose of adding coordinate features is to potentially help the
+ convolutional filters become spatially aware, allowing them to learn
+ patterns that might depend on the relative frequency position within the
+ spectrogram.
+
+ Sequence: Concat Coords -> Conv -> MaxPool -> BatchNorm -> ReLU.
+
+ Parameters
+ ----------
+ in_channels : int
+ Number of channels in the input tensor.
+ out_channels : int
+ Number of output channels after the convolution.
+ input_height : int
+ Height (H dimension, frequency bins) of the input tensor to this block.
+ Used to generate the coordinate features.
+ kernel_size : int, default=3
+ Size of the square convolutional kernel.
+ pad_size : int, default=1
+ Padding added before convolution.
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ input_height: int,
+ kernel_size: int = 3,
+ pad_size: int = 1,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+
+ self.coords = nn.Parameter(
+ torch.linspace(-1, 1, input_height)[None, None, ..., None],
+ requires_grad=False,
+ )
+ self.conv = nn.Conv2d(
+ in_channels + 1,
+ out_channels,
+ kernel_size=kernel_size,
+ padding=pad_size,
+ stride=1,
+ )
+ self.batch_norm = nn.BatchNorm2d(out_channels)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Apply CoordF -> Conv -> MaxPool -> BN -> ReLU.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor, shape `(B, C_in, H, W)`, where H must match
+ `input_height`.
+
+ Returns
+ -------
+ torch.Tensor
+ Output tensor, shape `(B, C_out, H/2, W/2)` (due to MaxPool).
+ """
+ freq_info = self.coords.repeat(x.shape[0], 1, 1, x.shape[3])
+ x = torch.cat((x, freq_info), 1)
+ x = F.max_pool2d(self.conv(x), 2, 2)
+ x = F.relu(self.batch_norm(x), inplace=True)
+ return x
+
+ def get_output_height(self, input_height: int) -> int:
+ return input_height // 2
+
+ @block_registry.register(FreqCoordConvDownConfig)
+ @staticmethod
+ def from_config(
+ config: FreqCoordConvDownConfig,
+ input_channels: int,
+ input_height: int,
+ ):
+ return FreqCoordConvDownBlock(
+ in_channels=input_channels,
+ out_channels=config.out_channels,
+ input_height=input_height,
+ kernel_size=config.kernel_size,
+ pad_size=config.pad_size,
+ )
+
+
+class StandardConvDownConfig(BaseConfig):
+    """Configuration for a StandardConvDownBlock.
+
+    Selected by ``name == "StandardConvDown"`` in the layer-config union;
+    the block halves the height and width of its input via 2x2 max pooling.
+    """
+
+    name: Literal["StandardConvDown"] = "StandardConvDown"
+    """Discriminator field indicating the block type."""
+
+    out_channels: int
+    """Number of output channels."""
+
+    kernel_size: int = 3
+    """Size of the square convolutional kernel."""
+
+    pad_size: int = 1
+    """Padding size."""
+
+
+class StandardConvDownBlock(Block):
+ """Standard Downsampling Convolutional Block.
+
+ A basic downsampling block consisting of a 2D convolution, followed by
+ 2x2 max pooling, batch normalization, and ReLU activation.
+
+ Sequence: Conv -> MaxPool -> BN -> ReLU.
+
+ Parameters
+ ----------
+ in_channels : int
+ Number of channels in the input tensor.
+ out_channels : int
+ Number of output channels after the convolution.
+ kernel_size : int, default=3
+ Size of the square convolutional kernel.
+ pad_size : int, default=1
+ Padding added before convolution.
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ kernel_size: int = 3,
+ pad_size: int = 1,
+ ):
+ super(StandardConvDownBlock, self).__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.conv = nn.Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ padding=pad_size,
+ stride=1,
+ )
+ self.batch_norm = nn.BatchNorm2d(out_channels)
+
+ def forward(self, x):
+ """Apply Conv -> MaxPool -> BN -> ReLU.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor, shape `(B, C_in, H, W)`.
+
+ Returns
+ -------
+ torch.Tensor
+ Output tensor, shape `(B, C_out, H/2, W/2)`.
+ """
+ x = F.max_pool2d(self.conv(x), 2, 2)
+ return F.relu(self.batch_norm(x), inplace=True)
+
+ def get_output_height(self, input_height: int) -> int:
+ return input_height // 2
+
+ @block_registry.register(StandardConvDownConfig)
+ @staticmethod
+ def from_config(
+ config: StandardConvDownConfig,
+ input_channels: int,
+ input_height: int,
+ ):
+ return StandardConvDownBlock(
+ in_channels=input_channels,
+ out_channels=config.out_channels,
+ kernel_size=config.kernel_size,
+ pad_size=config.pad_size,
+ )
+
+
+class FreqCoordConvUpConfig(BaseConfig):
+    """Configuration for a FreqCoordConvUpBlock.
+
+    Selected by ``name == "FreqCoordConvUp"`` in the layer-config union;
+    the block upsamples by ``up_scale``, appends a frequency-coordinate
+    channel, then convolves.
+    """
+
+    name: Literal["FreqCoordConvUp"] = "FreqCoordConvUp"
+    """Discriminator field indicating the block type."""
+
+    out_channels: int
+    """Number of output channels."""
+
+    kernel_size: int = 3
+    """Size of the square convolutional kernel."""
+
+    pad_size: int = 1
+    """Padding size."""
+
+    up_mode: str = "bilinear"
+    """Interpolation mode for upsampling (e.g., "nearest", "bilinear")."""
+
+    up_scale: tuple[int, int] = (2, 2)
+    """Scaling factor for height and width during upsampling."""
+
+
+class FreqCoordConvUpBlock(Block):
+ """Upsampling Conv Block incorporating Frequency Coordinate features.
+
+ This block implements an upsampling step followed by a convolution,
+ commonly used in CNN decoders. Before the convolution, it concatenates an
+ extra channel representing the normalized vertical coordinate (frequency)
+ of the *upsampled* feature map.
+
+ The goal is to provide spatial awareness (frequency position) to the
+ filters during the decoding/upsampling process.
+
+ Sequence: Interpolate -> Concat Coords -> Conv -> BatchNorm -> ReLU.
+
+ Parameters
+ ----------
+ in_channels
+ Number of channels in the input tensor (before upsampling).
+ out_channels
+ Number of output channels after the convolution.
+ input_height
+ Height (H dimension, frequency bins) of the tensor *before* upsampling.
+ Used to calculate the height for coordinate feature generation after
+ upsampling.
+ kernel_size
+ Size of the square convolutional kernel.
+ pad_size
+ Padding added before convolution.
+ up_mode
+ Interpolation mode for upsampling (e.g., "nearest", "bilinear",
+ "bicubic").
+ up_scale
+ Scaling factor for height and width during upsampling
+ (typically (2, 2)).
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ input_height: int,
+ kernel_size: int = 3,
+ pad_size: int = 1,
+ up_mode: str = "bilinear",
+ up_scale: tuple[int, int] = (2, 2),
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+
+ self.up_scale = up_scale
+ self.up_mode = up_mode
+ self.coords = nn.Parameter(
+ torch.linspace(-1, 1, input_height * up_scale[0])[
+ None, None, ..., None
+ ],
+ requires_grad=False,
+ )
+ self.conv = nn.Conv2d(
+ in_channels + 1,
+ out_channels,
+ kernel_size=kernel_size,
+ padding=pad_size,
+ )
+ self.batch_norm = nn.BatchNorm2d(out_channels)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Apply Interpolate -> Concat Coords -> Conv -> BN -> ReLU.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor, shape `(B, C_in, H_in, W_in)`, where H_in should match
+ `input_height` used during initialization.
+
+ Returns
+ -------
+ torch.Tensor
+ Output tensor, shape `(B, C_out, H_in * scale_h, W_in * scale_w)`.
+ """
+ op = F.interpolate(
+ x,
+ size=(
+ x.shape[-2] * self.up_scale[0],
+ x.shape[-1] * self.up_scale[1],
+ ),
+ mode=self.up_mode,
+ align_corners=False,
+ )
+ freq_info = self.coords.repeat(op.shape[0], 1, 1, op.shape[3])
+ op = torch.cat((op, freq_info), 1)
+ op = self.conv(op)
+ op = F.relu(self.batch_norm(op), inplace=True)
+ return op
+
+ def get_output_height(self, input_height: int) -> int:
+ return input_height * 2
+
+ @block_registry.register(FreqCoordConvUpConfig)
+ @staticmethod
+ def from_config(
+ config: FreqCoordConvUpConfig,
+ input_channels: int,
+ input_height: int,
+ ):
+ return FreqCoordConvUpBlock(
+ in_channels=input_channels,
+ out_channels=config.out_channels,
+ input_height=input_height,
+ kernel_size=config.kernel_size,
+ pad_size=config.pad_size,
+ up_mode=config.up_mode,
+ up_scale=config.up_scale,
+ )
+
+
+class StandardConvUpConfig(BaseConfig):
+    """Configuration for a StandardConvUpBlock.
+
+    Selected by ``name == "StandardConvUp"`` in the layer-config union;
+    the block upsamples by ``up_scale`` and then convolves (no coordinate
+    features).
+    """
+
+    name: Literal["StandardConvUp"] = "StandardConvUp"
+    """Discriminator field indicating the block type."""
+
+    out_channels: int
+    """Number of output channels."""
+
+    kernel_size: int = 3
+    """Size of the square convolutional kernel."""
+
+    pad_size: int = 1
+    """Padding size."""
+
+    up_mode: str = "bilinear"
+    """Interpolation mode for upsampling (e.g., "nearest", "bilinear")."""
+
+    up_scale: tuple[int, int] = (2, 2)
+    """Scaling factor for height and width during upsampling."""
+
+
+class StandardConvUpBlock(Block):
+ """Standard Upsampling Convolutional Block.
+
+ A basic upsampling block used in CNN decoders. It first upsamples the input
+ feature map using interpolation, then applies a 2D convolution, batch
+ normalization, and ReLU activation. Does not use coordinate features.
+
+ Sequence: Interpolate -> Conv -> BN -> ReLU.
+
+ Parameters
+ ----------
+ in_channels
+ Number of channels in the input tensor (before upsampling).
+ out_channels
+ Number of output channels after the convolution.
+ kernel_size
+ Size of the square convolutional kernel.
+ pad_size
+ Padding added before convolution.
+ up_mode
+ Interpolation mode for upsampling (e.g., "nearest", "bilinear").
+ up_scale
+ Scaling factor for height and width during upsampling.
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ kernel_size: int = 3,
+ pad_size: int = 1,
+ up_mode: str = "bilinear",
+ up_scale: tuple[int, int] = (2, 2),
+ ):
+ super(StandardConvUpBlock, self).__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.up_scale = up_scale
+ self.up_mode = up_mode
+ self.conv = nn.Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ padding=pad_size,
+ )
+ self.batch_norm = nn.BatchNorm2d(out_channels)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Apply Interpolate -> Conv -> BN -> ReLU.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor, shape `(B, C_in, H_in, W_in)`.
+
+ Returns
+ -------
+ torch.Tensor
+ Output tensor, shape `(B, C_out, H_in * scale_h, W_in * scale_w)`.
+ """
+ op = F.interpolate(
+ x,
+ size=(
+ x.shape[-2] * self.up_scale[0],
+ x.shape[-1] * self.up_scale[1],
+ ),
+ mode=self.up_mode,
+ align_corners=False,
+ )
+ op = self.conv(op)
+ op = F.relu(self.batch_norm(op), inplace=True)
+ return op
+
+ def get_output_height(self, input_height: int) -> int:
+ return input_height * 2
+
+ @block_registry.register(StandardConvUpConfig)
+ @staticmethod
+ def from_config(
+ config: StandardConvUpConfig,
+ input_channels: int,
+ input_height: int,
+ ):
+ return StandardConvUpBlock(
+ in_channels=input_channels,
+ out_channels=config.out_channels,
+ kernel_size=config.kernel_size,
+ pad_size=config.pad_size,
+ up_mode=config.up_mode,
+ up_scale=config.up_scale,
+ )
+
+
+class LayerGroupConfig(BaseConfig):
+    """Configuration for a ``LayerGroup`` — a sequential chain of blocks.
+
+    Use this when a single encoder or decoder stage needs more than one
+    block. The blocks are executed in the order they appear in ``layers``,
+    with channel counts and heights propagated automatically. Groups may
+    be nested, since ``LayerGroupConfig`` is itself part of ``LayerConfig``.
+
+    Attributes
+    ----------
+    name : str
+        Discriminator field; always ``"LayerGroup"``.
+    layers : List[LayerConfig]
+        Ordered list of block configurations to chain together.
+    """
+
+    name: Literal["LayerGroup"] = "LayerGroup"
+    layers: list["LayerConfig"]
+
+
+LayerConfig = Annotated[
+    ConvConfig
+    | FreqCoordConvDownConfig
+    | StandardConvDownConfig
+    | FreqCoordConvUpConfig
+    | StandardConvUpConfig
+    | SelfAttentionConfig
+    | LayerGroupConfig,
+    Field(discriminator="name"),
+]
+"""Discriminated union of block configuration models.
+
+The ``name`` field of each model acts as the discriminator, so pydantic
+selects the correct concrete config when validating nested layer lists.
+"""
+
+
+class LayerGroup(nn.Module):
+ """Sequential chain of blocks that acts as a single composite block.
+
+ Wraps multiple ``Block`` instances in an ``nn.Sequential`` container,
+ exposing the same ``in_channels``, ``out_channels``, and
+ ``get_output_height`` interface as a regular ``Block`` so it can be
+ used transparently wherever a single block is expected.
+
+ Instances are typically constructed by ``build_layer`` when given a
+ ``LayerGroupConfig``; you rarely need to create them directly.
+
+ Parameters
+ ----------
+ layers : list[Block]
+ Pre-built block instances to chain, in execution order.
+ input_height : int
+ Height of the tensor entering the first block.
+ input_channels : int
+ Number of channels in the tensor entering the first block.
+
+ Attributes
+ ----------
+ in_channels : int
+ Number of input channels (taken from the first block).
+ out_channels : int
+ Number of output channels (taken from the last block).
+ layers : nn.Sequential
+ The wrapped sequence of block modules.
+ """
+
+ def __init__(
+ self,
+ layers: list[Block],
+ input_height: int,
+ input_channels: int,
+ ):
+ super().__init__()
+ self.in_channels = input_channels
+ self.out_channels = (
+ layers[-1].out_channels if layers else input_channels
+ )
+ self.layers = nn.Sequential(*layers)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Pass input through all blocks in sequence.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input feature map, shape ``(B, C_in, H, W)``.
+
+ Returns
+ -------
+ torch.Tensor
+ Output feature map after all blocks have been applied.
+ """
+ return self.layers(x)
+
+ def get_output_height(self, input_height: int) -> int:
+ """Compute the output height by propagating through all blocks.
+
+ Parameters
+ ----------
+ input_height : int
+ Height of the input feature map.
+
+ Returns
+ -------
+ int
+ Height after all blocks in the group have been applied.
+ """
+ for block in self.layers:
+ input_height = block.get_output_height(input_height) # type: ignore
+ return input_height
+
+ @block_registry.register(LayerGroupConfig)
+ @staticmethod
+ def from_config(
+ config: LayerGroupConfig,
+ input_channels: int,
+ input_height: int,
+ ):
+ layers = []
+
+ for layer_config in config.layers:
+ layer = build_layer(
+ input_height=input_height,
+ in_channels=input_channels,
+ config=layer_config,
+ )
+ layers.append(layer)
+ input_height = layer.get_output_height(input_height)
+ input_channels = layer.out_channels
+
+ return LayerGroup(
+ layers=layers,
+ input_height=input_height,
+ input_channels=input_channels,
+ )
+
+
+def build_layer(
+    input_height: int,
+    in_channels: int,
+    config: LayerConfig,
+) -> Block:
+    """Build a block from its configuration object.
+
+    Looks up the block class corresponding to ``config.name`` in the
+    internal block registry and instantiates it with the given input
+    dimensions. This is the standard way to construct blocks when
+    assembling an encoder or decoder from a configuration file.
+    ``LayerGroupConfig`` entries are handled recursively: the group's
+    factory calls back into this function for each nested layer.
+
+    Parameters
+    ----------
+    input_height : int
+        Height (number of frequency bins) of the input tensor to this
+        block. Required for blocks whose kernel size depends on the input
+        height (e.g. ``VerticalConv``) and for coordinate-aware blocks.
+    in_channels : int
+        Number of channels in the input tensor to this block.
+    config : LayerConfig
+        A configuration object for the desired block type. The ``name``
+        field selects the block class; remaining fields supply its
+        parameters.
+
+    Returns
+    -------
+    Block
+        An initialised block module ready to be added to an
+        ``nn.Sequential`` or ``nn.ModuleList``.
+
+    Raises
+    ------
+    KeyError
+        If ``config.name`` does not correspond to a registered block type.
+    ValueError
+        If the configuration parameters are invalid for the chosen block.
+    """
+    return block_registry.build(config, in_channels, input_height)
diff --git a/src/batdetect2/models/bottleneck.py b/src/batdetect2/models/bottleneck.py
new file mode 100644
index 00000000..9b2154ac
--- /dev/null
+++ b/src/batdetect2/models/bottleneck.py
@@ -0,0 +1,258 @@
+"""Bottleneck component for encoder-decoder network architectures.
+
+The bottleneck sits between the encoder (downsampling path) and the decoder
+(upsampling path) and processes the lowest-resolution, highest-channel feature
+map produced by the encoder.
+
+This module provides:
+
+- ``BottleneckConfig`` – configuration dataclass describing the number of
+ internal channels and an optional sequence of additional layers (currently
+ only ``SelfAttention`` is supported).
+- ``Bottleneck`` – the ``torch.nn.Module`` implementation. It first applies a
+ ``VerticalConv`` to collapse the frequency axis to a single bin, optionally
+ runs one or more additional layers (e.g. self-attention along the time axis),
+ then repeats the output along the height dimension to restore the original
+ frequency resolution before passing features to the decoder.
+- ``build_bottleneck`` – factory function that constructs a ``Bottleneck``
+ instance from a ``BottleneckConfig`` and the encoder's output dimensions.
+"""
+
+from typing import Annotated, List
+
+import torch
+from pydantic import Field
+from torch import nn
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.models.blocks import (
+ Block,
+ SelfAttentionConfig,
+ VerticalConv,
+ build_layer,
+)
+from batdetect2.models.types import BottleneckProtocol
+
+__all__ = [
+ "BottleneckConfig",
+ "Bottleneck",
+ "build_bottleneck",
+]
+
+
+class Bottleneck(Block):
+ """Bottleneck module for encoder-decoder architectures.
+
+ Processes the lowest-resolution feature map that links the encoder and
+ decoder. The sequence of operations is:
+
+ 1. ``VerticalConv`` – collapses the frequency axis (height) to a single
+ bin by applying a convolution whose kernel spans the full height.
+ 2. Optional additional layers (e.g. ``SelfAttention``) – applied while
+ the feature map has height 1, so they operate purely along the time
+ axis.
+ 3. Height restoration – the single-bin output is repeated along the
+ height axis to restore the original frequency resolution, producing
+ a tensor that the decoder can accept.
+
+ Parameters
+ ----------
+ input_height : int
+ Height (number of frequency bins) of the input tensor. Must be
+ positive.
+ in_channels : int
+ Number of channels in the input tensor from the encoder. Must be
+ positive.
+ out_channels : int
+ Number of output channels after the bottleneck. Must be positive.
+ bottleneck_channels : int, optional
+ Number of internal channels used by the ``VerticalConv`` layer.
+ Defaults to ``out_channels`` if not provided.
+ layers : List[torch.nn.Module], optional
+ Additional modules (e.g. ``SelfAttention``) to apply after the
+ ``VerticalConv`` and before height restoration.
+
+ Attributes
+ ----------
+ in_channels : int
+ Number of input channels accepted by the bottleneck.
+ out_channels : int
+ Number of output channels produced by the bottleneck.
+ input_height : int
+ Expected height of the input tensor.
+ bottleneck_channels : int
+ Number of channels used internally by the vertical convolution.
+ conv_vert : VerticalConv
+ The vertical convolution layer.
+ layers : nn.ModuleList
+ Additional layers applied after the vertical convolution.
+ """
+
+ def __init__(
+ self,
+ input_height: int,
+ in_channels: int,
+ out_channels: int,
+ bottleneck_channels: int | None = None,
+ layers: List[torch.nn.Module] | None = None,
+ ) -> None:
+ """Initialise the Bottleneck layer.
+
+ Parameters
+ ----------
+ input_height : int
+ Height (number of frequency bins) of the input tensor.
+ in_channels : int
+ Number of channels in the input tensor.
+ out_channels : int
+ Number of channels in the output tensor.
+ bottleneck_channels : int, optional
+ Number of internal channels for the ``VerticalConv``. Defaults
+ to ``out_channels``.
+ layers : List[torch.nn.Module], optional
+ Additional modules applied after the ``VerticalConv``, such as
+ a ``SelfAttention`` block.
+ """
+ super().__init__()
+ self.in_channels = in_channels
+ self.input_height = input_height
+ self.out_channels = out_channels
+
+ self.bottleneck_channels = (
+ bottleneck_channels
+ if bottleneck_channels is not None
+ else out_channels
+ )
+ self.layers = nn.ModuleList(layers or [])
+
+ self.conv_vert = VerticalConv(
+ in_channels=in_channels,
+ out_channels=self.bottleneck_channels,
+ input_height=input_height,
+ )
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Process the encoder's bottleneck features.
+
+ Applies vertical convolution, optional additional layers, then
+ restores the height dimension by repetition.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor from the encoder, shape
+ ``(B, C_in, H_in, W)``. ``C_in`` must match
+ ``self.in_channels`` and ``H_in`` must match
+ ``self.input_height``.
+
+ Returns
+ -------
+ torch.Tensor
+ Output tensor with shape ``(B, C_out, H_in, W)``. The height
+ ``H_in`` is restored by repeating the single-bin result.
+ """
+ x = self.conv_vert(x)
+
+ for layer in self.layers:
+ x = layer(x)
+
+ return x.repeat([1, 1, self.input_height, 1])
+
+
+BottleneckLayerConfig = Annotated[
+    SelfAttentionConfig,
+    Field(discriminator="name"),
+]
+"""Union of block configs usable inside the Bottleneck.
+
+Currently only ``SelfAttentionConfig``; the ``name`` discriminator is kept
+so further layer types can be added without changing call sites.
+"""
+
+
+class BottleneckConfig(BaseConfig):
+    """Configuration for the bottleneck component.
+
+    Consumed by ``build_bottleneck``; see ``DEFAULT_BOTTLENECK_CONFIG``
+    for the values used when no explicit config is given.
+
+    Attributes
+    ----------
+    channels : int
+        Number of output channels produced by the bottleneck. This value
+        is also used as the dimensionality of any optional layers (e.g.
+        self-attention). Must be positive.
+    layers : List[BottleneckLayerConfig]
+        Ordered list of additional block configurations to apply after the
+        initial ``VerticalConv``. Currently only ``SelfAttentionConfig`` is
+        supported. Defaults to an empty list (no extra layers).
+    """
+
+    channels: int
+    layers: List[BottleneckLayerConfig] = Field(default_factory=list)
+
+
+# Default bottleneck used by ``build_bottleneck`` when no config is given:
+# 256 output channels with one self-attention layer along the time axis.
+DEFAULT_BOTTLENECK_CONFIG: BottleneckConfig = BottleneckConfig(
+    channels=256,
+    layers=[
+        SelfAttentionConfig(attention_channels=256),
+    ],
+)
+
+
+def build_bottleneck(
+ input_height: int,
+ in_channels: int,
+ config: BottleneckConfig | None = None,
+) -> BottleneckProtocol:
+ """Build a ``Bottleneck`` module from configuration.
+
+ Constructs a ``Bottleneck`` instance whose internal channel count and
+ optional extra layers (e.g. self-attention) are controlled by
+ ``config``. If no configuration is provided, the default
+ ``DEFAULT_BOTTLENECK_CONFIG`` is used, which includes a
+ ``SelfAttention`` layer.
+
+ Parameters
+ ----------
+ input_height : int
+ Height (number of frequency bins) of the input tensor from the
+ encoder. Must be positive.
+ in_channels : int
+ Number of channels in the input tensor from the encoder. Must be
+ positive.
+ config : BottleneckConfig, optional
+ Configuration specifying the output channel count and any
+ additional layers. Uses ``DEFAULT_BOTTLENECK_CONFIG`` if ``None``.
+
+ Returns
+ -------
+ BottleneckProtocol
+ An initialised ``Bottleneck`` module.
+
+ Raises
+ ------
+ AssertionError
+ If any configured layer changes the height of the feature map
+ (bottleneck layers must preserve height so that it can be restored
+ by repetition).
+ """
+ config = config or DEFAULT_BOTTLENECK_CONFIG
+
+ current_channels = in_channels
+ current_height = input_height
+
+ layers = []
+
+ for layer_config in config.layers:
+ layer = build_layer(
+ input_height=current_height,
+ in_channels=current_channels,
+ config=layer_config,
+ )
+ current_height = layer.get_output_height(current_height)
+ current_channels = layer.out_channels
+ assert current_height == input_height, (
+ "Bottleneck layers should not change the spectrogram height"
+ )
+ layers.append(layer)
+
+ return Bottleneck(
+ input_height=input_height,
+ in_channels=in_channels,
+ out_channels=config.channels,
+ layers=layers,
+ )
diff --git a/batdetect2/models/Net2DFast_UK_same.pth.tar b/src/batdetect2/models/checkpoints/Net2DFast_UK_same.pth.tar
similarity index 100%
rename from batdetect2/models/Net2DFast_UK_same.pth.tar
rename to src/batdetect2/models/checkpoints/Net2DFast_UK_same.pth.tar
diff --git a/src/batdetect2/models/checkpoints/batdetect2_uk_same.ckpt b/src/batdetect2/models/checkpoints/batdetect2_uk_same.ckpt
new file mode 100644
index 00000000..b849167a
Binary files /dev/null and b/src/batdetect2/models/checkpoints/batdetect2_uk_same.ckpt differ
diff --git a/batdetect2/models/readme.md b/src/batdetect2/models/checkpoints/readme.md
similarity index 100%
rename from batdetect2/models/readme.md
rename to src/batdetect2/models/checkpoints/readme.md
diff --git a/src/batdetect2/models/decoder.py b/src/batdetect2/models/decoder.py
new file mode 100644
index 00000000..82e11506
--- /dev/null
+++ b/src/batdetect2/models/decoder.py
@@ -0,0 +1,268 @@
+"""Decoder (upsampling path) for the BatDetect2 backbone.
+
+This module defines ``DecoderConfig`` and the ``Decoder`` ``nn.Module``,
+together with the ``build_decoder`` factory function.
+
+In a U-Net-style network the decoder progressively restores the spatial
+resolution of the feature map back towards the input resolution. At each
+stage it combines the upsampled features with the corresponding skip-connection
+tensor from the encoder (the residual) by element-wise addition before passing
+the result to the upsampling block.
+
+The decoder is fully configurable: the type, number, and parameters of the
+upsampling blocks are described by a ``DecoderConfig`` object containing an
+ordered list of block configuration objects (see ``batdetect2.models.blocks``
+for available block types).
+
+A default configuration ``DEFAULT_DECODER_CONFIG`` is provided and used by
+``build_decoder`` when no explicit configuration is supplied.
+"""
+
+from typing import Annotated, List
+
+import torch
+from pydantic import Field
+from torch import nn
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.models.blocks import (
+ ConvConfig,
+ FreqCoordConvUpConfig,
+ LayerGroupConfig,
+ StandardConvUpConfig,
+ build_layer,
+)
+
+__all__ = [
+ "DecoderConfig",
+ "Decoder",
+ "build_decoder",
+ "DEFAULT_DECODER_CONFIG",
+]
+
+DecoderLayerConfig = Annotated[
+ ConvConfig
+ | FreqCoordConvUpConfig
+ | StandardConvUpConfig
+ | LayerGroupConfig,
+ Field(discriminator="name"),
+]
+"""Type alias for the discriminated union of block configs usable in Decoder."""
+
+
+class DecoderConfig(BaseConfig):
+ """Configuration for the sequential ``Decoder`` module.
+
+ Attributes
+ ----------
+ layers : List[DecoderLayerConfig]
+ Ordered list of block configuration objects defining the decoder's
+ upsampling stages (from deepest to shallowest). Each entry
+ specifies the block type (via its ``name`` field) and any
+ block-specific parameters such as ``out_channels``. Input channels
+ for each block are inferred automatically from the output of the
+ previous block. Must contain at least one entry.
+ """
+
+ layers: List[DecoderLayerConfig] = Field(min_length=1)
+
+
+class Decoder(nn.Module):
+ """Sequential decoder module composed of configurable upsampling layers.
+
+ Executes a series of upsampling blocks in order, adding the
+ corresponding encoder skip-connection tensor (residual) to the feature
+ map before each block. The residuals are consumed in reverse order (from
+ deepest encoder layer to shallowest) to match the spatial resolutions at
+ each decoder stage.
+
+ Instances are typically created by ``build_decoder``.
+
+ Attributes
+ ----------
+ in_channels : int
+ Number of channels expected in the input tensor (bottleneck output).
+ out_channels : int
+ Number of channels in the final output feature map.
+ input_height : int
+ Height (frequency bins) of the input tensor.
+ output_height : int
+ Height (frequency bins) of the output tensor.
+ layers : nn.ModuleList
+ Sequence of instantiated upsampling block modules.
+ depth : int
+ Number of upsampling layers.
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ input_height: int,
+ output_height: int,
+ layers: List[nn.Module],
+ ):
+ """Initialise the Decoder module.
+
+ This constructor is typically called by the ``build_decoder``
+ factory function.
+
+ Parameters
+ ----------
+ in_channels : int
+ Number of channels in the input tensor (bottleneck output).
+ out_channels : int
+ Number of channels produced by the final layer.
+ input_height : int
+ Height of the input tensor (bottleneck output height).
+ output_height : int
+ Height of the output tensor after all layers have been applied.
+ layers : List[nn.Module]
+ Pre-built upsampling block modules in execution order (deepest
+ stage first).
+ """
+ super().__init__()
+
+ self.input_height = input_height
+ self.output_height = output_height
+
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+
+ self.layers = nn.ModuleList(layers)
+ self.depth = len(self.layers)
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ residuals: List[torch.Tensor],
+ ) -> torch.Tensor:
+ """Pass input through all decoder layers, incorporating skip connections.
+
+ At each stage the corresponding residual tensor is added
+ element-wise to ``x`` before it is passed to the upsampling block.
+        Residuals are consumed in reverse order — the last element of
+        ``residuals`` (the output of the deepest encoder layer) is added
+        at the first decoder stage, and the first element (output of the
+        shallowest encoder layer) is added at the last decoder stage.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Bottleneck feature map, shape ``(B, C_in, H_in, W)``.
+ residuals : List[torch.Tensor]
+ Skip-connection tensors from the encoder, ordered from shallowest
+ (index 0) to deepest (index -1). Must contain exactly
+ ``self.depth`` tensors. Each tensor must have the same spatial
+ dimensions and channel count as ``x`` at the corresponding
+ decoder stage.
+
+ Returns
+ -------
+ torch.Tensor
+ Decoded feature map, shape ``(B, C_out, H_out, W)``.
+
+ Raises
+ ------
+ ValueError
+ If the number of ``residuals`` does not equal ``self.depth``.
+ """
+ if len(residuals) != len(self.layers):
+ raise ValueError(
+ f"Incorrect number of residuals provided. "
+ f"Expected {len(self.layers)} (matching the number of layers), "
+ f"but got {len(residuals)}."
+ )
+
+ for layer, res in zip(self.layers, residuals[::-1], strict=False):
+ x = layer(x + res)
+
+ return x
+
+
+DEFAULT_DECODER_CONFIG: DecoderConfig = DecoderConfig(
+ layers=[
+ FreqCoordConvUpConfig(out_channels=64),
+ FreqCoordConvUpConfig(out_channels=32),
+ LayerGroupConfig(
+ layers=[
+ FreqCoordConvUpConfig(out_channels=32),
+ ConvConfig(out_channels=32),
+ ]
+ ),
+ ],
+)
+"""Default decoder configuration used in standard BatDetect2 models.
+
+Mirrors ``DEFAULT_ENCODER_CONFIG`` in reverse. Assumes the bottleneck
+output has 256 channels and height 16, and produces:
+
+- Stage 1 (``FreqCoordConvUp``): 64 channels, height 32.
+- Stage 2 (``FreqCoordConvUp``): 32 channels, height 64.
+- Stage 3 (``LayerGroup``):
+
+ - ``FreqCoordConvUp``: 32 channels, height 128.
+ - ``ConvBlock``: 32 channels, height 128 (final feature map).
+"""
+
+
+def build_decoder(
+ in_channels: int,
+ input_height: int,
+ config: DecoderConfig | None = None,
+) -> Decoder:
+ """Build a ``Decoder`` from configuration.
+
+ Constructs a sequential ``Decoder`` by iterating over the block
+ configurations in ``config.layers``, building each block with
+ ``build_layer``, and tracking the channel count and feature-map height
+ as they change through the sequence.
+
+ Parameters
+ ----------
+ in_channels : int
+ Number of channels in the input tensor (bottleneck output). Must
+ be positive.
+ input_height : int
+ Height (number of frequency bins) of the input tensor. Must be
+ positive.
+ config : DecoderConfig, optional
+ Configuration specifying the layer sequence. Defaults to
+ ``DEFAULT_DECODER_CONFIG`` if not provided.
+
+ Returns
+ -------
+ Decoder
+ An initialised ``Decoder`` module.
+
+    Raises
+    ------
+    KeyError
+        If a layer configuration specifies an unknown block type.
+        Note that, unlike ``build_encoder``, this function does not
+        itself validate that ``in_channels``/``input_height`` are positive.
+ """
+ config = config or DEFAULT_DECODER_CONFIG
+
+ current_channels = in_channels
+ current_height = input_height
+
+ layers = []
+
+ for layer_config in config.layers:
+ layer = build_layer(
+ in_channels=current_channels,
+ input_height=current_height,
+ config=layer_config,
+ )
+ current_height = layer.get_output_height(current_height)
+ current_channels = layer.out_channels
+ layers.append(layer)
+
+ return Decoder(
+ in_channels=in_channels,
+ out_channels=current_channels,
+ input_height=input_height,
+ output_height=current_height,
+ layers=layers,
+ )
diff --git a/src/batdetect2/models/detectors.py b/src/batdetect2/models/detectors.py
new file mode 100644
index 00000000..586becaf
--- /dev/null
+++ b/src/batdetect2/models/detectors.py
@@ -0,0 +1,186 @@
+"""Assembles the complete BatDetect2 detection model.
+
+This module defines the ``Detector`` class, which combines a backbone
+feature extractor with prediction heads for detection, classification, and
+bounding-box size regression.
+
+Components
+----------
+- ``Detector`` - the ``torch.nn.Module`` that wires together a backbone
+ (``BackboneProtocol``) with a ``ClassifierHead`` and a ``BBoxHead`` to
+ produce a ``ModelOutput`` tuple from an input spectrogram.
+- ``build_detector`` – factory function that builds a ready-to-use
+ ``Detector`` from a backbone configuration and a target class count.
+
+Note that ``Detector`` operates purely on spectrogram tensors; raw audio
+preprocessing and output postprocessing are handled by
+``batdetect2.preprocess`` and ``batdetect2.postprocess`` respectively.
+"""
+
+import torch
+
+from batdetect2.models.backbones import BackboneConfig, build_backbone
+from batdetect2.models.heads import BBoxHead, ClassifierHead
+from batdetect2.models.types import (
+ BackboneProtocol,
+ ClassifierHeadProtocol,
+ DetectorProtocol,
+ ModelOutput,
+ SizeHeadProtocol,
+)
+
+__all__ = [
+ "Detector",
+ "build_detector",
+]
+
+
+class Detector(torch.nn.Module):
+ """Complete BatDetect2 detection and classification model.
+
+ Combines a backbone feature extractor with two prediction heads:
+
+ - ``ClassifierHead``: predicts per-class probabilities at each
+ time–frequency location.
+ - ``BBoxHead``: predicts call duration and bandwidth at each location.
+
+ The detection probability map is derived from the class probabilities by
+ summing across the class dimension (i.e. the probability that *any* class
+ is present), rather than from a separate detection head.
+
+ Instances are typically created via ``build_detector``.
+
+ Attributes
+ ----------
+ backbone : BackboneProtocol
+ The feature extraction backbone.
+ num_classes : int
+ Number of target classes (inferred from the classifier head).
+ classifier_head : ClassifierHead
+ Produces per-class probability maps from backbone features.
+    size_head : SizeHeadProtocol
+        Produces duration and bandwidth predictions from backbone features.
+ """
+
+ backbone: BackboneProtocol
+
+ def __init__(
+ self,
+ backbone: BackboneProtocol,
+ classifier_head: ClassifierHeadProtocol,
+ size_head: SizeHeadProtocol,
+ ):
+ """Initialise the Detector model.
+
+ This constructor is typically called by the ``build_detector``
+ factory function.
+
+ Parameters
+ ----------
+ backbone : BackboneProtocol
+ An initialised backbone module (e.g. built by
+ ``build_backbone``).
+ classifier_head : ClassifierHead
+ An initialised classification head. The ``num_classes``
+ attribute is read from this head.
+        size_head : SizeHeadProtocol
+            An initialised bounding-box size prediction head.
+ """
+ super().__init__()
+
+ self.backbone = backbone
+ self.num_classes = classifier_head.num_classes
+ self.classifier_head = classifier_head
+ self.size_head = size_head
+
+ def forward(self, spec: torch.Tensor) -> ModelOutput:
+ """Run the complete detection model on an input spectrogram.
+
+ Passes the spectrogram through the backbone to produce a feature
+ map, then applies the classifier and bounding-box heads. The
+ detection probability map is derived by summing the per-class
+ probability maps across the class dimension; no separate detection
+ head is used.
+
+ Parameters
+ ----------
+ spec : torch.Tensor
+ Input spectrogram tensor, shape
+ ``(batch_size, channels, frequency_bins, time_bins)``.
+
+ Returns
+ -------
+ ModelOutput
+ A named tuple with four fields:
+
+ - ``detection_probs`` – ``(B, 1, H, W)`` – probability that a
+ call of any class is present at each location. Derived by
+ summing ``class_probs`` over the class dimension.
+        - ``size_preds`` – ``(B, num_sizes, H, W)`` – scaled size
+          predictions (e.g. duration, bandwidth) at each location.
+ - ``class_probs`` – ``(B, num_classes, H, W)`` – per-class
+ probabilities at each location.
+ - ``features`` – ``(B, C_out, H, W)`` – raw backbone feature
+ map.
+ """
+ features = self.backbone(spec)
+ classification = self.classifier_head(features)
+ detection = classification.sum(dim=1, keepdim=True)
+ size_preds = self.size_head(features)
+ return ModelOutput(
+ detection_probs=detection,
+ size_preds=size_preds,
+ class_probs=classification,
+ features=features,
+ )
+
+
+def build_detector(
+ class_names: list[str],
+ dimension_names: list[str],
+ config: BackboneConfig | None = None,
+ backbone: BackboneProtocol | None = None,
+) -> DetectorProtocol:
+ """Build a complete BatDetect2 detection model.
+
+ Constructs a backbone from ``config``, attaches a ``ClassifierHead``
+ and a ``BBoxHead`` sized to the backbone's output channel count, and
+ returns them wrapped in a ``Detector``.
+
+ Parameters
+ ----------
+    class_names : list[str]
+        Names of the target classes (bat species or call types).
+    dimension_names : list[str]
+        Names of the bounding-box size dimensions to predict.
+    config : BackboneConfig, optional
+        Backbone architecture configuration; used only when an explicit
+        ``backbone`` module is not supplied.
+
+ Returns
+ -------
+ DetectorProtocol
+ An initialised ``Detector`` instance ready for training or
+ inference.
+
+ Raises
+ ------
+    ValueError
+        If the backbone configuration is invalid (raised while
+        building the backbone; arguments are not validated here).
+ """
+ backbone = backbone or build_backbone(config=config)
+
+ classifier_head = ClassifierHead(
+ class_names=class_names,
+ in_channels=backbone.out_channels,
+ )
+ bbox_head = BBoxHead(
+ in_channels=backbone.out_channels,
+ dimension_names=dimension_names,
+ )
+ return Detector(
+ backbone=backbone,
+ classifier_head=classifier_head,
+ size_head=bbox_head,
+ )
diff --git a/src/batdetect2/models/encoder.py b/src/batdetect2/models/encoder.py
new file mode 100644
index 00000000..24650e51
--- /dev/null
+++ b/src/batdetect2/models/encoder.py
@@ -0,0 +1,316 @@
+"""Encoder (downsampling path) for the BatDetect2 backbone.
+
+This module defines ``EncoderConfig`` and the ``Encoder`` ``nn.Module``,
+together with the ``build_encoder`` factory function.
+
+In a U-Net-style network the encoder progressively reduces the spatial
+resolution of the spectrogram whilst increasing the number of feature
+channels. Each layer in the encoder produces a feature map that is stored
+for use as a skip connection in the corresponding decoder layer.
+
+The encoder is fully configurable: the type, number, and parameters of the
+downsampling blocks are described by an ``EncoderConfig`` object containing
+an ordered list of block configuration objects (see ``batdetect2.models.blocks``
+for available block types).
+
+``Encoder.forward`` returns the outputs of *all* encoder layers as a list,
+so that skip connections are available to the decoder.
+``Encoder.encode`` returns only the final output (the input to the bottleneck).
+
+A default configuration ``DEFAULT_ENCODER_CONFIG`` is provided and used by
+``build_encoder`` when no explicit configuration is supplied.
+"""
+
+from typing import Annotated, List
+
+import torch
+from pydantic import Field
+from torch import nn
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.models.blocks import (
+ ConvConfig,
+ FreqCoordConvDownConfig,
+ LayerGroupConfig,
+ StandardConvDownConfig,
+ build_layer,
+)
+
+__all__ = [
+ "EncoderConfig",
+ "Encoder",
+ "build_encoder",
+ "DEFAULT_ENCODER_CONFIG",
+]
+
+EncoderLayerConfig = Annotated[
+ ConvConfig
+ | FreqCoordConvDownConfig
+ | StandardConvDownConfig
+ | LayerGroupConfig,
+ Field(discriminator="name"),
+]
+"""Type alias for the discriminated union of block configs usable in Encoder."""
+
+
+class EncoderConfig(BaseConfig):
+ """Configuration for the sequential ``Encoder`` module.
+
+ Attributes
+ ----------
+ layers : List[EncoderLayerConfig]
+ Ordered list of block configuration objects defining the encoder's
+ downsampling stages. Each entry specifies the block type (via its
+ ``name`` field) and any block-specific parameters such as
+ ``out_channels``. Input channels for each block are inferred
+ automatically from the output of the previous block. Must contain
+ at least one entry.
+ """
+
+ layers: List[EncoderLayerConfig] = Field(min_length=1)
+
+
+class Encoder(nn.Module):
+ """Sequential encoder module composed of configurable downsampling layers.
+
+ Executes a series of downsampling blocks in order, storing the output of
+ each block so that it can be passed as a skip connection to the
+ corresponding decoder layer.
+
+ ``forward`` returns the outputs of *all* layers (useful when skip
+ connections are needed). ``encode`` returns only the final output
+ (the input to the bottleneck).
+
+ Attributes
+ ----------
+ in_channels : int
+ Number of channels expected in the input tensor.
+ input_height : int
+ Height (frequency bins) expected in the input tensor.
+ out_channels : int
+ Number of channels in the final output tensor (bottleneck input).
+ output_height : int
+ Height (frequency bins) of the final output tensor.
+ layers : nn.ModuleList
+ Sequence of instantiated downsampling block modules.
+ depth : int
+ Number of downsampling layers.
+ """
+
+ def __init__(
+ self,
+ output_channels: int,
+ output_height: int,
+ layers: List[nn.Module],
+ input_height: int = 128,
+ in_channels: int = 1,
+ ):
+ """Initialise the Encoder module.
+
+ This constructor is typically called by the ``build_encoder`` factory
+ function, which takes care of building the ``layers`` list from a
+ configuration object.
+
+ Parameters
+ ----------
+ output_channels : int
+ Number of channels produced by the final layer.
+ output_height : int
+ Height of the output tensor after all layers have been applied.
+ layers : List[nn.Module]
+ Pre-built downsampling block modules in execution order.
+ input_height : int, default=128
+ Expected height of the input tensor (frequency bins).
+ in_channels : int, default=1
+ Expected number of channels in the input tensor.
+ """
+ super().__init__()
+
+ self.in_channels = in_channels
+ self.input_height = input_height
+ self.out_channels = output_channels
+ self.output_height = output_height
+
+ self.layers = nn.ModuleList(layers)
+ self.depth = len(self.layers)
+
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
+ """Pass input through all encoder layers and return every output.
+
+ Used when skip connections are needed (e.g. in a U-Net decoder).
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input spectrogram feature map, shape ``(B, C_in, H_in, W)``.
+ ``C_in`` must match ``self.in_channels`` and ``H_in`` must
+ match ``self.input_height``.
+
+ Returns
+ -------
+ List[torch.Tensor]
+ Output tensors from every layer in order.
+ ``outputs[0]`` is the output of the first (shallowest) layer;
+ ``outputs[-1]`` is the output of the last (deepest) layer,
+ which serves as the input to the bottleneck.
+
+ Raises
+ ------
+ ValueError
+ If the input channel count or height does not match the
+ expected values.
+ """
+ if x.shape[1] != self.in_channels:
+ raise ValueError(
+ f"Input tensor has {x.shape[1]} channels, "
+ f"but encoder expects {self.in_channels}."
+ )
+
+ if x.shape[2] != self.input_height:
+ raise ValueError(
+ f"Input tensor height {x.shape[2]} does not match "
+ f"encoder expected input_height {self.input_height}."
+ )
+
+ outputs = []
+
+ for layer in self.layers:
+ x = layer(x)
+ outputs.append(x)
+
+ return outputs
+
+ def encode(self, x: torch.Tensor) -> torch.Tensor:
+ """Pass input through all encoder layers and return only the final output.
+
+ Use this when skip connections are not needed and you only require
+ the bottleneck feature map.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input spectrogram feature map, shape ``(B, C_in, H_in, W)``.
+ Must satisfy the same shape requirements as ``forward``.
+
+ Returns
+ -------
+ torch.Tensor
+ Output of the last encoder layer, shape
+ ``(B, C_out, H_out, W)``, where ``C_out`` is
+ ``self.out_channels`` and ``H_out`` is ``self.output_height``.
+
+ Raises
+ ------
+ ValueError
+ If the input channel count or height does not match the
+ expected values.
+ """
+ if x.shape[1] != self.in_channels:
+ raise ValueError(
+ f"Input tensor has {x.shape[1]} channels, "
+ f"but encoder expects {self.in_channels}."
+ )
+
+ if x.shape[2] != self.input_height:
+ raise ValueError(
+ f"Input tensor height {x.shape[2]} does not match "
+ f"encoder expected input_height {self.input_height}."
+ )
+
+ for layer in self.layers:
+ x = layer(x)
+
+ return x
+
+
+DEFAULT_ENCODER_CONFIG: EncoderConfig = EncoderConfig(
+ layers=[
+ FreqCoordConvDownConfig(out_channels=32),
+ FreqCoordConvDownConfig(out_channels=64),
+ LayerGroupConfig(
+ layers=[
+ FreqCoordConvDownConfig(out_channels=128),
+ ConvConfig(out_channels=256),
+ ]
+ ),
+ ],
+)
+"""Default encoder configuration used in standard BatDetect2 models.
+
+Assumes a 1-channel input with 128 frequency bins and produces the
+following feature maps:
+
+- Stage 1 (``FreqCoordConvDown``): 32 channels, height 64.
+- Stage 2 (``FreqCoordConvDown``): 64 channels, height 32.
+- Stage 3 (``LayerGroup``):
+
+ - ``FreqCoordConvDown``: 128 channels, height 16.
+ - ``ConvBlock``: 256 channels, height 16 (bottleneck input).
+"""
+
+
+def build_encoder(
+ in_channels: int,
+ input_height: int,
+ config: EncoderConfig | None = None,
+) -> Encoder:
+ """Build an ``Encoder`` from configuration.
+
+ Constructs a sequential ``Encoder`` by iterating over the block
+ configurations in ``config.layers``, building each block with
+ ``build_layer``, and tracking the channel count and feature-map height
+ as they change through the sequence.
+
+ Parameters
+ ----------
+ in_channels : int
+ Number of channels in the input spectrogram tensor. Must be
+ positive.
+ input_height : int
+ Height (number of frequency bins) of the input spectrogram.
+ Must be positive and should be divisible by
+ ``2 ** (number of downsampling stages)`` to avoid size mismatches
+ later in the network.
+ config : EncoderConfig, optional
+ Configuration specifying the layer sequence. Defaults to
+ ``DEFAULT_ENCODER_CONFIG`` if not provided.
+
+ Returns
+ -------
+ Encoder
+ An initialised ``Encoder`` module.
+
+ Raises
+ ------
+ ValueError
+ If ``in_channels`` or ``input_height`` are not positive.
+ KeyError
+ If a layer configuration specifies an unknown block type.
+ """
+ if in_channels <= 0 or input_height <= 0:
+ raise ValueError("in_channels and input_height must be positive.")
+
+ config = config or DEFAULT_ENCODER_CONFIG
+
+ current_channels = in_channels
+ current_height = input_height
+
+ layers = []
+
+ for layer_config in config.layers:
+ layer = build_layer(
+ in_channels=current_channels,
+ input_height=current_height,
+ config=layer_config,
+ )
+ layers.append(layer)
+ current_height = layer.get_output_height(current_height)
+ current_channels = layer.out_channels
+
+ return Encoder(
+ input_height=input_height,
+ layers=layers,
+ in_channels=in_channels,
+ output_channels=current_channels,
+ output_height=current_height,
+ )
diff --git a/src/batdetect2/models/heads.py b/src/batdetect2/models/heads.py
new file mode 100644
index 00000000..250ddb6f
--- /dev/null
+++ b/src/batdetect2/models/heads.py
@@ -0,0 +1,200 @@
+"""Prediction heads attached to the backbone feature map.
+
+Each head is a lightweight ``torch.nn.Module`` that applies a 1×1
+convolution to map backbone feature channels to one specific type of
+output required by BatDetect2:
+
+- ``DetectorHead``: single-channel detection probability heatmap (sigmoid
+ activation).
+- ``ClassifierHead``: multi-class probability map over the target bat
+ species / call types (softmax activation).
+- ``BBoxHead``: two-channel map of predicted call duration (time axis) and
+ bandwidth (frequency axis) at each location (no activation; raw
+ regression output).
+
+All three heads share the same input feature map produced by the backbone,
+so they can be evaluated in parallel in a single forward pass.
+"""
+
+import torch
+from torch import nn
+
+__all__ = [
+ "ClassifierHead",
+ "DetectorHead",
+ "BBoxHead",
+]
+
+
+class ClassifierHead(nn.Module):
+ """Prediction head for species / call-type classification probabilities.
+
+ Takes a backbone feature map and produces a probability map where each
+ channel corresponds to a target class. Internally the 1×1 convolution
+ maps ``in_channels`` to ``num_classes + 1`` logits (the extra channel
+ represents a generic background / unknown category); a softmax is then
+ applied across the channel dimension and the background channel is
+ discarded before returning.
+
+ Parameters
+ ----------
+    class_names : list[str]
+        Names of the target classes (bat species or call types),
+        excluding the background category.
+ in_channels : int
+ Number of channels in the backbone feature map. Must be positive.
+
+ Attributes
+ ----------
+ num_classes : int
+ Number of specific output classes (background excluded).
+ in_channels : int
+ Number of input channels expected.
+ classifier : nn.Conv2d
+ 1×1 convolution with ``num_classes + 1`` output channels.
+ """
+
+ def __init__(self, class_names: list[str], in_channels: int):
+ """Initialise the ClassifierHead."""
+ super().__init__()
+
+ self.class_names = class_names
+ self.num_classes = len(class_names)
+ self.in_channels = in_channels
+
+ self.classifier = nn.Conv2d(
+ self.in_channels,
+ self.num_classes + 1,
+ kernel_size=1,
+ padding=0,
+ )
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ """Compute per-class probabilities from backbone features.
+
+ Parameters
+ ----------
+ features : torch.Tensor
+ Backbone feature map, shape ``(B, C_in, H, W)``.
+
+ Returns
+ -------
+ torch.Tensor
+ Class probability map, shape ``(B, num_classes, H, W)``.
+ Values are softmax probabilities in the range [0, 1] and
+ sum to less than 1 per location (the background probability
+ is discarded).
+ """
+ logits = self.classifier(features)
+ probs = torch.softmax(logits, dim=1)
+ return probs[:, :-1]
+
+
+class DetectorHead(nn.Module):
+ """Prediction head for detection probability (is a call present here?).
+
+ Produces a single-channel heatmap where each value indicates the
+ probability ([0, 1]) that a bat call of *any* species is present at
+ that time–frequency location in the spectrogram.
+
+ Applies a 1×1 convolution mapping ``in_channels`` → 1, followed by
+ sigmoid activation.
+
+ Parameters
+ ----------
+ in_channels : int
+ Number of channels in the backbone feature map. Must be positive.
+
+ Attributes
+ ----------
+ in_channels : int
+ Number of input channels expected.
+ detector : nn.Conv2d
+ 1×1 convolution with a single output channel.
+ """
+
+ def __init__(self, in_channels: int):
+ """Initialise the DetectorHead."""
+ super().__init__()
+ self.in_channels = in_channels
+
+ self.detector = nn.Conv2d(
+ in_channels=self.in_channels,
+ out_channels=1,
+ kernel_size=1,
+ padding=0,
+ )
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ """Compute detection probabilities from backbone features.
+
+ Parameters
+ ----------
+ features : torch.Tensor
+ Backbone feature map, shape ``(B, C_in, H, W)``.
+
+ Returns
+ -------
+ torch.Tensor
+ Detection probability heatmap, shape ``(B, 1, H, W)``.
+ Values are in the range [0, 1].
+ """
+ return torch.sigmoid(self.detector(features))
+
+
+class BBoxHead(nn.Module):
+ """Prediction head for bounding box size (duration and bandwidth).
+
+    Produces a map with one channel per entry in ``dimension_names``
+    (typically scaled duration / time-axis extent and scaled bandwidth /
+    frequency-axis extent) at each spectrogram location.
+
+    Applies a 1×1 convolution mapping ``in_channels`` → ``num_sizes`` with no
+ activation function (raw regression output). The predicted values are
+ in a scaled space and must be converted to real units (seconds and Hz)
+ during postprocessing.
+
+ Parameters
+ ----------
+ in_channels : int
+ Number of channels in the backbone feature map. Must be positive.
+
+ Attributes
+ ----------
+ in_channels : int
+ Number of input channels expected.
+ bbox : nn.Conv2d
+        1×1 convolution with ``num_sizes`` output channels (one per dimension).
+ """
+
+ def __init__(self, dimension_names: list[str], in_channels: int):
+ """Initialise the BBoxHead."""
+ super().__init__()
+ self.in_channels = in_channels
+ self.dimension_names = dimension_names
+ self.num_sizes = len(dimension_names)
+
+ self.bbox = nn.Conv2d(
+ in_channels=self.in_channels,
+ out_channels=self.num_sizes,
+ kernel_size=1,
+ padding=0,
+ )
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ """Predict call duration and bandwidth from backbone features.
+
+ Parameters
+ ----------
+ features : torch.Tensor
+ Backbone feature map, shape ``(B, C_in, H, W)``.
+
+ Returns
+ -------
+ torch.Tensor
+            Size prediction tensor, shape ``(B, num_sizes, H, W)``; one
+            channel per entry in ``dimension_names`` (e.g. scaled
+            duration and bandwidth). Values must be rescaled to real
+            units during postprocessing.
+ """
+ return self.bbox(features)
diff --git a/src/batdetect2/models/types.py b/src/batdetect2/models/types.py
new file mode 100644
index 00000000..ca2f236d
--- /dev/null
+++ b/src/batdetect2/models/types.py
@@ -0,0 +1,125 @@
+from typing import TYPE_CHECKING, Any, NamedTuple, Protocol
+
+import torch
+
+if TYPE_CHECKING:
+ from batdetect2.postprocess.types import PostprocessorProtocol
+ from batdetect2.preprocess.types import PreprocessorProtocol
+
+__all__ = [
+ "BackboneProtocol",
+ "BlockProtocol",
+ "BottleneckProtocol",
+ "ClassifierHeadProtocol",
+ "DecoderProtocol",
+ "DetectorProtocol",
+ "EncoderProtocol",
+ "ModelOutput",
+ "ModelProtocol",
+ "ModuleProtocol",
+ "SizeHeadProtocol",
+]
+
+
+class ModuleProtocol(Protocol):
+ def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
+
+ def train(self, mode: bool = True) -> torch.nn.Module: ...
+
+ def eval(self) -> torch.nn.Module: ...
+
+ def state_dict(
+ self, *args: Any, **kwargs: Any
+ ) -> dict[str, torch.Tensor]: ...
+
+ def load_state_dict(self, *args: Any, **kwargs: Any) -> Any: ...
+
+ def parameters(self) -> Any: ...
+
+
+class BlockProtocol(ModuleProtocol, Protocol):
+ in_channels: int
+ out_channels: int
+
+ def __call__(self, x: torch.Tensor) -> torch.Tensor: ...
+
+ def get_output_height(self, input_height: int) -> int: ...
+
+
+class EncoderProtocol(ModuleProtocol, Protocol):
+ in_channels: int
+ out_channels: int
+ input_height: int
+ output_height: int
+
+ def __call__(self, x: torch.Tensor) -> list[torch.Tensor]: ...
+
+
+class BottleneckProtocol(ModuleProtocol, Protocol):
+ in_channels: int
+ out_channels: int
+ input_height: int
+
+ def __call__(self, x: torch.Tensor) -> torch.Tensor: ...
+
+
+class DecoderProtocol(ModuleProtocol, Protocol):
+ in_channels: int
+ out_channels: int
+ input_height: int
+ output_height: int
+ depth: int
+
+ def __call__(
+ self,
+ x: torch.Tensor,
+ residuals: list[torch.Tensor],
+ ) -> torch.Tensor: ...
+
+
+class ModelOutput(NamedTuple):
+ detection_probs: torch.Tensor
+ size_preds: torch.Tensor
+ class_probs: torch.Tensor
+ features: torch.Tensor
+
+
+class BackboneProtocol(ModuleProtocol, Protocol):
+ input_height: int
+ out_channels: int
+
+ def forward(self, spec: torch.Tensor) -> torch.Tensor: ...
+
+
+class ClassifierHeadProtocol(ModuleProtocol, Protocol):
+ num_classes: int
+ in_channels: int
+ class_names: list[str]
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor: ...
+
+
+class SizeHeadProtocol(ModuleProtocol, Protocol):
+ in_channels: int
+ num_sizes: int
+ dimension_names: list[str]
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor: ...
+
+
+class DetectorProtocol(ModuleProtocol, Protocol):
+ backbone: BackboneProtocol
+ classifier_head: ClassifierHeadProtocol
+ size_head: SizeHeadProtocol
+
+ def forward(self, spec: torch.Tensor) -> ModelOutput: ...
+
+
+class ModelProtocol(ModuleProtocol, Protocol):
+ detector: DetectorProtocol
+ preprocessor: "PreprocessorProtocol"
+ postprocessor: "PostprocessorProtocol"
+ class_names: list[str]
+ dimension_names: list[str]
+
+ def get_config(self) -> dict[str, Any]: ...
diff --git a/src/batdetect2/outputs/__init__.py b/src/batdetect2/outputs/__init__.py
new file mode 100644
index 00000000..68fae45a
--- /dev/null
+++ b/src/batdetect2/outputs/__init__.py
@@ -0,0 +1,35 @@
+from batdetect2.outputs.config import OutputsConfig
+from batdetect2.outputs.formats import (
+ BatDetect2OutputConfig,
+ OutputFormatConfig,
+ ParquetOutputConfig,
+ RawOutputConfig,
+ SoundEventOutputConfig,
+ build_output_formatter,
+ get_output_formatter,
+ load_predictions,
+)
+from batdetect2.outputs.transforms import (
+ OutputTransformConfig,
+ build_output_transform,
+)
+from batdetect2.outputs.types import (
+ OutputFormatterProtocol,
+ OutputTransformProtocol,
+)
+
+__all__ = [
+ "BatDetect2OutputConfig",
+ "OutputFormatConfig",
+ "OutputFormatterProtocol",
+ "OutputTransformConfig",
+ "OutputTransformProtocol",
+ "OutputsConfig",
+ "ParquetOutputConfig",
+ "RawOutputConfig",
+ "SoundEventOutputConfig",
+ "build_output_formatter",
+ "build_output_transform",
+ "get_output_formatter",
+ "load_predictions",
+]
diff --git a/src/batdetect2/outputs/config.py b/src/batdetect2/outputs/config.py
new file mode 100644
index 00000000..be3a78b6
--- /dev/null
+++ b/src/batdetect2/outputs/config.py
@@ -0,0 +1,15 @@
+from pydantic import Field
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.outputs.formats import OutputFormatConfig
+from batdetect2.outputs.formats.raw import RawOutputConfig
+from batdetect2.outputs.transforms import OutputTransformConfig
+
+__all__ = ["OutputsConfig"]
+
+
+class OutputsConfig(BaseConfig):
+    """Configuration for the outputs stage.
+
+    Pairs an output ``format`` (which formatter is built; defaults to the
+    raw format) with a ``transform`` applied to predictions.
+    """
+
+    format: OutputFormatConfig = Field(default_factory=RawOutputConfig)
+    transform: OutputTransformConfig = Field(
+        default_factory=OutputTransformConfig
+    )
diff --git a/src/batdetect2/outputs/formats/__init__.py b/src/batdetect2/outputs/formats/__init__.py
new file mode 100644
index 00000000..0244b835
--- /dev/null
+++ b/src/batdetect2/outputs/formats/__init__.py
@@ -0,0 +1,83 @@
+from typing import Annotated
+
+from pydantic import Field
+from soundevent.data import PathLike
+
+from batdetect2.outputs.formats.base import (
+ OutputFormatterProtocol,
+ output_formatters,
+)
+from batdetect2.outputs.formats.batdetect2 import BatDetect2OutputConfig
+from batdetect2.outputs.formats.parquet import ParquetOutputConfig
+from batdetect2.outputs.formats.raw import RawOutputConfig
+from batdetect2.outputs.formats.soundevent import SoundEventOutputConfig
+from batdetect2.targets.types import TargetProtocol
+
+__all__ = [
+ "BatDetect2OutputConfig",
+ "OutputFormatConfig",
+ "ParquetOutputConfig",
+ "RawOutputConfig",
+ "SoundEventOutputConfig",
+ "build_output_formatter",
+ "get_output_formatter",
+ "load_predictions",
+]
+
+
+# Discriminated union of all built-in output-format configs. Pydantic
+# selects the concrete config class via the ``name`` literal field.
+OutputFormatConfig = Annotated[
+    BatDetect2OutputConfig
+    | ParquetOutputConfig
+    | SoundEventOutputConfig
+    | RawOutputConfig,
+    Field(discriminator="name"),
+]
+
+
+def build_output_formatter(
+    targets: TargetProtocol | None = None,
+    config: OutputFormatConfig | None = None,
+) -> OutputFormatterProtocol:
+    """Construct the final output formatter.
+
+    Falls back to a default ``RawOutputConfig`` when no config is given,
+    and to ``build_targets()`` when no targets are given, then delegates
+    construction to the ``output_formatters`` registry.
+    """
+    # Imported lazily to avoid a circular import at module load time.
+    from batdetect2.targets import build_targets
+
+    config = config or RawOutputConfig()
+
+    targets = targets or build_targets()
+    return output_formatters.build(config, targets)
+
+
+def get_output_formatter(
+    name: str | None = None,
+    targets: TargetProtocol | None = None,
+    config: OutputFormatConfig | None = None,
+) -> OutputFormatterProtocol:
+    """Get the output formatter by name.
+
+    When ``config`` is omitted, a default config is instantiated for the
+    registered formatter called ``name``. When both are supplied, the
+    config's ``name`` field must match ``name``.
+
+    Raises:
+        ValueError: if neither ``config`` nor ``name`` is provided, or if
+            the provided config's name disagrees with ``name``.
+    """
+
+    if config is None:
+        if name is None:
+            raise ValueError("Either config or name must be provided.")
+
+        # Look up the registered config class and use its defaults.
+        config_class = output_formatters.get_config_type(name)
+        config = config_class()  # type: ignore
+
+    # Consistency check only meaningful when a name was also supplied.
+    if config.name != name:  # type: ignore
+        raise ValueError(
+            f"Config name {config.name} does not match formatter name {name}"  # type: ignore
+        )
+
+    return build_output_formatter(targets, config)
+
+
+def load_predictions(
+    path: PathLike,
+    format: str | None = "raw",
+    config: OutputFormatConfig | None = None,
+    targets: TargetProtocol | None = None,
+):
+    """Load predictions from a file.
+
+    Builds (or defaults) the targets, resolves the formatter for the
+    requested ``format``/``config``, and returns whatever that
+    formatter's ``load`` produces for ``path``.
+    """
+    # Imported lazily to avoid a circular import at module load time.
+    from batdetect2.targets import build_targets
+
+    targets = targets or build_targets()
+    formatter = get_output_formatter(format, targets, config)
+    return formatter.load(path)
diff --git a/src/batdetect2/outputs/formats/base.py b/src/batdetect2/outputs/formats/base.py
new file mode 100644
index 00000000..12c12bdf
--- /dev/null
+++ b/src/batdetect2/outputs/formats/base.py
@@ -0,0 +1,50 @@
+from pathlib import Path
+from typing import Literal
+
+from soundevent.data import PathLike
+
+from batdetect2.core import ImportConfig, Registry, add_import_config
+from batdetect2.outputs.types import OutputFormatterProtocol
+from batdetect2.targets.types import TargetProtocol
+
+__all__ = [
+ "OutputFormatterProtocol",
+ "PredictionFormatterImportConfig",
+ "make_path_relative",
+ "output_formatters",
+]
+
+
+def make_path_relative(path: PathLike, audio_dir: PathLike) -> Path:
+    """Express ``path`` relative to ``audio_dir``.
+
+    Three cases:
+    * absolute ``path`` inside ``audio_dir`` -> the relative remainder;
+    * absolute ``path`` outside ``audio_dir`` -> ``ValueError``;
+    * relative ``path`` -> strip a leading ``audio_dir`` prefix if its
+      components match, otherwise return ``path`` unchanged.
+    """
+    path = Path(path)
+    audio_dir = Path(audio_dir)
+
+    if path.is_absolute():
+        if not path.is_relative_to(audio_dir):
+            raise ValueError(
+                f"Audio file {path} is not in audio_dir {audio_dir}"
+            )
+
+        return path.relative_to(audio_dir)
+
+    # Relative path: drop a matching audio_dir prefix component-wise.
+    audio_parts = audio_dir.parts
+    if audio_parts and path.parts[: len(audio_parts)] == audio_parts:
+        return Path(*path.parts[len(audio_parts) :])
+
+    return path
+
+
+# Registry mapping formatter configs (plus a TargetProtocol argument) to
+# OutputFormatterProtocol instances; formatters self-register via
+# ``output_formatters.register(...)`` on their ``from_config`` factories.
+output_formatters: Registry[OutputFormatterProtocol, [TargetProtocol]] = (
+    Registry(name="output_formatter")
+)
+
+
+@add_import_config(output_formatters)
+class PredictionFormatterImportConfig(ImportConfig):
+    """Use any callable as a prediction formatter.
+
+    Set ``name="import"`` and provide a ``target`` pointing to any
+    callable to use it instead of a built-in option.
+    """
+
+    # Discriminator value selecting this config in the formatter union.
+    name: Literal["import"] = "import"
diff --git a/src/batdetect2/outputs/formats/batdetect2.py b/src/batdetect2/outputs/formats/batdetect2.py
new file mode 100644
index 00000000..d913ea62
--- /dev/null
+++ b/src/batdetect2/outputs/formats/batdetect2.py
@@ -0,0 +1,351 @@
+import json
+from pathlib import Path
+from typing import List, Literal, Sequence, TypedDict, cast
+
+import numpy as np
+import pandas as pd
+from soundevent import data
+from soundevent.geometry import compute_bounds
+
+from batdetect2.core import BaseConfig
+from batdetect2.outputs.formats.base import (
+ make_path_relative,
+ output_formatters,
+)
+from batdetect2.outputs.types import OutputFormatterProtocol
+from batdetect2.postprocess.types import ClipDetections, Detection
+from batdetect2.targets.types import TargetProtocol
+
+try:
+ from typing import NotRequired # type: ignore
+except ImportError:
+ from typing_extensions import NotRequired
+
+# Functional TypedDict syntax is required here: "class" is a Python
+# keyword and cannot be declared as a field in class-based syntax.
+DictWithClass = TypedDict("DictWithClass", {"class": str})
+
+
+class Annotation(DictWithClass, total=False):
+    """A single detection in the legacy BatDetect2 JSON format.
+
+    Inherits the required ``class`` key from ``DictWithClass``; all keys
+    declared here are optional (``total=False``).
+    """
+
+    start_time: float
+    end_time: float
+    low_freq: float
+    high_freq: float
+    class_prob: float
+    det_prob: float
+    individual: str
+    event: str
+    # Present only when CNN features are exported; stripped from the
+    # JSON payload on save.
+    cnn_features: NotRequired[list[float]]  # ty: ignore[invalid-type-form]
+
+
+class FileAnnotation(TypedDict):
+    """Per-recording record in the legacy BatDetect2 JSON format."""
+
+    id: str  # recording file name
+    annotated: bool
+    duration: float
+    issues: bool
+    time_exp: float  # time-expansion factor of the recording
+    class_name: str  # recording-level class label
+    notes: str
+    annotation: List[Annotation]  # per-detection annotations
+    # Optional original path; may be removed before serialization.
+    file_path: NotRequired[str]  # ty: ignore[invalid-type-form]
+
+
+class BatDetect2OutputConfig(BaseConfig):
+    """Configuration for the legacy BatDetect2 JSON/CSV output format."""
+
+    name: Literal["batdetect2"] = "batdetect2"
+
+    event_name: str = "Echolocation"  # value written to each annotation's "event"
+    annotation_note: str = "Automatically generated."
+    # How class labels are rendered: raw class name or a decoded tag value.
+    class_label_mode: Literal["class_name", "decoded_tag"] = "decoded_tag"
+    decoded_label_key: str = "dwc:scientificName"  # tag key used when decoding
+    fallback_to_class_name: bool = True  # use class name if decoding fails
+    write_detection_csv: bool = True  # also write a per-file detections CSV
+    write_cnn_features_csv: bool = False  # also write a CNN-features CSV
+    save_if_empty: bool = False  # write files even with zero detections
+    preserve_audio_tree: bool = True  # mirror the audio dir structure in outputs
+    include_file_path: bool = False  # keep "file_path" in the JSON payload
+
+
+class BatDetect2Formatter(OutputFormatterProtocol[FileAnnotation]):
+    """Formatter producing the legacy BatDetect2 per-file JSON (and CSV) output."""
+
+    def __init__(
+        self,
+        targets: TargetProtocol,
+        event_name: str,
+        annotation_note: str,
+        class_label_mode: Literal["class_name", "decoded_tag"] = "decoded_tag",
+        decoded_label_key: str = "dwc:scientificName",
+        fallback_to_class_name: bool = True,
+        write_detection_csv: bool = True,
+        write_cnn_features_csv: bool = False,
+        save_if_empty: bool = False,
+        preserve_audio_tree: bool = True,
+        include_file_path: bool = False,
+    ):
+        self.targets = targets
+        self.event_name = event_name
+        self.annotation_note = annotation_note
+        self.class_label_mode = class_label_mode
+        self.decoded_label_key = decoded_label_key
+        self.fallback_to_class_name = fallback_to_class_name
+        self.write_detection_csv = write_detection_csv
+        self.write_cnn_features_csv = write_cnn_features_csv
+        self.save_if_empty = save_if_empty
+        self.preserve_audio_tree = preserve_audio_tree
+        self.include_file_path = include_file_path
+
+    def format(
+        self, predictions: Sequence[ClipDetections]
+    ) -> List[FileAnnotation]:
+        """Convert each clip's detections to a FileAnnotation record."""
+        return [
+            self.format_prediction(prediction) for prediction in predictions
+        ]
+
+    def save(
+        self,
+        predictions: Sequence[FileAnnotation],
+        path: data.PathLike,
+        audio_dir: data.PathLike | None = None,
+    ) -> None:
+        """Write one JSON file per prediction under ``path``.
+
+        Empty predictions are skipped unless ``save_if_empty``. CSV
+        side-outputs are written next to the JSON when configured.
+        """
+        path = Path(path)
+
+        if not path.is_dir():
+            path.mkdir(parents=True)
+
+        for prediction in predictions:
+            annotations = prediction["annotation"]
+
+            if not annotations and not self.save_if_empty:
+                continue
+
+            pred_path = self.get_output_path(prediction, path, audio_dir)
+            pred_path.parent.mkdir(parents=True, exist_ok=True)
+
+            # make a copy of the prediction
+            # NOTE(review): this local name shadows the ``soundevent.data``
+            # module import for the rest of this method.
+            data = dict(prediction)
+
+            # Rewrite the stored file path relative to the audio dir.
+            raw_file_path = data.get("file_path")
+            if audio_dir is not None and isinstance(raw_file_path, str):
+                data["file_path"] = str(
+                    make_path_relative(raw_file_path, audio_dir)
+                )
+
+            if not self.include_file_path:
+                data.pop("file_path", None)
+
+            # CNN features never go into the JSON payload; they are only
+            # exported via the dedicated CSV below.
+            annotations = cast(list[Annotation], data["annotation"])
+            data["annotation"] = [
+                {
+                    key: value
+                    for key, value in annotation.items()
+                    if key != "cnn_features"
+                }
+                for annotation in annotations
+            ]
+
+            pred_path.write_text(json.dumps(data, indent=2, sort_keys=True))
+
+            if self.write_detection_csv:
+                self.save_detection_csv(
+                    prediction,
+                    pred_path.with_suffix(".csv"),
+                )
+
+            if self.write_cnn_features_csv:
+                self.save_cnn_features_csv(
+                    prediction,
+                    pred_path.with_name(pred_path.stem + "_cnn_features.csv"),
+                )
+
+    def load(self, path: data.PathLike) -> List[FileAnnotation]:
+        """Read FileAnnotation records from a JSON file or a directory tree."""
+        path = Path(path)
+
+        if path.is_file():
+            # A single file only counts if it is a .json file.
+            files = [path] if path.suffix == ".json" else []
+        else:
+            files = sorted(path.rglob("*.json"))
+
+        if not files:
+            return []
+
+        return [
+            json.loads(file.read_text()) for file in files if file.is_file()
+        ]
+
+    def get_output_path(
+        self,
+        prediction: FileAnnotation,
+        output_dir: Path,
+        audio_dir: data.PathLike | None,
+    ) -> Path:
+        """Compute the JSON output path, optionally mirroring the audio tree."""
+        if (
+            self.preserve_audio_tree
+            and audio_dir is not None
+            and "file_path" in prediction
+        ):
+            relative_path = make_path_relative(
+                prediction["file_path"],
+                audio_dir,
+            )
+            return (
+                output_dir / relative_path.parent / f"{prediction['id']}.json"
+            )
+
+        return output_dir / f"{prediction['id']}.json"
+
+    def save_detection_csv(
+        self,
+        prediction: FileAnnotation,
+        path: Path,
+    ) -> None:
+        """Write detections to CSV; no file is written when there are none."""
+        annotations = prediction["annotation"]
+        if not annotations:
+            return
+
+        # Fixed column selection/order for the legacy CSV layout.
+        preds_df = pd.DataFrame(annotations)[
+            [
+                "det_prob",
+                "start_time",
+                "end_time",
+                "high_freq",
+                "low_freq",
+                "class",
+                "class_prob",
+            ]
+        ]
+        preds_df.to_csv(path, sep=",")
+
+    def save_cnn_features_csv(
+        self, prediction: FileAnnotation, path: Path
+    ) -> None:
+        """Write per-detection CNN feature vectors to CSV, if any are present."""
+        annotations = prediction["annotation"]
+
+        if not annotations:
+            return
+
+        cnn_features = [
+            annotation["cnn_features"]
+            for annotation in annotations
+            if "cnn_features" in annotation
+        ]
+
+        if not cnn_features:
+            return
+
+        # Columns are simply the feature indices as strings.
+        cnn_feats_df = pd.DataFrame(
+            cnn_features,
+            columns=[str(ii) for ii in range(len(cnn_features[0]))],
+        )
+
+        cnn_feats_df.to_csv(
+            path,
+            sep=",",
+            index=False,
+            float_format="%.5f",
+        )
+
+    def get_class_name(self, class_index: int) -> str:
+        """Render a class label for ``class_index`` per ``class_label_mode``.
+
+        Raises:
+            ValueError: decoding failed and ``fallback_to_class_name`` is off.
+        """
+        class_name = self.targets.class_names[class_index]
+
+        if self.class_label_mode == "class_name":
+            return class_name
+
+        # decoded_tag mode: look up the configured tag key on the
+        # decoded class tags.
+        tags = self.targets.decode_class(class_name)
+        default = class_name if self.fallback_to_class_name else None
+        decoded = data.find_tag_value(
+            tags,
+            key=self.decoded_label_key,
+            default=default,
+        )
+
+        if decoded is None:
+            raise ValueError(
+                "Could not decode class label using key "
+                f"{self.decoded_label_key!r} for class {class_name!r}."
+            )
+
+        return decoded
+
+    def get_recording_class(self, detections: Sequence[Detection]) -> str:
+        """Pick a recording-level class: detection-score-weighted class vote."""
+        if not detections:
+            return "None"
+
+        # Shape (num_classes, num_detections) after stacking on axis=1.
+        class_scores = np.stack(
+            [detection.class_scores for detection in detections],
+            axis=1,
+        )
+        detection_scores = np.array(
+            [detection.detection_score for detection in detections],
+            dtype=np.float32,
+        )
+        # Weight each detection's class scores by its detection score and
+        # sum over detections -> one score per class.
+        weighted_scores = (class_scores * detection_scores).sum(axis=1)
+
+        total = weighted_scores.sum()
+
+        if total <= 0:
+            return "None"
+
+        # Division by the (positive) total does not change the argmax.
+        top_class_index = int(np.argmax(weighted_scores / total))
+        return self.get_class_name(top_class_index)
+
+    def format_prediction(self, prediction: ClipDetections) -> FileAnnotation:
+        """Build the FileAnnotation record for a single clip."""
+        recording = prediction.clip.recording
+
+        annotations = [
+            self.format_sound_event_prediction(pred)
+            for pred in prediction.detections
+        ]
+
+        file_annotation = FileAnnotation(
+            id=recording.path.name,
+            annotated=False,
+            duration=round(float(recording.duration), 4),
+            issues=False,
+            time_exp=recording.time_expansion,
+            class_name=self.get_recording_class(prediction.detections),
+            notes=self.annotation_note,
+            annotation=annotations,
+            file_path=str(recording.path),
+        )
+
+        return file_annotation
+
+    def format_sound_event_prediction(
+        self, prediction: Detection
+    ) -> Annotation:
+        """Convert one Detection into a legacy-format Annotation dict."""
+        start_time, low_freq, end_time, high_freq = compute_bounds(
+            prediction.geometry
+        )
+
+        top_class_index = int(np.argmax(prediction.class_scores))
+        top_class_score = float(prediction.class_scores[top_class_index])
+        top_class = self.get_class_name(top_class_index)
+        # Times are rounded; frequencies are truncated to int to match
+        # the legacy output format.
+        annotation: Annotation = {
+            "start_time": round(float(start_time), 4),
+            "end_time": round(float(end_time), 4),
+            "low_freq": int(low_freq),
+            "high_freq": int(high_freq),
+            "class_prob": round(top_class_score, 3),
+            "det_prob": round(float(prediction.detection_score), 3),
+            "individual": "-1",
+            "event": self.event_name,
+            "class": top_class,
+        }
+
+        if self.write_cnn_features_csv:
+            annotation["cnn_features"] = prediction.features.tolist()  # type: ignore[index]
+
+        return annotation
+
+    @output_formatters.register(BatDetect2OutputConfig)
+    @staticmethod
+    def from_config(config: BatDetect2OutputConfig, targets: TargetProtocol):
+        """Registry factory: build a formatter from its config."""
+        return BatDetect2Formatter(
+            targets,
+            event_name=config.event_name,
+            annotation_note=config.annotation_note,
+            class_label_mode=config.class_label_mode,
+            decoded_label_key=config.decoded_label_key,
+            fallback_to_class_name=config.fallback_to_class_name,
+            write_detection_csv=config.write_detection_csv,
+            write_cnn_features_csv=config.write_cnn_features_csv,
+            save_if_empty=config.save_if_empty,
+            preserve_audio_tree=config.preserve_audio_tree,
+            include_file_path=config.include_file_path,
+        )
diff --git a/src/batdetect2/outputs/formats/parquet.py b/src/batdetect2/outputs/formats/parquet.py
new file mode 100644
index 00000000..909fa9d9
--- /dev/null
+++ b/src/batdetect2/outputs/formats/parquet.py
@@ -0,0 +1,188 @@
+from pathlib import Path
+from typing import List, Literal, Sequence
+from uuid import UUID
+
+import numpy as np
+import pandas as pd
+from loguru import logger
+from soundevent import data
+from soundevent.geometry import compute_bounds
+
+from batdetect2.core import BaseConfig
+from batdetect2.outputs.formats.base import (
+ make_path_relative,
+ output_formatters,
+)
+from batdetect2.outputs.types import OutputFormatterProtocol
+from batdetect2.postprocess.types import ClipDetections, Detection
+from batdetect2.targets.types import TargetProtocol
+
+
+class ParquetOutputConfig(BaseConfig):
+    """Configuration for the Parquet output format."""
+
+    name: Literal["parquet"] = "parquet"
+
+    include_class_scores: bool = True  # store full per-class score vectors
+    include_features: bool = True  # store per-detection feature vectors
+    include_geometry: bool = True  # store bounds plus serialized geometry
+
+
+class ParquetFormatter(OutputFormatterProtocol[ClipDetections]):
+    """Formatter that persists detections as one row per detection in Parquet."""
+
+    def __init__(
+        self,
+        targets: TargetProtocol,
+        include_class_scores: bool = True,
+        include_features: bool = True,
+        include_geometry: bool = True,
+    ):
+        self.targets = targets
+        self.include_class_scores = include_class_scores
+        self.include_features = include_features
+        self.include_geometry = include_geometry
+
+    def format(
+        self,
+        predictions: Sequence[ClipDetections],
+    ) -> List[ClipDetections]:
+        """Identity format: Parquet serialization happens in ``save``."""
+        return list(predictions)
+
+    def save(
+        self,
+        predictions: Sequence[ClipDetections],
+        path: data.PathLike,
+        audio_dir: data.PathLike | None = None,
+    ) -> None:
+        """Flatten all detections into a single Parquet file at ``path``.
+
+        Recording metadata is embedded per-row as JSON so clips can be
+        reconstructed on load.
+        """
+        path = Path(path)
+
+        if not path.parent.exists():
+            path.parent.mkdir(parents=True)
+
+        # Treat non-.parquet directory-like targets as a directory and
+        # write a default file name inside it.
+        # NOTE(review): when this branch appends "predictions.parquet",
+        # only ``path.parent`` was created above, not ``path`` itself —
+        # verify the directory exists before ``to_parquet``.
+        if path.suffix != ".parquet":
+            if path.is_dir() or not path.suffix:
+                path = path / "predictions.parquet"
+
+        rows = []
+        for prediction in predictions:
+            clip = prediction.clip
+            recording = clip.recording
+
+            if audio_dir is not None:
+                recording = recording.model_copy(
+                    update=dict(
+                        path=make_path_relative(recording.path, audio_dir)
+                    )
+                )
+
+            # Serialized once per clip, repeated on each detection row.
+            recording_json = recording.model_dump_json(exclude_none=True)
+
+            for pred in prediction.detections:
+                row = {
+                    "clip_uuid": str(clip.uuid),
+                    "clip_start_time": clip.start_time,
+                    "clip_end_time": clip.end_time,
+                    "recording_info": recording_json,
+                    "detection_score": pred.detection_score,
+                }
+
+                if self.include_geometry:
+                    start_time, low_freq, end_time, high_freq = compute_bounds(
+                        pred.geometry
+                    )
+                    row["start_time"] = start_time
+                    row["low_freq"] = low_freq
+                    row["end_time"] = end_time
+                    row["high_freq"] = high_freq
+                    row["geometry"] = pred.geometry.model_dump_json()
+
+                if self.include_class_scores:
+                    row["class_scores"] = pred.class_scores.tolist()
+
+                if self.include_features:
+                    row["features"] = pred.features.tolist()
+
+                rows.append(row)
+
+        if not rows:
+            logger.warning("No predictions to save.")
+            return
+
+        df = pd.DataFrame(rows)
+        logger.info(f"Saving {len(df)} predictions to {path}")
+        df.to_parquet(path, index=False)
+
+    def load(self, path: data.PathLike) -> List[ClipDetections]:
+        """Rebuild ClipDetections from a Parquet file or directory of files.
+
+        Rows are grouped by ``clip_uuid``; missing optional columns fall
+        back to zero-filled score/feature vectors and a bounding box
+        rebuilt from the stored bounds.
+        """
+        path = Path(path)
+        if path.is_dir():
+            files = list(path.glob("*.parquet"))
+            if not files:
+                return []
+            dfs = [pd.read_parquet(f) for f in files]
+            df = pd.concat(dfs, ignore_index=True)
+        else:
+            df = pd.read_parquet(path)
+
+        predictions_by_clip = {}
+
+        for _, row in df.iterrows():
+            clip_uuid = row["clip_uuid"]
+
+            # Lazily reconstruct each clip from its first row.
+            if clip_uuid not in predictions_by_clip:
+                recording = data.Recording.model_validate_json(
+                    row["recording_info"]
+                )
+                clip = data.Clip(
+                    uuid=UUID(clip_uuid),
+                    recording=recording,
+                    start_time=row["clip_start_time"],
+                    end_time=row["clip_end_time"],
+                )
+                predictions_by_clip[clip_uuid] = {"clip": clip, "preds": []}
+
+            if "geometry" in row and row["geometry"]:
+                geometry = data.geometry_validate(row["geometry"])
+            else:
+                # model_construct skips validation for speed.
+                geometry = data.BoundingBox.model_construct(
+                    coordinates=[
+                        row["start_time"],
+                        row["low_freq"],
+                        row["end_time"],
+                        row["high_freq"],
+                    ]
+                )
+
+            class_scores = (
+                np.array(row["class_scores"])
+                if "class_scores" in row
+                else np.zeros(len(self.targets.class_names))
+            )
+            features = (
+                np.array(row["features"]) if "features" in row else np.zeros(0)
+            )
+
+            pred = Detection(
+                geometry=geometry,
+                detection_score=row["detection_score"],
+                class_scores=class_scores,
+                features=features,
+            )
+            predictions_by_clip[clip_uuid]["preds"].append(pred)
+
+        results = []
+        for clip_data in predictions_by_clip.values():
+            results.append(
+                ClipDetections(
+                    clip=clip_data["clip"],
+                    detections=clip_data["preds"],
+                )
+            )
+
+        return results
+
+    @output_formatters.register(ParquetOutputConfig)
+    @staticmethod
+    def from_config(config: ParquetOutputConfig, targets: TargetProtocol):
+        """Registry factory: build a formatter from its config."""
+        return ParquetFormatter(
+            targets,
+            include_class_scores=config.include_class_scores,
+            include_features=config.include_features,
+            include_geometry=config.include_geometry,
+        )
diff --git a/src/batdetect2/outputs/formats/raw.py b/src/batdetect2/outputs/formats/raw.py
new file mode 100644
index 00000000..c4150dfe
--- /dev/null
+++ b/src/batdetect2/outputs/formats/raw.py
@@ -0,0 +1,239 @@
+from collections import defaultdict
+from pathlib import Path
+from typing import List, Literal, Sequence
+from uuid import UUID, uuid4
+
+import numpy as np
+import xarray as xr
+from loguru import logger
+from soundevent import data
+from soundevent.geometry import compute_bounds
+
+from batdetect2.core import BaseConfig
+from batdetect2.outputs.formats.base import (
+ make_path_relative,
+ output_formatters,
+)
+from batdetect2.outputs.types import OutputFormatterProtocol
+from batdetect2.postprocess.types import ClipDetections, Detection
+from batdetect2.targets.types import TargetProtocol
+
+
+class RawOutputConfig(BaseConfig):
+    """Configuration for the raw per-clip NetCDF output format."""
+
+    name: Literal["raw"] = "raw"
+
+    include_class_scores: bool = True  # store full per-class score matrix
+    include_features: bool = True  # store per-detection feature matrix
+    include_geometry: bool = True  # store serialized geometry strings
+
+
+class RawFormatter(OutputFormatterProtocol[ClipDetections]):
+    """Formatter that saves each clip's detections as a NetCDF dataset."""
+
+    def __init__(
+        self,
+        targets: TargetProtocol,
+        include_class_scores: bool = True,
+        include_features: bool = True,
+        include_geometry: bool = True,
+        parse_full_geometry: bool = False,
+    ):
+        self.targets = targets
+        self.include_class_scores = include_class_scores
+        self.include_features = include_features
+        self.include_geometry = include_geometry
+        # When true, ``load`` re-parses the serialized geometry instead
+        # of rebuilding a bounding box from the stored bounds.
+        # NOTE(review): not exposed through RawOutputConfig/from_config.
+        self.parse_full_geometry = parse_full_geometry
+
+    def format(
+        self,
+        predictions: Sequence[ClipDetections],
+    ) -> List[ClipDetections]:
+        """Identity format: NetCDF serialization happens in ``save``."""
+        return list(predictions)
+
+    def save(
+        self,
+        predictions: Sequence[ClipDetections],
+        path: data.PathLike,
+        audio_dir: data.PathLike | None = None,
+    ) -> None:
+        """Write one ``<clip uuid>.nc`` file per clip under directory ``path``."""
+        path = Path(path)
+
+        if not path.exists():
+            path.mkdir(parents=True)
+
+        for prediction in predictions:
+            logger.debug(f"Saving clip predictions {prediction.clip.uuid}")
+            clip = prediction.clip
+            dataset = self.pred_to_xr(prediction, audio_dir)
+            dataset.to_netcdf(path / f"{clip.uuid}.nc")
+
+    def load(self, path: data.PathLike) -> List[ClipDetections]:
+        """Load every ``*.nc`` file directly inside directory ``path``."""
+        path = Path(path)
+        files = list(path.glob("*.nc"))
+        predictions: List[ClipDetections] = []
+
+        for filepath in files:
+            logger.debug(f"Loading clip predictions {filepath}")
+            clip_data = xr.load_dataset(filepath)
+            prediction = self.pred_from_xr(clip_data)
+            predictions.append(prediction)
+
+        return predictions
+
+    def pred_to_xr(
+        self,
+        prediction: ClipDetections,
+        audio_dir: data.PathLike | None = None,
+    ) -> xr.Dataset:
+        """Convert one clip's detections into an xarray Dataset.
+
+        Detections index the ``detection`` dimension; recording metadata
+        is stored as a JSON string attribute.
+        """
+        clip = prediction.clip
+        recording = clip.recording
+        num_features = 0
+
+        if audio_dir is not None:
+            recording = recording.model_copy(
+                update=dict(path=make_path_relative(recording.path, audio_dir))
+            )
+
+        values = defaultdict(list)
+
+        for pred in prediction.detections:
+            # Fresh random id per detection (not persisted elsewhere).
+            detection_id = str(uuid4())
+
+            values["detection_id"].append(detection_id)
+            values["detection_score"].append(pred.detection_score)
+
+            start_time, low_freq, end_time, high_freq = compute_bounds(
+                pred.geometry
+            )
+
+            values["start_time"].append(start_time)
+            values["end_time"].append(end_time)
+            values["low_freq"].append(low_freq)
+            values["high_freq"].append(high_freq)
+
+            values["geometry"].append(pred.geometry.model_dump_json())
+
+            top_class_index = int(np.argmax(pred.class_scores))
+            top_class_score = float(pred.class_scores[top_class_index])
+            top_class = self.targets.class_names[top_class_index]
+
+            values["top_class"].append(top_class)
+            values["top_class_score"].append(top_class_score)
+
+            values["class_scores"].append(pred.class_scores)
+            values["features"].append(pred.features)
+
+            # Taken from the last detection; assumes all detections share
+            # the same feature length.
+            num_features = len(pred.features)
+
+        data_vars = {
+            "score": (["detection"], values["detection_score"]),
+            "start_time": (["detection"], values["start_time"]),
+            "end_time": (["detection"], values["end_time"]),
+            "low_freq": (["detection"], values["low_freq"]),
+            "high_freq": (["detection"], values["high_freq"]),
+            "top_class": (["detection"], values["top_class"]),
+            "top_class_score": (["detection"], values["top_class_score"]),
+        }
+
+        coords = {
+            "detection": ("detection", values["detection_id"]),
+            "clip_start": clip.start_time,
+            "clip_end": clip.end_time,
+            "clip_id": str(clip.uuid),
+        }
+
+        if self.include_class_scores:
+            class_scores = np.stack(values["class_scores"], axis=0)
+            data_vars["class_scores"] = (
+                ["detection", "classes"],
+                class_scores,
+            )
+            coords["classes"] = ("classes", self.targets.class_names)
+
+        if self.include_features:
+            features = np.stack(values["features"], axis=0)
+            data_vars["features"] = (["detection", "feature"], features)
+            coords["feature"] = ("feature", np.arange(num_features))
+
+        if self.include_geometry:
+            data_vars["geometry"] = (["detection"], values["geometry"])
+
+        return xr.Dataset(
+            data_vars=data_vars,
+            coords=coords,
+            attrs={
+                "recording": recording.model_dump_json(exclude_none=True),
+            },
+        )
+
+    def pred_from_xr(self, dataset: xr.Dataset) -> ClipDetections:
+        """Inverse of ``pred_to_xr``: rebuild ClipDetections from a Dataset."""
+        clip_data = dataset
+
+        recording = data.Recording.model_validate_json(
+            clip_data.attrs["recording"]
+        )
+
+        clip_id = clip_data.clip_id.item()
+        clip = data.Clip(
+            recording=recording,
+            uuid=UUID(clip_id),
+            start_time=clip_data.clip_start,
+            end_time=clip_data.clip_end,
+        )
+
+        sound_events = []
+
+        for detection in clip_data.coords["detection"]:
+            detection_data = clip_data.sel(detection=detection)
+            score = detection_data.score.item()
+
+            if "geometry" in clip_data and self.parse_full_geometry:
+                geometry = data.geometry_validate(
+                    detection_data.geometry.item()
+                )
+            else:
+                # Default: cheap bounding box from the stored bounds.
+                start_time = detection_data.start_time.item()
+                end_time = detection_data.end_time.item()
+                low_freq = detection_data.low_freq.item()
+                high_freq = detection_data.high_freq.item()
+                geometry = data.BoundingBox.model_construct(
+                    coordinates=[start_time, low_freq, end_time, high_freq]
+                )
+
+            if "class_scores" in detection_data:
+                class_scores = detection_data.class_scores.data
+            else:
+                # Reconstruct a sparse score vector from the stored top
+                # class and its score.
+                class_scores = np.zeros(len(self.targets.class_names))
+                class_index = self.targets.class_names.index(
+                    detection_data.top_class.item()
+                )
+                class_scores[class_index] = (
+                    detection_data.top_class_score.item()
+                )
+
+            if "features" in detection_data:
+                features = detection_data.features.data
+            else:
+                features = np.zeros(0)
+
+            sound_events.append(
+                Detection(
+                    geometry=geometry,
+                    detection_score=score,
+                    class_scores=class_scores,
+                    features=features,
+                )
+            )
+
+        return ClipDetections(
+            clip=clip,
+            detections=sound_events,
+        )
+
+    @output_formatters.register(RawOutputConfig)
+    @staticmethod
+    def from_config(config: RawOutputConfig, targets: TargetProtocol):
+        """Registry factory: build a formatter from its config."""
+        return RawFormatter(
+            targets,
+            include_class_scores=config.include_class_scores,
+            include_features=config.include_features,
+            include_geometry=config.include_geometry,
+        )
diff --git a/src/batdetect2/outputs/formats/soundevent.py b/src/batdetect2/outputs/formats/soundevent.py
new file mode 100644
index 00000000..fd70a11e
--- /dev/null
+++ b/src/batdetect2/outputs/formats/soundevent.py
@@ -0,0 +1,128 @@
+from pathlib import Path
+from typing import List, Literal, Sequence
+
+import numpy as np
+from soundevent import data, io
+
+from batdetect2.core import BaseConfig
+from batdetect2.outputs.formats.base import (
+ output_formatters,
+)
+from batdetect2.outputs.types import OutputFormatterProtocol
+from batdetect2.postprocess.types import ClipDetections, Detection
+from batdetect2.targets.types import TargetProtocol
+
+
+class SoundEventOutputConfig(BaseConfig):
+    """Configuration for the soundevent PredictionSet output format."""
+
+    name: Literal["soundevent"] = "soundevent"
+    top_k: int | None = 1  # how many top classes to tag; None = all
+    min_score: float | None = None  # class-score threshold; None = no threshold
+
+
+class SoundEventOutputFormatter(OutputFormatterProtocol[data.ClipPrediction]):
+    """Formatter producing soundevent ``ClipPrediction`` objects / JSON files."""
+
+    def __init__(
+        self,
+        targets: TargetProtocol,
+        top_k: int | None = 1,
+        # NOTE(review): default 0 here differs from the config default of
+        # None — confirm which is intended.
+        min_score: float | None = 0,
+    ):
+        self.targets = targets
+        self.top_k = top_k
+        self.min_score = min_score
+
+    def format(
+        self,
+        predictions: Sequence[ClipDetections],
+    ) -> List[data.ClipPrediction]:
+        """Convert each clip's detections to a soundevent ClipPrediction."""
+        return [
+            self.format_prediction(prediction) for prediction in predictions
+        ]
+
+    def save(
+        self,
+        predictions: Sequence[data.ClipPrediction],
+        path: data.PathLike,
+        audio_dir: data.PathLike | None = None,
+    ) -> None:
+        """Save predictions as a soundevent PredictionSet JSON file."""
+        run = data.PredictionSet(clip_predictions=list(predictions))
+
+        path = Path(path)
+
+        # Force a .json extension so soundevent.io picks the right codec.
+        if not path.suffix == ".json":
+            path = Path(path).with_suffix(".json")
+
+        io.save(run, path, audio_dir=audio_dir)
+
+    def load(self, path: data.PathLike) -> List[data.ClipPrediction]:
+        """Load clip predictions back from a PredictionSet JSON file."""
+        path = Path(path)
+        run = io.load(path, type="prediction_set")
+        return run.clip_predictions
+
+    def format_prediction(
+        self,
+        prediction: ClipDetections,
+    ) -> data.ClipPrediction:
+        """Wrap one clip's detections into a ClipPrediction."""
+        recording = prediction.clip.recording
+        return data.ClipPrediction(
+            clip=prediction.clip,
+            sound_events=[
+                self.format_sound_event_prediction(pred, recording)
+                for pred in prediction.detections
+            ],
+        )
+
+    def format_sound_event_prediction(
+        self,
+        prediction: Detection,
+        recording: data.Recording,
+    ) -> data.SoundEventPrediction:
+        """Convert one Detection to a SoundEventPrediction with tags."""
+        return data.SoundEventPrediction(
+            sound_event=data.SoundEvent(
+                recording=recording,
+                geometry=prediction.geometry,
+            ),
+            score=prediction.detection_score,
+            tags=self.get_sound_event_tags(prediction),
+        )
+
+    def get_sound_event_tags(
+        self, prediction: Detection
+    ) -> List[data.PredictedTag]:
+        """Build predicted tags: detection-level tags plus top-k class tags.
+
+        Class indices are visited in descending score order; iteration
+        stops at the first score below ``min_score``.
+        """
+        sorted_indices = np.argsort(prediction.class_scores)[::-1]
+
+        # Generic detection tags carry the detection score.
+        tags = [
+            data.PredictedTag(
+                tag=tag,
+                score=prediction.detection_score,
+            )
+            for tag in self.targets.detection_class_tags
+        ]
+
+        top_k = self.top_k or len(sorted_indices)
+
+        for ind in sorted_indices[:top_k]:
+            score = float(prediction.class_scores[ind])
+
+            if self.min_score is not None and score < self.min_score:
+                break
+
+            class_name = self.targets.class_names[ind]
+            class_tags = self.targets.decode_class(class_name)
+            tags.extend(
+                data.PredictedTag(
+                    tag=tag,
+                    score=score,
+                )
+                for tag in class_tags
+            )
+
+        return tags
+
+    @output_formatters.register(SoundEventOutputConfig)
+    @staticmethod
+    def from_config(config: SoundEventOutputConfig, targets: TargetProtocol):
+        """Registry factory: build a formatter from its config."""
+        return SoundEventOutputFormatter(
+            targets,
+            top_k=config.top_k,
+            min_score=config.min_score,
+        )
diff --git a/src/batdetect2/outputs/transforms/__init__.py b/src/batdetect2/outputs/transforms/__init__.py
new file mode 100644
index 00000000..52141497
--- /dev/null
+++ b/src/batdetect2/outputs/transforms/__init__.py
@@ -0,0 +1,184 @@
+from collections.abc import Sequence
+
+from pydantic import Field
+from soundevent import data
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.outputs.transforms.clip_transforms import (
+ ClipDetectionsTransformConfig,
+)
+from batdetect2.outputs.transforms.clip_transforms import (
+ clip_transforms as clip_transform_registry,
+)
+from batdetect2.outputs.transforms.decoding import to_detections
+from batdetect2.outputs.transforms.detection_transforms import (
+ DetectionTransformConfig,
+ shift_detections_to_start_time,
+)
+from batdetect2.outputs.transforms.detection_transforms import (
+ detection_transforms as detection_transform_registry,
+)
+from batdetect2.outputs.types import (
+ ClipDetectionsTransform,
+ DetectionTransform,
+ OutputTransformProtocol,
+)
+from batdetect2.postprocess.types import (
+ ClipDetections,
+ ClipDetectionsTensor,
+ Detection,
+)
+from batdetect2.targets.types import ROIMapperProtocol, TargetProtocol
+
+__all__ = [
+ "ClipDetectionsTransformConfig",
+ "DetectionTransformConfig",
+ "OutputTransform",
+ "OutputTransformConfig",
+ "build_output_transform",
+]
+
+
+class OutputTransformConfig(BaseConfig):
+    """Configuration for `OutputTransform`.
+
+    Holds the ordered lists of per-detection and per-clip transform
+    configurations; both default to empty (no transformation applied).
+    """
+
+    detection_transforms: list[DetectionTransformConfig] = Field(
+        default_factory=list
+    )
+    clip_transforms: list[ClipDetectionsTransformConfig] = Field(
+        default_factory=list
+    )
+
+
+class OutputTransform(OutputTransformProtocol):
+    """Applies configured detection- and clip-level transforms to outputs."""
+
+    detection_transform_steps: list[DetectionTransform]
+    clip_transform_steps: list[ClipDetectionsTransform]
+
+    def __init__(
+        self,
+        targets: TargetProtocol,
+        roi_mapper: ROIMapperProtocol,
+        detection_transform_steps: Sequence[DetectionTransform] = (),
+        clip_transform_steps: Sequence[ClipDetectionsTransform] = (),
+    ):
+        self.targets = targets
+        self.roi_mapper = roi_mapper
+        # Copy into lists so later mutation of the caller's sequences
+        # cannot affect this instance.
+        self.detection_transform_steps = list(detection_transform_steps)
+        self.clip_transform_steps = list(clip_transform_steps)
+
+    def __call__(
+        self,
+        predictions: Sequence[ClipDetections],
+    ) -> list[ClipDetections]:
+        """Transform every clip prediction in the sequence."""
+        return [
+            self._transform_prediction(prediction)
+            for prediction in predictions
+        ]
+
+    def _transform_prediction(
+        self,
+        prediction: ClipDetections,
+    ) -> ClipDetections:
+        """Shift, filter, and clip-transform one prediction."""
+        # Detections are shifted by the clip's start time first —
+        # presumably converting clip-relative to recording-relative
+        # times before the per-detection transforms run.
+        detections = shift_detections_to_start_time(
+            prediction.detections,
+            start_time=prediction.clip.start_time,
+        )
+        detections = self.transform_detections(detections)
+        return self.transform_clip_detections(
+            ClipDetections(clip=prediction.clip, detections=detections)
+        )
+
+    def to_detections(
+        self,
+        detections: ClipDetectionsTensor,
+        start_time: float = 0,
+    ) -> list[Detection]:
+        """Decode a raw detections tensor into transformed detections."""
+        decoded = to_detections(
+            detections.numpy(),
+            targets=self.targets,
+            roi_mapper=self.roi_mapper,
+        )
+        shifted = shift_detections_to_start_time(
+            decoded,
+            start_time=start_time,
+        )
+        return self.transform_detections(shifted)
+
+    def to_clip_detections(
+        self,
+        detections: ClipDetectionsTensor,
+        clip: data.Clip,
+    ) -> ClipDetections:
+        """Decode a tensor into a fully transformed `ClipDetections`."""
+        prediction = ClipDetections(
+            clip=clip,
+            detections=self.to_detections(
+                detections,
+                start_time=clip.start_time,
+            ),
+        )
+        return self.transform_clip_detections(prediction)
+
+    def transform_detections(
+        self,
+        detections: Sequence[Detection],
+    ) -> list[Detection]:
+        """Apply detection transforms, dropping detections mapped to None."""
+        out: list[Detection] = []
+        for detection in detections:
+            transformed = self.transform_detection(detection)
+
+            if transformed is None:
+                continue
+
+            out.append(transformed)
+
+        return out
+
+    def transform_detection(
+        self,
+        detection: Detection,
+    ) -> Detection | None:
+        """Run one detection through every step; None means discard."""
+        for transform in self.detection_transform_steps:
+            transformed = transform(detection)
+
+            if transformed is None:
+                # A step vetoed the detection; later steps are skipped.
+                return None
+
+            detection = transformed
+
+        return detection
+
+    def transform_clip_detections(
+        self,
+        prediction: ClipDetections,
+    ) -> ClipDetections:
+        """Chain the clip-level transforms over the prediction."""
+        for transform in self.clip_transform_steps:
+            prediction = transform(prediction)
+        return prediction
+
+
+def build_output_transform(
+    config: OutputTransformConfig | dict | None = None,
+    targets: TargetProtocol | None = None,
+    roi_mapper: ROIMapperProtocol | None = None,
+) -> OutputTransformProtocol:
+    """Build an `OutputTransform` from an optional config.
+
+    Accepts a validated config, a plain dict (validated here), or None
+    (defaults). Missing targets / ROI mapper are built with defaults.
+    """
+    # Local import — presumably to avoid a circular dependency with
+    # batdetect2.targets.
+    from batdetect2.targets import build_roi_mapping, build_targets
+
+    if config is None:
+        config = OutputTransformConfig()
+
+    if not isinstance(config, OutputTransformConfig):
+        config = OutputTransformConfig.model_validate(config)
+
+    targets = targets or build_targets()
+    roi_mapper = roi_mapper or build_roi_mapping()
+
+    return OutputTransform(
+        targets=targets,
+        roi_mapper=roi_mapper,
+        detection_transform_steps=[
+            detection_transform_registry.build(transform_config)
+            for transform_config in config.detection_transforms
+        ],
+        clip_transform_steps=[
+            clip_transform_registry.build(transform_config)
+            for transform_config in config.clip_transforms
+        ],
+    )
diff --git a/src/batdetect2/outputs/transforms/clip_transforms.py b/src/batdetect2/outputs/transforms/clip_transforms.py
new file mode 100644
index 00000000..93a8016e
--- /dev/null
+++ b/src/batdetect2/outputs/transforms/clip_transforms.py
@@ -0,0 +1,166 @@
+from typing import Annotated, Literal
+
+from pydantic import Field
+from soundevent.geometry import compute_bounds
+
+from batdetect2.core import (
+ BaseConfig,
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.outputs.types import ClipDetectionsTransform
+from batdetect2.postprocess.types import ClipDetections, Detection
+
+__all__ = [
+ "ClipDetectionsTransformConfig",
+ "clip_transforms",
+]
+
+
+clip_transforms: Registry[ClipDetectionsTransform, []] = Registry(
+ "clip_detection_transform"
+)
+
+
+@add_import_config(clip_transforms)
+class ClipDetectionsTransformImportConfig(ImportConfig):
+    """Config variant resolving a clip transform via an import path."""
+
+    name: Literal["import"] = "import"
+
+
+class RemoveAboveNyquistConfig(BaseConfig):
+    """Configuration for `RemoveAboveNyquist`.
+
+    Defines parameters for removing detections above the Nyquist frequency.
+
+    Attributes
+    ----------
+    name : Literal["remove_above_nyquist"]
+        The unique identifier for this transform type.
+    mode : Literal["low_freq", "high_freq"]
+        Which frequency bound of each detection is compared against the
+        Nyquist threshold.
+    buffer : float
+        Margin (in Hz) subtracted from the Nyquist frequency before the
+        comparison.
+    """
+
+    name: Literal["remove_above_nyquist"] = "remove_above_nyquist"
+    mode: Literal["low_freq", "high_freq"] = "high_freq"
+    buffer: float = 0
+
+
+class RemoveAboveNyquist:
+    """Drop detections whose frequency bound exceeds the Nyquist limit."""
+
+    def __init__(self, mode: Literal["low_freq", "high_freq"], buffer: float):
+        self.mode = mode
+        self.buffer = buffer
+
+    def __call__(self, detections: ClipDetections) -> ClipDetections:
+        """Return a copy keeping only detections below the threshold."""
+        recording = detections.clip.recording
+        # Nyquist frequency of the source recording, minus the buffer.
+        nyquist = recording.samplerate / 2
+        threshold = nyquist - self.buffer
+
+        return ClipDetections(
+            clip=detections.clip,
+            detections=[
+                detection
+                for detection in detections.detections
+                if self._is_below_threshold(detection, threshold)
+            ],
+        )
+
+    def _is_below_threshold(
+        self,
+        detection: Detection,
+        threshold: float,
+    ) -> bool:
+        # compute_bounds returns (start_time, low_freq, end_time, high_freq).
+        _, low_freq, _, high_freq = compute_bounds(detection.geometry)
+
+        if self.mode == "low_freq":
+            return low_freq < threshold
+
+        return high_freq < threshold
+
+    @clip_transforms.register(RemoveAboveNyquistConfig)
+    @staticmethod
+    def from_config(config: RemoveAboveNyquistConfig):
+        """Build a `RemoveAboveNyquist` from its validated config."""
+        return RemoveAboveNyquist(
+            mode=config.mode,
+            buffer=config.buffer,
+        )
+
+
+class RemoveAtEdgesConfig(BaseConfig):
+    """Configuration for `RemoveAtEdges`.
+
+    Defines parameters for removing detections at the edges of the clip.
+
+    Attributes
+    ----------
+    name : Literal["remove_at_edges"]
+        The unique identifier for this transform type.
+    buffer : float
+        The amount of time (in seconds) to remove detections from the edge.
+    mode : Literal["start_time", "end_time", "both"]
+        Criteria for removing detections at the edges of the clip.
+        If "start_time", remove detections with a start time within the
+        buffer. If "end_time", remove detections with an end time within
+        the buffer. If "both", remove detections with a start time within
+        the buffer or an end time within the buffer.
+    """
+
+    name: Literal["remove_at_edges"] = "remove_at_edges"
+    buffer: float = 0.1
+    mode: Literal["start_time", "end_time", "both"] = "both"
+
+
+class RemoveAtEdges:
+    """Drop detections lying within a buffer of the clip edges."""
+
+    def __init__(
+        self,
+        buffer: float,
+        mode: Literal["start_time", "end_time", "both"],
+    ):
+        self.buffer = buffer
+        self.mode = mode
+
+    def __call__(self, detections: ClipDetections) -> ClipDetections:
+        """Return a copy keeping only detections away from the edges."""
+        clip = detections.clip
+        # Valid window after shrinking the clip by the buffer on each side.
+        start = clip.start_time + self.buffer
+        end = clip.end_time - self.buffer
+
+        return ClipDetections(
+            clip=detections.clip,
+            detections=[
+                detection
+                for detection in detections.detections
+                if self._is_within_buffer(detection, start, end)
+            ],
+        )
+
+    def _is_within_buffer(
+        self,
+        detection: Detection,
+        start: float,
+        end: float,
+    ) -> bool:
+        # compute_bounds returns (start_time, low_freq, end_time, high_freq).
+        start_time, _, end_time, _ = compute_bounds(detection.geometry)
+
+        if self.mode == "start_time":
+            return (start_time >= start) and (start_time <= end)
+
+        if self.mode == "end_time":
+            return (end_time >= start) and (end_time <= end)
+
+        # "both": the whole detection must lie inside the valid window.
+        return (start_time >= start) and (end_time <= end)
+
+    @clip_transforms.register(RemoveAtEdgesConfig)
+    @staticmethod
+    def from_config(config: RemoveAtEdgesConfig):
+        """Build a `RemoveAtEdges` from its validated config."""
+        return RemoveAtEdges(
+            buffer=config.buffer,
+            mode=config.mode,
+        )
+
+
+ClipDetectionsTransformConfig = Annotated[
+ ClipDetectionsTransformImportConfig
+ | RemoveAboveNyquistConfig
+ | RemoveAtEdgesConfig,
+ Field(discriminator="name"),
+]
diff --git a/src/batdetect2/outputs/transforms/decoding.py b/src/batdetect2/outputs/transforms/decoding.py
new file mode 100644
index 00000000..298226fd
--- /dev/null
+++ b/src/batdetect2/outputs/transforms/decoding.py
@@ -0,0 +1,174 @@
+"""Decode extracted tensors into output-friendly detection objects."""
+
+from typing import List
+
+import numpy as np
+from soundevent import data
+
+from batdetect2.postprocess.types import ClipDetectionsArray, Detection
+from batdetect2.targets.types import ROIMapperProtocol, TargetProtocol
+
+__all__ = [
+ "DEFAULT_CLASSIFICATION_THRESHOLD",
+ "convert_raw_prediction_to_sound_event_prediction",
+ "convert_raw_predictions_to_clip_prediction",
+ "get_class_tags",
+ "get_generic_tags",
+ "get_prediction_features",
+ "to_detections",
+]
+
+
+DEFAULT_CLASSIFICATION_THRESHOLD = 0.1
+
+
+def to_detections(
+    detections: ClipDetectionsArray,
+    targets: TargetProtocol,
+    roi_mapper: ROIMapperProtocol,
+) -> List[Detection]:
+    """Decode an extracted detections array into `Detection` objects.
+
+    Each geometry is recovered by the ROI mapper from the anchor position
+    (time, frequency) and predicted size, using the highest-scoring class
+    to select the decoding strategy.
+    """
+    predictions = []
+
+    # NOTE(review): strict=False silently truncates to the shortest
+    # array if lengths disagree — confirm the arrays are always aligned.
+    for score, class_scores, time, freq, dims, feats in zip(
+        detections.scores,
+        detections.class_scores,
+        detections.times,
+        detections.frequencies,
+        detections.sizes,
+        detections.features,
+        strict=False,
+    ):
+        highest_scoring_class = targets.class_names[class_scores.argmax()]
+
+        geom = roi_mapper.decode(
+            (time, freq),
+            dims,
+            class_name=highest_scoring_class,
+        )
+
+        predictions.append(
+            Detection(
+                detection_score=score,
+                geometry=geom,
+                class_scores=class_scores,
+                features=feats,
+            )
+        )
+
+    return predictions
+
+
+def convert_raw_predictions_to_clip_prediction(
+    raw_predictions: List[Detection],
+    clip: data.Clip,
+    targets: TargetProtocol,
+    classification_threshold: float = DEFAULT_CLASSIFICATION_THRESHOLD,
+    top_class_only: bool = False,
+) -> data.ClipPrediction:
+    """Bundle raw detections into a `data.ClipPrediction` for ``clip``."""
+    return data.ClipPrediction(
+        clip=clip,
+        sound_events=[
+            convert_raw_prediction_to_sound_event_prediction(
+                prediction,
+                recording=clip.recording,
+                targets=targets,
+                classification_threshold=classification_threshold,
+                top_class_only=top_class_only,
+            )
+            for prediction in raw_predictions
+        ],
+    )
+
+
+def convert_raw_prediction_to_sound_event_prediction(
+    raw_prediction: Detection,
+    recording: data.Recording,
+    targets: TargetProtocol,
+    classification_threshold: float | None = DEFAULT_CLASSIFICATION_THRESHOLD,
+    top_class_only: bool = False,
+) -> data.SoundEventPrediction:
+    """Convert one raw `Detection` into a `data.SoundEventPrediction`.
+
+    Generic detection tags always carry the detection score; class tags
+    are added by `get_class_tags` subject to ``classification_threshold``
+    and ``top_class_only``.
+    """
+    sound_event = data.SoundEvent(
+        recording=recording,
+        geometry=raw_prediction.geometry,
+        features=get_prediction_features(raw_prediction.features),
+    )
+
+    tags = [
+        *get_generic_tags(
+            raw_prediction.detection_score,
+            generic_class_tags=targets.detection_class_tags,
+        ),
+        *get_class_tags(
+            raw_prediction.class_scores,
+            targets=targets,
+            top_class_only=top_class_only,
+            threshold=classification_threshold,
+        ),
+    ]
+
+    return data.SoundEventPrediction(
+        sound_event=sound_event,
+        score=raw_prediction.detection_score,
+        tags=tags,
+    )
+
+
+def get_generic_tags(
+ detection_score: float,
+ generic_class_tags: List[data.Tag],
+) -> List[data.PredictedTag]:
+ return [
+ data.PredictedTag(tag=tag, score=detection_score)
+ for tag in generic_class_tags
+ ]
+
+
+def get_prediction_features(features: np.ndarray) -> List[data.Feature]:
+    """Wrap each extracted feature value in a `data.Feature`.
+
+    Features are named positionally as ``batdetect2:f{index}``.
+    """
+    return [
+        data.Feature(
+            term=data.Term(
+                name=f"batdetect2:f{index}",
+                label=f"BatDetect Feature {index}",
+                definition="Automatically extracted features by BatDetect2",
+            ),
+            value=value,
+        )
+        for index, value in enumerate(features)
+    ]
+
+
+def get_class_tags(
+ class_scores: np.ndarray,
+ targets: TargetProtocol,
+ top_class_only: bool = False,
+ threshold: float | None = DEFAULT_CLASSIFICATION_THRESHOLD,
+) -> List[data.PredictedTag]:
+ tags = []
+
+ for class_name, score in _iterate_sorted(
+ class_scores,
+ targets.class_names,
+ ):
+ if threshold is not None and score < threshold:
+ continue
+
+ class_tags = targets.decode_class(class_name)
+
+ for tag in class_tags:
+ tags.append(
+ data.PredictedTag(
+ tag=tag,
+ score=score,
+ )
+ )
+
+ if top_class_only:
+ break
+
+ return tags
+
+
+def _iterate_sorted(array: np.ndarray, class_names: List[str]):
+ indices = np.argsort(-array)
+ for index in indices:
+ yield str(class_names[index]), float(array[index])
diff --git a/src/batdetect2/outputs/transforms/detection_transforms.py b/src/batdetect2/outputs/transforms/detection_transforms.py
new file mode 100644
index 00000000..b5fcc1be
--- /dev/null
+++ b/src/batdetect2/outputs/transforms/detection_transforms.py
@@ -0,0 +1,173 @@
+from collections.abc import Sequence
+from dataclasses import replace
+from typing import Annotated, Literal
+
+from pydantic import Field
+from soundevent.geometry import compute_bounds, shift_geometry
+
+from batdetect2.core import (
+ BaseConfig,
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.outputs.types import DetectionTransform
+from batdetect2.postprocess.types import Detection
+
+__all__ = [
+ "DetectionTransformConfig",
+ "detection_transforms",
+ "shift_detection_time",
+ "shift_detections_to_start_time",
+]
+
+
+detection_transforms: Registry[DetectionTransform, []] = Registry(
+ "detection_transform"
+)
+
+
+@add_import_config(detection_transforms)
+class DetectionTransformImportConfig(ImportConfig):
+    """Config variant resolving a detection transform via an import path."""
+
+    name: Literal["import"] = "import"
+
+
+class FilterByFrequencyConfig(BaseConfig):
+ """Configuration for `FilterByFrequency`.
+
+ Defines parameters for filtering detections by frequency.
+
+ Attributes
+ ----------
+ name : Literal["filter_by_frequency"]
+ The unique identifier for this transform type.
+ min_freq : float
+ The minimum frequency (in Hz) for detections to be kept.
+ max_freq : float
+ The maximum frequency (in Hz) for detections to be kept.
+ mode : Literal["low_freq", "high_freq", "both"]
+ Criteria for filtering detections by frequency.
+ If "low_freq", keep detections with a low frequency within the
+ specified range. If "high_freq", keep detections with a high
+ frequency within the specified range. If "both", keep detections
+ with a low frequency within the specified range or a high frequency
+ within the specified range.
+ """
+
+ name: Literal["filter_by_frequency"] = "filter_by_frequency"
+ min_freq: float = 0
+ max_freq: float = float("inf")
+ mode: Literal["low_freq", "high_freq", "both"] = "both"
+
+
+class FilterByFrequency:
+ def __init__(
+ self,
+ min_freq: float = 0,
+ max_freq: float = float("inf"),
+ mode: Literal["low_freq", "high_freq", "both"] = "both",
+ ):
+ self.min_freq = min_freq
+ self.max_freq = max_freq
+ self.mode = mode
+
+ def __call__(self, detection: Detection) -> Detection | None:
+ if self._is_within_frequency_range(detection):
+ return detection
+
+ def _is_within_frequency_range(self, detection: Detection) -> bool:
+ _, low_freq, _, high_freq = compute_bounds(detection.geometry)
+
+ if self.mode == "low_freq":
+ return (low_freq >= self.min_freq) and (low_freq <= self.max_freq)
+
+ if self.mode == "high_freq":
+ return (high_freq >= self.min_freq) and (
+ high_freq <= self.max_freq
+ )
+
+ return (low_freq >= self.min_freq) or (high_freq <= self.max_freq)
+
+ @detection_transforms.register(FilterByFrequencyConfig)
+ @staticmethod
+ def from_config(config: FilterByFrequencyConfig):
+ return FilterByFrequency(
+ min_freq=config.min_freq,
+ max_freq=config.max_freq,
+ mode=config.mode,
+ )
+
+
+class FilterByDurationConfig(BaseConfig):
+    """Configuration for `FilterByDuration`.
+
+    Defines parameters for filtering detections by duration.
+
+    Attributes
+    ----------
+    name : Literal["filter_by_duration"]
+        The unique identifier for this transform type.
+    min_duration : float
+        The minimum duration (in seconds) for detections to be kept.
+    max_duration : float
+        The maximum duration (in seconds) for detections to be kept.
+    """
+
+    name: Literal["filter_by_duration"] = "filter_by_duration"
+    min_duration: float = 0
+    max_duration: float = float("inf")
+
+
+class FilterByDuration:
+    """Drop detections whose duration falls outside a given range."""
+
+    def __init__(
+        self,
+        min_duration: float = 0,
+        max_duration: float = float("inf"),
+    ):
+        self.min_duration = min_duration
+        self.max_duration = max_duration
+
+    def __call__(self, detection: Detection) -> Detection | None:
+        """Return the detection unchanged, or None (implicit) to drop it."""
+        if self._is_within_duration_range(detection):
+            return detection
+
+    def _is_within_duration_range(self, detection: Detection) -> bool:
+        # compute_bounds returns (start_time, low_freq, end_time, high_freq).
+        start_time, _, end_time, _ = compute_bounds(detection.geometry)
+        duration = end_time - start_time
+        return (duration >= self.min_duration) and (
+            duration <= self.max_duration
+        )
+
+    @detection_transforms.register(FilterByDurationConfig)
+    @staticmethod
+    def from_config(config: FilterByDurationConfig):
+        """Build a `FilterByDuration` from its validated config."""
+        return FilterByDuration(
+            min_duration=config.min_duration,
+            max_duration=config.max_duration,
+        )
+
+
+DetectionTransformConfig = Annotated[
+ DetectionTransformImportConfig
+ | FilterByFrequencyConfig
+ | FilterByDurationConfig,
+ Field(discriminator="name"),
+]
+
+
+def shift_detection_time(detection: Detection, time: float) -> Detection:
+ geometry = shift_geometry(detection.geometry, time=time)
+ return replace(detection, geometry=geometry)
+
+
+def shift_detections_to_start_time(
+ detections: Sequence[Detection],
+ start_time: float = 0,
+) -> list[Detection]:
+ if start_time == 0:
+ return list(detections)
+
+ return [
+ shift_detection_time(detection, time=start_time)
+ for detection in detections
+ ]
diff --git a/src/batdetect2/outputs/types.py b/src/batdetect2/outputs/types.py
new file mode 100644
index 00000000..97d3e14f
--- /dev/null
+++ b/src/batdetect2/outputs/types.py
@@ -0,0 +1,61 @@
+from collections.abc import Callable, Sequence
+from typing import Generic, Protocol, TypeVar
+
+from soundevent import data
+from soundevent.data import PathLike
+
+from batdetect2.postprocess.types import (
+ ClipDetections,
+ ClipDetectionsTensor,
+ Detection,
+)
+
+__all__ = [
+ "ClipDetectionsTransform",
+ "DetectionTransform",
+ "OutputFormatterProtocol",
+ "OutputTransformProtocol",
+]
+
+T = TypeVar("T")
+
+
+class OutputFormatterProtocol(Protocol, Generic[T]):
+    """Formats, saves, and loads clip predictions in some output format."""
+
+    def format(self, predictions: Sequence[ClipDetections]) -> list[T]: ...
+
+    def save(
+        self,
+        predictions: Sequence[T],
+        path: PathLike,
+        audio_dir: PathLike | None = None,
+    ) -> None: ...
+
+    def load(self, path: PathLike) -> list[T]: ...
+
+
+# Per-detection transform; returning None discards the detection.
+DetectionTransform = Callable[[Detection], Detection | None]
+# Clip-level transform mapping one ClipDetections to another.
+ClipDetectionsTransform = Callable[[ClipDetections], ClipDetections]
+
+
+class OutputTransformProtocol(Protocol):
+    """Decodes raw detection tensors and applies output transforms."""
+
+    def to_detections(
+        self,
+        detections: ClipDetectionsTensor,
+        start_time: float = 0,
+    ) -> list[Detection]: ...
+
+    def to_clip_detections(
+        self,
+        detections: ClipDetectionsTensor,
+        clip: data.Clip,
+    ) -> ClipDetections: ...
+
+    def transform_detections(
+        self,
+        detections: Sequence[Detection],
+    ) -> list[Detection]: ...
+
+    def transform_clip_detections(
+        self,
+        prediction: ClipDetections,
+    ) -> ClipDetections: ...
diff --git a/src/batdetect2/plotting/__init__.py b/src/batdetect2/plotting/__init__.py
new file mode 100644
index 00000000..5287b798
--- /dev/null
+++ b/src/batdetect2/plotting/__init__.py
@@ -0,0 +1,31 @@
+from batdetect2.plotting.clip_annotations import plot_clip_annotation
+from batdetect2.plotting.clip_predictions import plot_clip_prediction
+from batdetect2.plotting.clips import plot_clip
+from batdetect2.plotting.common import plot_spectrogram
+from batdetect2.plotting.gallery import plot_match_gallery
+from batdetect2.plotting.heatmaps import (
+ plot_classification_heatmap,
+ plot_detection_heatmap,
+ plot_size_heatmap,
+)
+from batdetect2.plotting.matches import (
+ plot_cross_trigger_match,
+ plot_false_negative_match,
+ plot_false_positive_match,
+ plot_true_positive_match,
+)
+
+__all__ = [
+ "plot_clip",
+ "plot_clip_annotation",
+ "plot_clip_prediction",
+ "plot_cross_trigger_match",
+ "plot_false_negative_match",
+ "plot_false_positive_match",
+ "plot_spectrogram",
+ "plot_true_positive_match",
+ "plot_detection_heatmap",
+ "plot_classification_heatmap",
+ "plot_size_heatmap",
+ "plot_match_gallery",
+]
diff --git a/src/batdetect2/plotting/clip_annotations.py b/src/batdetect2/plotting/clip_annotations.py
new file mode 100644
index 00000000..67aaf944
--- /dev/null
+++ b/src/batdetect2/plotting/clip_annotations.py
@@ -0,0 +1,76 @@
+from matplotlib.axes import Axes
+from soundevent import data, plot
+
+from batdetect2.plotting.clips import plot_clip
+from batdetect2.plotting.common import create_ax
+from batdetect2.preprocess.types import PreprocessorProtocol
+from batdetect2.targets.types import ROIMapperProtocol, TargetProtocol
+
+__all__ = [
+ "plot_clip_annotation",
+]
+
+
+def plot_clip_annotation(
+    clip_annotation: data.ClipAnnotation,
+    preprocessor: PreprocessorProtocol | None = None,
+    figsize: tuple[int, int] | None = None,
+    ax: Axes | None = None,
+    audio_dir: data.PathLike | None = None,
+    add_points: bool = False,
+    cmap: str = "gray",
+    alpha: float = 1,
+    linewidth: float = 1,
+    fill: bool = False,
+) -> Axes:
+    """Plot a clip's spectrogram with its annotations overlaid."""
+    ax = plot_clip(
+        clip_annotation.clip,
+        preprocessor=preprocessor,
+        figsize=figsize,
+        ax=ax,
+        audio_dir=audio_dir,
+        spec_cmap=cmap,
+    )
+
+    plot.plot_annotations(
+        clip_annotation.sound_events,
+        ax=ax,
+        # Fixed offsets (4 ms, 2 kHz) nudge tag labels off the geometry.
+        time_offset=0.004,
+        freq_offset=2_000,
+        add_points=add_points,
+        alpha=alpha,
+        linewidth=linewidth,
+        facecolor="none" if not fill else None,
+    )
+    return ax
+
+
+def plot_anchor_points(
+ clip_annotation: data.ClipAnnotation,
+ targets: TargetProtocol,
+ roi_mapper: ROIMapperProtocol,
+ figsize: tuple[int, int] | None = None,
+ ax: Axes | None = None,
+ size: int = 1,
+ color: str = "red",
+ marker: str = "x",
+ alpha: float = 1,
+) -> Axes:
+ ax = create_ax(ax=ax, figsize=figsize)
+
+ positions = []
+
+ for sound_event in clip_annotation.sound_events:
+ if not targets.filter(sound_event):
+ continue
+
+ class_name = targets.encode_class(sound_event)
+ position, _ = roi_mapper.encode(
+ sound_event.sound_event,
+ class_name=class_name,
+ )
+ positions.append(position)
+
+ X, Y = zip(*positions, strict=False)
+ ax.scatter(X, Y, s=size, c=color, marker=marker, alpha=alpha)
+ return ax
diff --git a/src/batdetect2/plotting/clip_predictions.py b/src/batdetect2/plotting/clip_predictions.py
new file mode 100644
index 00000000..d0b24fd2
--- /dev/null
+++ b/src/batdetect2/plotting/clip_predictions.py
@@ -0,0 +1,137 @@
+from typing import Iterable
+
+from matplotlib.axes import Axes
+from soundevent import data
+from soundevent.geometry.operations import Positions, get_geometry_point
+from soundevent.plot.common import create_axes
+from soundevent.plot.geometries import plot_geometry
+from soundevent.plot.tags import TagColorMapper, add_tags_legend, plot_tag
+
+from batdetect2.plotting.clips import plot_clip
+from batdetect2.preprocess.types import PreprocessorProtocol
+
+__all__ = [
+ "plot_clip_prediction",
+]
+
+
+def plot_clip_prediction(
+    clip_prediction: data.ClipPrediction,
+    preprocessor: PreprocessorProtocol | None = None,
+    figsize: tuple[int, int] | None = None,
+    ax: Axes | None = None,
+    audio_dir: data.PathLike | None = None,
+    add_legend: bool = False,
+    spec_cmap: str = "gray",
+    linewidth: float = 1,
+    fill: bool = False,
+) -> Axes:
+    """Plot a clip's spectrogram with its predicted sound events overlaid."""
+    ax = plot_clip(
+        clip_prediction.clip,
+        preprocessor=preprocessor,
+        figsize=figsize,
+        ax=ax,
+        audio_dir=audio_dir,
+        spec_cmap=spec_cmap,
+    )
+
+    plot_predictions(
+        clip_prediction.sound_events,
+        ax=ax,
+        # Fixed offsets (4 ms, 2 kHz) nudge tag labels off the geometry.
+        time_offset=0.004,
+        freq_offset=2_000,
+        add_points=False,
+        linewidth=linewidth,
+        facecolor="none" if not fill else None,
+        legend=add_legend,
+    )
+    return ax
+
+
+def plot_predictions(
+    predictions: Iterable[data.SoundEventPrediction],
+    ax: Axes | None = None,
+    position: Positions = "top-right",
+    color_mapper: TagColorMapper | None = None,
+    time_offset: float = 0.001,
+    freq_offset: float = 1000,
+    legend: bool = True,
+    max_alpha: float = 0.5,
+    color: str | None = None,
+    **kwargs,
+):
+    """Plot a collection of sound event predictions on one axis.
+
+    A shared ``color_mapper`` keeps tag colors consistent across
+    predictions and powers the optional legend.
+    """
+    # NOTE(review): **kwargs is forwarded both to axis creation and to
+    # every plot_prediction call — confirm both accept the same keys.
+    if ax is None:
+        ax = create_axes(**kwargs)
+
+    if color_mapper is None:
+        color_mapper = TagColorMapper()
+
+    for prediction in predictions:
+        ax = plot_prediction(
+            prediction,
+            ax=ax,
+            position=position,
+            color_mapper=color_mapper,
+            time_offset=time_offset,
+            freq_offset=freq_offset,
+            max_alpha=max_alpha,
+            color=color,
+            **kwargs,
+        )
+
+    if legend:
+        ax = add_tags_legend(ax, color_mapper)
+
+    return ax
+
+
+def plot_prediction(
+    prediction: data.SoundEventPrediction,
+    ax: Axes | None = None,
+    position: Positions = "top-right",
+    color_mapper: TagColorMapper | None = None,
+    time_offset: float = 0.001,
+    freq_offset: float = 1000,
+    max_alpha: float = 0.5,
+    alpha: float | None = None,
+    color: str | None = None,
+    **kwargs,
+) -> Axes:
+    """Plot a single sound event prediction with its tags.
+
+    Raises
+    ------
+    ValueError
+        If the prediction's sound event has no geometry.
+    """
+    geometry = prediction.sound_event.geometry
+
+    if geometry is None:
+        raise ValueError("Annotation does not have a geometry.")
+
+    if ax is None:
+        ax = create_axes(**kwargs)
+
+    if color_mapper is None:
+        color_mapper = TagColorMapper()
+
+    if alpha is None:
+        # Opacity scales with prediction confidence, capped at 1.
+        alpha = min(prediction.score * max_alpha, 1)
+
+    ax = plot_geometry(
+        geometry,
+        ax=ax,
+        color=color,
+        alpha=alpha,
+        **kwargs,
+    )
+
+    x, y = get_geometry_point(geometry, position=position)
+
+    # Tags are stacked downward from the anchor point, one per row.
+    for index, tag in enumerate(prediction.tags):
+        color = color_mapper.get_color(tag.tag)
+        ax = plot_tag(
+            time=x + time_offset,
+            frequency=y - index * freq_offset,
+            color=color,
+            ax=ax,
+            alpha=min(tag.score, prediction.score),
+            **kwargs,
+        )
+
+    return ax
diff --git a/src/batdetect2/plotting/clips.py b/src/batdetect2/plotting/clips.py
new file mode 100644
index 00000000..9edf99f1
--- /dev/null
+++ b/src/batdetect2/plotting/clips.py
@@ -0,0 +1,48 @@
+import matplotlib.pyplot as plt
+import torch
+from matplotlib.axes import Axes
+from soundevent import data
+
+from batdetect2.audio import build_audio_loader
+from batdetect2.audio.types import AudioLoader
+from batdetect2.plotting.common import plot_spectrogram
+from batdetect2.preprocess import build_preprocessor
+from batdetect2.preprocess.types import PreprocessorProtocol
+
+__all__ = [
+ "plot_clip",
+]
+
+
+def plot_clip(
+    clip: data.Clip,
+    audio_loader: AudioLoader | None = None,
+    preprocessor: PreprocessorProtocol | None = None,
+    figsize: tuple[int, int] | None = None,
+    ax: Axes | None = None,
+    audio_dir: data.PathLike | None = None,
+    spec_cmap: str = "gray",
+) -> Axes:
+    """Load, preprocess, and plot the spectrogram of a clip.
+
+    Default audio loader / preprocessor instances are built when not
+    provided.
+    """
+    if ax is None:
+        _, ax = plt.subplots(figsize=figsize)
+
+    if preprocessor is None:
+        preprocessor = build_preprocessor()
+
+    if audio_loader is None:
+        audio_loader = build_audio_loader()
+
+    wav = torch.tensor(audio_loader.load_clip(clip, audio_dir=audio_dir))
+    spec = preprocessor(wav)
+
+    plot_spectrogram(
+        spec,
+        start_time=clip.start_time,
+        end_time=clip.end_time,
+        min_freq=preprocessor.min_freq,
+        max_freq=preprocessor.max_freq,
+        ax=ax,
+        cmap=spec_cmap,
+    )
+
+    return ax
diff --git a/src/batdetect2/plotting/common.py b/src/batdetect2/plotting/common.py
new file mode 100644
index 00000000..425a40d4
--- /dev/null
+++ b/src/batdetect2/plotting/common.py
@@ -0,0 +1,73 @@
+"""General plotting utilities."""
+
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+from matplotlib import axes
+
+__all__ = [
+ "create_ax",
+]
+
+
+def create_ax(
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] | None = None,
+ **kwargs,
+) -> axes.Axes:
+ """Create a new axis if none is provided"""
+ if ax is None:
+ _, ax = plt.subplots(figsize=figsize, nrows=1, ncols=1, **kwargs)
+
+ return ax
+
+
+def plot_spectrogram(
+    spec: torch.Tensor | np.ndarray,
+    start_time: float | None = None,
+    end_time: float | None = None,
+    min_freq: float | None = None,
+    max_freq: float | None = None,
+    ax: axes.Axes | None = None,
+    figsize: tuple[int, int] | None = None,
+    add_colorbar: bool = False,
+    colorbar_kwargs: dict | None = None,
+    vmin: float | None = None,
+    vmax: float | None = None,
+    cmap="gray",
+) -> axes.Axes:
+    """Render a spectrogram array on an axis with real-unit extents.
+
+    When time/frequency limits are omitted, bin indices are used as the
+    axis units instead.
+    """
+    if isinstance(spec, torch.Tensor):
+        spec = spec.numpy()
+
+    # Drop leading singleton dimensions (e.g. batch/channel).
+    spec = spec.squeeze()
+
+    ax = create_ax(ax=ax, figsize=figsize)
+
+    if start_time is None:
+        start_time = 0
+
+    if end_time is None:
+        end_time = spec.shape[-1]
+
+    if min_freq is None:
+        min_freq = 0
+
+    if max_freq is None:
+        max_freq = spec.shape[-2]
+
+    # pcolormesh expects bin *edges*, hence the +1 sample counts.
+    mappable = ax.pcolormesh(
+        np.linspace(start_time, end_time, spec.shape[-1] + 1, endpoint=True),
+        np.linspace(min_freq, max_freq, spec.shape[-2] + 1, endpoint=True),
+        spec,
+        cmap=cmap,
+        vmin=vmin,
+        vmax=vmax,
+    )
+
+    ax.set_xlim(start_time, end_time)
+    ax.set_ylim(min_freq, max_freq)
+
+    if add_colorbar:
+        plt.colorbar(mappable, ax=ax, **(colorbar_kwargs or {}))
+
+    return ax
diff --git a/src/batdetect2/plotting/detections.py b/src/batdetect2/plotting/detections.py
new file mode 100644
index 00000000..4bbaf274
--- /dev/null
+++ b/src/batdetect2/plotting/detections.py
@@ -0,0 +1,111 @@
+from matplotlib import axes, patches
+from soundevent.plot import plot_geometry
+
+from batdetect2.evaluate.metrics.detection import ClipEval
+from batdetect2.plotting.clips import (
+ AudioLoader,
+ PreprocessorProtocol,
+ plot_clip,
+)
+from batdetect2.plotting.common import create_ax
+
+__all__ = [
+ "plot_clip_detections",
+]
+
+
+def plot_clip_detections(
+ clip_eval: ClipEval,
+ figsize: tuple[int, int] = (10, 10),
+ ax: axes.Axes | None = None,
+ audio_loader: AudioLoader | None = None,
+ preprocessor: PreprocessorProtocol | None = None,
+ threshold: float = 0.2,
+ add_legend: bool = True,
+ add_title: bool = True,
+ fill: bool = False,
+ linewidth: float = 1.0,
+ gt_color: str = "green",
+ gt_linestyle: str = "-",
+ true_pred_color: str = "yellow",
+ true_pred_linestyle: str = "--",
+ false_pred_color: str = "blue",
+ false_pred_linestyle: str = "-",
+ missed_gt_color: str = "red",
+ missed_gt_linestyle: str = "-",
+) -> axes.Axes:
+ ax = create_ax(figsize=figsize, ax=ax)
+
+ plot_clip(
+ clip_eval.clip,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ ax=ax,
+ )
+
+ for m in clip_eval.matches:
+ is_match = (
+ m.pred is not None and m.gt is not None and m.score >= threshold
+ )
+
+ if m.pred is not None:
+ color = true_pred_color if is_match else false_pred_color
+ plot_geometry(
+ m.pred.geometry,
+ ax=ax,
+ add_points=False,
+ facecolor="none" if not fill else color,
+ alpha=m.pred.detection_score,
+ linewidth=linewidth,
+ linestyle=true_pred_linestyle
+ if is_match
+ else missed_gt_linestyle,
+ color=color,
+ )
+
+ if m.gt is not None:
+ color = gt_color if is_match else missed_gt_color
+ plot_geometry(
+ m.gt.sound_event.geometry,
+ ax=ax,
+ add_points=False,
+ linewidth=linewidth,
+ facecolor="none" if not fill else color,
+ linestyle=gt_linestyle if is_match else false_pred_linestyle,
+ color=color,
+ )
+
+ if add_title:
+ ax.set_title(clip_eval.clip.recording.path.name)
+
+ if add_legend:
+ ax.legend(
+ handles=[
+ patches.Patch(
+ label="found GT",
+ edgecolor=gt_color,
+ facecolor="none" if not fill else gt_color,
+ linestyle=gt_linestyle,
+ ),
+ patches.Patch(
+ label="missed GT",
+ edgecolor=missed_gt_color,
+ facecolor="none" if not fill else missed_gt_color,
+ linestyle=missed_gt_linestyle,
+ ),
+ patches.Patch(
+ label="true Det",
+ edgecolor=true_pred_color,
+ facecolor="none" if not fill else true_pred_color,
+ linestyle=true_pred_linestyle,
+ ),
+ patches.Patch(
+ label="false Det",
+ edgecolor=false_pred_color,
+ facecolor="none" if not fill else false_pred_color,
+ linestyle=false_pred_linestyle,
+ ),
+ ]
+ )
+
+ return ax
diff --git a/src/batdetect2/plotting/gallery.py b/src/batdetect2/plotting/gallery.py
new file mode 100644
index 00000000..63fb9324
--- /dev/null
+++ b/src/batdetect2/plotting/gallery.py
@@ -0,0 +1,118 @@
+from typing import Sequence
+
+import matplotlib.pyplot as plt
+from matplotlib.figure import Figure
+
+from batdetect2.audio.types import AudioLoader
+from batdetect2.plotting.matches import (
+ MatchProtocol,
+ plot_cross_trigger_match,
+ plot_false_negative_match,
+ plot_false_positive_match,
+ plot_true_positive_match,
+)
+from batdetect2.preprocess.types import PreprocessorProtocol
+
+__all__ = ["plot_match_gallery"]
+
+
+def plot_match_gallery(
+ true_positives: Sequence[MatchProtocol],
+ false_positives: Sequence[MatchProtocol],
+ false_negatives: Sequence[MatchProtocol],
+ cross_triggers: Sequence[MatchProtocol],
+ audio_loader: AudioLoader | None = None,
+ preprocessor: PreprocessorProtocol | None = None,
+ n_examples: int = 5,
+ duration: float = 0.1,
+ fig: Figure | None = None,
+):
+ if fig is None:
+ fig = plt.figure(figsize=(20, 20))
+
+    axes = fig.subplots(
+        nrows=4,
+        ncols=n_examples,
+        sharex="none",
+        sharey="row", squeeze=False,
+    )
+
+ for tp_ax, tp_match in zip(
+ axes[0], true_positives[:n_examples], strict=False
+ ):
+ try:
+ plot_true_positive_match(
+ tp_match,
+ ax=tp_ax,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ duration=duration,
+ )
+ except (
+ ValueError,
+ AssertionError,
+ RuntimeError,
+ FileNotFoundError,
+ ):
+ continue
+
+ for fp_ax, fp_match in zip(
+ axes[1], false_positives[:n_examples], strict=False
+ ):
+ try:
+ plot_false_positive_match(
+ fp_match,
+ ax=fp_ax,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ duration=duration,
+ )
+ except (
+ ValueError,
+ AssertionError,
+ RuntimeError,
+ FileNotFoundError,
+ ):
+ continue
+
+ for fn_ax, fn_match in zip(
+ axes[2], false_negatives[:n_examples], strict=False
+ ):
+ try:
+ plot_false_negative_match(
+ fn_match,
+ ax=fn_ax,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ duration=duration,
+ )
+ except (
+ ValueError,
+ AssertionError,
+ RuntimeError,
+ FileNotFoundError,
+ ):
+ continue
+
+ for ct_ax, ct_match in zip(
+ axes[3], cross_triggers[:n_examples], strict=False
+ ):
+ try:
+ plot_cross_trigger_match(
+ ct_match,
+ ax=ct_ax,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ duration=duration,
+ )
+ except (
+ ValueError,
+ AssertionError,
+ RuntimeError,
+ FileNotFoundError,
+ ):
+ continue
+
+ fig.tight_layout()
+
+ return fig
diff --git a/src/batdetect2/plotting/heatmaps.py b/src/batdetect2/plotting/heatmaps.py
new file mode 100644
index 00000000..9f4021fa
--- /dev/null
+++ b/src/batdetect2/plotting/heatmaps.py
@@ -0,0 +1,205 @@
+"""Plot heatmaps."""
+
+import numpy as np
+import torch
+from matplotlib import axes, patches
+from matplotlib.cm import get_cmap
+from matplotlib.colors import Colormap, LinearSegmentedColormap, to_rgba
+
+from batdetect2.plotting.common import create_ax
+
+__all__ = [
+ "plot_detection_heatmap",
+ "plot_classification_heatmap",
+ "plot_size_heatmap",
+]
+
+
+def plot_detection_heatmap(
+ heatmap: torch.Tensor | np.ndarray,
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] = (10, 10),
+ threshold: float | None = None,
+ alpha: float = 1,
+ cmap: str | Colormap = "jet",
+ color: str | None = None,
+) -> axes.Axes:
+ ax = create_ax(ax, figsize=figsize)
+
+ if isinstance(heatmap, torch.Tensor):
+        heatmap = heatmap.detach().cpu().numpy()
+
+ heatmap = heatmap.squeeze()
+
+ if threshold is not None:
+ heatmap = np.ma.masked_where(
+ heatmap < threshold,
+ heatmap,
+ )
+
+ if color is not None:
+ cmap = create_colormap(color)
+
+ ax.pcolormesh(
+ heatmap,
+ vmax=1,
+ vmin=0,
+ cmap=cmap,
+ alpha=alpha,
+ )
+
+ return ax
+
+
+def plot_classification_heatmap(
+ heatmap: torch.Tensor | np.ndarray,
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] = (10, 10),
+ class_names: list[str] | None = None,
+ threshold: float | None = 0.1,
+ alpha: float = 1,
+ cmap: str | Colormap = "tab20",
+):
+ ax = create_ax(ax, figsize=figsize)
+
+ if isinstance(heatmap, torch.Tensor):
+        heatmap = heatmap.detach().cpu().numpy()
+
+ if heatmap.ndim == 4:
+ heatmap = heatmap[0]
+
+ if heatmap.ndim != 3:
+ raise ValueError("Expecting a 3-dimensional array")
+
+ num_classes = heatmap.shape[0]
+
+ if class_names is None:
+ class_names = [f"class_{i}" for i in range(num_classes)]
+
+ if len(class_names) != num_classes:
+ raise ValueError("Inconsistent number of class names")
+
+ if not isinstance(cmap, Colormap):
+ cmap = get_cmap(cmap)
+
+ handles = []
+
+ for index, class_heatmap in enumerate(heatmap):
+ class_name = class_names[index]
+
+ color = cmap(index / num_classes)
+
+        peak = class_heatmap.max()
+
+        if peak == 0:
+            continue
+
+ if threshold is not None:
+ class_heatmap = np.ma.masked_where(
+ class_heatmap < threshold,
+ class_heatmap,
+ )
+
+ ax.pcolormesh(
+ class_heatmap,
+ vmax=1,
+ vmin=0,
+ cmap=create_colormap(color),
+ alpha=alpha,
+ )
+
+ handles.append(patches.Patch(color=color, label=class_name))
+
+ ax.legend(handles=handles)
+ return ax
+
+
+def plot_size_heatmap(
+ heatmap: torch.Tensor | np.ndarray,
+ dimension_names: list[str],
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] = (10, 10),
+ color: str = "crimson",
+ size: float = 20,
+ fontsize: float = 8,
+) -> axes.Axes:
+ """Plot sparse size labels from a size heatmap.
+
+ Parameters
+ ----------
+ heatmap : torch.Tensor | np.ndarray
+ Size heatmap with shape ``[num_dims, height, width]``. Entries are
+ expected to be zero everywhere except at labelled positions.
+ dimension_names : list[str]
+ Names corresponding to the first heatmap dimension.
+ ax : matplotlib.axes.Axes | None, default=None
+ Axis to plot on. If ``None``, a new axis is created.
+ figsize : tuple[int, int], default=(10, 10)
+ Figure size used when creating a new axis.
+ color : str, default="crimson"
+ Color used for scatter points and text labels.
+ size : float, default=20
+ Marker size for plotted points.
+ fontsize : float, default=8
+ Font size used for the text labels.
+
+ Returns
+ -------
+ matplotlib.axes.Axes
+ Axis containing the plotted size labels.
+ """
+ ax = create_ax(ax, figsize=figsize)
+
+ if isinstance(heatmap, torch.Tensor):
+        heatmap = heatmap.detach().cpu().numpy()
+
+ if heatmap.ndim == 4:
+ heatmap = heatmap[0]
+
+ if heatmap.ndim != 3:
+ raise ValueError("Expecting a 3-dimensional array")
+
+ if len(dimension_names) != heatmap.shape[0]:
+ raise ValueError("Inconsistent number of dimension names")
+
+ point_mask = np.any(heatmap != 0, axis=0)
+ rows, cols = np.nonzero(point_mask)
+
+ if len(rows) == 0:
+ return ax
+
+ ax.scatter(cols, rows, c=color, s=size)
+
+ for row, col in zip(rows, cols, strict=False):
+ values = heatmap[:, row, col]
+ labels = [
+ f"{name}={value:.2f}"
+ for name, value in zip(
+ dimension_names,
+ values,
+ strict=False,
+ )
+ if value != 0
+ ]
+ ax.text(
+ float(col),
+ float(row),
+ "\n".join(labels),
+ fontsize=fontsize,
+ color=color,
+ va="bottom",
+ ha="left",
+ )
+
+ ax.set_xlim(0, heatmap.shape[2])
+ ax.set_ylim(0, heatmap.shape[1])
+ return ax
+
+
+def create_colormap(
+ color: str | tuple[float, float, float, float],
+) -> Colormap:
+ (r, g, b, a) = to_rgba(color)
+ return LinearSegmentedColormap.from_list(
+ "cmap", colors=[(0, 0, 0, 0), (r, g, b, a)]
+ )
diff --git a/batdetect2/plot.py b/src/batdetect2/plotting/legacy/plot.py
similarity index 74%
rename from batdetect2/plot.py
rename to src/batdetect2/plotting/legacy/plot.py
index bf05cc81..6051abec 100644
--- a/batdetect2/plot.py
+++ b/src/batdetect2/plotting/legacy/plot.py
@@ -1,11 +1,11 @@
"""Plot functions to visualize detections and spectrograms."""
-from typing import List, Optional, Tuple, Union, cast
+from typing import cast
+import matplotlib.ticker as tick
import numpy as np
import torch
from matplotlib import axes, patches
-import matplotlib.ticker as tick
from matplotlib import pyplot as plt
from batdetect2.detector.parameters import DEFAULT_PROCESSING_CONFIGURATIONS
@@ -24,10 +24,10 @@
def spectrogram(
- spec: Union[torch.Tensor, np.ndarray],
- config: Optional[ProcessingConfiguration] = None,
- ax: Optional[axes.Axes] = None,
- figsize: Optional[Tuple[int, int]] = None,
+ spec: torch.Tensor | np.ndarray,
+ config: ProcessingConfiguration | None = None,
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] | None = None,
cmap: str = "plasma",
start_time: float = 0,
) -> axes.Axes:
@@ -35,18 +35,18 @@ def spectrogram(
Parameters
----------
- spec (Union[torch.Tensor, np.ndarray]): Spectrogram to plot.
- config (Optional[ProcessingConfiguration], optional): Configuration
+ spec: Spectrogram to plot.
+ config: Configuration
used to compute the spectrogram. Defaults to None. If None,
the default configuration will be used.
- ax (Optional[axes.Axes], optional): Matplotlib axes object.
+ ax: Matplotlib axes object.
Defaults to None. if provided, the spectrogram will be plotted
on this axes.
- figsize (Optional[Tuple[int, int]], optional): Figure size.
+ figsize: Figure size.
Defaults to None. If `ax` is None, this will be used to create
a new figure of the given size.
- cmap (str, optional): Colormap to use. Defaults to "plasma".
- start_time (float, optional): Start time of the spectrogram.
+ cmap: Colormap to use. Defaults to "plasma".
+ start_time: Start time of the spectrogram.
Defaults to 0. This is useful if plotting a spectrogram
of a segment of a longer audio file.
@@ -103,11 +103,11 @@ def y_fmt(x, _):
def spectrogram_with_detections(
- spec: Union[torch.Tensor, np.ndarray],
- dets: List[Annotation],
- config: Optional[ProcessingConfiguration] = None,
- ax: Optional[axes.Axes] = None,
- figsize: Optional[Tuple[int, int]] = None,
+ spec: torch.Tensor | np.ndarray,
+ dets: list[Annotation],
+ config: ProcessingConfiguration | None = None,
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] | None = None,
cmap: str = "plasma",
with_names: bool = True,
start_time: float = 0,
@@ -117,21 +117,21 @@ def spectrogram_with_detections(
Parameters
----------
- spec (Union[torch.Tensor, np.ndarray]): Spectrogram to plot.
- detections (List[Annotation]): List of detections.
- config (Optional[ProcessingConfiguration], optional): Configuration
+ spec: Spectrogram to plot.
+ detections: List of detections.
+ config: Configuration
used to compute the spectrogram. Defaults to None. If None,
the default configuration will be used.
- ax (Optional[axes.Axes], optional): Matplotlib axes object.
+ ax: Matplotlib axes object.
Defaults to None. if provided, the spectrogram will be plotted
on this axes.
- figsize (Optional[Tuple[int, int]], optional): Figure size.
+ figsize: Figure size.
Defaults to None. If `ax` is None, this will be used to create
a new figure of the given size.
- cmap (str, optional): Colormap to use. Defaults to "plasma".
- with_names (bool, optional): Whether to plot the name of the
+ cmap: Colormap to use. Defaults to "plasma".
+ with_names: Whether to plot the name of the
predicted class next to the detection. Defaults to True.
- start_time (float, optional): Start time of the spectrogram.
+ start_time: Start time of the spectrogram.
Defaults to 0. This is useful if plotting a spectrogram
of a segment of a longer audio file.
**kwargs: Additional keyword arguments to pass to the
@@ -167,9 +167,9 @@ def spectrogram_with_detections(
def detections(
- dets: List[Annotation],
- ax: Optional[axes.Axes] = None,
- figsize: Optional[Tuple[int, int]] = None,
+ dets: list[Annotation],
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] | None = None,
with_names: bool = True,
**kwargs,
) -> axes.Axes:
@@ -177,14 +177,14 @@ def detections(
Parameters
----------
- dets (List[Annotation]): List of detections.
- ax (Optional[axes.Axes], optional): Matplotlib axes object.
+ dets: List of detections.
+ ax: Matplotlib axes object.
Defaults to None. if provided, the spectrogram will be plotted
on this axes.
- figsize (Optional[Tuple[int, int]], optional): Figure size.
+ figsize: Figure size.
Defaults to None. If `ax` is None, this will be used to create
a new figure of the given size.
- with_names (bool, optional): Whether to plot the name of the
+ with_names: Whether to plot the name of the
predicted class next to the detection. Defaults to True.
**kwargs: Additional keyword arguments to pass to the
`plot.detection` function.
@@ -213,8 +213,8 @@ def detections(
def detection(
det: Annotation,
- ax: Optional[axes.Axes] = None,
- figsize: Optional[Tuple[int, int]] = None,
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] | None = None,
linewidth: float = 1,
edgecolor: str = "w",
facecolor: str = "none",
@@ -224,19 +224,19 @@ def detection(
Parameters
----------
- det (Annotation): Detection to plot.
- ax (Optional[axes.Axes], optional): Matplotlib axes object. Defaults
+ det: Detection to plot.
+ ax: Matplotlib axes object. Defaults
to None. If provided, the spectrogram will be plotted on this axes.
- figsize (Optional[Tuple[int, int]], optional): Figure size. Defaults
+ figsize: Figure size. Defaults
to None. If `ax` is None, this will be used to create a new figure
of the given size.
- linewidth (float, optional): Line width of the detection.
+ linewidth: Line width of the detection.
Defaults to 1.
- edgecolor (str, optional): Edge color of the detection.
+ edgecolor: Edge color of the detection.
Defaults to "w", i.e. white.
- facecolor (str, optional): Face color of the detection.
+ facecolor: Face color of the detection.
Defaults to "none", i.e. transparent.
- with_name (bool, optional): Whether to plot the name of the
+ with_name: Whether to plot the name of the
predicted class next to the detection. Defaults to True.
Returns
@@ -277,22 +277,22 @@ def detection(
def _compute_spec_extent(
- shape: Tuple[int, int],
+ shape: tuple[int, int],
params: SpectrogramParameters,
-) -> Tuple[float, float, float, float]:
+) -> tuple[float, float, float, float]:
"""Compute the extent of a spectrogram.
Parameters
----------
- shape (Tuple[int, int]): Shape of the spectrogram.
+ shape: Shape of the spectrogram.
The first dimension is the frequency axis and the second
dimension is the time axis.
- params (SpectrogramParameters): Spectrogram parameters.
+ params: Spectrogram parameters.
Should be the same as the ones used to compute the spectrogram.
Returns
-------
- Tuple[float, float, float, float]: Extent of the spectrogram.
+ tuple[float, float, float, float]: Extent of the spectrogram.
The first two values are the minimum and maximum time values,
the last two values are the minimum and maximum frequency values.
"""
@@ -306,6 +306,9 @@ def _compute_spec_extent(
# If the spectrogram is not resized, the duration is correct
# but if it is resized, the duration needs to be adjusted
+ # NOTE: For now we can only detect if the spectrogram is resized
+ # by checking if the height is equal to the specified height,
+ # but this could fail.
resize_factor = params["resize_factor"]
spec_height = params["spec_height"]
if spec_height * resize_factor == shape[0]:
diff --git a/src/batdetect2/plotting/matches.py b/src/batdetect2/plotting/matches.py
new file mode 100644
index 00000000..9806f5f0
--- /dev/null
+++ b/src/batdetect2/plotting/matches.py
@@ -0,0 +1,341 @@
+from typing import Protocol
+
+from matplotlib.axes import Axes
+from soundevent import data, plot
+from soundevent.geometry import compute_bounds
+
+from batdetect2.audio.types import AudioLoader
+from batdetect2.plotting.clips import plot_clip
+from batdetect2.postprocess.types import Detection
+from batdetect2.preprocess.types import PreprocessorProtocol
+
+__all__ = [
+ "plot_false_positive_match",
+ "plot_true_positive_match",
+ "plot_false_negative_match",
+ "plot_cross_trigger_match",
+]
+
+
+class MatchProtocol(Protocol):
+ clip: data.Clip
+ gt: data.SoundEventAnnotation | None
+ pred: Detection | None
+ score: float
+ true_class: str | None
+
+
+DEFAULT_DURATION = 0.05
+DEFAULT_FALSE_POSITIVE_COLOR = "orange"
+DEFAULT_FALSE_NEGATIVE_COLOR = "red"
+DEFAULT_TRUE_POSITIVE_COLOR = "green"
+DEFAULT_CROSS_TRIGGER_COLOR = "orange"
+DEFAULT_ANNOTATION_LINE_STYLE = "-"
+DEFAULT_PREDICTION_LINE_STYLE = "--"
+
+
+def plot_false_positive_match(
+ match: MatchProtocol,
+ audio_loader: AudioLoader | None = None,
+ preprocessor: PreprocessorProtocol | None = None,
+ figsize: tuple[int, int] | None = None,
+ ax: Axes | None = None,
+ audio_dir: data.PathLike | None = None,
+ duration: float = DEFAULT_DURATION,
+ use_score: bool = True,
+ add_spectrogram: bool = True,
+ add_text: bool = True,
+ add_points: bool = False,
+ add_title: bool = True,
+ fill: bool = False,
+ spec_cmap: str = "gray",
+ color: str = DEFAULT_FALSE_POSITIVE_COLOR,
+ fontsize: float | str = "small",
+) -> Axes:
+ assert match.pred is not None
+
+ start_time, _, _, high_freq = compute_bounds(match.pred.geometry)
+
+ clip = data.Clip(
+ start_time=max(
+ start_time - duration / 2,
+ 0,
+ ),
+ end_time=min(
+ start_time + duration / 2,
+ match.clip.recording.duration,
+ ),
+ recording=match.clip.recording,
+ )
+
+ if add_spectrogram:
+ ax = plot_clip(
+ clip,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ figsize=figsize,
+ ax=ax,
+ audio_dir=audio_dir,
+ spec_cmap=spec_cmap,
+ )
+
+ ax = plot.plot_geometry(
+ match.pred.geometry,
+ ax=ax,
+ add_points=add_points,
+ facecolor="none" if not fill else None,
+ alpha=match.score if use_score else 1,
+ color=color,
+ )
+
+ if add_text:
+ ax.text(
+ start_time,
+ high_freq,
+ f"score={match.score:.2f}",
+ va="top",
+ ha="right",
+ color=color,
+ fontsize=fontsize,
+ )
+
+ if add_title:
+ ax.set_title("False Positive")
+
+ return ax
+
+
+def plot_false_negative_match(
+ match: MatchProtocol,
+ audio_loader: AudioLoader | None = None,
+ preprocessor: PreprocessorProtocol | None = None,
+ figsize: tuple[int, int] | None = None,
+ ax: Axes | None = None,
+ audio_dir: data.PathLike | None = None,
+ duration: float = DEFAULT_DURATION,
+ add_spectrogram: bool = True,
+ add_points: bool = False,
+ add_title: bool = True,
+ fill: bool = False,
+ spec_cmap: str = "gray",
+ color: str = DEFAULT_FALSE_NEGATIVE_COLOR,
+) -> Axes:
+ assert match.gt is not None
+
+ geometry = match.gt.sound_event.geometry
+ assert geometry is not None
+
+ start_time = compute_bounds(geometry)[0]
+
+ clip = data.Clip(
+ start_time=max(
+ start_time - duration / 2,
+ 0,
+ ),
+ end_time=min(
+ start_time + duration / 2,
+ match.clip.recording.duration,
+ ),
+ recording=match.clip.recording,
+ )
+
+ if add_spectrogram:
+ ax = plot_clip(
+ clip,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ figsize=figsize,
+ ax=ax,
+ audio_dir=audio_dir,
+ spec_cmap=spec_cmap,
+ )
+
+ ax = plot.plot_geometry(
+ geometry,
+ ax=ax,
+ add_points=add_points,
+ facecolor="none" if not fill else None,
+ alpha=1,
+ color=color,
+ )
+
+ if add_title:
+ ax.set_title("False Negative")
+
+ return ax
+
+
+def plot_true_positive_match(
+ match: MatchProtocol,
+ preprocessor: PreprocessorProtocol | None = None,
+ audio_loader: AudioLoader | None = None,
+ figsize: tuple[int, int] | None = None,
+ ax: Axes | None = None,
+ audio_dir: data.PathLike | None = None,
+ duration: float = DEFAULT_DURATION,
+ use_score: bool = True,
+ add_spectrogram: bool = True,
+ add_points: bool = False,
+ add_text: bool = True,
+ fill: bool = False,
+ spec_cmap: str = "gray",
+ color: str = DEFAULT_TRUE_POSITIVE_COLOR,
+ fontsize: float | str = "small",
+ annotation_linestyle: str = DEFAULT_ANNOTATION_LINE_STYLE,
+ prediction_linestyle: str = DEFAULT_PREDICTION_LINE_STYLE,
+ add_title: bool = True,
+) -> Axes:
+ assert match.gt is not None
+ assert match.pred is not None
+
+ geometry = match.gt.sound_event.geometry
+ assert geometry is not None
+
+ start_time, _, _, high_freq = compute_bounds(geometry)
+
+ clip = data.Clip(
+ start_time=max(
+ start_time - duration / 2,
+ 0,
+ ),
+ end_time=min(
+ start_time + duration / 2,
+ match.clip.recording.duration,
+ ),
+ recording=match.clip.recording,
+ )
+
+ if add_spectrogram:
+ ax = plot_clip(
+ clip,
+ ax=ax,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ figsize=figsize,
+ audio_dir=audio_dir,
+ spec_cmap=spec_cmap,
+ )
+
+ ax = plot.plot_geometry(
+ geometry,
+ ax=ax,
+ add_points=add_points,
+ facecolor="none" if not fill else None,
+ alpha=1,
+ color=color,
+ linestyle=annotation_linestyle,
+ )
+
+ plot.plot_geometry(
+ match.pred.geometry,
+ ax=ax,
+ add_points=add_points,
+ facecolor="none" if not fill else None,
+ alpha=match.score if use_score else 1,
+ color=color,
+ linestyle=prediction_linestyle,
+ )
+
+ if add_text:
+ ax.text(
+ start_time,
+ high_freq,
+ f"score={match.score:.2f}",
+ va="top",
+ ha="right",
+ color=color,
+ fontsize=fontsize,
+ )
+
+ if add_title:
+ ax.set_title("True Positive")
+
+ return ax
+
+
+def plot_cross_trigger_match(
+ match: MatchProtocol,
+ preprocessor: PreprocessorProtocol | None = None,
+ audio_loader: AudioLoader | None = None,
+ figsize: tuple[int, int] | None = None,
+ ax: Axes | None = None,
+ audio_dir: data.PathLike | None = None,
+ duration: float = DEFAULT_DURATION,
+ use_score: bool = True,
+ add_spectrogram: bool = True,
+ add_points: bool = False,
+ add_text: bool = True,
+ add_title: bool = True,
+ fill: bool = False,
+ spec_cmap: str = "gray",
+ color: str = DEFAULT_CROSS_TRIGGER_COLOR,
+ fontsize: float | str = "small",
+ annotation_linestyle: str = DEFAULT_ANNOTATION_LINE_STYLE,
+ prediction_linestyle: str = DEFAULT_PREDICTION_LINE_STYLE,
+) -> Axes:
+ assert match.gt is not None
+ assert match.pred is not None
+
+ geometry = match.gt.sound_event.geometry
+ assert geometry is not None
+
+ start_time, _, _, high_freq = compute_bounds(geometry)
+
+ clip = data.Clip(
+ start_time=max(
+ start_time - duration / 2,
+ 0,
+ ),
+ end_time=min(
+ start_time + duration / 2,
+ match.clip.recording.duration,
+ ),
+ recording=match.clip.recording,
+ )
+
+ if add_spectrogram:
+ ax = plot_clip(
+ clip,
+ audio_loader=audio_loader,
+ preprocessor=preprocessor,
+ figsize=figsize,
+ ax=ax,
+ audio_dir=audio_dir,
+ spec_cmap=spec_cmap,
+ )
+
+ ax = plot.plot_geometry(
+ geometry,
+ ax=ax,
+ add_points=add_points,
+ facecolor="none" if not fill else None,
+ alpha=1,
+ color=color,
+ linestyle=annotation_linestyle,
+ )
+
+ ax = plot.plot_geometry(
+ match.pred.geometry,
+ ax=ax,
+ add_points=add_points,
+ facecolor="none" if not fill else None,
+ alpha=match.score if use_score else 1,
+ color=color,
+ linestyle=prediction_linestyle,
+ )
+
+ if add_text:
+ ax.text(
+ start_time,
+ high_freq,
+ f"score={match.score:.2f}\nclass={match.true_class}",
+ va="top",
+ ha="right",
+ color=color,
+ fontsize=fontsize,
+ )
+
+ if add_title:
+ ax.set_title("Cross Trigger")
+
+ return ax
diff --git a/src/batdetect2/plotting/metrics.py b/src/batdetect2/plotting/metrics.py
new file mode 100644
index 00000000..b267a002
--- /dev/null
+++ b/src/batdetect2/plotting/metrics.py
@@ -0,0 +1,302 @@
+import numpy as np
+import seaborn as sns
+from cycler import cycler
+from matplotlib import axes
+
+from batdetect2.evaluate.metrics.common import _average_precision
+from batdetect2.plotting.common import create_ax
+
+
+def set_default_styler(ax: axes.Axes) -> axes.Axes:
+ color_cycler = cycler(color=sns.color_palette("muted"))
+ style_cycler = cycler(linestyle=["-", "--", ":"]) * cycler(
+ marker=["o", "s", "^"]
+ )
+ custom_cycler = color_cycler * len(style_cycler) + style_cycler * len(
+ color_cycler
+ )
+
+ ax.set_prop_cycle(custom_cycler)
+ return ax
+
+
+def set_default_style(ax: axes.Axes) -> axes.Axes:
+ ax = set_default_styler(ax)
+ ax.spines.right.set_visible(False)
+ ax.spines.top.set_visible(False)
+ return ax
+
+
+def plot_pr_curve(
+ precision: np.ndarray,
+ recall: np.ndarray,
+ thresholds: np.ndarray,
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] | None = None,
+ color: str | tuple[float, float, float] | None = None,
+ add_labels: bool = True,
+ add_legend: bool = False,
+ marker: str | tuple[int, int, float] | None = "o",
+ markeredgecolor: str | tuple[float, float, float] | None = None,
+ markersize: float | None = None,
+ linestyle: str | tuple[int, ...] | None = None,
+ linewidth: float | None = None,
+ label: str = "PR Curve",
+) -> axes.Axes:
+ ax = create_ax(ax=ax, figsize=figsize)
+
+ ax = set_default_style(ax)
+
+ ax.plot(
+ recall,
+ precision,
+ color=color,
+ label=label,
+ marker=marker,
+ markeredgecolor=markeredgecolor,
+ markevery=_get_marker_positions(thresholds),
+ markersize=markersize,
+ linestyle=linestyle,
+ linewidth=linewidth,
+ )
+
+ ax.set_xlim(0, 1.05)
+ ax.set_ylim(0, 1.05)
+
+ if add_legend:
+ ax.legend()
+
+ if add_labels:
+ ax.set_xlabel("Recall")
+ ax.set_ylabel("Precision")
+
+ return ax
+
+
+def plot_pr_curves(
+ data: dict[str, tuple[np.ndarray, np.ndarray, np.ndarray]],
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] | None = None,
+ add_legend: bool = True,
+ add_labels: bool = True,
+ include_ap: bool = False,
+) -> axes.Axes:
+ ax = create_ax(ax=ax, figsize=figsize)
+ ax = set_default_style(ax)
+
+ for name, (precision, recall, thresholds) in data.items():
+ label = name
+
+ if include_ap:
+ label += f" (AP={_average_precision(recall, precision):.2f})"
+
+ ax.plot(
+ recall,
+ precision,
+ label=label,
+ markevery=_get_marker_positions(thresholds),
+ )
+
+ ax.set_xlim(0, 1.05)
+ ax.set_ylim(0, 1.05)
+
+ if add_labels:
+ ax.set_xlabel("Recall")
+ ax.set_ylabel("Precision")
+
+ if add_legend:
+ ax.legend(
+ bbox_to_anchor=(1.05, 1),
+ loc="upper left",
+ borderaxespad=0.0,
+ )
+ return ax
+
+
+def plot_threshold_precision_curve(
+ threshold: np.ndarray,
+ precision: np.ndarray,
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] | None = None,
+ add_labels: bool = True,
+):
+ ax = create_ax(ax=ax, figsize=figsize)
+
+ ax = set_default_style(ax)
+
+ ax.plot(threshold, precision, markevery=_get_marker_positions(threshold))
+
+ ax.set_xlim(0, 1.05)
+ ax.set_ylim(0, 1.05)
+
+ if add_labels:
+ ax.set_xlabel("Threshold")
+ ax.set_ylabel("Precision")
+
+ return ax
+
+
+def plot_threshold_precision_curves(
+ data: dict[str, tuple[np.ndarray, np.ndarray, np.ndarray]],
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] | None = None,
+ add_legend: bool = True,
+ add_labels: bool = True,
+):
+ ax = create_ax(ax=ax, figsize=figsize)
+ ax = set_default_style(ax)
+
+ for name, (precision, _, thresholds) in data.items():
+ ax.plot(
+ thresholds,
+ precision,
+ label=name,
+ markevery=_get_marker_positions(thresholds),
+ )
+
+ if add_legend:
+ ax.legend(
+ bbox_to_anchor=(1.05, 1),
+ loc="upper left",
+ borderaxespad=0.0,
+ )
+
+ ax.set_xlim(0, 1.05)
+ ax.set_ylim(0, 1.05)
+
+ if add_labels:
+ ax.set_xlabel("Threshold")
+ ax.set_ylabel("Precision")
+
+ return ax
+
+
+def plot_threshold_recall_curve(
+ threshold: np.ndarray,
+ recall: np.ndarray,
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] | None = None,
+ add_labels: bool = True,
+):
+ ax = create_ax(ax=ax, figsize=figsize)
+
+ ax = set_default_style(ax)
+
+ ax.plot(threshold, recall, markevery=_get_marker_positions(threshold))
+
+ ax.set_xlim(0, 1.05)
+ ax.set_ylim(0, 1.05)
+
+ if add_labels:
+ ax.set_xlabel("Threshold")
+ ax.set_ylabel("Recall")
+
+ return ax
+
+
+def plot_threshold_recall_curves(
+ data: dict[str, tuple[np.ndarray, np.ndarray, np.ndarray]],
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] | None = None,
+ add_legend: bool = True,
+ add_labels: bool = True,
+):
+ ax = create_ax(ax=ax, figsize=figsize)
+ ax = set_default_style(ax)
+
+ for name, (_, recall, thresholds) in data.items():
+ ax.plot(
+ thresholds,
+ recall,
+ label=name,
+ markevery=_get_marker_positions(thresholds),
+ )
+
+ if add_legend:
+ ax.legend(
+ bbox_to_anchor=(1.05, 1),
+ loc="upper left",
+ borderaxespad=0.0,
+ )
+
+ ax.set_xlim(0, 1.05)
+ ax.set_ylim(0, 1.05)
+
+ if add_labels:
+ ax.set_xlabel("Threshold")
+ ax.set_ylabel("Recall")
+
+ return ax
+
+
+def plot_roc_curve(
+ fpr: np.ndarray,
+ tpr: np.ndarray,
+ thresholds: np.ndarray,
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] | None = None,
+ add_labels: bool = True,
+) -> axes.Axes:
+ ax = create_ax(ax=ax, figsize=figsize)
+
+ ax = set_default_style(ax)
+
+ ax.plot(
+ fpr,
+ tpr,
+ markevery=_get_marker_positions(thresholds),
+ )
+
+ ax.set_xlim(0, 1.05)
+ ax.set_ylim(0, 1.05)
+
+ if add_labels:
+ ax.set_xlabel("False Positive Rate")
+ ax.set_ylabel("True Positive Rate")
+
+ return ax
+
+
+def plot_roc_curves(
+ data: dict[str, tuple[np.ndarray, np.ndarray, np.ndarray]],
+ ax: axes.Axes | None = None,
+ figsize: tuple[int, int] | None = None,
+ add_legend: bool = True,
+ add_labels: bool = True,
+) -> axes.Axes:
+ ax = create_ax(ax=ax, figsize=figsize)
+ ax = set_default_style(ax)
+
+ for name, (fpr, tpr, thresholds) in data.items():
+ ax.plot(
+ fpr,
+ tpr,
+ label=name,
+ markevery=_get_marker_positions(thresholds),
+ )
+
+ if add_legend:
+ ax.legend(
+ bbox_to_anchor=(1.05, 1),
+ loc="upper left",
+ borderaxespad=0.0,
+ )
+
+ ax.set_xlim(0, 1.05)
+ ax.set_ylim(0, 1.05)
+
+ if add_labels:
+ ax.set_xlabel("False Positive Rate")
+ ax.set_ylabel("True Positive Rate")
+
+ return ax
+
+
+def _get_marker_positions(
+ thresholds: np.ndarray,
+ n_points: int = 11,
+) -> np.ndarray:
+ size = len(thresholds)
+ cut_points = np.linspace(0, 1, n_points)
+ indices = np.searchsorted(thresholds[::-1], cut_points)
+ return np.clip(size - indices, 0, size - 1)
diff --git a/src/batdetect2/postprocess/__init__.py b/src/batdetect2/postprocess/__init__.py
new file mode 100644
index 00000000..35cc61dc
--- /dev/null
+++ b/src/batdetect2/postprocess/__init__.py
@@ -0,0 +1,37 @@
+"""Main entry point for the BatDetect2 Postprocessing pipeline."""
+
+from batdetect2.postprocess.config import (
+ DEFAULT_CLASSIFICATION_THRESHOLD,
+ DEFAULT_DETECTION_THRESHOLD,
+ PostprocessConfig,
+)
+from batdetect2.postprocess.nms import non_max_suppression
+from batdetect2.postprocess.postprocessor import (
+ Postprocessor,
+ build_postprocessor,
+)
+from batdetect2.postprocess.types import (
+ ClipDetections,
+ ClipDetectionsArray,
+ ClipDetectionsTensor,
+ ClipPrediction,
+ Detection,
+ GeometryDecoder,
+ PostprocessorProtocol,
+)
+
+__all__ = [
+ "ClipDetections",
+ "ClipDetectionsArray",
+ "ClipDetectionsTensor",
+ "ClipPrediction",
+ "Detection",
+ "GeometryDecoder",
+ "PostprocessConfig",
+ "Postprocessor",
+ "PostprocessorProtocol",
+ "build_postprocessor",
+ "non_max_suppression",
+ "DEFAULT_CLASSIFICATION_THRESHOLD",
+ "DEFAULT_DETECTION_THRESHOLD",
+]
diff --git a/src/batdetect2/postprocess/config.py b/src/batdetect2/postprocess/config.py
new file mode 100644
index 00000000..0fa1db22
--- /dev/null
+++ b/src/batdetect2/postprocess/config.py
@@ -0,0 +1,51 @@
+from pydantic import Field
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.postprocess.nms import NMS_KERNEL_SIZE
+
+__all__ = [
+ "PostprocessConfig",
+]
+
+DEFAULT_DETECTION_THRESHOLD = 0.01
+DEFAULT_CLASSIFICATION_THRESHOLD = 0.1
+
+
+TOP_K_PER_SEC = 100
+
+
+class PostprocessConfig(BaseConfig):
+ """Configuration settings for the postprocessing pipeline.
+
+ Defines tunable parameters that control how raw model outputs are
+ converted into final detections.
+
+ Attributes
+ ----------
+ nms_kernel_size : int, default=NMS_KERNEL_SIZE
+ Size (pixels) of the kernel/neighborhood for Non-Maximum Suppression.
+ Used to suppress weaker detections near stronger peaks. Must be
+ positive.
+ detection_threshold : float, default=DEFAULT_DETECTION_THRESHOLD
+ Minimum confidence score from the detection heatmap required to
+ consider a point as a potential detection. Must be >= 0.
+ classification_threshold : float, default=DEFAULT_CLASSIFICATION_THRESHOLD
+ Minimum confidence score for a specific class prediction to be included
+ in the decoded tags for a detection. Must be >= 0.
+ top_k_per_sec : int, default=TOP_K_PER_SEC
+ Desired maximum number of detections per second of audio. Used by
+ `get_max_detections` to calculate an absolute limit based on clip
+ duration before applying `extract_detections_from_array`. Must be
+ positive.
+ """
+
+ nms_kernel_size: int = Field(default=NMS_KERNEL_SIZE, gt=0)
+ detection_threshold: float = Field(
+ default=DEFAULT_DETECTION_THRESHOLD,
+ ge=0,
+ )
+ classification_threshold: float = Field(
+ default=DEFAULT_CLASSIFICATION_THRESHOLD,
+ ge=0,
+ )
+ top_k_per_sec: int = Field(default=TOP_K_PER_SEC, gt=0)
diff --git a/src/batdetect2/postprocess/extraction.py b/src/batdetect2/postprocess/extraction.py
new file mode 100644
index 00000000..b963e470
--- /dev/null
+++ b/src/batdetect2/postprocess/extraction.py
@@ -0,0 +1,89 @@
+"""Extracts associated data for detected points from model output arrays.
+
+This module implements Step 4 in the BatDetect2 postprocessing pipeline.
+After candidate detection points (time, frequency, score) have been
+identified, this module extracts the corresponding values from other raw
+model output arrays, such as:
+
+- Predicted bounding box sizes (width, height).
+- Class probability scores for each defined target class.
+- Intermediate feature vectors.
+
+It uses index-based gathering on the raw PyTorch output tensors to ensure
+that the correct values are retrieved from the original heatmaps/feature
+maps at the precise time-frequency location of each detection. The final
+output aggregates all extracted information into one structured
+``ClipDetectionsTensor`` per clip.
+"""
+
+from typing import List
+
+import torch
+
+from batdetect2.postprocess.types import ClipDetectionsTensor
+
+__all__ = [
+ "extract_detection_peaks",
+]
+
+
+def extract_detection_peaks(
+ detection_heatmap: torch.Tensor,
+ size_heatmap: torch.Tensor,
+ feature_heatmap: torch.Tensor,
+ classification_heatmap: torch.Tensor,
+ max_detections: int = 200,
+ threshold: float | None = None,
+) -> List[ClipDetectionsTensor]:
+ height = detection_heatmap.shape[-2]
+ width = detection_heatmap.shape[-1]
+
+ freqs, times = torch.meshgrid(
+ torch.arange(height, dtype=torch.int32),
+ torch.arange(width, dtype=torch.int32),
+ indexing="ij",
+ )
+
+ freqs = freqs.flatten().to(detection_heatmap.device)
+ times = times.flatten().to(detection_heatmap.device)
+
+ output_size_preds = size_heatmap.detach()
+ output_features = feature_heatmap.detach()
+ output_class_probs = classification_heatmap.detach()
+
+ predictions = []
+ for idx, item in enumerate(detection_heatmap):
+ item = item.squeeze().flatten() # Remove channel dim
+ indices = torch.argsort(item, descending=True)[:max_detections]
+
+ detection_scores = item.take(indices)
+ detection_freqs = freqs.take(indices)
+ detection_times = times.take(indices)
+
+ if threshold is not None:
+ mask = detection_scores >= threshold
+
+ detection_scores = detection_scores[mask]
+ detection_times = detection_times[mask]
+ detection_freqs = detection_freqs[mask]
+
+ sizes = output_size_preds[idx, :, detection_freqs, detection_times].T
+ features = output_features[idx, :, detection_freqs, detection_times].T
+ class_scores = output_class_probs[
+ idx,
+ :,
+ detection_freqs,
+ detection_times,
+ ].T
+
+ predictions.append(
+ ClipDetectionsTensor(
+ scores=detection_scores,
+ sizes=sizes,
+ features=features,
+ class_scores=class_scores,
+ times=detection_times.to(torch.float32) / width,
+ frequencies=(detection_freqs.to(torch.float32) / height),
+ )
+ )
+
+ return predictions
diff --git a/src/batdetect2/postprocess/nms.py b/src/batdetect2/postprocess/nms.py
new file mode 100644
index 00000000..6a41430a
--- /dev/null
+++ b/src/batdetect2/postprocess/nms.py
@@ -0,0 +1,94 @@
+"""Performs Non-Maximum Suppression (NMS) on detection heatmaps.
+
+This module provides functionality to apply Non-Maximum Suppression, a common
+technique used after model inference, particularly in object detection and peak
+detection tasks.
+
+In the context of BatDetect2 postprocessing, NMS is applied
+to the raw detection heatmap output by the neural network. Its purpose is to
+isolate distinct detection peaks by suppressing (setting to zero) nearby heatmap
+activations that have lower scores than a local maximum. This helps prevent
+multiple, overlapping detections originating from the same sound event.
+"""
+
+import torch
+
+NMS_KERNEL_SIZE = 9
+"""Default kernel size (pixels) for Non-Maximum Suppression.
+
+Specifies the side length of the square neighborhood used by default in
+`non_max_suppression` to find local maxima. A 9x9 neighborhood is often
+a reasonable starting point for typical spectrogram resolutions used in
+BatDetect2.
+"""
+
+
+def non_max_suppression(
+ tensor: torch.Tensor,
+ kernel_size: int | tuple[int, int] = NMS_KERNEL_SIZE,
+) -> torch.Tensor:
+ """Apply Non-Maximum Suppression (NMS) to a tensor, typically a heatmap.
+
+ This function identifies local maxima within a defined neighborhood for
+ each point in the input tensor. Values that are *not* the maximum within
+ their neighborhood are suppressed (set to zero). This is commonly used on
+ detection probability heatmaps to isolate distinct peaks corresponding to
+ individual detections and remove redundant lower scores nearby.
+
+ The implementation uses efficient 2D max pooling to find the maximum value
+ in the neighborhood of each point.
+
+ Parameters
+ ----------
+ tensor
+ Input tensor, typically representing a detection heatmap. Must be a
+ 3D (C, H, W) or 4D (N, C, H, W) tensor as required by the underlying
+ `torch.nn.functional.max_pool2d` operation.
+ kernel_size
+ Size of the sliding window neighborhood used to find local maxima.
+ If an integer `k` is provided, a square kernel of size `(k, k)` is used.
+ If a tuple `(h, w)` is provided, a rectangular kernel of height `h`
+ and width `w` is used. The kernel size should typically be odd to
+ have a well-defined center.
+
+ Returns
+ -------
+ torch.Tensor
+ A tensor of the same shape as the input, where only local maxima within
+ their respective neighborhoods (defined by `kernel_size`) retain their
+ original values. All other values are set to zero.
+
+ Raises
+ ------
+ TypeError
+ If `kernel_size` is not an int or a tuple of two ints.
+ RuntimeError
+ If the input `tensor` does not have 3 or 4 dimensions (as required
+ by `max_pool2d`).
+
+ Notes
+ -----
+ - The function assumes higher values in the tensor indicate stronger peaks.
+ - Choosing an appropriate `kernel_size` is important. It should be large
+ enough to cover the typical "footprint" of a single detection peak plus
+ some surrounding context, effectively preventing multiple detections for
+ the same event. A size that is too large might suppress nearby distinct
+ events.
+ """
+ if isinstance(kernel_size, int):
+ kernel_size_h = kernel_size
+ kernel_size_w = kernel_size
+ else:
+ kernel_size_h, kernel_size_w = kernel_size
+
+ pad_h = (kernel_size_h - 1) // 2
+ pad_w = (kernel_size_w - 1) // 2
+
+ hmax = torch.nn.functional.max_pool2d(
+ tensor,
+ (kernel_size_h, kernel_size_w),
+ stride=1,
+ padding=(pad_h, pad_w),
+ )
+ keep = (hmax == tensor).float()
+ return tensor * keep
diff --git a/src/batdetect2/postprocess/postprocessor.py b/src/batdetect2/postprocess/postprocessor.py
new file mode 100644
index 00000000..b6864ea2
--- /dev/null
+++ b/src/batdetect2/postprocess/postprocessor.py
@@ -0,0 +1,119 @@
+import torch
+from loguru import logger
+
+from batdetect2.models.types import ModelOutput
+from batdetect2.postprocess.config import (
+ PostprocessConfig,
+)
+from batdetect2.postprocess.extraction import extract_detection_peaks
+from batdetect2.postprocess.nms import NMS_KERNEL_SIZE, non_max_suppression
+from batdetect2.postprocess.types import (
+ ClipDetectionsTensor,
+ PostprocessorProtocol,
+)
+from batdetect2.preprocess.types import PreprocessorProtocol
+
+__all__ = [
+ "build_postprocessor",
+ "Postprocessor",
+]
+
+
+def build_postprocessor(
+ preprocessor: PreprocessorProtocol,
+ config: PostprocessConfig | None = None,
+) -> PostprocessorProtocol:
+ """Factory function to build the standard postprocessor."""
+ config = config or PostprocessConfig()
+ logger.opt(lazy=True).debug(
+ "Building postprocessor with config: \n{}",
+ lambda: config.to_yaml_string(),
+ )
+ return Postprocessor(
+ samplerate=preprocessor.output_samplerate,
+ min_freq=preprocessor.min_freq,
+ max_freq=preprocessor.max_freq,
+ top_k_per_sec=config.top_k_per_sec,
+ detection_threshold=config.detection_threshold,
+ )
+
+
+class Postprocessor(torch.nn.Module, PostprocessorProtocol):
+    """Standard implementation of the postprocessing pipeline."""
+
+    def __init__(
+        self,
+        samplerate: float,
+        min_freq: float,
+        max_freq: float,
+        top_k_per_sec: int = 200,
+        detection_threshold: float = 0.01,
+        nms_kernel_size: int | tuple[int, int] = NMS_KERNEL_SIZE,
+    ):
+        """Initialize the Postprocessor.
+
+        Parameters
+        ----------
+        samplerate : float
+            Frame rate of the model output spectrogram (frames per second);
+            used to convert the heatmap width into a duration.
+        min_freq, max_freq : float
+            Frequency band (Hz) covered by the spectrogram; used to map
+            normalised frequencies back to Hz.
+        top_k_per_sec : int, default=200
+            Maximum number of detections per second of audio.
+        detection_threshold : float, default=0.01
+            Minimum detection score for a peak to be kept.
+        nms_kernel_size : int or (int, int), default=NMS_KERNEL_SIZE
+            Neighbourhood size used by non-maximum suppression.
+        """
+        super().__init__()
+
+        self.output_samplerate = samplerate
+        self.min_freq = min_freq
+        self.max_freq = max_freq
+        self.top_k_per_sec = top_k_per_sec
+        self.detection_threshold = detection_threshold
+        self.nms_kernel_size = nms_kernel_size
+
+    def forward(
+        self,
+        output: ModelOutput,
+        detection_threshold: float | None = None,
+    ) -> list[ClipDetectionsTensor]:
+        """Convert raw model output into per-clip detection lists.
+
+        Parameters
+        ----------
+        output : ModelOutput
+            Raw model heatmaps for a batch of clips.
+        detection_threshold : float, optional
+            Per-call override of the instance-level threshold.
+
+        Returns
+        -------
+        list[ClipDetectionsTensor]
+            One entry per clip in the batch, with times in seconds and
+            frequencies mapped into [min_freq, max_freq].
+        """
+        threshold = (
+            self.detection_threshold
+            if detection_threshold is None
+            else detection_threshold
+        )
+
+        # Zero out non-peak activations before selecting the top scores.
+        detection_heatmap = non_max_suppression(
+            output.detection_probs.detach(),
+            kernel_size=self.nms_kernel_size,
+        )
+
+        # Cap the number of detections in proportion to the clip duration.
+        width = output.detection_probs.shape[-1]
+        duration = width / self.output_samplerate
+        max_detections = int(self.top_k_per_sec * duration)
+        detections = extract_detection_peaks(
+            detection_heatmap,
+            size_heatmap=output.size_preds,
+            feature_heatmap=output.features,
+            classification_heatmap=output.class_probs,
+            max_detections=max_detections,
+            threshold=threshold,
+        )
+
+        # Rescale the normalised [0, 1) coordinates to clip coordinates.
+        return [
+            map_detection_to_clip(
+                detection,
+                start_time=0,
+                end_time=duration,
+                min_freq=self.min_freq,
+                max_freq=self.max_freq,
+            )
+            for detection in detections
+        ]
+
+
+def map_detection_to_clip(
+ detections: ClipDetectionsTensor,
+ start_time: float,
+ end_time: float,
+ min_freq: float,
+ max_freq: float,
+) -> ClipDetectionsTensor:
+ duration = end_time - start_time
+ bandwidth = max_freq - min_freq
+ return ClipDetectionsTensor(
+ scores=detections.scores,
+ sizes=detections.sizes,
+ features=detections.features,
+ class_scores=detections.class_scores,
+ times=(detections.times * duration + start_time),
+ frequencies=(detections.frequencies * bandwidth + min_freq),
+ )
diff --git a/src/batdetect2/postprocess/remapping.py b/src/batdetect2/postprocess/remapping.py
new file mode 100644
index 00000000..def0b049
--- /dev/null
+++ b/src/batdetect2/postprocess/remapping.py
@@ -0,0 +1,75 @@
+"""Remaps raw model output tensors to coordinate-aware xarray DataArrays.
+
+This module provides utility functions to convert the raw numerical outputs
+(typically PyTorch tensors) from the BatDetect2 DNN model into
+`xarray.DataArray` objects. This step adds coordinate information
+(time in seconds, frequency in Hz) back to the model's predictions, making them
+interpretable in the context of the original audio signal and facilitating
+subsequent processing steps.
+
+Functions are provided for common BatDetect2 output types: detection heatmaps,
+classification probability maps, size prediction maps, and potentially
+intermediate features.
+"""
+
+from typing import Dict, List
+
+import numpy as np
+import torch
+import xarray as xr
+from soundevent.arrays import Dimensions
+
+from batdetect2.preprocess import MAX_FREQ, MIN_FREQ
+
+__all__ = [
+ "to_xarray",
+]
+
+
+def to_xarray(
+    array: torch.Tensor | np.ndarray,
+    start_time: float,
+    end_time: float,
+    min_freq: float = MIN_FREQ,
+    max_freq: float = MAX_FREQ,
+    name: str = "xarray",
+    extra_dims: List[str] | None = None,
+    extra_coords: Dict[str, np.ndarray] | None = None,
+) -> xr.DataArray:
+    """Attach time/frequency coordinates to a raw model output array.
+
+    The last two dimensions of ``array`` are interpreted as
+    ``(frequency, time)``. Any leading dimensions are preserved and named
+    via ``extra_dims`` (auto-named ``dim_0``, ``dim_1``, ... when omitted).
+
+    Parameters
+    ----------
+    array : torch.Tensor or np.ndarray
+        Array with at least 2 dimensions. Tensors are detached and moved
+        to the CPU before conversion.
+    start_time, end_time : float
+        Time span (seconds) covered by the time axis.
+    min_freq, max_freq : float
+        Frequency span (Hz) covered by the frequency axis.
+    name : str, default="xarray"
+        Name given to the resulting DataArray.
+    extra_dims : list of str, optional
+        Names for the leading dimensions, outermost first.
+    extra_coords : dict, optional
+        Additional coordinate arrays keyed by dimension name.
+
+    Returns
+    -------
+    xr.DataArray
+        Coordinate-aware view of the input data.
+
+    Raises
+    ------
+    ValueError
+        If ``array`` has fewer than 2 dimensions.
+    """
+    if isinstance(array, torch.Tensor):
+        array = array.detach().cpu().numpy()
+
+    extra_ndims = array.ndim - 2
+
+    if extra_ndims < 0:
+        raise ValueError(
+            "Input array must have at least 2 dimensions, "
+            f"got shape {array.shape}"
+        )
+
+    width = array.shape[-1]
+    height = array.shape[-2]
+
+    # endpoint=False: coordinates mark the left edge of each bin.
+    times = np.linspace(start_time, end_time, width, endpoint=False)
+    freqs = np.linspace(min_freq, max_freq, height, endpoint=False)
+
+    if extra_dims is None:
+        extra_dims = [f"dim_{i}" for i in range(extra_ndims)]
+
+    if extra_coords is None:
+        extra_coords = {}
+
+    return xr.DataArray(
+        data=array,
+        dims=[
+            *extra_dims,
+            Dimensions.frequency.value,
+            Dimensions.time.value,
+        ],
+        coords={
+            **extra_coords,
+            Dimensions.frequency.value: freqs,
+            Dimensions.time.value: times,
+        },
+        name=name,
+    )
diff --git a/src/batdetect2/postprocess/types.py b/src/batdetect2/postprocess/types.py
new file mode 100644
index 00000000..10d15113
--- /dev/null
+++ b/src/batdetect2/postprocess/types.py
@@ -0,0 +1,88 @@
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, NamedTuple, Protocol
+
+import numpy as np
+import torch
+from soundevent import data
+
+from batdetect2.targets.types import Position, Size
+
+if TYPE_CHECKING:
+ from batdetect2.models.types import ModelOutput
+
+__all__ = [
+ "ClipDetections",
+ "ClipDetectionsArray",
+ "ClipDetectionsTensor",
+ "ClipPrediction",
+ "Detection",
+ "GeometryDecoder",
+ "PostprocessorProtocol",
+]
+
+
+class GeometryDecoder(Protocol):
+    """Callable that turns a (position, size) pair into a geometry.
+
+    The optional ``class_name`` allows class-specific decoding rules.
+    """
+
+    def __call__(
+        self,
+        position: Position,
+        size: Size,
+        class_name: str | None = None,
+    ) -> data.Geometry: ...
+
+
+@dataclass
+class Detection:
+    """A single decoded detection with its geometry and scores."""
+
+    geometry: data.Geometry  # Location/extent of the detected sound event.
+    detection_score: float  # Confidence that a sound event is present.
+    class_scores: np.ndarray  # Per-class confidence scores.
+    features: np.ndarray  # Feature vector extracted at the detection point.
+
+
+class ClipDetectionsArray(NamedTuple):
+    """Detections for a single clip, stored as NumPy arrays."""
+
+    scores: np.ndarray  # Detection confidence per peak.
+    sizes: np.ndarray  # Predicted size vector per peak.
+    class_scores: np.ndarray  # Per-class scores per peak.
+    times: np.ndarray  # Time coordinate per peak.
+    frequencies: np.ndarray  # Frequency coordinate per peak.
+    features: np.ndarray  # Feature vector per peak.
+
+
+class ClipDetectionsTensor(NamedTuple):
+    """Detections for a single clip, stored as torch tensors."""
+
+    scores: torch.Tensor  # Detection confidence per peak.
+    sizes: torch.Tensor  # Predicted size vector per peak.
+    class_scores: torch.Tensor  # Per-class scores per peak.
+    times: torch.Tensor  # Time coordinate per peak.
+    frequencies: torch.Tensor  # Frequency coordinate per peak.
+    features: torch.Tensor  # Feature vector per peak.
+
+    def numpy(self) -> ClipDetectionsArray:
+        """Detach every field and convert it to a NumPy array on the CPU."""
+        return ClipDetectionsArray(
+            scores=self.scores.detach().cpu().numpy(),
+            sizes=self.sizes.detach().cpu().numpy(),
+            class_scores=self.class_scores.detach().cpu().numpy(),
+            times=self.times.detach().cpu().numpy(),
+            frequencies=self.frequencies.detach().cpu().numpy(),
+            features=self.features.detach().cpu().numpy(),
+        )
+
+
+@dataclass
+class ClipDetections:
+    """All decoded detections found within a single audio clip."""
+
+    clip: data.Clip  # The clip these detections belong to.
+    detections: list[Detection]  # Decoded detections for this clip.
+
+
+@dataclass
+class ClipPrediction:
+    """Clip-level prediction scores (without individual detections)."""
+
+    clip: data.Clip  # The clip being described.
+    detection_score: float  # Overall detection confidence for the clip.
+    class_scores: np.ndarray  # Clip-level per-class scores.
+
+
+class PostprocessorProtocol(Protocol):
+    """Interface for converting raw model output into clip detections."""
+
+    def __call__(
+        self,
+        output: "ModelOutput",
+        *,
+        detection_threshold: float | None = None,
+    ) -> list[ClipDetectionsTensor]: ...
diff --git a/src/batdetect2/preprocess/__init__.py b/src/batdetect2/preprocess/__init__.py
new file mode 100644
index 00000000..dadf9c67
--- /dev/null
+++ b/src/batdetect2/preprocess/__init__.py
@@ -0,0 +1,17 @@
+"""Main entry point for the BatDetect2 preprocessing subsystem."""
+
+from batdetect2.audio import TARGET_SAMPLERATE_HZ
+from batdetect2.preprocess.config import PreprocessingConfig
+from batdetect2.preprocess.preprocessor import Preprocessor, build_preprocessor
+from batdetect2.preprocess.spectrogram import MAX_FREQ, MIN_FREQ
+from batdetect2.preprocess.types import PreprocessorProtocol
+
+__all__ = [
+ "PreprocessorProtocol",
+ "MAX_FREQ",
+ "MIN_FREQ",
+ "PreprocessingConfig",
+ "Preprocessor",
+ "TARGET_SAMPLERATE_HZ",
+ "build_preprocessor",
+]
diff --git a/src/batdetect2/preprocess/audio.py b/src/batdetect2/preprocess/audio.py
new file mode 100644
index 00000000..9d5c7f05
--- /dev/null
+++ b/src/batdetect2/preprocess/audio.py
@@ -0,0 +1,240 @@
+"""Audio-level transforms applied to waveforms before spectrogram computation.
+
+This module defines ``torch.nn.Module`` transforms that operate on raw
+audio tensors and the Pydantic configuration classes that control them.
+Each transform is registered in the ``audio_transforms`` registry so that
+the pipeline can be assembled from a configuration object.
+
+The supported transforms are:
+
+* ``CenterAudio`` — subtract the DC offset (mean) from the waveform.
+* ``ScaleAudio`` — peak-normalise the waveform to the range ``[-1, 1]``.
+* ``FixDuration`` — truncate or zero-pad the waveform to a fixed length.
+"""
+
+from typing import Annotated, Literal
+
+import torch
+from pydantic import Field
+
+from batdetect2.audio import TARGET_SAMPLERATE_HZ
+from batdetect2.core import (
+ BaseConfig,
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.preprocess.common import center_tensor, peak_normalize
+
+__all__ = [
+ "AudioTransformImportConfig",
+ "CenterAudioConfig",
+ "ScaleAudioConfig",
+ "FixDurationConfig",
+ "build_audio_transform",
+]
+
+
+audio_transforms: Registry[torch.nn.Module, [int]] = Registry(
+ "audio_transform"
+)
+"""Registry mapping audio transform config classes to their builder methods."""
+
+
+@add_import_config(audio_transforms)
+class AudioTransformImportConfig(ImportConfig):
+    """Use any callable as an audio transform.
+
+    Set ``name="import"`` and provide a ``target`` pointing to any
+    callable to use it instead of a built-in option.
+    """
+
+    # Discriminator value used to select this config in a tagged union.
+    name: Literal["import"] = "import"
+
+
+class CenterAudioConfig(BaseConfig):
+    """Configuration for the DC-offset removal transform.
+
+    Attributes
+    ----------
+    name : str
+        Fixed identifier; always ``"center_audio"``.
+    """
+
+    # No tunable parameters: the transform always subtracts the global mean.
+    name: Literal["center_audio"] = "center_audio"
+
+
+class CenterAudio(torch.nn.Module):
+    """Remove the DC offset from an audio waveform.
+
+    Subtracts the global mean of the waveform from every sample,
+    centring the signal around zero. This is useful when an analogue
+    recording chain introduces a constant voltage bias.
+    """
+
+    def forward(self, wav: torch.Tensor) -> torch.Tensor:
+        """Subtract the mean from the waveform.
+
+        Parameters
+        ----------
+        wav : torch.Tensor
+            Input waveform tensor of shape ``(samples,)`` or
+            ``(channels, samples)``.
+
+        Returns
+        -------
+        torch.Tensor
+            Zero-centred waveform with the same shape as the input.
+        """
+        return center_tensor(wav)
+
+    # NOTE(review): the registry decorator wraps the @staticmethod object —
+    # assumes Registry.register handles staticmethods; confirm against
+    # batdetect2.core.Registry.
+    @audio_transforms.register(CenterAudioConfig)
+    @staticmethod
+    def from_config(config: CenterAudioConfig, samplerate: int):
+        """Build a CenterAudio instance (``samplerate`` is unused)."""
+        return CenterAudio()
+
+
+class ScaleAudioConfig(BaseConfig):
+    """Configuration for the peak-normalisation transform.
+
+    Attributes
+    ----------
+    name : str
+        Fixed identifier; always ``"scale_audio"``.
+    """
+
+    # No tunable parameters: normalisation always targets [-1, 1].
+    name: Literal["scale_audio"] = "scale_audio"
+
+
+class ScaleAudio(torch.nn.Module):
+    """Peak-normalise an audio waveform to the range ``[-1, 1]``.
+
+    Divides the waveform by its largest absolute sample value. If the
+    waveform is identically zero it is returned unchanged.
+    """
+
+    def forward(self, wav: torch.Tensor) -> torch.Tensor:
+        """Peak-normalise the waveform.
+
+        Parameters
+        ----------
+        wav : torch.Tensor
+            Input waveform tensor of any shape.
+
+        Returns
+        -------
+        torch.Tensor
+            Normalised waveform with the same shape as the input and
+            values in the range ``[-1, 1]``.
+        """
+        return peak_normalize(wav)
+
+    @audio_transforms.register(ScaleAudioConfig)
+    @staticmethod
+    def from_config(config: ScaleAudioConfig, samplerate: int):
+        """Build a ScaleAudio instance (``samplerate`` is unused)."""
+        return ScaleAudio()
+
+
+class FixDurationConfig(BaseConfig):
+    """Configuration for the fixed-duration transform.
+
+    Attributes
+    ----------
+    name : str
+        Fixed identifier; always ``"fix_duration"``.
+    duration : float, default=0.5
+        Target duration in seconds. The waveform will be truncated or
+        zero-padded to match this length.
+    """
+
+    name: Literal["fix_duration"] = "fix_duration"
+    # Converted to a sample count using the pipeline's sample rate.
+    duration: float = 0.5
+
+
+class FixDuration(torch.nn.Module):
+ """Ensure a waveform has exactly a specified number of samples.
+
+ If the input is longer than the target length it is truncated from
+ the end. If it is shorter, it is zero-padded at the end.
+
+ Parameters
+ ----------
+ samplerate : int
+ Sample rate of the audio in Hz. Used with ``duration`` to
+ compute the target number of samples.
+ duration : float
+ Target duration in seconds.
+ """
+
+ def __init__(self, samplerate: int, duration: float):
+ super().__init__()
+ self.samplerate = samplerate
+ self.duration = duration
+ self.length = int(samplerate * duration)
+
+ def forward(self, wav: torch.Tensor) -> torch.Tensor:
+ """Truncate or pad the waveform to the target length.
+
+ Parameters
+ ----------
+ wav : torch.Tensor
+ Input waveform tensor of shape ``(samples,)`` or
+ ``(channels, samples)``. The last dimension is adjusted.
+
+ Returns
+ -------
+ torch.Tensor
+ Waveform with exactly ``self.length`` samples along the last
+ dimension.
+ """
+ length = wav.shape[-1]
+
+ if length == self.length:
+ return wav
+
+ if length > self.length:
+ return wav[: self.length]
+
+ return torch.nn.functional.pad(wav, (0, self.length - length))
+
+ @audio_transforms.register(FixDurationConfig)
+ @staticmethod
+ def from_config(config: FixDurationConfig, samplerate: int):
+ return FixDuration(samplerate=samplerate, duration=config.duration)
+
+
+AudioTransform = Annotated[
+ FixDurationConfig | ScaleAudioConfig | CenterAudioConfig,
+ Field(discriminator="name"),
+]
+"""Discriminated union of all audio transform configuration types.
+
+Use this type when a field should accept any of the supported audio
+transforms. Pydantic will select the correct config class based on the
+``name`` field.
+"""
+
+
+def build_audio_transform(
+    config: AudioTransform,
+    samplerate: int = TARGET_SAMPLERATE_HZ,
+) -> torch.nn.Module:
+    """Build an audio transform module from a configuration object.
+
+    Parameters
+    ----------
+    config : AudioTransform
+        A configuration object for one of the supported audio transforms
+        (``CenterAudioConfig``, ``ScaleAudioConfig``, or
+        ``FixDurationConfig``).
+    samplerate : int, default=256000
+        Sample rate of the audio in Hz. Passed to the transform builder;
+        some transforms (e.g. ``FixDuration``) use it to convert seconds
+        to samples.
+
+    Returns
+    -------
+    torch.nn.Module
+        The constructed audio transform module.
+    """
+    # The registry dispatches on the concrete config type to the builder
+    # registered for it (e.g. FixDuration.from_config).
+    return audio_transforms.build(config, samplerate)
diff --git a/src/batdetect2/preprocess/common.py b/src/batdetect2/preprocess/common.py
new file mode 100644
index 00000000..be27b186
--- /dev/null
+++ b/src/batdetect2/preprocess/common.py
@@ -0,0 +1,60 @@
+"""Shared tensor primitives used across the preprocessing pipeline.
+
+This module provides small, stateless helper functions that operate on
+PyTorch tensors. They are used by both audio-level and spectrogram-level
+transforms, and are kept here to avoid duplication.
+"""
+
+import torch
+
+__all__ = [
+ "center_tensor",
+ "peak_normalize",
+]
+
+
+def center_tensor(tensor: torch.Tensor) -> torch.Tensor:
+ """Subtract the mean of a tensor from all of its values.
+
+ This centres the signal around zero, removing any constant DC offset.
+
+ Parameters
+ ----------
+ tensor : torch.Tensor
+ Input tensor of any shape.
+
+ Returns
+ -------
+ torch.Tensor
+ A new tensor of the same shape and dtype with the global mean
+ subtracted from every element.
+ """
+ return tensor - tensor.mean()
+
+
+def peak_normalize(tensor: torch.Tensor) -> torch.Tensor:
+ """Scale a tensor so that its largest absolute value equals one.
+
+ Divides the tensor by its peak absolute value. If the tensor is
+ identically zero, it is returned unchanged (no division by zero).
+
+ Parameters
+ ----------
+ tensor : torch.Tensor
+ Input tensor of any shape.
+
+ Returns
+ -------
+ torch.Tensor
+ A new tensor of the same shape and dtype with values in the range
+ ``[-1, 1]`` (or exactly ``[0, 0]`` for a zero tensor).
+ """
+ max_value = tensor.abs().max()
+
+ denominator = torch.where(
+ max_value == 0,
+ torch.tensor(1.0, device=tensor.device, dtype=tensor.dtype),
+ max_value,
+ )
+
+ return tensor / denominator
diff --git a/src/batdetect2/preprocess/config.py b/src/batdetect2/preprocess/config.py
new file mode 100644
index 00000000..5ebb0993
--- /dev/null
+++ b/src/batdetect2/preprocess/config.py
@@ -0,0 +1,75 @@
+"""Configuration for the full batdetect2 preprocessing pipeline.
+
+This module defines :class:`PreprocessingConfig`, which aggregates all
+configuration needed to convert a raw audio waveform into a normalised
+spectrogram ready for the detection model.
+"""
+
+from typing import List
+
+from pydantic import Field
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.preprocess.audio import AudioTransform
+from batdetect2.preprocess.spectrogram import (
+ FrequencyConfig,
+ PcenConfig,
+ ResizeConfig,
+ SpectralMeanSubtractionConfig,
+ SpectrogramTransform,
+ STFTConfig,
+)
+
+__all__ = [
+ "AudioTransform",
+ "PreprocessingConfig",
+]
+
+
+def _default_spectrogram_transforms() -> list[SpectrogramTransform]:
+    """Default transforms: PCEN followed by spectral mean subtraction."""
+    return [
+        PcenConfig(),
+        SpectralMeanSubtractionConfig(),
+    ]
+
+
+class PreprocessingConfig(BaseConfig):
+    """Unified configuration for the audio preprocessing pipeline.
+
+    Aggregates the parameters for every stage of the pipeline:
+    audio-level transforms, STFT computation, frequency cropping,
+    spectrogram-level transforms, and the final resize step.
+
+    Attributes
+    ----------
+    audio_transforms : list of AudioTransform, default=[]
+        Ordered list of transforms applied to the raw audio waveform
+        before the STFT is computed. Each entry is a configuration
+        object for one of the supported audio transforms
+        (``"center_audio"``, ``"scale_audio"``, or ``"fix_duration"``).
+    spectrogram_transforms : list of SpectrogramTransform
+        Ordered list of transforms applied to the cropped spectrogram
+        after the STFT and frequency crop steps. Defaults to
+        ``[PcenConfig(), SpectralMeanSubtractionConfig()]``, which
+        applies PCEN followed by spectral mean subtraction.
+    stft : STFTConfig
+        Parameters for the Short-Time Fourier Transform (window
+        duration, overlap, and window function).
+    frequencies : FrequencyConfig
+        Frequency range (in Hz) to retain after the STFT.
+    size : ResizeConfig
+        Target height (number of frequency bins) and time-axis scaling
+        factor for the final resize step.
+    """
+
+    # Empty by default: the raw waveform goes straight to the STFT.
+    audio_transforms: List[AudioTransform] = Field(default_factory=list)
+
+    spectrogram_transforms: List[SpectrogramTransform] = Field(
+        default_factory=_default_spectrogram_transforms
+    )
+
+    stft: STFTConfig = Field(default_factory=STFTConfig)
+
+    frequencies: FrequencyConfig = Field(default_factory=FrequencyConfig)
+
+    size: ResizeConfig = Field(default_factory=ResizeConfig)
diff --git a/src/batdetect2/preprocess/preprocessor.py b/src/batdetect2/preprocess/preprocessor.py
new file mode 100644
index 00000000..e8fcc913
--- /dev/null
+++ b/src/batdetect2/preprocess/preprocessor.py
@@ -0,0 +1,253 @@
+"""Assembles the full batdetect2 preprocessing pipeline.
+
+This module defines :class:`Preprocessor`, the concrete implementation of
+:class:`~batdetect2.preprocess.types.PreprocessorProtocol`, and the
+:func:`build_preprocessor` factory function that constructs it from a
+:class:`~batdetect2.preprocess.config.PreprocessingConfig`.
+
+The preprocessing pipeline converts a raw audio waveform (as a
+``torch.Tensor``) into a normalised, cropped, and resized spectrogram ready
+for the detection model. The stages are applied in this order:
+
+1. **Audio transforms** — optional waveform-level operations such as DC
+ removal, peak normalisation, or duration fixing.
+2. **STFT** — Short-Time Fourier Transform to produce an amplitude
+ spectrogram.
+3. **Frequency crop** — retain only the frequency band of interest.
+4. **Spectrogram transforms** — normalisation operations such as PCEN and
+ spectral mean subtraction.
+5. **Resize** — scale the spectrogram to the model's expected height and
+ reduce the time resolution.
+"""
+
+import torch
+from loguru import logger
+
+from batdetect2.audio import TARGET_SAMPLERATE_HZ
+from batdetect2.preprocess.audio import build_audio_transform
+from batdetect2.preprocess.config import PreprocessingConfig
+from batdetect2.preprocess.spectrogram import (
+ _spec_params_from_config,
+ build_spectrogram_builder,
+ build_spectrogram_crop,
+ build_spectrogram_resizer,
+ build_spectrogram_transform,
+)
+from batdetect2.preprocess.types import PreprocessorProtocol
+
+__all__ = [
+ "Preprocessor",
+ "build_preprocessor",
+]
+
+
+class Preprocessor(torch.nn.Module, PreprocessorProtocol):
+    """Standard implementation of the :class:`~batdetect2.preprocess.types.PreprocessorProtocol`.
+
+    Wraps all preprocessing stages as ``torch.nn.Module`` submodules so
+    that parameters (e.g. PCEN filter coefficients) can be tracked and
+    moved between devices.
+
+    Parameters
+    ----------
+    config : PreprocessingConfig
+        Full pipeline configuration.
+    input_samplerate : int
+        Sample rate of the audio that will be passed to this preprocessor,
+        in Hz.
+
+    Attributes
+    ----------
+    input_samplerate : int
+        Sample rate of the input audio in Hz.
+    output_samplerate : float
+        Effective frame rate of the output spectrogram in frames per second.
+        Computed from the STFT hop length and the time-axis resize factor.
+    min_freq : float
+        Lower bound of the retained frequency band in Hz.
+    max_freq : float
+        Upper bound of the retained frequency band in Hz.
+    """
+
+    input_samplerate: int
+    output_samplerate: float
+
+    max_freq: float
+    min_freq: float
+
+    def __init__(
+        self,
+        config: PreprocessingConfig,
+        input_samplerate: int,
+    ) -> None:
+        super().__init__()
+
+        # Stage 1: waveform-level transforms (DC removal, scaling, ...).
+        self.audio_transforms = torch.nn.Sequential(
+            *(
+                build_audio_transform(step, samplerate=input_samplerate)
+                for step in config.audio_transforms
+            )
+        )
+
+        # Stage 4: post-crop normalisation (e.g. PCEN, mean subtraction).
+        self.spectrogram_transforms = torch.nn.Sequential(
+            *(
+                build_spectrogram_transform(step, samplerate=input_samplerate)
+                for step in config.spectrogram_transforms
+            )
+        )
+
+        # Stage 2: STFT amplitude spectrogram.
+        self.spectrogram_builder = build_spectrogram_builder(
+            config.stft,
+            samplerate=input_samplerate,
+        )
+
+        # Stage 3: frequency crop; depends on the STFT bin layout.
+        self.spectrogram_crop = build_spectrogram_crop(
+            config.frequencies,
+            stft=config.stft,
+            samplerate=input_samplerate,
+        )
+
+        # Stage 5: resize to the model's expected height/time resolution.
+        self.spectrogram_resizer = build_spectrogram_resizer(config.size)
+
+        self.min_freq = config.frequencies.min_freq
+        self.max_freq = config.frequencies.max_freq
+
+        self.input_samplerate = input_samplerate
+        self.output_samplerate = compute_output_samplerate(
+            config,
+            input_samplerate=input_samplerate,
+        )
+
+    def forward(self, wav: torch.Tensor) -> torch.Tensor:
+        """Run the full preprocessing pipeline on a waveform.
+
+        Applies audio transforms, then the STFT, then
+        :meth:`process_spectrogram`.
+
+        Parameters
+        ----------
+        wav : torch.Tensor
+            Input waveform of shape ``(samples,)``.
+
+        Returns
+        -------
+        torch.Tensor
+            Preprocessed spectrogram of shape
+            ``(freq_bins, time_frames)``.
+        """
+        wav = self.audio_transforms(wav)
+        spec = self.spectrogram_builder(wav)
+        return self.process_spectrogram(spec)
+
+    def generate_spectrogram(self, wav: torch.Tensor) -> torch.Tensor:
+        """Compute the raw STFT spectrogram without any further processing.
+
+        Parameters
+        ----------
+        wav : torch.Tensor
+            Input waveform of shape ``(samples,)``.
+
+        Returns
+        -------
+        torch.Tensor
+            Amplitude spectrogram of shape ``(n_fft//2 + 1, time_frames)``
+            with no frequency cropping, normalisation, or resizing applied.
+        """
+        return self.spectrogram_builder(wav)
+
+    def process_audio(self, wav: torch.Tensor) -> torch.Tensor:
+        """Alias for :meth:`forward`.
+
+        Parameters
+        ----------
+        wav : torch.Tensor
+            Input waveform of shape ``(samples,)``.
+
+        Returns
+        -------
+        torch.Tensor
+            Preprocessed spectrogram (same as calling the object directly).
+        """
+        return self(wav)
+
+    def process_spectrogram(self, spec: torch.Tensor) -> torch.Tensor:
+        """Apply the post-STFT processing stages to an existing spectrogram.
+
+        Applies frequency cropping, spectrogram-level transforms (e.g.
+        PCEN, spectral mean subtraction), and the final resize step.
+
+        Parameters
+        ----------
+        spec : torch.Tensor
+            Raw amplitude spectrogram of shape
+            ``(..., n_fft//2 + 1, time_frames)``.
+
+        Returns
+        -------
+        torch.Tensor
+            Normalised and resized spectrogram of shape
+            ``(..., height, scaled_time_frames)``.
+        """
+        spec = self.spectrogram_crop(spec)
+        spec = self.spectrogram_transforms(spec)
+        return self.spectrogram_resizer(spec)
+
+
+def compute_output_samplerate(
+    config: PreprocessingConfig,
+    input_samplerate: int = TARGET_SAMPLERATE_HZ,
+) -> float:
+    """Compute the effective frame rate of the preprocessor's output.
+
+    The output frame rate (in frames per second) depends on the STFT hop
+    length and the time-axis resize factor applied by the final resize step.
+
+    Parameters
+    ----------
+    config : PreprocessingConfig
+        Pipeline configuration.
+    input_samplerate : int, default=256000
+        Sample rate of the input audio in Hz.
+
+    Returns
+    -------
+    float
+        Output frame rate in frames per second.
+        For example, at the default settings (256 kHz, hop=128,
+        resize_factor=0.5) this equals ``1000.0``.
+    """
+    _, hop_size = _spec_params_from_config(
+        config.stft, samplerate=input_samplerate
+    )
+    factor = config.size.resize_factor
+    # One frame per hop_size samples gives samplerate / hop_size frames per
+    # second; the time-axis resize then scales that rate by `factor`.
+    return input_samplerate * factor / hop_size
+
+
+def build_preprocessor(
+    config: PreprocessingConfig | None = None,
+    input_samplerate: int = TARGET_SAMPLERATE_HZ,
+) -> PreprocessorProtocol:
+    """Build the standard preprocessor from a configuration object.
+
+    Parameters
+    ----------
+    config : PreprocessingConfig, optional
+        Pipeline configuration. If ``None``, the default
+        ``PreprocessingConfig()`` is used (PCEN + spectral mean
+        subtraction, 256 kHz, standard STFT parameters).
+    input_samplerate : int, default=256000
+        Sample rate of the audio that will be fed to the preprocessor,
+        in Hz.
+
+    Returns
+    -------
+    PreprocessorProtocol
+        A :class:`Preprocessor` instance ready to convert waveforms to
+        spectrograms.
+    """
+    config = config or PreprocessingConfig()
+    # `lazy` defers the YAML serialisation unless DEBUG logging is enabled.
+    logger.opt(lazy=True).debug(
+        "Building preprocessor with config: \n{}",
+        lambda: config.to_yaml_string(),
+    )
+    return Preprocessor(config=config, input_samplerate=input_samplerate)
diff --git a/src/batdetect2/preprocess/spectrogram.py b/src/batdetect2/preprocess/spectrogram.py
new file mode 100644
index 00000000..9d983655
--- /dev/null
+++ b/src/batdetect2/preprocess/spectrogram.py
@@ -0,0 +1,820 @@
+"""Computes spectrograms from audio waveforms with configurable parameters.
+
+This module defines the STFT-based spectrogram builder and a collection of
+spectrogram-level transforms (PCEN, spectral mean subtraction, amplitude
+scaling, peak normalisation, frequency cropping, and resizing) that form the
+signal-processing stage of the batdetect2 preprocessing pipeline.
+
+Each transform is paired with a Pydantic configuration class and registered
+in the ``spectrogram_transforms`` registry so that the pipeline can be fully
+specified via a YAML or Python configuration object.
+"""
+
+from typing import Annotated, Callable, Literal
+
+import numpy as np
+import torch
+import torchaudio
+from pydantic import Field
+
+from batdetect2.audio import TARGET_SAMPLERATE_HZ
+from batdetect2.core.configs import BaseConfig
+from batdetect2.core.registries import (
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.preprocess.common import peak_normalize
+
+__all__ = [
+ "STFTConfig",
+ "SpectrogramTransformImportConfig",
+ "build_spectrogram_transform",
+ "build_spectrogram_builder",
+]
+
+
+MIN_FREQ = 10_000
+"""Default minimum frequency (Hz) for spectrogram frequency cropping."""
+
+MAX_FREQ = 120_000
+"""Default maximum frequency (Hz) for spectrogram frequency cropping."""
+
+
class STFTConfig(BaseConfig):
    """Configuration for the Short-Time Fourier Transform (STFT).

    Attributes
    ----------
    window_duration : float, default=0.002
        Duration of the STFT analysis window in seconds (e.g. 0.002 for
        2 ms). Must be > 0. A longer window gives finer frequency resolution
        but coarser time resolution.
    window_overlap : float, default=0.75
        Fraction of overlap between consecutive windows (e.g. 0.75 for
        75 %). Must be >= 0 and < 1. Higher overlap gives finer time
        resolution at the cost of more computation.
    window_fn : str, default="hann"
        Name of the tapering window applied to each frame before the FFT.
        Supported values: ``"hann"``, ``"hamming"``, ``"kaiser"``,
        ``"blackman"``, ``"bartlett"``.

    Notes
    -----
    At the default sample rate of 256 kHz, ``window_duration=0.002`` and
    ``window_overlap=0.75`` give ``n_fft=512`` and ``hop_length=128``.
    """

    window_duration: float = Field(default=0.002, gt=0)  # seconds
    # hop_length = n_fft * (1 - window_overlap); see _spec_params_from_config
    window_overlap: float = Field(default=0.75, ge=0, lt=1)
    window_fn: str = "hann"  # validated at build time by get_spectrogram_window
+
+
def build_spectrogram_builder(
    config: STFTConfig,
    samplerate: int = TARGET_SAMPLERATE_HZ,
) -> torch.nn.Module:
    """Create a torchaudio STFT spectrogram module from an ``STFTConfig``.

    Parameters
    ----------
    config : STFTConfig
        STFT parameters (window duration, overlap, and window function).
    samplerate : int, default=256000
        Sample rate of the input audio in Hz; converts the window
        duration into a sample count.

    Returns
    -------
    torch.nn.Module
        A ``torchaudio.transforms.Spectrogram`` producing an amplitude
        (``power=1``) spectrogram with centred frames.
    """
    n_fft, hop_length = _spec_params_from_config(config, samplerate=samplerate)
    window_fn = get_spectrogram_window(config.window_fn)
    return torchaudio.transforms.Spectrogram(
        n_fft=n_fft,
        hop_length=hop_length,
        window_fn=window_fn,
        center=True,
        power=1,  # amplitude (magnitude) spectrogram
    )
+
+
# Name -> PyTorch window factory. A mapping replaces the previous if-chain
# and keeps the supported set in one obvious place.
_WINDOW_FNS: dict[str, Callable[..., torch.Tensor]] = {
    "hann": torch.hann_window,
    "hamming": torch.hamming_window,
    "kaiser": torch.kaiser_window,
    "blackman": torch.blackman_window,
    "bartlett": torch.bartlett_window,
}


def get_spectrogram_window(name: str) -> Callable[..., torch.Tensor]:
    """Return the PyTorch window function matching the given name.

    Parameters
    ----------
    name : str
        Name of the window function. One of ``"hann"``, ``"hamming"``,
        ``"kaiser"``, ``"blackman"``, or ``"bartlett"``.

    Returns
    -------
    Callable[..., torch.Tensor]
        A PyTorch window function that accepts a window length and returns
        a 1-D tensor of weights.

    Raises
    ------
    NotImplementedError
        If ``name`` does not match any supported window function.
    """
    window = _WINDOW_FNS.get(name)
    if window is None:
        raise NotImplementedError(
            f"Spectrogram window function {name} not implemented"
        )
    return window
+
+
def _spec_params_from_config(
    config: STFTConfig,
    samplerate: int = TARGET_SAMPLERATE_HZ,
) -> tuple[int, int]:
    """Derive ``(n_fft, hop_length)`` from an ``STFTConfig``.

    Parameters
    ----------
    config : STFTConfig
        STFT parameters.
    samplerate : int, default=256000
        Sample rate of the input audio in Hz.

    Returns
    -------
    tuple[int, int]
        FFT size and step between consecutive frames, both in samples.
    """
    window_samples = int(samplerate * config.window_duration)
    step_fraction = 1 - config.window_overlap
    return window_samples, int(window_samples * step_fraction)
+
+
def _frequency_to_index(
    freq: float,
    n_fft: int,
    samplerate: int = TARGET_SAMPLERATE_HZ,
) -> int | None:
    """Convert a frequency in Hz to the nearest STFT frequency bin index.

    Parameters
    ----------
    freq : float
        Frequency in Hz to convert.
    n_fft : int
        FFT size used by the STFT.
    samplerate : int, default=256000
        Sample rate of the audio in Hz.

    Returns
    -------
    int or None
        The bin index corresponding to ``freq``, or ``None`` when the
        frequency lies outside the usable range (<= 0 Hz or >= Nyquist).
    """
    n_bins = np.floor(n_fft / 2) + 1
    fraction_of_nyquist = freq * 2 / samplerate
    bin_index = int(np.floor(fraction_of_nyquist * n_bins))

    # Reject the DC bin and anything at or beyond the last bin.
    if 0 < bin_index < n_bins:
        return bin_index

    return None
+
+
class FrequencyConfig(BaseConfig):
    """Configuration for frequency axis parameters.

    Attributes
    ----------
    max_freq : int, default=120000
        Maximum frequency in Hz to retain after STFT. Frequency bins
        above this value are discarded. Must be >= 0.
    min_freq : int, default=10000
        Minimum frequency in Hz to retain after STFT. Frequency bins
        below this value are discarded. Must be >= 0.
    """

    # NOTE(review): there is no cross-field check that min_freq < max_freq;
    # confirm whether an inverted band should be rejected here.
    max_freq: int = Field(default=MAX_FREQ, ge=0)
    min_freq: int = Field(default=MIN_FREQ, ge=0)
+
+
+class FrequencyCrop(torch.nn.Module):
+ """Crop a spectrogram to a specified frequency band.
+
+ On construction the Hz boundaries are converted to STFT bin indices.
+ During the forward pass the spectrogram is sliced along its
+ frequency axis (second-to-last dimension) to retain only the bins
+ that fall within ``[min_freq, max_freq)``.
+
+ Parameters
+ ----------
+ samplerate : int
+ Sample rate of the audio in Hz.
+ n_fft : int
+ FFT size used by the STFT.
+ min_freq : int, optional
+ Lower frequency bound in Hz. If ``None``, no lower crop is
+ applied and the DC bin is retained.
+ max_freq : int, optional
+ Upper frequency bound in Hz. If ``None``, no upper crop is
+ applied and all bins up to Nyquist are retained.
+ """
+
+ def __init__(
+ self,
+ samplerate: int,
+ n_fft: int,
+ min_freq: int | None = None,
+ max_freq: int | None = None,
+ ):
+ super().__init__()
+ self.n_fft = n_fft
+ self.samplerate = samplerate
+ self.min_freq = min_freq
+ self.max_freq = max_freq
+
+ low_index = None
+ if min_freq is not None:
+ low_index = _frequency_to_index(
+ min_freq,
+ n_fft=self.n_fft,
+ samplerate=self.samplerate,
+ )
+ self.low_index = low_index
+
+ high_index = None
+ if max_freq is not None:
+ high_index = _frequency_to_index(
+ max_freq,
+ n_fft=self.n_fft,
+ samplerate=self.samplerate,
+ )
+ self.high_index = high_index
+
+ def forward(self, spec: torch.Tensor) -> torch.Tensor:
+ """Crop the spectrogram to the configured frequency band.
+
+ Parameters
+ ----------
+ spec : torch.Tensor
+ Spectrogram tensor of shape ``(..., freq_bins, time_frames)``.
+
+ Returns
+ -------
+ torch.Tensor
+ Cropped spectrogram with shape
+ ``(..., n_retained_bins, time_frames)``.
+ """
+ low_index = self.low_index
+ if low_index is None:
+ low_index = 0
+
+ if self.high_index is None:
+ length = spec.shape[-2] - low_index
+ else:
+ length = self.high_index - low_index
+
+ return torch.narrow(
+ spec,
+ dim=-2,
+ start=low_index,
+ length=length,
+ )
+
+
def build_spectrogram_crop(
    config: FrequencyConfig,
    stft: STFTConfig | None = None,
    samplerate: int = TARGET_SAMPLERATE_HZ,
) -> torch.nn.Module:
    """Construct a ``FrequencyCrop`` from configuration objects.

    Parameters
    ----------
    config : FrequencyConfig
        Band boundaries (``min_freq``/``max_freq``) in Hz.
    stft : STFTConfig, optional
        STFT settings used to derive ``n_fft``; defaults to
        ``STFTConfig()``.
    samplerate : int, default=256000
        Sample rate of the audio in Hz.

    Returns
    -------
    torch.nn.Module
        A ``FrequencyCrop`` module for the requested band.
    """
    stft = stft or STFTConfig()
    fft_size, _ = _spec_params_from_config(stft, samplerate=samplerate)
    return FrequencyCrop(
        samplerate=samplerate,
        n_fft=fft_size,
        min_freq=config.min_freq,
        max_freq=config.max_freq,
    )
+
+
class ResizeConfig(BaseConfig):
    """Configuration for the final spectrogram resize step.

    Attributes
    ----------
    name : str
        Fixed identifier; always ``"resize_spec"``.
    height : int, default=128
        Target number of frequency bins in the output spectrogram.
        The spectrogram is resized to this height using bilinear
        interpolation.
    resize_factor : float, default=0.5
        Fraction by which the time axis is scaled. For example, ``0.5``
        halves the number of time frames, reducing computational cost
        downstream.
    """

    name: Literal["resize_spec"] = "resize_spec"
    height: int = 128  # output frequency bins
    resize_factor: float = 0.5  # time-axis scale; 0.5 halves the frame count
+
+
class ResizeSpec(torch.nn.Module):
    """Rescale a spectrogram to a fixed height and a scaled width.

    Bilinear interpolation is used, so arbitrary input sizes are handled
    gracefully. Inputs with fewer than four dimensions are temporarily
    padded with leading singleton dimensions, as
    ``torch.nn.functional.interpolate`` expects 4-D input in bilinear
    mode.

    Parameters
    ----------
    height : int
        Target number of frequency bins (output height).
    time_factor : float
        Multiplicative scale applied to the number of time frames.
    """

    def __init__(self, height: int, time_factor: float):
        super().__init__()
        self.height = height
        self.time_factor = time_factor

    def forward(self, spec: torch.Tensor) -> torch.Tensor:
        """Resize ``spec`` to the configured output dimensions.

        Parameters
        ----------
        spec : torch.Tensor
            Input spectrogram of shape ``(..., freq_bins, time_frames)``.

        Returns
        -------
        torch.Tensor
            Resized spectrogram of shape
            ``(..., height, int(time_factor * time_frames))``.
        """
        new_width = int(self.time_factor * spec.shape[-1])

        # Pad up to 4 dimensions with leading singleton axes.
        ndim_in = spec.ndim
        batched = spec
        for _ in range(4 - ndim_in):
            batched = batched.unsqueeze(0)

        resized = torch.nn.functional.interpolate(
            batched,
            size=(self.height, new_width),
            mode="bilinear",
        )

        # Drop the padding axes so the output rank matches the input rank.
        for _ in range(resized.ndim - ndim_in):
            resized = resized.squeeze(0)

        return resized
+
+
def build_spectrogram_resizer(config: ResizeConfig) -> torch.nn.Module:
    """Construct a ``ResizeSpec`` from a ``ResizeConfig``.

    Parameters
    ----------
    config : ResizeConfig
        Target ``height`` and time-axis ``resize_factor``.

    Returns
    -------
    torch.nn.Module
        The configured ``ResizeSpec`` module.
    """
    return ResizeSpec(
        height=config.height,
        time_factor=config.resize_factor,
    )
+
+
# Registry of spectrogram transform builders; each registered builder is
# invoked with (config, samplerate) — see build_spectrogram_transform.
spectrogram_transforms: Registry[torch.nn.Module, [int]] = Registry(
    "spectrogram_transform"
)


@add_import_config(spectrogram_transforms)
class SpectrogramTransformImportConfig(ImportConfig):
    """Use any callable as a spectrogram transform.

    Set ``name="import"`` and provide a ``target`` pointing to any
    callable to use it instead of a built-in option.
    """

    name: Literal["import"] = "import"
+
+
class PcenConfig(BaseConfig):
    """Configuration for Per-Channel Energy Normalisation (PCEN).

    PCEN is a frontend processing technique that replaces simple log
    compression. It applies a learnable automatic gain control followed
    by a stabilised root compression, making the representation more
    robust to variations in recording level.

    Attributes
    ----------
    name : str
        Fixed identifier; always ``"pcen"``.
    time_constant : float, default=0.4
        Time constant (in seconds) of the IIR smoothing filter used
        for the background estimate. Larger values produce a slower-
        adapting background.
    gain : float, default=0.98
        Exponent controlling how strongly the background estimate
        suppresses the signal.
    bias : float, default=2
        Stabilisation bias added inside the root-compression step to
        avoid division by zero.
    power : float, default=0.5
        Root-compression exponent. A value of 0.5 gives square-root
        compression, similar to log compression but differentiable at
        zero.
    """

    name: Literal["pcen"] = "pcen"
    time_constant: float = 0.4  # seconds
    gain: float = 0.98
    bias: float = 2
    power: float = 0.5
+
+
class PCEN(torch.nn.Module):
    """Per-Channel Energy Normalisation (PCEN) transform.

    Applies automatic gain control and root compression to a spectrogram.
    The background estimate is computed with a first-order IIR filter
    applied along the time axis.

    Parameters
    ----------
    smoothing_constant : float
        IIR filter coefficient ``alpha``. Derived from the time constant
        and sample rate via ``_compute_smoothing_constant``.
    gain : float, default=0.98
        AGC gain exponent.
    bias : float, default=2.0
        Root-compression stabilisation bias.
    power : float, default=0.5
        Root-compression exponent.
    eps : float, default=1e-6
        Small constant for numerical stability.
    dtype : torch.dtype, default=torch.float32
        Floating-point precision used for internal computation.

    Notes
    -----
    The smoothing constant is computed to match the original batdetect2
    implementation for numerical compatibility. See
    ``_compute_smoothing_constant`` for details.
    """

    def __init__(
        self,
        smoothing_constant: float,
        gain: float = 0.98,
        bias: float = 2.0,
        power: float = 0.5,
        eps: float = 1e-6,
        dtype=torch.float32,
    ):
        super().__init__()
        self.smoothing_constant = smoothing_constant
        self.gain = torch.tensor(gain, dtype=dtype)
        self.bias = torch.tensor(bias, dtype=dtype)
        self.power = torch.tensor(power, dtype=dtype)
        self.eps = torch.tensor(eps, dtype=dtype)
        self.dtype = dtype

        # First-order IIR y[t] = s*x[t] + (1 - s)*y[t-1], expressed as
        # b = [s, 0], a = [1, s - 1] for torchaudio's lfilter.
        self._b = torch.tensor([self.smoothing_constant, 0.0], dtype=dtype)
        self._a = torch.tensor(
            [1.0, self.smoothing_constant - 1.0], dtype=dtype
        )

    def forward(self, spec: torch.Tensor) -> torch.Tensor:
        """Apply PCEN to a spectrogram.

        Parameters
        ----------
        spec : torch.Tensor
            Input amplitude spectrogram of shape
            ``(..., freq_bins, time_frames)``.

        Returns
        -------
        torch.Tensor
            PCEN-normalised spectrogram with the same shape and dtype as
            the input.
        """
        # Scale into an int32-like range; presumably mirrors the input
        # scaling of the original librosa-based implementation — confirm.
        S = spec.to(self.dtype) * 2**31

        # Smoothed background estimate M along the last (time) axis.
        # torchaudio.functional.lfilter takes a_coeffs BEFORE b_coeffs.
        M = (
            torchaudio.functional.lfilter(
                S,
                self._a,
                self._b,
                clamp=False,
            )
        ).clamp(min=0)

        # Numerically stable form of (eps + M) ** (-gain).
        smooth = torch.exp(
            -self.gain * (torch.log(self.eps) + torch.log1p(M / self.eps))
        )

        # Numerically stable form of
        # (bias + S * smooth) ** power - bias ** power.
        return (
            (self.bias**self.power)
            * torch.expm1(self.power * torch.log1p(S * smooth / self.bias))
        ).to(spec.dtype)

    # NOTE(review): the register decorator receives the staticmethod object;
    # staticmethod is only directly callable on Python >= 3.10 — confirm the
    # registry and supported Python versions handle this.
    @spectrogram_transforms.register(PcenConfig)
    @staticmethod
    def from_config(config: PcenConfig, samplerate: int) -> "PCEN":
        smooth = _compute_smoothing_constant(samplerate, config.time_constant)
        return PCEN(
            smoothing_constant=smooth,
            gain=config.gain,
            bias=config.bias,
            power=config.power,
        )
+
+
+def _compute_smoothing_constant(
+ samplerate: int,
+ time_constant: float,
+) -> float:
+ """Compute the IIR smoothing coefficient for PCEN.
+
+ Parameters
+ ----------
+ samplerate : int
+ Sample rate of the audio in Hz.
+ time_constant : float
+ Desired smoothing time constant in seconds.
+
+ Returns
+ -------
+ float
+ IIR filter coefficient ``alpha`` used by ``PCEN``.
+
+ Notes
+ -----
+ The hop length (512) and the sample-rate divisor (10) are fixed to
+ reproduce the numerical behaviour of the original batdetect2
+ implementation, which used ``librosa.pcen`` with ``sr=samplerate/10``
+ and the default ``hop_length=512``. These values do not reflect the
+ actual STFT hop length used in the pipeline; they are retained
+ solely for backward compatibility.
+ """
+ # NOTE: These parameters are fixed to match the original implementation.
+ hop_length = 512
+ sr = samplerate / 10
+ t_frames = time_constant * sr / float(hop_length)
+ return (np.sqrt(1 + 4 * t_frames**2) - 1) / (2 * t_frames**2)
+
+
class ScaleAmplitudeConfig(BaseConfig):
    """Configuration for amplitude scaling of a spectrogram.

    Attributes
    ----------
    name : str
        Fixed identifier; always ``"scale_amplitude"``.
    scale : str, default="db"
        Scaling mode. Either ``"db"`` (convert amplitude to decibels
        using ``torchaudio.transforms.AmplitudeToDB``) or ``"power"``
        (square the amplitude values).
    """

    name: Literal["scale_amplitude"] = "scale_amplitude"
    # The value is used as a lookup key into the module-level _scalers map.
    scale: Literal["power", "db"] = "db"
+
+
class ToPower(torch.nn.Module):
    """Convert an amplitude spectrogram into a power spectrogram.

    Every element is squared.
    """

    def forward(self, spec: torch.Tensor) -> torch.Tensor:
        """Return the element-wise square of ``spec``.

        Parameters
        ----------
        spec : torch.Tensor
            Input amplitude spectrogram.

        Returns
        -------
        torch.Tensor
            Power spectrogram of the same shape.
        """
        return torch.square(spec)
+
+
# Maps the ``scale`` option to the module class implementing it.
_scalers = {
    "db": torchaudio.transforms.AmplitudeToDB,
    "power": ToPower,
}


class ScaleAmplitude(torch.nn.Module):
    """Re-express spectrogram amplitudes on a different scale.

    Either converts amplitudes to decibels (``"db"``) or squares them
    into power values (``"power"``).

    Parameters
    ----------
    scale : str
        Either ``"db"`` or ``"power"``.
    """

    def __init__(self, scale: Literal["power", "db"]):
        super().__init__()
        self.scale = scale
        # Look up and instantiate the matching scaler module.
        self.scaler = _scalers[scale]()

    def forward(self, spec: torch.Tensor) -> torch.Tensor:
        """Apply the configured amplitude scaling to ``spec``.

        Parameters
        ----------
        spec : torch.Tensor
            Input spectrogram tensor.

        Returns
        -------
        torch.Tensor
            Scaled spectrogram with the same shape as the input.
        """
        return self.scaler(spec)

    @spectrogram_transforms.register(ScaleAmplitudeConfig)
    @staticmethod
    def from_config(config: ScaleAmplitudeConfig, samplerate: int):
        # samplerate is part of the registry signature but unused here.
        return ScaleAmplitude(scale=config.scale)
+
+
class SpectralMeanSubtractionConfig(BaseConfig):
    """Configuration for spectral mean subtraction.

    Attributes
    ----------
    name : str
        Fixed identifier; always ``"spectral_mean_subtraction"``.
    """

    # This transform has no parameters beyond the discriminator.
    name: Literal["spectral_mean_subtraction"] = "spectral_mean_subtraction"
+
+
class SpectralMeanSubtraction(torch.nn.Module):
    """Suppress stationary background noise in a spectrogram.

    Each frequency bin's mean over time is treated as the noise floor and
    subtracted; the result is clamped at zero so no value drops below the
    baseline. This is a simple form of spectral denoising.
    """

    def forward(self, spec: torch.Tensor) -> torch.Tensor:
        """Remove the per-bin time average from ``spec``.

        Parameters
        ----------
        spec : torch.Tensor
            Spectrogram of shape ``(..., freq_bins, time_frames)``.

        Returns
        -------
        torch.Tensor
            Denoised, non-negative spectrogram of the same shape.
        """
        background = spec.mean(dim=-1, keepdim=True)
        return torch.clamp(spec - background, min=0)

    @spectrogram_transforms.register(SpectralMeanSubtractionConfig)
    @staticmethod
    def from_config(
        config: SpectralMeanSubtractionConfig,
        samplerate: int,
    ):
        # Neither the config nor the samplerate carries parameters here.
        return SpectralMeanSubtraction()
+
+
class PeakNormalizeConfig(BaseConfig):
    """Configuration for peak normalisation of a spectrogram.

    Attributes
    ----------
    name : str
        Fixed identifier; always ``"peak_normalize"``.
    """

    # This transform has no parameters beyond the discriminator.
    name: Literal["peak_normalize"] = "peak_normalize"
+
+
class PeakNormalize(torch.nn.Module):
    """Scale a spectrogram so that its largest absolute value equals one.

    Wraps :func:`batdetect2.preprocess.common.peak_normalize` as a
    ``torch.nn.Module`` for use inside a sequential transform pipeline.
    """

    def forward(self, spec: torch.Tensor) -> torch.Tensor:
        """Peak-normalise the spectrogram.

        Parameters
        ----------
        spec : torch.Tensor
            Input spectrogram tensor of any shape.

        Returns
        -------
        torch.Tensor
            Normalised spectrogram where the maximum absolute value is 1.
            If the input is identically zero, it is returned unchanged.
        """
        return peak_normalize(spec)

    @spectrogram_transforms.register(PeakNormalizeConfig)
    @staticmethod
    def from_config(config: PeakNormalizeConfig, samplerate: int):
        # Both arguments are unused; kept for registry signature uniformity.
        return PeakNormalize()
+
+
SpectrogramTransform = Annotated[
    PcenConfig
    | ScaleAmplitudeConfig
    | SpectralMeanSubtractionConfig
    | PeakNormalizeConfig,
    Field(discriminator="name"),
]
"""Discriminated union of all spectrogram transform configuration types.

Use this type when a field should accept any of the supported spectrogram
transforms. Pydantic will select the correct config class based on the
``name`` field.
"""
# NOTE(review): SpectrogramTransformImportConfig is registered with the
# transform registry but is absent from this union — confirm whether
# import-based transforms should be accepted in config fields too.
+
+
def build_spectrogram_transform(
    config: SpectrogramTransform,
    samplerate: int,
) -> torch.nn.Module:
    """Instantiate a spectrogram transform module from its configuration.

    Parameters
    ----------
    config : SpectrogramTransform
        Configuration for one of the supported transforms (PCEN,
        amplitude scaling, spectral mean subtraction, or peak
        normalisation).
    samplerate : int
        Audio sample rate in Hz; forwarded to builders (e.g. PCEN) that
        derive internal parameters from it.

    Returns
    -------
    torch.nn.Module
        The constructed transform module.
    """
    # Dispatch on the config's discriminator via the transform registry.
    return spectrogram_transforms.build(config, samplerate)
diff --git a/src/batdetect2/preprocess/types.py b/src/batdetect2/preprocess/types.py
new file mode 100644
index 00000000..39485e9c
--- /dev/null
+++ b/src/batdetect2/preprocess/types.py
@@ -0,0 +1,31 @@
+from typing import Protocol
+
+import numpy as np
+import torch
+
+__all__ = [
+ "PreprocessorProtocol",
+ "SpectrogramBuilder",
+]
+
+
class SpectrogramBuilder(Protocol):
    """Callable that converts a waveform tensor into a spectrogram tensor."""

    def __call__(self, wav: torch.Tensor) -> torch.Tensor: ...
+
+
class PreprocessorProtocol(Protocol):
    """Structural interface for waveform-to-spectrogram preprocessors."""

    # Presumed upper/lower frequency bounds (Hz) of the output band and the
    # input/output rates — confirm semantics against implementations.
    max_freq: float
    min_freq: float
    input_samplerate: int
    output_samplerate: float

    # Full pipeline: waveform in, processed spectrogram out.
    def __call__(self, wav: torch.Tensor) -> torch.Tensor: ...

    def generate_spectrogram(self, wav: torch.Tensor) -> torch.Tensor: ...

    def process_audio(self, wav: torch.Tensor) -> torch.Tensor: ...

    def process_spectrogram(self, spec: torch.Tensor) -> torch.Tensor: ...

    # Convenience wrapper with a default implementation.
    # NOTE(review): assumes __call__ returns a grad-free CPU tensor;
    # .numpy() would raise otherwise — confirm with implementations.
    def process_numpy(self, wav: np.ndarray) -> np.ndarray:
        return self(torch.tensor(wav)).numpy()
diff --git a/src/batdetect2/targets/__init__.py b/src/batdetect2/targets/__init__.py
new file mode 100644
index 00000000..f4fc12d3
--- /dev/null
+++ b/src/batdetect2/targets/__init__.py
@@ -0,0 +1,70 @@
+"""BatDetect2 Target Definition system."""
+
+from batdetect2.targets.classes import (
+ TargetClassConfig,
+ build_sound_event_decoder,
+ build_sound_event_encoder,
+ get_class_names_from_config,
+)
+from batdetect2.targets.config import TargetConfig, build_default_target_config
+from batdetect2.targets.rois import (
+ AnchorBBoxMapperConfig,
+ ROIMapperConfig,
+ ROIMapperProtocol,
+ ROIMappingConfig,
+ build_roi_mapper,
+ build_roi_mapping,
+)
+from batdetect2.targets.targets import (
+ Targets,
+ build_targets,
+ iterate_encoded_sound_events,
+ load_targets,
+)
+from batdetect2.targets.terms import (
+ call_type,
+ data_source,
+ generic_class,
+ individual,
+)
+from batdetect2.targets.types import (
+ Position,
+ ROITargetMapper,
+ Size,
+ SoundEventDecoder,
+ SoundEventEncoder,
+ SoundEventFilter,
+ TargetProtocol,
+)
+from batdetect2.targets.utils import check_target_compatibility
+
+__all__ = [
+ "AnchorBBoxMapperConfig",
+ "Position",
+ "ROIMapperConfig",
+ "ROIMapperProtocol",
+ "ROIMappingConfig",
+ "ROITargetMapper",
+ "Size",
+ "SoundEventDecoder",
+ "SoundEventEncoder",
+ "SoundEventFilter",
+ "TargetClassConfig",
+ "TargetConfig",
+ "TargetProtocol",
+ "Targets",
+ "build_default_target_config",
+ "build_roi_mapper",
+ "build_roi_mapping",
+ "build_sound_event_decoder",
+ "build_sound_event_encoder",
+ "build_targets",
+ "call_type",
+ "check_target_compatibility",
+ "data_source",
+ "generic_class",
+ "get_class_names_from_config",
+ "individual",
+ "iterate_encoded_sound_events",
+ "load_targets",
+]
diff --git a/src/batdetect2/targets/classes.py b/src/batdetect2/targets/classes.py
new file mode 100644
index 00000000..78c66a83
--- /dev/null
+++ b/src/batdetect2/targets/classes.py
@@ -0,0 +1,239 @@
+from typing import Dict, List
+
+from pydantic import Field, PrivateAttr, computed_field, model_validator
+from soundevent import data
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.data.conditions import (
+ AllOfConfig,
+ HasAllTagsConfig,
+ HasAnyTagConfig,
+ HasTagConfig,
+ NotConfig,
+ SoundEventCondition,
+ SoundEventConditionConfig,
+ TagInfo,
+ build_sound_event_condition,
+)
+from batdetect2.targets.terms import call_type, generic_class
+from batdetect2.targets.types import SoundEventDecoder, SoundEventEncoder
+
+__all__ = [
+ "build_sound_event_decoder",
+ "build_sound_event_encoder",
+ "get_class_names_from_config",
+]
+
+
class TargetClassConfig(BaseConfig):
    """Defines a target class of sound events.

    A class is matched either by an explicit ``match_if`` condition or by
    a ``tags`` shorthand ("has all of these tags"); exactly one of the two
    must be provided. ``assign_tags`` lists the tags attached when this
    class is decoded back to an annotation.
    """

    name: str

    # Raw user-provided condition, accepted under the public alias
    # "match_if". Excluded from serialisation because the computed
    # ``match_if`` property below is emitted in its place.
    condition_input: SoundEventConditionConfig | None = Field(
        alias="match_if",
        default=None,
        exclude=True,
    )

    # Shorthand alternative: match events carrying all of these tags.
    tags: List[data.Tag] | None = Field(default=None, exclude=True)

    # Tags attached on decoding; defaults to ``tags`` when the shorthand
    # form is used (see the validator below).
    assign_tags: List[TagInfo] = Field(default_factory=list)

    # Normalised condition derived by the validator below.
    _match_if: SoundEventConditionConfig = PrivateAttr()

    @model_validator(mode="after")
    def _process_tags(self) -> "TargetClassConfig":
        # Enforce tags/match_if mutual exclusivity, then derive the
        # normalised private condition.
        if self.tags and self.condition_input:
            raise ValueError("Use either 'tags' or 'match_if', not both.")

        if self.condition_input is not None:
            self._match_if = self.condition_input
            return self

        if self.tags is None:
            raise ValueError(
                f"Class '{self.name}' must have a 'tags' or 'match_if' rule."
            )

        self._match_if = HasAllTagsConfig(tags=self.tags)

        # Shorthand convenience: decode back to the same tags we match on.
        if not self.assign_tags:
            self.assign_tags = self.tags

        return self

    @computed_field
    @property
    def match_if(self) -> SoundEventConditionConfig:
        # Serialised under the same name as the input alias so dumps always
        # show the normalised condition form.
        return self._match_if
+
+
# Default detection target: echolocation calls that are not also tagged as
# feeding/social/unknown/not-bat. Decoded detections are tagged as
# echolocation calls of order Chiroptera.
DEFAULT_DETECTION_CLASS = TargetClassConfig(
    name="bat",
    match_if=AllOfConfig(
        conditions=[
            HasTagConfig(tag=data.Tag(term=call_type, value="Echolocation")),
            NotConfig(
                condition=HasAnyTagConfig(
                    tags=[
                        data.Tag(term=call_type, value="Feeding"),
                        data.Tag(term=call_type, value="Social"),
                        data.Tag(term=call_type, value="Unknown"),
                        data.Tag(term=generic_class, value="Unknown"),
                        data.Tag(term=generic_class, value="Not Bat"),
                        data.Tag(term=call_type, value="Not Bat"),
                    ]
                )
            ),
        ]
    ),
    assign_tags=[
        data.Tag(term=call_type, value="Echolocation"),
        data.Tag(key="order", value="Chiroptera"),
    ],
)


# Default classification classes: one per species, matched (and decoded)
# via a "class" tag holding the scientific name.
DEFAULT_CLASSES = [
    TargetClassConfig(
        name="barbar",
        tags=[data.Tag(key="class", value="Barbastella barbastellus")],
    ),
    TargetClassConfig(
        name="eptser",
        tags=[data.Tag(key="class", value="Eptesicus serotinus")],
    ),
    TargetClassConfig(
        name="myoalc",
        tags=[data.Tag(key="class", value="Myotis alcathoe")],
    ),
    TargetClassConfig(
        name="myobec",
        tags=[data.Tag(key="class", value="Myotis bechsteinii")],
    ),
    TargetClassConfig(
        name="myobra",
        tags=[data.Tag(key="class", value="Myotis brandtii")],
    ),
    TargetClassConfig(
        name="myodau",
        tags=[data.Tag(key="class", value="Myotis daubentonii")],
    ),
    TargetClassConfig(
        name="myomys",
        tags=[data.Tag(key="class", value="Myotis mystacinus")],
    ),
    TargetClassConfig(
        name="myonat",
        tags=[data.Tag(key="class", value="Myotis nattereri")],
    ),
    TargetClassConfig(
        name="nyclei",
        tags=[data.Tag(key="class", value="Nyctalus leisleri")],
    ),
    TargetClassConfig(
        name="nycnoc",
        tags=[data.Tag(key="class", value="Nyctalus noctula")],
    ),
    TargetClassConfig(
        name="pipnat",
        tags=[data.Tag(key="class", value="Pipistrellus nathusii")],
    ),
    TargetClassConfig(
        name="pippip",
        tags=[data.Tag(key="class", value="Pipistrellus pipistrellus")],
    ),
    TargetClassConfig(
        name="pippyg",
        tags=[data.Tag(key="class", value="Pipistrellus pygmaeus")],
    ),
    TargetClassConfig(
        name="pleaur",
        tags=[data.Tag(key="class", value="Plecotus auritus")],
    ),
    TargetClassConfig(
        name="pleaus",
        tags=[data.Tag(key="class", value="Plecotus austriacus")],
    ),
    TargetClassConfig(
        name="rhifer",
        tags=[data.Tag(key="class", value="Rhinolophus ferrumequinum")],
    ),
    TargetClassConfig(
        name="rhihip",
        tags=[data.Tag(key="class", value="Rhinolophus hipposideros")],
    ),
]
+
+
def get_class_names_from_config(
    configs: List["TargetClassConfig"],
) -> List[str]:
    """Extract the ordered list of class names from class configurations.

    Parameters
    ----------
    configs : List[TargetClassConfig]
        The class configuration objects to read names from.

    Returns
    -------
    List[str]
        Class names in the same order as ``configs``. No deduplication is
        performed here; uniqueness is enforced by the target configuration
        validator.
    """
    return [class_info.name for class_info in configs]
+
+
def build_sound_event_encoder(
    configs: List[TargetClassConfig],
) -> SoundEventEncoder:
    """Build a sound event encoder function from the classes configuration."""
    # One compiled condition per class, keyed by class name; insertion
    # order determines matching precedence.
    return SoundEventClassifier(
        {
            cfg.name: build_sound_event_condition(cfg.match_if)
            for cfg in configs
        }
    )
+
+
+class SoundEventClassifier:
+ def __init__(self, mapping: Dict[str, SoundEventCondition]):
+ self.mapping = mapping
+
+ def __call__(
+ self, sound_event_annotation: data.SoundEventAnnotation
+ ) -> str | None:
+ for name, condition in self.mapping.items():
+ if condition(sound_event_annotation):
+ return name
+
+
def build_sound_event_decoder(
    configs: List[TargetClassConfig],
    raise_on_unmapped: bool = False,
) -> SoundEventDecoder:
    """Build a sound event decoder function from the classes configuration."""
    tag_lookup = {cfg.name: cfg.assign_tags for cfg in configs}
    return TagDecoder(tag_lookup, raise_on_unknown=raise_on_unmapped)
+
+
class TagDecoder:
    """Map a class name back to the tags that should be assigned to it.

    Parameters
    ----------
    mapping : Dict[str, List[data.Tag]]
        Class name to the tags attached when decoding that class.
    raise_on_unknown : bool, default=True
        If ``True``, raise ``ValueError`` for names absent from
        ``mapping``; otherwise return an empty list.
    """

    def __init__(
        self,
        mapping: "Dict[str, List[data.Tag]]",
        raise_on_unknown: bool = True,
    ):
        self.mapping = mapping
        self.raise_on_unknown = raise_on_unknown

    def __call__(self, class_name: str) -> "List[data.Tag]":
        """Return the tags for ``class_name``.

        Raises
        ------
        ValueError
            If ``class_name`` is unknown and ``raise_on_unknown`` is set.
        """
        tags = self.mapping.get(class_name)

        if tags is None:
            if self.raise_on_unknown:
                # Include the offending name so the error is actionable.
                raise ValueError(
                    f"Invalid class name: {class_name!r}. "
                    f"Known classes: {sorted(self.mapping)}"
                )
            return []

        return tags
diff --git a/src/batdetect2/targets/config.py b/src/batdetect2/targets/config.py
new file mode 100644
index 00000000..b254decb
--- /dev/null
+++ b/src/batdetect2/targets/config.py
@@ -0,0 +1,63 @@
+from collections import Counter
+from typing import List
+
+from pydantic import Field, field_validator
+from soundevent import data
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.targets.classes import (
+ DEFAULT_CLASSES,
+ DEFAULT_DETECTION_CLASS,
+ TargetClassConfig,
+)
+from batdetect2.targets.rois import ROIMappingConfig
+
+__all__ = [
+ "TargetConfig",
+ "build_default_target_config",
+]
+
+
class TargetConfig(BaseConfig):
    """Target definition: detection class, classification classes, ROI mapping."""

    # Single class describing which sound events count as detections.
    detection_target: TargetClassConfig = Field(
        default=DEFAULT_DETECTION_CLASS
    )

    # One entry per classification class; names must be unique (validated
    # below).
    classification_targets: List[TargetClassConfig] = Field(
        default_factory=lambda: DEFAULT_CLASSES
    )

    # Geometry (ROI) encoding/decoding configuration.
    roi: ROIMappingConfig = Field(default_factory=ROIMappingConfig)

    # NOTE(review): pydantic v2 conventionally stacks @classmethod under
    # @field_validator — confirm this bare-function form behaves as
    # intended on the pinned pydantic version.
    @field_validator("classification_targets")
    def check_unique_class_names(cls, v: List[TargetClassConfig]):
        """Ensure all defined class names are unique."""
        names = [c.name for c in v]

        if len(names) != len(set(names)):
            name_counts = Counter(names)
            duplicates = [
                name for name, count in name_counts.items() if count > 1
            ]
            raise ValueError(
                "Class names must be unique. Found duplicates: "
                f"{', '.join(duplicates)}"
            )
        return v
+
+
def build_default_target_config(class_names: list[str]) -> TargetConfig:
    """Build a default target configuration object."""
    # Each requested name becomes a class matched by a "class" tag of the
    # same value.
    classification = [
        TargetClassConfig(
            name=name,
            tags=[data.Tag(key="class", value=name)],
        )
        for name in class_names
    ]
    return TargetConfig(
        detection_target=DEFAULT_DETECTION_CLASS,
        classification_targets=classification,
        roi=ROIMappingConfig(),
    )
diff --git a/src/batdetect2/targets/rois.py b/src/batdetect2/targets/rois.py
new file mode 100644
index 00000000..4114e1ab
--- /dev/null
+++ b/src/batdetect2/targets/rois.py
@@ -0,0 +1,741 @@
+"""Map geometric ROIs to target representations and back.
+
+This module defines a standardized interface (`ROITargetMapper`) for converting
+a sound event ROI into a target representation for model training and decoding
+model outputs back into approximate geometries.
+
+Core operations:
+
+- Encode a `soundevent.data.SoundEvent` into a reference `Position`
+ `(time, frequency)` and a `Size` array.
+- Decode a `Position` and `Size` array into an approximate
+ `soundevent.data.Geometry` (usually a `BoundingBox`).
+
+The specific mapping depends on the selected mapper implementation. Config
+objects provide mapper-specific parameters such as anchor choice and scaling.
+This module focuses on the geometric part of target definition.
+"""
+
+from typing import Annotated, Literal
+
+import numpy as np
+from pydantic import Field
+from soundevent import data
+
+from batdetect2.audio import AudioConfig, build_audio_loader
+from batdetect2.audio.types import AudioLoader
+from batdetect2.core import ImportConfig, Registry, add_import_config
+from batdetect2.core.arrays import spec_to_xarray
+from batdetect2.core.configs import BaseConfig
+from batdetect2.preprocess import PreprocessingConfig, build_preprocessor
+from batdetect2.preprocess.types import PreprocessorProtocol
+from batdetect2.targets.types import (
+ Position,
+ ROIMapperProtocol,
+ ROITargetMapper,
+ Size,
+)
+
+__all__ = [
+ "Anchor",
+ "AnchorBBoxMapper",
+ "AnchorBBoxMapperConfig",
+ "DEFAULT_ANCHOR",
+ "DEFAULT_FREQUENCY_SCALE",
+ "DEFAULT_TIME_SCALE",
+ "PeakEnergyBBoxMapper",
+ "PeakEnergyBBoxMapperConfig",
+ "ROIMappingConfig",
+ "ROIMapperProtocol",
+ "ROIMapperConfig",
+ "ROIMapperImportConfig",
+ "ROITargetMapper",
+ "SIZE_HEIGHT",
+ "SIZE_ORDER",
+ "SIZE_WIDTH",
+ "build_roi_mapping",
+ "build_roi_mapper",
+]
+
+Anchor = Literal[
+ "bottom-left",
+ "bottom-right",
+ "top-left",
+ "top-right",
+ "center-left",
+ "center-right",
+ "top-center",
+ "bottom-center",
+ "center",
+ "centroid",
+ "point_on_surface",
+]
+
+SIZE_WIDTH = "width"
+"""Standard name for the width/time dimension component ('width')."""
+
+SIZE_HEIGHT = "height"
+"""Standard name for the height/frequency dimension component ('height')."""
+
+SIZE_ORDER = (SIZE_WIDTH, SIZE_HEIGHT)
+"""Standard order of dimensions for size arrays ([width, height])."""
+
+DEFAULT_TIME_SCALE = 1000.0
+"""Default scaling factor for time duration."""
+
+DEFAULT_FREQUENCY_SCALE = 1 / 859.375
+"""Default scaling factor for frequency bandwidth."""
+
+DEFAULT_ANCHOR = "bottom-left"
+"""Default reference position within the geometry ('bottom-left' corner)."""
+
+
+roi_mapper_registry: Registry[ROITargetMapper, []] = Registry("roi_mapper")
+
+
+@add_import_config(roi_mapper_registry)
+class ROIMapperImportConfig(ImportConfig):
+ """Use any callable as an ROI mapper.
+
+ Set ``name="import"`` and provide a ``target`` pointing to any
+ callable to use it instead of a built-in option.
+ """
+
+ name: Literal["import"] = "import"
+
+
+class AnchorBBoxMapperConfig(BaseConfig):
+ """Configuration for `AnchorBBoxMapper`.
+
+ Defines parameters for converting ROIs into targets using a fixed anchor
+ point on the bounding box.
+
+ Attributes
+ ----------
+ name : Literal["anchor_bbox"]
+ The unique identifier for this mapper type.
+ anchor : Anchor
+ Specifies the anchor point within the bounding box to use as the
+ target's reference position (e.g., "center", "bottom-left").
+ time_scale : float
+ Scaling factor applied to the time duration (width) of the ROI.
+ frequency_scale : float
+ Scaling factor applied to the frequency bandwidth (height) of the ROI.
+ """
+
+ name: Literal["anchor_bbox"] = "anchor_bbox"
+ anchor: Anchor = DEFAULT_ANCHOR
+ time_scale: float = DEFAULT_TIME_SCALE
+ frequency_scale: float = DEFAULT_FREQUENCY_SCALE
+
+
+class AnchorBBoxMapper(ROITargetMapper):
+ """Maps ROIs using a bounding box anchor point and width/height.
+
+ This class implements the `ROITargetMapper` protocol for `BoundingBox`
+ geometries.
+
+ Encoding uses a fixed anchor point on the bounding box for `position`
+ (for example, ``bottom-left``). The `size` is a 2-element array with
+ scaled width and height.
+
+ Decoding reconstructs a `BoundingBox` from anchor position and scaled
+ width/height.
+
+ Attributes
+ ----------
+ dimension_names : list[str]
+ The output dimension names: `['width', 'height']`.
+ anchor : Anchor
+ The configured anchor point type (e.g., "center", "bottom-left").
+ time_scale : float
+ The scaling factor for the time dimension (width).
+ frequency_scale : float
+ The scaling factor for the frequency dimension (height).
+ """
+
+ dimension_names = [SIZE_WIDTH, SIZE_HEIGHT]
+
+ def __init__(
+ self,
+ anchor: Anchor = DEFAULT_ANCHOR,
+ time_scale: float = DEFAULT_TIME_SCALE,
+ frequency_scale: float = DEFAULT_FREQUENCY_SCALE,
+ ):
+        """Initialize the AnchorBBoxMapper.
+
+ Parameters
+ ----------
+ anchor : Anchor
+ Reference point type within the bounding box.
+ time_scale : float
+ Scaling factor for time duration (width).
+ frequency_scale : float
+ Scaling factor for frequency bandwidth (height).
+ """
+ self.anchor: Anchor = anchor
+ self.time_scale = time_scale
+ self.frequency_scale = frequency_scale
+
+ def encode(self, sound_event: data.SoundEvent) -> tuple[Position, Size]:
+ """Encode a SoundEvent into an anchor position and scaled box size.
+
+ The position is determined by the configured anchor on the sound
+ event's bounding box. The size is the scaled width and height.
+
+ Parameters
+ ----------
+ sound_event : data.SoundEvent
+ The input sound event with a geometry.
+
+ Returns
+ -------
+ tuple[Position, Size]
+ A tuple of (anchor_position, [scaled_width, scaled_height]).
+ """
+ from soundevent import geometry
+
+ geom = sound_event.geometry
+
+ if geom is None:
+ raise ValueError(
+ "Cannot encode the geometry of a sound event without geometry."
+ f" Sound event: {sound_event}"
+ )
+
+ position = geometry.get_geometry_point(geom, position=self.anchor)
+
+ start_time, low_freq, end_time, high_freq = geometry.compute_bounds(
+ geom
+ )
+
+ size = np.array(
+ [
+ (end_time - start_time) * self.time_scale,
+ (high_freq - low_freq) * self.frequency_scale,
+ ]
+ )
+
+ return position, size
+
+ def decode(
+ self,
+ position: Position,
+ size: Size,
+ ) -> data.Geometry:
+ """Recover a BoundingBox from an anchor position and scaled size.
+
+ Un-scales the input dimensions and reconstructs a
+ `soundevent.data.BoundingBox` relative to the given anchor position.
+
+ Parameters
+ ----------
+ position : Position
+ Reference anchor position (time, frequency).
+ size : Size
+ NumPy array containing the scaled [width, height].
+
+ Returns
+ -------
+ data.BoundingBox
+ The reconstructed bounding box.
+
+ Raises
+ ------
+ ValueError
+ If `size` does not have the expected shape (length 2).
+ """
+
+ if size.ndim != 1 or size.shape[0] != 2:
+ raise ValueError(
+ "Dimension array does not have the expected shape. "
+ f"({size.shape = }) != ([2])"
+ )
+
+ width, height = size
+ return _build_bounding_box(
+ position,
+ duration=float(width) / self.time_scale,
+ bandwidth=float(height) / self.frequency_scale,
+ anchor=self.anchor,
+ )
+
+ @roi_mapper_registry.register(AnchorBBoxMapperConfig)
+ @staticmethod
+ def from_config(config: AnchorBBoxMapperConfig):
+ return AnchorBBoxMapper(
+ anchor=config.anchor,
+ time_scale=config.time_scale,
+ frequency_scale=config.frequency_scale,
+ )
+
+
+class PeakEnergyBBoxMapperConfig(BaseConfig):
+ """Configuration for `PeakEnergyBBoxMapper`.
+
+ Attributes
+ ----------
+ name : Literal["peak_energy_bbox"]
+ The unique identifier for this mapper type.
+    audio : AudioConfig
+        Configuration of the audio loader used to read recording clips.
+    preprocessing : PreprocessingConfig
+        Spectrogram preprocessor configuration used to find the peak energy.
+    loading_buffer : float
+        Seconds added around the ROI when loading audio to avoid edge effects.
+ time_scale : float
+ Scaling factor applied to the time dimensions.
+ frequency_scale : float
+ Scaling factor applied to the frequency dimensions.
+ """
+
+ name: Literal["peak_energy_bbox"] = "peak_energy_bbox"
+ audio: AudioConfig = Field(default_factory=AudioConfig)
+ preprocessing: PreprocessingConfig = Field(
+ default_factory=PreprocessingConfig
+ )
+ loading_buffer: float = 0.01
+ time_scale: float = DEFAULT_TIME_SCALE
+ frequency_scale: float = DEFAULT_FREQUENCY_SCALE
+
+
+class PeakEnergyBBoxMapper(ROITargetMapper):
+ """Maps ROIs using the peak energy point and distances to edges.
+
+ This class implements the `ROITargetMapper` protocol.
+
+ Encoding sets `position` to the (time, frequency) coordinate of peak energy
+ inside the sound event bounding box. The `size` is a 4-element array with
+ scaled distances from the peak point to left, bottom, right, and top edges.
+
+ Decoding reconstructs a `BoundingBox` by applying the unscaled distances to
+ the peak-energy position.
+
+ Attributes
+ ----------
+ dimension_names : list[str]
+ The output dimension names: `['left', 'bottom', 'right', 'top']`.
+ preprocessor : PreprocessorProtocol
+ The spectrogram preprocessor instance.
+ time_scale : float
+ The scaling factor for time-based distances.
+ frequency_scale : float
+ The scaling factor for frequency-based distances.
+ loading_buffer : float
+ The buffer used for loading audio around the ROI.
+ """
+
+ dimension_names = ["left", "bottom", "right", "top"]
+
+ def __init__(
+ self,
+ preprocessor: PreprocessorProtocol,
+ audio_loader: AudioLoader,
+ time_scale: float = DEFAULT_TIME_SCALE,
+ frequency_scale: float = DEFAULT_FREQUENCY_SCALE,
+ loading_buffer: float = 0.01,
+ ):
+ """Initialize the PeakEnergyBBoxMapper.
+
+ Parameters
+ ----------
+ preprocessor : PreprocessorProtocol
+ An initialized preprocessor for generating spectrograms.
+ time_scale : float
+ Scaling factor for time dimensions (left, right distances).
+ frequency_scale : float
+ Scaling factor for frequency dimensions (bottom, top distances).
+ loading_buffer : float
+ Buffer in seconds to add when loading audio clips.
+ """
+ self.preprocessor = preprocessor
+ self.audio_loader = audio_loader
+ self.time_scale = time_scale
+ self.frequency_scale = frequency_scale
+ self.loading_buffer = loading_buffer
+
+ def encode(
+ self,
+ sound_event: data.SoundEvent,
+ ) -> tuple[Position, Size]:
+ """Encode a SoundEvent into a peak energy position and edge distances.
+
+ Finds the peak energy coordinates within the event's bounding box
+ and calculates the scaled distances from this point to the box edges.
+
+ Parameters
+ ----------
+ sound_event : data.SoundEvent
+ The input sound event with a geometry and associated recording.
+
+ Returns
+ -------
+ tuple[Position, Size]
+ A tuple of (peak_position, [l, b, r, t] distances).
+ """
+ from soundevent import geometry
+
+ geom = sound_event.geometry
+
+ if geom is None:
+ raise ValueError(
+ "Cannot encode the geometry of a sound event without geometry."
+ f" Sound event: {sound_event}"
+ )
+
+ start_time, low_freq, end_time, high_freq = geometry.compute_bounds(
+ geom
+ )
+
+ time, freq = get_peak_energy_coordinates(
+ recording=sound_event.recording,
+ audio_loader=self.audio_loader,
+ preprocessor=self.preprocessor,
+ start_time=start_time,
+ end_time=end_time,
+ low_freq=low_freq,
+ high_freq=high_freq,
+ loading_buffer=self.loading_buffer,
+ )
+
+ size = np.array(
+ [
+ (time - start_time) * self.time_scale,
+ (freq - low_freq) * self.frequency_scale,
+ (end_time - time) * self.time_scale,
+ (high_freq - freq) * self.frequency_scale,
+ ]
+ )
+
+ return (time, freq), size
+
+ def decode(self, position: Position, size: Size) -> data.Geometry:
+ """Recover a BoundingBox from a peak position and edge distances.
+
+ Parameters
+ ----------
+ position : Position
+ The reference peak energy position (time, frequency).
+ size : Size
+ NumPy array with scaled distances [left, bottom, right, top].
+
+ Returns
+ -------
+ data.BoundingBox
+ The reconstructed bounding box.
+ """
+ time, freq = position
+ left, bottom, right, top = size
+
+ return data.BoundingBox(
+ coordinates=[
+ time - max(0, float(left)) / self.time_scale,
+ freq - max(0, float(bottom)) / self.frequency_scale,
+ time + max(0, float(right)) / self.time_scale,
+ freq + max(0, float(top)) / self.frequency_scale,
+ ]
+ )
+
+ @roi_mapper_registry.register(PeakEnergyBBoxMapperConfig)
+ @staticmethod
+ def from_config(config: PeakEnergyBBoxMapperConfig):
+ audio_loader = build_audio_loader(config=config.audio)
+ preprocessor = build_preprocessor(
+ config.preprocessing,
+ input_samplerate=audio_loader.samplerate,
+ )
+ return PeakEnergyBBoxMapper(
+ preprocessor=preprocessor,
+ audio_loader=audio_loader,
+ time_scale=config.time_scale,
+ frequency_scale=config.frequency_scale,
+ loading_buffer=config.loading_buffer,
+ )
+
+
+ROIMapperConfig = Annotated[
+ AnchorBBoxMapperConfig | PeakEnergyBBoxMapperConfig,
+ Field(discriminator="name"),
+]
+"""A discriminated union of all supported ROI mapper configurations.
+
+This type allows for selecting and configuring different `ROITargetMapper`
+implementations by using the `name` field as a discriminator.
+"""
+
+
+class ROIMappingConfig(BaseConfig):
+ """Configuration for class-aware ROI mapping.
+
+ Attributes
+ ----------
+ default : ROIMapperConfig
+ Default mapper used when no class-specific override exists.
+ overrides : dict[str, ROIMapperConfig]
+ Optional class-specific mapper overrides by class name.
+ """
+
+ default: ROIMapperConfig = Field(default_factory=AnchorBBoxMapperConfig)
+ overrides: dict[str, ROIMapperConfig] = Field(default_factory=dict)
+
+
+class ClassAwareROIMapper(ROIMapperProtocol):
+ """Apply a default ROI mapper with optional per-class overrides."""
+
+ dimension_names: list[str]
+
+ def __init__(
+ self,
+ default_mapper: ROITargetMapper,
+ overrides: dict[str, ROITargetMapper] | None = None,
+ ):
+ self.default_mapper = default_mapper
+ self.overrides = overrides or {}
+ self.dimension_names = list(default_mapper.dimension_names)
+
+ def encode(
+ self,
+ sound_event: data.SoundEvent,
+ class_name: str | None = None,
+ ) -> tuple[Position, Size]:
+ mapper = self._select_mapper(class_name)
+ return mapper.encode(sound_event)
+
+ def decode(
+ self,
+ position: Position,
+ size: Size,
+ class_name: str | None = None,
+ ) -> data.Geometry:
+ mapper = self._select_mapper(class_name)
+ return mapper.decode(position, size)
+
+ def _select_mapper(self, class_name: str | None = None) -> ROITargetMapper:
+ if class_name is not None and class_name in self.overrides:
+ return self.overrides[class_name]
+
+ return self.default_mapper
+
+
+def build_roi_mapper(
+ config: ROIMapperConfig | None = None,
+) -> ROITargetMapper:
+ """Factory function to create an ROITargetMapper from a config object.
+
+ Parameters
+ ----------
+    config : ROIMapperConfig, optional
+        Mapper type and parameters. Defaults to `AnchorBBoxMapperConfig()`.
+
+ Returns
+ -------
+ ROITargetMapper
+ An initialized ROI mapper instance.
+
+ Raises
+ ------
+ NotImplementedError
+ If the `name` in the config does not correspond to a known mapper.
+ """
+ config = config or AnchorBBoxMapperConfig()
+ return roi_mapper_registry.build(config)
+
+
+def build_roi_mapping(
+ config: ROIMappingConfig | None = None,
+) -> ROIMapperProtocol:
+ """Build a class-aware ROI mapper and validate consistency."""
+ config = config or ROIMappingConfig()
+
+ default_mapper = build_roi_mapper(config.default)
+ overrides = {
+ class_name: build_roi_mapper(mapper_config)
+ for class_name, mapper_config in config.overrides.items()
+ }
+
+ expected = list(default_mapper.dimension_names)
+
+ for class_name, mapper in overrides.items():
+ actual = list(mapper.dimension_names)
+
+ if actual != expected:
+ raise ValueError(
+ "All ROI mappers must share the same dimension order. "
+ f"Default dimensions: {expected}, "
+ f"class '{class_name}' dimensions: {actual}."
+ )
+
+ return ClassAwareROIMapper(
+ default_mapper=default_mapper,
+ overrides=overrides,
+ )
+
+
+VALID_ANCHORS = [
+ "bottom-left",
+ "bottom-right",
+ "top-left",
+ "top-right",
+ "center-left",
+ "center-right",
+ "top-center",
+ "bottom-center",
+ "center",
+ "centroid",
+ "point_on_surface",
+]
+
+
+def _build_bounding_box(
+ pos: tuple[float, float],
+ duration: float,
+ bandwidth: float,
+ anchor: Anchor = DEFAULT_ANCHOR,
+) -> data.BoundingBox:
+ """Construct a BoundingBox from a reference point, size, and position type.
+
+    Internal helper for `AnchorBBoxMapper.decode`. Calculates the box
+ coordinates [start_time, low_freq, end_time, high_freq] based on where
+ the input `pos` (time, freq) is located relative to the box (e.g.,
+ center, corner).
+
+ Parameters
+ ----------
+ pos
+ Reference position (time, frequency).
+ duration
+ The required *unscaled* duration (width) of the bounding box.
+ bandwidth
+ The required *unscaled* frequency bandwidth (height) of the bounding
+ box.
+ anchor
+ Specifies which part of the bounding box the input `pos` corresponds to.
+
+ Returns
+ -------
+ data.BoundingBox
+ The constructed bounding box object.
+
+ Raises
+ ------
+ ValueError
+ If `anchor` is not a recognized value or format.
+ """
+ time, freq = map(float, pos)
+ duration = max(0, duration)
+ bandwidth = max(0, bandwidth)
+ if anchor in ["center", "centroid", "point_on_surface"]:
+ return data.BoundingBox(
+ coordinates=[
+ max(time - duration / 2, 0),
+ max(freq - bandwidth / 2, 0),
+ max(time + duration / 2, 0),
+ max(freq + bandwidth / 2, 0),
+ ]
+ )
+
+ if anchor not in VALID_ANCHORS:
+ raise ValueError(
+ f"Invalid anchor: {anchor}. Valid options are: {VALID_ANCHORS}"
+ )
+
+ y, x = anchor.split("-")
+
+ start_time = {
+ "left": time,
+ "center": time - duration / 2,
+ "right": time - duration,
+ }[x]
+
+ low_freq = {
+ "bottom": freq,
+ "center": freq - bandwidth / 2,
+ "top": freq - bandwidth,
+ }[y]
+
+ return data.BoundingBox(
+ coordinates=[
+ max(0, start_time),
+ max(0, low_freq),
+ max(0, start_time + duration),
+ max(0, low_freq + bandwidth),
+ ]
+ )
+
+
+def get_peak_energy_coordinates(
+ recording: data.Recording,
+ audio_loader: AudioLoader,
+ preprocessor: PreprocessorProtocol,
+ start_time: float = 0,
+ end_time: float | None = None,
+ low_freq: float = 0,
+ high_freq: float | None = None,
+ loading_buffer: float = 0.05,
+) -> Position:
+ """Find the coordinates of the highest energy point in a spectrogram.
+
+ Generates a spectrogram for a specified time-frequency region of a
+ recording and returns the (time, frequency) coordinates of the pixel with
+ the maximum value.
+
+ Parameters
+ ----------
+ recording : data.Recording
+ The recording to analyze.
+ preprocessor : PreprocessorProtocol
+ The processor to convert audio to a spectrogram.
+ start_time : float, default=0
+ The start time of the region of interest.
+ end_time : float, optional
+ The end time of the region of interest. Defaults to recording duration.
+ low_freq : float, default=0
+ The low frequency of the region of interest.
+ high_freq : float, optional
+ The high frequency of the region of interest. Defaults to Nyquist.
+ loading_buffer : float, default=0.05
+ Buffer in seconds to add around the time range when loading the clip
+ to mitigate border effects from transformations like STFT.
+
+ Returns
+ -------
+ Position
+ A (time, frequency) tuple for the peak energy location.
+ """
+ if end_time is None:
+ end_time = recording.duration
+ end_time = min(end_time, recording.duration)
+
+ if high_freq is None:
+ high_freq = recording.samplerate / 2
+
+ clip_start = max(0, start_time - loading_buffer)
+ clip_end = min(recording.duration, end_time + loading_buffer)
+
+ clip = data.Clip(
+ recording=recording,
+ start_time=clip_start,
+ end_time=clip_end,
+ )
+
+ wav = audio_loader.load_clip(clip)
+ spec = preprocessor.process_numpy(wav)
+ spec = spec_to_xarray(
+ spec,
+ clip.start_time,
+ clip.end_time,
+ min_freq=preprocessor.min_freq,
+ max_freq=preprocessor.max_freq,
+ )
+ low_freq = max(low_freq, preprocessor.min_freq)
+ high_freq = min(high_freq, preprocessor.max_freq)
+ selection = spec.sel(
+ time=slice(start_time, end_time),
+ frequency=slice(low_freq, high_freq),
+ )
+
+ index = selection.argmax(dim=["time", "frequency"])
+ point = selection.isel(index)
+ peak_time: float = point.time.item()
+ peak_freq: float = point.frequency.item()
+ return peak_time, peak_freq
diff --git a/src/batdetect2/targets/targets.py b/src/batdetect2/targets/targets.py
new file mode 100644
index 00000000..9c8bb1ef
--- /dev/null
+++ b/src/batdetect2/targets/targets.py
@@ -0,0 +1,238 @@
+from typing import Iterable
+
+from loguru import logger
+from soundevent import data
+
+from batdetect2.data.conditions import build_sound_event_condition
+from batdetect2.targets.classes import (
+ DEFAULT_CLASSES,
+ DEFAULT_DETECTION_CLASS,
+ build_sound_event_decoder,
+ build_sound_event_encoder,
+ get_class_names_from_config,
+)
+from batdetect2.targets.config import TargetConfig
+from batdetect2.targets.types import (
+ Position,
+ ROIMapperProtocol,
+ Size,
+ TargetProtocol,
+)
+
+
+class Targets(TargetProtocol):
+ """Encapsulates the configured target class definition pipeline.
+
+ This class implements the `TargetProtocol`, holding the configured
+ functions for filtering, encoding (tags to class name), and decoding
+ (class name to tags). Geometry ROI mapping is handled separately by
+ ``ROIMapperProtocol``.
+
+ Instances are typically created using the `build_targets` factory function
+ or the `load_targets` convenience loader.
+
+ Attributes
+ ----------
+ class_names
+ An ordered list of the unique names of the specific target classes
+ defined in the configuration.
+    detection_class_tags
+        A list of `soundevent.data.Tag` objects assigned to the configured
+        detection target class (its name is in `detection_class_name`).
+ """
+
+ class_names: list[str]
+ detection_class_tags: list[data.Tag]
+ detection_class_name: str
+
+ def __init__(self, config: TargetConfig):
+ """Initialize the Targets object."""
+ self.config = config
+
+ self._filter_fn = build_sound_event_condition(
+ self.config.detection_target.match_if
+ )
+ self._encode_fn = build_sound_event_encoder(
+ self.config.classification_targets
+ )
+ self._decode_fn = build_sound_event_decoder(
+ self.config.classification_targets
+ )
+
+ self.class_names = get_class_names_from_config(
+ self.config.classification_targets
+ )
+
+ self.detection_class_name = self.config.detection_target.name
+ self.detection_class_tags = self.config.detection_target.assign_tags
+
+ @classmethod
+ def from_config(cls, config: dict) -> "Targets":
+ """Build a Targets object from a serialized config dictionary."""
+ validated_config = TargetConfig.model_validate(config)
+ return cls(config=validated_config)
+
+ def get_config(self) -> dict:
+ """Return the serialized target config used to build this object."""
+ return self.config.model_dump(mode="json")
+
+ def filter(self, sound_event: data.SoundEventAnnotation) -> bool:
+ """Apply the configured filter to a sound event annotation.
+
+ Parameters
+ ----------
+ sound_event : data.SoundEventAnnotation
+ The annotation to filter.
+
+ Returns
+ -------
+ bool
+ True if the annotation should be kept (passes the filter),
+ False otherwise. If no filter was configured, always returns True.
+ """
+ return self._filter_fn(sound_event)
+
+ def encode_class(
+ self, sound_event: data.SoundEventAnnotation
+ ) -> str | None:
+ """Encode a sound event annotation to its target class name.
+
+ Applies the configured class definition rules (including priority)
+ to determine the specific class name for the annotation.
+
+ Parameters
+ ----------
+ sound_event : data.SoundEventAnnotation
+ The annotation to encode. Note: This should typically be called
+ *after* applying any transformations via the `transform` method.
+
+ Returns
+ -------
+ str or None
+ The name of the matched target class, or None if the annotation
+ does not match any specific class rule (i.e., it belongs to the
+ generic category).
+ """
+ return self._encode_fn(sound_event)
+
+ def decode_class(self, class_label: str) -> list[data.Tag]:
+ """Decode a predicted class name back into representative tags.
+
+        Uses the mapping configured by each `TargetClassConfig` entry to
+        convert a predicted class name string into a list of
+ `soundevent.data.Tag` objects.
+
+ Parameters
+ ----------
+ class_label : str
+ The class name to decode.
+
+ Returns
+ -------
+ list[data.Tag]
+ The list of tags corresponding to the input class name.
+ """
+ return self._decode_fn(class_label)
+
+
+DEFAULT_TARGET_CONFIG: TargetConfig = TargetConfig(
+ classification_targets=DEFAULT_CLASSES,
+ detection_target=DEFAULT_DETECTION_CLASS,
+)
+
+
+def build_targets(config: TargetConfig | dict | None = None) -> Targets:
+ """Build a Targets object from a loaded TargetConfig.
+
+ Parameters
+ ----------
+ config : TargetConfig
+ The loaded and validated unified target configuration object.
+
+ Returns
+ -------
+ Targets
+ An initialized `Targets` object ready for use.
+
+ Raises
+ ------
+ KeyError
+ If term keys or derivation function keys specified in the `config`
+ are not found in their respective registries.
+ ImportError, AttributeError, TypeError
+ If dynamic import of a derivation function fails (when configured).
+ """
+ config = config or DEFAULT_TARGET_CONFIG
+
+ if not isinstance(config, TargetConfig):
+ config = TargetConfig.model_validate(config)
+
+ logger.opt(lazy=True).debug(
+ "Building targets with config: \n{}",
+ lambda: config.to_yaml_string(),
+ )
+
+ return Targets(config=config)
+
+
+def load_targets(
+ config_path: data.PathLike,
+ field: str | None = None,
+) -> Targets:
+ """Load a Targets object directly from a configuration file.
+
+ This convenience factory method loads the `TargetConfig` from the
+ specified file path and then calls `Targets.from_config` to build
+ the fully initialized `Targets` object.
+
+ Parameters
+ ----------
+ config_path : data.PathLike
+ Path to the configuration file (e.g., YAML).
+ field : str, optional
+ Dot-separated path to a nested section within the file containing
+ the target configuration. If None, the entire file content is used.
+
+ Returns
+ -------
+ Targets
+ An initialized `Targets` object ready for use.
+
+ Raises
+ ------
+ FileNotFoundError, yaml.YAMLError, pydantic.ValidationError, KeyError,
+ TypeError
+ Errors raised during file loading or validation via
+ ``TargetConfig.load``.
+ KeyError, ImportError, AttributeError, TypeError
+ Errors raised during the build process by `Targets.from_config`
+ (e.g., missing keys in registries, failed imports).
+ """
+ config = TargetConfig.load(
+ config_path,
+ field=field,
+ )
+ return build_targets(config)
+
+
+def iterate_encoded_sound_events(
+ sound_events: Iterable[data.SoundEventAnnotation],
+ targets: TargetProtocol,
+ roi_mapper: ROIMapperProtocol,
+) -> Iterable[tuple[str | None, Position, Size]]:
+ for sound_event in sound_events:
+ if not targets.filter(sound_event):
+ continue
+
+ geometry = sound_event.sound_event.geometry
+
+ if geometry is None:
+ continue
+
+ class_name = targets.encode_class(sound_event)
+ position, size = roi_mapper.encode(
+ sound_event.sound_event,
+ class_name=class_name,
+ )
+
+ yield class_name, position, size
diff --git a/src/batdetect2/targets/terms.py b/src/batdetect2/targets/terms.py
new file mode 100644
index 00000000..c804593f
--- /dev/null
+++ b/src/batdetect2/targets/terms.py
@@ -0,0 +1,87 @@
+"""Manages the vocabulary for defining training targets."""
+
+from soundevent import data, terms
+
+__all__ = [
+ "call_type",
+ "individual",
+ "data_source",
+ "generic_class",
+]
+
+# The default key used to reference the 'generic_class' term.
+# Often used implicitly when defining classification targets.
+GENERIC_CLASS_KEY = "class"
+
+
+data_source = data.Term(
+ name="dcterms:source",
+ label="Source",
+ uri="http://purl.org/dc/terms/source",
+ definition=(
+ "A related resource from which the described resource is derived."
+ ),
+)
+
+call_type = data.Term(
+ name="soundevent:call_type",
+ label="Call Type",
+ definition=(
+ "A broad categorization of animal vocalizations based on their "
+ "intended function or purpose (e.g., social, distress, mating, "
+ "territorial, echolocation)."
+ ),
+)
+"""Term representing the broad functional category of a vocalization."""
+
+individual = data.Term(
+ name="soundevent:individual",
+ label="Individual",
+ definition=(
+ "An id for an individual animal. In the context of bioacoustic "
+ "annotation, this term is used to label vocalizations that are "
+ "attributed to a specific individual."
+ ),
+)
+"""Term used for tags identifying a specific individual animal."""
+
+dataset_split = data.Term(
+ name="batdetect2:split",
+ label="Dataset Split",
+ definition=(
+ "Identifies the specific data partition (e.g., 'train', 'test') "
+ "that the item belongs to within an experimental setup. "
+ "The expected value is a literal text string."
+ ),
+)
+"""Custom metadata term defining the machine learning partition of an item."""
+
+generic_class = data.Term(
+ name="soundevent:class",
+ label="Class",
+ definition=(
+ "A generic term representing the name of a class within a "
+ "classification model. Its specific meaning is determined by "
+ "the model's application."
+ ),
+)
+"""Generic term representing a classification model's output class label."""
+
+terms.register_term_set(
+ terms.TermSet(
+ terms=[
+ generic_class,
+ individual,
+ call_type,
+ data_source,
+ ],
+ aliases={
+ "class": generic_class.name,
+ "individual": individual.name,
+ "event": call_type.name,
+ "source": data_source.name,
+ "call_type": call_type.name,
+ },
+ ),
+ override_existing=True,
+)
diff --git a/src/batdetect2/targets/types.py b/src/batdetect2/targets/types.py
new file mode 100644
index 00000000..0558b3c2
--- /dev/null
+++ b/src/batdetect2/targets/types.py
@@ -0,0 +1,82 @@
+from collections.abc import Callable
+from typing import Protocol
+
+import numpy as np
+from soundevent import data
+
+__all__ = [
+ "Position",
+ "ROIMapperProtocol",
+ "ROITargetMapper",
+ "Size",
+ "SoundEventDecoder",
+ "SoundEventEncoder",
+ "SoundEventFilter",
+ "TargetProtocol",
+]
+
+SoundEventEncoder = Callable[[data.SoundEventAnnotation], str | None]
+SoundEventDecoder = Callable[[str], list[data.Tag]]
+SoundEventFilter = Callable[[data.SoundEventAnnotation], bool]
+
+Position = tuple[float, float]
+Size = np.ndarray
+
+
+class TargetProtocol(Protocol):
+ class_names: list[str]
+ detection_class_tags: list[data.Tag]
+ detection_class_name: str
+
+ @classmethod
+ def from_config(cls, config: dict) -> "TargetProtocol": ...
+
+ def get_config(self) -> dict: ...
+
+ def filter(self, sound_event: data.SoundEventAnnotation) -> bool: ...
+
+ def encode_class(
+ self,
+ sound_event: data.SoundEventAnnotation,
+ ) -> str | None: ...
+
+ def decode_class(self, class_label: str) -> list[data.Tag]: ...
+
+
+class ROIMapperProtocol(Protocol):
+ dimension_names: list[str]
+
+ def encode(
+ self,
+ sound_event: data.SoundEvent,
+ class_name: str | None = None,
+ ) -> tuple[Position, Size]: ...
+
+ def decode(
+ self,
+ position: Position,
+ size: Size,
+ class_name: str | None = None,
+ ) -> data.Geometry: ...
+
+ def encode_roi(
+ self,
+ sound_event: data.SoundEventAnnotation,
+ ) -> tuple[Position, Size]: ...
+
+ def decode_roi(
+ self,
+ position: Position,
+ size: Size,
+ class_name: str | None = None,
+ ) -> data.Geometry: ...
+
+
+class ROITargetMapper(Protocol):
+ dimension_names: list[str]
+
+ def encode(
+ self, sound_event: data.SoundEvent
+ ) -> tuple[Position, Size]: ...
+
+ def decode(self, position: Position, size: Size) -> data.Geometry: ...
diff --git a/src/batdetect2/targets/utils.py b/src/batdetect2/targets/utils.py
new file mode 100644
index 00000000..679a6c69
--- /dev/null
+++ b/src/batdetect2/targets/utils.py
@@ -0,0 +1,29 @@
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from batdetect2.targets.types import TargetProtocol
+
+
+def check_target_compatibility(
+ targets: "TargetProtocol",
+ class_names: list[str],
+) -> bool:
+ """Check if a target definition can decode a model's outputs.
+
+ Parameters
+ ----------
+ targets : TargetProtocol
+ Target definition that would be used with the model outputs.
+ class_names : list[str]
+ Class names produced by the model checkpoint.
+
+ Returns
+ -------
+ bool
+ True when every model class name exists in the provided targets,
+ False otherwise.
+ """
+ target_class_names = set(targets.class_names)
+ model_class_names = set(class_names)
+
+ return model_class_names.issubset(target_class_names)
diff --git a/src/batdetect2/train/__init__.py b/src/batdetect2/train/__init__.py
new file mode 100644
index 00000000..27f55397
--- /dev/null
+++ b/src/batdetect2/train/__init__.py
@@ -0,0 +1,34 @@
+from batdetect2.train.checkpoints import (
+ DEFAULT_CHECKPOINT_DIR,
+ resolve_checkpoint_path,
+)
+from batdetect2.train.config import TrainingConfig
+from batdetect2.train.lightning import (
+ TrainingModule,
+ load_model_from_checkpoint,
+)
+from batdetect2.train.logging import (
+ ConfigHyperparameterLogging,
+ DatasetConfigArtifact,
+ DatasetConfigArtifactLogging,
+ DataSummaryArtifactLogging,
+ TargetConfigArtifactLogging,
+ TrainLoggingContext,
+)
+from batdetect2.train.train import build_trainer, run_train
+
+__all__ = [
+ "ConfigHyperparameterLogging",
+ "DataSummaryArtifactLogging",
+ "DEFAULT_CHECKPOINT_DIR",
+ "DatasetConfigArtifact",
+ "DatasetConfigArtifactLogging",
+ "TargetConfigArtifactLogging",
+ "TrainLoggingContext",
+ "TrainingConfig",
+ "TrainingModule",
+ "build_trainer",
+ "load_model_from_checkpoint",
+ "resolve_checkpoint_path",
+ "run_train",
+]
diff --git a/src/batdetect2/train/augmentations.py b/src/batdetect2/train/augmentations.py
new file mode 100644
index 00000000..19a1e5f2
--- /dev/null
+++ b/src/batdetect2/train/augmentations.py
@@ -0,0 +1,708 @@
+"""Applies data augmentation techniques to BatDetect2 training examples."""
+
+import warnings
+from collections.abc import Sequence
+from typing import Annotated, Callable, List, Literal, Tuple
+
+import numpy as np
+import torch
+from loguru import logger
+from pydantic import Field
+from soundevent import data
+from soundevent.geometry import scale_geometry, shift_geometry
+
+from batdetect2.audio.clips import get_subclip_annotation
+from batdetect2.audio.loader import TARGET_SAMPLERATE_HZ
+from batdetect2.audio.types import AudioLoader
+from batdetect2.core.arrays import adjust_width
+from batdetect2.core.configs import BaseConfig
+from batdetect2.core.registries import (
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+from batdetect2.train.types import Augmentation
+
+__all__ = [
+ "AugmentationConfig",
+ "AugmentationsConfig",
+ "AudioAugmentationImportConfig",
+ "DEFAULT_AUGMENTATION_CONFIG",
+ "AddEchoConfig",
+ "AudioSource",
+ "MaskFrequencyConfig",
+ "MixAudioConfig",
+ "MaskTimeConfig",
+ "ScaleVolumeConfig",
+ "SpecAugmentationImportConfig",
+ "WarpConfig",
+ "add_echo",
+ "build_augmentations",
+ "mask_frequency",
+ "mask_time",
+ "mix_audio",
+ "scale_volume",
+ "warp_spectrogram",
+]
+
+
+# Source of extra audio examples: maps a requested duration (seconds) to
+# a loaded waveform plus its annotations. Used by the mix-audio step.
+AudioSource = Callable[[float], tuple[torch.Tensor, data.ClipAnnotation]]
+
+# Registry of audio-domain augmentation builders. Builders receive the
+# samplerate and an optional AudioSource in addition to their config.
+audio_augmentations: Registry[Augmentation, [int, AudioSource | None]] = (
+    Registry(name="audio_augmentation")
+)
+
+# Registry of spectrogram-domain augmentation builders (config only).
+spec_augmentations: Registry[Augmentation, []] = Registry(
+    name="spec_augmentation"
+)
+
+
+@add_import_config(audio_augmentations)
+class AudioAugmentationImportConfig(ImportConfig):
+    """Use any callable as an audio augmentation.
+
+    Set ``name="import"`` and provide a ``target`` pointing to any
+    callable to use it instead of a built-in option.
+    """
+
+    # Discriminator value for the config union.
+    name: Literal["import"] = "import"
+
+
+@add_import_config(spec_augmentations)
+class SpecAugmentationImportConfig(ImportConfig):
+    """Use any callable as a spectrogram augmentation.
+
+    Set ``name="import"`` and provide a ``target`` pointing to any
+    callable to use it instead of a built-in option.
+    """
+
+    # Discriminator value for the config union.
+    name: Literal["import"] = "import"
+
+
+class MixAudioConfig(BaseConfig):
+    """Configuration for MixUp augmentation (mixing two examples).
+
+    NOTE(review): ``min_weight <= max_weight`` is not validated here.
+    """
+
+    # Discriminator value for the config union.
+    name: Literal["mix_audio"] = "mix_audio"
+
+    probability: float = 0.2
+    """Probability of applying this augmentation to an example."""
+
+    min_weight: float = 0.3
+    """Minimum mixing weight (lambda) applied to the primary example."""
+
+    max_weight: float = 0.7
+    """Maximum mixing weight (lambda) applied to the primary example."""
+
+
+class MixAudio(torch.nn.Module):
+    """Callable class for MixUp augmentation, handling example fetching."""
+
+    def __init__(
+        self,
+        example_source: AudioSource,
+        min_weight: float = 0.3,
+        max_weight: float = 0.7,
+    ):
+        """Initialize the AudioMixer.
+
+        Parameters
+        ----------
+        example_source : AudioSource
+            Callable returning a second (waveform, annotation) pair for a
+            requested duration.
+        min_weight, max_weight : float
+            Bounds of the uniform mixing weight applied to the primary
+            example.
+        """
+        super().__init__()
+        self.min_weight = min_weight
+        self.example_source = example_source
+        self.max_weight = max_weight
+
+    # NOTE(review): defining __call__ directly bypasses
+    # torch.nn.Module.__call__ (and hence module hooks); other
+    # augmentations in this file define ``forward`` instead — confirm
+    # this is intentional.
+    def __call__(
+        self,
+        wav: torch.Tensor,
+        clip_annotation: data.ClipAnnotation,
+    ) -> Tuple[torch.Tensor, data.ClipAnnotation]:
+        """Fetch another example and perform mixup."""
+        other_wav, other_clip_annotation = self.example_source(
+            clip_annotation.clip.duration
+        )
+        # Random mixing weight for the primary example.
+        weight = np.random.uniform(self.min_weight, self.max_weight)
+        mixed_audio = mix_audio(wav, other_wav, weight=weight)
+        # Merge the annotations of both clips into one ClipAnnotation.
+        mixed_annotations = combine_clip_annotations(
+            clip_annotation,
+            other_clip_annotation,
+        )
+        return mixed_audio, mixed_annotations
+
+    # Registered as the builder for MixAudioConfig. NOTE(review): the
+    # registry decorator wraps the staticmethod object; this requires
+    # Python 3.10+ where staticmethod is callable — confirm supported
+    # versions.
+    @audio_augmentations.register(MixAudioConfig)
+    @staticmethod
+    def from_config(
+        config: MixAudioConfig,
+        samplerate: int,
+        source: AudioSource | None,
+    ):
+        """Build a MixAudio augmentation; falls back to identity if no
+        example source is available (``samplerate`` is unused here)."""
+        if source is None:
+            warnings.warn(
+                "Mix audio augmentation ('mix_audio') requires an "
+                "'example_source' callable to be provided.",
+                stacklevel=2,
+            )
+            # Identity augmentation: pass inputs through unchanged.
+            return lambda wav, clip_annotation: (wav, clip_annotation)
+
+        return MixAudio(
+            example_source=source,
+            min_weight=config.min_weight,
+            max_weight=config.max_weight,
+        )
+
+
+def mix_audio(
+ wav1: torch.Tensor,
+ wav2: torch.Tensor,
+ weight: float,
+) -> torch.Tensor:
+ """Combine two training examples."""
+ wav2 = adjust_width(wav2, wav1.shape[-1])
+ return weight * wav1 + (1 - weight) * wav2
+
+
+def shift_sound_event_annotation(
+    sound_event_annotation: data.SoundEventAnnotation,
+    time: float,
+) -> data.SoundEventAnnotation:
+    """Return a copy of the annotation with its geometry shifted in time.
+
+    Annotations without geometry are returned unchanged. The input
+    annotation is not mutated (``model_copy`` is used throughout).
+    """
+    sound_event = sound_event_annotation.sound_event
+    geometry = sound_event.geometry
+
+    if geometry is None:
+        return sound_event_annotation
+
+    sound_event = sound_event.model_copy(
+        update=dict(geometry=shift_geometry(geometry, time=time))
+    )
+    return sound_event_annotation.model_copy(
+        update=dict(sound_event=sound_event)
+    )
+
+
+def combine_clip_annotations(
+    clip_annotation1: data.ClipAnnotation,
+    clip_annotation2: data.ClipAnnotation,
+) -> data.ClipAnnotation:
+    """Merge the sound events of two clips into the first clip.
+
+    Events from ``clip_annotation2`` are shifted by the difference in
+    clip start times so they align with ``clip_annotation1``'s timeline.
+    The result keeps ``clip_annotation1``'s clip metadata.
+    """
+    time_shift = (
+        clip_annotation1.clip.start_time - clip_annotation2.clip.start_time
+    )
+    return clip_annotation1.model_copy(
+        update=dict(
+            sound_events=[
+                *clip_annotation1.sound_events,
+                *[
+                    shift_sound_event_annotation(sound_event, time=time_shift)
+                    for sound_event in clip_annotation2.sound_events
+                ],
+            ]
+        )
+    )
+
+
+class AddEchoConfig(BaseConfig):
+    """Configuration for adding synthetic echo/reverb."""
+
+    # Discriminator value for the config union.
+    name: Literal["add_echo"] = "add_echo"
+    # Probability of applying this augmentation to an example.
+    probability: float = 0.2
+    # Maximum echo delay in seconds (converted to samples by AddEcho).
+    max_delay: float = 0.005
+    # Bounds of the uniform mixing weight applied to the original signal.
+    min_weight: float = 0.0
+    max_weight: float = 1.0
+
+
+class AddEcho(torch.nn.Module):
+    """Augmentation that mixes in a randomly delayed copy of the signal."""
+
+    def __init__(
+        self,
+        samplerate: int = TARGET_SAMPLERATE_HZ,
+        min_weight: float = 0.1,
+        max_weight: float = 1.0,
+        max_delay: float = 0.005,
+    ):
+        super().__init__()
+        self.samplerate = samplerate
+        self.min_weight = min_weight
+        self.max_weight = max_weight
+        # Maximum delay in seconds, kept for reference.
+        self.max_delay_s = max_delay
+        # Maximum delay in samples. NOTE(review): if this is 0 (i.e.
+        # max_delay * samplerate < 1), np.random.randint(0, 0) in
+        # forward raises ValueError — confirm configs keep it >= 1.
+        self.max_delay = int(max_delay * samplerate)
+
+    def forward(
+        self,
+        wav: torch.Tensor,
+        clip_annotation: data.ClipAnnotation,
+    ) -> Tuple[torch.Tensor, data.ClipAnnotation]:
+        """Apply a random echo; annotations are returned unchanged."""
+        # Random delay in samples in [0, max_delay). NOTE(review): a
+        # delay of 0 hits the empty-slice path in add_echo, which
+        # attenuates the signal instead of being a no-op.
+        delay = np.random.randint(0, self.max_delay)
+        weight = np.random.uniform(self.min_weight, self.max_weight)
+        return add_echo(wav, delay=delay, weight=weight), clip_annotation
+
+    # Registered as the builder for AddEchoConfig; ``source`` is part of
+    # the audio-augmentation builder signature but unused here.
+    @audio_augmentations.register(AddEchoConfig)
+    @staticmethod
+    def from_config(
+        config: AddEchoConfig,
+        samplerate: int,
+        source: AudioSource | None,
+    ):
+        """Build an AddEcho augmentation from its config."""
+        return AddEcho(
+            samplerate=samplerate,
+            min_weight=config.min_weight,
+            max_weight=config.max_weight,
+            max_delay=config.max_delay,
+        )
+
+
+def add_echo(
+ wav: torch.Tensor,
+ delay: int,
+ weight: float,
+) -> torch.Tensor:
+ """Add a synthetic echo to the audio waveform."""
+
+ slices = [slice(None)] * wav.ndim
+ slices[-1] = slice(None, -delay)
+ audio_delay = adjust_width(wav[tuple(slices)], wav.shape[-1]).roll(
+ delay, dims=-1
+ )
+ return mix_audio(wav, audio_delay, weight)
+
+
+class ScaleVolumeConfig(BaseConfig):
+    """Configuration for random volume scaling of the spectrogram."""
+
+    # Discriminator value for the config union.
+    name: Literal["scale_volume"] = "scale_volume"
+    # Probability of applying this augmentation to an example.
+    probability: float = 0.2
+    # Bounds of the uniform amplitude scaling factor.
+    min_scaling: float = 0.0
+    max_scaling: float = 2.0
+
+
+class ScaleVolume(torch.nn.Module):
+    """Augmentation that multiplies the spectrogram by a random factor."""
+
+    def __init__(self, min_scaling: float = 0.0, max_scaling: float = 2.0):
+        super().__init__()
+        self.min_scaling = min_scaling
+        self.max_scaling = max_scaling
+
+    def forward(
+        self,
+        spec: torch.Tensor,
+        clip_annotation: data.ClipAnnotation,
+    ) -> Tuple[torch.Tensor, data.ClipAnnotation]:
+        """Scale the spectrogram; annotations are returned unchanged."""
+        factor = np.random.uniform(self.min_scaling, self.max_scaling)
+        return scale_volume(spec, factor=factor), clip_annotation
+
+    # Registered as the builder for ScaleVolumeConfig.
+    @spec_augmentations.register(ScaleVolumeConfig)
+    @staticmethod
+    def from_config(config: ScaleVolumeConfig):
+        """Build a ScaleVolume augmentation from its config."""
+        return ScaleVolume(
+            min_scaling=config.min_scaling,
+            max_scaling=config.max_scaling,
+        )
+
+
+def scale_volume(spec: torch.Tensor, factor: float) -> torch.Tensor:
+ """Scale the amplitude of the spectrogram by a factor."""
+ return spec * factor
+
+
+class WarpConfig(BaseConfig):
+    """Configuration for random time warping of the spectrogram."""
+
+    # Discriminator value for the config union.
+    name: Literal["warp"] = "warp"
+    # Probability of applying this augmentation to an example.
+    probability: float = 0.2
+    # Maximum fractional deviation of the warp factor from 1.0.
+    delta: float = 0.04
+
+
+class Warp(torch.nn.Module):
+    """Augmentation that stretches/compresses the time axis slightly."""
+
+    def __init__(self, delta: float = 0.04) -> None:
+        super().__init__()
+        self.delta = delta
+
+    def forward(
+        self,
+        spec: torch.Tensor,
+        clip_annotation: data.ClipAnnotation,
+    ) -> Tuple[torch.Tensor, data.ClipAnnotation]:
+        """Warp both the spectrogram and its annotations consistently."""
+        # Warp factor drawn uniformly from [1 - delta, 1 + delta].
+        factor = np.random.uniform(1 - self.delta, 1 + self.delta)
+        return (
+            warp_spectrogram(spec, factor=factor),
+            warp_clip_annotation(clip_annotation, factor=factor),
+        )
+
+    # Registered as the builder for WarpConfig.
+    @spec_augmentations.register(WarpConfig)
+    @staticmethod
+    def from_config(config: WarpConfig):
+        """Build a Warp augmentation from its config."""
+        return Warp(delta=config.delta)
+
+
+def warp_sound_event_annotation(
+    sound_event_annotation: data.SoundEventAnnotation,
+    factor: float,
+    anchor: float,
+) -> data.SoundEventAnnotation:
+    """Scale an annotation's time geometry to match a warped spectrogram.
+
+    The geometry is scaled by ``1 / factor`` around ``anchor`` because
+    ``warp_spectrogram`` stretches content by ``factor`` and then resizes
+    back to the original width, effectively dividing time positions by
+    ``factor``. Annotations without geometry are returned unchanged.
+    """
+    sound_event = sound_event_annotation.sound_event
+    geometry = sound_event.geometry
+
+    if geometry is None:
+        return sound_event_annotation
+
+    sound_event = sound_event.model_copy(
+        update=dict(
+            geometry=scale_geometry(
+                geometry,
+                time=1 / factor,
+                time_anchor=anchor,
+            )
+        ),
+    )
+    return sound_event_annotation.model_copy(
+        update=dict(sound_event=sound_event)
+    )
+
+
+def warp_clip_annotation(
+    clip_annotation: data.ClipAnnotation,
+    factor: float,
+) -> data.ClipAnnotation:
+    """Warp all sound event annotations of a clip by ``factor``.
+
+    Each event is scaled around the clip's start time so the warp matches
+    ``warp_spectrogram`` applied to the same clip.
+    """
+    return clip_annotation.model_copy(
+        update=dict(
+            sound_events=[
+                warp_sound_event_annotation(
+                    sound_event,
+                    factor=factor,
+                    anchor=clip_annotation.clip.start_time,
+                )
+                for sound_event in clip_annotation.sound_events
+            ]
+        )
+    )
+
+
+def warp_spectrogram(
+    spec: torch.Tensor,
+    factor: float,
+) -> torch.Tensor:
+    """Apply time warping by resampling the time axis.
+
+    The spectrogram is cropped/padded to ``width * factor`` and then
+    bilinearly interpolated back to its original (height, width), which
+    stretches (factor < 1) or compresses (factor > 1) the content in
+    time. Output shape equals input shape.
+    """
+    width = spec.shape[-1]
+    height = spec.shape[-2]
+    # Interpolation target is the ORIGINAL shape.
+    target_shape = [height, width]
+    new_width = int(target_shape[-1] * factor)
+    # interpolate expects a batch dimension, hence unsqueeze/squeeze.
+    return torch.nn.functional.interpolate(
+        adjust_width(spec, new_width).unsqueeze(0),
+        size=target_shape,
+        mode="bilinear",
+    ).squeeze(0)
+
+
+class MaskTimeConfig(BaseConfig):
+    """Configuration for random time masking of the spectrogram."""
+
+    # Discriminator value for the config union.
+    name: Literal["mask_time"] = "mask_time"
+    # Probability of applying this augmentation to an example.
+    probability: float = 0.2
+    # Maximum mask width as a fraction of the spectrogram width.
+    max_perc: float = 0.05
+    # Maximum number of masks applied per example.
+    max_masks: int = 3
+
+
+class MaskTime(torch.nn.Module):
+ def __init__(
+ self,
+ max_perc: float = 0.05,
+ max_masks: int = 3,
+ mask_heatmaps: bool = False,
+ ) -> None:
+ super().__init__()
+ self.max_perc = max_perc
+ self.max_masks = max_masks
+ self.mask_heatmaps = mask_heatmaps
+
+ def forward(
+ self,
+ spec: torch.Tensor,
+ clip_annotation: data.ClipAnnotation,
+ ) -> Tuple[torch.Tensor, data.ClipAnnotation]:
+ num_masks = np.random.randint(1, self.max_masks + 1)
+ width = spec.shape[-1]
+
+ mask_size = np.random.randint(
+ low=0,
+ high=int(self.max_perc * width),
+ size=num_masks,
+ )
+ mask_start = np.random.randint(
+ low=0,
+ high=width - mask_size,
+ size=num_masks,
+ )
+ masks = [
+ (start, start + size)
+ for start, size in zip(mask_start, mask_size, strict=False)
+ ]
+ return mask_time(spec, masks), clip_annotation
+
+ @spec_augmentations.register(MaskTimeConfig)
+ @staticmethod
+ def from_config(config: MaskTimeConfig):
+ return MaskTime(
+ max_perc=config.max_perc,
+ max_masks=config.max_masks,
+ )
+
+
+def mask_time(
+    spec: torch.Tensor,
+    masks: List[Tuple[int, int]],
+    value: float = 0,
+) -> torch.Tensor:
+    """Apply time masking to the spectrogram.
+
+    Parameters
+    ----------
+    spec : torch.Tensor
+        Spectrogram with time on the last axis. Modified IN PLACE.
+    masks : List[Tuple[int, int]]
+        (start, end) column index pairs to fill.
+    value : float, default=0
+        Fill value.
+
+    Returns
+    -------
+    torch.Tensor
+        The same (mutated) tensor, returned for convenience.
+    """
+    for start, end in masks:
+        slices = [slice(None)] * spec.ndim
+        slices[-1] = slice(start, end)
+        spec[tuple(slices)] = value
+
+    return spec
+
+
+class MaskFrequencyConfig(BaseConfig):
+    """Configuration for random frequency masking of the spectrogram."""
+
+    # Discriminator value for the config union.
+    name: Literal["mask_freq"] = "mask_freq"
+    # Probability of applying this augmentation to an example.
+    probability: float = 0.2
+    # Maximum mask height as a fraction of the spectrogram height.
+    max_perc: float = 0.10
+    # Maximum number of masks applied per example.
+    max_masks: int = 3
+    # NOTE(review): forwarded to MaskFrequency but unused there.
+    mask_heatmaps: bool = False
+
+
+class MaskFrequency(torch.nn.Module):
+ def __init__(
+ self,
+ max_perc: float = 0.10,
+ max_masks: int = 3,
+ mask_heatmaps: bool = False,
+ ) -> None:
+ super().__init__()
+ self.max_perc = max_perc
+ self.max_masks = max_masks
+ self.mask_heatmaps = mask_heatmaps
+
+ def forward(
+ self,
+ spec: torch.Tensor,
+ clip_annotation: data.ClipAnnotation,
+ ) -> Tuple[torch.Tensor, data.ClipAnnotation]:
+ num_masks = np.random.randint(1, self.max_masks + 1)
+ height = spec.shape[-2]
+
+ mask_size = np.random.randint(
+ low=0,
+ high=int(self.max_perc * height),
+ size=num_masks,
+ )
+ mask_start = np.random.randint(
+ low=0,
+ high=height - mask_size,
+ size=num_masks,
+ )
+ masks = [
+ (start, start + size)
+ for start, size in zip(mask_start, mask_size, strict=False)
+ ]
+ return mask_frequency(spec, masks), clip_annotation
+
+ @spec_augmentations.register(MaskFrequencyConfig)
+ @staticmethod
+ def from_config(config: MaskFrequencyConfig):
+ return MaskFrequency(
+ max_perc=config.max_perc,
+ max_masks=config.max_masks,
+ )
+
+
+def mask_frequency(
+ spec: torch.Tensor,
+ masks: List[Tuple[int, int]],
+) -> torch.Tensor:
+ """Apply frequency masking to the spectrogram."""
+ for start, end in masks:
+ slices = [slice(None)] * spec.ndim
+ slices[-2] = slice(start, end)
+ spec[tuple(slices)] = 0
+
+ return spec
+
+
+# Discriminated union of audio-domain augmentation configs.
+AudioAugmentationConfig = Annotated[
+    MixAudioConfig | AddEchoConfig,
+    Field(discriminator="name"),
+]
+
+
+# Discriminated union of spectrogram-domain augmentation configs.
+SpectrogramAugmentationConfig = Annotated[
+    ScaleVolumeConfig | WarpConfig | MaskFrequencyConfig | MaskTimeConfig,
+    Field(discriminator="name"),
+]
+
+AugmentationConfig = Annotated[
+    MixAudioConfig
+    | AddEchoConfig
+    | ScaleVolumeConfig
+    | WarpConfig
+    | MaskFrequencyConfig
+    | MaskTimeConfig,
+    Field(discriminator="name"),
+]
+"""Type alias for the discriminated union of individual augmentation config."""
+
+
+class AugmentationsConfig(BaseConfig):
+    """Configuration for a sequence of data augmentations."""
+
+    # Whether augmentation is enabled. NOTE(review): not consulted by
+    # build_augmentations in this module — presumably checked by the
+    # caller; confirm.
+    enabled: bool = True
+
+    # Audio-domain augmentation steps, applied in order.
+    audio: List[AudioAugmentationConfig] = Field(default_factory=list)
+
+    # Spectrogram-domain augmentation steps, applied in order.
+    spectrogram: List[SpectrogramAugmentationConfig] = Field(
+        default_factory=list
+    )
+
+
+class MaybeApply(torch.nn.Module):
+    """Applies an augmentation function probabilistically."""
+
+    def __init__(
+        self,
+        augmentation: Augmentation,
+        probability: float = 0.2,
+    ):
+        """Initialize the wrapper.
+
+        Parameters
+        ----------
+        augmentation : Augmentation
+            Augmentation applied with probability ``probability``.
+        probability : float
+            Chance of applying the wrapped augmentation per call.
+        """
+        super().__init__()
+        self.augmentation = augmentation
+        self.probability = probability
+
+    # NOTE(review): defining __call__ directly bypasses nn.Module hooks;
+    # other modules in this file use ``forward`` — confirm intentional.
+    def __call__(
+        self,
+        tensor: torch.Tensor,
+        clip_annotation: data.ClipAnnotation,
+    ) -> Tuple[torch.Tensor, data.ClipAnnotation]:
+        """Apply the wrapped augmentation with configured probability."""
+        if np.random.random() > self.probability:
+            # Skip: pass the example through unchanged.
+            return tensor, clip_annotation
+
+        return self.augmentation(tensor, clip_annotation)
+
+
+# Default augmentation pipeline: all built-in steps with their default
+# probabilities and parameters.
+DEFAULT_AUGMENTATION_CONFIG: AugmentationsConfig = AugmentationsConfig(
+    enabled=True,
+    audio=[
+        MixAudioConfig(),
+        AddEchoConfig(),
+    ],
+    spectrogram=[
+        ScaleVolumeConfig(),
+        WarpConfig(),
+        MaskTimeConfig(),
+        MaskFrequencyConfig(),
+    ],
+)
+
+
+class AugmentationSequence(torch.nn.Module):
+    """Chains augmentations, threading (tensor, annotation) through each."""
+
+    def __init__(self, augmentations: List[torch.nn.Module]):
+        super().__init__()
+        # ModuleList so child modules are registered properly.
+        self.augmentations = torch.nn.ModuleList(augmentations)
+
+    def forward(
+        self,
+        tensor: torch.Tensor,
+        clip_annotation: data.ClipAnnotation,
+    ) -> Tuple[torch.Tensor, data.ClipAnnotation]:
+        """Apply each augmentation in order."""
+        for aug in self.augmentations:
+            tensor, clip_annotation = aug(tensor, clip_annotation)
+
+        return tensor, clip_annotation
+
+
+def build_audio_augmentations(
+    steps: Sequence[AudioAugmentationConfig] | None = None,
+    samplerate: int = TARGET_SAMPLERATE_HZ,
+    audio_source: AudioSource | None = None,
+) -> Augmentation | None:
+    """Build the audio-domain augmentation chain from config steps.
+
+    Returns None when no steps are configured. Each built step is wrapped
+    in MaybeApply with its configured probability.
+    """
+    if not steps:
+        return None
+
+    augmentations = []
+
+    for step_config in steps:
+        augmentation = audio_augmentations.build(
+            step_config, samplerate, audio_source
+        )
+
+        # A registry builder may decline to produce an augmentation.
+        if augmentation is None:
+            continue
+
+        augmentations.append(
+            MaybeApply(
+                augmentation=augmentation,
+                probability=step_config.probability,
+            )
+        )
+
+    return AugmentationSequence(augmentations)
+
+
+def build_spectrogram_augmentations(
+    steps: Sequence[SpectrogramAugmentationConfig] | None = None,
+) -> Augmentation | None:
+    """Build the spectrogram-domain augmentation chain from config steps.
+
+    Returns None when no steps are configured. Each built step is wrapped
+    in MaybeApply with its configured probability.
+    """
+    if not steps:
+        return None
+
+    augmentations = []
+
+    for step_config in steps:
+        augmentation = spec_augmentations.build(step_config)
+
+        # A registry builder may decline to produce an augmentation.
+        if augmentation is None:
+            continue
+
+        augmentations.append(
+            MaybeApply(
+                augmentation=augmentation,
+                probability=step_config.probability,
+            )
+        )
+
+    return AugmentationSequence(augmentations)
+
+
+def build_augmentations(
+    samplerate: int,
+    config: AugmentationsConfig | None = None,
+    audio_source: AudioSource | None = None,
+) -> Tuple[Augmentation | None, Augmentation | None]:
+    """Build a composite augmentation pipeline function from configuration.
+
+    Returns an (audio_augmentation, spectrogram_augmentation) pair;
+    either element may be None when the corresponding step list is empty.
+    Falls back to DEFAULT_AUGMENTATION_CONFIG when no config is given.
+    """
+    config = config or DEFAULT_AUGMENTATION_CONFIG
+
+    # Lazy formatting: the YAML dump only runs if debug logging is on.
+    logger.opt(lazy=True).debug(
+        "Building augmentations with config: \n{}",
+        lambda: config.to_yaml_string(),
+    )
+
+    audio_augmentation = build_audio_augmentations(
+        steps=config.audio,
+        samplerate=samplerate,
+        audio_source=audio_source,
+    )
+
+    spectrogram_augmentation = build_spectrogram_augmentations(
+        steps=config.spectrogram,
+    )
+
+    return audio_augmentation, spectrogram_augmentation
+
+
+class RandomAudioSource:
+    """AudioSource that samples a random annotated subclip for mixing."""
+
+    def __init__(
+        self,
+        clip_annotations: Sequence[data.ClipAnnotation],
+        audio_loader: AudioLoader,
+    ):
+        self.audio_loader = audio_loader
+        self.clip_annotations = clip_annotations
+
+    def __call__(
+        self,
+        duration: float,
+    ) -> Tuple[torch.Tensor, data.ClipAnnotation]:
+        """Pick a random clip and return a subclip of ``duration`` seconds.
+
+        The loader returns a numpy array (see ``torch.from_numpy``); a
+        channel dimension is added via ``unsqueeze(0)``.
+        """
+        index = int(np.random.randint(len(self.clip_annotations)))
+        clip_annotation = get_subclip_annotation(
+            self.clip_annotations[index],
+            duration=duration,
+            max_empty=0,
+        )
+        wav = self.audio_loader.load_clip(clip_annotation.clip)
+        return torch.from_numpy(wav).unsqueeze(0), clip_annotation
diff --git a/src/batdetect2/train/callbacks.py b/src/batdetect2/train/callbacks.py
new file mode 100644
index 00000000..0f9a0625
--- /dev/null
+++ b/src/batdetect2/train/callbacks.py
@@ -0,0 +1,127 @@
+from typing import Any, List
+
+from lightning import LightningModule, Trainer
+from lightning.pytorch.callbacks import Callback
+from soundevent import data
+from torch.utils.data import DataLoader
+
+from batdetect2.evaluate.types import EvaluatorProtocol
+from batdetect2.logging import get_image_logger
+from batdetect2.models.types import ModelOutput
+from batdetect2.outputs import OutputTransformProtocol, build_output_transform
+from batdetect2.postprocess.types import ClipDetections
+from batdetect2.targets.types import ROIMapperProtocol, TargetProtocol
+from batdetect2.train.dataset import ValidationDataset
+from batdetect2.train.lightning import TrainingModule
+from batdetect2.train.types import TrainExample
+
+
+class ValidationMetrics(Callback):
+    """Lightning callback that evaluates predictions each validation epoch.
+
+    Accumulates ground-truth annotations and model predictions per batch,
+    then delegates metric computation and plot generation to the provided
+    evaluator at epoch end.
+    """
+
+    def __init__(
+        self,
+        evaluator: EvaluatorProtocol,
+        targets: TargetProtocol,
+        roi_mapper: ROIMapperProtocol,
+        output_transform: OutputTransformProtocol | None = None,
+    ):
+        super().__init__()
+
+        self.evaluator = evaluator
+        self.targets = targets
+        self.roi_mapper = roi_mapper
+        # Built lazily on the first validation batch when not provided.
+        self.output_transform = output_transform
+
+        # Per-epoch accumulators; reset at validation epoch start.
+        self._clip_annotations: List[data.ClipAnnotation] = []
+        self._predictions: List[ClipDetections] = []
+
+    def get_dataset(self, trainer: Trainer) -> ValidationDataset:
+        """Return the validation dataset (assumes a single val dataloader)."""
+        dataloaders = trainer.val_dataloaders
+        assert isinstance(dataloaders, DataLoader)
+        dataset = dataloaders.dataset
+        assert isinstance(dataset, ValidationDataset)
+        return dataset
+
+    def generate_plots(
+        self,
+        eval_outputs: Any,
+        pl_module: LightningModule,
+    ):
+        """Log evaluator figures via the logger's image plotter, if any."""
+        plotter = get_image_logger(pl_module.logger)  # type: ignore
+
+        # No image-capable logger configured; skip plotting.
+        if plotter is None:
+            return
+
+        for figure_name, fig in self.evaluator.generate_plots(eval_outputs):
+            plotter(figure_name, fig, pl_module.global_step)
+
+    def log_metrics(
+        self,
+        eval_outputs: Any,
+        pl_module: LightningModule,
+    ):
+        """Compute evaluator metrics and log them on the module."""
+        metrics = self.evaluator.compute_metrics(eval_outputs)
+        pl_module.log_dict(metrics)
+
+    def on_validation_epoch_end(
+        self,
+        trainer: Trainer,
+        pl_module: LightningModule,
+    ) -> None:
+        """Evaluate accumulated predictions, then log metrics and plots."""
+        eval_outputs = self.evaluator.evaluate(
+            self._clip_annotations,
+            self._predictions,
+        )
+
+        self.log_metrics(eval_outputs, pl_module)
+        self.generate_plots(eval_outputs, pl_module)
+
+        return super().on_validation_epoch_end(trainer, pl_module)
+
+    def on_validation_epoch_start(
+        self,
+        trainer: Trainer,
+        pl_module: LightningModule,
+    ) -> None:
+        """Reset the per-epoch accumulators."""
+        self._clip_annotations = []
+        self._predictions = []
+        return super().on_validation_epoch_start(trainer, pl_module)
+
+    def on_validation_batch_end(  # type: ignore
+        self,
+        trainer: Trainer,
+        pl_module: TrainingModule,
+        outputs: ModelOutput,
+        batch: TrainExample,
+        batch_idx: int,
+        dataloader_idx: int = 0,
+    ) -> None:
+        """Postprocess batch outputs into detections and accumulate them."""
+        model = pl_module.model
+        # Lazily build the output transform on first use.
+        if self.output_transform is None:
+            self.output_transform = build_output_transform(
+                targets=self.targets,
+                roi_mapper=self.roi_mapper,
+            )
+
+        output_transform = self.output_transform
+        assert output_transform is not None
+
+        dataset = self.get_dataset(trainer)
+
+        # Recover the full clip annotations via the example indices
+        # carried in the batch.
+        clip_annotations = [
+            dataset.clip_annotations[int(example_idx)]
+            for example_idx in batch.idx
+        ]
+
+        clip_detections = model.postprocessor(outputs)
+        predictions = [
+            output_transform.to_clip_detections(
+                detections=clip_dets,
+                clip=clip_annotation.clip,
+            )
+            for clip_annotation, clip_dets in zip(
+                clip_annotations, clip_detections, strict=False
+            )
+        ]
+
+        self._clip_annotations.extend(clip_annotations)
+        self._predictions.extend(predictions)
diff --git a/src/batdetect2/train/checkpoints.py b/src/batdetect2/train/checkpoints.py
new file mode 100644
index 00000000..be1c165c
--- /dev/null
+++ b/src/batdetect2/train/checkpoints.py
@@ -0,0 +1,150 @@
+from pathlib import Path
+from typing import Literal
+
+from lightning.pytorch.callbacks import Callback, ModelCheckpoint
+from soundevent.data import PathLike
+
+from batdetect2.core import BaseConfig
+
+__all__ = [
+ "CheckpointConfig",
+ "DEFAULT_CHECKPOINT",
+ "build_checkpoint_callback",
+ "get_bundled_checkpoint_names",
+ "resolve_checkpoint_path",
+]
+
+PACKAGE_ROOT = Path(__file__).resolve().parents[1]
+DEFAULT_CHECKPOINT_DIR: Path = Path("outputs") / "checkpoints"
+DEFAULT_CHECKPOINT = "uk_same"
+CHECKPOINT_ALIASES = {
+ DEFAULT_CHECKPOINT: PACKAGE_ROOT
+ / "models"
+ / "checkpoints"
+ / "batdetect2_uk_same.ckpt",
+ "batdetect2_uk_same": PACKAGE_ROOT
+ / "models"
+ / "checkpoints"
+ / "batdetect2_uk_same.ckpt",
+}
+
+
+class CheckpointConfig(BaseConfig):
+    """Configuration for Lightning's ModelCheckpoint callback."""
+
+    # Base directory for checkpoint files.
+    checkpoint_dir: Path = DEFAULT_CHECKPOINT_DIR
+    # Metric to monitor for "best" selection; None saves by epoch only.
+    monitor: str | None = None
+    # Whether the monitored metric should be maximized or minimized.
+    mode: str = "max"
+    # Number of best checkpoints to keep.
+    save_top_k: int = 1
+    # Save distributable inference checkpoints by default.
+    save_weights_only: bool = True
+    # Checkpoint filename template; None uses Lightning's default.
+    filename: str | None = None
+    # "link" symlinks last.ckpt to the most recent checkpoint.
+    save_last: bool | Literal["link"] = "link"
+    # Save cadence in epochs.
+    every_n_epochs: int | None = 1
+
+
+def build_checkpoint_callback(
+    config: CheckpointConfig | None = None,
+    checkpoint_dir: Path | None = None,
+    experiment_name: str | None = None,
+    run_name: str | None = None,
+) -> Callback:
+    """Create a ModelCheckpoint callback from configuration.
+
+    Parameters
+    ----------
+    config : CheckpointConfig, optional
+        Checkpoint settings; defaults are used when omitted.
+    checkpoint_dir : Path, optional
+        Overrides ``config.checkpoint_dir`` when provided.
+    experiment_name, run_name : str, optional
+        Appended as subdirectories (in that order) to the checkpoint dir.
+
+    Returns
+    -------
+    Callback
+        A configured ModelCheckpoint; the directory is created eagerly.
+    """
+    config = config or CheckpointConfig()
+
+    if checkpoint_dir is None:
+        checkpoint_dir = config.checkpoint_dir
+
+    checkpoint_dir = Path(checkpoint_dir)
+
+    if experiment_name is not None:
+        checkpoint_dir = checkpoint_dir / experiment_name
+
+    if run_name is not None:
+        checkpoint_dir = checkpoint_dir / run_name
+
+    Path(checkpoint_dir).mkdir(parents=True, exist_ok=True)
+
+    return ModelCheckpoint(
+        dirpath=str(checkpoint_dir),
+        save_top_k=config.save_top_k,
+        save_weights_only=config.save_weights_only,
+        monitor=config.monitor,
+        mode=config.mode,
+        filename=config.filename,
+        save_last=config.save_last,
+        every_n_epochs=config.every_n_epochs,
+    )
+
+
+def get_bundled_checkpoint_names() -> tuple[str, ...]:
+ """Return the supported bundled checkpoint aliases."""
+ return tuple(CHECKPOINT_ALIASES.keys())
+
+
+def resolve_checkpoint_from_huggingface(path: str) -> Path:
+    """Resolve a Hugging Face checkpoint URI.
+
+    Downloads (or reuses the cached copy of) the file referenced by an
+    ``hf://owner/repo/path`` URI and returns its local path.
+
+    NOTE(review): a missing ``huggingface_hub`` raises ValueError rather
+    than ImportError — presumably for uniform error handling upstream;
+    confirm.
+    """
+    try:
+        from huggingface_hub import hf_hub_download
+    except ImportError as error:
+        raise ValueError(
+            "Hugging Face checkpoint support is not installed. "
+            "Install it with `pip install batdetect2[huggingface]`."
+        ) from error
+
+    repo_id, filename = _parse_huggingface_uri(path)
+    return Path(hf_hub_download(repo_id=repo_id, filename=filename))
+
+
+def resolve_checkpoint_path(path: PathLike | str | None = None) -> Path:
+    """Resolve a local path, alias, or Hugging Face checkpoint URI.
+
+    Parameters
+    ----------
+    path : PathLike | str | None
+        Local checkpoint path, checkpoint alias, or a Hugging Face
+        URI of the form ``hf://owner/repo/path/to/checkpoint.ckpt``. If
+        omitted, the default alias checkpoint is used.
+
+    Returns
+    -------
+    Path
+        Resolved local filesystem path to the checkpoint.
+
+    Raises
+    ------
+    FileNotFoundError
+        If the path is neither an existing file, a known alias, nor a
+        Hugging Face URI.
+    ValueError
+        If a Hugging Face URI is malformed or hub support is missing.
+    """
+    if path is None:
+        path = DEFAULT_CHECKPOINT
+
+    if isinstance(path, str) and path.startswith("hf://"):
+        return resolve_checkpoint_from_huggingface(path)
+
+    # Aliases resolve to checkpoint files bundled with the package.
+    if isinstance(path, str) and path in CHECKPOINT_ALIASES:
+        return Path(CHECKPOINT_ALIASES[path])
+
+    path = Path(path)
+    if path.exists():
+        return path.resolve()
+
+    bundled_names = ", ".join(get_bundled_checkpoint_names())
+    raise FileNotFoundError(
+        f"Checkpoint not found: {path}. "
+        "Expected a local path, a checkpoint alias "
+        f"({bundled_names}), or a Hugging Face URI."
+    )
+
+
+def _parse_huggingface_uri(uri: str) -> tuple[str, str]:
+ prefix = "hf://"
+ if not uri.startswith(prefix):
+ raise ValueError(
+ "Hugging Face checkpoint URIs must start with 'hf://'."
+ )
+
+ without_prefix = uri.removeprefix(prefix).strip("/")
+ parts = without_prefix.split("/")
+
+ if len(parts) < 3:
+ raise ValueError(
+ "Hugging Face checkpoint URIs must be in the form "
+ "'hf://owner/repo/path/to/checkpoint.ckpt'."
+ )
+
+ repo_id = "/".join(parts[:2])
+ filename = "/".join(parts[2:])
+ return repo_id, filename
diff --git a/src/batdetect2/train/config.py b/src/batdetect2/train/config.py
new file mode 100644
index 00000000..94584cbc
--- /dev/null
+++ b/src/batdetect2/train/config.py
@@ -0,0 +1,52 @@
+from pydantic import Field
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.evaluate.config import EvaluationConfig
+from batdetect2.train.checkpoints import CheckpointConfig
+from batdetect2.train.dataset import TrainLoaderConfig, ValLoaderConfig
+from batdetect2.train.labels import LabelConfig
+from batdetect2.train.losses import LossConfig
+from batdetect2.train.optimizers import AdamOptimizerConfig, OptimizerConfig
+from batdetect2.train.schedulers import (
+ CosineAnnealingSchedulerConfig,
+ SchedulerConfig,
+)
+
+__all__ = [
+ "TrainingConfig",
+]
+
+
+class PLTrainerConfig(BaseConfig):
+    """Subset of PyTorch Lightning ``Trainer`` arguments as a config.
+
+    Field names mirror the corresponding ``Trainer`` keyword arguments.
+    """
+
+    accelerator: str = "auto"
+    accumulate_grad_batches: int = 1
+    deterministic: bool = True
+    check_val_every_n_epoch: int = 1
+    devices: str | int = "auto"
+    enable_checkpointing: bool = True
+    gradient_clip_val: float | None = None
+    limit_train_batches: int | float | None = None
+    limit_test_batches: int | float | None = None
+    limit_val_batches: int | float | None = None
+    log_every_n_steps: int | None = None
+    max_epochs: int | None = 200
+    min_epochs: int | None = None
+    max_steps: int | None = None
+    min_steps: int | None = None
+    max_time: str | None = None
+    precision: str | None = None
+    val_check_interval: int | float | None = None
+
+
+class TrainingConfig(BaseConfig):
+    """Top-level training configuration aggregating all sub-configs."""
+
+    # Data loader settings for training and validation splits.
+    train_loader: TrainLoaderConfig = Field(default_factory=TrainLoaderConfig)
+    val_loader: ValLoaderConfig = Field(default_factory=ValLoaderConfig)
+    # Optimization: optimizer, LR scheduler, and loss settings.
+    optimizer: OptimizerConfig = Field(default_factory=AdamOptimizerConfig)
+    scheduler: SchedulerConfig = Field(
+        default_factory=CosineAnnealingSchedulerConfig
+    )
+    loss: LossConfig = Field(default_factory=LossConfig)
+    # Lightning Trainer arguments.
+    trainer: PLTrainerConfig = Field(default_factory=PLTrainerConfig)
+    # Target heatmap labelling settings.
+    labels: LabelConfig = Field(default_factory=LabelConfig)
+    # Validation-time evaluation settings.
+    validation: EvaluationConfig = Field(default_factory=EvaluationConfig)
+    # Checkpointing settings.
+    checkpoints: CheckpointConfig = Field(default_factory=CheckpointConfig)
diff --git a/src/batdetect2/train/dataset.py b/src/batdetect2/train/dataset.py
new file mode 100644
index 00000000..e9e1883e
--- /dev/null
+++ b/src/batdetect2/train/dataset.py
@@ -0,0 +1,331 @@
+from typing import List, Sequence
+
+import torch
+from loguru import logger
+from pydantic import Field
+from soundevent import data
+from torch.utils.data import DataLoader, Dataset
+
+from batdetect2.audio import ClipConfig, build_audio_loader, build_clipper
+from batdetect2.audio.clips import PaddedClipConfig
+from batdetect2.audio.types import AudioLoader, ClipperProtocol
+from batdetect2.core import BaseConfig
+from batdetect2.core.arrays import adjust_width
+from batdetect2.preprocess import build_preprocessor
+from batdetect2.preprocess.types import PreprocessorProtocol
+from batdetect2.train.augmentations import (
+ DEFAULT_AUGMENTATION_CONFIG,
+ AugmentationsConfig,
+ RandomAudioSource,
+ build_augmentations,
+)
+from batdetect2.train.labels import build_clip_labeler
+from batdetect2.train.types import Augmentation, ClipLabeller, TrainExample
+
+__all__ = [
+ "TrainingDataset",
+ "ValidationDataset",
+ "build_val_loader",
+ "build_train_loader",
+ "build_train_dataset",
+ "build_val_dataset",
+]
+
+
class TrainingDataset(Dataset):
    """Torch dataset producing labelled training examples from annotations.

    Each item is (optionally) clipped, loaded as audio, augmented in the
    waveform domain, converted to a spectrogram, augmented in the spectrogram
    domain, and finally rendered into target heatmaps by the labeller.
    """

    def __init__(
        self,
        clip_annotations: Sequence[data.ClipAnnotation],
        audio_loader: AudioLoader,
        preprocessor: PreprocessorProtocol,
        labeller: ClipLabeller,
        clipper: ClipperProtocol | None = None,
        audio_augmentation: Augmentation | None = None,
        spectrogram_augmentation: Augmentation | None = None,
        audio_dir: data.PathLike | None = None,
    ):
        self.clip_annotations = clip_annotations
        self.audio_loader = audio_loader
        self.preprocessor = preprocessor
        self.labeller = labeller
        self.clipper = clipper
        self.audio_augmentation = audio_augmentation
        self.spectrogram_augmentation = spectrogram_augmentation
        self.audio_dir = audio_dir

    def __len__(self):
        """Number of annotated clips available."""
        return len(self.clip_annotations)

    def __getitem__(self, index) -> TrainExample:
        """Build the complete training example for the clip at ``index``."""
        annotation = self.clip_annotations[index]

        if self.clipper is not None:
            annotation = self.clipper(annotation)

        clip = annotation.clip

        # Prepend the channel dimension expected by the preprocessor.
        waveform = torch.tensor(
            self.audio_loader.load_clip(clip, audio_dir=self.audio_dir)
        ).unsqueeze(0)

        if self.audio_augmentation is not None:
            waveform, annotation = self.audio_augmentation(
                waveform,
                annotation,
            )

        spectrogram = self.preprocessor(waveform)

        if self.spectrogram_augmentation is not None:
            spectrogram, annotation = self.spectrogram_augmentation(
                spectrogram,
                annotation,
            )

        heatmaps = self.labeller(annotation, spectrogram)

        return TrainExample(
            spec=spectrogram,
            detection_heatmap=heatmaps.detection,
            class_heatmap=heatmaps.classes,
            size_heatmap=heatmaps.size,
            idx=torch.tensor(index),
            start_time=torch.tensor(clip.start_time),
            end_time=torch.tensor(clip.end_time),
        )
+
+
class ValidationDataset(Dataset):
    """Torch dataset producing labelled validation examples (no augmentation).

    Identical pipeline to :class:`TrainingDataset` minus the waveform and
    spectrogram augmentation stages.
    """

    def __init__(
        self,
        clip_annotations: Sequence[data.ClipAnnotation],
        audio_loader: AudioLoader,
        preprocessor: PreprocessorProtocol,
        labeller: ClipLabeller,
        clipper: ClipperProtocol | None = None,
        audio_dir: data.PathLike | None = None,
    ):
        self.clip_annotations = clip_annotations
        self.audio_loader = audio_loader
        self.preprocessor = preprocessor
        self.labeller = labeller
        self.clipper = clipper
        self.audio_dir = audio_dir

    def __len__(self):
        """Number of annotated clips available."""
        return len(self.clip_annotations)

    def __getitem__(self, index) -> TrainExample:
        """Load, preprocess, and label the clip at ``index``."""
        annotation = self.clip_annotations[index]

        if self.clipper is not None:
            annotation = self.clipper(annotation)

        clip = annotation.clip

        # Prepend the channel dimension expected by the preprocessor.
        waveform = torch.tensor(
            self.audio_loader.load_clip(clip, audio_dir=self.audio_dir)
        ).unsqueeze(0)

        spectrogram = self.preprocessor(waveform)
        heatmaps = self.labeller(annotation, spectrogram)

        return TrainExample(
            spec=spectrogram,
            detection_heatmap=heatmaps.detection,
            class_heatmap=heatmaps.classes,
            size_heatmap=heatmaps.size,
            idx=torch.tensor(index),
            start_time=torch.tensor(clip.start_time),
            end_time=torch.tensor(clip.end_time),
        )
+
+
class TrainLoaderConfig(BaseConfig):
    """Configuration for the training data loader."""

    # Number of examples per training batch.
    batch_size: int = 8

    # NOTE(review): training data is usually shuffled; confirm that a
    # default of False is intentional here.
    shuffle: bool = False

    # Augmentation pipeline; copied so edits don't mutate the shared default.
    augmentations: AugmentationsConfig = Field(
        default_factory=lambda: DEFAULT_AUGMENTATION_CONFIG.model_copy()
    )

    # How clips are extracted from recordings; defaults to PaddedClipConfig.
    clipping_strategy: ClipConfig = Field(
        default_factory=lambda: PaddedClipConfig()
    )
+
+
def build_train_loader(
    clip_annotations: Sequence[data.ClipAnnotation],
    audio_loader: AudioLoader | None = None,
    labeller: ClipLabeller | None = None,
    preprocessor: PreprocessorProtocol | None = None,
    config: TrainLoaderConfig | None = None,
    num_workers: int = 0,
) -> DataLoader:
    """Create the training ``DataLoader``.

    Builds a :class:`TrainingDataset` from the annotations (constructing
    default components for any not supplied) and wraps it with batching and
    width-padding collation.
    """
    if config is None:
        config = TrainLoaderConfig()

    logger.info("Building training data loader...")
    logger.opt(lazy=True).debug(
        "Training data loader config: \n{config}",
        config=lambda: config.to_yaml_string(exclude_none=True),
    )

    dataset = build_train_dataset(
        clip_annotations,
        audio_loader=audio_loader,
        labeller=labeller,
        preprocessor=preprocessor,
        config=config,
    )

    return DataLoader(
        dataset,
        batch_size=config.batch_size,
        shuffle=config.shuffle,
        num_workers=num_workers,
        collate_fn=_collate_fn,
    )
+
+
class ValLoaderConfig(BaseConfig):
    """Configuration for the validation data loader."""

    # How clips are extracted from recordings; defaults to PaddedClipConfig.
    clipping_strategy: ClipConfig = Field(
        default_factory=lambda: PaddedClipConfig()
    )
+
+
def build_val_loader(
    clip_annotations: Sequence[data.ClipAnnotation],
    audio_loader: AudioLoader | None = None,
    labeller: ClipLabeller | None = None,
    preprocessor: PreprocessorProtocol | None = None,
    config: ValLoaderConfig | None = None,
    num_workers: int = 0,
) -> DataLoader:
    """Create the validation ``DataLoader``.

    Builds a :class:`ValidationDataset` from the annotations (constructing
    default components for any not supplied). Validation always uses batch
    size 1 and no shuffling.
    """
    logger.info("Building validation data loader...")
    config = config or ValLoaderConfig()
    logger.opt(lazy=True).debug(
        "Validation data loader config: \n{config}",
        config=lambda: config.to_yaml_string(exclude_none=True),
    )

    val_dataset = build_val_dataset(
        clip_annotations,
        audio_loader=audio_loader,
        labeller=labeller,
        preprocessor=preprocessor,
        config=config,
    )

    return DataLoader(
        val_dataset,
        # One clip per batch; clips may differ in width before collation.
        batch_size=1,
        shuffle=False,
        num_workers=num_workers,
        collate_fn=_collate_fn,
    )
+
+
def build_train_dataset(
    clip_annotations: Sequence[data.ClipAnnotation],
    audio_loader: AudioLoader | None = None,
    labeller: ClipLabeller | None = None,
    preprocessor: PreprocessorProtocol | None = None,
    config: TrainLoaderConfig | None = None,
) -> TrainingDataset:
    """Assemble a :class:`TrainingDataset`.

    Any component not supplied (audio loader, preprocessor, labeller) is
    built with its default factory. Augmentations are only constructed when
    enabled in the config; they draw background examples from the same
    annotation pool.
    """
    logger.info("Building training dataset...")
    if config is None:
        config = TrainLoaderConfig()

    clipper = build_clipper(config=config.clipping_strategy)

    if audio_loader is None:
        audio_loader = build_audio_loader()

    if preprocessor is None:
        preprocessor = build_preprocessor()

    if labeller is None:
        labeller = build_clip_labeler(
            min_freq=preprocessor.min_freq,
            max_freq=preprocessor.max_freq,
        )

    # Source of random background examples for mixing-style augmentations.
    example_source = RandomAudioSource(
        clip_annotations,
        audio_loader=audio_loader,
    )

    audio_augmentation = None
    spectrogram_augmentation = None
    if config.augmentations.enabled:
        audio_augmentation, spectrogram_augmentation = build_augmentations(
            samplerate=preprocessor.input_samplerate,
            config=config.augmentations,
            audio_source=example_source,
        )
    else:
        logger.debug("No augmentations configured for training dataset.")

    return TrainingDataset(
        clip_annotations,
        audio_loader=audio_loader,
        labeller=labeller,
        clipper=clipper,
        preprocessor=preprocessor,
        audio_augmentation=audio_augmentation,
        spectrogram_augmentation=spectrogram_augmentation,
    )
+
+
def build_val_dataset(
    clip_annotations: Sequence[data.ClipAnnotation],
    audio_loader: AudioLoader | None = None,
    labeller: ClipLabeller | None = None,
    preprocessor: PreprocessorProtocol | None = None,
    config: ValLoaderConfig | None = None,
) -> ValidationDataset:
    """Assemble a :class:`ValidationDataset`.

    Any component not supplied (audio loader, preprocessor, labeller) is
    built with its default factory. No augmentations are applied.
    """
    logger.info("Building validation dataset...")
    if config is None:
        config = ValLoaderConfig()

    if audio_loader is None:
        audio_loader = build_audio_loader()

    if preprocessor is None:
        preprocessor = build_preprocessor()

    if labeller is None:
        labeller = build_clip_labeler(
            min_freq=preprocessor.min_freq,
            max_freq=preprocessor.max_freq,
        )

    return ValidationDataset(
        clip_annotations,
        audio_loader=audio_loader,
        labeller=labeller,
        preprocessor=preprocessor,
        clipper=build_clipper(config.clipping_strategy),
    )
+
+
def _collate_fn(batch: List[TrainExample]) -> TrainExample:
    """Pad every example to the widest spectrogram, then stack into a batch.

    Clips may produce spectrograms of different widths; each tensor field is
    width-adjusted to the batch maximum before stacking.
    """
    target_width = max(example.spec.shape[-1] for example in batch)

    def _stack_padded(field: str) -> torch.Tensor:
        # Pad/crop the named field of every example to target_width.
        return torch.stack(
            [
                adjust_width(getattr(example, field), target_width)
                for example in batch
            ]
        )

    return TrainExample(
        spec=_stack_padded("spec"),
        detection_heatmap=_stack_padded("detection_heatmap"),
        size_heatmap=_stack_padded("size_heatmap"),
        class_heatmap=_stack_padded("class_heatmap"),
        idx=torch.stack([example.idx for example in batch]),
        start_time=torch.stack([example.start_time for example in batch]),
        end_time=torch.stack([example.end_time for example in batch]),
    )
diff --git a/src/batdetect2/train/labels.py b/src/batdetect2/train/labels.py
new file mode 100644
index 00000000..790abddf
--- /dev/null
+++ b/src/batdetect2/train/labels.py
@@ -0,0 +1,159 @@
+"""Generate heatmap training targets for BatDetect2 models.
+
+This module is responsible for creating the target labels used for training
+BatDetect2 models. It converts sound event annotations for an audio clip into
+the specific multi-channel heatmap formats required by the neural network.
+"""
+
+from functools import partial
+
+import numpy as np
+import torch
+from loguru import logger
+from soundevent import data
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.preprocess import MAX_FREQ, MIN_FREQ
+from batdetect2.targets import (
+ build_roi_mapping,
+ build_targets,
+ iterate_encoded_sound_events,
+)
+from batdetect2.targets.types import ROIMapperProtocol, TargetProtocol
+from batdetect2.train.types import ClipLabeller, Heatmaps
+
+__all__ = [
+ "LabelConfig",
+ "build_clip_labeler",
+ "generate_heatmaps",
+]
+
+
+SIZE_DIMENSION = "dimension"
+"""Dimension name for the size heatmap."""
+
+
class LabelConfig(BaseConfig):
    """Configuration parameters for heatmap generation.

    Attributes
    ----------
    sigma : float, default=2.0
        Standard deviation, in pixels, of the Gaussian blobs drawn on the
        detection and class heatmaps.
        NOTE(review): the docstring previously said the default was 3.0,
        which matches ``generate_heatmaps``'s own ``target_sigma`` default
        but not this field's actual default of 2.0 — confirm which value is
        intended.
    """

    sigma: float = 2.0
+
+
def build_clip_labeler(
    targets: TargetProtocol | None = None,
    roi_mapper: ROIMapperProtocol | None = None,
    min_freq: float = MIN_FREQ,
    max_freq: float = MAX_FREQ,
    config: LabelConfig | None = None,
) -> ClipLabeller:
    """Construct the final clip labelling function.

    Returns ``generate_heatmaps`` with the targets, ROI mapper, frequency
    range, and sigma bound in, so callers only supply the clip annotation
    and spectrogram.
    """
    if config is None:
        config = LabelConfig()
    logger.opt(lazy=True).debug(
        "Building clip labeler with config: \n{}",
        lambda: config.to_yaml_string(),
    )

    if targets is None:
        targets = build_targets()
    if roi_mapper is None:
        roi_mapper = build_roi_mapping()

    return partial(
        generate_heatmaps,
        targets=targets,
        roi_mapper=roi_mapper,
        min_freq=min_freq,
        max_freq=max_freq,
        target_sigma=config.sigma,
    )
+
+
def map_to_pixels(x, size, min_val, max_val) -> int:
    """Map a value in ``[min_val, max_val]`` to a pixel index in ``[0, size]``.

    Values outside the range are clamped by ``np.interp``; the interpolated
    position is truncated toward zero, so ``x == max_val`` yields ``size``
    (one past the last valid index — callers must bounds-check).
    """
    pixel_position = np.interp(x, [min_val, max_val], [0, size])
    return int(pixel_position)
+
+
def generate_heatmaps(
    clip_annotation: data.ClipAnnotation,
    spec: torch.Tensor,
    targets: TargetProtocol,
    roi_mapper: ROIMapperProtocol,
    min_freq: float,
    max_freq: float,
    target_sigma: float = 3.0,
    dtype=torch.float32,
) -> Heatmaps:
    """Generate training heatmaps for a single annotated clip.

    Each encoded sound event is rendered as a Gaussian blob on the detection
    heatmap and on its class channel; the event's size values are written at
    its (frequency, time) pixel of the size heatmap.

    Parameters
    ----------
    clip_annotation
        Annotated clip whose sound events are rendered.
    spec
        Spectrogram tensor; only its last two dimensions (height, width) and
        its device are used.
    targets
        Provides the class vocabulary and event encoding.
    roi_mapper
        Maps each event's region of interest to a position and size values.
    min_freq, max_freq
        Frequency range represented by the vertical axis, used to map event
        frequencies to rows.
    target_sigma : float, default=3.0
        Standard deviation (pixels) of the Gaussian blobs.
        NOTE(review): ``LabelConfig.sigma`` defaults to 2.0 — confirm which
        default is intended.
    dtype
        Torch dtype of the produced heatmaps.

    Returns
    -------
    Heatmaps
        Detection, class, and size heatmaps on ``spec``'s device.
    """
    logger.debug(
        "Will generate heatmaps for clip annotation "
        "{uuid} with {num} annotated sound events",
        uuid=clip_annotation.uuid,
        num=len(clip_annotation.sound_events),
    )

    height = spec.shape[-2]
    width = spec.shape[-1]
    num_classes = len(targets.class_names)
    num_dims = len(roi_mapper.dimension_names)
    clip = clip_annotation.clip

    # Initialize heatmaps on the spectrogram's device so the element-wise
    # maximum with the (device-resident) Gaussian grid below cannot fail
    # with a cross-device error.
    device = spec.device
    detection_heatmap = torch.zeros(
        [1, height, width], dtype=dtype, device=device
    )
    class_heatmap = torch.zeros(
        [num_classes, height, width], dtype=dtype, device=device
    )
    size_heatmap = torch.zeros(
        [num_dims, height, width], dtype=dtype, device=device
    )

    # Pixel-coordinate grids used to evaluate the Gaussian for every event.
    freqs, times = torch.meshgrid(
        torch.arange(height, dtype=dtype),
        torch.arange(width, dtype=dtype),
        indexing="ij",
    )

    freqs = freqs.to(device)
    times = times.to(device)

    for class_name, (time, frequency), size in iterate_encoded_sound_events(
        clip_annotation.sound_events,
        targets,
        roi_mapper,
    ):
        time_index = map_to_pixels(time, width, clip.start_time, clip.end_time)
        freq_index = map_to_pixels(frequency, height, min_freq, max_freq)

        if (
            time_index < 0
            or time_index >= width
            or freq_index < 0
            or freq_index >= height
        ):
            # loguru substitutes positional args into "{}" placeholders;
            # printf-style "%s" is never interpolated.
            logger.debug(
                "Skipping annotation: position outside spectrogram. Pos: {}",
                (time, frequency),
            )
            continue

        distance = (times - time_index) ** 2 + (freqs - freq_index) ** 2
        gaussian_blob = torch.exp(-distance / (2 * target_sigma**2))

        # Merge with max so overlapping events don't erase each other.
        detection_heatmap[0] = torch.maximum(
            detection_heatmap[0],
            gaussian_blob,
        )
        # Size values are written only at the event's peak pixel.
        size_heatmap[:, freq_index, time_index] = torch.tensor(size[:])

        # If the label is None skip the sound event: it contributes to
        # detection and size targets but not to any class channel.
        if class_name is None:
            continue

        class_index = targets.class_names.index(class_name)
        class_heatmap[class_index] = torch.maximum(
            class_heatmap[class_index],
            gaussian_blob,
        )

    return Heatmaps(
        detection=detection_heatmap,
        classes=class_heatmap,
        size=size_heatmap,
    )
diff --git a/batdetect2/train/readme.md b/src/batdetect2/train/legacy/readme.md
similarity index 82%
rename from batdetect2/train/readme.md
rename to src/batdetect2/train/legacy/readme.md
index e406c7dd..eaa1253e 100644
--- a/batdetect2/train/readme.md
+++ b/src/batdetect2/train/legacy/readme.md
@@ -1,18 +1,21 @@
## How to train a model from scratch
-`python train_model.py data_dir annotation_dir` e.g.
+
+> **Warning**
+> This code is currently broken. Will fix soon, stay tuned.
+
+`python train_model.py data_dir annotation_dir` e.g.
`python train_model.py /data1/bat_data/data/ /data1/bat_data/annotations/anns/`
More comprehensive instructions are provided in the finetune directory.
-
## Training on your own data
You can either use the finetuning scripts to finetune from an existing training dataset. Follow the instructions in the `../finetune/` directory.
-Alternatively, you can train from scratch. First, you will need to create your own annotation file (like in the finetune example), and then you will need to edit `train_split.py` to add your new dataset and specify which combination of files you want to train on.
+Alternatively, you can train from scratch. First, you will need to create your own annotation file (like in the finetune example), and then you will need to edit `train_split.py` to add your new dataset and specify which combination of files you want to train on.
-Note, if training from scratch and you want to include the existing data, you may need to set all the class names to the generic class name ('Bat') so that the existing species are not added to your model, but instead just used to help perform the bat/not bat task.
+Note, if training from scratch and you want to include the existing data, you may need to set all the class names to the generic class name ('Bat') so that the existing species are not added to your model, but instead just used to help perform the bat/not bat task.
## Additional notes
-Having blank files with no bats in them is also useful, just make sure that the annotation files lists them as not being annotated (i.e. `is_annotated=True`).
+Having blank files with no bats in them is also useful, just make sure that the annotation files list them as being annotated (i.e. `is_annotated=True`), so the empty files are treated as confirmed bat-free rather than simply unlabelled.
-Training will be slow without a GPU.
+Training will be slow without a GPU.
diff --git a/src/batdetect2/train/legacy/train.py b/src/batdetect2/train/legacy/train.py
new file mode 100644
index 00000000..bd05aa27
--- /dev/null
+++ b/src/batdetect2/train/legacy/train.py
@@ -0,0 +1,82 @@
+from typing import NamedTuple
+
+import torch
+from batdetect2.models.types import DetectionModel
+from soundevent import data
+from torch.optim import Adam
+from torch.optim.lr_scheduler import CosineAnnealingLR
+from torch.utils.data import DataLoader
+
+from batdetect2.train.dataset import LabeledDataset
+
+
class TrainInputs(NamedTuple):
    """Tensor fields of one batch consumed by the legacy training loop."""

    spec: torch.Tensor
    detection_heatmap: torch.Tensor
    class_heatmap: torch.Tensor
    size_heatmap: torch.Tensor
+
+
def train_loop(
    model: DetectionModel,
    train_dataset: LabeledDataset[TrainInputs],
    validation_dataset: LabeledDataset[TrainInputs],
    device: torch.device | None = None,
    num_epochs: int = 100,
    learning_rate: float = 1e-4,
):
    """Legacy training loop: run ``num_epochs`` epochs of optimization.

    NOTE(review): this legacy module is known-broken (see the legacy README);
    ``train_single_epoch`` below references names that are not defined in
    this module.
    """
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    # NOTE(review): the validation loader is constructed and immediately
    # discarded — validation is never actually run.
    DataLoader(validation_dataset, batch_size=32)

    model.to(device)

    optimizer = Adam(model.parameters(), lr=learning_rate)
    # One cosine cycle spanning every optimizer step of the whole run.
    scheduler = CosineAnnealingLR(
        optimizer,
        num_epochs * len(train_loader),
    )

    for _epoch in range(num_epochs):
        train_single_epoch(
            model,
            train_loader,
            optimizer,
            device,
            scheduler,
        )
+
+
def train_single_epoch(
    model: DetectionModel,
    train_loader: DataLoader,
    optimizer: Adam,
    device: torch.device,
    scheduler: CosineAnnealingLR,
):
    """Run one epoch of optimization over ``train_loader``.

    NOTE(review): broken as written — ``tu``, ``loss_fun``, ``gt_det``,
    ``gt_size``, ``gt_class``, ``det_criterion``, ``params``, and
    ``class_inv_freq`` are not defined in this module, and ``data`` refers
    to the imported ``soundevent.data`` module, which has no ``shape``.
    """
    model.train()
    train_loss = tu.AverageMeter()

    for batch in train_loader:
        optimizer.zero_grad()

        spec = batch.spec.to(device)
        # NOTE(review): the moved tensors below are discarded, not bound to
        # names, so the heatmap targets never reach the device.
        batch.detection_heatmap.to(device)
        batch.class_heatmap.to(device)
        batch.size_heatmap.to(device)

        outputs = model(spec)

        loss = loss_fun(
            outputs,
            gt_det,
            gt_size,
            gt_class,
            det_criterion,
            params,
            class_inv_freq,
        )

        train_loss.update(loss.item(), data.shape[0])
        loss.backward()
        optimizer.step()
        # Scheduler steps per batch, matching its per-step total in train_loop.
        scheduler.step()
diff --git a/batdetect2/train/train_model.py b/src/batdetect2/train/legacy/train_model.py
similarity index 94%
rename from batdetect2/train/train_model.py
rename to src/batdetect2/train/legacy/train_model.py
index e38de39d..f7504a4a 100644
--- a/batdetect2/train/train_model.py
+++ b/src/batdetect2/train/legacy/train_model.py
@@ -2,16 +2,17 @@
import json
import warnings
+import batdetect2.train.audio_dataloader as adl
+import batdetect2.train.evaluate as evl
+import batdetect2.train.train_split as ts
+import batdetect2.train.train_utils as tu
import matplotlib.pyplot as plt
import numpy as np
import torch
+import torch.utils.data
from torch.optim.lr_scheduler import CosineAnnealingLR
import batdetect2.detector.post_process as pp
-import batdetect2.train.audio_dataloader as adl
-import batdetect2.train.evaluate as evl
-import batdetect2.train.train_split as ts
-import batdetect2.train.train_utils as tu
import batdetect2.utils.plot_utils as pu
from batdetect2.detector import models, parameters
from batdetect2.train import losses
@@ -29,7 +30,7 @@ def save_images_batch(model, data_loader, params):
ind = 0 # first image in each batch
with torch.no_grad():
- for batch_idx, inputs in enumerate(data_loader):
+ for inputs in data_loader:
data = inputs["spec"].to(params["device"])
outputs = model(data)
@@ -81,7 +82,12 @@ def save_image(
def loss_fun(
- outputs, gt_det, gt_size, gt_class, det_criterion, params, class_inv_freq
+ outputs,
+ gt_det,
+ gt_size,
+ gt_class,
+ det_criterion,
+ params,
):
# detection loss
loss = params["det_loss_weight"] * det_criterion(
@@ -104,7 +110,13 @@ def loss_fun(
def train(
- model, epoch, data_loader, det_criterion, optimizer, scheduler, params
+ model,
+ epoch,
+ data_loader,
+ det_criterion,
+ optimizer,
+ scheduler,
+ params,
):
model.train()
@@ -309,7 +321,7 @@ def select_model(params):
resize_factor=params["resize_factor"],
)
else:
- print("No valid network specified")
+ raise ValueError("No valid network specified")
return model
@@ -319,9 +331,9 @@ def main():
params = parameters.get_params(True)
if torch.cuda.is_available():
- params["device"] = "cuda"
+ params.device = "cuda"
else:
- params["device"] = "cpu"
+ params.device = "cpu"
# setup arg parser and populate it with exiting parameters - will not work with lists
parser = argparse.ArgumentParser()
@@ -349,13 +361,16 @@ def main():
default="Rhinolophus ferrumequinum;Rhinolophus hipposideros",
help='Will set low and high frequency the same for these classes. Separate names with ";"',
)
+
for key, val in params.items():
parser.add_argument("--" + key, type=type(val), default=val)
params = vars(parser.parse_args())
# save notes file
if params["notes"] != "":
- tu.write_notes_file(params["experiment"] + "notes.txt", params["notes"])
+ tu.write_notes_file(
+ params["experiment"] + "notes.txt", params["notes"]
+ )
# load the training and test meta data - there are different splits defined
train_sets, test_sets = ts.get_train_test_data(
@@ -374,15 +389,11 @@ def main():
for tt in train_sets:
print(tt["ann_path"])
classes_to_ignore = params["classes_to_ignore"] + params["generic_class"]
- (
- data_train,
- params["class_names"],
- params["class_inv_freq"],
- ) = tu.load_set_of_anns(
+ data_train = tu.load_set_of_anns(
train_sets,
- classes_to_ignore,
- params["events_of_interest"],
- params["convert_to_genus"],
+ classes_to_ignore=classes_to_ignore,
+ events_of_interest=params["events_of_interest"],
+ convert_to_genus=params["convert_to_genus"],
)
params["genus_names"], params["genus_mapping"] = tu.get_genus_mapping(
params["class_names"]
@@ -415,11 +426,12 @@ def main():
print("\nTesting on:")
for tt in test_sets:
print(tt["ann_path"])
- data_test, _, _ = tu.load_set_of_anns(
+
+ data_test = tu.load_set_of_anns(
test_sets,
- classes_to_ignore,
- params["events_of_interest"],
- params["convert_to_genus"],
+ classes_to_ignore=classes_to_ignore,
+ events_of_interest=params["events_of_interest"],
+ convert_to_genus=params["convert_to_genus"],
)
data_train = tu.remove_dupes(data_train, data_test)
test_dataset = adl.AudioLoader(data_test, params, is_train=False)
@@ -447,10 +459,13 @@ def main():
scheduler = CosineAnnealingLR(
optimizer, params["num_epochs"] * len(train_loader)
)
+
if params["train_loss"] == "mse":
det_criterion = losses.mse_loss
elif params["train_loss"] == "focal":
det_criterion = losses.focal_loss
+ else:
+ raise ValueError("No valid loss specified")
# save parameters to file
with open(params["experiment"] + "params.json", "w") as da:
diff --git a/batdetect2/train/train_split.py b/src/batdetect2/train/legacy/train_split.py
similarity index 99%
rename from batdetect2/train/train_split.py
rename to src/batdetect2/train/legacy/train_split.py
index 902fe82b..31414b3b 100644
--- a/batdetect2/train/train_split.py
+++ b/src/batdetect2/train/legacy/train_split.py
@@ -10,13 +10,12 @@ def get_train_test_data(ann_dir, wav_dir, split_name, load_extra=True):
train_sets, test_sets = split_same(ann_dir, wav_dir, load_extra)
else:
print("Split not defined")
- assert False
+ raise AssertionError()
return train_sets, test_sets
def split_diff(ann_dir, wav_dir, load_extra=True):
-
train_sets = []
if load_extra:
train_sets.append(
@@ -144,7 +143,6 @@ def split_diff(ann_dir, wav_dir, load_extra=True):
def split_same(ann_dir, wav_dir, load_extra=True):
-
train_sets = []
if load_extra:
train_sets.append(
diff --git a/src/batdetect2/train/legacy/train_utils.py b/src/batdetect2/train/legacy/train_utils.py
new file mode 100644
index 00000000..6c4cd26a
--- /dev/null
+++ b/src/batdetect2/train/legacy/train_utils.py
@@ -0,0 +1,263 @@
+import json
+import sys
+from collections import Counter
+from pathlib import Path
+from typing import Dict, Generator, List, Tuple
+
+import numpy as np
+
+from batdetect2 import types
+
# ``Counter[str]`` is only subscriptable at runtime on Python 3.9+;
# older interpreters must use the ``typing.Counter`` alias instead.
if sys.version_info >= (3, 9):
    StringCounter = Counter[str]
else:
    from typing import Counter as StringCounter
+
+
def write_notes_file(file_name: str, text: str):
    """Append ``text`` plus a trailing newline to the file at ``file_name``."""
    with open(file_name, "a") as notes_file:
        notes_file.write(f"{text}\n")
+
+
def get_blank_dataset_dict(
    dataset_name: str,
    is_test: bool,
    ann_path: str,
    wav_path: str,
) -> types.DatasetDict:
    """Build a minimal dataset descriptor; ``is_binary`` defaults to False."""
    return dict(
        dataset_name=dataset_name,
        is_test=is_test,
        is_binary=False,
        ann_path=ann_path,
        wav_path=wav_path,
    )
+
+
def get_short_class_names(
    class_names: List[str],
    str_len: int = 3,
) -> List[str]:
    """Abbreviate each word of every class name to its first ``str_len`` chars.

    E.g. ``"Myotis daubentonii"`` -> ``"Myo dau"`` with the default length.
    """
    return [
        " ".join(word[:str_len] for word in name.split(" "))
        for name in class_names
    ]
+
+
def remove_dupes(
    data_train: List[types.FileAnnotation],
    data_test: List[types.FileAnnotation],
) -> List[types.FileAnnotation]:
    """Drop train annotations whose ``id`` also appears in the test set.

    Prints how many items were removed (when any were). Returns a new list;
    the inputs are not mutated.
    """
    # Set membership is O(1); the previous list scan made this O(n * m).
    test_ids = {dd["id"] for dd in data_test}
    data_train_prune = [aa for aa in data_train if aa["id"] not in test_ids]
    diff = len(data_train) - len(data_train_prune)
    if diff != 0:
        print(diff, "items removed from train set")
    return data_train_prune
+
+
def get_genus_mapping(class_names: List[str]) -> Tuple[List[str], List[int]]:
    """Map species class names to genus indices.

    Returns the sorted unique genus names (first word of each class name)
    and, for each input class, the index of its genus in that list.
    """
    genus_per_class = [name.split(" ")[0] for name in class_names]
    names, mapping = np.unique(genus_per_class, return_inverse=True)
    return names.tolist(), mapping.tolist()
+
+
def standardize_low_freq(
    data: List[types.FileAnnotation],
    class_of_interest: str,
) -> List[types.FileAnnotation]:
    """Set the low frequency of every ``class_of_interest`` event to the mean.

    Low-frequency annotations are highly variable for constant-frequency
    calls, so they are replaced with the dataset-wide mean low frequency.
    High frequencies are left untouched unless they would fall below the new
    low bound, in which case they are raised to the mean high frequency.
    Mutates ``data`` in place and returns it.
    """
    low_freqs = []
    high_freqs = []
    for file_annotation in data:
        for event in file_annotation["annotation"]:
            if event["class"] == class_of_interest:
                low_freqs.append(event["low_freq"])
                high_freqs.append(event["high_freq"])

    low_mean = float(np.mean(low_freqs))
    high_mean = float(np.mean(high_freqs))
    assert low_mean < high_mean

    print("\nStandardizing low and high frequency for:")
    print(class_of_interest)
    print("low: ", round(low_mean, 2))
    print("high: ", round(high_mean, 2))

    for file_annotation in data:
        for event in file_annotation["annotation"]:
            if event["class"] == class_of_interest:
                event["low_freq"] = low_mean
                # Keep the box valid when the raised low bound passes high.
                if event["high_freq"] < low_mean:
                    event["high_freq"] = high_mean

    return data
+
+
def format_annotation(
    annotation: types.FileAnnotation,
    events_of_interest: List[str] | None = None,
    name_replace: Dict[str, str] | None = None,
    convert_to_genus: bool = False,
    classes_to_ignore: List[str] | None = None,
) -> types.FileAnnotation:
    """Return a copy of ``annotation`` with events filtered and names cleaned.

    Events whose type is absent from ``events_of_interest`` are dropped.
    Class names are stripped of surrounding whitespace, optionally renamed
    via ``name_replace``, and optionally reduced to the genus (first word).
    Events whose *normalized* class is in ``classes_to_ignore`` are dropped —
    the ignore check deliberately runs after renaming and genus conversion so
    it also catches converted or replaced names.
    """
    kept_events = []
    for event in annotation["annotation"]:
        if (
            events_of_interest is not None
            and event["event"] not in events_of_interest
        ):
            continue

        # Normalize: strip whitespace, apply renames, reduce to genus.
        class_name = event["class"].strip()

        if name_replace is not None:
            class_name = name_replace.get(class_name, class_name)

        if convert_to_genus:
            class_name = class_name.split(" ")[0]

        if classes_to_ignore is not None and class_name in classes_to_ignore:
            continue

        kept_events.append({**event, "class": class_name})

    return {**annotation, "annotation": kept_events}
+
+
def get_class_names(
    data: List[types.FileAnnotation],
    classes_to_ignore: List[str] | None = None,
) -> Tuple[StringCounter, List[float]]:
    """Count class occurrences and compute per-occurrence inverse frequencies.

    Parameters
    ----------
    data
        A list of file annotations, where each annotation contains a list of
        sound events with associated class names.
    classes_to_ignore
        A list of class names to ignore.

    Returns
    -------
    counts
        Counter mapping each class name to its number of occurrences.
    class_inv_freq
        One entry PER SOUND EVENT (not per unique class): the mean class
        count divided by the count of that event's class.
        NOTE(review): if a per-unique-class weight vector was intended, the
        final comprehension should iterate ``counts`` rather than
        ``class_names_list`` — confirm against callers.
    """
    if classes_to_ignore is None:
        classes_to_ignore = []

    class_names_list: List[str] = []
    for annotation in data:
        for sound_event in annotation["annotation"]:
            if sound_event["class"] in classes_to_ignore:
                continue

            class_names_list.append(sound_event["class"])

    counts = Counter(class_names_list)
    mean_counts = float(np.mean(list(counts.values())))
    return counts, [mean_counts / counts[cc] for cc in class_names_list]
+
+
def report_class_counts(class_names: StringCounter):
    """Print a table of class names and occurrence counts, most common first.

    Handles an empty counter gracefully (prints only the header); the
    previous implementation raised when taking the max of an empty sequence.
    """
    print("Class count:")
    if not class_names:
        return
    # Pad the name column to the longest name plus breathing room.
    column_width = max(len(name) for name in class_names) + 5
    for index, (class_name, count) in enumerate(class_names.most_common()):
        print(f"{index:<5}{class_name:<{column_width}}{count}")
+
+
def load_set_of_anns(
    data: List[types.DatasetDict],
    *,
    convert_to_genus: bool = False,
    filter_issues: bool = False,
    events_of_interest: List[str] | None = None,
    classes_to_ignore: List[str] | None = None,
    name_replace: Dict[str, str] | None = None,
) -> List[types.FileAnnotation]:
    """Load and normalize the file annotations of every dataset in ``data``.

    Unannotated files are always dropped; files with annotation issues are
    dropped only when ``filter_issues`` is set. Each surviving annotation is
    passed through ``format_annotation`` with the given filtering options.
    """
    annotations: List[types.FileAnnotation] = []

    for dataset in data:
        for file_annotation in load_anns(
            dataset["ann_path"], dataset["wav_path"]
        ):
            # Omit unannotated files.
            if not file_annotation["annotated"]:
                continue

            # Omit files with annotation issues when requested.
            if filter_issues and file_annotation["issues"]:
                continue

            annotations.append(
                format_annotation(
                    file_annotation,
                    events_of_interest=events_of_interest,
                    name_replace=name_replace,
                    convert_to_genus=convert_to_genus,
                    classes_to_ignore=classes_to_ignore,
                )
            )

    return annotations
+
+
def load_anns(
    ann_dir: str,
    raw_audio_dir: str,
) -> Generator[types.FileAnnotation, None, None]:
    """Yield the file annotation of every ``*.json`` found under ``ann_dir``.

    Each annotation gets a ``file_path`` pointing at its audio file inside
    ``raw_audio_dir``. The path is joined with ``pathlib`` so the result is
    correct whether or not ``raw_audio_dir`` has a trailing separator; plain
    string concatenation silently produced broken paths without one.
    """
    for path in Path(ann_dir).rglob("*.json"):
        with open(path) as fp:
            file_annotation = json.load(fp)

        file_annotation["file_path"] = str(
            Path(raw_audio_dir) / file_annotation["id"]
        )
        yield file_annotation
+
+
class AverageMeter:
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
diff --git a/src/batdetect2/train/lightning.py b/src/batdetect2/train/lightning.py
new file mode 100644
index 00000000..3e7833e2
--- /dev/null
+++ b/src/batdetect2/train/lightning.py
@@ -0,0 +1,192 @@
+from dataclasses import dataclass
+
+import lightning as L
+import torch
+from soundevent.data import PathLike
+
+from batdetect2.models import ModelConfig, build_model
+from batdetect2.models.types import ModelOutput, ModelProtocol
+from batdetect2.targets import TargetConfig
+from batdetect2.train.checkpoints import resolve_checkpoint_path
+from batdetect2.train.config import TrainingConfig
+from batdetect2.train.losses import build_loss
+from batdetect2.train.optimizers import build_optimizer
+from batdetect2.train.schedulers import build_scheduler
+from batdetect2.train.types import LossProtocol, TrainExample
+
+__all__ = [
+ "TrainingModule",
+ "load_model_from_checkpoint",
+]
+
+
class TrainingModule(L.LightningModule):
    """Lightning module wrapping a BatDetect2 model and its loss.

    Configurations are stored as plain dicts in the hyperparameters (via
    ``save_hyperparameters``) so the module can be rebuilt from a checkpoint
    without the original Python objects. When ``model`` or ``loss`` is not
    supplied, each is reconstructed from the stored configuration.
    """

    model: ModelProtocol
    loss: LossProtocol

    def __init__(
        self,
        model_config: dict | None = None,
        targets_config: dict | None = None,
        class_names: list[str] | None = None,
        dimension_names: list[str] | None = None,
        train_config: dict | None = None,
        loss: LossProtocol | None = None,
        model: ModelProtocol | None = None,
    ):
        super().__init__()

        # Persist config dicts in the checkpoint; the live model/loss objects
        # are excluded and rebuilt on load instead.
        self.save_hyperparameters(ignore=["model", "loss"], logger=False)

        self.model_config: dict = model_config or {}
        self.targets_config: dict = targets_config or {}
        self.class_names = list(class_names or [])
        self.dimension_names = list(dimension_names or [])

        self.train_config = TrainingConfig.model_validate(train_config or {})

        if loss is None:
            loss = build_loss(config=self.train_config.loss)

        if model is None:
            # Rebuilding the model from config requires the output-head
            # metadata that a pre-built model would otherwise carry.
            if not self.class_names:
                raise ValueError(
                    "class_names must be provided when rebuilding a training "
                    "module without a model."
                )

            if not self.dimension_names:
                raise ValueError(
                    "dimension_names must be provided when rebuilding a "
                    "training module without a model."
                )

            model = build_model(
                config=self.model_config,
                class_names=self.class_names,
                dimension_names=self.dimension_names,
            )

        self.loss = loss
        self.model = model

    def training_step(self, batch: TrainExample):
        """Run a forward pass, log loss components, and return the total loss."""
        outputs = self.model.detector(batch.spec)
        losses = self.loss(outputs, batch)
        self.log("total_loss/train", losses.total, prog_bar=True, logger=True)
        self.log("detection_loss/train", losses.detection, logger=True)
        self.log("size_loss/train", losses.size, logger=True)
        self.log(
            "classification_loss/train",
            losses.classification,
            logger=True,
        )
        return losses.total

    def validation_step(  # type: ignore
        self,
        batch: TrainExample,
        batch_idx: int,
    ) -> ModelOutput:
        """Log validation loss components and return the raw model outputs.

        Returning ``ModelOutput`` (instead of the loss) lets validation
        callbacks compute additional metrics from the predictions.
        """
        outputs = self.model.detector(batch.spec)
        losses = self.loss(outputs, batch)
        self.log("total_loss/val", losses.total, prog_bar=True, logger=True)
        self.log("detection_loss/val", losses.detection, logger=True)
        self.log("size_loss/val", losses.size, logger=True)
        self.log("classification_loss/val", losses.classification, logger=True)
        return outputs

    def configure_optimizers(self):
        """Build the optimizer and per-epoch LR scheduler from the train config."""
        trainable_parameters = [
            parameter
            for parameter in self.parameters()
            if parameter.requires_grad
        ]

        if not trainable_parameters:
            raise ValueError("No trainable parameters available.")

        optimizer = build_optimizer(
            trainable_parameters,
            config=self.train_config.optimizer,
        )
        scheduler = build_scheduler(
            optimizer,
            config=self.train_config.scheduler,
        )
        # Lightning scheduler dict: step the scheduler once per epoch.
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "interval": "epoch",
                "frequency": 1,
            },
        }
+
+
@dataclass
class StoredConfig:
    """Validated configuration objects recovered from a training checkpoint."""

    # Architecture configuration of the stored model.
    model: ModelConfig
    # Target definition (classes, ROI mapping) used during training.
    targets: TargetConfig
    # Training hyperparameters (loss, optimizer, scheduler).
    train: TrainingConfig
+
+
def load_model_from_checkpoint(
    path: PathLike | str | None = None,
) -> tuple[ModelProtocol, StoredConfig]:
    """Load a model and its configuration from a Lightning checkpoint.

    Parameters
    ----------
    path : PathLike | str | None
        Path to a ``.ckpt`` file produced by the BatDetect2 training
        pipeline. If omitted, the default bundled checkpoint is used.

    Returns
    -------
    tuple[ModelProtocol, StoredConfig]
        The restored model instance and a ``StoredConfig`` bundling the
        validated model, target, and training configurations recovered
        from the checkpoint hyperparameters.
    """
    resolved_path = resolve_checkpoint_path(path)
    # Load on CPU; callers can move the model to another device afterwards.
    module = TrainingModule.load_from_checkpoint(
        resolved_path,
        map_location=torch.device("cpu"),
    )
    training_config = TrainingConfig.model_validate(module.train_config)
    model_config = ModelConfig.model_validate(module.model_config)
    targets_config = TargetConfig.model_validate(module.targets_config)
    return module.model, StoredConfig(
        model=model_config,
        targets=targets_config,
        train=training_config,
    )
+
+
def build_training_module(
    model_config: ModelConfig | None = None,
    targets_config: TargetConfig | dict | None = None,
    class_names: list[str] | None = None,
    dimension_names: list[str] | None = None,
    train_config: TrainingConfig | None = None,
    model: ModelProtocol | None = None,
) -> TrainingModule:
    """Construct a ``TrainingModule`` from optional configuration objects.

    Missing configurations fall back to their defaults. All configs are
    serialized to JSON-compatible dicts before being handed to the module
    so that they round-trip cleanly through checkpoint hyperparameters.

    Parameters
    ----------
    model_config : ModelConfig, optional
        Model architecture configuration; defaults to ``ModelConfig()``.
    targets_config : TargetConfig | dict, optional
        Target definition; validated via ``TargetConfig.model_validate``.
    class_names, dimension_names : list[str], optional
        Output-head metadata, required by the module when ``model`` is None.
    train_config : TrainingConfig, optional
        Training hyperparameters; defaults to ``TrainingConfig()``.
    model : ModelProtocol, optional
        Pre-built model; when omitted the module builds one from config.

    Returns
    -------
    TrainingModule
        The assembled Lightning training module.
    """
    if model_config is None:
        model_config = ModelConfig()

    if train_config is None:
        train_config = TrainingConfig()

    if targets_config is None:
        targets_config = TargetConfig()

    # Accepts either a TargetConfig instance or a plain dict.
    targets_config = TargetConfig.model_validate(targets_config)

    return TrainingModule(
        model_config=model_config.model_dump(mode="json"),
        targets_config=targets_config.model_dump(mode="json"),
        train_config=train_config.model_dump(mode="json"),
        class_names=class_names,
        dimension_names=dimension_names,
        model=model,
    )
diff --git a/src/batdetect2/train/logging.py b/src/batdetect2/train/logging.py
new file mode 100644
index 00000000..0afad6c0
--- /dev/null
+++ b/src/batdetect2/train/logging.py
@@ -0,0 +1,166 @@
+from __future__ import annotations
+
+from collections.abc import Sequence
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any
+
+import pandas as pd
+from lightning.pytorch.loggers import Logger
+from soundevent import data
+
+from batdetect2.audio import AudioConfig
+from batdetect2.core.configs import BaseConfig
+from batdetect2.data import Dataset, compute_class_summary
+from batdetect2.logging import log_config_artifact, log_csv_artifact
+from batdetect2.models import ModelConfig
+from batdetect2.targets import TargetConfig, TargetProtocol
+from batdetect2.train.config import TrainingConfig
+
+__all__ = [
+ "ConfigHyperparameterLogging",
+ "DataSummaryArtifactLogging",
+ "DatasetConfigArtifact",
+ "DatasetConfigArtifactLogging",
+ "TargetConfigArtifactLogging",
+ "TrainLoggingContext",
+]
+
+
@dataclass(frozen=True)
class TrainLoggingContext:
    """Immutable bundle of everything the logging steps need at train time."""

    # Model architecture configuration as a JSON-compatible dict.
    model_config: dict[str, Any]
    # Validated training hyperparameters.
    train_config: TrainingConfig
    # Audio loading/preprocessing configuration.
    audio_config: AudioConfig
    # Target definition object (provides ``get_config()``).
    targets: TargetProtocol
    # Training dataset; summarized into artifacts.
    train_dataset: Dataset
    # Optional validation dataset; may be None when no validation split exists.
    val_dataset: Dataset | None
+
+
@dataclass(frozen=True)
class DatasetConfigArtifact:
    """A dataset configuration paired with the artifact filename to log it as."""

    # Filename used when storing the config artifact (e.g. "train_dataset.yaml").
    filename: str
    # The configuration object to serialize.
    config: BaseConfig
+
+
class ConfigHyperparameterLogging:
    """Logging step that records model/training/audio configs as hyperparameters."""

    def run(
        self,
        logger: Logger,
        artifact_path: Path,
        context: TrainLoggingContext,
    ) -> None:
        """Log the configuration dicts via ``logger.log_hyperparams``.

        ``artifact_path`` is unused here; it is part of the shared ``run``
        signature used by the other logging steps in this module.
        """
        # Re-validate to guarantee a well-formed config before serializing.
        model_config = ModelConfig.model_validate(context.model_config)
        logger.log_hyperparams(
            {
                "model": model_config.model_dump(
                    mode="json",
                    exclude_none=True,
                ),
                "training": context.train_config.model_dump(
                    mode="json",
                    exclude_none=True,
                ),
                "audio": context.audio_config.model_dump(
                    mode="json",
                    exclude_none=True,
                ),
            }
        )
+
+
class TargetConfigArtifactLogging:
    """Logging step that stores the target configuration as a YAML artifact."""

    def run(
        self,
        logger: Logger,
        artifact_path: Path,
        context: TrainLoggingContext,
    ) -> None:
        """Serialize the targets config and log it under ``training_artifacts``."""
        targets_config = TargetConfig.model_validate(
            context.targets.get_config()
        )
        log_config_artifact(
            logger,
            targets_config,
            filename="targets.yaml",
            artifact_path=artifact_path / "training_artifacts",
        )
+
+
class DatasetConfigArtifactLogging:
    """Logging step that stores train (and optional validation) dataset configs."""

    def __init__(
        self,
        train_dataset_config: DatasetConfigArtifact,
        val_dataset_config: DatasetConfigArtifact | None = None,
    ):
        self.train_dataset_config = train_dataset_config
        self.val_dataset_config = val_dataset_config

    def run(
        self,
        logger: Logger,
        artifact_path: Path,
        context: TrainLoggingContext,
    ) -> None:
        """Log each configured dataset artifact under ``training_artifacts``."""
        target_dir = artifact_path / "training_artifacts"

        # Always log the train config; include the validation one when set.
        artifacts = [self.train_dataset_config]
        if self.val_dataset_config is not None:
            artifacts.append(self.val_dataset_config)

        for artifact in artifacts:
            log_config_artifact(
                logger,
                artifact.config,
                filename=artifact.filename,
                artifact_path=target_dir,
            )
+
+
class DataSummaryArtifactLogging:
    """Logging step that stores per-class summary CSVs for the datasets."""

    def run(
        self,
        logger: Logger,
        artifact_path: Path,
        context: TrainLoggingContext,
    ) -> None:
        """Log class-summary CSVs for the train (and optional val) datasets."""
        training_artifact_path = artifact_path / "training_artifacts"

        log_csv_artifact(
            logger,
            _compute_class_summary_or_empty(
                context.train_dataset,
                context.targets,
            ),
            filename="train_class_summary.csv",
            artifact_path=training_artifact_path,
        )

        if context.val_dataset is not None:
            log_csv_artifact(
                logger,
                _compute_class_summary_or_empty(
                    context.val_dataset,
                    context.targets,
                ),
                filename="val_class_summary.csv",
                artifact_path=training_artifact_path,
            )
+
+
def _compute_class_summary_or_empty(
    dataset: Sequence[data.ClipAnnotation],
    targets: TargetProtocol,
) -> pd.DataFrame:
    """Compute the per-class summary, tolerating datasets with no classes.

    A ``KeyError("class_name")`` from ``compute_class_summary`` is treated
    as "no classified events" and mapped to an empty frame with the
    expected columns; any other error propagates unchanged.
    """
    try:
        return compute_class_summary(dataset, targets)
    except KeyError as error:
        # Only swallow the specific missing-"class_name"-column failure.
        if error.args == ("class_name",):
            return pd.DataFrame(
                columns=["num calls", "num recordings", "duration", "call_rate"]
            )
        raise
diff --git a/src/batdetect2/train/losses.py b/src/batdetect2/train/losses.py
new file mode 100644
index 00000000..fd675a57
--- /dev/null
+++ b/src/batdetect2/train/losses.py
@@ -0,0 +1,483 @@
+"""Loss functions and configurations for training BatDetect2 models.
+
+This module defines the loss functions used to train BatDetect2 models,
+including individual loss components for different prediction tasks (detection,
+classification, size regression) and a main coordinating loss function that
+combines them.
+
+It utilizes common loss types like L1 loss (`BBoxLoss`) for regression and
+Focal Loss (`FocalLoss`) for handling class imbalance in dense detection and
+classification tasks. Configuration objects (`LossConfig`, etc.) allow for easy
+customization of loss parameters and weights via configuration files.
+
+The primary entry points are:
+- `LossFunction`: An `nn.Module` that computes the weighted sum of individual
+ loss components given model outputs and ground truth targets.
+- `build_loss`: A factory function that constructs the `LossFunction` based
+ on a `LossConfig` object.
+- `LossConfig`: The Pydantic model for configuring loss weights and parameters.
+"""
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from loguru import logger
+from pydantic import Field
+from torch import nn
+
+from batdetect2.core.configs import BaseConfig
+from batdetect2.models.types import ModelOutput
+from batdetect2.train.types import Losses, LossProtocol, TrainExample
+
+__all__ = [
+ "BBoxLoss",
+ "ClassificationLossConfig",
+ "DetectionLossConfig",
+ "FocalLoss",
+ "FocalLossConfig",
+ "LossConfig",
+ "LossFunction",
+ "MSELoss",
+ "SizeLossConfig",
+ "build_loss",
+]
+
+
class SizeLossConfig(BaseConfig):
    """Configuration for the bounding box size loss component.

    Attributes
    ----------
    weight : float, default=0.1
        The weighting factor applied to the size loss when combining it with
        other losses (detection, classification) to form the total training
        loss.
    """

    # Relative contribution of the size (L1) loss to the total loss.
    weight: float = 0.1
+
+
class BBoxLoss(nn.Module):
    """Masked L1 loss for bounding-box size regression.

    The mean absolute error between predicted and ground-truth sizes is
    evaluated only where the ground-truth size heatmap is non-zero (i.e. at
    reference points of actual annotations), so background predictions are
    never penalized. The summed error is normalized by the number of valid
    locations (plus a small epsilon for stability).
    """

    def forward(self, pred: torch.Tensor, gt: torch.Tensor) -> torch.Tensor:
        """Compute the masked L1 size loss.

        Parameters
        ----------
        pred : torch.Tensor
            Predicted size tensor, typically shape ``(B, 2, H, W)``.
        gt : torch.Tensor
            Ground-truth size tensor of the same shape; non-zero entries
            mark annotated locations and their target sizes.

        Returns
        -------
        torch.Tensor
            Scalar masked-L1 loss.
        """
        positive = (gt > 0).float()
        # Zero the predictions outside annotated locations, then take the
        # summed absolute error against the (already sparse) ground truth.
        absolute_error = torch.abs(pred * positive - gt)
        normalizer = positive.sum() + 1e-5
        return absolute_error.sum() / normalizer
+
+
class FocalLossConfig(BaseConfig):
    """Configuration parameters for the Focal Loss function.

    Attributes
    ----------
    beta : float, default=4
        Exponent controlling the down-weighting of easy negative examples.
        Higher values increase down-weighting (focus more on hard negatives).
    alpha : float, default=2
        Exponent controlling the down-weighting based on prediction confidence.
        Higher values focus more on misclassified examples (both positive and
        negative).
    """

    # CornerNet-style defaults: beta=4 (negative weighting), alpha=2 (focus).
    beta: float = 4
    alpha: float = 2
+
+
+class FocalLoss(nn.Module):
+ """Focal Loss implementation, adapted from CornerNet.
+
+ Addresses class imbalance in dense object detection/classification tasks by
+ down-weighting the loss contribution from easy examples (both positive and
+ negative), allowing the model to focus more on hard-to-classify examples.
+
+ Parameters
+ ----------
+ eps : float, default=1e-5
+ Small epsilon value added for numerical stability.
+ beta : float, default=4
+ Exponent focusing on hard negative examples (modulates `(1-gt)^beta`).
+ alpha : float, default=2
+ Exponent focusing on misclassified examples (modulates `(1-p)^alpha`
+ for positives and `p^alpha` for negatives).
+ class_weights : torch.Tensor, optional
+ Optional tensor containing weights for each class (applied to positive
+ loss). Shape should be broadcastable to the channel dimension of the
+ input tensors.
+ mask_zero : bool, default=False
+ If True, ignores loss contributions from spatial locations where the
+ ground truth `gt` tensor is zero across *all* channels. Useful for
+ classification heatmaps where some areas might have no assigned class.
+
+ References
+ ----------
+ - Lin, T. Y., et al. "Focal loss for dense object detection." ICCV 2017.
+ - Law, H., & Deng, J. "CornerNet: Detecting Objects as Paired Keypoints."
+ ECCV 2018.
+ """
+
+ def __init__(
+ self,
+ eps: float = 1e-5,
+ beta: float = 4,
+ alpha: float = 2,
+ class_weights: torch.Tensor | None = None,
+ mask_zero: bool = False,
+ ):
+ super().__init__()
+ self.class_weights = class_weights
+ self.eps = eps
+ self.beta = beta
+ self.alpha = alpha
+ self.mask_zero = mask_zero
+
+ def forward(
+ self,
+ pred: torch.Tensor,
+ gt: torch.Tensor,
+ ) -> torch.Tensor:
+ """Compute the Focal Loss.
+
+ Parameters
+ ----------
+ pred : torch.Tensor
+ Predicted probabilities or logits (typically sigmoid output for
+ detection, or softmax/sigmoid for classification). Must be in the
+ range [0, 1] after potential activation. Shape `(B, C, H, W)`.
+ gt : torch.Tensor
+ Ground truth heatmap tensor. Shape `(B, C, H, W)`. Values typically
+ represent target probabilities (e.g., Gaussian peaks for detection,
+ one-hot encoding or smoothed labels for classification). For the
+ adapted CornerNet loss, `gt=1` indicates a positive location, and
+ values `< 1` indicate negative locations (with potential Gaussian
+ weighting `(1-gt)^beta` for negatives near positives).
+
+ Returns
+ -------
+ torch.Tensor
+ Scalar tensor representing the computed focal loss, normalized by
+ the number of positive locations.
+ """
+
+ pos_inds = gt.eq(1).float()
+ neg_inds = gt.lt(1).float()
+
+ pos_loss = (
+ torch.log(pred + self.eps)
+ * torch.pow(1 - pred, self.alpha)
+ * pos_inds
+ )
+ neg_loss = (
+ torch.log(1 - pred + self.eps)
+ * torch.pow(pred, self.alpha)
+ * torch.pow(1 - gt, self.beta)
+ * neg_inds
+ )
+
+ if self.class_weights is not None:
+ pos_loss = pos_loss * torch.tensor(self.class_weights)
+
+ if self.mask_zero:
+ valid_mask = (gt.sum(1) > 0).float().unsqueeze(1)
+ pos_loss = pos_loss * valid_mask
+ neg_loss = neg_loss * valid_mask
+
+ pos_loss = pos_loss.sum()
+ neg_loss = neg_loss.sum()
+
+ num_pos = pos_inds.float().sum()
+ if num_pos == 0:
+ loss = -neg_loss
+ else:
+ loss = -(pos_loss + neg_loss) / num_pos
+ return loss
+
+
class MSELoss(nn.Module):
    """Mean Squared Error loss with optional zero-ground-truth masking.

    Parameters
    ----------
    mask_zero : bool, default=False
        When True, the squared error is summed only over spatial locations
        where at least one ground-truth channel is non-zero, then divided
        by the count of such locations. When False, the plain element-wise
        MSE over the whole tensor is returned.
    """

    def __init__(self, mask_zero: bool = False):
        super().__init__()
        self.mask_zero = mask_zero

    def forward(
        self,
        pred: torch.Tensor,
        gt: torch.Tensor,
    ) -> torch.Tensor:
        """Compute the (optionally masked) mean squared error.

        Parameters
        ----------
        pred : torch.Tensor
            Predicted tensor, shape `(B, C, H, W)`.
        gt : torch.Tensor
            Ground truth tensor, shape `(B, C, H, W)`.

        Returns
        -------
        torch.Tensor
            Scalar MSE loss.
        """
        squared_error = (gt - pred) ** 2

        if not self.mask_zero:
            return squared_error.mean()

        # Keep only locations where some channel carries ground truth.
        valid = gt.any(dim=1, keepdim=True).float()
        return (valid * squared_error).sum() / valid.sum()
+
+
class DetectionLossConfig(BaseConfig):
    """Configuration for the detection loss component.

    Attributes
    ----------
    weight : float, default=1.0
        Weighting factor for the detection loss in the combined total loss.
    focal : FocalLossConfig
        Configuration for the Focal Loss used for detection. Defaults to
        standard Focal Loss parameters (`alpha=2`, `beta=4`).
    """

    # Detection loss is the reference component (weight 1.0 by default).
    weight: float = 1.0
    focal: FocalLossConfig = Field(default_factory=FocalLossConfig)
+
+
class ClassificationLossConfig(BaseConfig):
    """Configuration for the classification loss component.

    Attributes
    ----------
    weight : float, default=2.0
        Weighting factor for the classification loss in the combined total loss.
    focal : FocalLossConfig
        Configuration for the Focal Loss used for classification. Defaults to
        standard Focal Loss parameters (`alpha=2`, `beta=4`).
    """

    # Classification is weighted higher than detection (2.0 vs 1.0) by default.
    weight: float = 2.0
    focal: FocalLossConfig = Field(default_factory=FocalLossConfig)
+
+
class LossConfig(BaseConfig):
    """Aggregated configuration for all loss components.

    Defines the configuration and weighting for detection, size regression,
    and classification losses used in the main `LossFunction`.

    Attributes
    ----------
    detection : DetectionLossConfig
        Configuration for the detection loss (Focal Loss).
    size : SizeLossConfig
        Configuration for the size regression loss (L1 loss).
    classification : ClassificationLossConfig
        Configuration for the classification loss (Focal Loss).
    """

    # Each sub-config carries its own weight; see the component classes.
    detection: DetectionLossConfig = Field(default_factory=DetectionLossConfig)
    size: SizeLossConfig = Field(default_factory=SizeLossConfig)
    classification: ClassificationLossConfig = Field(
        default_factory=ClassificationLossConfig
    )
+
+
class LossFunction(nn.Module, LossProtocol):
    """Weighted sum of detection, size, and classification losses.

    Each component loss is computed from the model outputs and ground-truth
    heatmaps, multiplied by its configured weight, and summed into the total
    loss used for optimization. Individual components are also returned so
    they can be logged separately.

    Parameters
    ----------
    size_loss : nn.Module
        Loss module for size regression (e.g. ``BBoxLoss``).
    detection_loss : nn.Module
        Loss module for detection (e.g. ``FocalLoss``).
    classification_loss : nn.Module
        Loss module for classification (e.g. ``FocalLoss``).
    size_weight : float, default=0.1
        Weight applied to the size loss.
    detection_weight : float, default=1.0
        Weight applied to the detection loss.
    classification_weight : float, default=2.0
        Weight applied to the classification loss.
    """

    def __init__(
        self,
        size_loss: nn.Module,
        detection_loss: nn.Module,
        classification_loss: nn.Module,
        size_weight: float = 0.1,
        detection_weight: float = 1.0,
        classification_weight: float = 2.0,
    ):
        super().__init__()
        self.size_loss_fn = size_loss
        self.detection_loss_fn = detection_loss
        self.classification_loss_fn = classification_loss
        self.size_weight = size_weight
        self.detection_weight = detection_weight
        self.classification_weight = classification_weight

    def forward(
        self,
        pred: ModelOutput,
        gt: TrainExample,
    ) -> Losses:
        """Compute all loss components and their weighted total.

        Parameters
        ----------
        pred : ModelOutput
            Model predictions for the batch (``detection_probs``,
            ``size_preds``, ``class_probs``).
        gt : TrainExample
            Ground-truth targets for the batch (``detection_heatmap``,
            ``size_heatmap``, ``class_heatmap``).

        Returns
        -------
        Losses
            Scalar detection, size, and classification losses plus the
            weighted total.
        """
        size = self.size_loss_fn(pred.size_preds, gt.size_heatmap)
        detection = self.detection_loss_fn(
            pred.detection_probs,
            gt.detection_heatmap,
        )
        classification = self.classification_loss_fn(
            pred.class_probs,
            gt.class_heatmap,
        )

        total = (
            self.size_weight * size
            + self.classification_weight * classification
            + self.detection_weight * detection
        )

        return Losses(
            detection=detection,
            size=size,
            classification=classification,
            total=total,
        )
+
+
def build_loss(
    config: LossConfig | None = None,
    class_weights: np.ndarray | None = None,
) -> LossFunction:
    """Factory function to build the main LossFunction from configuration.

    Instantiates the necessary loss components (`BBoxLoss`, `FocalLoss`) based
    on the provided `LossConfig` (or defaults) and optional `class_weights`,
    then assembles them into the main `LossFunction` module with the specified
    component weights.

    Parameters
    ----------
    config : LossConfig, optional
        Configuration object defining weights and parameters (e.g., alpha, beta
        for Focal Loss) for each loss component. If None, default settings
        from `LossConfig` and its nested configs are used.
    class_weights : np.ndarray, optional
        An array of weights for each specific class, used to adjust the
        classification loss (typically Focal Loss). If None, all classes are
        weighted equally.

    Returns
    -------
    LossFunction
        An initialized `LossFunction` module ready for training.
    """
    config = config or LossConfig()
    logger.opt(lazy=True).debug(
        "Building loss with config: \n{}",
        lambda: config.to_yaml_string(),
    )

    # Compare against None explicitly: truth-testing a numpy array
    # (`if class_weights`) raises "The truth value of an array with more than
    # one element is ambiguous" for any multi-class weight array, and would
    # silently discard a valid all-zero weight vector.
    class_weights_tensor = (
        torch.tensor(class_weights) if class_weights is not None else None
    )

    detection_loss_fn = FocalLoss(
        beta=config.detection.focal.beta,
        alpha=config.detection.focal.alpha,
        mask_zero=False,
    )

    classification_loss_fn = FocalLoss(
        beta=config.classification.focal.beta,
        alpha=config.classification.focal.alpha,
        class_weights=class_weights_tensor,
        mask_zero=True,
    )

    size_loss_fn = BBoxLoss()

    return LossFunction(
        size_loss=size_loss_fn,
        classification_loss=classification_loss_fn,
        detection_loss=detection_loss_fn,
        size_weight=config.size.weight,
        detection_weight=config.detection.weight,
        classification_weight=config.classification.weight,
    )
diff --git a/src/batdetect2/train/optimizers.py b/src/batdetect2/train/optimizers.py
new file mode 100644
index 00000000..092e57a7
--- /dev/null
+++ b/src/batdetect2/train/optimizers.py
@@ -0,0 +1,94 @@
+"""Optimizer configuration and factory utilities for training."""
+
+from collections.abc import Iterable
+from typing import Annotated, Literal
+
+from loguru import logger
+from pydantic import Field
+from torch import nn
+from torch.optim import Adam, Optimizer
+
+from batdetect2.core import (
+ BaseConfig,
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+
+__all__ = [
+ "AdamOptimizerConfig",
+ "OptimizerConfig",
+ "OptimizerImportConfig",
+ "build_optimizer",
+ "optimizer_registry",
+]
+
+
class AdamOptimizerConfig(BaseConfig):
    """Configuration for the Adam optimizer.

    Attributes
    ----------
    name : Literal["adam"]
        Discriminator field used by the optimizer registry.
    learning_rate : float
        Learning rate used by ``torch.optim.Adam``.
    """

    # "name" is the discriminated-union tag; do not change existing values.
    name: Literal["adam"] = "adam"
    learning_rate: float = 1e-3
+
+
# Registry mapping optimizer config types to factory functions; each factory
# receives the iterable of trainable parameters as its positional argument.
optimizer_registry: Registry[Optimizer, [Iterable[nn.Parameter]]] = Registry(
    "optimizer"
)
+
+
@add_import_config(optimizer_registry, arg_names=["params"])
class OptimizerImportConfig(ImportConfig):
    """Use any callable as an optimizer.

    Set ``name="import"`` and provide a ``target`` pointing to any callable
    that returns an optimizer. The training parameters are passed as the
    ``params`` keyword argument.
    """

    # Discriminator tag selecting the import-based optimizer path.
    name: Literal["import"] = "import"
+
+
@optimizer_registry.register(AdamOptimizerConfig)
def build_adam(
    config: AdamOptimizerConfig,
    params: Iterable[nn.Parameter],
) -> Optimizer:
    """Build an Adam optimizer from configuration.

    Parameters
    ----------
    config : AdamOptimizerConfig
        Validated Adam configuration (provides ``learning_rate``).
    params : Iterable[nn.Parameter]
        Parameters to optimize.

    Returns
    -------
    Optimizer
        A ``torch.optim.Adam`` instance over ``params``.
    """
    return Adam(params, lr=config.learning_rate)
+
+
# Discriminated union of the supported optimizer configurations; the "name"
# field selects the concrete config model during pydantic validation.
OptimizerConfig = Annotated[
    AdamOptimizerConfig | OptimizerImportConfig,
    Field(discriminator="name"),
]
+
+
def build_optimizer(
    parameters: Iterable[nn.Parameter],
    config: OptimizerConfig | None = None,
) -> Optimizer:
    """Build an optimizer from configuration.

    Parameters
    ----------
    parameters : Iterable[nn.Parameter]
        Model parameters to optimize.
    config : OptimizerConfig, optional
        Optimizer configuration. Defaults to ``AdamOptimizerConfig``.

    Returns
    -------
    Optimizer
        The optimizer produced by the registered factory for ``config``.
    """
    config = config or AdamOptimizerConfig()

    logger.opt(lazy=True).debug(
        "Building optimizer with config: \n{}",
        lambda: config.to_yaml_string(),
    )

    return optimizer_registry.build(config, parameters)
diff --git a/src/batdetect2/train/schedulers.py b/src/batdetect2/train/schedulers.py
new file mode 100644
index 00000000..73ebd095
--- /dev/null
+++ b/src/batdetect2/train/schedulers.py
@@ -0,0 +1,87 @@
+"""Scheduler configuration and factory utilities for training."""
+
+from typing import Annotated, Literal
+
+from loguru import logger
+from pydantic import Field
+from torch.optim import Optimizer
+from torch.optim.lr_scheduler import CosineAnnealingLR, LRScheduler
+
+from batdetect2.core import (
+ BaseConfig,
+ ImportConfig,
+ Registry,
+ add_import_config,
+)
+
+__all__ = [
+ "CosineAnnealingSchedulerConfig",
+ "SchedulerConfig",
+ "SchedulerImportConfig",
+ "build_scheduler",
+ "scheduler_registry",
+]
+
+
class CosineAnnealingSchedulerConfig(BaseConfig):
    """Configuration for ``CosineAnnealingLR``.

    Attributes
    ----------
    name : Literal["cosine_annealing"]
        Discriminator field used by the scheduler registry.
    t_max : int
        Number of epochs to complete one cosine cycle.
    """

    # "name" is the discriminated-union tag; do not change existing values.
    name: Literal["cosine_annealing"] = "cosine_annealing"
    t_max: int = 200
+
+
# Registry mapping scheduler config types to factory functions; each factory
# receives the optimizer instance as its positional argument.
scheduler_registry: Registry[LRScheduler, [Optimizer]] = Registry("scheduler")
+
+
@add_import_config(scheduler_registry, arg_names=["optimizer"])
class SchedulerImportConfig(ImportConfig):
    """Use any callable as a scheduler.

    Set ``name="import"`` and provide a ``target`` pointing to any callable
    that returns a scheduler. The optimizer instance is passed as the
    ``optimizer`` keyword argument.
    """

    # Discriminator tag selecting the import-based scheduler path.
    name: Literal["import"] = "import"
+
+
@scheduler_registry.register(CosineAnnealingSchedulerConfig)
def build_cosine_scheduler(
    config: CosineAnnealingSchedulerConfig,
    optimizer: Optimizer,
) -> LRScheduler:
    """Build a cosine annealing scheduler.

    ``t_max`` is interpreted in epochs because Lightning steps the scheduler
    once per epoch when ``interval="epoch"`` is used.

    Returns
    -------
    LRScheduler
        A ``CosineAnnealingLR`` instance bound to ``optimizer``.
    """
    return CosineAnnealingLR(optimizer, T_max=config.t_max)
+
+
# Discriminated union of the supported scheduler configurations; the "name"
# field selects the concrete config model during pydantic validation.
SchedulerConfig = Annotated[
    CosineAnnealingSchedulerConfig | SchedulerImportConfig,
    Field(discriminator="name"),
]
+
+
def build_scheduler(
    optimizer: Optimizer,
    config: SchedulerConfig | None = None,
) -> LRScheduler:
    """Build a scheduler from configuration.

    Parameters
    ----------
    optimizer : Optimizer
        The optimizer whose learning rate the scheduler controls.
    config : SchedulerConfig, optional
        Scheduler configuration. Defaults to
        ``CosineAnnealingSchedulerConfig``.

    Returns
    -------
    LRScheduler
        The scheduler produced by the registered factory for ``config``.
    """
    config = config or CosineAnnealingSchedulerConfig()

    logger.opt(lazy=True).debug(
        "Building scheduler with config: \n{}",
        lambda: config.to_yaml_string(),
    )

    return scheduler_registry.build(config, optimizer)
diff --git a/src/batdetect2/train/train.py b/src/batdetect2/train/train.py
new file mode 100644
index 00000000..f59138bc
--- /dev/null
+++ b/src/batdetect2/train/train.py
@@ -0,0 +1,303 @@
+from collections.abc import Sequence
+from pathlib import Path
+from typing import Optional
+
+from lightning import Trainer, seed_everything
+from lightning.pytorch.loggers import Logger
+from loguru import logger
+from soundevent import data
+
+from batdetect2.audio import AudioConfig, AudioLoader, build_audio_loader
+from batdetect2.evaluate import EvaluatorProtocol, build_evaluator
+from batdetect2.logging import (
+ CSVLoggerConfig,
+ LoggerConfig,
+ LoggingCallback,
+ build_logger,
+)
+from batdetect2.models import ModelConfig, build_model
+from batdetect2.models.types import ModelProtocol
+from batdetect2.preprocess import PreprocessorProtocol, build_preprocessor
+from batdetect2.targets import (
+ ROIMapperProtocol,
+ TargetConfig,
+ TargetProtocol,
+ build_roi_mapping,
+ build_targets,
+)
+from batdetect2.train.callbacks import ValidationMetrics
+from batdetect2.train.checkpoints import build_checkpoint_callback
+from batdetect2.train.config import TrainingConfig
+from batdetect2.train.dataset import build_train_loader, build_val_loader
+from batdetect2.train.labels import build_clip_labeler
+from batdetect2.train.lightning import build_training_module
+from batdetect2.train.logging import (
+ ConfigHyperparameterLogging,
+ DataSummaryArtifactLogging,
+ TargetConfigArtifactLogging,
+ TrainLoggingContext,
+)
+from batdetect2.train.types import ClipLabeller
+
+__all__ = [
+ "build_trainer",
+ "run_train",
+]
+
+
+DEFAULT_LOG_DIR = Path("outputs") / "logs"
+
+
+def run_train(
+ train_annotations: Sequence[data.ClipAnnotation],
+ val_annotations: Sequence[data.ClipAnnotation] | None = None,
+ model: ModelProtocol | None = None,
+ targets: Optional["TargetProtocol"] = None,
+ roi_mapper: Optional["ROIMapperProtocol"] = None,
+ preprocessor: Optional["PreprocessorProtocol"] = None,
+ audio_loader: Optional["AudioLoader"] = None,
+ labeller: Optional["ClipLabeller"] = None,
+ audio_config: Optional[AudioConfig] = None,
+ targets_config: TargetConfig | None = None,
+ train_config: Optional[TrainingConfig] = None,
+ logger_config: LoggerConfig | None = None,
+ trainer: Trainer | None = None,
+ train_workers: int = 0,
+ val_workers: int = 0,
+ checkpoint_dir: Path | None = None,
+ log_dir: Path | None = None,
+ experiment_name: str | None = None,
+ num_epochs: int | None = None,
+ run_name: str | None = None,
+ seed: int | None = None,
+ logging_callbacks: Sequence[LoggingCallback[TrainLoggingContext]] = (),
+):
+ if seed is not None:
+ seed_everything(seed)
+
+ model_config = (
+ ModelConfig()
+ if model is None
+ else ModelConfig.model_validate(model.get_config())
+ )
+ targets_config = targets_config or TargetConfig()
+ audio_config = audio_config or AudioConfig()
+ train_config = train_config or TrainingConfig()
+
+ if model is not None:
+ if targets is None:
+ raise ValueError(
+ "targets must be provided when training with an existing "
+ "model."
+ )
+
+ if roi_mapper is None:
+ raise ValueError(
+ "roi_mapper must be provided when training with an existing "
+ "model."
+ )
+
+ if targets is None:
+ targets = build_targets(config=targets_config)
+ else:
+ targets_config = TargetConfig.model_validate(targets.get_config())
+
+ roi_mapper = roi_mapper or build_roi_mapping(config=targets_config.roi)
+
+ if model is not None:
+ _validate_model_compatibility(
+ model=model,
+ model_config=model_config,
+ class_names=targets.class_names,
+ dimension_names=roi_mapper.dimension_names,
+ )
+
+ audio_loader = audio_loader or build_audio_loader(config=audio_config)
+
+ preprocessor = preprocessor or build_preprocessor(
+ input_samplerate=audio_loader.samplerate,
+ config=model_config.preprocess,
+ )
+
+ labeller = labeller or build_clip_labeler(
+ targets,
+ roi_mapper,
+ min_freq=preprocessor.min_freq,
+ max_freq=preprocessor.max_freq,
+ config=train_config.labels,
+ )
+
+ train_dataloader = build_train_loader(
+ train_annotations,
+ audio_loader=audio_loader,
+ labeller=labeller,
+ preprocessor=preprocessor,
+ config=train_config.train_loader,
+ num_workers=train_workers,
+ )
+
+ val_dataloader = (
+ build_val_loader(
+ val_annotations,
+ audio_loader=audio_loader,
+ labeller=labeller,
+ preprocessor=preprocessor,
+ config=train_config.val_loader,
+ num_workers=val_workers,
+ )
+ if val_annotations is not None
+ else None
+ )
+
+ module = build_training_module(
+ model_config=model_config,
+ targets_config=targets_config,
+ class_names=targets.class_names,
+ dimension_names=roi_mapper.dimension_names,
+ train_config=train_config,
+ model=model,
+ )
+
+ evaluator = build_evaluator(
+ train_config.validation,
+ targets=targets,
+ roi_mapper=roi_mapper,
+ )
+
+ train_logger = build_logger(
+ logger_config or CSVLoggerConfig(),
+ log_dir=log_dir,
+ experiment_name=experiment_name,
+ run_name=run_name,
+ )
+ root_artifact_path = (
+ Path(log_dir) if log_dir is not None else DEFAULT_LOG_DIR
+ )
+ root_artifact_path.mkdir(parents=True, exist_ok=True)
+
+ logging_context = TrainLoggingContext(
+ model_config=model_config.model_dump(mode="json"),
+ train_config=train_config,
+ audio_config=audio_config,
+ targets=targets,
+ train_dataset=train_annotations,
+ val_dataset=val_annotations,
+ )
+
+ resolved_logging_callbacks = (
+ ConfigHyperparameterLogging(),
+ TargetConfigArtifactLogging(),
+ DataSummaryArtifactLogging(),
+ *logging_callbacks,
+ )
+
+ for callback in resolved_logging_callbacks:
+ callback.run(train_logger, root_artifact_path, logging_context)
+
+ trainer = trainer or build_trainer(
+ train_config,
+ train_logger=train_logger,
+ evaluator=evaluator,
+ targets=targets,
+ roi_mapper=roi_mapper,
+ checkpoint_dir=checkpoint_dir,
+ num_epochs=num_epochs,
+ experiment_name=experiment_name,
+ run_name=run_name,
+ )
+
+ logger.info("Starting main training loop...")
+ trainer.fit(
+ module,
+ train_dataloaders=train_dataloader,
+ val_dataloaders=val_dataloader,
+ )
+ logger.info("Training complete.")
+
+ return module
+
+
+def _validate_model_compatibility(
+ model: ModelProtocol,
+ model_config: ModelConfig,
+ class_names: list[str],
+ dimension_names: list[str],
+) -> None:
+ reference_model = build_model(
+ config=model_config,
+ class_names=class_names,
+ dimension_names=dimension_names,
+ )
+
+ expected_shapes = {
+ key: tuple(value.shape)
+ for key, value in reference_model.state_dict().items()
+ }
+ actual_shapes = {
+ key: tuple(value.shape) for key, value in model.state_dict().items()
+ }
+
+ expected_keys = set(expected_shapes)
+ actual_keys = set(actual_shapes)
+
+ missing_keys = sorted(expected_keys - actual_keys)
+ if missing_keys:
+ key = missing_keys[0]
+ raise ValueError(
+ "Provided model is incompatible with model_config: "
+ f"missing state key '{key}'."
+ )
+
+ extra_keys = sorted(actual_keys - expected_keys)
+ if extra_keys:
+ key = extra_keys[0]
+ raise ValueError(
+ "Provided model is incompatible with model_config: "
+ f"unexpected state key '{key}'."
+ )
+
+ for key, expected_shape in expected_shapes.items():
+ actual_shape = actual_shapes[key]
+ if actual_shape != expected_shape:
+ raise ValueError(
+ "Provided model is incompatible with model_config: "
+ f"shape mismatch for '{key}' (expected {expected_shape}, "
+ f"got {actual_shape})."
+ )
+
+
+def build_trainer(
+ config: TrainingConfig,
+ train_logger: Logger,
+ evaluator: "EvaluatorProtocol",
+ targets: "TargetProtocol",
+ roi_mapper: "ROIMapperProtocol",
+ checkpoint_dir: Path | None = None,
+ experiment_name: str | None = None,
+ run_name: str | None = None,
+ num_epochs: int | None = None,
+) -> Trainer:
+ trainer_conf = config.trainer
+ logger.opt(lazy=True).debug(
+ "Building trainer with config: \n{config}",
+ config=lambda: trainer_conf.to_yaml_string(exclude_none=True),
+ )
+
+ if num_epochs is not None:
+ trainer_conf.max_epochs = num_epochs
+
+ train_config = trainer_conf.model_dump(exclude_none=True)
+
+ return Trainer(
+ **train_config,
+ logger=train_logger,
+ callbacks=[
+ build_checkpoint_callback(
+ config=config.checkpoints,
+ checkpoint_dir=checkpoint_dir,
+ experiment_name=experiment_name,
+ run_name=run_name,
+ ),
+ ValidationMetrics(evaluator, targets, roi_mapper),
+ ],
+ )
diff --git a/src/batdetect2/train/types.py b/src/batdetect2/train/types.py
new file mode 100644
index 00000000..ed347e99
--- /dev/null
+++ b/src/batdetect2/train/types.py
@@ -0,0 +1,70 @@
+from collections.abc import Callable
+from typing import TYPE_CHECKING, NamedTuple, Protocol
+
+import torch
+from soundevent import data
+
+if TYPE_CHECKING:
+ from batdetect2.models.types import ModelOutput
+
+__all__ = [
+ "Augmentation",
+ "ClipLabeller",
+ "Heatmaps",
+ "Losses",
+ "LossProtocol",
+ "TrainExample",
+]
+
+
+class Heatmaps(NamedTuple):
+ detection: torch.Tensor
+ classes: torch.Tensor
+ size: torch.Tensor
+
+
+class PreprocessedExample(NamedTuple):
+ audio: torch.Tensor
+ spectrogram: torch.Tensor
+ detection_heatmap: torch.Tensor
+ class_heatmap: torch.Tensor
+ size_heatmap: torch.Tensor
+
+ def copy(self):
+ return PreprocessedExample(
+ audio=self.audio.clone(),
+ spectrogram=self.spectrogram.clone(),
+ detection_heatmap=self.detection_heatmap.clone(),
+ size_heatmap=self.size_heatmap.clone(),
+ class_heatmap=self.class_heatmap.clone(),
+ )
+
+
+ClipLabeller = Callable[[data.ClipAnnotation, torch.Tensor], Heatmaps]
+
+
+Augmentation = Callable[
+ [torch.Tensor, data.ClipAnnotation],
+ tuple[torch.Tensor, data.ClipAnnotation],
+]
+
+
+class TrainExample(NamedTuple):
+ spec: torch.Tensor
+ detection_heatmap: torch.Tensor
+ class_heatmap: torch.Tensor
+ size_heatmap: torch.Tensor
+ idx: torch.Tensor
+ start_time: torch.Tensor
+ end_time: torch.Tensor
+
+
+class Losses(NamedTuple):
+ detection: torch.Tensor
+ size: torch.Tensor
+ classification: torch.Tensor
+ total: torch.Tensor
+
+
+class LossProtocol(Protocol):
+ def __call__(self, pred: "ModelOutput", gt: TrainExample) -> Losses: ...
diff --git a/batdetect2/types.py b/src/batdetect2/types.py
similarity index 75%
rename from batdetect2/types.py
rename to src/batdetect2/types.py
index 3f228621..5d7455e5 100644
--- a/batdetect2/types.py
+++ b/src/batdetect2/types.py
@@ -1,29 +1,14 @@
"""Types used in the code base."""
-from typing import List, NamedTuple, Optional, Union, Any, BinaryIO
-
-import audioread
-import os
-import soundfile as sf
+import sys
+from typing import Any, NamedTuple, Protocol, TypedDict
import numpy as np
import torch
-try:
- from typing import TypedDict
-except ImportError:
- from typing_extensions import TypedDict
-
-
-try:
- from typing import Protocol
-except ImportError:
- from typing_extensions import Protocol
-
-
-try:
- from typing import NotRequired # type: ignore
-except ImportError:
+if sys.version_info >= (3, 11):
+ from typing import NotRequired
+else:
from typing_extensions import NotRequired
@@ -31,8 +16,7 @@
"Annotation",
"DetectionModel",
"FeatureExtractionParameters",
- "FeatureExtractor",
- "FileAnnotations",
+ "FileAnnotation",
"ModelOutput",
"ModelParameters",
"NonMaximumSuppressionConfig",
@@ -42,11 +26,9 @@
"ResultParams",
"RunResults",
"SpectrogramParameters",
+ "AudioLoaderAnnotationGroup",
]
-AudioPath = Union[
- str, int, os.PathLike[Any], sf.SoundFile, audioread.AudioFile, BinaryIO
- ]
class SpectrogramParameters(TypedDict):
"""Parameters for generating spectrograms."""
@@ -100,8 +82,11 @@ class ModelParameters(TypedDict):
resize_factor: float
"""Resize factor."""
- class_names: List[str]
- """Class names. The model is trained to detect these classes."""
+ class_names: list[str]
+ """Class names.
+
+ The model is trained to detect these classes.
+ """
DictWithClass = TypedDict("DictWithClass", {"class": str})
@@ -110,8 +95,8 @@ class ModelParameters(TypedDict):
class Annotation(DictWithClass):
"""Format of annotations.
- This is the format of a single annotation as expected by the annotation
- tool.
+ This is the format of a single annotation as expected by the
+ annotation tool.
"""
start_time: float
@@ -120,10 +105,10 @@ class Annotation(DictWithClass):
end_time: float
"""End time in seconds."""
- low_freq: int
+ low_freq: float
"""Low frequency in Hz."""
- high_freq: int
+ high_freq: float
"""High frequency in Hz."""
class_prob: float
@@ -138,8 +123,11 @@ class Annotation(DictWithClass):
event: str
"""Type of detected event."""
+ class_id: NotRequired[int]
+ """Numeric ID for the class of the annotation."""
+
-class FileAnnotations(TypedDict):
+class FileAnnotation(TypedDict):
"""Format of results.
This is the format of the results expected by the annotation tool.
@@ -161,41 +149,41 @@ class FileAnnotations(TypedDict):
"""Time expansion factor."""
class_name: str
- """Class predicted at file level"""
+ """Class predicted at file level."""
notes: str
"""Notes of file."""
- annotation: List[Annotation]
+ annotation: list[Annotation]
"""List of annotations."""
class RunResults(TypedDict):
"""Run results."""
- pred_dict: FileAnnotations
+ pred_dict: FileAnnotation
"""Predictions in the format expected by the annotation tool."""
- spec_feats: NotRequired[List[np.ndarray]]
+ spec_feats: NotRequired[list[np.ndarray]]
"""Spectrogram features."""
- spec_feat_names: NotRequired[List[str]]
+ spec_feat_names: NotRequired[list[str]]
"""Spectrogram feature names."""
- cnn_feats: NotRequired[List[np.ndarray]]
+ cnn_feats: NotRequired[list[np.ndarray]]
"""CNN features."""
- cnn_feat_names: NotRequired[List[str]]
+ cnn_feat_names: NotRequired[list[str]]
"""CNN feature names."""
- spec_slices: NotRequired[List[np.ndarray]]
+ spec_slices: NotRequired[list[np.ndarray]]
"""Spectrogram slices."""
class ResultParams(TypedDict):
"""Result parameters."""
- class_names: List[str]
+ class_names: list[str]
"""Class names."""
spec_features: bool
@@ -242,13 +230,13 @@ class ProcessingConfiguration(TypedDict):
scale_raw_audio: bool
"""Whether to scale the raw audio to be between -1 and 1."""
- class_names: List[str]
+ class_names: list[str]
"""Names of the classes the model can detect."""
detection_threshold: float
"""Threshold for detection probability."""
- time_expansion: Optional[float]
+ time_expansion: float | None
"""Time expansion factor of the processed recordings."""
top_n: int
@@ -257,7 +245,7 @@ class ProcessingConfiguration(TypedDict):
return_raw_preds: bool
"""Whether to return raw predictions."""
- max_duration: Optional[float]
+ max_duration: float | None
"""Maximum duration of audio file to process in seconds."""
nms_kernel_size: int
@@ -398,9 +386,9 @@ class PredictionResults(TypedDict):
class DetectionModel(Protocol):
"""Protocol for detection models.
- This protocol is used to define the interface for the detection models.
- This allows us to use the same code for training and inference, even
- though the models are different.
+ This protocol is used to define the interface for the detection
+ models. This allows us to use the same code for training and
+ inference, even though the models are different.
"""
num_classes: int
@@ -420,16 +408,14 @@ class DetectionModel(Protocol):
def forward(
self,
- ip: torch.Tensor,
- return_feats: bool = False,
+ spec: torch.Tensor,
) -> ModelOutput:
"""Forward pass of the model."""
...
def __call__(
self,
- ip: torch.Tensor,
- return_feats: bool = False,
+ spec: torch.Tensor,
) -> ModelOutput:
"""Forward pass of the model."""
...
@@ -476,7 +462,7 @@ class FeatureExtractionParameters(TypedDict):
class HeatmapParameters(TypedDict):
"""Parameters that control the heatmap generation function."""
- class_names: List[str]
+ class_names: list[str]
fft_win_length: float
"""Length of the FFT window in seconds."""
@@ -494,8 +480,10 @@ class HeatmapParameters(TypedDict):
"""Maximum frequency to consider in Hz."""
target_sigma: float
- """Sigma for the Gaussian kernel. Controls the width of the points in
- the heatmap."""
+ """Sigma for the Gaussian kernel.
+
+ Controls the width of the points in the heatmap.
+ """
class AnnotationGroup(TypedDict):
@@ -523,6 +511,15 @@ class AnnotationGroup(TypedDict):
individual_ids: np.ndarray
"""Individual IDs of the annotations."""
+ annotated: NotRequired[bool]
+ """Whether the annotation group is complete or not.
+
+ Usually annotation groups are associated to a single audio clip. If
+ the annotation group is complete, it means that all relevant sound
+ events have been annotated. If it is not complete, it means that
+ some sound events might not have been annotated.
+ """
+
x_inds: NotRequired[np.ndarray]
"""X coordinate of the annotations in the spectrogram."""
@@ -530,9 +527,87 @@ class AnnotationGroup(TypedDict):
"""Y coordinate of the annotations in the spectrogram."""
+class AudioLoaderAnnotationGroup(TypedDict):
+ """Group of annotation items for the training audio loader.
+
+ This class is used to store the annotations for the training audio
+ loader. It combines the fields of `AnnotationGroup` and `FileAnnotation`.
+ """
+
+ id: str
+ duration: float
+ issues: bool
+ file_path: str
+ time_exp: float
+ class_name: str
+ notes: str
+ start_times: np.ndarray
+ end_times: np.ndarray
+ low_freqs: np.ndarray
+ high_freqs: np.ndarray
+ class_ids: np.ndarray
+ individual_ids: np.ndarray
+ x_inds: np.ndarray
+ y_inds: np.ndarray
+ annotation: list[Annotation]
+ annotated: bool
+ class_id_file: int
+ """ID of the class of the file."""
+
+
+class AudioLoaderParameters(TypedDict):
+ class_names: list[str]
+ classes_to_ignore: list[str]
+ target_samp_rate: int
+ scale_raw_audio: bool
+ fft_win_length: float
+ fft_overlap: float
+ spec_train_width: int
+ resize_factor: float
+ spec_divide_factor: int
+ augment_at_train: bool
+ augment_at_train_combine: bool
+ aug_prob: float
+ spec_height: int
+ echo_max_delay: float
+ spec_amp_scaling: float
+ stretch_squeeze_delta: float
+ mask_max_time_perc: float
+ mask_max_freq_perc: float
+ max_freq: float
+ min_freq: float
+ spec_scale: str
+ denoise_spec_avg: bool
+ max_scale_spec: bool
+ target_sigma: float
+
+
class FeatureExtractor(Protocol):
- """Protocol for feature extractors."""
+ def __call__(
+ self,
+ prediction: Prediction,
+ **kwargs: Any,
+ ) -> float: ...
- def __call__(self, prediction: Prediction, **kwargs) -> Union[float, int]:
- """Extract features from a prediction."""
- ...
+
+class DatasetDict(TypedDict):
+ """Dataset dictionary.
+
+ This is the format of the dictionary that contains the dataset
+ information.
+ """
+
+ dataset_name: str
+ """Name of the dataset."""
+
+ is_test: bool
+ """Whether the dataset is a test set."""
+
+ is_binary: bool
+ """Whether the dataset is binary."""
+
+ ann_path: str
+ """Path to the annotations."""
+
+ wav_path: str
+ """Path to the audio files."""
diff --git a/src/batdetect2/utils/__init__.py b/src/batdetect2/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/batdetect2/utils/audio_utils.py b/src/batdetect2/utils/audio_utils.py
similarity index 81%
rename from batdetect2/utils/audio_utils.py
rename to src/batdetect2/utils/audio_utils.py
index b89cdca7..f5887cb0 100644
--- a/batdetect2/utils/audio_utils.py
+++ b/src/batdetect2/utils/audio_utils.py
@@ -1,24 +1,16 @@
import warnings
-from typing import Optional, Tuple, Union, Any, BinaryIO
-
-from ..types import AudioPath
import librosa
import librosa.core.spectrum
import numpy as np
import torch
-import audioread
-import os
-import soundfile as sf
-
from batdetect2.detector import parameters
from . import wavfile
__all__ = [
"load_audio",
- "load_audio_and_samplerate",
"generate_spectrogram",
"pad_audio",
]
@@ -85,7 +77,7 @@ def generate_spectrogram(
spec = np.vstack(
(np.zeros((freq_pad, spec.shape[1]), dtype=spec.dtype), spec)
)
- spec_cropped = spec[-max_freq : spec.shape[0] - min_freq, :]
+ spec = spec[-max_freq : spec.shape[0] - min_freq, :]
if params["spec_scale"] == "log":
log_scaling = (
@@ -97,7 +89,7 @@ def generate_spectrogram(
np.abs(
np.hanning(
int(params["fft_win_length"] * sampling_rate)
- )
+ ).astype(np.float32)
)
** 2
).sum()
@@ -105,9 +97,9 @@ def generate_spectrogram(
)
# log_scaling = (1.0 / sampling_rate)*0.1
# log_scaling = (1.0 / sampling_rate)*10e4
- spec = np.log1p(log_scaling * spec_cropped)
+ spec = np.log1p(log_scaling * spec)
elif params["spec_scale"] == "pcen":
- spec = pcen(spec_cropped, sampling_rate)
+ spec = pcen(spec, sampling_rate)
elif params["spec_scale"] == "none":
pass
@@ -141,81 +133,62 @@ def generate_spectrogram(
).sum()
)
)
- spec_for_viz = np.log1p(log_scaling * spec_cropped).astype(np.float32)
+ spec_for_viz = np.log1p(log_scaling * spec).astype(np.float32)
else:
spec_for_viz = None
return spec, spec_for_viz
-def load_audio(
- path: AudioPath,
- time_exp_fact: float,
- target_samp_rate: int,
- scale: bool = False,
- max_duration: Optional[float] = None,
-) -> Tuple[int, np.ndarray ]:
- """Load an audio file and resample it to the target sampling rate.
-
- The audio is also scaled to [-1, 1] and clipped to the maximum duration.
- Only mono files are supported.
-
- Args:
- path (string, int, pathlib.Path, soundfile.SoundFile, audioread object, or file-like object): path to the input file.
- target_samp_rate (int): Target sampling rate.
- scale (bool): Whether to scale the audio to [-1, 1].
- max_duration (float): Maximum duration of the audio in seconds.
-
- Returns:
- sampling_rate: The sampling rate of the audio.
- audio_raw: The audio signal in a numpy array.
-
- Raises:
- ValueError: If the audio file is stereo.
- """
- sample_rate, audio_data, _ = load_audio_and_samplerate(path, time_exp_fact, target_samp_rate, scale, max_duration)
- return sample_rate, audio_data
-
-def load_audio_and_samplerate(
- path: AudioPath,
+def load_audio(
+ audio_file: str,
time_exp_fact: float,
target_samp_rate: int,
scale: bool = False,
- max_duration: Optional[float] = None,
-) -> Tuple[int, np.ndarray, Union[float, int]]:
+ max_duration: float | None = None,
+) -> tuple[int, np.ndarray]:
"""Load an audio file and resample it to the target sampling rate.
The audio is also scaled to [-1, 1] and clipped to the maximum duration.
Only mono files are supported.
- Args:
- path (string, int, pathlib.Path, soundfile.SoundFile, audioread object, or file-like object): path to the input file.
- target_samp_rate (int): Target sampling rate.
- scale (bool): Whether to scale the audio to [-1, 1].
- max_duration (float): Maximum duration of the audio in seconds.
+ Parameters
+ ----------
+ audio_file: str
+ Path to the audio file.
+ target_samp_rate: int
+ Target sampling rate.
+ scale: bool, optional
+ Whether to scale the audio to [-1, 1]. Default: False.
+ max_duration: float, optional
+ Maximum duration of the audio in seconds. Defaults to None.
+ If provided, the audio is clipped to this duration.
- Returns:
- sampling_rate: The sampling rate of the audio.
- audio_raw: The audio signal in a numpy array.
- file_sampling_rate: The original sampling rate of the audio
+ Returns
+ -------
+ sampling_rate: int
+ The sampling rate of the audio.
+ audio_raw: np.ndarray
+ The audio signal in a numpy array.
- Raises:
- ValueError: If the audio file is stereo.
+ Raises
+ ------
+ ValueError: If the audio file is stereo.
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=wavfile.WavFileWarning)
# sampling_rate, audio_raw = wavfile.read(audio_file)
- audio_raw, file_sampling_rate = librosa.load(
- path,
+ audio_raw, sampling_rate = librosa.load(
+ audio_file,
sr=None,
dtype=np.float32,
)
-
+
if len(audio_raw.shape) > 1:
raise ValueError("Currently does not handle stereo files")
- sampling_rate = file_sampling_rate * time_exp_fact
+ sampling_rate = sampling_rate * time_exp_fact
# resample - need to do this after correcting for time expansion
sampling_rate_old = sampling_rate
@@ -243,7 +216,7 @@ def load_audio_and_samplerate(
audio_raw = audio_raw - audio_raw.mean()
audio_raw = audio_raw / (np.abs(audio_raw).max() + 10e-6)
- return sampling_rate, audio_raw, file_sampling_rate
+ return sampling_rate, audio_raw
def compute_spectrogram_width(
@@ -267,7 +240,7 @@ def pad_audio(
window_overlap: float = parameters.FFT_OVERLAP,
resize_factor: float = parameters.RESIZE_FACTOR,
divide_factor: int = parameters.SPEC_DIVIDE_FACTOR,
- fixed_width: Optional[int] = None,
+ fixed_width: int | None = None,
):
"""Pad audio to be evenly divisible by `divide_factor`.
diff --git a/batdetect2/utils/detector_utils.py b/src/batdetect2/utils/detector_utils.py
similarity index 88%
rename from batdetect2/utils/detector_utils.py
rename to src/batdetect2/utils/detector_utils.py
index f96c5d79..9c838b20 100644
--- a/batdetect2/utils/detector_utils.py
+++ b/src/batdetect2/utils/detector_utils.py
@@ -1,9 +1,8 @@
import json
import os
-from typing import Any, Iterator, List, Optional, Tuple, Union, BinaryIO
-
-from ..types import AudioPath
+from typing import Any, Iterator
+import librosa
import numpy as np
import pandas as pd
import torch
@@ -22,7 +21,7 @@
from batdetect2.types import (
Annotation,
DetectionModel,
- FileAnnotations,
+ FileAnnotation,
ModelOutput,
ModelParameters,
PredictionResults,
@@ -32,13 +31,6 @@
SpectrogramParameters,
)
-import audioread
-import os
-import io
-import soundfile as sf
-import hashlib
-import uuid
-
__all__ = [
"load_model",
"list_audio_files",
@@ -68,7 +60,7 @@ def get_default_bd_args():
return args
-def list_audio_files(ip_dir: str) -> List[str]:
+def list_audio_files(ip_dir: str) -> list[str]:
"""Get all audio files in directory.
Args:
@@ -92,9 +84,9 @@ def list_audio_files(ip_dir: str) -> List[str]:
def load_model(
model_path: str = DEFAULT_MODEL_PATH,
load_weights: bool = True,
- device: Optional[torch.device] = None,
+ device: torch.device | str | None = None,
weights_only: bool = True,
-) -> Tuple[DetectionModel, ModelParameters]:
+) -> tuple[DetectionModel, ModelParameters]:
"""Load model from file.
Args:
@@ -193,26 +185,28 @@ def _merge_results(predictions, spec_feats, cnn_feats, spec_slices):
def get_annotations_from_preds(
predictions: PredictionResults,
- class_names: List[str],
-) -> List[Annotation]:
+ class_names: list[str],
+) -> list[Annotation]:
"""Get list of annotations from predictions."""
# Get the best class prediction probability and index for each detection
class_prob_best = predictions["class_probs"].max(0)
class_ind_best = predictions["class_probs"].argmax(0)
# Pack the results into a list of dictionaries
- annotations: List[Annotation] = [
- {
- "start_time": round(float(start_time), 4),
- "end_time": round(float(end_time), 4),
- "low_freq": int(low_freq),
- "high_freq": int(high_freq),
- "class": str(class_names[class_index]),
- "class_prob": round(float(class_prob), 3),
- "det_prob": round(float(det_prob), 3),
- "individual": "-1",
- "event": "Echolocation",
- }
+ annotations: list[Annotation] = [
+ Annotation(
+ {
+ "start_time": round(float(start_time), 4),
+ "end_time": round(float(end_time), 4),
+ "low_freq": int(low_freq),
+ "high_freq": int(high_freq),
+ "class": str(class_names[class_index]),
+ "class_prob": round(float(class_prob), 3),
+ "det_prob": round(float(det_prob), 3),
+ "individual": "-1",
+ "event": "Echolocation",
+ }
+ )
for (
start_time,
end_time,
@@ -229,6 +223,7 @@ def get_annotations_from_preds(
class_ind_best,
class_prob_best,
predictions["det_probs"],
+ strict=False,
)
]
return annotations
@@ -239,8 +234,8 @@ def format_single_result(
time_exp: float,
duration: float,
predictions: PredictionResults,
- class_names: List[str],
-) -> FileAnnotations:
+ class_names: list[str],
+) -> FileAnnotation:
"""Format results into the format expected by the annotation tool.
Args:
@@ -287,7 +282,7 @@ def convert_results(
spec_feats,
cnn_feats,
spec_slices,
- nyquist_freq: Optional[float] = None,
+ nyquist_freq: float | None = None,
) -> RunResults:
"""Convert results to dictionary as expected by the annotation tool.
@@ -322,9 +317,11 @@ def convert_results(
]
# combine into final results dictionary
- results: RunResults = {
- "pred_dict": pred_dict,
- }
+ results: RunResults = RunResults( # type: ignore[missing-argument]
+ {
+ "pred_dict": pred_dict,
+ }
+ )
# add spectrogram features if they exist
if len(spec_feats) > 0 and params["spec_features"]:
@@ -420,8 +417,7 @@ def compute_spectrogram(
sampling_rate: int,
params: SpectrogramParameters,
device: torch.device,
- return_np: bool = False,
-) -> Tuple[float, torch.Tensor, Optional[np.ndarray]]:
+) -> tuple[float, torch.Tensor]:
"""Compute a spectrogram from an audio array.
Will pad the audio array so that it is evenly divisible by the
@@ -430,24 +426,16 @@ def compute_spectrogram(
Parameters
----------
audio : np.ndarray
-
sampling_rate : int
-
params : SpectrogramParameters
The parameters to use for generating the spectrogram.
- return_np : bool, optional
- Whether to return the spectrogram as a numpy array as well as a
- torch tensor. The default is False.
-
Returns
-------
duration : float
The duration of the spectrgram in seconds.
-
spec : torch.Tensor
The spectrogram as a torch tensor.
-
spec_np : np.ndarray, optional
The spectrogram as a numpy array. Only returned if `return_np` is
True, otherwise None.
@@ -484,20 +472,14 @@ def compute_spectrogram(
mode="bilinear",
align_corners=False,
)
-
- if return_np:
- spec_np = spec[0, 0, :].cpu().data.numpy()
- else:
- spec_np = None
-
- return duration, spec, spec_np
+ return duration, spec
def iterate_over_chunks(
audio: np.ndarray,
- samplerate: int,
+ samplerate: float,
chunk_size: float,
-) -> Iterator[Tuple[float, np.ndarray]]:
+) -> Iterator[tuple[float, np.ndarray]]:
"""Iterate over audio in chunks of size chunk_size.
Parameters
@@ -528,10 +510,10 @@ def iterate_over_chunks(
def _process_spectrogram(
spec: torch.Tensor,
- samplerate: int,
+ samplerate: float,
model: DetectionModel,
config: ProcessingConfiguration,
-) -> Tuple[PredictionResults, np.ndarray]:
+) -> tuple[PredictionResults, np.ndarray]:
# evaluate model
with torch.no_grad():
outputs = model(spec)
@@ -568,7 +550,7 @@ def postprocess_model_outputs(
outputs: ModelOutput,
samp_rate: int,
config: ProcessingConfiguration,
-) -> Tuple[List[Annotation], np.ndarray]:
+) -> tuple[list[Annotation], np.ndarray]:
# run non-max suppression
pred_nms_list, features = pp.run_nms(
outputs,
@@ -607,7 +589,7 @@ def process_spectrogram(
samplerate: int,
model: DetectionModel,
config: ProcessingConfiguration,
-) -> Tuple[List[Annotation], np.ndarray]:
+) -> tuple[list[Annotation], np.ndarray]:
"""Process a spectrogram with detection model.
Will run non-maximum suppression on the output of the model.
@@ -626,9 +608,9 @@ def process_spectrogram(
Returns
-------
- detections: List[Annotation]
+ detections
List of detections predicted by the model.
- features : np.ndarray
+ features
An array of CNN features associated with each annotation.
The array is of shape (num_detections, num_features).
Is empty if `config["cnn_features"]` is False.
@@ -654,9 +636,9 @@ def _process_audio_array(
model: DetectionModel,
config: ProcessingConfiguration,
device: torch.device,
-) -> Tuple[PredictionResults, np.ndarray, torch.Tensor]:
+) -> tuple[PredictionResults, np.ndarray, torch.Tensor]:
# load audio file and compute spectrogram
- _, spec, _ = compute_spectrogram(
+ _, spec = compute_spectrogram(
audio,
sampling_rate,
{
@@ -672,7 +654,6 @@ def _process_audio_array(
"max_scale_spec": config["max_scale_spec"],
},
device,
- return_np=False,
)
# process spectrogram with model
@@ -692,7 +673,7 @@ def process_audio_array(
model: DetectionModel,
config: ProcessingConfiguration,
device: torch.device,
-) -> Tuple[List[Annotation], np.ndarray, torch.Tensor]:
+) -> tuple[list[Annotation], np.ndarray, torch.Tensor]:
"""Process a single audio array with detection model.
Parameters
@@ -712,7 +693,7 @@ def process_audio_array(
Returns
-------
- annotations : List[Annotation]
+ annotations : list[Annotation]
List of annotations predicted by the model.
features : np.ndarray
Array of CNN features associated with each annotation.
@@ -737,12 +718,11 @@ def process_audio_array(
def process_file(
- path: AudioPath,
+ audio_file: str,
model: DetectionModel,
config: ProcessingConfiguration,
device: torch.device,
- file_id: Optional[str] = None
-) -> Union[RunResults, Any]:
+) -> RunResults | Any:
"""Process a single audio file with detection model.
Will split the audio file into chunks if it is too long and
@@ -750,7 +730,7 @@ def process_file(
Parameters
----------
- path : AudioPath
+ audio_file : str
Path to audio file.
model : torch.nn.Module
@@ -758,9 +738,6 @@ def process_file(
config : ProcessingConfiguration
Configuration for processing.
-
- file_id: Optional[str],
- Give the data an id. Defaults to the filename if path is a string. Otherwise an md5 will be calculated from the binary data.
Returns
-------
@@ -774,17 +751,19 @@ def process_file(
cnn_feats = []
spec_slices = []
+ # Get original sampling rate
+ file_samp_rate = librosa.get_samplerate(audio_file)
+ orig_samp_rate = file_samp_rate * (config.get("time_expansion") or 1)
+
# load audio file
- sampling_rate, audio_full, file_samp_rate = au.load_audio_and_samplerate(
- path,
+ sampling_rate, audio_full = au.load_audio(
+ audio_file,
time_exp_fact=config.get("time_expansion", 1) or 1,
target_samp_rate=config["target_samp_rate"],
scale=config["scale_raw_audio"],
max_duration=config.get("max_duration"),
)
- orig_samp_rate = file_samp_rate * (config.get("time_expansion") or 1)
-
# loop through larger file and split into chunks
# TODO: fix so that it overlaps correctly and takes care of
# duplicate detections at borders
@@ -822,7 +801,6 @@ def process_file(
cnn_feats.append(features[0])
if config["spec_slices"]:
- # FIX: This is not currently working. Returns empty slices
spec_slices.extend(feats.extract_spec_slices(spec_np, pred_nms))
# Merge results from chunks
@@ -833,13 +811,9 @@ def process_file(
spec_slices,
)
- _file_id = file_id
- if _file_id is None:
- _file_id = _generate_id(path)
-
# convert results to a dictionary in the right format
results = convert_results(
- file_id=_file_id,
+ file_id=os.path.basename(audio_file),
time_exp=config.get("time_expansion", 1) or 1,
duration=audio_full.shape[0] / float(sampling_rate),
params=config,
@@ -859,22 +833,6 @@ def process_file(
return results
-def _generate_id(path: AudioPath) -> str:
- """ Generate an id based on the path.
-
- If the path is a str or PathLike it will parsed as the basename.
- This should ensure backwards compatibility with previous versions.
- """
- if isinstance(path, str) or isinstance(path, os.PathLike):
- return os.path.basename(path)
- elif isinstance(path, (BinaryIO, io.BytesIO)):
- path.seek(0)
- md5 = hashlib.md5(path.read()).hexdigest()
- path.seek(0)
- return md5
- else:
- return str(uuid.uuid4())
-
def summarize_results(results, predictions, config):
"""Print summary of results."""
diff --git a/batdetect2/utils/plot_utils.py b/src/batdetect2/utils/plot_utils.py
similarity index 98%
rename from batdetect2/utils/plot_utils.py
rename to src/batdetect2/utils/plot_utils.py
index 4bfde7a6..359f0883 100644
--- a/batdetect2/utils/plot_utils.py
+++ b/src/batdetect2/utils/plot_utils.py
@@ -87,9 +87,7 @@ def save_ann_spec(
y_extent = [0, duration, min_freq, max_freq]
plt.close("all")
- fig = plt.figure(
- 0, figsize=(spec.shape[1] / 100, spec.shape[0] / 100), dpi=100
- )
+ plt.figure(0, figsize=(spec.shape[1] / 100, spec.shape[0] / 100), dpi=100)
plt.imshow(
spec,
aspect="auto",
@@ -369,7 +367,7 @@ def plot_pr_curve_class(
# print(class_name)
# plot the location of the confidence threshold values
- for jj, tt in enumerate(rr["thresholds"]):
+ for jj, _tt in enumerate(rr["thresholds"]):
ind = rr["thresholds_inds"][jj]
if ind > -1:
plt.plot(
@@ -417,7 +415,9 @@ def plot_confusion_matrix(
cm_norm = cm.sum(1)
valid_inds = np.where(cm_norm > 0)[0]
- cm[valid_inds, :] = cm[valid_inds, :] / cm_norm[valid_inds][..., np.newaxis]
+ cm[valid_inds, :] = (
+ cm[valid_inds, :] / cm_norm[valid_inds][..., np.newaxis]
+ )
cm[np.where(cm_norm == -0)[0], :] = np.nan
if verbose:
diff --git a/src/batdetect2/utils/tensors.py b/src/batdetect2/utils/tensors.py
new file mode 100644
index 00000000..6436c4d4
--- /dev/null
+++ b/src/batdetect2/utils/tensors.py
@@ -0,0 +1,74 @@
+import numpy as np
+import torch
+from torch.nn import functional as F
+
+
+def extend_width(
+ array: np.ndarray | torch.Tensor,
+ extra: int,
+ axis: int = -1,
+ value: float = 0,
+) -> torch.Tensor:
+ if not isinstance(array, torch.Tensor):
+ array = torch.Tensor(array)
+
+ dims = len(array.shape)
+ axis = axis % dims
+ pad = [
+ [0, 0] if index != axis else [0, extra]
+ for index in range(axis, dims)[::-1]
+ ]
+ return F.pad(
+ array,
+ [x for y in pad for x in y],
+ value=value,
+ )
+
+
+def make_width_divisible(
+ array: np.ndarray | torch.Tensor,
+ factor: int,
+ axis: int = -1,
+ value: float = 0,
+) -> torch.Tensor:
+ if not isinstance(array, torch.Tensor):
+ array = torch.Tensor(array)
+
+ width = array.shape[axis]
+
+ if width % factor == 0:
+ return array
+
+ extra = (-width) % factor
+ return extend_width(array, extra, axis=axis, value=value)
+
+
+def adjust_width(
+ array: np.ndarray | torch.Tensor,
+ width: int,
+ axis: int = -1,
+ value: float = 0,
+) -> torch.Tensor:
+ if not isinstance(array, torch.Tensor):
+ array = torch.Tensor(array)
+
+ dims = len(array.shape)
+ axis = axis % dims
+ current_width = array.shape[axis]
+
+ if current_width == width:
+ return array
+
+ if current_width < width:
+ return extend_width(
+ array,
+ extra=width - current_width,
+ axis=axis,
+ value=value,
+ )
+
+ slices = [
+ slice(None, None) if index != axis else slice(None, width)
+ for index in range(dims)
+ ]
+ return array[tuple(slices)]
diff --git a/batdetect2/utils/visualize.py b/src/batdetect2/utils/visualize.py
similarity index 96%
rename from batdetect2/utils/visualize.py
rename to src/batdetect2/utils/visualize.py
index d79f322c..b7f889a6 100644
--- a/batdetect2/utils/visualize.py
+++ b/src/batdetect2/utils/visualize.py
@@ -133,7 +133,7 @@ def plot(self, fig_id):
self.fig.canvas.mpl_connect("key_press_event", self.key_press)
def mouse_hover(self, event):
- vis = self.annot.get_visible()
+ self.annot.get_visible()
if event.inaxes == self.ax[0]:
cont, ind = self.low_dim_plt.contains(event)
if cont:
@@ -155,9 +155,9 @@ def mouse_hover(self, event):
# draw bounding box around call
self.ax[1].patches[0].remove()
- spec_width_orig = self.spec_slices[self.current_id].shape[1] / (
- 1.0 + 2.0 * self.spec_pad
- )
+ spec_width_orig = self.spec_slices[self.current_id].shape[
+ 1
+ ] / (1.0 + 2.0 * self.spec_pad)
xx = w_diff + self.spec_pad * spec_width_orig
ww = spec_width_orig
yy = self.call_info[self.current_id]["low_freq"] / 1000
@@ -183,7 +183,9 @@ def mouse_hover(self, event):
round(self.call_info[self.current_id]["start_time"], 3)
)
+ ", prob="
- + str(round(self.call_info[self.current_id]["det_prob"], 3))
+ + str(
+ round(self.call_info[self.current_id]["det_prob"], 3)
+ )
)
self.ax[0].set_xlabel(info_str)
diff --git a/batdetect2/utils/wavfile.py b/src/batdetect2/utils/wavfile.py
similarity index 99%
rename from batdetect2/utils/wavfile.py
rename to src/batdetect2/utils/wavfile.py
index 7fee6602..d611cdf8 100644
--- a/batdetect2/utils/wavfile.py
+++ b/src/batdetect2/utils/wavfile.py
@@ -8,6 +8,7 @@
`write`: Write a numpy array as a WAV file.
"""
+
from __future__ import absolute_import, division, print_function
import os
@@ -42,7 +43,7 @@ def _read_fmt_chunk(fid):
size, comp, noc, rate, sbytes, ba, bits = res
if comp not in KNOWN_WAVE_FORMATS or size > 16:
comp = WAVE_FORMAT_PCM
- warnings.warn("Unknown wave file format", WavFileWarning)
+ warnings.warn("Unknown wave file format", WavFileWarning, stacklevel=2)
if size > 16:
fid.read(size - 16)
@@ -156,7 +157,6 @@ def read(filename, mmap=False):
fid = open(filename, "rb")
try:
-
# some files seem to have the size recorded in the header greater than
# the actual file size.
fid.seek(0, os.SEEK_END)
diff --git a/tests/conftest.py b/tests/conftest.py
index fbebc980..8a6a0a3f 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,7 +1,34 @@
+import uuid
from pathlib import Path
-from typing import List
+from typing import Callable, List, Optional
+from uuid import uuid4
+import lightning as L
+import numpy as np
import pytest
+import soundfile as sf
+from scipy import signal
+from soundevent import data, terms
+
+from batdetect2.audio import build_audio_loader
+from batdetect2.audio.clips import build_clipper
+from batdetect2.audio.types import AudioLoader, ClipperProtocol
+from batdetect2.data import DatasetConfig, load_dataset
+from batdetect2.data.annotations.batdetect2 import BatDetect2FilesAnnotations
+from batdetect2.preprocess import build_preprocessor
+from batdetect2.preprocess.types import PreprocessorProtocol
+from batdetect2.targets import (
+ ROIMapperProtocol,
+ TargetConfig,
+ build_roi_mapping,
+ build_targets,
+ call_type,
+)
+from batdetect2.targets.classes import TargetClassConfig
+from batdetect2.targets.types import TargetProtocol
+from batdetect2.train.labels import build_clip_labeler
+from batdetect2.train.lightning import build_training_module
+from batdetect2.train.types import ClipLabeller
@pytest.fixture
@@ -19,6 +46,13 @@ def example_audio_dir(example_data_dir: Path) -> Path:
return example_audio_dir
+@pytest.fixture
+def example_anns_dir(example_data_dir: Path) -> Path:
+ example_anns_dir = example_data_dir / "anns"
+ assert example_anns_dir.exists()
+ return example_anns_dir
+
+
@pytest.fixture
def example_audio_files(example_audio_dir: Path) -> List[Path]:
audio_files = list(example_audio_dir.glob("*.[wW][aA][vV]"))
@@ -38,3 +72,422 @@ def contrib_dir(data_dir) -> Path:
dir = data_dir / "contrib"
assert dir.exists()
return dir
+
+
+@pytest.fixture
+def wav_factory(tmp_path: Path):
+ def _wav_factory(
+ path: Optional[Path] = None,
+ duration: float = 0.3,
+ channels: int = 1,
+ samplerate: int = 441_000,
+ bit_depth: int = 16,
+ ) -> Path:
+ path = path or tmp_path / f"{uuid.uuid4()}.wav"
+ frames = int(samplerate * duration)
+ shape = (frames, channels)
+ subtype = f"PCM_{bit_depth}"
+
+ if bit_depth == 16:
+ dtype = np.int16
+ elif bit_depth == 32:
+ dtype = np.int32
+ else:
+ raise ValueError(f"Unsupported bit depth: {bit_depth}")
+
+ wav = np.random.uniform(
+ low=np.iinfo(dtype).min,
+ high=np.iinfo(dtype).max,
+ size=shape,
+ ).astype(dtype)
+ sf.write(str(path), wav, samplerate, subtype=subtype)
+ return path
+
+ return _wav_factory
+
+
+@pytest.fixture
+def create_recording(wav_factory: Callable[..., Path]):
+ def factory(
+ tags: Optional[list[data.Tag]] = None,
+ path: Optional[Path] = None,
+ recording_id: Optional[uuid.UUID] = None,
+ duration: float = 1,
+ channels: int = 1,
+ samplerate: int = 256_000,
+ time_expansion: float = 1,
+ ) -> data.Recording:
+ path = wav_factory(
+ path=path,
+ duration=duration,
+ channels=channels,
+ samplerate=samplerate,
+ )
+ return data.Recording.from_file(
+ path=path,
+ uuid=recording_id or uuid.uuid4(),
+ time_expansion=time_expansion,
+ tags=tags or [],
+ )
+
+ return factory
+
+
+@pytest.fixture
+def generate_whistle(tmp_path: Path):
+ """
+ Pytest fixture that provides a factory for generating WAV audio files.
+
+ The factory creates a recording containing a "whistle" (a short,
+ frequency-specific pulse) positioned at a precise time, suitable for
+ testing audio analysis functions.
+ """
+
+ def factory(
+ time: float,
+ frequency: int,
+ path: Optional[Path] = None,
+ duration: float = 0.3,
+ samplerate: int = 441_000,
+ whistle_duration: float = 0.1,
+ ) -> Path:
+ path = path or tmp_path / f"{uuid.uuid4()}.wav"
+ frames = int(samplerate * duration)
+
+ offset = int((time - duration / 2) * samplerate)
+ t = np.linspace(-duration / 2, duration / 2, frames, endpoint=False)
+ data = signal.gausspulse(
+ t,
+ fc=frequency,
+ bw=2 / (frequency * whistle_duration),
+ )
+ wave = (np.roll(data, offset) * np.iinfo(np.int16).max).astype(
+ np.int16
+ )
+ sf.write(str(path), wave, samplerate, subtype="PCM_16")
+ return path
+
+ return factory
+
+
+@pytest.fixture
+def recording(
+ create_recording: Callable[..., data.Recording],
+) -> data.Recording:
+ return create_recording()
+
+
+@pytest.fixture
+def create_clip():
+ def factory(
+ recording: data.Recording,
+ start_time: float = 0,
+ end_time: float = 0.5,
+ ) -> data.Clip:
+ return data.Clip(
+ recording=recording,
+ start_time=start_time,
+ end_time=end_time,
+ )
+
+ return factory
+
+
+@pytest.fixture
+def clip(recording: data.Recording) -> data.Clip:
+ return data.Clip(recording=recording, start_time=0, end_time=0.5)
+
+
+@pytest.fixture
+def create_sound_event():
+ def factory(
+ recording: data.Recording,
+ coords: Optional[List[float]] = None,
+ ) -> data.SoundEvent:
+ coords = coords or [0.2, 60_000, 0.3, 70_000]
+
+ return data.SoundEvent(
+ geometry=data.BoundingBox(coordinates=coords),
+ recording=recording,
+ )
+
+ return factory
+
+
+@pytest.fixture
+def sound_event(recording: data.Recording) -> data.SoundEvent:
+ return data.SoundEvent(
+ geometry=data.BoundingBox(coordinates=[0.1, 67_000, 0.11, 73_000]),
+ recording=recording,
+ )
+
+
+@pytest.fixture
+def create_sound_event_annotation():
+ def factory(
+ sound_event: data.SoundEvent,
+ tags: Optional[List[data.Tag]] = None,
+ ) -> data.SoundEventAnnotation:
+ return data.SoundEventAnnotation(
+ sound_event=sound_event,
+ tags=tags or [],
+ )
+
+ return factory
+
+
+@pytest.fixture
+def echolocation_call(recording: data.Recording) -> data.SoundEventAnnotation:
+ return data.SoundEventAnnotation(
+ sound_event=data.SoundEvent(
+ geometry=data.BoundingBox(coordinates=[0.1, 67_000, 0.11, 73_000]),
+ recording=recording,
+ ),
+ tags=[
+ data.Tag(term=terms.scientific_name, value="Myotis myotis"),
+ data.Tag(term=call_type, value="Echolocation"),
+ ],
+ )
+
+
+@pytest.fixture
+def generic_call(recording: data.Recording) -> data.SoundEventAnnotation:
+ return data.SoundEventAnnotation(
+ sound_event=data.SoundEvent(
+ geometry=data.BoundingBox(
+ coordinates=[0.34, 35_000, 0.348, 62_000]
+ ),
+ recording=recording,
+ ),
+ tags=[
+ data.Tag(term=terms.order, value="Chiroptera"),
+ data.Tag(term=call_type, value="Echolocation"),
+ ],
+ )
+
+
+@pytest.fixture
+def non_relevant_sound_event(
+ recording: data.Recording,
+) -> data.SoundEventAnnotation:
+ return data.SoundEventAnnotation(
+ sound_event=data.SoundEvent(
+ geometry=data.BoundingBox(
+ coordinates=[0.22, 50_000, 0.24, 58_000]
+ ),
+ recording=recording,
+ ),
+ tags=[
+ data.Tag(
+ term=terms.scientific_name,
+ value="Muscardinus avellanarius",
+ ),
+ ],
+ )
+
+
+@pytest.fixture
+def create_clip_annotation():
+ def factory(
+ clip: data.Clip,
+ clip_tags: Optional[List[data.Tag]] = None,
+ sound_events: Optional[List[data.SoundEventAnnotation]] = None,
+ ) -> data.ClipAnnotation:
+ return data.ClipAnnotation(
+ clip=clip,
+ tags=clip_tags or [],
+ sound_events=sound_events or [],
+ )
+
+ return factory
+
+
+@pytest.fixture
+def clip_annotation(
+ clip: data.Clip,
+ echolocation_call: data.SoundEventAnnotation,
+ generic_call: data.SoundEventAnnotation,
+ non_relevant_sound_event: data.SoundEventAnnotation,
+) -> data.ClipAnnotation:
+ return data.ClipAnnotation(
+ clip=clip,
+ sound_events=[
+ echolocation_call,
+ generic_call,
+ non_relevant_sound_event,
+ ],
+ )
+
+
+@pytest.fixture
+def create_annotation_set():
+ def factory(
+ name: str = "test",
+ description: str = "Test annotation set",
+ annotations: Optional[List[data.ClipAnnotation]] = None,
+ ) -> data.AnnotationSet:
+ return data.AnnotationSet(
+ name=name,
+ description=description,
+ clip_annotations=annotations or [],
+ )
+
+ return factory
+
+
+@pytest.fixture
+def create_annotation_project():
+ def factory(
+ name: str = "test_project",
+ description: str = "Test Annotation Project",
+ tasks: Optional[List[data.AnnotationTask]] = None,
+ annotations: Optional[List[data.ClipAnnotation]] = None,
+ ) -> data.AnnotationProject:
+ return data.AnnotationProject(
+ name=name,
+ description=description,
+ tasks=tasks or [],
+ clip_annotations=annotations or [],
+ )
+
+ return factory
+
+
+@pytest.fixture
+def sample_preprocessor() -> PreprocessorProtocol:
+ return build_preprocessor()
+
+
+@pytest.fixture
+def sample_audio_loader() -> AudioLoader:
+ return build_audio_loader()
+
+
+@pytest.fixture
+def bat_tag() -> data.Tag:
+ return data.Tag(key="class", value="bat")
+
+
+@pytest.fixture
+def noise_tag() -> data.Tag:
+ return data.Tag(key="class", value="noise")
+
+
+@pytest.fixture
+def myomyo_tag() -> data.Tag:
+ return data.Tag(key="species", value="Myotis myotis")
+
+
+@pytest.fixture
+def pippip_tag() -> data.Tag:
+ return data.Tag(key="species", value="Pipistrellus pipistrellus")
+
+
+@pytest.fixture
+def sample_target_config(
+ bat_tag: data.Tag,
+ myomyo_tag: data.Tag,
+ pippip_tag: data.Tag,
+) -> TargetConfig:
+ return TargetConfig(
+ detection_target=TargetClassConfig(name="bat", tags=[bat_tag]),
+ classification_targets=[
+ TargetClassConfig(name="pippip", tags=[pippip_tag]),
+ TargetClassConfig(name="myomyo", tags=[myomyo_tag]),
+ ],
+ )
+
+
+@pytest.fixture
+def sample_targets(
+ sample_target_config: TargetConfig,
+) -> TargetProtocol:
+ return build_targets(sample_target_config)
+
+
+@pytest.fixture
+def sample_roi_mapper(
+ sample_target_config: TargetConfig,
+) -> ROIMapperProtocol:
+ return build_roi_mapping(sample_target_config.roi)
+
+
+@pytest.fixture
+def sample_labeller(
+ sample_targets: TargetProtocol,
+ sample_preprocessor: PreprocessorProtocol,
+) -> ClipLabeller:
+ return build_clip_labeler(
+ sample_targets,
+ min_freq=sample_preprocessor.min_freq,
+ max_freq=sample_preprocessor.max_freq,
+ )
+
+
+@pytest.fixture
+def sample_clipper() -> ClipperProtocol:
+ return build_clipper()
+
+
+@pytest.fixture
+def example_dataset(example_data_dir: Path) -> DatasetConfig:
+ return DatasetConfig(
+ name="test dataset",
+ description="test dataset",
+ sources=[
+ BatDetect2FilesAnnotations(
+ name="example annotations",
+ audio_dir=example_data_dir / "audio",
+ annotations_dir=example_data_dir / "anns",
+ )
+ ],
+ )
+
+
+@pytest.fixture
+def example_annotations(
+ example_dataset: DatasetConfig,
+) -> List[data.ClipAnnotation]:
+ annotations = load_dataset(example_dataset)
+ assert len(annotations) == 3
+ return list(annotations)
+
+
+@pytest.fixture
+def create_temp_yaml(tmp_path: Path) -> Callable[[str], Path]:
+ """Create a temporary YAML file with the given content."""
+
+ def factory(content: str) -> Path:
+ temp_file = tmp_path / f"{uuid4()}.yaml"
+ temp_file.write_text(content)
+ return temp_file
+
+ return factory
+
+
+@pytest.fixture
+def tiny_checkpoint_path(
+ sample_targets: TargetProtocol,
+ sample_roi_mapper: ROIMapperProtocol,
+ tmp_path: Path,
+) -> Path:
+ module = build_training_module(
+ targets_config=sample_targets.get_config(),
+ class_names=sample_targets.class_names,
+ dimension_names=sample_roi_mapper.dimension_names,
+ )
+ trainer = L.Trainer(enable_checkpointing=False, logger=False)
+ checkpoint_path = tmp_path / "model.ckpt"
+ trainer.strategy.connect(module)
+ trainer.save_checkpoint(checkpoint_path)
+ return checkpoint_path
+
+
+@pytest.fixture
+def single_audio_dir(tmp_path: Path, example_audio_files: List[Path]) -> Path:
+ audio_dir = tmp_path / "audio"
+ audio_dir.mkdir()
+ source = example_audio_files[0]
+ target = audio_dir / source.name
+ target.write_bytes(source.read_bytes())
+ return audio_dir
diff --git a/tests/data/regression/20170701_213954-MYOMYS-LR_0_0.5.wav.npz b/tests/data/regression/20170701_213954-MYOMYS-LR_0_0.5.wav.npz
new file mode 100644
index 00000000..a66a0889
Binary files /dev/null and b/tests/data/regression/20170701_213954-MYOMYS-LR_0_0.5.wav.npz differ
diff --git a/tests/data/regression/20180530_213516-EPTSER-LR_0_0.5.wav.npz b/tests/data/regression/20180530_213516-EPTSER-LR_0_0.5.wav.npz
new file mode 100644
index 00000000..56884baf
Binary files /dev/null and b/tests/data/regression/20180530_213516-EPTSER-LR_0_0.5.wav.npz differ
diff --git a/tests/data/regression/20180627_215323-RHIFER-LR_0_0.5.wav.npz b/tests/data/regression/20180627_215323-RHIFER-LR_0_0.5.wav.npz
new file mode 100644
index 00000000..7a3f27ca
Binary files /dev/null and b/tests/data/regression/20180627_215323-RHIFER-LR_0_0.5.wav.npz differ
diff --git a/tests/test_api.py b/tests/test_api.py
index d46786d5..e828c9e4 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -10,13 +10,11 @@
from torch import nn
from batdetect2 import api
-import io
PKG_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEST_DATA_DIR = os.path.join(PKG_DIR, "example_data", "audio")
TEST_DATA = glob(os.path.join(TEST_DATA_DIR, "*.wav"))
-DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
def test_load_model_with_default_params():
"""Test loading model with default parameters."""
@@ -282,28 +280,3 @@ def test_process_file_with_empty_predictions_does_not_fail(
assert results is not None
assert len(results["pred_dict"]["annotation"]) == 0
-
-def test_process_file_file_id_defaults_to_basename():
- """Test that process_file assigns basename as an id if no file_id is provided."""
- # Recording donated by @@kdarras
- basename = "20230322_172000_selec2.wav"
- path = os.path.join(DATA_DIR, basename)
-
- output = api.process_file(path)
- predictions = output["pred_dict"]
- id = predictions["id"]
- assert id == basename
-
-def test_bytesio_file_id_defaults_to_md5():
- """Test that process_file assigns an md5 sum as an id if no file_id is provided when using binary data."""
- # Recording donated by @@kdarras
- basename = "20230322_172000_selec2.wav"
- path = os.path.join(DATA_DIR, basename)
-
- with open(path, "rb") as f:
- data = io.BytesIO(f.read())
-
- output = api.process_file(data)
- predictions = output["pred_dict"]
- id = predictions["id"]
- assert id == "7ade9ebf1a9fe5477ff3a2dc57001929"
diff --git a/tests/test_api_v2/__init__.py b/tests/test_api_v2/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_api_v2/test_api_v2.py b/tests/test_api_v2/test_api_v2.py
new file mode 100644
index 00000000..dc1794d9
--- /dev/null
+++ b/tests/test_api_v2/test_api_v2.py
@@ -0,0 +1,447 @@
+from pathlib import Path
+from typing import cast
+
+import lightning as L
+import numpy as np
+import pytest
+import torch
+from soundevent.geometry import compute_bounds
+
+from batdetect2.api_v2 import BatDetect2API
+from batdetect2.inference import InferenceConfig
+from batdetect2.models.detectors import Detector
+from batdetect2.targets import TargetConfig
+from batdetect2.train import TrainingConfig, load_model_from_checkpoint
+from batdetect2.train.lightning import build_training_module
+
+
+@pytest.fixture
+def train_config() -> TrainingConfig:
+ """Train config with a small batch size for testing."""
+ return TrainingConfig.model_validate({"train_loader": {"batch_size": 2}})
+
+
+@pytest.fixture
+def inference_config() -> InferenceConfig:
+ """Inference config with a small batch size for testing."""
+ return InferenceConfig.model_validate({"loader": {"batch_size": 2}})
+
+
+@pytest.fixture
+def example_targets_config(example_data_dir: Path) -> TargetConfig:
+ return TargetConfig.load(example_data_dir / "targets.yaml")
+
+
+@pytest.fixture
+def api_v2(
+ train_config: TrainingConfig,
+ inference_config: InferenceConfig,
+) -> BatDetect2API:
+ """User story: users can create a ready-to-use API from config."""
+
+ api = BatDetect2API.from_config(
+ train_config=train_config,
+ inference_config=inference_config,
+ )
+ assert api.inference_config.loader.batch_size == 2
+ return api
+
+
+def test_process_file_returns_recording_level_predictions(
+ api_v2: BatDetect2API,
+ example_audio_files: list[Path],
+) -> None:
+ """User story: process a file and get detections in recording time."""
+
+ # When
+ prediction = api_v2.process_file(example_audio_files[0])
+
+ # Then
+ assert prediction.clip.recording.path == example_audio_files[0]
+ assert prediction.clip.start_time == 0
+ assert prediction.clip.end_time == prediction.clip.recording.duration
+
+ for detection in prediction.detections:
+ start, low, end, high = compute_bounds(detection.geometry)
+ assert 0 <= start <= end <= prediction.clip.recording.duration
+ assert prediction.clip.recording.samplerate > 2 * low
+ assert prediction.clip.recording.samplerate > 2 * high
+ assert detection.class_scores.shape[0] == len(
+ api_v2.targets.class_names
+ )
+
+
+@pytest.mark.slow
+def test_process_files_is_batch_size_invariant(
+ api_v2: BatDetect2API,
+ example_audio_files: list[Path],
+) -> None:
+ """User story: changing batch size should not change predictions."""
+
+ # When
+ preds_batch_1 = api_v2.process_files(example_audio_files, batch_size=1)
+ preds_batch_3 = api_v2.process_files(example_audio_files, batch_size=3)
+
+ # Then
+ assert len(preds_batch_1) == len(preds_batch_3)
+
+ by_key_1 = {
+ (
+ str(pred.clip.recording.path),
+ pred.clip.start_time,
+ pred.clip.end_time,
+ ): pred
+ for pred in preds_batch_1
+ }
+ by_key_3 = {
+ (
+ str(pred.clip.recording.path),
+ pred.clip.start_time,
+ pred.clip.end_time,
+ ): pred
+ for pred in preds_batch_3
+ }
+
+ assert set(by_key_1) == set(by_key_3)
+
+ for key in by_key_1:
+ pred_1 = by_key_1[key]
+ pred_3 = by_key_3[key]
+ assert pred_1.clip.start_time == pred_3.clip.start_time
+ assert pred_1.clip.end_time == pred_3.clip.end_time
+ assert len(pred_1.detections) == len(pred_3.detections)
+
+
+def test_process_audio_matches_process_spectrogram(
+ api_v2: BatDetect2API,
+ example_audio_files: list[Path],
+) -> None:
+ """User story: users can call either audio or spectrogram entrypoint."""
+
+ # When
+ audio = api_v2.load_audio(example_audio_files[0])
+ from_audio = api_v2.process_audio(audio)
+
+ spec = api_v2.generate_spectrogram(audio)
+ from_spec = api_v2.process_spectrogram(spec)
+
+ # Then
+ assert len(from_audio) == len(from_spec)
+
+ for det_audio, det_spec in zip(from_audio, from_spec, strict=True):
+ bounds_audio = np.array(compute_bounds(det_audio.geometry))
+ bounds_spec = np.array(compute_bounds(det_spec.geometry))
+ np.testing.assert_allclose(bounds_audio, bounds_spec, atol=1e-6)
+ assert np.isclose(det_audio.detection_score, det_spec.detection_score)
+ np.testing.assert_allclose(
+ det_audio.class_scores,
+ det_spec.class_scores,
+ atol=1e-6,
+ )
+
+
+def test_process_spectrogram_rejects_batched_input(
+ api_v2: BatDetect2API,
+) -> None:
+ """User story: invalid batched input gives a clear error."""
+
+ # Given
+ spec = torch.zeros((2, 1, 128, 64), dtype=torch.float32)
+
+ # When/Then
+ with pytest.raises(ValueError, match="Batched spectrograms not supported"):
+ api_v2.process_spectrogram(spec)
+
+
+def test_user_can_read_top_class_and_other_class_scores(
+ api_v2: BatDetect2API,
+ example_audio_files: list[Path],
+) -> None:
+ """User story: inspect top class and all class scores per detection."""
+
+ prediction = api_v2.process_file(example_audio_files[0])
+
+ assert len(prediction.detections) > 0
+
+ top_classes = [
+ api_v2.get_top_class_name(det) for det in prediction.detections
+ ]
+ other_class_scores = [
+ api_v2.get_class_scores(det, include_top_class=False)
+ for det in prediction.detections
+ ]
+
+ assert len(top_classes) == len(prediction.detections)
+ assert all(isinstance(class_name, str) for class_name in top_classes)
+ assert len(other_class_scores) == len(prediction.detections)
+ assert all(len(scores) >= 1 for scores in other_class_scores)
+ assert all(
+ all(class_name != top_class for class_name, _ in scores)
+ for top_class, scores in zip(
+ top_classes,
+ other_class_scores,
+ strict=True,
+ )
+ )
+ assert all(
+ all(
+ score_a >= score_b
+ for (_, score_a), (_, score_b) in zip(
+ scores, scores[1:], strict=False
+ )
+ )
+ for scores in other_class_scores
+ )
+
+
+def test_user_can_read_extracted_features_per_detection(
+ api_v2: BatDetect2API,
+ example_audio_files: list[Path],
+) -> None:
+ """User story: inspect extracted feature vectors per detection."""
+
+ # Given
+ prediction = api_v2.process_file(example_audio_files[0])
+
+ # When
+ feature_vectors = [det.features for det in prediction.detections]
+
+ # Then
+ assert len(prediction.detections) > 0
+ assert len(feature_vectors) == len(prediction.detections)
+ assert all(vec.ndim == 1 for vec in feature_vectors)
+ assert all(vec.size > 0 for vec in feature_vectors)
+
+
+@pytest.mark.slow
+def test_user_can_load_checkpoint_and_finetune(
+ tmp_path: Path,
+ example_targets_config: TargetConfig,
+ example_annotations,
+) -> None:
+ """User story: load a checkpoint and continue training from it."""
+
+ api = BatDetect2API.from_config(
+ targets_config=example_targets_config,
+ )
+ module = build_training_module(
+ model_config=api.model_config,
+ targets_config=example_targets_config,
+ class_names=api.targets.class_names,
+ dimension_names=api.roi_mapper.dimension_names,
+ )
+ trainer = L.Trainer(enable_checkpointing=False, logger=False)
+ checkpoint_path = tmp_path / "base.ckpt"
+ trainer.strategy.connect(module)
+ trainer.save_checkpoint(checkpoint_path)
+
+ train_config = api.train_config.model_copy(deep=True)
+ train_config.trainer.limit_train_batches = 1
+ train_config.trainer.limit_val_batches = 1
+ train_config.trainer.log_every_n_steps = 1
+ train_config.train_loader.batch_size = 1
+ train_config.train_loader.augmentations.enabled = False
+
+ api = BatDetect2API.from_checkpoint(
+ checkpoint_path,
+ train_config=train_config,
+ )
+ finetune_dir = tmp_path / "finetuned"
+
+ api.train(
+ train_annotations=example_annotations[:1],
+ val_annotations=example_annotations[:1],
+ train_workers=0,
+ val_workers=0,
+ checkpoint_dir=finetune_dir,
+ log_dir=tmp_path / "logs",
+ num_epochs=1,
+ seed=0,
+ )
+
+ checkpoints = list(finetune_dir.rglob("*.ckpt"))
+ assert checkpoints
+
+
+def test_checkpoint_with_same_targets_config_keeps_heads_unchanged(
+ example_targets_config: TargetConfig,
+ tmp_path: Path,
+) -> None:
+ """User story: same targets config does not rebuild prediction heads."""
+
+ # Given
+ source_api = BatDetect2API.from_config(
+ targets_config=example_targets_config
+ )
+ module = build_training_module(
+ model_config=source_api.model_config,
+ targets_config=example_targets_config,
+ class_names=source_api.targets.class_names,
+ dimension_names=source_api.roi_mapper.dimension_names,
+ )
+ trainer = L.Trainer(enable_checkpointing=False, logger=False)
+ checkpoint_path = tmp_path / "same_targets.ckpt"
+ trainer.strategy.connect(module)
+ trainer.save_checkpoint(checkpoint_path)
+
+ source_model, _ = load_model_from_checkpoint(checkpoint_path)
+ source_detector = cast(Detector, source_model.detector)
+
+ # When
+ api = BatDetect2API.from_checkpoint(checkpoint_path)
+
+ # Then
+ detector = cast(Detector, api.model.detector)
+
+ for key, value in source_detector.classifier_head.state_dict().items():
+ assert key in detector.classifier_head.state_dict()
+ torch.testing.assert_close(
+ detector.classifier_head.state_dict()[key],
+ value,
+ )
+
+ for key, value in source_detector.size_head.state_dict().items():
+ assert key in detector.size_head.state_dict()
+ torch.testing.assert_close(
+ detector.size_head.state_dict()[key],
+ value,
+ )
+
+
+def test_api_from_checkpoint_defaults_to_bundled_model() -> None:
+ api = BatDetect2API.from_checkpoint()
+
+ assert api.model.class_names
+
+
+@pytest.mark.slow
+def test_user_can_evaluate_small_dataset_and_get_metrics(
+ api_v2: BatDetect2API,
+ example_annotations,
+ tmp_path: Path,
+) -> None:
+ """User story: run evaluation and receive metrics."""
+
+ metrics, predictions = api_v2.evaluate(
+ test_annotations=example_annotations[:1],
+ num_workers=0,
+ output_dir=tmp_path / "eval",
+ save_predictions=False,
+ )
+
+ assert isinstance(metrics, list)
+ assert len(metrics) == 1
+ assert isinstance(predictions, list)
+ assert len(predictions) == 1
+
+
+def test_user_can_save_evaluation_results_to_disk(
+ api_v2: BatDetect2API,
+ example_annotations,
+ tmp_path: Path,
+) -> None:
+ """User story: evaluate saved predictions and persist results."""
+
+ prediction = api_v2.process_file(
+ example_annotations[0].clip.recording.path
+ )
+ metrics = api_v2.evaluate_predictions(
+ annotations=[example_annotations[0]],
+ predictions=[prediction],
+ output_dir=tmp_path,
+ )
+
+ assert isinstance(metrics, dict)
+ assert (tmp_path / "metrics.json").exists()
+
+
+def test_process_file_uses_resolved_batch_size_by_default(
+ api_v2: BatDetect2API,
+ example_audio_files: list[Path],
+ monkeypatch,
+) -> None:
+ """User story: process_file falls back to resolved inference config."""
+
+ captured: dict[str, object] = {}
+
+ def fake_process_files(
+ audio_files,
+ batch_size=None,
+ **kwargs,
+ ):
+ captured["audio_files"] = audio_files
+ captured["batch_size"] = batch_size
+ captured["kwargs"] = kwargs
+ return []
+
+ monkeypatch.setattr(api_v2, "process_files", fake_process_files)
+
+ api_v2.process_file(example_audio_files[0])
+
+ assert captured["audio_files"] == [example_audio_files[0]]
+ assert captured["batch_size"] == api_v2.inference_config.loader.batch_size
+
+
+def test_detection_threshold_override_changes_process_file_results(
+ api_v2: BatDetect2API,
+ example_audio_files: list[Path],
+) -> None:
+ """User story: users can override threshold in process_file."""
+
+ default_prediction = api_v2.process_file(example_audio_files[0])
+ strict_prediction = api_v2.process_file(
+ example_audio_files[0],
+ detection_threshold=1.0,
+ )
+
+ assert len(strict_prediction.detections) <= len(
+ default_prediction.detections
+ )
+
+
+@pytest.mark.slow
+def test_detection_threshold_override_is_ephemeral_in_process_file(
+ api_v2: BatDetect2API,
+ example_audio_files: list[Path],
+) -> None:
+ """User story: per-call threshold override does not change defaults."""
+
+ before = api_v2.process_file(example_audio_files[0])
+ _ = api_v2.process_file(
+ example_audio_files[0],
+ detection_threshold=1.0,
+ )
+ after = api_v2.process_file(example_audio_files[0])
+
+ assert len(before.detections) == len(after.detections)
+ np.testing.assert_allclose(
+ [det.detection_score for det in before.detections],
+ [det.detection_score for det in after.detections],
+ atol=1e-6,
+ )
+
+
+def test_detection_threshold_override_changes_spectrogram_results(
+ api_v2: BatDetect2API,
+ example_audio_files: list[Path],
+) -> None:
+ """User story: threshold override works in spectrogram path."""
+
+ audio = api_v2.load_audio(example_audio_files[0])
+ spec = api_v2.generate_spectrogram(audio)
+ default_detections = api_v2.process_spectrogram(spec)
+ strict_detections = api_v2.process_spectrogram(
+ spec, detection_threshold=1.0
+ )
+
+ assert len(strict_detections) <= len(default_detections)
+
+
+def test_user_can_create_api_with_custom_targets_and_model_metadata_matches(
+ sample_targets,
+) -> None:
+ """User story: custom targets define model output names for a new API."""
+
+ api = BatDetect2API.from_config(targets_config=sample_targets.config)
+
+ assert api.model.class_names == sample_targets.class_names
diff --git a/tests/test_api_v2/test_finetune.py b/tests/test_api_v2/test_finetune.py
new file mode 100644
index 00000000..5d8b223f
--- /dev/null
+++ b/tests/test_api_v2/test_finetune.py
@@ -0,0 +1,114 @@
+from pathlib import Path
+from typing import cast
+
+import pytest
+
+from batdetect2.api_v2 import BatDetect2API
+from batdetect2.models.detectors import Detector
+from batdetect2.targets import TargetConfig
+from batdetect2.train import load_model_from_checkpoint
+
+
+@pytest.mark.slow
+def test_user_can_finetune_only_heads(
+ tmp_path: Path,
+ example_annotations,
+) -> None:
+ """User story: fine-tune only prediction heads."""
+
+ api = BatDetect2API.from_config()
+ source_classifier_head = api.model.detector.classifier_head
+ source_size_head = api.model.detector.size_head
+ source_backbone = api.model.detector.backbone
+ finetune_dir = tmp_path / "heads_only"
+
+ finetuned_api = api.finetune(
+ train_annotations=example_annotations[:1],
+ val_annotations=example_annotations[:1],
+ targets_config=TargetConfig(),
+ trainable="heads",
+ train_workers=0,
+ val_workers=0,
+ checkpoint_dir=finetune_dir,
+ log_dir=tmp_path / "logs",
+ num_epochs=1,
+ seed=0,
+ )
+
+ detector = cast(Detector, finetuned_api.model.detector)
+
+ backbone_params = list(detector.backbone.parameters())
+ classifier_params = list(detector.classifier_head.parameters())
+ bbox_params = list(detector.size_head.parameters())
+
+ assert backbone_params
+ assert classifier_params
+ assert bbox_params
+ assert all(not parameter.requires_grad for parameter in backbone_params)
+ assert all(parameter.requires_grad for parameter in classifier_params)
+ assert all(parameter.requires_grad for parameter in bbox_params)
+ assert finetuned_api is not api
+ assert detector.backbone is source_backbone
+ assert detector.classifier_head is not source_classifier_head
+ assert detector.size_head is not source_size_head
+ assert list(finetune_dir.rglob("*.ckpt"))
+
+
+@pytest.mark.slow
+def test_finetune_replaces_targets_and_checkpoint_owns_new_targets(
+ tmp_path: Path,
+ example_annotations,
+) -> None:
+ """User story: fine-tuning writes checkpoints with the new targets."""
+
+ source_api = BatDetect2API.from_config()
+ source_evaluator = source_api.evaluator
+ source_formatter = source_api.formatter
+ source_output_transform = source_api.output_transform
+ new_targets = TargetConfig.model_validate(
+ {
+ "classification_targets": [
+ {
+ "name": "single_class",
+ "tags": [{"key": "class", "value": "single_class"}],
+ }
+ ],
+ "roi": {"mapper": "top_left"},
+ }
+ )
+ finetune_dir = tmp_path / "new_targets"
+
+ finetuned_api = source_api.finetune(
+ train_annotations=example_annotations[:1],
+ val_annotations=example_annotations[:1],
+ targets_config=new_targets,
+ trainable="heads",
+ train_workers=0,
+ val_workers=0,
+ checkpoint_dir=finetune_dir,
+ log_dir=tmp_path / "logs",
+ num_epochs=1,
+ seed=0,
+ )
+
+ checkpoints = list(finetune_dir.rglob("*.ckpt"))
+
+ assert source_api.targets.get_config() != new_targets.model_dump(
+ mode="json"
+ )
+ assert finetuned_api.targets.get_config() == new_targets.model_dump(
+ mode="json"
+ )
+ assert finetuned_api.evaluator is not source_evaluator
+ assert finetuned_api.formatter is not source_formatter
+ assert finetuned_api.output_transform is not source_output_transform
+ assert finetuned_api.evaluator.targets is finetuned_api.targets
+ assert finetuned_api.evaluator.transform is finetuned_api.output_transform
+ assert finetuned_api.model.class_names == ["single_class"]
+ assert finetuned_api.model.dimension_names == ["width", "height"]
+ assert checkpoints
+
+ _, configs = load_model_from_checkpoint(checkpoints[0])
+ assert configs.targets.model_dump(mode="json") == new_targets.model_dump(
+ mode="json"
+ )
diff --git a/tests/test_api_v2/test_outputs_io.py b/tests/test_api_v2/test_outputs_io.py
new file mode 100644
index 00000000..914b0cea
--- /dev/null
+++ b/tests/test_api_v2/test_outputs_io.py
@@ -0,0 +1,243 @@
+from pathlib import Path
+from typing import cast
+from unittest.mock import Mock
+
+import numpy as np
+import pandas as pd
+import pytest
+from soundevent import data as soundevent_data
+
+from batdetect2.api_v2 import BatDetect2API
+from batdetect2.outputs import build_output_formatter
+from batdetect2.outputs.formats import (
+ BatDetect2OutputConfig,
+ SoundEventOutputConfig,
+)
+from batdetect2.outputs.formats.batdetect2 import BatDetect2Formatter
+from batdetect2.postprocess.types import ClipDetections
+
+
+@pytest.fixture
+def api_v2() -> BatDetect2API:
+ """User story: API object manages prediction IO formats."""
+
+ return BatDetect2API.from_config()
+
+
+@pytest.fixture
+def file_prediction(api_v2: BatDetect2API, example_audio_files: list[Path]):
+ """User story: users save/load predictions produced by API inference."""
+
+ return api_v2.process_file(example_audio_files[0])
+
+
+def test_save_and_load_predictions_roundtrip_default_raw(
+ api_v2: BatDetect2API,
+ file_prediction,
+ tmp_path: Path,
+) -> None:
+ output_dir = tmp_path / "raw_preds"
+ api_v2.save_predictions([file_prediction], path=output_dir)
+ loaded = cast(list[ClipDetections], api_v2.load_predictions(output_dir))
+
+ assert len(loaded) == 1
+ loaded_prediction = loaded[0]
+ assert loaded_prediction.clip == file_prediction.clip
+ assert len(loaded_prediction.detections) == len(file_prediction.detections)
+
+ for loaded_det, det in zip(
+ loaded_prediction.detections,
+ file_prediction.detections,
+ strict=True,
+ ):
+ assert loaded_det.geometry == det.geometry
+ assert np.isclose(loaded_det.detection_score, det.detection_score)
+ np.testing.assert_allclose(
+ loaded_det.class_scores,
+ det.class_scores,
+ atol=1e-6,
+ )
+
+
+def test_save_predictions_with_batdetect2_override(
+ api_v2: BatDetect2API,
+ file_prediction,
+ tmp_path: Path,
+) -> None:
+ output_dir = tmp_path / "batdetect2_preds"
+ api_v2.save_predictions(
+ [file_prediction],
+ path=output_dir,
+ format="batdetect2",
+ )
+
+ formatter = build_output_formatter(
+ targets=api_v2.targets,
+ config=BatDetect2OutputConfig(),
+ )
+ loaded = formatter.load(output_dir)
+
+ assert len(loaded) == 1
+ assert "annotation" in loaded[0]
+ assert len(loaded[0]["annotation"]) == len(file_prediction.detections)
+
+
+def test_batdetect2_formatter_can_use_raw_class_names(
+ api_v2: BatDetect2API,
+ file_prediction,
+ tmp_path: Path,
+) -> None:
+ output_dir = tmp_path / "batdetect2_raw_class_names"
+ api_v2.save_predictions(
+ [file_prediction],
+ path=output_dir,
+ config=BatDetect2OutputConfig(class_label_mode="class_name"),
+ )
+
+ loaded = cast(
+ list[dict], api_v2.load_predictions(output_dir, format="batdetect2")
+ )
+ first_annotation = loaded[0]["annotation"][0]
+
+ assert first_annotation["class"] in api_v2.targets.class_names
+
+
+def test_batdetect2_formatter_can_use_decoded_species_tag() -> None:
+ targets = Mock()
+ targets.class_names = ["myodau"]
+ targets.decode_class.return_value = [
+ soundevent_data.Tag(
+ key="dwc:scientificName",
+ value="Myotis daubentonii",
+ )
+ ]
+
+ formatter = BatDetect2Formatter(
+ targets=targets,
+ event_name="Echolocation",
+ annotation_note="Automatically generated.",
+ )
+
+ assert formatter.get_class_name(0) == "Myotis daubentonii"
+
+
+def test_batdetect2_formatter_can_fallback_to_class_name_when_key_missing() -> (
+ None
+):
+ targets = Mock()
+ targets.class_names = ["myodau"]
+ targets.decode_class.return_value = []
+
+ formatter = BatDetect2Formatter(
+ targets=targets,
+ event_name="Echolocation",
+ annotation_note="Automatically generated.",
+ decoded_label_key="dwc:scientificName",
+ fallback_to_class_name=True,
+ )
+
+ assert formatter.get_class_name(0) == "myodau"
+
+
+def test_batdetect2_formatter_rejects_missing_decoded_key_without_fallback() -> (
+ None
+):
+ targets = Mock()
+ targets.class_names = ["myodau"]
+ targets.decode_class.return_value = []
+
+ formatter = BatDetect2Formatter(
+ targets=targets,
+ event_name="Echolocation",
+ annotation_note="Automatically generated.",
+ decoded_label_key="dwc:scientificName",
+ fallback_to_class_name=False,
+ )
+
+ with pytest.raises(ValueError, match="Could not decode class label"):
+ formatter.get_class_name(0)
+
+
+def test_load_predictions_with_format_override(
+ api_v2: BatDetect2API,
+ file_prediction,
+ tmp_path: Path,
+) -> None:
+ output_dir = tmp_path / "batdetect2_preds_load"
+ api_v2.save_predictions(
+ [file_prediction],
+ path=output_dir,
+ format="batdetect2",
+ )
+
+ loaded = api_v2.load_predictions(output_dir, format="batdetect2")
+
+ assert len(loaded) == 1
+ loaded_item = loaded[0]
+ assert isinstance(loaded_item, dict)
+ assert "annotation" in loaded_item
+
+
+def test_load_predictions_with_batdetect2_nested_layout(
+ api_v2: BatDetect2API,
+ example_audio_files: list[Path],
+ tmp_path: Path,
+) -> None:
+ output_dir = tmp_path / "batdetect2_nested"
+ predictions = [
+ api_v2.process_file(audio_file) for audio_file in example_audio_files
+ ]
+
+ api_v2.save_predictions(
+ predictions,
+ path=output_dir,
+ format="batdetect2",
+ audio_dir=example_audio_files[0].parent,
+ )
+
+ loaded = api_v2.load_predictions(output_dir, format="batdetect2")
+
+ assert len(loaded) == len(example_audio_files)
+
+
+def test_save_predictions_with_batdetect2_writes_cnn_feature_csv(
+ api_v2: BatDetect2API,
+ file_prediction,
+ tmp_path: Path,
+) -> None:
+ output_dir = tmp_path / "batdetect2_cnn"
+ api_v2.save_predictions(
+ [file_prediction],
+ path=output_dir,
+ config=BatDetect2OutputConfig(write_cnn_features_csv=True),
+ )
+
+ cnn_csvs = list(output_dir.rglob("*_cnn_features.csv"))
+ assert len(cnn_csvs) == 1
+
+ loaded_df = pd.read_csv(cnn_csvs[0])
+ assert not loaded_df.empty
+
+
+def test_save_predictions_with_soundevent_override(
+ api_v2: BatDetect2API,
+ file_prediction,
+ tmp_path: Path,
+) -> None:
+ output_path = tmp_path / "soundevent_preds"
+ api_v2.save_predictions(
+ [file_prediction],
+ path=output_path,
+ format="soundevent",
+ )
+
+ formatter = build_output_formatter(
+ targets=api_v2.targets,
+ config=SoundEventOutputConfig(),
+ )
+ load_path = output_path.with_suffix(".json")
+ loaded = formatter.load(load_path)
+
+ assert load_path.exists()
+ assert len(loaded) == 1
+ assert len(loaded[0].sound_events) == len(file_prediction.detections)
diff --git a/tests/test_audio_utils.py b/tests/test_audio_utils.py
index ed64b159..777d392e 100644
--- a/tests/test_audio_utils.py
+++ b/tests/test_audio_utils.py
@@ -1,17 +1,17 @@
import numpy as np
+import pytest
import torch
import torch.nn.functional as F
-from hypothesis import given
+from hypothesis import given, settings
from hypothesis import strategies as st
from batdetect2.detector import parameters
from batdetect2.utils import audio_utils, detector_utils
-import io
-import os
-DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
-@given(duration=st.floats(min_value=0.1, max_value=2))
+@given(duration=st.floats(min_value=0.1, max_value=1))
+@settings(deadline=None)
+@pytest.mark.slow
def test_can_compute_correct_spectrogram_width(duration: float):
samplerate = parameters.TARGET_SAMPLERATE_HZ
params = parameters.DEFAULT_SPECTROGRAM_PARAMETERS
@@ -90,6 +90,8 @@ def test_pad_audio_without_fixed_size(duration: float):
@given(duration=st.floats(min_value=0.1, max_value=2))
+@settings(deadline=None)
+@pytest.mark.slow
def test_computed_spectrograms_are_actually_divisible_by_the_spec_divide_factor(
duration: float,
):
@@ -97,7 +99,7 @@ def test_computed_spectrograms_are_actually_divisible_by_the_spec_divide_factor(
params = parameters.DEFAULT_SPECTROGRAM_PARAMETERS
length = int(duration * samplerate)
audio = np.random.rand(length)
- _, spectrogram, _ = detector_utils.compute_spectrogram(
+ _, spectrogram = detector_utils.compute_spectrogram(
audio,
samplerate,
params,
@@ -137,20 +139,3 @@ def test_pad_audio_with_fixed_width(duration: float, width: int):
resize_factor=params["resize_factor"],
)
assert expected_width == width
-
-
-def test_load_audio_using_bytesio():
- basename = "20230322_172000_selec2.wav"
- path = os.path.join(DATA_DIR, basename)
-
- with open(path, "rb") as f:
- data = io.BytesIO(f.read())
-
- sample_rate, audio_data, file_sample_rate = audio_utils.load_audio_and_samplerate(data, time_exp_fact=1, target_samp_rate=parameters.TARGET_SAMPLERATE_HZ)
-
- expected_sample_rate, expected_audio_data, exp_file_sample_rate = audio_utils.load_audio_and_samplerate(path, time_exp_fact=1, target_samp_rate=parameters.TARGET_SAMPLERATE_HZ)
-
- assert expected_sample_rate == sample_rate
- assert exp_file_sample_rate == file_sample_rate
-
- assert np.array_equal(audio_data, expected_audio_data)
\ No newline at end of file
diff --git a/tests/test_cli/__init__.py b/tests/test_cli/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_cli/test_base.py b/tests/test_cli/test_base.py
new file mode 100644
index 00000000..a96a600e
--- /dev/null
+++ b/tests/test_cli/test_base.py
@@ -0,0 +1,18 @@
+"""Behavior-focused tests for top-level CLI command discovery."""
+
+from click.testing import CliRunner
+
+from batdetect2.cli import cli
+
+
+def test_cli_base_help_lists_main_commands() -> None:
+ """User story: discover available workflows from top-level help."""
+
+ result = CliRunner().invoke(cli, ["--help"])
+
+ assert result.exit_code == 0
+ assert "process" in result.output
+ assert "train" in result.output
+ assert "evaluate" in result.output
+ assert "data" in result.output
+ assert "detect" in result.output
diff --git a/tests/test_cli/test_data.py b/tests/test_cli/test_data.py
new file mode 100644
index 00000000..8d4b9dff
--- /dev/null
+++ b/tests/test_cli/test_data.py
@@ -0,0 +1,60 @@
+"""Behavior tests for data CLI command group."""
+
+from pathlib import Path
+
+from click.testing import CliRunner
+
+from batdetect2.cli import cli
+
+
+def test_cli_data_help() -> None:
+ """User story: discover data subcommands."""
+
+ result = CliRunner().invoke(cli, ["data", "--help"])
+
+ assert result.exit_code == 0
+ assert "summary" in result.output
+ assert "convert" in result.output
+
+
+def test_cli_data_convert_creates_annotation_set(tmp_path: Path) -> None:
+ """User story: convert dataset config into a soundevent annotation set."""
+
+ output = tmp_path / "annotations.json"
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "data",
+ "convert",
+ "example_data/dataset.yaml",
+ "--base-dir",
+ ".",
+ "--output",
+ str(output),
+ ],
+ )
+
+ assert result.exit_code == 0
+ assert output.exists()
+
+
+def test_cli_data_convert_fails_with_invalid_field(tmp_path: Path) -> None:
+ """User story: invalid nested field in dataset config fails clearly."""
+
+ output = tmp_path / "annotations.json"
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "data",
+ "convert",
+ "example_data/dataset.yaml",
+ "--field",
+ "does.not.exist",
+ "--output",
+ str(output),
+ ],
+ )
+
+ assert result.exit_code != 0
diff --git a/tests/test_cli.py b/tests/test_cli/test_detect.py
similarity index 50%
rename from tests/test_cli.py
rename to tests/test_cli/test_detect.py
index adba9698..f20c15e8 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli/test_detect.py
@@ -1,40 +1,21 @@
-"""Test the command line interface."""
+"""Behavior tests for legacy detect command."""
from pathlib import Path
import pandas as pd
+import pytest
from click.testing import CliRunner
from batdetect2.cli import cli
-runner = CliRunner()
+@pytest.mark.slow
+def test_cli_detect_command_on_test_audio(tmp_path: Path) -> None:
+ """User story: run legacy detect on example audio directory."""
-def test_cli_base_command():
- """Test the base command."""
- result = runner.invoke(cli, ["--help"])
- assert result.exit_code == 0
- assert (
- "BatDetect2 - Bat Call Detection and Classification" in result.output
- )
-
-
-def test_cli_detect_command_help():
- """Test the detect command help."""
- result = runner.invoke(cli, ["detect", "--help"])
- assert result.exit_code == 0
- assert "Detect bat calls in files in AUDIO_DIR" in result.output
-
-
-def test_cli_detect_command_on_test_audio(tmp_path):
- """Test the detect command on test audio."""
results_dir = tmp_path / "results"
- # Remove results dir if it exists
- if results_dir.exists():
- results_dir.rmdir()
-
- result = runner.invoke(
+ result = CliRunner().invoke(
cli,
[
"detect",
@@ -43,21 +24,22 @@ def test_cli_detect_command_on_test_audio(tmp_path):
"0.3",
],
)
+
assert result.exit_code == 0
assert results_dir.exists()
assert len(list(results_dir.glob("*.csv"))) == 3
assert len(list(results_dir.glob("*.json"))) == 3
-def test_cli_detect_command_with_non_trivial_time_expansion(tmp_path):
- """Test the detect command with a non-trivial time expansion factor."""
- results_dir = tmp_path / "results"
+@pytest.mark.slow
+def test_cli_detect_command_with_non_trivial_time_expansion(
+ tmp_path: Path,
+) -> None:
+ """User story: set time expansion in legacy detect command."""
- # Remove results dir if it exists
- if results_dir.exists():
- results_dir.rmdir()
+ results_dir = tmp_path / "results"
- result = runner.invoke(
+ result = CliRunner().invoke(
cli,
[
"detect",
@@ -73,15 +55,13 @@ def test_cli_detect_command_with_non_trivial_time_expansion(tmp_path):
assert "Time Expansion Factor: 10" in result.stdout
-def test_cli_detect_command_with_the_spec_feature_flag(tmp_path: Path):
- """Test the detect command with the spec feature flag."""
- results_dir = tmp_path / "results"
+@pytest.mark.slow
+def test_cli_detect_command_with_spec_feature_flag(tmp_path: Path) -> None:
+ """User story: request extra spectral features in output CSV."""
- # Remove results dir if it exists
- if results_dir.exists():
- results_dir.rmdir()
+ results_dir = tmp_path / "results"
- result = runner.invoke(
+ result = CliRunner().invoke(
cli,
[
"detect",
@@ -91,6 +71,7 @@ def test_cli_detect_command_with_the_spec_feature_flag(tmp_path: Path):
"--spec_features",
],
)
+
assert result.exit_code == 0
assert results_dir.exists()
@@ -104,21 +85,21 @@ def test_cli_detect_command_with_the_spec_feature_flag(tmp_path: Path):
for expected_file in expected_files:
assert expected_file in csv_files
-
df = pd.read_csv(results_dir / expected_file)
assert not (df.duration == -1).any()
-def test_cli_detect_fails_gracefully_on_empty_file(tmp_path: Path):
+def test_cli_detect_fails_gracefully_on_empty_file(tmp_path: Path) -> None:
+ """User story: bad/empty input file reports error but command survives."""
+
results_dir = tmp_path / "results"
target = tmp_path / "audio"
target.mkdir()
- # Create an empty file with the .wav extension
empty_file = target / "empty.wav"
empty_file.touch()
- result = runner.invoke(
+ result = CliRunner().invoke(
cli,
args=[
"detect",
@@ -128,31 +109,6 @@ def test_cli_detect_fails_gracefully_on_empty_file(tmp_path: Path):
"--spec_features",
],
)
- assert result.exit_code == 0
- assert f"Error processing file {empty_file}" in result.output
-
-def test_can_set_chunk_size(tmp_path: Path):
- results_dir = tmp_path / "results"
-
- # Remove results dir if it exists
- if results_dir.exists():
- results_dir.rmdir()
-
- result = runner.invoke(
- cli,
- [
- "detect",
- "example_data/audio",
- str(results_dir),
- "0.3",
- "--chunk_size",
- "1",
- ],
- )
-
- assert "Chunk Size: 1.0s" in result.output
assert result.exit_code == 0
- assert results_dir.exists()
- assert len(list(results_dir.glob("*.csv"))) == 3
- assert len(list(results_dir.glob("*.json"))) == 3
+ assert f"Error processing file {empty_file}" in result.output
diff --git a/tests/test_cli/test_evaluate.py b/tests/test_cli/test_evaluate.py
new file mode 100644
index 00000000..2cd1eb01
--- /dev/null
+++ b/tests/test_cli/test_evaluate.py
@@ -0,0 +1,48 @@
+"""CLI tests for evaluate command."""
+
+from pathlib import Path
+
+from click.testing import CliRunner
+
+from batdetect2.cli import cli
+
+BASE_DIR = Path(__file__).parent.parent.parent
+
+
+def test_cli_evaluate_help() -> None:
+ """User story: inspect evaluate command interface and options."""
+
+ result = CliRunner().invoke(cli, ["evaluate", "--help"])
+
+ assert result.exit_code == 0
+ assert "TEST_DATASET" in result.output
+ assert "--model" in result.output
+ assert "--evaluation-config" in result.output
+
+
+def test_cli_evaluate_writes_metrics_for_small_dataset(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+) -> None:
+ """User story: evaluate a checkpoint and get metrics artifacts."""
+
+ output_dir = tmp_path / "eval_out"
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "evaluate",
+ str(BASE_DIR / "example_data" / "dataset.yaml"),
+ "--model",
+ str(tiny_checkpoint_path),
+ "--base-dir",
+ str(BASE_DIR),
+ "--workers",
+ "0",
+ "--output-dir",
+ str(output_dir),
+ ],
+ )
+
+ assert result.exit_code == 0
+ assert len(list(output_dir.rglob("metrics.csv"))) >= 1
diff --git a/tests/test_cli/test_finetune.py b/tests/test_cli/test_finetune.py
new file mode 100644
index 00000000..cdc02aa9
--- /dev/null
+++ b/tests/test_cli/test_finetune.py
@@ -0,0 +1,134 @@
+"""CLI tests for finetune command."""
+
+from pathlib import Path
+from types import SimpleNamespace
+
+import pytest
+from click.testing import CliRunner
+
+from batdetect2.cli import cli
+
+
+def test_cli_finetune_help() -> None:
+ """User story: inspect finetune command interface and options."""
+
+ result = CliRunner().invoke(cli, ["finetune", "--help"])
+
+ assert result.exit_code == 0
+ assert "TRAIN_DATASET" in result.output
+ assert "--model" in result.output
+ assert "--targets" in result.output
+ assert "--training-config" in result.output
+ assert "--audio-config" in result.output
+ assert "--logging-config" in result.output
+ assert "--evaluation-config" not in result.output
+ assert "--inference-config" not in result.output
+ assert "--outputs-config" not in result.output
+
+
+def test_cli_finetune_defaults_to_bundled_model(
+ monkeypatch: pytest.MonkeyPatch,
+) -> None:
+ """User story: finetune can use the bundled checkpoint by default."""
+
+ called = {}
+
+ class FakeAPI:
+ def finetune(self, **kwargs):
+ called["finetune"] = kwargs
+ return None
+
+ class FakeBatDetect2API:
+ @classmethod
+ def from_checkpoint(cls, path=None, **kwargs):
+ called["path"] = path
+ called["from_checkpoint_kwargs"] = kwargs
+ return FakeAPI()
+
+ monkeypatch.setattr(
+ "batdetect2.api_v2.BatDetect2API",
+ FakeBatDetect2API,
+ )
+ monkeypatch.setattr(
+ "batdetect2.data.load_dataset_config",
+ lambda path: SimpleNamespace(path=path),
+ )
+ monkeypatch.setattr(
+ "batdetect2.data.load_dataset",
+ lambda config, base_dir=None: [],
+ )
+ monkeypatch.setattr(
+ "batdetect2.targets.TargetConfig.load",
+ lambda path: SimpleNamespace(path=path),
+ )
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "finetune",
+ "example_data/dataset.yaml",
+ "--targets",
+ "example_data/targets.yaml",
+ ],
+ )
+
+ assert result.exit_code == 0
+ assert called["path"] is None
+ assert "finetune" in called
+
+
+def test_cli_finetune_requires_targets(tiny_checkpoint_path: Path) -> None:
+ """User story: finetune requires a new target definition."""
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "finetune",
+ "example_data/dataset.yaml",
+ "--model",
+ str(tiny_checkpoint_path),
+ ],
+ )
+
+ assert result.exit_code != 0
+ assert "--targets" in result.output
+
+
+@pytest.mark.slow
+def test_cli_finetune_from_checkpoint_runs_on_small_dataset(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+) -> None:
+ """User story: fine-tune a checkpoint via CLI with new targets."""
+
+ ckpt_dir = tmp_path / "checkpoints"
+ log_dir = tmp_path / "logs"
+ ckpt_dir.mkdir()
+ log_dir.mkdir()
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "finetune",
+ "example_data/dataset.yaml",
+ "--val-dataset",
+ "example_data/dataset.yaml",
+ "--model",
+ str(tiny_checkpoint_path),
+ "--targets",
+ "example_data/targets.yaml",
+ "--num-epochs",
+ "1",
+ "--train-workers",
+ "0",
+ "--val-workers",
+ "0",
+ "--ckpt-dir",
+ str(ckpt_dir),
+ "--log-dir",
+ str(log_dir),
+ ],
+ )
+
+ assert result.exit_code == 0
+ assert len(list(ckpt_dir.rglob("*.ckpt"))) >= 1
diff --git a/tests/test_cli/test_predict.py b/tests/test_cli/test_predict.py
new file mode 100644
index 00000000..1f72a712
--- /dev/null
+++ b/tests/test_cli/test_predict.py
@@ -0,0 +1,452 @@
+"""Behavior tests for process CLI workflows."""
+
+import json
+from pathlib import Path
+
+import pandas as pd
+import pytest
+from click.testing import CliRunner
+from soundevent import data, io
+
+from batdetect2.cli import cli
+from batdetect2.outputs import OutputsConfig
+from batdetect2.outputs.formats import BatDetect2OutputConfig
+
+
+def test_cli_process_help() -> None:
+ """User story: discover available process modes."""
+
+ result = CliRunner().invoke(cli, ["process", "--help"])
+
+ assert result.exit_code == 0
+ assert "directory" in result.output
+ assert "file_list" in result.output
+ assert "dataset" in result.output
+
+
+@pytest.mark.slow
+def test_cli_process_directory_runs_on_real_audio(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+ single_audio_dir: Path,
+) -> None:
+ """User story: process all files in a directory."""
+
+ output_path = tmp_path / "predictions"
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "process",
+ "directory",
+ "--model",
+ str(tiny_checkpoint_path),
+ str(single_audio_dir),
+ str(output_path),
+ "--batch-size",
+ "1",
+ "--workers",
+ "0",
+ "--format",
+ "batdetect2",
+ ],
+ )
+
+ assert result.exit_code == 0
+ assert output_path.exists()
+ assert len(list(output_path.glob("*.json"))) == 1
+
+
+@pytest.mark.slow
+def test_cli_process_directory_runs_on_example_audio_data(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+ example_audio_dir: Path,
+ example_audio_files: list[Path],
+) -> None:
+ """User story: process the bundled example audio directory."""
+
+ output_path = tmp_path / "predictions"
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "process",
+ "directory",
+ "--model",
+ str(tiny_checkpoint_path),
+ str(example_audio_dir),
+ str(output_path),
+ "--batch-size",
+ "1",
+ "--workers",
+ "0",
+ "--format",
+ "batdetect2",
+ ],
+ )
+
+ assert result.exit_code == 0
+ assert output_path.exists()
+ assert len(list(output_path.glob("*.json"))) == len(example_audio_files)
+
+
+@pytest.mark.slow
+def test_cli_process_directory_batdetect2_matches_legacy_artifacts(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+ example_audio_dir: Path,
+ example_audio_files: list[Path],
+ example_anns_dir: Path,
+) -> None:
+ """User story: process batdetect2 output matches legacy-style files."""
+
+ output_path = tmp_path / "predictions"
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "process",
+ "directory",
+ "--model",
+ str(tiny_checkpoint_path),
+ str(example_audio_dir),
+ str(output_path),
+ "--batch-size",
+ "1",
+ "--workers",
+ "0",
+ "--format",
+ "batdetect2",
+ ],
+ )
+
+ assert result.exit_code == 0
+
+ json_files = sorted(output_path.rglob("*.json"))
+ csv_files = sorted(output_path.rglob("*.csv"))
+
+ assert len(json_files) == len(example_audio_files)
+ assert len(csv_files) == len(example_audio_files)
+
+ expected_names = sorted(
+ audio_file.name for audio_file in example_audio_files
+ )
+ assert sorted(path.stem for path in json_files) == expected_names
+ assert sorted(path.stem for path in csv_files) == expected_names
+
+ first_output = json.loads(json_files[0].read_text())
+ assert "file_path" not in first_output
+ assert isinstance(first_output["class_name"], str)
+ assert first_output["class_name"]
+
+ first_annotation = first_output["annotation"][0]
+ assert first_annotation["individual"] == "-1"
+ assert isinstance(first_annotation["high_freq"], int)
+ assert isinstance(first_annotation["low_freq"], int)
+
+ expected_json = json.loads(
+ (example_anns_dir / json_files[0].name).read_text()
+ )
+ assert first_output["id"] == expected_json["id"]
+ assert first_output["time_exp"] == expected_json["time_exp"]
+
+ first_csv = pd.read_csv(csv_files[0], index_col=0)
+ assert list(first_csv.columns) == [
+ "det_prob",
+ "start_time",
+ "end_time",
+ "high_freq",
+ "low_freq",
+ "class",
+ "class_prob",
+ ]
+
+
+@pytest.mark.slow
+def test_cli_process_directory_batdetect2_writes_cnn_features_csv_when_enabled(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+ example_audio_dir: Path,
+) -> None:
+ """User story: request legacy CNN feature CSV sidecars via config."""
+
+ output_path = tmp_path / "predictions"
+ outputs_config_path = tmp_path / "outputs.yaml"
+ outputs_config_path.write_text(
+ OutputsConfig(
+ format=BatDetect2OutputConfig(write_cnn_features_csv=True)
+ ).to_yaml_string()
+ )
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "process",
+ "directory",
+ "--model",
+ str(tiny_checkpoint_path),
+ str(example_audio_dir),
+ str(output_path),
+ "--batch-size",
+ "1",
+ "--workers",
+ "0",
+ "--outputs-config",
+ str(outputs_config_path),
+ ],
+ )
+
+ assert result.exit_code == 0
+
+ cnn_csvs = sorted(output_path.rglob("*_cnn_features.csv"))
+ assert len(cnn_csvs) == 3
+
+ first_df = pd.read_csv(cnn_csvs[0])
+ assert not first_df.empty
+ assert list(first_df.columns) == [
+ str(ii) for ii in range(len(first_df.columns))
+ ]
+
+
+def test_cli_process_directory_defaults_to_batdetect2_without_output_options(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+ single_audio_dir: Path,
+) -> None:
+ """User story: default process output stays batdetect2 for CLI users."""
+
+ output_path = tmp_path / "predictions"
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "process",
+ "directory",
+ "--model",
+ str(tiny_checkpoint_path),
+ str(single_audio_dir),
+ str(output_path),
+ "--batch-size",
+ "1",
+ "--workers",
+ "0",
+ ],
+ )
+
+ assert result.exit_code == 0
+ assert output_path.exists()
+ assert len(list(output_path.glob("*.json"))) == 1
+ assert len(list(output_path.glob("*.csv"))) == 1
+ assert len(list(output_path.glob("*.nc"))) == 0
+
+
+def test_cli_process_file_list_runs_on_real_audio(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+ single_audio_dir: Path,
+) -> None:
+ """User story: process an explicit list of files."""
+
+ audio_file = next(single_audio_dir.glob("*.wav"))
+ file_list = tmp_path / "files.txt"
+ file_list.write_text(f"{audio_file}\n")
+
+ output_path = tmp_path / "predictions"
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "process",
+ "file_list",
+ "--model",
+ str(tiny_checkpoint_path),
+ str(file_list),
+ str(output_path),
+ "--batch-size",
+ "1",
+ "--workers",
+ "0",
+ "--format",
+ "batdetect2",
+ ],
+ )
+
+ assert result.exit_code == 0
+ assert output_path.exists()
+ assert len(list(output_path.glob("*.json"))) == 1
+
+
+def test_cli_process_dataset_runs_on_aoef_metadata(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+ single_audio_dir: Path,
+) -> None:
+ """User story: process from AOEF dataset metadata file."""
+
+ audio_file = next(single_audio_dir.glob("*.wav"))
+ recording = data.Recording.from_file(audio_file)
+ clip = data.Clip(
+ recording=recording,
+ start_time=0,
+ end_time=recording.duration,
+ )
+ annotation_set = data.AnnotationSet(
+ name="test",
+ description="process dataset test",
+ clip_annotations=[data.ClipAnnotation(clip=clip, sound_events=[])],
+ )
+
+ dataset_path = tmp_path / "dataset.json"
+ io.save(annotation_set, dataset_path)
+
+ output_path = tmp_path / "predictions"
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "process",
+ "dataset",
+ "--model",
+ str(tiny_checkpoint_path),
+ str(dataset_path),
+ str(output_path),
+ "--batch-size",
+ "1",
+ "--workers",
+ "0",
+ "--format",
+ "batdetect2",
+ ],
+ )
+
+ assert result.exit_code == 0
+ assert output_path.exists()
+ assert len(list(output_path.glob("*.json"))) == 1
+
+
+@pytest.mark.parametrize(
+ ("format_name", "expected_pattern", "writes_single_file"),
+ [
+ ("batdetect2", "*.json", False),
+ ("raw", "*.nc", False),
+ ("soundevent", "*.json", True),
+ ],
+)
+def test_cli_process_directory_supports_output_format_override(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+ single_audio_dir: Path,
+ format_name: str,
+ expected_pattern: str,
+ writes_single_file: bool,
+) -> None:
+ """User story: change output format via --format only."""
+
+ output_path = tmp_path / f"predictions_{format_name}"
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "process",
+ "directory",
+ "--model",
+ str(tiny_checkpoint_path),
+ str(single_audio_dir),
+ str(output_path),
+ "--batch-size",
+ "1",
+ "--workers",
+ "0",
+ "--format",
+ format_name,
+ ],
+ )
+
+ assert result.exit_code == 0
+
+ if writes_single_file:
+ assert output_path.with_suffix(".json").exists()
+ else:
+ assert output_path.exists()
+ assert len(list(output_path.glob(expected_pattern))) >= 1
+
+
+def test_cli_process_dataset_deduplicates_recordings(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+ single_audio_dir: Path,
+) -> None:
+ """User story: duplicated recording entries are processed once."""
+
+ audio_file = next(single_audio_dir.glob("*.wav"))
+ recording = data.Recording.from_file(audio_file)
+ first_clip = data.Clip(
+ recording=recording,
+ start_time=0,
+ end_time=recording.duration,
+ )
+ second_clip = data.Clip(
+ recording=recording,
+ start_time=0,
+ end_time=recording.duration,
+ )
+ annotation_set = data.AnnotationSet(
+ name="dupe-recording-dataset",
+ description="contains same recording twice",
+ clip_annotations=[
+ data.ClipAnnotation(clip=first_clip, sound_events=[]),
+ data.ClipAnnotation(clip=second_clip, sound_events=[]),
+ ],
+ )
+
+ dataset_path = tmp_path / "dupes.json"
+ io.save(annotation_set, dataset_path)
+
+ output_path = tmp_path / "predictions"
+ result = CliRunner().invoke(
+ cli,
+ [
+ "process",
+ "dataset",
+ "--model",
+ str(tiny_checkpoint_path),
+ str(dataset_path),
+ str(output_path),
+ "--batch-size",
+ "1",
+ "--workers",
+ "0",
+ "--format",
+ "raw",
+ ],
+ )
+
+ assert result.exit_code == 0
+ assert output_path.exists()
+ assert len(list(output_path.glob("*.nc"))) == 1
+
+
+def test_cli_process_rejects_unknown_output_format(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+ single_audio_dir: Path,
+) -> None:
+ """User story: invalid output format fails with error."""
+
+ output_path = tmp_path / "predictions"
+ result = CliRunner().invoke(
+ cli,
+ [
+ "process",
+ "directory",
+ "--model",
+ str(tiny_checkpoint_path),
+ str(single_audio_dir),
+ str(output_path),
+ "--format",
+ "not_a_real_format",
+ ],
+ )
+
+ assert result.exit_code != 0
diff --git a/tests/test_cli/test_train.py b/tests/test_cli/test_train.py
new file mode 100644
index 00000000..5fd0210f
--- /dev/null
+++ b/tests/test_cli/test_train.py
@@ -0,0 +1,104 @@
+"""CLI tests for train command."""
+
+from pathlib import Path
+
+import pytest
+from click.testing import CliRunner
+
+from batdetect2.cli import cli
+from batdetect2.models import ModelConfig
+
+
+def test_cli_train_help() -> None:
+ """User story: inspect train command interface and options."""
+
+ result = CliRunner().invoke(cli, ["train", "--help"])
+
+ assert result.exit_code == 0
+ assert "TRAIN_DATASET" in result.output
+ assert "--training-config" in result.output
+ assert "--model" in result.output
+
+
+@pytest.mark.slow
+def test_cli_train_from_checkpoint_runs_on_small_dataset(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+) -> None:
+ """User story: continue training from checkpoint via CLI."""
+
+ ckpt_dir = tmp_path / "checkpoints"
+ log_dir = tmp_path / "logs"
+ ckpt_dir.mkdir()
+ log_dir.mkdir()
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "train",
+ "example_data/dataset.yaml",
+ "--val-dataset",
+ "example_data/dataset.yaml",
+ "--model",
+ str(tiny_checkpoint_path),
+ "--num-epochs",
+ "1",
+ "--train-workers",
+ "0",
+ "--val-workers",
+ "0",
+ "--ckpt-dir",
+ str(ckpt_dir),
+ "--log-dir",
+ str(log_dir),
+ ],
+ )
+
+ assert result.exit_code == 0
+ assert len(list(ckpt_dir.rglob("*.ckpt"))) >= 1
+
+
+def test_cli_train_rejects_model_and_model_config_together(
+ tmp_path: Path,
+ tiny_checkpoint_path: Path,
+) -> None:
+ """User story: invalid train flags fail with clear usage error."""
+
+ model_config_path = tmp_path / "model.yaml"
+ model_config_path.write_text(ModelConfig().to_yaml_string())
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "train",
+ "example_data/dataset.yaml",
+ "--model",
+ str(tiny_checkpoint_path),
+ "--model-config",
+ str(model_config_path),
+ ],
+ )
+
+ assert result.exit_code != 0
+ assert "--model-config cannot be used with --model" in result.output
+
+
+def test_cli_train_rejects_model_and_targets_together(
+ tiny_checkpoint_path: Path,
+) -> None:
+ """User story: checkpoint training does not accept new targets."""
+
+ result = CliRunner().invoke(
+ cli,
+ [
+ "train",
+ "example_data/dataset.yaml",
+ "--model",
+ str(tiny_checkpoint_path),
+ "--targets",
+ "example_data/targets.yaml",
+ ],
+ )
+
+ assert result.exit_code != 0
+ assert "--targets cannot be used with --model" in result.output
diff --git a/tests/test_contrib.py b/tests/test_contrib.py
index b97e9c04..ecfa8949 100644
--- a/tests/test_contrib.py
+++ b/tests/test_contrib.py
@@ -2,11 +2,13 @@
from pathlib import Path
+import pytest
from click.testing import CliRunner
from batdetect2.cli import cli
runner = CliRunner()
+pytestmark = pytest.mark.slow
def test_can_process_jeff37_files(
diff --git a/tests/test_core/__init__.py b/tests/test_core/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_core/test_registry.py b/tests/test_core/test_registry.py
new file mode 100644
index 00000000..3c422927
--- /dev/null
+++ b/tests/test_core/test_registry.py
@@ -0,0 +1,419 @@
+"""Tests for the Registry and SimpleRegistry classes.
+
+Covers:
+- SimpleRegistry: registration, retrieval, and membership checks.
+- Registry: decorator-based registration, config type tracking,
+ discriminator-based dispatch, and error handling.
+- ImportConfig base class and add_import_config decorator utility.
+- AnnotationFormatImportConfig: concrete per-registry escape hatch.
+"""
+
+from typing import Literal
+
+import pytest
+from pydantic import BaseModel
+
+from batdetect2.core.registries import (
+ ImportConfig,
+ Registry,
+ SimpleRegistry,
+ add_import_config,
+)
+
+
+class TestSimpleRegistry:
+ def test_register_and_get(self):
+ """Registered objects can be retrieved by name."""
+ registry = SimpleRegistry("test")
+
+ @registry.register("my_item")
+ def item():
+ return 42
+
+ assert registry.get("my_item")() == 42
+
+ def test_register_returns_original_object(self):
+ """The register decorator returns the decorated object unchanged."""
+ registry = SimpleRegistry[int]("test")
+
+ @registry.register("x")
+ def fn() -> int:
+ return 7
+
+ assert fn() == 7
+
+ def test_has_returns_true_for_registered_name(self):
+ """has() returns True for a name that was registered."""
+ registry = SimpleRegistry("test")
+ registry.register("present")(lambda: None)
+ assert registry.has("present") is True
+
+ def test_has_returns_false_for_unknown_name(self):
+ """has() returns False for a name that was never registered."""
+ registry = SimpleRegistry("test")
+ assert registry.has("absent") is False
+
+ def test_get_raises_for_unknown_name(self):
+ """get() raises KeyError for an unregistered name."""
+ registry = SimpleRegistry("test")
+ with pytest.raises(KeyError):
+ registry.get("nonexistent")
+
+ def test_register_overwrites_existing_entry(self):
+ """Re-registering the same name replaces the previous entry."""
+ registry = SimpleRegistry("test")
+ registry.register("key")(lambda: 1)
+ registry.register("key")(lambda: 2)
+ assert registry.get("key")() == 2
+
+ def test_multiple_items_registered_independently(self):
+ """Multiple items can be registered without interfering."""
+ registry = SimpleRegistry("test")
+ registry.register("a")(lambda: "a")
+ registry.register("b")(lambda: "b")
+ assert registry.get("a")() == "a"
+ assert registry.get("b")() == "b"
+
+
+class TestRegistryRegister:
+ def test_register_makes_factory_callable_via_build(self):
+ """A registered factory is reachable through build()."""
+
+ class DummyConfig(BaseModel):
+ name: Literal["dummy"] = "dummy"
+ value: int = 0
+
+ class DummyOutput:
+ def __init__(self, config: DummyConfig):
+ self.config = config
+
+ registry: Registry[DummyOutput, []] = Registry("test")
+ registry.register(DummyConfig)(lambda c: DummyOutput(c))
+ result = registry.build(DummyConfig(value=3))
+ assert isinstance(result, DummyOutput)
+ assert result.config.value == 3
+
+ def test_register_makes_config_type_retrievable(self):
+ """A registered config type is reachable through get_config_type()."""
+
+ class DummyConfig(BaseModel):
+ name: Literal["dummy"] = "dummy"
+ value: int = 0
+
+ registry: Registry[object, []] = Registry("test")
+ registry.register(DummyConfig)(lambda c: c)
+ assert registry.get_config_type("dummy") is DummyConfig
+
+ def test_register_raises_when_discriminator_field_missing(self):
+ """ValueError is raised if config has no discriminator field."""
+
+ class ConfigWithoutDiscriminator(BaseModel):
+ unrelated_field: str = "hello"
+
+ registry: Registry[object, []] = Registry("test")
+ with pytest.raises(ValueError, match="'name' field"):
+ registry.register(ConfigWithoutDiscriminator)(lambda c: c)
+
+ def test_register_raises_when_discriminator_is_not_string(self):
+ """ValueError is raised if the discriminator default is not a str."""
+
+ class ConfigWithNonStringDiscriminator(BaseModel):
+ name: int = 42
+
+ registry: Registry[object, []] = Registry("test")
+ with pytest.raises(ValueError, match="'name' field must be a string"):
+ registry.register(ConfigWithNonStringDiscriminator)(lambda c: c)
+
+ def test_register_uses_custom_discriminator_field(self):
+ """Registry respects a non-default discriminator field name."""
+
+ class FormatConfig(BaseModel):
+ format: Literal["fmt"] = "fmt"
+
+ registry: Registry[object, []] = Registry(
+ "test", discriminator="format"
+ )
+ registry.register(FormatConfig)(lambda c: c)
+ assert registry.get_config_type("fmt") is FormatConfig
+
+ def test_register_decorator_returns_original_function(self):
+ """The register decorator returns the wrapped function unchanged."""
+
+ class DummyConfig(BaseModel):
+ name: Literal["dummy"] = "dummy"
+ value: int = 0
+
+ class DummyOutput:
+ def __init__(self, config: DummyConfig):
+ self.config = config
+
+ registry: Registry[DummyOutput, []] = Registry("test")
+
+ def factory(config: DummyConfig) -> DummyOutput:
+ return DummyOutput(config)
+
+ returned = registry.register(DummyConfig)(factory)
+ assert returned is factory
+
+
+class TestRegistryConfigTypes:
+ def test_get_config_types_returns_all_registered_types(self):
+ """get_config_types() returns every registered config class."""
+
+ class DummyConfig(BaseModel):
+ name: Literal["dummy"] = "dummy"
+
+ class AnotherConfig(BaseModel):
+ name: Literal["another"] = "another"
+
+ registry: Registry[object, []] = Registry("test")
+ registry.register(DummyConfig)(lambda c: c)
+ registry.register(AnotherConfig)(lambda c: c)
+ config_types = registry.get_config_types()
+ assert DummyConfig in config_types
+ assert AnotherConfig in config_types
+
+ def test_get_config_types_empty_when_nothing_registered(self):
+ """get_config_types() returns empty tuple for a fresh registry."""
+ registry: Registry[object, []] = Registry("test")
+ assert registry.get_config_types() == ()
+
+ def test_get_config_type_returns_correct_class(self):
+ """get_config_type() returns the class registered under a key."""
+
+ class DummyConfig(BaseModel):
+ name: Literal["dummy"] = "dummy"
+
+ registry: Registry[object, []] = Registry("test")
+ registry.register(DummyConfig)(lambda c: c)
+ assert registry.get_config_type("dummy") is DummyConfig
+
+ def test_get_config_type_raises_for_unknown_key(self):
+ """get_config_type() raises ValueError for an unregistered key."""
+ registry: Registry[object, []] = Registry("test")
+ with pytest.raises(
+ ValueError, match="No config type with name 'unknown'"
+ ):
+ registry.get_config_type("unknown")
+
+ def test_get_config_type_error_message_lists_existing_keys(self):
+ """ValueError message includes the names of registered keys."""
+
+ class DummyConfig(BaseModel):
+ name: Literal["dummy"] = "dummy"
+
+ registry: Registry[object, []] = Registry("test")
+ registry.register(DummyConfig)(lambda c: c)
+ with pytest.raises(ValueError, match="dummy"):
+ registry.get_config_type("missing")
+
+
+class TestRegistryBuild:
+ def test_build_dispatches_to_correct_factory(self):
+ """build() calls the factory registered for the config's discriminator."""
+
+ class DummyConfig(BaseModel):
+ name: Literal["dummy"] = "dummy"
+ value: int = 0
+
+ class DummyOutput:
+ def __init__(self, config: DummyConfig):
+ self.config = config
+
+ registry: Registry[DummyOutput, []] = Registry("test")
+ registry.register(DummyConfig)(lambda c: DummyOutput(c))
+
+ config = DummyConfig(value=99)
+ result = registry.build(config)
+
+ assert isinstance(result, DummyOutput)
+ assert result.config.value == 99
+
+ def test_build_dispatches_to_correct_factory_among_multiple(self):
+ """build() picks the right factory when several are registered."""
+
+ class DummyConfig(BaseModel):
+ name: Literal["dummy"] = "dummy"
+
+ class AnotherConfig(BaseModel):
+ name: Literal["another"] = "another"
+
+ class DummyOutput:
+ def __init__(self, config: DummyConfig):
+ self.config = config
+
+ class AnotherOutput:
+ def __init__(self, config: AnotherConfig):
+ self.config = config
+
+ registry: Registry[object, []] = Registry("test")
+ registry.register(DummyConfig)(lambda c: DummyOutput(c))
+ registry.register(AnotherConfig)(lambda c: AnotherOutput(c))
+
+ assert isinstance(registry.build(DummyConfig()), DummyOutput)
+ assert isinstance(registry.build(AnotherConfig()), AnotherOutput)
+
+ def test_build_raises_not_implemented_for_unregistered_format(self):
+ """build() raises NotImplementedError for an unregistered discriminator."""
+ registry: Registry[object, []] = Registry("test")
+
+ class UnknownConfig(BaseModel):
+ name: Literal["unknown"] = "unknown"
+
+ with pytest.raises(NotImplementedError, match="'unknown'"):
+ registry.build(UnknownConfig())
+
+ def test_build_passes_config_to_factory(self):
+ """build() passes the exact config object through to the factory."""
+
+ class DummyConfig(BaseModel):
+ name: Literal["dummy"] = "dummy"
+ value: int = 0
+
+ registry: Registry[DummyConfig, []] = Registry("test")
+ received: list[DummyConfig] = []
+ registry.register(DummyConfig)(lambda c: received.append(c) or c)
+
+ config = DummyConfig(value=7)
+ registry.build(config)
+
+ assert received == [config]
+
+ def test_build_uses_custom_discriminator_field(self):
+ """build() resolves the factory using the configured discriminator."""
+
+ class FormatConfig(BaseModel):
+ format: Literal["fmt"] = "fmt"
+
+ registry: Registry[str, []] = Registry("test", discriminator="format")
+ registry.register(FormatConfig)(lambda c: "fmt_result")
+
+ assert registry.build(FormatConfig()) == "fmt_result"
+
+ def test_build_error_message_includes_registry_name(self):
+ """NotImplementedError message names the registry for easier debugging."""
+ registry: Registry[object, []] = Registry("my_registry")
+
+ class UnknownConfig(BaseModel):
+ name: Literal["ghost"] = "ghost"
+
+ with pytest.raises(NotImplementedError, match="my_registry"):
+ registry.build(UnknownConfig())
+
+
+class TestAddImportConfig:
+ def test_decorator_returns_config_class_unchanged(self):
+ """add_import_config returns the decorated class as-is."""
+
+ class MyImportConfig(ImportConfig):
+ name: Literal["import"] = "import"
+
+ registry: Registry[object, []] = Registry("test")
+ result = add_import_config(registry)(MyImportConfig)
+ assert result is MyImportConfig
+
+ def test_registered_config_type_is_discoverable(self):
+ """After decoration, get_config_type('import') returns the class."""
+ registry: Registry[object, []] = Registry("test")
+
+ @add_import_config(registry)
+ class MyImportConfig(ImportConfig):
+ name: Literal["import"] = "import"
+
+ assert registry.get_config_type("import") is MyImportConfig
+
+ def test_build_instantiates_target(self):
+ """build() with a registered import config instantiates the target."""
+ import collections
+
+ registry: Registry[object, []] = Registry("test")
+
+ @add_import_config(registry)
+ class MyImportConfig(ImportConfig):
+ name: Literal["import"] = "import"
+
+ config = MyImportConfig(target="collections.OrderedDict")
+ result = registry.build(config)
+ assert isinstance(result, collections.OrderedDict)
+
+ def test_build_forwards_arguments_to_target(self):
+ """build() passes config.arguments as kwargs to the target."""
+ import decimal
+
+ registry: Registry[object, []] = Registry("test")
+
+ @add_import_config(registry)
+ class MyImportConfig(ImportConfig):
+ name: Literal["import"] = "import"
+
+ config = MyImportConfig(
+ target="decimal.Decimal",
+ arguments={"value": "3.14"},
+ )
+ result = registry.build(config)
+ assert isinstance(result, decimal.Decimal)
+ assert result == decimal.Decimal("3.14")
+
+ def test_build_kwargs_override_config_arguments(self):
+ """kwargs passed to build() win over same-key entries in arguments."""
+ import decimal
+
+ registry: Registry[object, []] = Registry("test")
+
+ @add_import_config(registry)
+ class MyImportConfig(ImportConfig):
+ name: Literal["import"] = "import"
+
+ config = MyImportConfig(
+ target="decimal.Decimal",
+ arguments={"value": "1.00"},
+ )
+ result = registry.build(config, value="9.99")
+ assert isinstance(result, decimal.Decimal)
+ assert result == decimal.Decimal("9.99")
+
+ def test_build_bad_target_raises(self):
+ """build() raises when the dotted target path cannot be resolved."""
+ from hydra.errors import InstantiationException
+
+ registry: Registry[object, []] = Registry("test")
+
+ @add_import_config(registry)
+ class MyImportConfig(ImportConfig):
+ name: Literal["import"] = "import"
+
+ config = MyImportConfig(target="nonexistent.module.DoesNotExist")
+ with pytest.raises(InstantiationException):
+ registry.build(config)
+
+ def test_works_with_custom_discriminator_field(self):
+ """add_import_config works for registries with a non-default discriminator."""
+ import collections
+
+ registry: Registry[object, []] = Registry(
+ "test",
+ discriminator="format",
+ )
+
+ @add_import_config(registry)
+ class FormatImportConfig(ImportConfig):
+ format: Literal["import"] = "import"
+
+ config = FormatImportConfig(target="collections.OrderedDict")
+ result = registry.build(config)
+ assert isinstance(result, collections.OrderedDict)
+
+ def test_coexists_with_other_registered_entries(self):
+ """The import config entry does not interfere with other entries."""
+ registry: Registry[object, []] = Registry("test")
+
+ class DummyConfig(BaseModel):
+ name: Literal["dummy"] = "dummy"
+
+ @add_import_config(registry)
+ class MyImportConfig(ImportConfig):
+ name: Literal["import"] = "import"
+
+ registry.register(DummyConfig)(lambda c: "dummy_result")
+
+ assert registry.build(DummyConfig()) == "dummy_result"
diff --git a/tests/test_data/__init__.py b/tests/test_data/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_data/test_annotations/__init__.py b/tests/test_data/test_annotations/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_data/test_annotations/test_annotations.py b/tests/test_data/test_annotations/test_annotations.py
new file mode 100644
index 00000000..bcca2590
--- /dev/null
+++ b/tests/test_data/test_annotations/test_annotations.py
@@ -0,0 +1,152 @@
+"""Tests for the public load_annotated_dataset dispatcher.
+
+Covers:
+- load_annotated_dataset dispatches correctly to each of the three
+ registered annotation format loaders.
+- The base_dir keyword argument is forwarded to the loader.
+
+The internal load functions, config models, and filtering logic for each
+format are tested in their respective modules.
+"""
+
+import json
+from pathlib import Path
+
+from soundevent import data, io
+
+from batdetect2.data.annotations import (
+ AOEFAnnotations,
+ BatDetect2FilesAnnotations,
+ BatDetect2MergedAnnotations,
+ load_annotated_dataset,
+)
+
+ROOT_DIR = Path(__file__).parent.parent.parent.parent
+
+
+class TestLoadAnnotatedDataset:
+ def test_load_example_batdetect2_files_annotation_project(self):
+ """load_annotated_dataset works end-to-end with BatDetect2Files format.
+
+ Uses the committed example data so this test doubles as a smoke test
+ against the real on-disk data.
+ """
+ audio_dir = ROOT_DIR / "example_data" / "audio"
+ anns_dir = ROOT_DIR / "example_data" / "anns"
+ config = BatDetect2FilesAnnotations(
+ name="test",
+ audio_dir=audio_dir,
+ annotations_dir=anns_dir,
+ )
+
+ result = load_annotated_dataset(config)
+
+ assert isinstance(result, data.AnnotationSet)
+ assert result.name == "test"
+ assert len(result.clip_annotations) == 3
+
+ def test_dispatches_to_aoef_loader(
+ self,
+ tmp_path: Path,
+ create_recording,
+ create_clip,
+ create_clip_annotation,
+ create_annotation_set,
+ ):
+ """load_annotated_dataset returns an AnnotationSet for AOEFAnnotations."""
+ audio_dir = tmp_path / "audio"
+ audio_dir.mkdir()
+ rec = create_recording(path=audio_dir / "rec.wav")
+ clip = create_clip(rec)
+ ann = create_clip_annotation(clip)
+ annotation_set = create_annotation_set(
+ name="aoef_test", annotations=[ann]
+ )
+ anns_path = tmp_path / "anns.json"
+ io.save(annotation_set, anns_path)
+
+ config = AOEFAnnotations(
+ name="aoef_test",
+ audio_dir=audio_dir,
+ annotations_path=anns_path,
+ )
+
+ result = load_annotated_dataset(config)
+
+ assert isinstance(result, data.AnnotationSet)
+ assert len(result.clip_annotations) == 1
+
+ def test_dispatches_to_batdetect2_merged_loader(
+ self,
+ tmp_path: Path,
+ wav_factory,
+ ):
+ """load_annotated_dataset returns an AnnotationSet for BatDetect2Merged."""
+ audio_dir = tmp_path / "audio"
+ audio_dir.mkdir()
+ wav_factory(path=audio_dir / "rec.wav", duration=2.0)
+
+ merged = [
+ {
+ "id": "rec.wav",
+ "duration": 2.0,
+ "time_exp": 1.0,
+ "class_name": "Myotis",
+ "annotation": [],
+ "annotated": True,
+ "issues": False,
+ "notes": "",
+ }
+ ]
+ anns_path = tmp_path / "merged.json"
+ anns_path.write_text(json.dumps(merged))
+
+ config = BatDetect2MergedAnnotations(
+ name="merged_test",
+ audio_dir=audio_dir,
+ annotations_path=anns_path,
+ )
+
+ result = load_annotated_dataset(config)
+
+ assert isinstance(result, data.AnnotationSet)
+ assert len(result.clip_annotations) == 1
+
+ def test_passes_base_dir_to_loader(
+ self,
+ tmp_path: Path,
+ wav_factory,
+ ):
+ """base_dir is forwarded to the loader so relative paths are resolved."""
+ audio_dir = tmp_path / "audio"
+ audio_dir.mkdir()
+ anns_dir = tmp_path / "anns"
+ anns_dir.mkdir()
+
+ wav_factory(path=audio_dir / "rec.wav", duration=2.0)
+ ann_data = {
+ "id": "rec.wav",
+ "duration": 2.0,
+ "time_exp": 1.0,
+ "class_name": "Myotis",
+ "annotation": [],
+ "annotated": True,
+ "issues": False,
+ "notes": "",
+ }
+ (anns_dir / "rec.wav.json").write_text(json.dumps(ann_data))
+
+ config = BatDetect2FilesAnnotations(
+ name="base_dir_test",
+ audio_dir=Path("audio"),
+ annotations_dir=Path("anns"),
+ )
+
+ result = load_annotated_dataset(config, base_dir=tmp_path)
+
+ assert isinstance(result, data.AnnotationSet)
+ assert len(result.clip_annotations) == 1
+ assert (
+ result.clip_annotations[0].clip.recording.path
+ == audio_dir / "rec.wav"
+ )
diff --git a/tests/test_data/test_annotations/test_aoef.py b/tests/test_data/test_annotations/test_aoef.py
new file mode 100644
index 00000000..ce89e3fd
--- /dev/null
+++ b/tests/test_data/test_annotations/test_aoef.py
@@ -0,0 +1,784 @@
+import uuid
+from pathlib import Path
+from typing import Callable, Optional, Sequence
+
+import pytest
+from pydantic import ValidationError
+from soundevent import data, io
+from soundevent.data.annotation_tasks import AnnotationState
+
+from batdetect2.data.annotations import aoef
+
+
+@pytest.fixture
+def base_dir(tmp_path: Path) -> Path:
+ path = tmp_path / "base_dir"
+ path.mkdir(parents=True, exist_ok=True)
+ return path
+
+
+@pytest.fixture
+def audio_dir(base_dir: Path) -> Path:
+ path = base_dir / "audio"
+ path.mkdir(parents=True, exist_ok=True)
+ return path
+
+
+@pytest.fixture
+def anns_dir(base_dir: Path) -> Path:
+ path = base_dir / "annotations"
+ path.mkdir(parents=True, exist_ok=True)
+ return path
+
+
+def create_task(
+ clip: data.Clip,
+ badges: list[data.StatusBadge],
+ task_id: Optional[uuid.UUID] = None,
+) -> data.AnnotationTask:
+ """Creates a simple AnnotationTask for testing."""
+ return data.AnnotationTask(
+ uuid=task_id or uuid.uuid4(),
+ clip=clip,
+ status_badges=badges,
+ )
+
+
+def test_annotation_task_filter_defaults():
+ """Test default values of AnnotationTaskFilter."""
+ f = aoef.AnnotationTaskFilter()
+ assert f.only_completed is True
+ assert f.only_verified is False
+ assert f.exclude_issues is True
+
+
+def test_annotation_task_filter_initialization():
+ """Test initialization of AnnotationTaskFilter with non-default values."""
+ f = aoef.AnnotationTaskFilter(
+ only_completed=False,
+ only_verified=True,
+ exclude_issues=False,
+ )
+ assert f.only_completed is False
+ assert f.only_verified is True
+ assert f.exclude_issues is False
+
+
+def test_aoef_annotations_defaults(
+ audio_dir: Path,
+ anns_dir: Path,
+):
+ """Test default values of AOEFAnnotations."""
+ annotations_path = anns_dir / "test.aoef"
+ config = aoef.AOEFAnnotations(
+ name="default_name",
+ audio_dir=audio_dir,
+ annotations_path=annotations_path,
+ )
+ assert config.format == "aoef"
+ assert config.annotations_path == annotations_path
+ assert config.audio_dir == audio_dir
+ assert isinstance(config.filter, aoef.AnnotationTaskFilter)
+ assert config.filter.only_completed is True
+ assert config.filter.only_verified is False
+ assert config.filter.exclude_issues is True
+
+
+def test_aoef_annotations_initialization(tmp_path):
+ """Test initialization of AOEFAnnotations with specific values."""
+ annotations_path = tmp_path / "custom.json"
+ audio_dir = Path("audio/files")
+ custom_filter = aoef.AnnotationTaskFilter(
+ only_completed=False, only_verified=True
+ )
+ config = aoef.AOEFAnnotations(
+ name="custom_name",
+ description="custom_desc",
+ audio_dir=audio_dir,
+ annotations_path=annotations_path,
+ filter=custom_filter,
+ )
+ assert config.name == "custom_name"
+ assert config.description == "custom_desc"
+ assert config.format == "aoef"
+ assert config.audio_dir == audio_dir
+ assert config.annotations_path == annotations_path
+ assert config.filter is custom_filter
+
+
+def test_aoef_annotations_initialization_no_filter(tmp_path):
+ """Test initialization of AOEFAnnotations with filter=None."""
+ annotations_path = tmp_path / "no_filter.aoef"
+ audio_dir = tmp_path / "audio"
+ config = aoef.AOEFAnnotations(
+ name="no_filter_name",
+ description="no_filter_desc",
+ audio_dir=audio_dir,
+ annotations_path=annotations_path,
+ filter=None,
+ )
+ assert config.format == "aoef"
+ assert config.annotations_path == annotations_path
+ assert config.filter is None
+
+
+def test_aoef_annotations_validation_error(tmp_path):
+ """Test Pydantic validation for missing required fields."""
+ with pytest.raises(ValidationError, match="annotations_path"):
+ aoef.AOEFAnnotations( # type: ignore
+ name="test_name",
+ audio_dir=tmp_path,
+ )
+ with pytest.raises(ValidationError, match="name"):
+ aoef.AOEFAnnotations( # type: ignore
+ annotations_path=tmp_path / "dummy.aoef",
+ audio_dir=tmp_path,
+ )
+
+
+@pytest.mark.parametrize(
+ "badges, only_completed, only_verified, exclude_issues, expected",
+ [
+ ([], True, False, True, False), # No badges -> not completed
+ (
+ [data.StatusBadge(state=AnnotationState.completed)],
+ True,
+ False,
+ True,
+ True,
+ ),
+ (
+ [data.StatusBadge(state=AnnotationState.verified)],
+ True,
+ False,
+ True,
+ False,
+ ), # Not completed
+ (
+ [data.StatusBadge(state=AnnotationState.rejected)],
+ True,
+ False,
+ True,
+ False,
+ ), # Has issues
+ (
+ [
+ data.StatusBadge(state=AnnotationState.completed),
+ data.StatusBadge(state=AnnotationState.rejected),
+ ],
+ True,
+ False,
+ True,
+ False,
+ ), # Completed but has issues
+ (
+ [
+ data.StatusBadge(state=AnnotationState.completed),
+ data.StatusBadge(state=AnnotationState.verified),
+ ],
+ True,
+ False,
+ True,
+ True,
+ ), # Completed, verified doesn't matter
+ # Verified only (completed=F, verified=T, exclude_issues=T)
+ (
+ [data.StatusBadge(state=AnnotationState.verified)],
+ False,
+ True,
+ True,
+ True,
+ ),
+ (
+ [data.StatusBadge(state=AnnotationState.completed)],
+ False,
+ True,
+ True,
+ False,
+ ), # Not verified
+ (
+ [
+ data.StatusBadge(state=AnnotationState.verified),
+ data.StatusBadge(state=AnnotationState.rejected),
+ ],
+ False,
+ True,
+ True,
+ False,
+ ), # Verified but has issues
+ # Completed AND Verified (completed=T, verified=T, exclude_issues=T)
+ (
+ [
+ data.StatusBadge(state=AnnotationState.completed),
+ data.StatusBadge(state=AnnotationState.verified),
+ ],
+ True,
+ True,
+ True,
+ True,
+ ),
+ (
+ [data.StatusBadge(state=AnnotationState.completed)],
+ True,
+ True,
+ True,
+ False,
+ ), # Not verified
+ (
+ [data.StatusBadge(state=AnnotationState.verified)],
+ True,
+ True,
+ True,
+ False,
+ ), # Not completed
+ # Include Issues (completed=T, verified=F, exclude_issues=F)
+ (
+ [
+ data.StatusBadge(state=AnnotationState.completed),
+ data.StatusBadge(state=AnnotationState.rejected),
+ ],
+ True,
+ False,
+ False,
+ True,
+ ), # Completed, issues allowed
+ (
+ [data.StatusBadge(state=AnnotationState.rejected)],
+ True,
+ False,
+ False,
+ False,
+ ), # Has issues, but not completed
+ # No filters (completed=F, verified=F, exclude_issues=F)
+ ([], False, False, False, True),
+ (
+ [data.StatusBadge(state=AnnotationState.rejected)],
+ False,
+ False,
+ False,
+ True,
+ ),
+ (
+ [data.StatusBadge(state=AnnotationState.completed)],
+ False,
+ False,
+ False,
+ True,
+ ),
+ (
+ [data.StatusBadge(state=AnnotationState.verified)],
+ False,
+ False,
+ False,
+ True,
+ ),
+ ],
+)
+def test_select_task(
+ badges: Sequence[data.StatusBadge],
+ only_completed: bool,
+ only_verified: bool,
+ exclude_issues: bool,
+ expected: bool,
+ create_recording: Callable[..., data.Recording],
+ create_clip: Callable[..., data.Clip],
+):
+ """Test select_task logic with various badge and filter combinations."""
+ rec = create_recording()
+ clip = create_clip(rec)
+ task = create_task(clip, badges=list(badges))
+ result = aoef.select_task(
+ task,
+ only_completed=only_completed,
+ only_verified=only_verified,
+ exclude_issues=exclude_issues,
+ )
+ assert result == expected
+
+
+def test_filter_ready_clips_default(
+ tmp_path: Path,
+ create_recording: Callable[..., data.Recording],
+ create_clip: Callable[..., data.Clip],
+ create_clip_annotation: Callable[..., data.ClipAnnotation],
+ create_annotation_project: Callable[..., data.AnnotationProject],
+):
+ """Test filter_ready_clips with default filtering."""
+ rec = create_recording(path=tmp_path / "rec.wav")
+ clip_completed = create_clip(rec, 0, 1)
+ clip_verified = create_clip(rec, 1, 2)
+ clip_rejected = create_clip(rec, 2, 3)
+ clip_completed_rejected = create_clip(rec, 3, 4)
+ clip_no_badges = create_clip(rec, 4, 5)
+
+ task_completed = create_task(
+ clip_completed, [data.StatusBadge(state=AnnotationState.completed)]
+ )
+ task_verified = create_task(
+ clip_verified, [data.StatusBadge(state=AnnotationState.verified)]
+ )
+ task_rejected = create_task(
+ clip_rejected, [data.StatusBadge(state=AnnotationState.rejected)]
+ )
+ task_completed_rejected = create_task(
+ clip_completed_rejected,
+ [
+ data.StatusBadge(state=AnnotationState.completed),
+ data.StatusBadge(state=AnnotationState.rejected),
+ ],
+ )
+ task_no_badges = create_task(clip_no_badges, [])
+
+ ann_completed = create_clip_annotation(clip_completed)
+ ann_verified = create_clip_annotation(clip_verified)
+ ann_rejected = create_clip_annotation(clip_rejected)
+ ann_completed_rejected = create_clip_annotation(clip_completed_rejected)
+ ann_no_badges = create_clip_annotation(clip_no_badges)
+
+ project = create_annotation_project(
+ name="FilterTestProject",
+ description="Project for testing filters",
+ tasks=[
+ task_completed,
+ task_verified,
+ task_rejected,
+ task_completed_rejected,
+ task_no_badges,
+ ],
+ annotations=[
+ ann_completed,
+ ann_verified,
+ ann_rejected,
+ ann_completed_rejected,
+ ann_no_badges,
+ ],
+ )
+
+ filtered_set = aoef.filter_ready_clips(project)
+
+ assert isinstance(filtered_set, data.AnnotationSet)
+ assert filtered_set.name == project.name
+ assert filtered_set.description == project.description
+ assert len(filtered_set.clip_annotations) == 1
+ assert filtered_set.clip_annotations[0].clip.uuid == clip_completed.uuid
+
+ expected_uuid = uuid.uuid5(project.uuid, f"{True}_{False}_{True}")
+ assert filtered_set.uuid == expected_uuid
+
+
+def test_filter_ready_clips_custom_filter(
+    tmp_path: Path,
+    create_recording: Callable[..., data.Recording],
+    create_clip: Callable[..., data.Clip],
+    create_clip_annotation: Callable[..., data.ClipAnnotation],
+    create_annotation_project: Callable[..., data.AnnotationProject],
+):
+    """Test filter_ready_clips with custom filtering (verified=T, issues=F)."""
+    # Five clips covering each relevant status-badge combination.
+    rec = create_recording(path=tmp_path / "rec.wav")
+    clip_completed = create_clip(rec, 0, 1)
+    clip_verified = create_clip(rec, 1, 2)
+    clip_rejected = create_clip(rec, 2, 3)
+    clip_completed_verified = create_clip(rec, 3, 4)
+    clip_verified_rejected = create_clip(rec, 4, 5)
+
+    task_completed = create_task(
+        clip_completed, [data.StatusBadge(state=AnnotationState.completed)]
+    )
+    task_verified = create_task(
+        clip_verified, [data.StatusBadge(state=AnnotationState.verified)]
+    )
+    task_rejected = create_task(
+        clip_rejected, [data.StatusBadge(state=AnnotationState.rejected)]
+    )
+    task_completed_verified = create_task(
+        clip_completed_verified,
+        [
+            data.StatusBadge(state=AnnotationState.completed),
+            data.StatusBadge(state=AnnotationState.verified),
+        ],
+    )
+    task_verified_rejected = create_task(
+        clip_verified_rejected,
+        [
+            data.StatusBadge(state=AnnotationState.verified),
+            data.StatusBadge(state=AnnotationState.rejected),
+        ],
+    )
+
+    ann_completed = create_clip_annotation(clip_completed)
+    ann_verified = create_clip_annotation(clip_verified)
+    ann_rejected = create_clip_annotation(clip_rejected)
+    ann_completed_verified = create_clip_annotation(clip_completed_verified)
+    ann_verified_rejected = create_clip_annotation(clip_verified_rejected)
+
+    project = create_annotation_project(
+        tasks=[
+            task_completed,
+            task_verified,
+            task_rejected,
+            task_completed_verified,
+            task_verified_rejected,
+        ],
+        annotations=[
+            ann_completed,
+            ann_verified,
+            ann_rejected,
+            ann_completed_verified,
+            ann_verified_rejected,
+        ],
+    )
+
+    filtered_set = aoef.filter_ready_clips(
+        project, only_completed=False, only_verified=True, exclude_issues=False
+    )
+
+    # Only clips carrying a "verified" badge survive; rejection badges are
+    # ignored because exclude_issues=False.
+    assert len(filtered_set.clip_annotations) == 3
+    filtered_clip_uuids = {
+        ann.clip.uuid for ann in filtered_set.clip_annotations
+    }
+    assert clip_verified.uuid in filtered_clip_uuids
+    assert clip_completed_verified.uuid in filtered_clip_uuids
+    assert clip_verified_rejected.uuid in filtered_clip_uuids
+
+    # The derived set UUID is a uuid5 of the project UUID and the three
+    # filter flags, so the same filter always yields the same set identity.
+    expected_uuid = uuid.uuid5(project.uuid, f"{False}_{True}_{False}")
+    assert filtered_set.uuid == expected_uuid
+
+
+def test_filter_ready_clips_no_filters(
+ tmp_path: Path,
+ create_recording: Callable[..., data.Recording],
+ create_clip: Callable[..., data.Clip],
+ create_clip_annotation: Callable[..., data.ClipAnnotation],
+ create_annotation_project: Callable[..., data.AnnotationProject],
+):
+ """Test filter_ready_clips with all filters disabled."""
+ rec = create_recording(path=tmp_path / "rec.wav")
+ clip1 = create_clip(rec, 0, 1)
+ clip2 = create_clip(rec, 1, 2)
+
+ task1 = create_task(
+ clip1, [data.StatusBadge(state=AnnotationState.rejected)]
+ )
+ task2 = create_task(clip2, [])
+ ann1 = create_clip_annotation(clip1)
+ ann2 = create_clip_annotation(clip2)
+
+ project = create_annotation_project(
+ tasks=[task1, task2], annotations=[ann1, ann2]
+ )
+
+ filtered_set = aoef.filter_ready_clips(
+ project,
+ only_completed=False,
+ only_verified=False,
+ exclude_issues=False,
+ )
+
+ assert len(filtered_set.clip_annotations) == 2
+ filtered_clip_uuids = {
+ ann.clip.uuid for ann in filtered_set.clip_annotations
+ }
+ assert clip1.uuid in filtered_clip_uuids
+ assert clip2.uuid in filtered_clip_uuids
+
+ expected_uuid = uuid.uuid5(project.uuid, f"{False}_{False}_{False}")
+ assert filtered_set.uuid == expected_uuid
+
+
+def test_filter_ready_clips_empty_project(
+ create_annotation_project: Callable[..., data.AnnotationProject],
+):
+ """Test filter_ready_clips with an empty project."""
+ project = create_annotation_project(tasks=[], annotations=[])
+ filtered_set = aoef.filter_ready_clips(project)
+ assert len(filtered_set.clip_annotations) == 0
+ assert filtered_set.name == project.name
+ assert filtered_set.description == project.description
+
+
+def test_filter_ready_clips_no_matching_tasks(
+ tmp_path: Path,
+ create_recording: Callable[..., data.Recording],
+ create_clip: Callable[..., data.Clip],
+ create_clip_annotation: Callable[..., data.ClipAnnotation],
+ create_annotation_project: Callable[..., data.AnnotationProject],
+):
+ """Test filter_ready_clips when no tasks match the criteria."""
+ rec = create_recording(path=tmp_path / "rec.wav")
+ clip_rejected = create_clip(rec, 0, 1)
+
+ task_rejected = create_task(
+ clip_rejected, [data.StatusBadge(state=AnnotationState.rejected)]
+ )
+ ann_rejected = create_clip_annotation(clip_rejected)
+
+ project = create_annotation_project(
+ tasks=[task_rejected], annotations=[ann_rejected]
+ )
+
+ filtered_set = aoef.filter_ready_clips(project)
+ assert len(filtered_set.clip_annotations) == 0
+
+
+def test_load_aoef_annotated_dataset_set(
+    tmp_path: Path,
+    create_recording: Callable[..., data.Recording],
+    create_clip: Callable[..., data.Clip],
+    create_clip_annotation: Callable[..., data.ClipAnnotation],
+    create_annotation_set: Callable[..., data.AnnotationSet],
+):
+    """Test loading a standard AnnotationSet file."""
+    # Round-trip: save a minimal one-annotation set, then load via the config.
+    rec_path = tmp_path / "audio" / "rec1.wav"
+    rec_path.parent.mkdir()
+    rec = create_recording(path=rec_path)
+    clip = create_clip(rec)
+    ann = create_clip_annotation(clip)
+    original_set = create_annotation_set(annotations=[ann])
+
+    annotations_file = tmp_path / "set.json"
+    io.save(original_set, annotations_file)
+
+    config = aoef.AOEFAnnotations(
+        name="test_set_load",
+        annotations_path=annotations_file,
+        audio_dir=rec_path.parent,
+    )
+
+    loaded_set = aoef.load_aoef_annotated_dataset(config)
+
+    assert isinstance(loaded_set, data.AnnotationSet)
+
+    assert loaded_set.name == original_set.name
+    assert len(loaded_set.clip_annotations) == len(
+        original_set.clip_annotations
+    )
+    assert (
+        loaded_set.clip_annotations[0].clip.uuid
+        == original_set.clip_annotations[0].clip.uuid
+    )
+    # Recording paths come back resolved to absolute paths under audio_dir.
+    assert (
+        loaded_set.clip_annotations[0].clip.recording.path
+        == rec_path.resolve()
+    )
+
+
+def test_load_aoef_annotated_dataset_project_with_filter(
+    tmp_path: Path,
+    create_recording: Callable[..., data.Recording],
+    create_clip: Callable[..., data.Clip],
+    create_clip_annotation: Callable[..., data.ClipAnnotation],
+    create_annotation_project: Callable[..., data.AnnotationProject],
+):
+    """Test loading an AnnotationProject file with filtering enabled."""
+    rec_path = tmp_path / "audio" / "rec.wav"
+    rec_path.parent.mkdir()
+    rec = create_recording(path=rec_path)
+
+    # One clip passes the default filter (completed), one does not (rejected).
+    clip_completed = create_clip(rec, 0, 1)
+    clip_rejected = create_clip(rec, 1, 2)
+
+    task_completed = create_task(
+        clip_completed, [data.StatusBadge(state=AnnotationState.completed)]
+    )
+    task_rejected = create_task(
+        clip_rejected, [data.StatusBadge(state=AnnotationState.rejected)]
+    )
+
+    ann_completed = create_clip_annotation(clip_completed)
+    ann_rejected = create_clip_annotation(clip_rejected)
+
+    project = create_annotation_project(
+        name="ProjectToFilter",
+        tasks=[task_completed, task_rejected],
+        annotations=[ann_completed, ann_rejected],
+    )
+
+    annotations_file = tmp_path / "project.json"
+    io.save(project, annotations_file)
+
+    # No explicit filter argument: the config's default filter applies.
+    config = aoef.AOEFAnnotations(
+        name="test_project_filter_load",
+        annotations_path=annotations_file,
+        audio_dir=rec_path.parent,
+    )
+
+    loaded_data = aoef.load_aoef_annotated_dataset(config)
+
+    # Filtering converts the project into a plain AnnotationSet.
+    assert isinstance(loaded_data, data.AnnotationSet)
+    assert loaded_data.name == project.name
+    assert len(loaded_data.clip_annotations) == 1
+    assert loaded_data.clip_annotations[0].clip.uuid == clip_completed.uuid
+    assert (
+        loaded_data.clip_annotations[0].clip.recording.path
+        == rec_path.resolve()
+    )
+
+
+def test_load_aoef_annotated_dataset_project_no_filter(
+    tmp_path: Path,
+    create_recording: Callable[..., data.Recording],
+    create_clip: Callable[..., data.Clip],
+    create_clip_annotation: Callable[..., data.ClipAnnotation],
+    create_annotation_project: Callable[..., data.AnnotationProject],
+):
+    """Test loading an AnnotationProject file with filtering disabled."""
+    rec_path = tmp_path / "audio" / "rec.wav"
+    rec_path.parent.mkdir()
+    rec = create_recording(path=rec_path)
+    clip1 = create_clip(rec, 0, 1)
+    clip2 = create_clip(rec, 1, 2)
+
+    task1 = create_task(
+        clip1, [data.StatusBadge(state=AnnotationState.completed)]
+    )
+    task2 = create_task(
+        clip2, [data.StatusBadge(state=AnnotationState.rejected)]
+    )
+    ann1 = create_clip_annotation(clip1)
+    ann2 = create_clip_annotation(clip2)
+
+    original_project = create_annotation_project(
+        tasks=[task1, task2], annotations=[ann1, ann2]
+    )
+
+    annotations_file = tmp_path / "project_nofilter.json"
+    io.save(original_project, annotations_file)
+
+    # filter=None explicitly disables the default task filtering.
+    config = aoef.AOEFAnnotations(
+        name="test_project_nofilter_load",
+        annotations_path=annotations_file,
+        audio_dir=rec_path.parent,
+        filter=None,
+    )
+
+    loaded_data = aoef.load_aoef_annotated_dataset(config)
+
+    # Without filtering, the project round-trips unchanged (same UUID, both
+    # clip annotations retained, rejected clip included).
+    assert isinstance(loaded_data, data.AnnotationProject)
+    assert loaded_data.uuid == original_project.uuid
+    assert len(loaded_data.clip_annotations) == 2
+    assert (
+        loaded_data.clip_annotations[0].clip.recording.path
+        == rec_path.resolve()
+    )
+    assert (
+        loaded_data.clip_annotations[1].clip.recording.path
+        == rec_path.resolve()
+    )
+
+
+def test_load_aoef_annotated_dataset_base_dir(
+    tmp_path: Path,
+    create_recording: Callable[..., data.Recording],
+    create_clip: Callable[..., data.Clip],
+    create_clip_annotation: Callable[..., data.ClipAnnotation],
+    create_annotation_project: Callable[..., data.AnnotationProject],
+):
+    """Test loading with a base_dir specified."""
+    # Config paths are relative; they should resolve against base_dir.
+    base = tmp_path / "basedir"
+    base.mkdir()
+    audio_rel = Path("audio")
+    ann_rel = Path("annotations/project.json")
+
+    abs_audio_dir = base / audio_rel
+    abs_ann_path = base / ann_rel
+    abs_audio_dir.mkdir(parents=True)
+    abs_ann_path.parent.mkdir(parents=True)
+
+    rec = create_recording(path=abs_audio_dir / "rec.wav")
+    rec_path = rec.path
+
+    clip = create_clip(rec)
+
+    task = create_task(
+        clip, [data.StatusBadge(state=AnnotationState.completed)]
+    )
+    ann = create_clip_annotation(clip)
+    project = create_annotation_project(tasks=[task], annotations=[ann])
+    io.save(project, abs_ann_path)
+
+    config = aoef.AOEFAnnotations(
+        name="test_base_dir_load",
+        annotations_path=ann_rel,
+        audio_dir=audio_rel,
+        filter=aoef.AnnotationTaskFilter(),
+    )
+
+    loaded_set = aoef.load_aoef_annotated_dataset(config, base_dir=base)
+
+    assert isinstance(loaded_set, data.AnnotationSet)
+    assert len(loaded_set.clip_annotations) == 1
+
+    # Recording path must resolve to the absolute on-disk location.
+    assert (
+        loaded_set.clip_annotations[0].clip.recording.path
+        == rec_path.resolve()
+    )
+
+
+def test_load_aoef_annotated_dataset_file_not_found(tmp_path):
+ """Test FileNotFoundError when annotation file doesn't exist."""
+ config = aoef.AOEFAnnotations(
+ name="test_not_found",
+ annotations_path=tmp_path / "nonexistent.aoef",
+ audio_dir=tmp_path,
+ )
+ with pytest.raises(FileNotFoundError):
+ aoef.load_aoef_annotated_dataset(config)
+
+
+def test_load_aoef_annotated_dataset_file_not_found_with_base_dir(tmp_path):
+ """Test FileNotFoundError with base_dir."""
+ base = tmp_path / "base"
+ base.mkdir()
+ config = aoef.AOEFAnnotations(
+ name="test_not_found_base",
+ annotations_path=Path("nonexistent.aoef"),
+ audio_dir=Path("audio"),
+ )
+ with pytest.raises(FileNotFoundError):
+ aoef.load_aoef_annotated_dataset(config, base_dir=base)
+
+
+def test_load_aoef_annotated_dataset_invalid_content(tmp_path):
+    """Test ValidationError when file contains invalid JSON or non-soundevent data."""
+    invalid_file = tmp_path / "invalid.json"
+    invalid_file.write_text("{invalid json")
+
+    config = aoef.AOEFAnnotations(
+        name="test_invalid_content",
+        annotations_path=invalid_file,
+        audio_dir=tmp_path,
+    )
+    # The malformed file surfaces as a pydantic ValidationError, which is
+    # what callers must catch (not a plain ValueError).
+    with pytest.raises(ValidationError):
+        aoef.load_aoef_annotated_dataset(config)
+
+
+def test_load_aoef_annotated_dataset_wrong_object_type(
+    tmp_path: Path,
+    create_recording: Callable[..., data.Recording],
+):
+    """Test ValueError when file contains correct soundevent obj but wrong type."""
+    rec_path = tmp_path / "audio" / "rec.wav"
+    rec_path.parent.mkdir()
+    rec = create_recording(path=rec_path)
+    # A Dataset is a valid soundevent object but not an annotation container.
+    dataset = data.Dataset(
+        name="test_wrong_type",
+        description="Test for wrong type",
+        recordings=[rec],
+    )
+
+    wrong_type_file = tmp_path / "wrong_type.json"
+    io.save(dataset, wrong_type_file)
+
+    config = aoef.AOEFAnnotations(
+        name="test_wrong_type",
+        annotations_path=wrong_type_file,
+        audio_dir=rec_path.parent,
+    )
+
+    with pytest.raises(ValueError) as excinfo:
+        aoef.load_aoef_annotated_dataset(config)
+
+    assert (
+        "does not contain a soundevent AnnotationSet or AnnotationProject"
+        in str(excinfo.value)
+    )
diff --git a/tests/test_data/test_annotations/test_batdetect2.py b/tests/test_data/test_annotations/test_batdetect2.py
new file mode 100644
index 00000000..446a8fd9
--- /dev/null
+++ b/tests/test_data/test_annotations/test_batdetect2.py
@@ -0,0 +1,613 @@
+import json
+import uuid
+from pathlib import Path
+from typing import Any
+
+import pytest
+from soundevent import data
+
+from batdetect2.data.annotations.batdetect2 import (
+ AnnotationFilter,
+ BatDetect2FilesAnnotations,
+ BatDetect2MergedAnnotations,
+ load_batdetect2_files_annotated_dataset,
+ load_batdetect2_merged_annotated_dataset,
+)
+
+
+def create_legacy_file_annotation(
+ file_id: str,
+ duration: float = 5.0,
+ time_exp: float = 1.0,
+ class_name: str = "Myotis",
+ annotations: list[dict[str, Any]] | None = None,
+ annotated: bool = True,
+ issues: bool = False,
+ notes: str = "",
+) -> dict[str, Any]:
+ if annotations is None:
+ annotations = [
+ {
+ "class": "Myotis",
+ "event": "Echolocation",
+ "individual": 0,
+ "start_time": 1.1,
+ "end_time": 1.2,
+ "low_freq": 30000,
+ "high_freq": 40000,
+ },
+ {
+ "class": "Pipistrellus",
+ "event": "Echolocation",
+ "individual": 0,
+ "start_time": 2.5,
+ "end_time": 2.55,
+ "low_freq": 50000,
+ "high_freq": 55000,
+ },
+ ]
+ return {
+ "id": file_id,
+ "duration": duration,
+ "time_exp": time_exp,
+ "class_name": class_name,
+ "annotation": annotations,
+ "annotated": annotated,
+ "issues": issues,
+ "notes": notes,
+ }
+
+
+@pytest.fixture
+def batdetect2_files_test_setup(
+    tmp_path: Path, wav_factory
+) -> tuple[Path, Path, list[dict[str, Any]]]:
+    """Sets up a directory structure for batdetect2 files format tests."""
+    audio_dir = tmp_path / "audio"
+    audio_dir.mkdir()
+    anns_dir = tmp_path / "anns"
+    anns_dir.mkdir()
+
+    files_data = []
+
+    # 1. File with single myotis annotation
+    rec1_path = wav_factory(path=audio_dir / "rec1.wav", duration=5.0)
+    ann1_data = create_legacy_file_annotation(
+        file_id="rec1.wav",
+        annotated=True,
+        issues=False,
+        notes="Standard notes.",
+        class_name="Myotis",
+        annotations=[
+            {
+                "class": "Myotis",
+                "event": "Echolocation",
+                "individual": 0,
+                "start_time": 1.1,
+                "end_time": 1.2,
+                "low_freq": 30000,
+                "high_freq": 40000,
+            }
+        ],
+    )
+    (anns_dir / f"{rec1_path.name}.json").write_text(json.dumps(ann1_data))
+    files_data.append(ann1_data)
+
+    # 2. File that has not been annotated
+    rec2_path = wav_factory(path=audio_dir / "rec2.wav", duration=4.0)
+    ann2_data = create_legacy_file_annotation(
+        file_id="rec2.wav",
+        annotated=False,
+        issues=False,
+        annotations=[],
+        class_name="Unknown",
+    )
+    (anns_dir / f"{rec2_path.name}.json").write_text(json.dumps(ann2_data))
+    files_data.append(ann2_data)
+
+    # 3. File that has been annotated but has issues
+    rec3_path = wav_factory(path=audio_dir / "rec3.wav", duration=6.0)
+    ann3_data = create_legacy_file_annotation(
+        file_id="rec3.wav",
+        annotated=True,
+        issues=True,
+        notes="File has issues.",
+        class_name="Pipistrellus",
+    )
+    (anns_dir / f"{rec3_path.name}.json").write_text(json.dumps(ann3_data))
+    files_data.append(ann3_data)
+
+    # 4. File that has not been annotated and has issues
+    rec4_path = wav_factory(path=audio_dir / "rec4.wav", duration=3.0)
+    ann4_data = create_legacy_file_annotation(
+        file_id="rec4.wav", annotated=False, issues=True, class_name="Nyctalus"
+    )
+    (anns_dir / f"{rec4_path.name}.json").write_text(json.dumps(ann4_data))
+    files_data.append(ann4_data)
+
+    # 5. File that has been annotated but is missing audio
+    ann5_data = create_legacy_file_annotation(
+        file_id="rec_missing_audio.wav", annotated=True, issues=False
+    )
+    (anns_dir / "rec_missing_audio.wav.json").write_text(json.dumps(ann5_data))
+
+    # 6. File that has missing annotations
+    wav_factory(path=audio_dir / "rec_missing_ann.wav", duration=2.0)
+
+    # 7. A non-JSON file in the annotations directory
+    (anns_dir / "not_a_json.txt").write_text("hello")
+
+    return audio_dir, anns_dir, files_data
+
+
+@pytest.fixture
+def batdetect2_merged_test_setup(
+    tmp_path: Path, batdetect2_files_test_setup
+) -> tuple[Path, Path, list[dict[str, Any]]]:
+    """Sets up a directory structure for batdetect2 merged file format tests."""
+    audio_dir, _, files_data = batdetect2_files_test_setup
+    merged_anns_path = tmp_path / "merged_anns.json"
+
+    # Reuse the per-file fixture data, but drop the entry whose audio file
+    # was never created on disk.
+    merged_data = [
+        fd for fd in files_data if fd["id"] != "rec_missing_audio.wav"
+    ]
+    merged_anns_path.write_text(json.dumps(merged_data))
+
+    return audio_dir, merged_anns_path, merged_data
+
+
+def test_annotation_filter_defaults():
+ """Test default values for AnnotationFilter."""
+ filt = AnnotationFilter()
+ assert filt.only_annotated is True
+ assert filt.exclude_issues is True
+
+
+def test_annotation_filter_custom():
+ """Test custom values for AnnotationFilter."""
+ filt = AnnotationFilter(only_annotated=False, exclude_issues=False)
+ assert filt.only_annotated is False
+ assert filt.exclude_issues is False
+
+
+def test_batdetect2_files_annotations_config(tmp_path: Path):
+    """Test initialization of BatDetect2FilesAnnotations."""
+    anns_dir = tmp_path / "annotations"
+    config = BatDetect2FilesAnnotations(
+        name="test_files",
+        description="Test Files Desc",
+        audio_dir=tmp_path / "audio",
+        annotations_dir=anns_dir,
+    )
+    assert config.format == "batdetect2"
+    assert config.name == "test_files"
+    assert config.description == "Test Files Desc"
+    assert config.annotations_dir == anns_dir
+    # When no filter is given, the strict default AnnotationFilter applies.
+    assert isinstance(config.filter, AnnotationFilter)
+    assert config.filter.only_annotated is True
+    assert config.filter.exclude_issues is True
+
+
+def test_batdetect2_files_annotations_config_no_filter(tmp_path: Path):
+ """Test BatDetect2FilesAnnotations with filter explicitly set to None."""
+ anns_dir = tmp_path / "annotations"
+ data = {
+ "name": "test_files_no_filter",
+ "audio_dir": str(tmp_path / "audio"),
+ "annotations_dir": str(anns_dir),
+ "filter": None,
+ }
+ config = BatDetect2FilesAnnotations.model_validate(data)
+ assert config.filter is None
+
+
+def test_batdetect2_merged_annotations_config(tmp_path: Path):
+    """Test initialization of BatDetect2MergedAnnotations."""
+    anns_path = tmp_path / "annotations.json"
+    config = BatDetect2MergedAnnotations(
+        name="test_merged",
+        description="Test Merged Desc",
+        audio_dir=tmp_path / "audio",
+        annotations_path=anns_path,
+        filter=AnnotationFilter(only_annotated=False, exclude_issues=True),
+    )
+    # The merged format uses a distinct discriminator value.
+    assert config.format == "batdetect2_file"
+    assert config.name == "test_merged"
+    assert config.description == "Test Merged Desc"
+    assert config.annotations_path == anns_path
+    assert isinstance(config.filter, AnnotationFilter)
+    assert config.filter.only_annotated is False
+    assert config.filter.exclude_issues is True
+
+
+def test_batdetect2_merged_annotations_config_default_filter(tmp_path: Path):
+    """Test BatDetect2MergedAnnotations uses default filter if not provided."""
+    anns_path = tmp_path / "annotations.json"
+    config = BatDetect2MergedAnnotations(
+        name="test_merged_default",
+        audio_dir=tmp_path / "audio",
+        annotations_path=anns_path,
+    )
+    # Omitting filter yields the strict default AnnotationFilter.
+    assert isinstance(config.filter, AnnotationFilter)
+    assert config.filter.only_annotated is True
+    assert config.filter.exclude_issues is True
+
+
+class TestLoadBatDetect2Files:
+    """Tests for load_batdetect2_files_annotated_dataset (per-file format)."""
+
+    def test_load_default_filter(self, batdetect2_files_test_setup):
+        """Test loading with default filter (annotated=True, issues=False)."""
+        audio_dir, anns_dir, _ = batdetect2_files_test_setup
+        config = BatDetect2FilesAnnotations(
+            name="default_load",
+            audio_dir=audio_dir,
+            annotations_dir=anns_dir,
+        )
+
+        result_set = load_batdetect2_files_annotated_dataset(config)
+
+        assert isinstance(result_set, data.AnnotationSet)
+        assert result_set.name == "default_load"
+        # Only rec1 in the fixture is both annotated and issue-free.
+        assert len(result_set.clip_annotations) == 1
+
+        clip_ann = result_set.clip_annotations[0]
+        assert clip_ann.clip.recording.path.name == "rec1.wav"
+        assert clip_ann.clip.recording.duration == 5.0
+        assert len(clip_ann.sound_events) == 1
+        assert clip_ann.notes[0].message == "Standard notes."
+        clip_tag = data.find_tag(clip_ann.tags, term_label="Class")
+        assert clip_tag is not None
+        assert clip_tag.value == "Myotis"
+
+        # The class tag is mirrored onto the recording as well.
+        recording_tag = data.find_tag(
+            clip_ann.clip.recording.tags, term_label="Class"
+        )
+        assert recording_tag is not None
+        assert recording_tag.value == "Myotis"
+
+        se_ann = clip_ann.sound_events[0]
+        assert se_ann.sound_event.geometry is not None
+        # Bounding box is [start_time, low_freq, end_time, high_freq].
+        assert se_ann.sound_event.geometry.coordinates == [
+            1.1,
+            30000,
+            1.2,
+            40000,
+        ]
+
+        se_class_tag = data.find_tag(se_ann.tags, term_label="Class")
+        assert se_class_tag is not None
+        assert se_class_tag.value == "Myotis"
+
+        se_event_tag = data.find_tag(se_ann.tags, term_label="Call Type")
+        assert se_event_tag is not None
+        assert se_event_tag.value == "Echolocation"
+
+        se_individual_tag = data.find_tag(se_ann.tags, term_label="Individual")
+        assert se_individual_tag is not None
+        assert se_individual_tag.value == "0"
+
+    def test_load_only_annotated_false(self, batdetect2_files_test_setup):
+        """Test filter with only_annotated=False."""
+        audio_dir, anns_dir, _ = batdetect2_files_test_setup
+        config = BatDetect2FilesAnnotations(
+            name="ann_false",
+            audio_dir=audio_dir,
+            annotations_dir=anns_dir,
+            filter=AnnotationFilter(only_annotated=False, exclude_issues=True),
+        )
+        result_set = load_batdetect2_files_annotated_dataset(config)
+        # Issue-free files regardless of annotation status: rec1 and rec2.
+        assert len(result_set.clip_annotations) == 2
+        loaded_files = {
+            ann.clip.recording.path.name for ann in result_set.clip_annotations
+        }
+        assert loaded_files == {"rec1.wav", "rec2.wav"}
+
+    def test_load_exclude_issues_false(self, batdetect2_files_test_setup):
+        """Test filter with exclude_issues=False."""
+        audio_dir, anns_dir, _ = batdetect2_files_test_setup
+        config = BatDetect2FilesAnnotations(
+            name="iss_false",
+            audio_dir=audio_dir,
+            annotations_dir=anns_dir,
+            filter=AnnotationFilter(only_annotated=True, exclude_issues=False),
+        )
+        result_set = load_batdetect2_files_annotated_dataset(config)
+        # Annotated files regardless of issues: rec1 and rec3.
+        assert len(result_set.clip_annotations) == 2
+        loaded_files = {
+            ann.clip.recording.path.name for ann in result_set.clip_annotations
+        }
+        assert loaded_files == {"rec1.wav", "rec3.wav"}
+
+    def test_load_no_filter(self, batdetect2_files_test_setup):
+        """Test loading with filtering disabled."""
+        audio_dir, anns_dir, _ = batdetect2_files_test_setup
+        config_data = {
+            "name": "no_filter",
+            "audio_dir": str(audio_dir),
+            "annotations_dir": str(anns_dir),
+            "filter": None,
+        }
+        config = BatDetect2FilesAnnotations.model_validate(config_data)
+
+        result_set = load_batdetect2_files_annotated_dataset(config)
+        # All four recordings that have both audio and annotations present.
+        assert len(result_set.clip_annotations) == 4
+        loaded_files = {
+            ann.clip.recording.path.name for ann in result_set.clip_annotations
+        }
+        assert loaded_files == {"rec1.wav", "rec2.wav", "rec3.wav", "rec4.wav"}
+
+    def test_load_with_base_dir(self, tmp_path, batdetect2_files_test_setup):
+        """Test loading with a base_dir."""
+        audio_dir_abs, anns_dir_abs, _ = batdetect2_files_test_setup
+        base_dir = tmp_path
+        audio_dir_rel = audio_dir_abs.relative_to(base_dir)
+        anns_dir_rel = anns_dir_abs.relative_to(base_dir)
+
+        config = BatDetect2FilesAnnotations(
+            name="base_dir_test",
+            audio_dir=audio_dir_rel,
+            annotations_dir=anns_dir_rel,
+        )
+
+        result_set = load_batdetect2_files_annotated_dataset(
+            config, base_dir=base_dir
+        )
+        assert len(result_set.clip_annotations) == 1
+        # Relative config paths must resolve to absolute recording paths.
+        assert result_set.clip_annotations[0].clip.recording.path.is_absolute()
+        assert (
+            result_set.clip_annotations[0].clip.recording.path
+            == audio_dir_abs / "rec1.wav"
+        )
+
+    def test_load_missing_annotations_dir(self, tmp_path):
+        """Test error when annotations_dir does not exist."""
+        audio_dir = tmp_path / "audio"
+        audio_dir.mkdir()
+        anns_dir = tmp_path / "non_existent_anns"
+        config = BatDetect2FilesAnnotations(
+            name="missing_anns",
+            audio_dir=audio_dir,
+            annotations_dir=anns_dir,
+        )
+        # A missing annotations dir yields an empty set rather than raising.
+        result_set = load_batdetect2_files_annotated_dataset(config)
+        assert len(result_set.clip_annotations) == 0
+
+    def test_load_missing_audio_dir(self, batdetect2_files_test_setup):
+        """Test error or skipping when audio_dir does not exist or files missing."""
+        _, anns_dir, _ = batdetect2_files_test_setup
+        missing_audio_dir = Path(
+            "/tmp/non_existent_audio_dir_" + str(uuid.uuid4())
+        )
+        config = BatDetect2FilesAnnotations(
+            name="missing_audio",
+            audio_dir=missing_audio_dir,
+            annotations_dir=anns_dir,
+            filter=None,
+        )
+        result_set = load_batdetect2_files_annotated_dataset(config)
+        assert len(result_set.clip_annotations) == 0
+
+    def test_load_skips_invalid_json(self, batdetect2_files_test_setup):
+        """Test that invalid JSON files are skipped."""
+        audio_dir, anns_dir, _ = batdetect2_files_test_setup
+        (anns_dir / "invalid.json").write_text(".invalid json")
+        (anns_dir / "wrong_structure.json").write_text("[1, 2, 3]")
+
+        config = BatDetect2FilesAnnotations(
+            name="invalid_json_test",
+            audio_dir=audio_dir,
+            annotations_dir=anns_dir,
+            filter=None,
+        )
+        result_set = load_batdetect2_files_annotated_dataset(config)
+        # The two malformed files are skipped; the four valid ones remain.
+        assert len(result_set.clip_annotations) == 4
+
+    def test_load_skips_missing_individual_audio(
+        self, batdetect2_files_test_setup
+    ):
+        """Test skipping a file if its corresponding audio is missing."""
+        audio_dir, anns_dir, _ = batdetect2_files_test_setup
+        config = BatDetect2FilesAnnotations(
+            name="skip_missing_audio",
+            audio_dir=audio_dir,
+            annotations_dir=anns_dir,
+            filter=None,
+        )
+        result_set = load_batdetect2_files_annotated_dataset(config)
+        assert len(result_set.clip_annotations) == 4
+        loaded_files = {
+            ann.clip.recording.path.name for ann in result_set.clip_annotations
+        }
+        assert "rec_missing_audio.wav" not in loaded_files
+
+
+class TestLoadBatDetect2Merged:
+    """Tests for load_batdetect2_merged_annotated_dataset (single-file format)."""
+
+    def test_load_default_filter(self, batdetect2_merged_test_setup):
+        """Test loading merged file with default filter."""
+        audio_dir, anns_path, _ = batdetect2_merged_test_setup
+        config = BatDetect2MergedAnnotations(
+            name="merged_default",
+            audio_dir=audio_dir,
+            annotations_path=anns_path,
+        )
+
+        result_set = load_batdetect2_merged_annotated_dataset(config)
+
+        assert isinstance(result_set, data.AnnotationSet)
+        assert result_set.name == "merged_default"
+        # Only rec1 is both annotated and issue-free in the fixture.
+        assert len(result_set.clip_annotations) == 1
+
+        clip_ann = result_set.clip_annotations[0]
+        assert clip_ann.clip.recording.path.name == "rec1.wav"
+        assert clip_ann.clip.recording.duration == 5.0
+        assert len(clip_ann.sound_events) == 1
+
+        clip_class_tag = data.find_tag(clip_ann.tags, term_label="Class")
+        assert clip_class_tag is not None
+        assert clip_class_tag.value == "Myotis"
+
+    def test_load_only_annotated_false(self, batdetect2_merged_test_setup):
+        """Test merged filter with only_annotated=False."""
+        audio_dir, anns_path, _ = batdetect2_merged_test_setup
+        config = BatDetect2MergedAnnotations(
+            name="merged_ann_false",
+            audio_dir=audio_dir,
+            annotations_path=anns_path,
+            filter=AnnotationFilter(only_annotated=False, exclude_issues=True),
+        )
+        result_set = load_batdetect2_merged_annotated_dataset(config)
+        # Issue-free entries regardless of annotation status: rec1 and rec2.
+        assert len(result_set.clip_annotations) == 2
+        loaded_files = {
+            ann.clip.recording.path.name for ann in result_set.clip_annotations
+        }
+        assert loaded_files == {"rec1.wav", "rec2.wav"}
+
+    def test_load_exclude_issues_false(self, batdetect2_merged_test_setup):
+        """Test merged filter with exclude_issues=False."""
+        audio_dir, anns_path, _ = batdetect2_merged_test_setup
+        config = BatDetect2MergedAnnotations(
+            name="merged_iss_false",
+            audio_dir=audio_dir,
+            annotations_path=anns_path,
+            filter=AnnotationFilter(only_annotated=True, exclude_issues=False),
+        )
+        result_set = load_batdetect2_merged_annotated_dataset(config)
+        # Annotated entries regardless of issues: rec1 and rec3.
+        assert len(result_set.clip_annotations) == 2
+        loaded_files = {
+            ann.clip.recording.path.name for ann in result_set.clip_annotations
+        }
+        assert loaded_files == {"rec1.wav", "rec3.wav"}
+
+    def test_load_no_filter(self, batdetect2_merged_test_setup):
+        """Test loading merged file with filtering disabled."""
+        audio_dir, anns_path, _ = batdetect2_merged_test_setup
+        config_data = {
+            "name": "merged_no_filter",
+            "audio_dir": str(audio_dir),
+            "annotations_path": str(anns_path),
+            "filter": None,
+        }
+        config = BatDetect2MergedAnnotations.model_validate(config_data)
+
+        result_set = load_batdetect2_merged_annotated_dataset(config)
+        assert len(result_set.clip_annotations) == 4
+        loaded_files = {
+            ann.clip.recording.path.name for ann in result_set.clip_annotations
+        }
+        assert loaded_files == {"rec1.wav", "rec2.wav", "rec3.wav", "rec4.wav"}
+
+    def test_load_with_base_dir(self, tmp_path, batdetect2_merged_test_setup):
+        """Test loading merged file with a base_dir."""
+        audio_dir_abs, anns_path_abs, _ = batdetect2_merged_test_setup
+        base_dir = tmp_path
+        audio_dir_rel = audio_dir_abs.relative_to(base_dir)
+        anns_path_rel = anns_path_abs.relative_to(base_dir)
+
+        config = BatDetect2MergedAnnotations(
+            name="merged_base_dir",
+            audio_dir=audio_dir_rel,
+            annotations_path=anns_path_rel,
+        )
+
+        result_set = load_batdetect2_merged_annotated_dataset(
+            config, base_dir=base_dir
+        )
+        assert len(result_set.clip_annotations) == 1
+        # Relative config paths must resolve to absolute recording paths.
+        assert result_set.clip_annotations[0].clip.recording.path.is_absolute()
+        assert (
+            result_set.clip_annotations[0].clip.recording.path
+            == audio_dir_abs / "rec1.wav"
+        )
+
+    def test_load_missing_annotations_path(self, tmp_path):
+        """Test error when annotations_path does not exist."""
+        audio_dir = tmp_path / "audio"
+        audio_dir.mkdir()
+        anns_path = tmp_path / "non_existent_anns.json"
+        config = BatDetect2MergedAnnotations(
+            name="missing_anns_file",
+            audio_dir=audio_dir,
+            annotations_path=anns_path,
+        )
+        # Unlike a missing annotations dir, a missing merged file raises.
+        with pytest.raises(FileNotFoundError):
+            load_batdetect2_merged_annotated_dataset(config)
+
+    def test_load_missing_audio_dir(self, batdetect2_merged_test_setup):
+        """Test error/skipping when audio_dir does not exist in merged."""
+        _, anns_path, _ = batdetect2_merged_test_setup
+        missing_audio_dir = Path(
+            "/tmp/non_existent_audio_dir_merged_" + str(uuid.uuid4())
+        )
+        config = BatDetect2MergedAnnotations(
+            name="missing_audio_merged",
+            audio_dir=missing_audio_dir,
+            annotations_path=anns_path,
+            filter=None,
+        )
+        result_set = load_batdetect2_merged_annotated_dataset(config)
+        assert len(result_set.clip_annotations) == 0
+
+    def test_load_invalid_json_format(self, tmp_path):
+        """Test error for malformed JSON file."""
+        audio_dir = tmp_path / "audio"
+        audio_dir.mkdir()
+        anns_path = tmp_path / "invalid.json"
+        anns_path.write_text(".malformed json")
+        config = BatDetect2MergedAnnotations(
+            name="invalid_json",
+            audio_dir=audio_dir,
+            annotations_path=anns_path,
+        )
+        with pytest.raises(json.JSONDecodeError):
+            load_batdetect2_merged_annotated_dataset(config)
+
+    def test_load_json_not_a_list(self, tmp_path):
+        """Test error if JSON root is not a list."""
+        audio_dir = tmp_path / "audio"
+        audio_dir.mkdir()
+        anns_path = tmp_path / "not_list.json"
+        anns_path.write_text('{"not": "a list"}')
+        config = BatDetect2MergedAnnotations(
+            name="not_list", audio_dir=audio_dir, annotations_path=anns_path
+        )
+        with pytest.raises(TypeError):
+            load_batdetect2_merged_annotated_dataset(config)
+
+    def test_load_invalid_entry_in_list(self, batdetect2_merged_test_setup):
+        """Test skipping entries that don't conform to FileAnnotation."""
+        audio_dir, anns_path, merged_data = batdetect2_merged_test_setup
+        invalid_entry = {"wrong_field": 123}
+        merged_data_with_invalid = merged_data + [invalid_entry]
+        anns_path.write_text(json.dumps(merged_data_with_invalid))
+
+        config = BatDetect2MergedAnnotations(
+            name="invalid_entry",
+            audio_dir=audio_dir,
+            annotations_path=anns_path,
+            filter=None,
+        )
+        result_set = load_batdetect2_merged_annotated_dataset(config)
+        # The malformed entry is dropped; the four valid ones remain.
+        assert len(result_set.clip_annotations) == 4
+
+    def test_load_skips_missing_audio_in_merged(
+        self, batdetect2_merged_test_setup
+    ):
+        """Test skipping an entry if its audio file is missing."""
+        audio_dir, anns_path, merged_data = batdetect2_merged_test_setup
+        missing_audio_entry = create_legacy_file_annotation(
+            file_id="non_existent.wav", annotated=True, issues=False
+        )
+        merged_data_with_missing = merged_data + [missing_audio_entry]
+        anns_path.write_text(json.dumps(merged_data_with_missing))
+
+        config = BatDetect2MergedAnnotations(
+            name="skip_missing_audio_merged",
+            audio_dir=audio_dir,
+            annotations_path=anns_path,
+            filter=None,
+        )
+        result_set = load_batdetect2_merged_annotated_dataset(config)
+        assert len(result_set.clip_annotations) == 4
+        loaded_files = {
+            ann.clip.recording.path.name for ann in result_set.clip_annotations
+        }
+        assert "non_existent.wav" not in loaded_files
diff --git a/tests/test_data/test_conditions/__init__.py b/tests/test_data/test_conditions/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_data/test_conditions/test_clip.py b/tests/test_data/test_conditions/test_clip.py
new file mode 100644
index 00000000..dc9a642e
--- /dev/null
+++ b/tests/test_data/test_conditions/test_clip.py
@@ -0,0 +1,303 @@
+import json
+import textwrap
+import uuid
+from pathlib import Path
+
+from pydantic import TypeAdapter
+from soundevent import data
+
+from batdetect2.core import load_config
+from batdetect2.data.conditions import (
+ ClipAnnotationConditionConfig,
+ build_clip_annotation_condition,
+)
+
+
+def load_clip_condition_config(
+    tmp_path: Path,
+    yaml_string: str,
+) -> ClipAnnotationConditionConfig:
+    """Write a YAML snippet to a temp file and parse it as a clip condition config.
+
+    The snippet is dedented and stripped so tests can pass indented
+    triple-quoted strings.  A random file name avoids collisions when several
+    configs are written to the same ``tmp_path``.
+    """
+    config_path = tmp_path / f"{uuid.uuid4().hex}.yaml"
+    config_path.write_text(textwrap.dedent(yaml_string).strip())
+    return load_config(
+        config_path, schema=TypeAdapter(ClipAnnotationConditionConfig)
+    )
+
+
+def build_clip_condition_from_yaml(
+    tmp_path: Path,
+    yaml_string: str,
+    base_dir: Path | None = None,
+):
+    """Parse a YAML snippet and build the corresponding clip condition callable."""
+    config = load_clip_condition_config(tmp_path, yaml_string)
+    return build_clip_annotation_condition(config, base_dir=base_dir)
+
+
+def test_recording_satisfies_condition(
+    tmp_path: Path,
+    create_recording,
+    create_clip,
+    create_clip_annotation,
+) -> None:
+    """``recording_satisfies`` applies a recording-level condition to a clip annotation."""
+    recording_a = create_recording(path=tmp_path / "a.wav")
+    recording_b = create_recording(path=tmp_path / "b.wav")
+    clip_a = create_clip(recording_a)
+    clip_b = create_clip(recording_b)
+    clip_annotation_a = create_clip_annotation(clip_a)
+    clip_annotation_b = create_clip_annotation(clip_b)
+    # Only recording_a's UUID is listed in the id file.
+    ids_path = tmp_path / "recording_ids.json"
+    ids_path.write_text(json.dumps([str(recording_a.uuid)]))
+
+    condition = build_clip_condition_from_yaml(
+        tmp_path,
+        f"""
+        name: recording_satisfies
+        condition:
+          name: id_in_list
+          path: {ids_path}
+        """,
+    )
+
+    # The clip annotation is matched via its recording, not its own UUID.
+    assert condition(clip_annotation_a)
+    assert not condition(clip_annotation_b)
+
+
+def test_clip_id_in_list_condition(
+    tmp_path: Path,
+    create_recording,
+    create_clip,
+    create_clip_annotation,
+) -> None:
+    """``id_in_list`` at clip level matches the clip annotation's own UUID."""
+    recording_a = create_recording(path=tmp_path / "a.wav")
+    recording_b = create_recording(path=tmp_path / "b.wav")
+    clip_annotation_a = create_clip_annotation(create_clip(recording_a))
+    clip_annotation_b = create_clip_annotation(create_clip(recording_b))
+    # List contains the clip annotation UUID (not the recording UUID).
+    ids_path = tmp_path / "clip_annotation_ids.json"
+    ids_path.write_text(json.dumps([str(clip_annotation_a.uuid)]))
+
+    condition = build_clip_condition_from_yaml(
+        tmp_path,
+        f"""
+        name: id_in_list
+        path: {ids_path}
+        """,
+    )
+
+    assert condition(clip_annotation_a)
+    assert not condition(clip_annotation_b)
+
+
+def test_clip_has_tag_conditions(
+ tmp_path: Path,
+ create_recording,
+ create_clip,
+ create_clip_annotation,
+) -> None:
+ reviewed = data.Tag(key="status", value="reviewed")
+ train = data.Tag(key="split", value="train")
+
+ recording = create_recording(path=tmp_path / "rec.wav")
+ clip = create_clip(recording)
+ clip_annotation = create_clip_annotation(
+ clip,
+ clip_tags=[reviewed, train],
+ )
+ clip_annotation_missing = create_clip_annotation(
+ create_clip(recording),
+ clip_tags=[train],
+ )
+
+ condition = build_clip_condition_from_yaml(
+ tmp_path,
+ """
+ name: has_tag
+ tag:
+ key: status
+ value: reviewed
+ """,
+ )
+
+ assert condition(clip_annotation)
+ assert not condition(clip_annotation_missing)
+
+
+def test_clip_has_all_tags_condition(
+ tmp_path: Path,
+ create_recording,
+ create_clip,
+ create_clip_annotation,
+) -> None:
+ reviewed = data.Tag(key="status", value="reviewed")
+ train = data.Tag(key="split", value="train")
+
+ recording = create_recording(path=tmp_path / "rec.wav")
+ clip_annotation = create_clip_annotation(
+ create_clip(recording),
+ clip_tags=[reviewed, train],
+ )
+ clip_annotation_missing = create_clip_annotation(
+ create_clip(recording),
+ clip_tags=[reviewed],
+ )
+
+ condition = build_clip_condition_from_yaml(
+ tmp_path,
+ """
+ name: has_all_tags
+ tags:
+ - key: status
+ value: reviewed
+ - key: split
+ value: train
+ """,
+ )
+
+ assert condition(clip_annotation)
+ assert not condition(clip_annotation_missing)
+
+
+def test_clip_has_any_tag_condition(
+ tmp_path: Path,
+ create_recording,
+ create_clip,
+ create_clip_annotation,
+) -> None:
+ reviewed = data.Tag(key="status", value="reviewed")
+ train = data.Tag(key="split", value="train")
+
+ recording = create_recording(path=tmp_path / "rec.wav")
+ clip_annotation = create_clip_annotation(
+ create_clip(recording),
+ clip_tags=[reviewed, train],
+ )
+ clip_annotation_missing = create_clip_annotation(
+ create_clip(recording),
+ clip_tags=[data.Tag(key="split", value="test")],
+ )
+
+ condition = build_clip_condition_from_yaml(
+ tmp_path,
+ """
+ name: has_any_tag
+ tags:
+ - key: split
+ value: val
+ - key: split
+ value: train
+ """,
+ )
+
+ assert condition(clip_annotation)
+ assert not condition(clip_annotation_missing)
+
+
+def test_clip_all_of_condition(
+ tmp_path: Path,
+ create_recording,
+ create_clip,
+ create_clip_annotation,
+) -> None:
+ reviewed = data.Tag(key="status", value="reviewed")
+ train = data.Tag(key="split", value="train")
+
+ recording = create_recording(path=tmp_path / "rec.wav")
+ clip = create_clip(recording)
+ clip_annotation = create_clip_annotation(
+ clip,
+ clip_tags=[reviewed, train],
+ )
+ clip_annotation_missing = create_clip_annotation(
+ create_clip(recording),
+ clip_tags=[reviewed],
+ )
+
+ condition = build_clip_condition_from_yaml(
+ tmp_path,
+ """
+ name: all_of
+ conditions:
+ - name: has_tag
+ tag:
+ key: status
+ value: reviewed
+ - name: has_any_tag
+ tags:
+ - key: split
+ value: train
+ - key: split
+ value: val
+ """,
+ )
+
+ assert condition(clip_annotation)
+ assert not condition(clip_annotation_missing)
+
+
+def test_clip_any_of_condition(
+ tmp_path: Path,
+ create_recording,
+ create_clip,
+ create_clip_annotation,
+) -> None:
+ reviewed = data.Tag(key="status", value="reviewed")
+
+ recording = create_recording(path=tmp_path / "rec.wav")
+ clip_annotation = create_clip_annotation(
+ create_clip(recording),
+ clip_tags=[reviewed],
+ )
+ clip_annotation_missing = create_clip_annotation(
+ create_clip(recording),
+ clip_tags=[data.Tag(key="status", value="unchecked")],
+ )
+
+ condition = build_clip_condition_from_yaml(
+ tmp_path,
+ """
+ name: any_of
+ conditions:
+ - name: has_tag
+ tag:
+ key: split
+ value: val
+ - name: has_tag
+ tag:
+ key: status
+ value: reviewed
+ """,
+ )
+
+ assert condition(clip_annotation)
+ assert not condition(clip_annotation_missing)
+
+
+def test_clip_not_condition(
+ tmp_path: Path,
+ create_recording,
+ create_clip,
+ create_clip_annotation,
+) -> None:
+ recording = create_recording(path=tmp_path / "rec.wav")
+ clip_annotation = create_clip_annotation(
+ create_clip(recording),
+ clip_tags=[data.Tag(key="split", value="train")],
+ )
+ clip_annotation_missing = create_clip_annotation(
+ create_clip(recording),
+ clip_tags=[data.Tag(key="split", value="val")],
+ )
+
+ condition = build_clip_condition_from_yaml(
+ tmp_path,
+ """
+ name: not
+ condition:
+ name: has_tag
+ tag:
+ key: split
+ value: val
+ """,
+ )
+
+ assert condition(clip_annotation)
+ assert not condition(clip_annotation_missing)
diff --git a/tests/test_data/test_conditions/test_recording.py b/tests/test_data/test_conditions/test_recording.py
new file mode 100644
index 00000000..0ae747aa
--- /dev/null
+++ b/tests/test_data/test_conditions/test_recording.py
@@ -0,0 +1,564 @@
+import json
+import textwrap
+import uuid
+from pathlib import Path
+
+import pytest
+from pydantic import TypeAdapter
+from soundevent import data
+
+from batdetect2.core import load_config
+from batdetect2.data.conditions import (
+ RecordingConditionConfig,
+ build_recording_condition,
+)
+
+
+def load_recording_condition_config(
+    tmp_path: Path,
+    yaml_string: str,
+) -> RecordingConditionConfig:
+    """Write a YAML snippet to a temp file and parse it as a recording condition config.
+
+    Dedents/strips the snippet so tests can use indented triple-quoted
+    strings; a random file name avoids collisions within ``tmp_path``.
+    """
+    config_path = tmp_path / f"{uuid.uuid4().hex}.yaml"
+    config_path.write_text(textwrap.dedent(yaml_string).strip())
+    return load_config(
+        config_path,
+        schema=TypeAdapter(RecordingConditionConfig),
+    )
+
+
+def build_recording_condition_from_yaml(
+    tmp_path: Path,
+    yaml_string: str,
+    base_dir: Path | None = None,
+):
+    """Parse a YAML snippet and build the corresponding recording condition callable."""
+    config = load_recording_condition_config(tmp_path, yaml_string)
+    return build_recording_condition(config, base_dir=base_dir)
+
+
+def test_id_in_list_condition(tmp_path: Path, create_recording) -> None:
+ recording_a = create_recording(path=tmp_path / "a.wav")
+ recording_b = create_recording(path=tmp_path / "b.wav")
+ ids_path = tmp_path / "recording_ids.json"
+ ids_path.write_text(json.dumps([str(recording_a.uuid)]))
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ f"""
+ name: id_in_list
+ path: {ids_path}
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_id_in_list_condition_uses_base_dir(
+ tmp_path: Path,
+ create_recording,
+) -> None:
+ recording_a = create_recording(path=tmp_path / "a.wav")
+ recording_b = create_recording(path=tmp_path / "b.wav")
+ split_dir = tmp_path / "splits"
+ split_dir.mkdir()
+ ids_path = split_dir / "train_ids.json"
+ ids_path.write_text(json.dumps([str(recording_a.uuid)]))
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ """
+ name: id_in_list
+ path: splits/train_ids.json
+ """,
+ base_dir=tmp_path,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_id_in_list_condition_raises_for_non_list_json(
+    tmp_path: Path,
+) -> None:
+    """Building fails with TypeError when the JSON id file is an object, not a list."""
+    ids_path = tmp_path / "recording_ids.json"
+    # A JSON object instead of the expected top-level list.
+    ids_path.write_text(json.dumps({"id": "foo"}))
+
+    with pytest.raises(TypeError, match="Expected JSON list"):
+        build_recording_condition_from_yaml(
+            tmp_path,
+            f"""
+            name: id_in_list
+            path: {ids_path}
+            """,
+        )
+
+
+def test_id_in_list_condition_raises_for_invalid_id(tmp_path: Path) -> None:
+    """Building fails with ValueError when a listed id is not a valid UUID."""
+    ids_path = tmp_path / "recording_ids.json"
+    ids_path.write_text(json.dumps(["not-a-uuid"]))
+
+    with pytest.raises(ValueError, match="Invalid ID"):
+        build_recording_condition_from_yaml(
+            tmp_path,
+            f"""
+            name: id_in_list
+            path: {ids_path}
+            """,
+        )
+
+
+def test_id_in_list_condition_supports_txt_format(
+ tmp_path: Path,
+ create_recording,
+) -> None:
+ recording_a = create_recording(path=tmp_path / "a.wav")
+ recording_b = create_recording(path=tmp_path / "b.wav")
+ ids_path = tmp_path / "recording_ids.txt"
+ ids_path.write_text(f"{recording_a.uuid}\n")
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ f"""
+ name: id_in_list
+ path: {ids_path}
+ format: txt
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_id_in_list_condition_supports_json_field(
+ tmp_path: Path,
+ create_recording,
+) -> None:
+ recording_a = create_recording(path=tmp_path / "a.wav")
+ recording_b = create_recording(path=tmp_path / "b.wav")
+ ids_path = tmp_path / "recording_ids.json"
+ ids_path.write_text(
+ json.dumps(
+ {
+ "train": [str(recording_a.uuid)],
+ "val": [str(recording_b.uuid)],
+ }
+ )
+ )
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ f"""
+ name: id_in_list
+ path: {ids_path}
+ format:
+ name: json
+ field: train
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_id_in_list_condition_supports_csv_column(
+ tmp_path: Path,
+ create_recording,
+) -> None:
+ recording_a = create_recording(path=tmp_path / "a.wav")
+ recording_b = create_recording(path=tmp_path / "b.wav")
+ ids_path = tmp_path / "recording_ids.csv"
+ ids_path.write_text(f"recording_uuid\n{recording_a.uuid}\n")
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ f"""
+ name: id_in_list
+ path: {ids_path}
+ format:
+ name: csv
+ column: recording_uuid
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_path_in_list_condition_supports_txt_format(
+ tmp_path: Path,
+ create_recording,
+) -> None:
+ audio_dir = tmp_path / "audio"
+ audio_dir.mkdir()
+ recording_a = create_recording(path=audio_dir / "a.wav")
+ recording_b = create_recording(path=audio_dir / "b.wav")
+ paths_file = tmp_path / "recording_paths.txt"
+ paths_file.write_text(f"{recording_a.path}\n")
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ f"""
+ name: path_in_list
+ path: {paths_file}
+ format: txt
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_path_in_list_condition_supports_json_field(
+ tmp_path: Path,
+ create_recording,
+) -> None:
+ audio_dir = tmp_path / "audio"
+ audio_dir.mkdir()
+ recording_a = create_recording(path=audio_dir / "a.wav")
+ recording_b = create_recording(path=audio_dir / "b.wav")
+ paths_file = tmp_path / "recording_paths.json"
+ paths_file.write_text(
+ json.dumps(
+ {
+ "train": [str(recording_a.path)],
+ "val": [str(recording_b.path)],
+ }
+ )
+ )
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ f"""
+ name: path_in_list
+ path: {paths_file}
+ format:
+ name: json
+ field: train
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_path_in_list_condition_supports_csv_column(
+ tmp_path: Path,
+ create_recording,
+) -> None:
+ audio_dir = tmp_path / "audio"
+ audio_dir.mkdir()
+ recording_a = create_recording(path=audio_dir / "a.wav")
+ recording_b = create_recording(path=audio_dir / "b.wav")
+ paths_file = tmp_path / "recording_paths.csv"
+ paths_file.write_text(f"recording_path\n{recording_a.path}\n")
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ f"""
+ name: path_in_list
+ path: {paths_file}
+ format:
+ name: csv
+ column: recording_path
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_path_in_list_condition_uses_base_dir(
+ tmp_path: Path,
+ create_recording,
+) -> None:
+ data_dir = tmp_path / "dataset"
+ audio_dir = data_dir / "audio"
+ audio_dir.mkdir(parents=True)
+ recording_a = create_recording(path=audio_dir / "a.wav")
+ recording_b = create_recording(path=audio_dir / "b.wav")
+ paths_file = tmp_path / "recording_paths.txt"
+ paths_file.write_text(f"{recording_a.path}\n")
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ f"""
+ name: path_in_list
+ path: {paths_file}
+ format: txt
+ base_dir: {data_dir}
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_path_in_list_condition_outside_allow(
+    tmp_path: Path,
+    create_recording,
+) -> None:
+    """``on_outside: allow`` accepts recordings that fall outside ``base_dir``.
+
+    Recordings under ``base_dir`` are still required to match the path list;
+    recordings outside it pass unconditionally (they cannot be compared
+    against the base-dir-relative list — presumably by design; confirm
+    against the condition implementation).
+    """
+    data_dir = tmp_path / "dataset"
+    inside_dir = data_dir / "audio"
+    inside_dir.mkdir(parents=True)
+    outside_dir = tmp_path / "other"
+    outside_dir.mkdir()
+    recording_inside = create_recording(path=inside_dir / "a.wav")
+    recording_outside = create_recording(path=outside_dir / "x.wav")
+    paths_file = tmp_path / "recording_paths.txt"
+    # The listed path matches neither recording, so any acceptance must come
+    # from the on_outside policy alone.
+    paths_file.write_text("dataset/audio/unknown.wav\n")
+
+    condition = build_recording_condition_from_yaml(
+        tmp_path,
+        f"""
+        name: path_in_list
+        path: {paths_file}
+        format: txt
+        base_dir: {data_dir}
+        on_outside: allow
+        """,
+    )
+
+    assert condition(recording_outside)
+    assert not condition(recording_inside)
+
+
+def test_path_in_list_condition_outside_warn(
+    tmp_path: Path,
+    create_recording,
+) -> None:
+    """``on_outside: warn`` behaves like ``allow`` for the returned decision.
+
+    NOTE(review): this test does not capture or assert the emitted warning;
+    consider wrapping the outside-call in ``pytest.warns`` once the warning
+    mechanism (``warnings.warn`` vs. logging) is confirmed.
+    """
+    data_dir = tmp_path / "dataset"
+    inside_dir = data_dir / "audio"
+    inside_dir.mkdir(parents=True)
+    outside_dir = tmp_path / "other"
+    outside_dir.mkdir()
+    recording_inside = create_recording(path=inside_dir / "a.wav")
+    recording_outside = create_recording(path=outside_dir / "x.wav")
+    paths_file = tmp_path / "recording_paths.txt"
+    paths_file.write_text("dataset/audio/unknown.wav\n")
+
+    condition = build_recording_condition_from_yaml(
+        tmp_path,
+        f"""
+        name: path_in_list
+        path: {paths_file}
+        format: txt
+        base_dir: {data_dir}
+        on_outside: warn
+        """,
+    )
+
+    assert condition(recording_outside)
+    assert not condition(recording_inside)
+
+
+def test_path_in_list_condition_outside_error(
+    tmp_path: Path,
+    create_recording,
+) -> None:
+    """``on_outside: error`` raises ValueError for recordings outside ``base_dir``.
+
+    In-tree recordings are still evaluated against the list as usual.
+    """
+    data_dir = tmp_path / "dataset"
+    inside_dir = data_dir / "audio"
+    inside_dir.mkdir(parents=True)
+    outside_dir = tmp_path / "other"
+    outside_dir.mkdir()
+    recording_inside = create_recording(path=inside_dir / "a.wav")
+    recording_outside = create_recording(path=outside_dir / "x.wav")
+    paths_file = tmp_path / "recording_paths.txt"
+    # The inside recording is listed, so it should pass normally.
+    paths_file.write_text(f"{recording_inside.path}\n")
+
+    condition = build_recording_condition_from_yaml(
+        tmp_path,
+        f"""
+        name: path_in_list
+        path: {paths_file}
+        format: txt
+        base_dir: {data_dir}
+        on_outside: error
+        """,
+    )
+
+    assert condition(recording_inside)
+    with pytest.raises(ValueError, match="outside"):
+        condition(recording_outside)
+
+
+def test_has_tag_condition(tmp_path: Path, create_recording) -> None:
+ train = data.Tag(key="split", value="train")
+ val = data.Tag(key="split", value="val")
+
+ recording_a = create_recording(
+ path=tmp_path / "a.wav",
+ tags=[train],
+ )
+ recording_b = create_recording(
+ path=tmp_path / "b.wav",
+ tags=[val],
+ )
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ """
+ name: has_tag
+ tag:
+ key: split
+ value: train
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_has_all_tags_condition(tmp_path: Path, create_recording) -> None:
+ train = data.Tag(key="split", value="train")
+ uk = data.Tag(key="region", value="uk")
+
+ recording_a = create_recording(
+ path=tmp_path / "a.wav",
+ tags=[train, uk],
+ )
+ recording_b = create_recording(
+ path=tmp_path / "b.wav",
+ tags=[train],
+ )
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ """
+ name: has_all_tags
+ tags:
+ - key: split
+ value: train
+ - key: region
+ value: uk
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_has_any_tag_condition(tmp_path: Path, create_recording) -> None:
+ uk = data.Tag(key="region", value="uk")
+ us = data.Tag(key="region", value="us")
+
+ recording_a = create_recording(
+ path=tmp_path / "a.wav",
+ tags=[uk],
+ )
+ recording_b = create_recording(
+ path=tmp_path / "b.wav",
+ tags=[us],
+ )
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ """
+ name: has_any_tag
+ tags:
+ - key: region
+ value: eu
+ - key: region
+ value: uk
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_all_of_condition(tmp_path: Path, create_recording) -> None:
+ train = data.Tag(key="split", value="train")
+ uk = data.Tag(key="region", value="uk")
+ us = data.Tag(key="region", value="us")
+
+ recording_a = create_recording(
+ path=tmp_path / "a.wav",
+ tags=[train, uk],
+ )
+ recording_b = create_recording(
+ path=tmp_path / "b.wav",
+ tags=[train, us],
+ )
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ """
+ name: all_of
+ conditions:
+ - name: has_tag
+ tag:
+ key: split
+ value: train
+ - name: has_any_tag
+ tags:
+ - key: region
+ value: eu
+ - key: region
+ value: uk
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_any_of_condition(tmp_path: Path, create_recording) -> None:
+ train = data.Tag(key="split", value="train")
+ us = data.Tag(key="region", value="us")
+
+ recording_a = create_recording(
+ path=tmp_path / "a.wav",
+ tags=[train],
+ )
+ recording_b = create_recording(
+ path=tmp_path / "b.wav",
+ tags=[us],
+ )
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ """
+ name: any_of
+ conditions:
+ - name: has_tag
+ tag:
+ key: region
+ value: eu
+ - name: has_tag
+ tag:
+ key: split
+ value: train
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
+
+
+def test_not_condition(tmp_path: Path, create_recording) -> None:
+ uk = data.Tag(key="region", value="uk")
+ us = data.Tag(key="region", value="us")
+
+ recording_a = create_recording(
+ path=tmp_path / "a.wav",
+ tags=[uk],
+ )
+ recording_b = create_recording(
+ path=tmp_path / "b.wav",
+ tags=[us],
+ )
+
+ condition = build_recording_condition_from_yaml(
+ tmp_path,
+ """
+ name: not
+ condition:
+ name: has_tag
+ tag:
+ key: region
+ value: us
+ """,
+ )
+
+ assert condition(recording_a)
+ assert not condition(recording_b)
diff --git a/tests/test_data/test_conditions/test_sound_events.py b/tests/test_data/test_conditions/test_sound_events.py
new file mode 100644
index 00000000..9e7893f5
--- /dev/null
+++ b/tests/test_data/test_conditions/test_sound_events.py
@@ -0,0 +1,400 @@
+import json
+import textwrap
+import uuid
+from pathlib import Path
+
+import pytest
+from pydantic import TypeAdapter
+from soundevent import data
+
+from batdetect2.core import load_config
+from batdetect2.data.conditions import (
+ SoundEventConditionConfig,
+ build_sound_event_condition,
+)
+
+
+def load_sound_event_condition_config(
+    tmp_path: Path,
+    yaml_string: str,
+) -> SoundEventConditionConfig:
+    """Write a YAML snippet to a temp file and parse it as a sound event condition config.
+
+    Dedents/strips the snippet so tests can use indented triple-quoted
+    strings; a random file name avoids collisions within ``tmp_path``.
+    """
+    config_path = tmp_path / f"{uuid.uuid4().hex}.yaml"
+    config_path.write_text(textwrap.dedent(yaml_string).strip())
+    return load_config(
+        config_path,
+        schema=TypeAdapter(SoundEventConditionConfig),
+    )
+
+
+def build_condition_from_str(
+    tmp_path: Path,
+    yaml_string: str,
+    base_dir: Path | None = None,
+):
+    """Parse a YAML snippet and build the corresponding sound event condition callable."""
+    config = load_sound_event_condition_config(tmp_path, yaml_string)
+    return build_sound_event_condition(config, base_dir=base_dir)
+
+
+def create_sound_event_annotation(
+    recording: data.Recording,
+    geometry: data.Geometry,
+    tags: list[data.Tag] | None = None,
+) -> data.SoundEventAnnotation:
+    """Build a sound event annotation on ``recording`` with the given geometry and tags."""
+    return data.SoundEventAnnotation(
+        sound_event=data.SoundEvent(
+            recording=recording,
+            geometry=geometry,
+        ),
+        # None normalizes to an empty tag list.
+        tags=tags or [],
+    )
+
+
+def test_has_tag_condition(
+ sound_event: data.SoundEvent, tmp_path: Path
+) -> None:
+ condition = build_condition_from_str(
+ tmp_path,
+ """
+ name: has_tag
+ tag:
+ key: species
+ value: Myotis myotis
+ """,
+ )
+
+ passing = data.SoundEventAnnotation(
+ sound_event=sound_event,
+ tags=[data.Tag(key="species", value="Myotis myotis")],
+ )
+ failing = data.SoundEventAnnotation(
+ sound_event=sound_event,
+ tags=[data.Tag(key="species", value="Eptesicus fuscus")],
+ )
+
+ assert condition(passing)
+ assert not condition(failing)
+
+
+def test_has_all_tags_condition(
+ sound_event: data.SoundEvent,
+ tmp_path: Path,
+) -> None:
+ condition = build_condition_from_str(
+ tmp_path,
+ """
+ name: has_all_tags
+ tags:
+ - key: species
+ value: Myotis myotis
+ - key: event
+ value: Echolocation
+ """,
+ )
+
+ passing = data.SoundEventAnnotation(
+ sound_event=sound_event,
+ tags=[
+ data.Tag(key="species", value="Myotis myotis"),
+ data.Tag(key="event", value="Echolocation"),
+ ],
+ )
+ failing = data.SoundEventAnnotation(
+ sound_event=sound_event,
+ tags=[data.Tag(key="species", value="Myotis myotis")],
+ )
+
+ assert condition(passing)
+ assert not condition(failing)
+
+
+def test_has_any_tag_condition(
+ sound_event: data.SoundEvent,
+ tmp_path: Path,
+) -> None:
+ condition = build_condition_from_str(
+ tmp_path,
+ """
+ name: has_any_tag
+ tags:
+ - key: species
+ value: Myotis myotis
+ - key: event
+ value: Echolocation
+ """,
+ )
+
+ passing = data.SoundEventAnnotation(
+ sound_event=sound_event,
+ tags=[data.Tag(key="event", value="Echolocation")],
+ )
+ failing = data.SoundEventAnnotation(
+ sound_event=sound_event,
+ tags=[
+ data.Tag(key="species", value="Eptesicus fuscus"),
+ data.Tag(key="event", value="Social"),
+ ],
+ )
+
+ assert condition(passing)
+ assert not condition(failing)
+
+
+def test_not_condition(sound_event: data.SoundEvent, tmp_path: Path) -> None:
+ condition = build_condition_from_str(
+ tmp_path,
+ """
+ name: not
+ condition:
+ name: has_tag
+ tag:
+ key: species
+ value: Myotis myotis
+ """,
+ )
+
+ passing = data.SoundEventAnnotation(
+ sound_event=sound_event,
+ tags=[data.Tag(key="species", value="Eptesicus fuscus")],
+ )
+ failing = data.SoundEventAnnotation(
+ sound_event=sound_event,
+ tags=[data.Tag(key="species", value="Myotis myotis")],
+ )
+
+ assert condition(passing)
+ assert not condition(failing)
+
+
+def test_id_in_list_condition(
+ sound_event: data.SoundEvent, tmp_path: Path
+) -> None:
+ passing = data.SoundEventAnnotation(sound_event=sound_event)
+ failing = data.SoundEventAnnotation(sound_event=sound_event)
+ ids_path = tmp_path / "sound_event_ids.json"
+ ids_path.write_text(json.dumps([str(passing.uuid)]))
+
+ condition = build_condition_from_str(
+ tmp_path,
+ f"""
+ name: id_in_list
+ path: {ids_path}
+ """,
+ )
+
+ assert condition(passing)
+ assert not condition(failing)
+
+
+def test_id_in_list_condition_uses_base_dir(
+ sound_event: data.SoundEvent,
+ tmp_path: Path,
+) -> None:
+ passing = data.SoundEventAnnotation(sound_event=sound_event)
+ failing = data.SoundEventAnnotation(sound_event=sound_event)
+ split_dir = tmp_path / "splits"
+ split_dir.mkdir()
+ ids_path = split_dir / "sound_event_ids.json"
+ ids_path.write_text(json.dumps([str(passing.uuid)]))
+
+ condition = build_condition_from_str(
+ tmp_path,
+ """
+ name: id_in_list
+ path: splits/sound_event_ids.json
+ """,
+ base_dir=tmp_path,
+ )
+
+ assert condition(passing)
+ assert not condition(failing)
+
+
+@pytest.mark.parametrize(
+    "operator,seconds,passing_duration,failing_duration",
+    [
+        ("lt", 2, 1, 2),
+        ("lte", 2, 2, 3),
+        ("gt", 2, 3, 2),
+        ("gte", 2, 2, 1),
+        ("eq", 2, 2, 3),
+    ],
+)
+def test_duration_condition(
+    tmp_path: Path,
+    recording: data.Recording,
+    operator: str,
+    seconds: int,
+    passing_duration: int,
+    failing_duration: int,
+) -> None:
+    """Duration conditions compare a TimeInterval's length against ``seconds``.
+
+    For each comparison operator, ``passing_duration`` satisfies
+    ``duration <operator> seconds`` and ``failing_duration`` violates it.
+    """
+    condition = build_condition_from_str(
+        tmp_path,
+        f"""
+        name: duration
+        operator: {operator}
+        seconds: {seconds}
+        """,
+    )
+
+    # Intervals start at 0, so the end coordinate equals the duration.
+    passing = create_sound_event_annotation(
+        recording=recording,
+        geometry=data.TimeInterval(coordinates=[0, passing_duration]),
+    )
+    failing = create_sound_event_annotation(
+        recording=recording,
+        geometry=data.TimeInterval(coordinates=[0, failing_duration]),
+    )
+
+    assert condition(passing)
+    assert not condition(failing)
+
+
+@pytest.mark.parametrize(
+    "boundary,operator,hertz,passing_bbox,failing_bbox",
+    [
+        ("high", "lt", 300, [0, 100, 1, 200], [0, 100, 1, 300]),
+        ("high", "lte", 300, [0, 100, 1, 300], [0, 100, 1, 400]),
+        ("high", "gt", 300, [0, 100, 1, 400], [0, 100, 1, 300]),
+        ("high", "gte", 300, [0, 100, 1, 300], [0, 100, 1, 200]),
+        ("high", "eq", 300, [0, 100, 1, 300], [0, 100, 1, 400]),
+        ("low", "lt", 200, [0, 100, 1, 400], [0, 200, 1, 400]),
+        ("low", "lte", 200, [0, 200, 1, 400], [0, 300, 1, 400]),
+        ("low", "gt", 200, [0, 300, 1, 400], [0, 200, 1, 400]),
+        ("low", "gte", 200, [0, 200, 1, 400], [0, 100, 1, 400]),
+        ("low", "eq", 200, [0, 200, 1, 400], [0, 300, 1, 400]),
+    ],
+)
+def test_frequency_condition(
+    tmp_path: Path,
+    recording: data.Recording,
+    boundary: str,
+    operator: str,
+    hertz: int,
+    passing_bbox: list[int],
+    failing_bbox: list[int],
+) -> None:
+    """Frequency conditions compare a bounding box edge against ``hertz``.
+
+    Bounding box coordinates are ``[start_time, low_freq, end_time, high_freq]``;
+    ``boundary`` selects which frequency edge is compared.  Each case gives a
+    box that satisfies (``passing_bbox``) or violates (``failing_bbox``)
+    ``<boundary> <operator> <hertz>``.
+    """
+    condition = build_condition_from_str(
+        tmp_path,
+        f"""
+        name: frequency
+        boundary: {boundary}
+        operator: {operator}
+        hertz: {hertz}
+        """,
+    )
+
+    passing = create_sound_event_annotation(
+        recording=recording,
+        geometry=data.BoundingBox(
+            coordinates=[float(value) for value in passing_bbox]
+        ),
+    )
+    failing = create_sound_event_annotation(
+        recording=recording,
+        geometry=data.BoundingBox(
+            coordinates=[float(value) for value in failing_bbox]
+        ),
+    )
+
+    assert condition(passing)
+    assert not condition(failing)
+
+
+def test_frequency_condition_is_false_for_temporal_geometries(
+    tmp_path: Path,
+    recording: data.Recording,
+) -> None:
+    """A frequency condition evaluates to False for geometries with no frequency axis."""
+    condition = build_condition_from_str(
+        tmp_path,
+        """
+        name: frequency
+        boundary: low
+        operator: eq
+        hertz: 200
+        """,
+    )
+
+    passing = create_sound_event_annotation(
+        recording=recording,
+        geometry=data.BoundingBox(coordinates=[0, 200, 1, 400]),
+    )
+    # TimeInterval carries only time coordinates, so the condition cannot
+    # extract a frequency and must return False rather than raise.
+    failing = create_sound_event_annotation(
+        recording=recording,
+        geometry=data.TimeInterval(coordinates=[0, 3]),
+    )
+
+    assert condition(passing)
+    assert not condition(failing)
+
+
+def test_has_all_tags_fails_if_empty(tmp_path: Path) -> None:
+ with pytest.raises(ValueError, match="at least one tag"):
+ build_condition_from_str(
+ tmp_path,
+ """
+ name: has_all_tags
+ tags: []
+ """,
+ )
+
+
+def test_all_of_condition(tmp_path: Path, recording: data.Recording) -> None:
+ condition = build_condition_from_str(
+ tmp_path,
+ """
+ name: all_of
+ conditions:
+ - name: has_tag
+ tag:
+ key: species
+ value: Myotis myotis
+ - name: duration
+ operator: lt
+ seconds: 1
+ """,
+ )
+
+ passing = create_sound_event_annotation(
+ recording=recording,
+ geometry=data.TimeInterval(coordinates=[0, 0.5]),
+ tags=[data.Tag(key="species", value="Myotis myotis")],
+ )
+ failing = create_sound_event_annotation(
+ recording=recording,
+ geometry=data.TimeInterval(coordinates=[0, 2]),
+ tags=[data.Tag(key="species", value="Myotis myotis")],
+ )
+
+ assert condition(passing)
+ assert not condition(failing)
+
+
+def test_any_of_condition(tmp_path: Path, recording: data.Recording) -> None:
+ condition = build_condition_from_str(
+ tmp_path,
+ """
+ name: any_of
+ conditions:
+ - name: has_tag
+ tag:
+ key: species
+ value: Myotis myotis
+ - name: duration
+ operator: lt
+ seconds: 1
+ """,
+ )
+
+ passing = create_sound_event_annotation(
+ recording=recording,
+ geometry=data.TimeInterval(coordinates=[0, 2]),
+ tags=[data.Tag(key="species", value="Myotis myotis")],
+ )
+ failing = create_sound_event_annotation(
+ recording=recording,
+ geometry=data.TimeInterval(coordinates=[0, 2]),
+ tags=[data.Tag(key="species", value="Eptesicus fuscus")],
+ )
+
+ assert condition(passing)
+ assert not condition(failing)
diff --git a/tests/test_data/test_datasets.py b/tests/test_data/test_datasets.py
new file mode 100644
index 00000000..ca4e5c05
--- /dev/null
+++ b/tests/test_data/test_datasets.py
@@ -0,0 +1,100 @@
+import json
+from pathlib import Path
+
+from soundevent import data
+
+from batdetect2.data import DatasetConfig, load_dataset
+from batdetect2.data.conditions import (
+ HasTagConfig,
+ IdInListConfig,
+ RecordingSatisfiesConfig,
+)
+
+
+def test_load_dataset_applies_clip_filter(
+    example_dataset: DatasetConfig,
+    tmp_path: Path,
+) -> None:
+    """A clip filter built from ``recording_satisfies``/``id_in_list`` narrows the dataset."""
+    # Load once unfiltered to pick a known recording UUID from the fixture.
+    baseline = list(load_dataset(example_dataset))
+    keep_recording_id = str(baseline[0].clip.recording.uuid)
+    ids_path = tmp_path / "train_ids.json"
+    ids_path.write_text(json.dumps([keep_recording_id]))
+
+    config = example_dataset.model_copy(
+        update={
+            "clip_filter": RecordingSatisfiesConfig(
+                condition=IdInListConfig(path=ids_path)
+            )
+        }
+    )
+
+    filtered = list(load_dataset(config))
+
+    # Assumes each recording in the example dataset yields exactly one clip
+    # annotation — TODO confirm against the example_dataset fixture.
+    assert len(filtered) == 1
+    assert str(filtered[0].clip.recording.uuid) == keep_recording_id
+
+
+def test_load_dataset_clip_filter_is_skipped_when_filters_disabled(
+    example_dataset: DatasetConfig,
+    tmp_path: Path,
+) -> None:
+    """``apply_filters=False`` ignores a configured clip filter entirely."""
+    baseline = list(load_dataset(example_dataset))
+    keep_recording_id = str(baseline[0].clip.recording.uuid)
+    ids_path = tmp_path / "train_ids.json"
+    ids_path.write_text(json.dumps([keep_recording_id]))
+
+    config = example_dataset.model_copy(
+        update={
+            "clip_filter": RecordingSatisfiesConfig(
+                condition=IdInListConfig(path=ids_path)
+            )
+        }
+    )
+
+    filtered = list(load_dataset(config, apply_filters=False))
+
+    # With filters disabled the filtered load equals the unfiltered baseline.
+    assert len(filtered) == len(baseline)
+
+
+def test_load_dataset_resolves_clip_filter_paths_from_base_dir(
+    example_dataset: DatasetConfig,
+    tmp_path: Path,
+) -> None:
+    """A relative filter path in the config is resolved against ``base_dir``."""
+    baseline = list(load_dataset(example_dataset))
+    keep_recording_id = str(baseline[0].clip.recording.uuid)
+    split_dir = tmp_path / "splits"
+    split_dir.mkdir()
+    ids_path = split_dir / "train_ids.json"
+    ids_path.write_text(json.dumps([keep_recording_id]))
+
+    config = example_dataset.model_copy(
+        update={
+            "clip_filter": RecordingSatisfiesConfig(
+                # Relative path: only resolvable via base_dir below.
+                condition=IdInListConfig(path=Path("splits/train_ids.json"))
+            )
+        }
+    )
+
+    filtered = list(load_dataset(config, base_dir=tmp_path))
+
+    assert len(filtered) == 1
+    assert str(filtered[0].clip.recording.uuid) == keep_recording_id
+
+
+def test_sound_event_filter_keeps_empty_clips(
+    example_dataset: DatasetConfig,
+) -> None:
+    """A sound event filter empties clips' annotations but does not drop the clips."""
+    config = example_dataset.model_copy(
+        update={
+            # A tag value that matches no sound event in the example dataset.
+            "sound_event_filter": HasTagConfig(
+                tag=data.Tag(key="species", value="__missing_species__")
+            )
+        }
+    )
+
+    filtered = list(load_dataset(config))
+
+    # Assumes the example dataset contains 3 clip annotations — TODO confirm
+    # against the example_dataset fixture.
+    assert len(filtered) == 3
+    assert all(
+        len(clip_annotation.sound_events) == 0 for clip_annotation in filtered
+    )
diff --git a/tests/test_data/test_predictions/__init__.py b/tests/test_data/test_predictions/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_data/test_predictions/test_parquet.py b/tests/test_data/test_predictions/test_parquet.py
new file mode 100644
index 00000000..408ada99
--- /dev/null
+++ b/tests/test_data/test_predictions/test_parquet.py
@@ -0,0 +1,168 @@
+from pathlib import Path
+from uuid import uuid4
+
+import numpy as np
+import pytest
+from soundevent import data
+
+from batdetect2.outputs.formats import (
+ ParquetOutputConfig,
+ build_output_formatter,
+)
+from batdetect2.postprocess.types import ClipDetections, Detection
+from batdetect2.targets.types import TargetProtocol
+
+
+@pytest.fixture
+def sample_formatter(sample_targets: TargetProtocol):
+ return build_output_formatter(  # parquet-backed formatter bound to the shared sample targets
+ config=ParquetOutputConfig(),
+ targets=sample_targets,
+ )
+
+
+def test_roundtrip(
+ sample_formatter,
+ clip: data.Clip,
+ sample_targets: TargetProtocol,
+ tmp_path: Path,
+):
+ detections = [  # ten random detections to exercise save/load
+ Detection(
+ geometry=data.BoundingBox(
+ coordinates=list(np.random.uniform(size=[4]))
+ ),
+ detection_score=0.5,
+ class_scores=np.random.uniform(
+ size=len(sample_targets.class_names)
+ ),
+ features=np.random.uniform(size=32),
+ )
+ for _ in range(10)
+ ]
+
+ prediction = ClipDetections(clip=clip, detections=detections)
+
+ path = tmp_path / "predictions.parquet"
+
+ sample_formatter.save(predictions=[prediction], path=path)
+
+ assert path.exists()
+
+ recovered = sample_formatter.load(path=path)
+
+ assert len(recovered) == 1
+ assert recovered[0].clip == prediction.clip
+
+ for recovered_prediction, detection in zip(
+ recovered[0].detections, detections, strict=True
+ ):
+ assert (
+ recovered_prediction.detection_score == detection.detection_score
+ )
+ # Parquet stores float64, so allclose with default tolerances suffices.
+ assert np.allclose(
+ recovered_prediction.class_scores, detection.class_scores
+ )
+ assert np.allclose(recovered_prediction.features, detection.features)
+ assert recovered_prediction.geometry == detection.geometry
+
+
+def test_multiple_clips(
+ sample_formatter,
+ clip: data.Clip,
+ sample_targets: TargetProtocol,
+ tmp_path: Path,
+):
+ # Second clip with a distinct uuid so the two groups are distinguishable.
+ clip2 = clip.model_copy(update={"uuid": uuid4()})
+
+ detections1 = [
+ Detection(
+ geometry=data.BoundingBox(
+ coordinates=list(np.random.uniform(size=[4]))
+ ),
+ detection_score=0.8,
+ class_scores=np.random.uniform(
+ size=len(sample_targets.class_names)
+ ),
+ features=np.random.uniform(size=32),
+ )
+ ]
+
+ detections2 = [
+ Detection(
+ geometry=data.BoundingBox(
+ coordinates=list(np.random.uniform(size=[4]))
+ ),
+ detection_score=0.9,
+ class_scores=np.random.uniform(
+ size=len(sample_targets.class_names)
+ ),
+ features=np.random.uniform(size=32),
+ )
+ ]
+
+ predictions = [
+ ClipDetections(clip=clip, detections=detections1),
+ ClipDetections(clip=clip2, detections=detections2),
+ ]
+
+ path = tmp_path / "multi_predictions.parquet"
+ sample_formatter.save(predictions=predictions, path=path)
+
+ recovered = sample_formatter.load(path=path)
+
+ assert len(recovered) == 2
+ # Compare as sets of clip uuids so this test does not depend on the
+ # order in which the formatter writes or reads the per-clip groups.
+
+ recovered_uuids = {p.clip.uuid for p in recovered}
+ expected_uuids = {clip.uuid, clip2.uuid}
+ assert recovered_uuids == expected_uuids
+
+
+def test_complex_geometry(
+ sample_formatter,
+ clip: data.Clip,
+ sample_targets: TargetProtocol,
+ tmp_path: Path,
+):
+ # Non-box geometry: the round-trip must preserve arbitrary geometry types.
+ polygon = data.Polygon(
+ coordinates=[
+ [
+ [0.0, 10000.0],
+ [0.1, 20000.0],
+ [0.2, 10000.0],
+ [0.0, 10000.0],
+ ]
+ ]
+ )
+
+ detections = [
+ Detection(
+ geometry=polygon,
+ detection_score=0.95,
+ class_scores=np.random.uniform(
+ size=len(sample_targets.class_names)
+ ),
+ features=np.random.uniform(size=32),
+ )
+ ]
+
+ prediction = ClipDetections(clip=clip, detections=detections)
+
+ path = tmp_path / "complex_geometry.parquet"
+ sample_formatter.save(predictions=[prediction], path=path)
+
+ recovered = sample_formatter.load(path=path)
+
+ assert len(recovered) == 1
+ assert len(recovered[0].detections) == 1
+
+ recovered_pred = recovered[0].detections[0]
+
+ # Check the geometry comes back with its original type, not coerced to a box.
+ assert isinstance(recovered_pred.geometry, data.Polygon)
+ assert recovered_pred.geometry == polygon
diff --git a/tests/test_data/test_predictions/test_raw.py b/tests/test_data/test_predictions/test_raw.py
new file mode 100644
index 00000000..f4fa1abe
--- /dev/null
+++ b/tests/test_data/test_predictions/test_raw.py
@@ -0,0 +1,63 @@
+from pathlib import Path
+
+import numpy as np
+import pytest
+from soundevent import data
+
+from batdetect2.outputs.formats import RawOutputConfig, build_output_formatter
+from batdetect2.postprocess.types import ClipDetections, Detection
+from batdetect2.targets.types import TargetProtocol
+
+
+@pytest.fixture
+def sample_formatter(sample_targets: TargetProtocol):
+ return build_output_formatter(  # raw-format writer/reader bound to the shared sample targets
+ config=RawOutputConfig(),
+ targets=sample_targets,
+ )
+
+
+def test_roundtrip(
+ sample_formatter,
+ clip: data.Clip,
+ sample_targets: TargetProtocol,
+ tmp_path: Path,
+):
+ detections = [  # ten random detections to exercise save/load
+ Detection(
+ geometry=data.BoundingBox(
+ coordinates=list(np.random.uniform(size=[4]))
+ ),
+ detection_score=0.5,
+ class_scores=np.random.uniform(
+ size=len(sample_targets.class_names)
+ ),
+ features=np.random.uniform(size=32),
+ )
+ for _ in range(10)
+ ]
+
+ prediction = ClipDetections(clip=clip, detections=detections)
+
+ path = tmp_path / "predictions"  # note: no file extension, unlike the parquet test
+
+ sample_formatter.save(predictions=[prediction], path=path)
+
+ recovered = sample_formatter.load(path=path)
+
+ assert len(recovered) == 1
+ assert recovered[0].clip == prediction.clip
+
+ for recovered_prediction, detection in zip(
+ recovered[0].detections,
+ detections,
+ strict=True,
+ ):
+ assert (
+ recovered_prediction.detection_score == detection.detection_score
+ )
+ assert (
+ recovered_prediction.class_scores == detection.class_scores
+ ).all()  # exact equality: raw format is expected to round-trip arrays bit-for-bit
+ assert (recovered_prediction.features == detection.features).all()
+ assert recovered_prediction.geometry == detection.geometry
diff --git a/tests/test_detections.py b/tests/test_detections.py
index 7f170122..8263daac 100644
--- a/tests/test_detections.py
+++ b/tests/test_detections.py
@@ -2,11 +2,14 @@
import os
+import pytest
+
from batdetect2 import api
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
+@pytest.mark.slow
def test_no_detections_above_nyquist():
"""Test that no detections are made above the nyquist frequency."""
# Recording donated by @@kdarras
diff --git a/tests/test_evaluate/__init__.py b/tests/test_evaluate/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_evaluate/test_results.py b/tests/test_evaluate/test_results.py
new file mode 100644
index 00000000..57026478
--- /dev/null
+++ b/tests/test_evaluate/test_results.py
@@ -0,0 +1,21 @@
+import json
+
+from matplotlib.figure import Figure
+
+from batdetect2.evaluate.results import save_evaluation_results
+
+
+def test_save_evaluation_results_writes_metrics_and_plots(tmp_path) -> None:
+ metrics = {"mAP": 0.5}
+ figure = Figure()  # an empty figure is enough to exercise plot saving
+
+ save_evaluation_results(
+ metrics=metrics,
+ plots=[("plots/example.png", figure)],  # relative path -> nested output dir
+ output_dir=tmp_path,
+ )
+
+ metrics_path = tmp_path / "metrics.json"
+ assert metrics_path.exists()
+ assert json.loads(metrics_path.read_text()) == metrics
+ assert (tmp_path / "plots" / "example.png").exists()
diff --git a/tests/test_evaluate/test_tasks/__init__.py b/tests/test_evaluate/test_tasks/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_evaluate/test_tasks/conftest.py b/tests/test_evaluate/test_tasks/conftest.py
new file mode 100644
index 00000000..25bdf317
--- /dev/null
+++ b/tests/test_evaluate/test_tasks/conftest.py
@@ -0,0 +1,86 @@
+from typing import Literal
+
+import numpy as np
+import pytest
+from soundevent import data
+
+from batdetect2.postprocess.types import Detection
+
+
+@pytest.fixture
+def clip(recording: data.Recording) -> data.Clip:
+ return data.Clip(recording=recording, start_time=0, end_time=100)  # 100 s clip shared by the task tests
+
+
+@pytest.fixture
+def create_detection():
+ def factory(
+ detection_score: float = 0.5,
+ start_time: float = 0.1,
+ duration: float = 0.01,
+ low_freq: float = 40_000,
+ bandwidth: float = 30_000,
+ pip_score: float = 0,
+ myo_score: float = 0,
+ ):
+ return Detection(
+ detection_score=detection_score,
+ class_scores=np.array(
+ [
+ pip_score,  # index 0: pippip
+ myo_score,  # index 1: myomyo
+ ]
+ ),
+ features=np.zeros([32]),
+ geometry=data.BoundingBox(
+ coordinates=[  # [start_time, low_freq, end_time, high_freq]
+ start_time,
+ low_freq,
+ start_time + duration,
+ low_freq + bandwidth,
+ ]
+ ),
+ )
+
+ return factory
+
+
+@pytest.fixture
+def create_annotation(
+ clip: data.Clip,
+ bat_tag: data.Tag,
+ myomyo_tag: data.Tag,
+ pippip_tag: data.Tag,
+):
+ def factory(
+ start_time: float = 0.1,
+ duration: float = 0.01,
+ low_freq: float = 40_000,
+ bandwidth: float = 30_000,
+ is_target: bool = True,
+ class_name: Literal["pippip", "myomyo"] | None = None,
+ ):
+ tags = [bat_tag] if is_target else []  # bat_tag marks the event as a target
+
+ if class_name is not None:
+ if class_name == "pippip":
+ tags.append(pippip_tag)
+ elif class_name == "myomyo":
+ tags.append(myomyo_tag)
+
+ return data.SoundEventAnnotation(
+ sound_event=data.SoundEvent(
+ geometry=data.BoundingBox(
+ coordinates=[  # [start_time, low_freq, end_time, high_freq]
+ start_time,
+ low_freq,
+ start_time + duration,
+ low_freq + bandwidth,
+ ]
+ ),
+ recording=clip.recording,
+ ),
+ tags=tags,
+ )
+
+ return factory
diff --git a/tests/test_evaluate/test_tasks/test_classification.py b/tests/test_evaluate/test_tasks/test_classification.py
new file mode 100644
index 00000000..6123a08a
--- /dev/null
+++ b/tests/test_evaluate/test_tasks/test_classification.py
@@ -0,0 +1,77 @@
+import numpy as np
+import pytest
+from soundevent import data
+
+from batdetect2.evaluate.tasks import build_task
+from batdetect2.evaluate.tasks.classification import ClassificationTaskConfig
+from batdetect2.postprocess.types import ClipDetections
+from batdetect2.targets.types import TargetProtocol
+
+
+def test_classification(
+ clip: data.Clip,
+ sample_targets: TargetProtocol,
+ create_detection,
+ create_annotation,
+):
+ config = ClassificationTaskConfig.model_validate(
+ {
+ "name": "sound_event_classification",
+ "metrics": [{"name": "average_precision"}],
+ }
+ )
+
+ evaluator = build_task(config, targets=sample_targets)
+
+ # Predictions: two interleaved tracks of 100 detections, one per class.
+ prediction = ClipDetections(
+ clip=clip,
+ detections=[
+ create_detection(
+ start_time=1 + 0.1 * index,
+ pip_score=score,
+ )
+ for index, score in enumerate(np.linspace(0, 1, 100))  # pip scores rise 0 -> 1
+ ]
+ + [
+ create_detection(
+ start_time=1.05 + 0.1 * index,
+ myo_score=score,
+ )
+ for index, score in enumerate(np.linspace(1, 0, 100))  # myo scores fall 1 -> 0
+ ],
+ )
+
+ # Ground truth at matching times; only some events are true targets.
+ gt = data.ClipAnnotation(
+ clip=clip,
+ sound_events=[
+ create_annotation(
+ start_time=1 + 0.1 * index,
+ is_target=index % 2 == 0,  # every other pip event is a target
+ class_name="pippip",
+ )
+ for index in range(100)
+ ]
+ + [
+ create_annotation(
+ start_time=1.05 + 0.1 * index,
+ is_target=index % 3 == 0,  # every third myo event is a target
+ class_name="myomyo",
+ )
+ for index in range(100)
+ ],
+ )
+
+ evals = evaluator.evaluate([gt], [prediction])
+ metrics = evaluator.compute_metrics(evals)
+
+ assert metrics["classification/average_precision/pippip"] == pytest.approx(
+ 0.5, abs=0.005
+ )
+ assert metrics["classification/average_precision/myomyo"] == pytest.approx(
+ 0.371, abs=0.005
+ )
+ assert metrics["classification/mean_average_precision"] == pytest.approx(
+ (0.5 + 0.371) / 2, abs=0.005
+ )
diff --git a/tests/test_evaluate/test_tasks/test_detection.py b/tests/test_evaluate/test_tasks/test_detection.py
new file mode 100644
index 00000000..2d448519
--- /dev/null
+++ b/tests/test_evaluate/test_tasks/test_detection.py
@@ -0,0 +1,50 @@
+import numpy as np
+import pytest
+from soundevent import data
+
+from batdetect2.evaluate.tasks import build_task
+from batdetect2.evaluate.tasks.detection import DetectionTaskConfig
+from batdetect2.postprocess.types import ClipDetections
+from batdetect2.targets.types import TargetProtocol
+
+
+def test_detection(
+ clip: data.Clip,
+ sample_targets: TargetProtocol,
+ create_detection,
+ create_annotation,
+):
+ config = DetectionTaskConfig.model_validate(
+ {
+ "name": "sound_event_detection",
+ "metrics": [{"name": "average_precision"}],
+ }
+ )
+ evaluator = build_task(config, targets=sample_targets)
+
+ # 100 detections with monotonically increasing scores.
+ prediction = ClipDetections(
+ clip=clip,
+ detections=[
+ create_detection(start_time=1 + 0.1 * index, detection_score=score)
+ for index, score in enumerate(np.linspace(0, 1, 100))
+ ],
+ )
+
+ # Matching annotations; only every other one is a true target.
+ gt = data.ClipAnnotation(
+ clip=clip,
+ sound_events=[
+ # Only half of the annotations are targets
+ create_annotation(
+ start_time=1 + 0.1 * index,
+ is_target=index % 2 == 0,
+ )
+ for index in range(100)
+ ],
+ )
+
+ # Run the task
+ evals = evaluator.evaluate([gt], [prediction])
+ metrics = evaluator.compute_metrics(evals)
+ assert metrics["detection/average_precision"] == pytest.approx(0.5)
diff --git a/tests/test_features.py b/tests/test_features.py
index 1271fda7..6aa2016b 100644
--- a/tests/test_features.py
+++ b/tests/test_features.py
@@ -133,17 +133,16 @@ def test_compute_max_power_bb(max_power: int):
audio = np.zeros((int(duration * samplerate),))
# Add a signal during the time and frequency range of interest
- audio[
- int(start_time * samplerate) : int(end_time * samplerate)
- ] = 0.5 * librosa.tone(
- max_power, sr=samplerate, duration=end_time - start_time
+ audio[int(start_time * samplerate) : int(end_time * samplerate)] = (
+ 0.5
+ * librosa.tone(
+ max_power, sr=samplerate, duration=end_time - start_time
+ )
)
# Add a more powerful signal outside frequency range of interest
- audio[
- int(start_time * samplerate) : int(end_time * samplerate)
- ] += 2 * librosa.tone(
- 80_000, sr=samplerate, duration=end_time - start_time
+ audio[int(start_time * samplerate) : int(end_time * samplerate)] += (
+ 2 * librosa.tone(80_000, sr=samplerate, duration=end_time - start_time)
)
params = api.get_config(
@@ -193,8 +192,6 @@ def test_compute_max_power_bb(max_power: int):
"high_freq": high_freq,
}
- print(prediction)
-
max_power_bb = feats.compute_max_power_bb(
prediction,
spec,
@@ -221,17 +218,17 @@ def test_compute_max_power():
audio = np.zeros((int(duration * samplerate),))
# Add a signal during the time and frequency range of interest
- audio[
- int(start_time * samplerate) : int(end_time * samplerate)
- ] = 0.5 * librosa.tone(
- 3_500, sr=samplerate, duration=end_time - start_time
+ audio[int(start_time * samplerate) : int(end_time * samplerate)] = (
+ 0.5
+ * librosa.tone(3_500, sr=samplerate, duration=end_time - start_time)
)
# Add a more powerful signal outside frequency range of interest
- audio[
- int(start_time * samplerate) : int(end_time * samplerate)
- ] += 2 * librosa.tone(
- max_power, sr=samplerate, duration=end_time - start_time
+ audio[int(start_time * samplerate) : int(end_time * samplerate)] += (
+ 2
+ * librosa.tone(
+ max_power, sr=samplerate, duration=end_time - start_time
+ )
)
params = api.get_config(
diff --git a/tests/test_inference/test_clips.py b/tests/test_inference/test_clips.py
new file mode 100644
index 00000000..207e8239
--- /dev/null
+++ b/tests/test_inference/test_clips.py
@@ -0,0 +1,25 @@
+from soundevent import data
+
+from batdetect2.inference.clips import get_recording_clips
+
+
+def test_get_recording_clips_uses_requested_duration(create_recording) -> None:
+ recording = create_recording(duration=2.0, samplerate=256_000)
+
+ clips = get_recording_clips(
+ recording,
+ duration=0.5,
+ overlap=0.0,
+ discard_empty=False,
+ )
+
+ assert len(clips) == 4  # 2.0 s / 0.5 s with no overlap
+ assert all(isinstance(clip, data.Clip) for clip in clips)
+ assert clips[0].start_time == 0.0
+ assert clips[0].end_time == 0.5
+ assert clips[1].start_time == 0.5
+ assert clips[1].end_time == 1.0
+ assert clips[2].start_time == 1.0
+ assert clips[2].end_time == 1.5
+ assert clips[3].start_time == 1.5
+ assert clips[3].end_time == 2.0
diff --git a/tests/test_model.py b/tests/test_model.py
index 3519c381..43b6eace 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -1,10 +1,10 @@
"""Test suite for model functions."""
-import warnings
from pathlib import Path
from typing import List
import numpy as np
+import pytest
from hypothesis import given, settings
from hypothesis import strategies as st
@@ -12,14 +12,9 @@
from batdetect2.detector import parameters
-def test_can_import_model_without_warnings():
- with warnings.catch_warnings():
- warnings.simplefilter("error")
- api.load_model()
-
-
@settings(deadline=None, max_examples=5)
@given(duration=st.floats(min_value=0.1, max_value=2))
+@pytest.mark.slow
def test_can_import_model_without_pickle(duration: float):
# NOTE: remove this test once no other issues are found This is a temporary
# test to check that change in model loading did not impact model behaviour
@@ -49,6 +44,7 @@ def test_can_import_model_without_pickle(duration: float):
assert predictions_without_pickle == predictions_with_pickle
+@pytest.mark.slow
def test_can_import_model_without_pickle_on_test_data(
example_audio_files: List[Path],
):
diff --git a/tests/test_models/__init__.py b/tests/test_models/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_models/test_backbones.py b/tests/test_models/test_backbones.py
new file mode 100644
index 00000000..99fc4e38
--- /dev/null
+++ b/tests/test_models/test_backbones.py
@@ -0,0 +1,212 @@
+"""Tests for backbone configuration loading and the backbone registry."""
+
+from pathlib import Path
+from typing import Callable
+
+import pytest
+
+from batdetect2.models.backbones import (
+ BackboneConfig,
+ UNetBackbone,
+ UNetBackboneConfig,
+ backbone_registry,
+ build_backbone,
+ load_backbone_config,
+)
+
+
+def test_unet_backbone_config_defaults():
+ """Default config has expected field values."""
+ config = UNetBackboneConfig()
+
+ assert config.name == "UNetBackbone"
+ assert config.input_height == 128
+ assert config.in_channels == 1
+
+
+def test_unet_backbone_config_custom_fields():
+ """Custom field values are stored correctly."""
+ config = UNetBackboneConfig(in_channels=2, input_height=64)
+
+ assert config.in_channels == 2
+ assert config.input_height == 64
+
+
+def test_unet_backbone_config_extra_fields_ignored():
+ """Extra/unknown fields are silently ignored (BaseConfig behaviour)."""
+ config = UNetBackboneConfig.model_validate(
+ {"name": "UNetBackbone", "unknown_field": 99}
+ )
+
+ assert config.name == "UNetBackbone"
+ assert not hasattr(config, "unknown_field")  # extras are dropped, not stored
+
+
+def test_build_backbone_default():
+ """Building with no config uses UNetBackbone defaults."""
+ backbone = build_backbone()
+
+ assert isinstance(backbone, UNetBackbone)
+ assert backbone.input_height == 128  # default input height
+
+
+def test_build_backbone_custom_config():
+ """Building with a custom config propagates input_height and in_channels."""
+ config = UNetBackboneConfig(in_channels=2, input_height=64)
+ backbone = build_backbone(config)
+
+ assert isinstance(backbone, UNetBackbone)
+ assert backbone.input_height == 64
+ assert backbone.encoder.in_channels == 2
+
+
+def test_build_backbone_returns_unet_backbone():
+ """build_backbone returns the default UNet backbone."""
+ backbone = build_backbone()
+
+ assert isinstance(backbone, UNetBackbone)
+
+
+def test_registry_has_unet_backbone():
+ """The backbone registry has UNetBackbone registered."""
+ config_types = backbone_registry.get_config_types()
+
+ assert UNetBackboneConfig in config_types
+
+
+def test_registry_config_type_is_unet_backbone_config():
+ """The config type stored for UNetBackbone is UNetBackboneConfig."""
+ config_type = backbone_registry.get_config_type("UNetBackbone")
+
+ assert config_type is UNetBackboneConfig
+
+
+def test_registry_build_dispatches_correctly():
+ """Registry.build dispatches to UNetBackbone.from_config."""
+ config = UNetBackboneConfig(input_height=128)
+ backbone = backbone_registry.build(config)
+
+ assert isinstance(backbone, UNetBackbone)
+
+
+def test_registry_build_unknown_name_raises():
+ """Registry.build raises NotImplementedError for an unknown config name."""
+
+ class FakeConfig:
+ name = "NonExistentBackbone"  # name not present in the registry
+
+ with pytest.raises(NotImplementedError):
+ backbone_registry.build(FakeConfig()) # ty: ignore[invalid-argument-type]
+
+
+def test_backbone_config_validates_unet_from_dict():
+ """BackboneConfig TypeAdapter resolves to UNetBackboneConfig via name."""
+ from pydantic import TypeAdapter
+
+ adapter = TypeAdapter(BackboneConfig)
+ config = adapter.validate_python(
+ {"name": "UNetBackbone", "input_height": 64}
+ )
+
+ assert isinstance(config, UNetBackboneConfig)
+ assert config.input_height == 64
+
+
+def test_backbone_config_invalid_name_raises():
+ """BackboneConfig validation raises for an unknown name discriminator."""
+ from pydantic import TypeAdapter, ValidationError
+
+ adapter = TypeAdapter(BackboneConfig)
+ with pytest.raises(ValidationError):
+ adapter.validate_python({"name": "NonExistentBackbone"})
+
+
+def test_load_backbone_config_from_yaml(
+ create_temp_yaml: Callable[[str], Path],
+):
+ """load_backbone_config loads a UNetBackboneConfig from a YAML file."""
+ yaml_content = """\
+name: UNetBackbone
+input_height: 64
+in_channels: 2
+"""
+ path = create_temp_yaml(yaml_content)
+ config = load_backbone_config(path)
+
+ assert isinstance(config, UNetBackboneConfig)
+ assert config.input_height == 64
+ assert config.in_channels == 2
+
+
+def test_load_backbone_config_with_field(
+ create_temp_yaml: Callable[[str], Path],
+):
+ """load_backbone_config extracts a nested field before validation."""
+ yaml_content = """\
+model:
+ name: UNetBackbone
+ input_height: 32
+"""
+ path = create_temp_yaml(yaml_content)
+ config = load_backbone_config(path, field="model")  # pull the nested "model" section
+
+ assert isinstance(config, UNetBackboneConfig)
+ assert config.input_height == 32
+
+
+def test_load_backbone_config_defaults_on_minimal_yaml(
+ create_temp_yaml: Callable[[str], Path],
+):
+ """Minimal YAML with only name fills remaining fields with defaults."""
+ yaml_content = "name: UNetBackbone\n"
+ path = create_temp_yaml(yaml_content)
+ config = load_backbone_config(path)
+
+ assert isinstance(config, UNetBackboneConfig)
+ assert config.input_height == UNetBackboneConfig().input_height
+ assert config.in_channels == UNetBackboneConfig().in_channels
+
+
+def test_load_backbone_config_extra_fields_ignored(
+ create_temp_yaml: Callable[[str], Path],
+):
+ """Extra YAML fields are silently ignored when loading backbone config."""
+ yaml_content = """\
+name: UNetBackbone
+input_height: 128
+deprecated_field: 99
+"""
+ path = create_temp_yaml(yaml_content)
+ config = load_backbone_config(path)
+
+ assert isinstance(config, UNetBackboneConfig)
+ assert config.input_height == 128
+
+
+def test_round_trip_yaml_to_build_backbone(
+ create_temp_yaml: Callable[[str], Path],
+):
+ """A backbone config loaded from YAML can be used directly with build_backbone."""
+ yaml_content = """\
+name: UNetBackbone
+input_height: 128
+in_channels: 1
+"""
+ path = create_temp_yaml(yaml_content)
+ config = load_backbone_config(path)
+ backbone = build_backbone(config)
+
+ assert isinstance(backbone, UNetBackbone)
+ assert backbone.input_height == 128
+
+
+def test_load_backbone_config_from_example_data(example_data_dir: Path):
+ """load_backbone_config loads the real example config correctly."""
+ config = load_backbone_config(
+ example_data_dir / "configs" / "model.yaml",
+ field="architecture",  # example file nests the backbone under "architecture"
+ )
+
+ assert isinstance(config, UNetBackboneConfig)
+ assert config.input_height == 128
+ assert config.in_channels == 1
diff --git a/tests/test_models/test_blocks.py b/tests/test_models/test_blocks.py
new file mode 100644
index 00000000..57315d53
--- /dev/null
+++ b/tests/test_models/test_blocks.py
@@ -0,0 +1,190 @@
+import pytest
+import torch
+
+from batdetect2.models.blocks import (
+ ConvBlock,
+ ConvConfig,
+ FreqCoordConvDownBlock,
+ FreqCoordConvDownConfig,
+ FreqCoordConvUpBlock,
+ FreqCoordConvUpConfig,
+ LayerGroup,
+ LayerGroupConfig,
+ SelfAttention,
+ SelfAttentionConfig,
+ StandardConvDownBlock,
+ StandardConvDownConfig,
+ StandardConvUpBlock,
+ StandardConvUpConfig,
+ VerticalConv,
+ VerticalConvConfig,
+ build_layer,
+)
+
+
+@pytest.fixture
+def dummy_input() -> torch.Tensor:
+ """Provides a standard (B, C, H, W) tensor for testing blocks."""
+ batch_size, in_channels, height, width = 2, 16, 32, 32
+ return torch.randn(batch_size, in_channels, height, width)
+
+
+@pytest.fixture
+def dummy_bottleneck_input() -> torch.Tensor:
+ """Provides an input typical for the Bottleneck/SelfAttention (H=1)."""
+ return torch.randn(2, 64, 1, 32)  # (B, C, H=1, W)
+
+
+@pytest.mark.parametrize(
+ "block_class, expected_h_scale",
+ [
+ (ConvBlock, 1.0),  # keeps height
+ (StandardConvDownBlock, 0.5),  # halves height
+ (StandardConvUpBlock, 2.0),  # doubles height
+ ],
+)
+def test_standard_block_protocol_methods(
+ block_class, expected_h_scale, dummy_input
+):
+ """Test get_output_channels and get_output_height for standard blocks."""
+ in_channels = dummy_input.size(1)
+ input_height = dummy_input.size(2)
+ out_channels = 32
+
+ block = block_class(in_channels=in_channels, out_channels=out_channels)
+
+ assert block.out_channels == out_channels
+ assert block.get_output_height(input_height) == int(
+ input_height * expected_h_scale
+ )
+
+
+@pytest.mark.parametrize(
+ "block_class, expected_h_scale",
+ [
+ (FreqCoordConvDownBlock, 0.5),  # halves height
+ (FreqCoordConvUpBlock, 2.0),  # doubles height
+ ],
+)
+def test_coord_block_protocol_methods(
+ block_class, expected_h_scale, dummy_input
+):
+ """Test get_output_channels and get_output_height for coord blocks."""
+ in_channels = dummy_input.size(1)
+ input_height = dummy_input.size(2)
+ out_channels = 32
+
+ block = block_class(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ input_height=input_height,
+ )
+
+ assert block.out_channels == out_channels
+ assert block.get_output_height(input_height) == int(
+ input_height * expected_h_scale
+ )
+
+
+def test_vertical_conv_forward_shape(dummy_input):
+ """Test that VerticalConv correctly collapses the height dimension to 1."""
+ in_channels = dummy_input.size(1)
+ input_height = dummy_input.size(2)
+ out_channels = 32
+
+ block = VerticalConv(in_channels, out_channels, input_height)
+ output = block(dummy_input)
+
+ assert output.shape == (2, out_channels, 1, 32)  # height collapsed to 1
+ assert block.out_channels == out_channels
+
+
+def test_self_attention_forward_shape(dummy_bottleneck_input):
+ """Test that SelfAttention maintains the exact shape."""
+ in_channels = dummy_bottleneck_input.size(1)
+ attention_channels = 32
+
+ block = SelfAttention(
+ in_channels=in_channels, attention_channels=attention_channels
+ )
+ output = block(dummy_bottleneck_input)
+
+ assert output.shape == dummy_bottleneck_input.shape
+ assert block.out_channels == in_channels  # attention preserves channel count
+
+
+def test_self_attention_weights(dummy_bottleneck_input):
+ """Test that attention weights sum to 1 over the time sequence."""
+ in_channels = dummy_bottleneck_input.size(1)
+ block = SelfAttention(in_channels=in_channels, attention_channels=32)
+
+ weights = block.compute_attention_weights(dummy_bottleneck_input)
+
+ # Weights shape should be (B, T, T) where T is time (width)
+ batch_size = dummy_bottleneck_input.size(0)
+ time_steps = dummy_bottleneck_input.size(3)
+
+ assert weights.shape == (batch_size, time_steps, time_steps)
+
+ # Summing across the keys (dim=1) for each query should equal 1.0
+ sum_weights = weights.sum(dim=1)
+ assert torch.allclose(sum_weights, torch.ones_like(sum_weights), atol=1e-5)
+
+
+@pytest.mark.parametrize(
+ "layer_config, expected_type",
+ [
+ (ConvConfig(out_channels=32), ConvBlock),
+ (StandardConvDownConfig(out_channels=32), StandardConvDownBlock),
+ (StandardConvUpConfig(out_channels=32), StandardConvUpBlock),
+ (FreqCoordConvDownConfig(out_channels=32), FreqCoordConvDownBlock),
+ (FreqCoordConvUpConfig(out_channels=32), FreqCoordConvUpBlock),
+ (SelfAttentionConfig(attention_channels=32), SelfAttention),
+ (VerticalConvConfig(channels=32), VerticalConv),
+ ],
+)
+def test_build_layer_factory(layer_config, expected_type):
+ """Test that the factory dynamically builds the correct block."""
+ input_height = 32
+ in_channels = 16
+
+ layer = build_layer(
+ input_height=input_height,
+ in_channels=in_channels,
+ config=layer_config,
+ )
+
+ assert isinstance(layer, expected_type)  # config type drives the dispatch
+
+
+def test_layer_group_from_config_and_forward(dummy_input):
+ """Test that LayerGroup successfully chains multiple blocks."""
+ in_channels = dummy_input.size(1)
+ input_height = dummy_input.size(2)
+
+ config = LayerGroupConfig(
+ layers=[
+ ConvConfig(out_channels=32),
+ StandardConvDownConfig(out_channels=64),
+ ]
+ )
+
+ layer_group = build_layer(
+ input_height=input_height,
+ in_channels=in_channels,
+ config=config,
+ )
+
+ assert isinstance(layer_group, LayerGroup)
+ assert len(layer_group.layers) == 2
+
+ # The group should report the output channels of the LAST block
+ assert layer_group.out_channels == 64
+
+ # The group should report the accumulated height changes
+ assert layer_group.get_output_height(input_height) == input_height // 2
+
+ output = layer_group(dummy_input)
+
+ # Shape should reflect: Conv (stays 32x32) -> DownConv (halves to 16x16)
+ assert output.shape == (2, 64, 16, 16)
diff --git a/tests/test_models/test_detectors.py b/tests/test_models/test_detectors.py
new file mode 100644
index 00000000..129d7056
--- /dev/null
+++ b/tests/test_models/test_detectors.py
@@ -0,0 +1,162 @@
+from typing import cast
+
+import numpy as np
+import pytest
+import torch
+
+from batdetect2.models import UNetBackbone
+from batdetect2.models.backbones import UNetBackboneConfig
+from batdetect2.models.detectors import Detector, build_detector
+from batdetect2.models.encoder import Encoder
+from batdetect2.models.heads import BBoxHead, ClassifierHead
+from batdetect2.models.types import ModelOutput
+
+
+@pytest.fixture
+def dummy_spectrogram() -> torch.Tensor:
+ """Provides a dummy spectrogram tensor (B, C, H, W)."""
+ return torch.randn(2, 1, 256, 128)
+
+
+def test_build_detector_default():
+ """Test building the default detector without a config."""
+ num_classes = 5
+ model = build_detector(
+ class_names=[f"class_{i}" for i in range(num_classes)],
+ dimension_names=["width", "height"],
+ )
+
+ assert isinstance(model, Detector)
+ assert model.num_classes == num_classes
+ assert isinstance(model.classifier_head, ClassifierHead)
+ assert isinstance(model.size_head, BBoxHead)
+
+
+def test_build_detector_custom_config():
+ """Test building a detector with a custom BackboneConfig."""
+ num_classes = 3
+ config = UNetBackboneConfig(in_channels=2, input_height=128)
+
+ model = build_detector(
+ class_names=[f"class_{i}" for i in range(num_classes)],
+ dimension_names=["width", "height"],
+ config=config,
+ )
+
+ assert isinstance(model, Detector)
+ assert model.backbone.input_height == 128
+
+ backbone = cast(UNetBackbone, model.backbone)  # narrow the type for attribute access
+
+ assert isinstance(backbone.encoder, Encoder)
+ assert backbone.encoder.in_channels == 2
+
+
+def test_build_detector_custom_size_channels():
+ num_classes = 3
+ num_sizes = 4
+ config = UNetBackboneConfig(in_channels=1, input_height=128)
+
+ model = build_detector(
+ class_names=[f"class_{i}" for i in range(num_classes)],
+ dimension_names=[f"size_{i}" for i in range(num_sizes)],
+ config=config,
+ )
+
+ dummy = torch.randn(1, 1, 128, 64)
+ output = model(dummy)
+ assert output.size_preds.shape[1] == num_sizes  # one channel per size dimension
+
+
+def test_detector_forward_pass_shapes(dummy_spectrogram):
+ """Test that the forward pass produces correctly shaped outputs."""
+ num_classes = 4
+ # Build model matching the dummy input shape
+ config = UNetBackboneConfig(in_channels=1, input_height=256)
+ model = build_detector(
+ class_names=[f"class_{i}" for i in range(num_classes)],
+ dimension_names=["width", "height"],
+ config=config,
+ )
+
+ # Process the spectrogram through the model
+ # PyTorch expects shape (Batch, Channels, Height, Width)
+ output = model(dummy_spectrogram)
+
+ # Verify the output is a NamedTuple ModelOutput
+ assert isinstance(output, ModelOutput)
+
+ batch_size = dummy_spectrogram.size(0)
+ input_height = dummy_spectrogram.size(2)
+ input_width = dummy_spectrogram.size(3)
+
+ # Check detection probabilities shape: (B, 1, H, W)
+ assert output.detection_probs.shape == (
+ batch_size,
+ 1,
+ input_height,
+ input_width,
+ )
+
+ # Check size predictions shape: (B, 2, H, W)
+ assert output.size_preds.shape == (
+ batch_size,
+ 2,
+ input_height,
+ input_width,
+ )
+
+ # Check class probabilities shape: (B, num_classes, H, W)
+ assert output.class_probs.shape == (
+ batch_size,
+ num_classes,
+ input_height,
+ input_width,
+ )
+
+ # Check features shape: (B, out_channels, H, W)
+ assert isinstance(model.backbone, UNetBackbone)  # narrows the type before .out_channels
+ out_channels = model.backbone.out_channels
+ assert output.features.shape == (
+ batch_size,
+ out_channels,
+ input_height,
+ input_width,
+ )
+
+
+def test_detector_forward_pass_with_preprocessor(sample_preprocessor):
+ """Test the full pipeline from audio to model output."""
+ # Generate random audio: 1 second at 256kHz
+ samplerate = 256000
+ duration = 1.0
+ audio = np.random.randn(int(samplerate * duration)).astype(np.float32)
+
+ # Create tensor: (Batch=1, Channels=1, Samples) - Preprocessor expects batched 1D waveforms
+ audio_tensor = torch.from_numpy(audio).unsqueeze(0).unsqueeze(0)
+
+ # Preprocess -> Output shape: (Batch=1, Channels=1, Height, Width)
+ spec = sample_preprocessor(audio_tensor)
+
+ # Normalise to 4-D (B, C, H, W) in case the preprocessor returned 3-D.
+ if spec.ndim == 3:
+ spec = spec.unsqueeze(0)
+
+ # Build model matching the preprocessor's output shape
+ # The preprocessor output is (B, C, H, W) -> spec.shape[1] is C, spec.shape[2] is H
+ config = UNetBackboneConfig(
+ in_channels=spec.shape[1], input_height=spec.shape[2]
+ )
+ model = build_detector(
+ class_names=["class_0", "class_1", "class_2"],
+ dimension_names=["width", "height"],
+ config=config,
+ )
+
+ # Process
+ output = model(spec)
+
+ # Assert
+ assert isinstance(output, ModelOutput)
+ assert output.detection_probs.shape[0] == 1 # Batch size 1
+ assert output.class_probs.shape[1] == 3 # 3 classes
diff --git a/tests/test_outputs/test_base.py b/tests/test_outputs/test_base.py
new file mode 100644
index 00000000..04c490b0
--- /dev/null
+++ b/tests/test_outputs/test_base.py
@@ -0,0 +1,21 @@
+from pathlib import Path
+
+from batdetect2.outputs.formats.base import make_path_relative
+
+
+def test_make_path_relative_strips_shared_relative_prefix() -> None:
+ audio_dir = Path("example_data/audio")
+ path = Path("example_data/audio/subdir/clip.wav")
+
+ relative = make_path_relative(path, audio_dir)
+
+ assert relative == Path("subdir/clip.wav")
+
+
+def test_make_path_relative_returns_dot_for_matching_relative_dir() -> None:
+ audio_dir = Path("example_data/audio")
+ path = Path("example_data/audio")
+
+ relative = make_path_relative(path, audio_dir)
+
+ assert relative == Path(".")
diff --git a/tests/test_outputs/test_transform/test_clip_transforms.py b/tests/test_outputs/test_transform/test_clip_transforms.py
new file mode 100644
index 00000000..445502b0
--- /dev/null
+++ b/tests/test_outputs/test_transform/test_clip_transforms.py
@@ -0,0 +1,121 @@
+import numpy as np
+from soundevent import data
+
+from batdetect2.outputs.transforms.clip_transforms import (
+ RemoveAboveNyquist,
+ RemoveAtEdges,
+)
+from batdetect2.postprocess.types import ClipDetections, Detection
+
+
+def _detection(
+ start_time: float,
+ low_freq: float,
+ end_time: float,
+ high_freq: float,
+) -> Detection:
+ return Detection(
+ geometry=data.BoundingBox(
+ coordinates=[start_time, low_freq, end_time, high_freq]
+ ),
+ detection_score=0.9,
+ class_scores=np.array([0.9]),
+ features=np.array([1.0, 2.0]),
+ )
+
+
+def test_remove_above_nyquist_high_freq_mode(clip: data.Clip) -> None:
+ # Nyquist should be at 128kHz
+ assert clip.recording.samplerate == 256_000
+
+ transform = RemoveAboveNyquist(mode="high_freq", buffer=0)
+ prediction = ClipDetections(
+ clip=clip,
+ detections=[
+ _detection(0.1, 10_000, 0.2, 120_000),
+ _detection(0.1, 10_000, 0.2, 130_000),
+ ],
+ )
+
+ out = transform(prediction)
+
+ assert len(out.detections) == 1
+
+
+def test_remove_above_nyquist_low_freq_mode(clip: data.Clip) -> None:
+ transform = RemoveAboveNyquist(mode="low_freq", buffer=0)
+ prediction = ClipDetections(
+ clip=clip,
+ detections=[
+ _detection(0.1, 120_000, 0.2, 140_000),
+ _detection(0.1, 130_000, 0.2, 140_000),
+ ],
+ )
+
+ out = transform(prediction)
+
+ assert len(out.detections) == 1
+
+
+def test_remove_above_nyquist_respects_buffer(clip: data.Clip) -> None:
+ transform = RemoveAboveNyquist(mode="high_freq", buffer=5_000)
+ prediction = ClipDetections(
+ clip=clip,
+ detections=[
+ _detection(0.1, 10_000, 0.2, 122_000),
+ _detection(0.1, 10_000, 0.2, 124_000),
+ ],
+ )
+
+ out = transform(prediction)
+
+ assert len(out.detections) == 1
+
+
+def test_remove_at_edges_start_mode(clip: data.Clip) -> None:
+ clip = clip.model_copy(update={"start_time": 10.0, "end_time": 20.0})
+ transform = RemoveAtEdges(buffer=1.0, mode="start_time")
+ prediction = ClipDetections(
+ clip=clip,
+ detections=[
+ _detection(10.2, 20_000, 10.4, 30_000),
+ _detection(11.2, 20_000, 11.4, 30_000),
+ ],
+ )
+
+ out = transform(prediction)
+
+ assert len(out.detections) == 1
+
+
+def test_remove_at_edges_end_mode(clip: data.Clip) -> None:
+ clip = clip.model_copy(update={"start_time": 10.0, "end_time": 20.0})
+ transform = RemoveAtEdges(buffer=1.0, mode="end_time")
+ prediction = ClipDetections(
+ clip=clip,
+ detections=[
+ _detection(11.2, 20_000, 19.8, 30_000),
+ _detection(11.2, 20_000, 18.6, 30_000),
+ ],
+ )
+
+ out = transform(prediction)
+
+ assert len(out.detections) == 1
+
+
+def test_remove_at_edges_both_mode(clip: data.Clip) -> None:
+ clip = clip.model_copy(update={"start_time": 10.0, "end_time": 20.0})
+ transform = RemoveAtEdges(buffer=1.0, mode="both")
+ prediction = ClipDetections(
+ clip=clip,
+ detections=[
+ _detection(10.2, 20_000, 18.5, 30_000),
+ _detection(11.2, 20_000, 18.5, 30_000),
+ _detection(11.2, 20_000, 19.8, 30_000),
+ ],
+ )
+
+ out = transform(prediction)
+
+ assert len(out.detections) == 1
diff --git a/tests/test_outputs/test_transform/test_detection_transforms.py b/tests/test_outputs/test_transform/test_detection_transforms.py
new file mode 100644
index 00000000..a6cfe667
--- /dev/null
+++ b/tests/test_outputs/test_transform/test_detection_transforms.py
@@ -0,0 +1,117 @@
+import numpy as np
+from soundevent import data
+from soundevent.geometry import compute_bounds
+
+from batdetect2.outputs.transforms.detection_transforms import (
+ FilterByDuration,
+ FilterByDurationConfig,
+ FilterByFrequency,
+ FilterByFrequencyConfig,
+ detection_transforms,
+ shift_detection_time,
+ shift_detections_to_start_time,
+)
+from batdetect2.postprocess.types import Detection
+
+
+def _detection(
+ start_time: float,
+ low_freq: float,
+ end_time: float,
+ high_freq: float,
+) -> Detection:
+ return Detection(
+ geometry=data.BoundingBox(
+ coordinates=[start_time, low_freq, end_time, high_freq]
+ ),
+ detection_score=0.9,
+ class_scores=np.array([0.9]),
+ features=np.array([1.0, 2.0]),
+ )
+
+
+def test_shift_detection_time_moves_geometry_by_offset() -> None:
+ detection = _detection(0.1, 20_000, 0.2, 30_000)
+
+ shifted = shift_detection_time(detection, time=2.5)
+ start, low, end, high = compute_bounds(shifted.geometry)
+
+ assert np.isclose(start, 2.6)
+ assert np.isclose(end, 2.7)
+ assert np.isclose(low, 20_000)
+ assert np.isclose(high, 30_000)
+
+
+def test_shift_detections_to_start_time_zero_is_identity() -> None:
+ detections = [_detection(0.1, 20_000, 0.2, 30_000)]
+
+ shifted = shift_detections_to_start_time(detections, start_time=0)
+
+ assert len(shifted) == 1
+ assert shifted[0] is detections[0]
+
+
+def test_filter_by_frequency_low_freq_mode() -> None:
+ transform = FilterByFrequency(
+ min_freq=20_000,
+ max_freq=40_000,
+ mode="low_freq",
+ )
+
+ assert transform(_detection(0.1, 25_000, 0.2, 60_000)) is not None
+ assert transform(_detection(0.1, 10_000, 0.2, 60_000)) is None
+
+
+def test_filter_by_frequency_high_freq_mode() -> None:
+ transform = FilterByFrequency(
+ min_freq=20_000,
+ max_freq=40_000,
+ mode="high_freq",
+ )
+
+ assert transform(_detection(0.1, 10_000, 0.2, 35_000)) is not None
+ assert transform(_detection(0.1, 10_000, 0.2, 60_000)) is None
+
+
+def test_filter_by_frequency_both_mode_current_semantics() -> None:
+ transform = FilterByFrequency(
+ min_freq=20_000,
+ max_freq=40_000,
+ mode="both",
+ )
+
+ # low >= min passes
+ assert transform(_detection(0.1, 25_000, 0.2, 80_000)) is not None
+ # high <= max passes
+ assert transform(_detection(0.1, 10_000, 0.2, 35_000)) is not None
+ # neither condition passes
+ assert transform(_detection(0.1, 10_000, 0.2, 80_000)) is None
+
+
+def test_filter_by_duration_keeps_within_range() -> None:
+ transform = FilterByDuration(min_duration=0.04, max_duration=0.06)
+
+ kept = transform(_detection(0.1, 20_000, 0.15, 30_000))
+ removed = transform(_detection(0.1, 20_000, 0.2, 30_000))
+
+ assert kept is not None
+ assert removed is None
+
+
+def test_detection_transform_registry_builds_builtin_transforms() -> None:
+ frequency_transform = detection_transforms.build(
+ FilterByFrequencyConfig(
+ min_freq=20_000,
+ max_freq=40_000,
+ mode="high_freq",
+ )
+ )
+ duration_transform = detection_transforms.build(
+ FilterByDurationConfig(
+ min_duration=0.01,
+ max_duration=0.2,
+ )
+ )
+
+ assert callable(frequency_transform)
+ assert callable(duration_transform)
diff --git a/tests/test_outputs/test_transform/test_output_transform_pipeline.py b/tests/test_outputs/test_transform/test_output_transform_pipeline.py
new file mode 100644
index 00000000..737d2f9a
--- /dev/null
+++ b/tests/test_outputs/test_transform/test_output_transform_pipeline.py
@@ -0,0 +1,137 @@
+import numpy as np
+import torch
+from soundevent import data
+from soundevent.geometry import compute_bounds
+
+from batdetect2.outputs import build_output_transform
+from batdetect2.postprocess.types import ClipDetectionsTensor
+from batdetect2.targets.types import TargetProtocol
+
+
+def _mock_clip_detections_tensor(
+ *,
+ time: float,
+ duration: float,
+ frequency: float,
+ bandwidth: float,
+) -> ClipDetectionsTensor:
+ # NOTE: size time is represented in milliseconds.
+ return ClipDetectionsTensor(
+ scores=torch.tensor([0.9], dtype=torch.float32),
+ sizes=torch.tensor(
+ [[duration * 1_000, bandwidth]], dtype=torch.float32
+ ),
+ class_scores=torch.tensor([[0.8, 0.2]], dtype=torch.float32),
+ times=torch.tensor([time], dtype=torch.float32),
+ frequencies=torch.tensor([frequency], dtype=torch.float32),
+ features=torch.tensor([[1.0, 2.0]], dtype=torch.float32),
+ )
+
+
+def test_pipeline_from_config_applies_detection_and_clip_transforms(
+ clip: data.Clip,
+ sample_targets: TargetProtocol,
+) -> None:
+ clip = clip.model_copy(update={"start_time": 10.0, "end_time": 11.0})
+ transform = build_output_transform(
+ targets=sample_targets,
+ config={
+ "detection_transforms": [
+ {
+ "name": "filter_by_duration",
+ "min_duration": 0.08,
+ "max_duration": 0.12,
+ }
+ ],
+ "clip_transforms": [
+ {
+ "name": "remove_at_edges",
+ "buffer": 0.1,
+ "mode": "both",
+ }
+ ],
+ },
+ )
+
+ raw = _mock_clip_detections_tensor(
+ time=0.03,
+ duration=0.1,
+ frequency=60_000,
+ bandwidth=1_000,
+ )
+
+ prediction = transform.to_clip_detections(raw, clip=clip)
+
+ # duration filter keeps it, edge filter removes it.
+ assert len(prediction.detections) == 0
+
+
+def test_pipeline_keeps_detection_when_all_filters_pass(
+ clip: data.Clip,
+ sample_targets: TargetProtocol,
+) -> None:
+ clip = clip.model_copy(update={"start_time": 10.0, "end_time": 11.0})
+ transform = build_output_transform(
+ targets=sample_targets,
+ config={
+ "detection_transforms": [
+ {
+ "name": "filter_by_duration",
+ "min_duration": 0.08,
+ "max_duration": 0.12,
+ },
+ ],
+ "clip_transforms": [
+ {
+ "name": "remove_at_edges",
+ "buffer": 0.05,
+ "mode": "both",
+ }
+ ],
+ },
+ )
+
+ raw = _mock_clip_detections_tensor(
+ time=0.3,
+ duration=0.1,
+ frequency=60_000,
+ bandwidth=1_000,
+ )
+
+ prediction = transform.to_clip_detections(raw, clip=clip)
+
+ assert len(prediction.detections) == 1
+ start_time, _, _, _ = compute_bounds(prediction.detections[0].geometry)
+ assert np.isclose(start_time, 10.3)
+
+
+def test_remove_above_nyquist_uses_clip_recording_metadata(
+ clip: data.Clip,
+ sample_targets: TargetProtocol,
+) -> None:
+ clip = clip.model_copy(update={"start_time": 0.0, "end_time": 1.0})
+ transform = build_output_transform(
+ targets=sample_targets,
+ config={
+ "clip_transforms": [
+ {
+ "name": "remove_above_nyquist",
+ "mode": "high_freq",
+ "buffer": 0,
+ }
+ ]
+ },
+ )
+
+ raw = _mock_clip_detections_tensor(
+ time=0.5,
+ duration=0.05,
+ frequency=127_500,
+ bandwidth=2_000,
+ )
+
+ prediction = transform.to_clip_detections(raw, clip=clip)
+
+ # clip fixture samplerate is 256_000, nyquist is 128_000, high bound
+ # becomes 128_500 and must be removed.
+ assert len(prediction.detections) == 0
diff --git a/tests/test_outputs/test_transform/test_roundtrip.py b/tests/test_outputs/test_transform/test_roundtrip.py
new file mode 100644
index 00000000..ffc6d571
--- /dev/null
+++ b/tests/test_outputs/test_transform/test_roundtrip.py
@@ -0,0 +1,78 @@
+import pytest
+import torch
+from soundevent import data
+from soundevent.geometry import compute_bounds
+
+from batdetect2.models.types import ModelOutput
+from batdetect2.outputs import build_output_transform
+from batdetect2.postprocess import build_postprocessor
+from batdetect2.targets import build_roi_mapping
+from batdetect2.targets.types import TargetProtocol
+from batdetect2.train.labels import build_clip_labeler
+
+
+def test_annotation_roundtrip_through_postprocess_and_output_transform(
+ create_recording,
+ create_clip,
+ sample_preprocessor,
+ sample_targets: TargetProtocol,
+ pippip_tag: data.Tag,
+ bat_tag: data.Tag,
+) -> None:
+ recording = create_recording(duration=30, samplerate=256_000)
+ clip = create_clip(recording=recording, start_time=10.0, end_time=10.5)
+
+ annotation = data.SoundEventAnnotation(
+ sound_event=data.SoundEvent(
+ recording=recording,
+ geometry=data.BoundingBox(
+ coordinates=[10.2, 40_000, 10.26, 55_000]
+ ),
+ ),
+ tags=[pippip_tag, bat_tag],
+ )
+ clip_annotation = data.ClipAnnotation(clip=clip, sound_events=[annotation])
+
+ height = 128
+ duration = clip.end_time - clip.start_time
+ width = int(duration * sample_preprocessor.output_samplerate)
+ spec = torch.zeros((1, height, width), dtype=torch.float32)
+
+ roi_mapper = build_roi_mapping()
+
+ labeler = build_clip_labeler(targets=sample_targets, roi_mapper=roi_mapper)
+ heatmaps = labeler(clip_annotation, spec)
+
+ output = ModelOutput(
+ detection_probs=heatmaps.detection.unsqueeze(0),
+ size_preds=heatmaps.size.unsqueeze(0),
+ class_probs=heatmaps.classes.unsqueeze(0),
+ features=torch.zeros((1, 1, height, width), dtype=torch.float32),
+ )
+
+ postprocessor = build_postprocessor(preprocessor=sample_preprocessor)
+ clip_detection_tensors = postprocessor(output)
+ assert len(clip_detection_tensors) == 1
+
+ transform = build_output_transform(
+ targets=sample_targets,
+ roi_mapper=roi_mapper,
+ )
+ clip_detections = transform.to_clip_detections(
+ detections=clip_detection_tensors[0],
+ clip=clip,
+ )
+
+ assert len(clip_detections.detections) == 1
+ recovered = clip_detections.detections[0]
+
+ recovered_bounds = compute_bounds(recovered.geometry)
+ original_bounds = compute_bounds(annotation.sound_event.geometry)
+
+ # 1 ms of tolerance (spectrogram resolution)
+ assert recovered_bounds[0] == pytest.approx(original_bounds[0], abs=0.001)
+ assert recovered_bounds[2] == pytest.approx(original_bounds[2], abs=0.001)
+
+ # 1000 Hz of tolerance (spectrogram resolution)
+ assert recovered_bounds[1] == pytest.approx(original_bounds[1], abs=1000)
+ assert recovered_bounds[3] == pytest.approx(original_bounds[3], abs=1000)
diff --git a/tests/test_outputs/test_transform/test_transform.py b/tests/test_outputs/test_transform/test_transform.py
new file mode 100644
index 00000000..12399919
--- /dev/null
+++ b/tests/test_outputs/test_transform/test_transform.py
@@ -0,0 +1,122 @@
+from dataclasses import replace
+
+import numpy as np
+import torch
+from soundevent import data
+from soundevent.geometry import compute_bounds
+
+from batdetect2.outputs import build_output_transform
+from batdetect2.outputs.transforms import OutputTransform
+from batdetect2.postprocess.types import (
+ ClipDetections,
+ ClipDetectionsTensor,
+ Detection,
+)
+from batdetect2.targets import TargetConfig, build_roi_mapping
+from batdetect2.targets.types import TargetProtocol
+
+
+def _mock_clip_detections_tensor() -> ClipDetectionsTensor:
+ return ClipDetectionsTensor(
+ scores=torch.tensor([0.9], dtype=torch.float32),
+ # NOTE: Time is scaled by 1000
+ sizes=torch.tensor([[100, 1_000.0]], dtype=torch.float32),
+ class_scores=torch.tensor([[0.8, 0.2]], dtype=torch.float32),
+ times=torch.tensor([0.2], dtype=torch.float32),
+ frequencies=torch.tensor([60_000.0], dtype=torch.float32),
+ features=torch.tensor([[1.0, 2.0]], dtype=torch.float32),
+ )
+
+
+def _build_roi_mapper(targets: TargetProtocol):
+ config_obj = getattr(targets, "config", None)
+ target_config = (
+ config_obj if isinstance(config_obj, TargetConfig) else None
+ )
+ return build_roi_mapping(
+ config=(target_config.roi if target_config is not None else None),
+ )
+
+
+def test_shift_time_to_clip_start(sample_targets: TargetProtocol):
+ raw = _mock_clip_detections_tensor()
+ transform = build_output_transform(
+ targets=sample_targets,
+ roi_mapper=_build_roi_mapper(sample_targets),
+ )
+
+ transformed = transform.to_detections(raw, start_time=2.5)
+ start_time, _, end_time, _ = compute_bounds(transformed[0].geometry)
+
+ assert np.isclose(start_time, 2.7)
+ assert np.isclose(end_time, 2.8)
+
+
+def test_to_clip_detections_shifts_by_clip_start(
+ clip: data.Clip,
+ sample_targets: TargetProtocol,
+):
+ clip = clip.model_copy(update={"start_time": 2.5, "end_time": 3.0})
+ transform = build_output_transform(
+ targets=sample_targets,
+ roi_mapper=_build_roi_mapper(sample_targets),
+ )
+ raw = _mock_clip_detections_tensor()
+ shifted = transform.to_clip_detections(detections=raw, clip=clip)
+ start_time, _, end_time, _ = compute_bounds(shifted.detections[0].geometry)
+ assert np.isclose(start_time, 2.7)
+ assert np.isclose(end_time, 2.8)
+
+
+def test_detection_and_clip_transforms_applied_in_order(
+ clip: data.Clip,
+ sample_targets: TargetProtocol,
+):
+ clip = clip.model_copy(update={"start_time": 2.5, "end_time": 3.0})
+
+ detection_1 = Detection(
+ geometry=data.BoundingBox(coordinates=[0.1, 10_000, 0.2, 12_000]),
+ detection_score=0.5,
+ class_scores=np.array([0.9]),
+ features=np.array([1.0, 2.0]),
+ )
+ detection_2 = Detection(
+ geometry=data.BoundingBox(coordinates=[0.2, 10_000, 0.3, 12_000]),
+ detection_score=0.7,
+ class_scores=np.array([0.9]),
+ features=np.array([1.0, 2.0]),
+ )
+
+ def boost_score(detection: Detection) -> Detection:
+ return replace(
+ detection,
+ detection_score=detection.detection_score + 0.2,
+ )
+
+ def keep_high_score(detection: Detection) -> Detection | None:
+ if detection.detection_score < 0.8:
+ return None
+ return detection
+
+ def tag_clip_transform(prediction: ClipDetections) -> ClipDetections:
+ detections = [
+ replace(detection, detection_score=1.0)
+ for detection in prediction.detections
+ ]
+ return replace(prediction, detections=detections)
+
+ transform = OutputTransform(
+ targets=sample_targets,
+ roi_mapper=_build_roi_mapper(sample_targets),
+ detection_transform_steps=[boost_score, keep_high_score],
+ clip_transform_steps=[tag_clip_transform],
+ )
+ transformed = transform(
+ [ClipDetections(clip=clip, detections=[detection_1, detection_2])]
+ )[0]
+
+ assert len(transformed.detections) == 1
+ assert transformed.detections[0].detection_score == 1.0
+
+ start_time, _, _, _ = compute_bounds(transformed.detections[0].geometry)
+ assert np.isclose(start_time, 2.7)
diff --git a/tests/test_postprocessing/__init__.py b/tests/test_postprocessing/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_postprocessing/test_decoding.py b/tests/test_postprocessing/test_decoding.py
new file mode 100644
index 00000000..471c93af
--- /dev/null
+++ b/tests/test_postprocessing/test_decoding.py
@@ -0,0 +1,619 @@
+from pathlib import Path
+
+import numpy as np
+import pytest
+import xarray as xr
+from soundevent import data
+
+from batdetect2.outputs.transforms.decoding import (
+ DEFAULT_CLASSIFICATION_THRESHOLD,
+ convert_raw_prediction_to_sound_event_prediction,
+ convert_raw_predictions_to_clip_prediction,
+ get_class_tags,
+ get_generic_tags,
+ get_prediction_features,
+)
+from batdetect2.postprocess.types import Detection
+from batdetect2.targets.types import TargetProtocol
+
+
+@pytest.fixture
+def dummy_targets() -> TargetProtocol:
+ tag_map = {
+ "bat": [
+ data.Tag(term=data.term_from_key(key="species"), value="Myotis")
+ ],
+ "noise": [
+ data.Tag(term=data.term_from_key(key="category"), value="noise")
+ ],
+ "unknown": [
+ data.Tag(term=data.term_from_key(key="status"), value="uncertain")
+ ],
+ }
+
+ class DummyTargets(TargetProtocol):
+ class_names = [
+ "bat",
+ "noise",
+ "unknown",
+ ]
+
+ dimension_names = ["width", "height"]
+
+ detection_class_tags = [
+ data.Tag(
+ term=data.term_from_key(key="detector"), value="batdetect2"
+ )
+ ]
+
+ detection_class_name = "bat"
+
+ def filter(self, sound_event: data.SoundEventAnnotation):
+ return True
+
+ def transform(self, sound_event: data.SoundEventAnnotation):
+ return sound_event
+
+ def encode_class(
+ self, sound_event: data.SoundEventAnnotation
+ ) -> str | None:
+ return "bat"
+
+ def decode_class(self, class_label: str) -> list[data.Tag]:
+ return tag_map.get(class_label.lower(), [])
+
+ def encode_roi(self, sound_event: data.SoundEventAnnotation):
+ return np.array([0.0, 0.0]), np.array([0.0, 0.0])
+
+ def decode_roi(
+ self,
+ position,
+ size: np.ndarray,
+ class_name: str | None = None,
+ ):
+ time, freq = position
+ width, height = size
+ return data.BoundingBox(
+ coordinates=[
+ time - width / 2,
+ freq - height / 2,
+ time + width / 2,
+ freq + height / 2,
+ ]
+ )
+
+ t: TargetProtocol = DummyTargets()
+ return t
+
+
+@pytest.fixture
+def sample_recording() -> data.Recording:
+ """A sample soundevent Recording."""
+ return data.Recording(
+ path=Path("/path/to/recording.wav"),
+ duration=60.0,
+ channels=1,
+ samplerate=192000,
+ )
+
+
+@pytest.fixture
+def sample_clip(sample_recording) -> data.Clip:
+ """A sample soundevent Clip."""
+ return data.Clip(
+ recording=sample_recording,
+ start_time=10.0,
+ end_time=20.0,
+ )
+
+
+@pytest.fixture
+def sample_detection_dataset() -> xr.Dataset:
+ """Creates a sample detection dataset suitable for decoding."""
+ expected_times = np.array([20, 10])
+ expected_freqs = np.array([300, 200])
+ detection_coords = {
+ "time": ("detection", expected_times),
+ "frequency": ("detection", expected_freqs),
+ }
+
+ scores_data = np.array([0.9, 0.8], dtype=np.float64)
+ scores = xr.DataArray(
+ scores_data,
+ coords=detection_coords,
+ dims=["detection"],
+ name="scores",
+ )
+
+ dimensions_data = np.array([[7.0, 16.0], [3.0, 12.0]], dtype=np.float32)
+ dimensions = xr.DataArray(
+ dimensions_data,
+ coords={**detection_coords, "dimension": ["width", "height"]},
+ dims=["detection", "dimension"],
+ name="dimensions",
+ )
+
+ classes_data = np.array(
+ [[0.43, 0.85], [0.24, 0.66]],
+ dtype=np.float32,
+ )
+ classes = xr.DataArray(
+ classes_data,
+ coords={**detection_coords, "category": ["bat", "noise"]},
+ dims=["detection", "category"],
+ name="classes",
+ )
+
+ features_data = np.array(
+ [[7.0, 16.0, 25.0, 34.0], [3.0, 12.0, 21.0, 30.0]], dtype=np.float32
+ )
+ features = xr.DataArray(
+ features_data,
+ coords={**detection_coords, "feature": ["f0", "f1", "f2", "f3"]},
+ dims=["detection", "feature"],
+ name="features",
+ )
+
+ ds = xr.Dataset(
+ {
+ "scores": scores,
+ "dimensions": dimensions,
+ "classes": classes,
+ "features": features,
+ },
+ coords=detection_coords,
+ )
+ return ds
+
+
+@pytest.fixture
+def empty_detection_dataset() -> xr.Dataset:
+ """Creates an empty detection dataset with correct structure."""
+ detection_coords = {
+ "time": ("detection", np.array([], dtype=np.float64)),
+ "frequency": ("detection", np.array([], dtype=np.float64)),
+ }
+ scores = xr.DataArray(
+ np.array([], dtype=np.float64),
+ coords=detection_coords,
+ dims=["detection"],
+ name="scores",
+ )
+ dimensions = xr.DataArray(
+ np.empty((0, 2), dtype=np.float32),
+ coords={**detection_coords, "dimension": ["width", "height"]},
+ dims=["detection", "dimension"],
+ name="dimensions",
+ )
+ classes = xr.DataArray(
+ np.empty((0, 2), dtype=np.float32),
+ coords={**detection_coords, "category": ["bat", "noise"]},
+ dims=["detection", "category"],
+ name="classes",
+ )
+ features = xr.DataArray(
+ np.empty((0, 4), dtype=np.float32),
+ coords={**detection_coords, "feature": ["f0", "f1", "f2", "f3"]},
+ dims=["detection", "feature"],
+ name="features",
+ )
+ return xr.Dataset(
+ {
+ "scores": scores,
+ "dimensions": dimensions,
+ "classes": classes,
+ "features": features,
+ },
+ coords=detection_coords,
+ )
+
+
+@pytest.fixture
+def sample_raw_predictions() -> list[Detection]:
+ """Manually crafted RawPrediction objects using the actual type."""
+
+ pred1_classes = xr.DataArray(
+ [0.43, 0.85], coords={"category": ["bat", "noise"]}, dims=["category"]
+ )
+ pred1_features = xr.DataArray(
+ [7.0, 16.0, 25.0, 34.0],
+ coords={"feature": ["f0", "f1", "f2", "f3"]},
+ dims=["feature"],
+ )
+ pred1 = Detection(
+ detection_score=0.9,
+ geometry=data.BoundingBox(
+ coordinates=[
+ 20 - 7 / 2,
+ 300 - 16 / 2,
+ 20 + 7 / 2,
+ 300 + 16 / 2,
+ ]
+ ),
+ class_scores=pred1_classes.values,
+ features=pred1_features.values,
+ )
+
+ pred2_classes = xr.DataArray(
+ [0.24, 0.66], coords={"category": ["bat", "noise"]}, dims=["category"]
+ )
+ pred2_features = xr.DataArray(
+ [3.0, 12.0, 21.0, 30.0],
+ coords={"feature": ["f0", "f1", "f2", "f3"]},
+ dims=["feature"],
+ )
+ pred2 = Detection(
+ detection_score=0.8,
+ geometry=data.BoundingBox(
+ coordinates=[
+ 10 - 3 / 2,
+ 200 - 12 / 2,
+ 10 + 3 / 2,
+ 200 + 12 / 2,
+ ]
+ ),
+ class_scores=pred2_classes.values,
+ features=pred2_features.values,
+ )
+
+ pred3_classes = xr.DataArray(
+ [0.05, 0.02], coords={"category": ["bat", "noise"]}, dims=["category"]
+ )
+ pred3_features = xr.DataArray(
+ [1.0, 2.0, 3.0, 4.0],
+ coords={"feature": ["f0", "f1", "f2", "f3"]},
+ dims=["feature"],
+ )
+ pred3 = Detection(
+ detection_score=0.15,
+ geometry=data.BoundingBox(
+ coordinates=[
+ 5.0,
+ 50.0,
+ 6.0,
+ 60.0,
+ ]
+ ),
+ class_scores=pred3_classes.values,
+ features=pred3_features.values,
+ )
+ return [pred1, pred2, pred3]
+
+
+def test_convert_raw_to_sound_event_basic(
+ sample_raw_predictions: list[Detection],
+ sample_recording: data.Recording,
+ dummy_targets: TargetProtocol,
+):
+ """Test basic conversion, default threshold, multi-label."""
+
+ raw_pred = sample_raw_predictions[0]
+
+ se_pred = convert_raw_prediction_to_sound_event_prediction(
+ raw_prediction=raw_pred,
+ recording=sample_recording,
+ targets=dummy_targets,
+ )
+
+ assert isinstance(se_pred, data.SoundEventPrediction)
+ assert se_pred.score == raw_pred.detection_score
+
+ se = se_pred.sound_event
+ assert isinstance(se, data.SoundEvent)
+ assert se.recording == sample_recording
+ assert isinstance(se.geometry, data.BoundingBox)
+ assert se.geometry == raw_pred.geometry
+ assert len(se.features) == len(raw_pred.features)
+
+ feat_dict = {f.term.name: f.value for f in se.features}
+ assert "batdetect2:f0" in feat_dict and isinstance(
+ feat_dict["batdetect2:f0"], float
+ )
+ assert feat_dict["batdetect2:f0"] == 7.0
+
+ generic_tags = dummy_targets.detection_class_tags
+ expected_tags = {
+ (generic_tags[0].term.name, generic_tags[0].value, 0.9),
+ ("category", "noise", 0.85),
+ ("dwc:scientificName", "Myotis", 0.43),
+ }
+ actual_tags = {
+ (pt.tag.term.name, pt.tag.value, pt.score) for pt in se_pred.tags
+ }
+ assert actual_tags == expected_tags
+
+
+def test_convert_raw_to_sound_event_thresholding(
+ sample_raw_predictions: list[Detection],
+ sample_recording: data.Recording,
+ dummy_targets: TargetProtocol,
+):
+ """Test effect of classification threshold."""
+ raw_pred = sample_raw_predictions[0]
+ high_threshold = 0.5
+
+ se_pred = convert_raw_prediction_to_sound_event_prediction(
+ raw_prediction=raw_pred,
+ recording=sample_recording,
+ targets=dummy_targets,
+ classification_threshold=high_threshold,
+ top_class_only=False,
+ )
+
+ generic_tags = dummy_targets.detection_class_tags
+ expected_tags = {
+ (generic_tags[0].term.name, generic_tags[0].value, 0.9),
+ ("category", "noise", 0.85),
+ }
+ actual_tags = {
+ (pt.tag.term.name, pt.tag.value, pt.score) for pt in se_pred.tags
+ }
+ assert actual_tags == expected_tags
+
+
+def test_convert_raw_to_sound_event_no_threshold(
+ sample_raw_predictions: list[Detection],
+ sample_recording: data.Recording,
+ dummy_targets: TargetProtocol,
+):
+ """Test when classification_threshold is None."""
+ raw_pred = sample_raw_predictions[2]
+
+ se_pred = convert_raw_prediction_to_sound_event_prediction(
+ raw_prediction=raw_pred,
+ recording=sample_recording,
+ targets=dummy_targets,
+ classification_threshold=None,
+ top_class_only=False,
+ )
+
+ generic_tags = dummy_targets.detection_class_tags
+ expected_tags = {
+ (generic_tags[0].term.name, generic_tags[0].value, 0.15),
+ ("dwc:scientificName", "Myotis", 0.05),
+ ("category", "noise", 0.02),
+ }
+ actual_tags = {
+ (pt.tag.term.name, pt.tag.value, pt.score) for pt in se_pred.tags
+ }
+ assert actual_tags == expected_tags
+
+
+def test_convert_raw_to_sound_event_top_class(
+ sample_raw_predictions: list[Detection],
+ sample_recording: data.Recording,
+ dummy_targets: TargetProtocol,
+):
+ """Test top_class_only=True behavior."""
+ raw_pred = sample_raw_predictions[0]
+
+ se_pred = convert_raw_prediction_to_sound_event_prediction(
+ raw_prediction=raw_pred,
+ recording=sample_recording,
+ targets=dummy_targets,
+ classification_threshold=DEFAULT_CLASSIFICATION_THRESHOLD,
+ top_class_only=True,
+ )
+
+ generic_tags = dummy_targets.detection_class_tags
+ expected_tags = {
+ (generic_tags[0].term.name, generic_tags[0].value, 0.9),
+ ("category", "noise", 0.85),
+ }
+ actual_tags = {
+ (pt.tag.term.name, pt.tag.value, pt.score) for pt in se_pred.tags
+ }
+ assert actual_tags == expected_tags
+
+
+def test_convert_raw_to_sound_event_all_below_threshold(
+ sample_raw_predictions: list[Detection],
+ sample_recording: data.Recording,
+ dummy_targets: TargetProtocol,
+):
+ """Test when all class scores are below the default threshold."""
+ raw_pred = sample_raw_predictions[2]
+
+ se_pred = convert_raw_prediction_to_sound_event_prediction(
+ raw_prediction=raw_pred,
+ recording=sample_recording,
+ targets=dummy_targets,
+ classification_threshold=DEFAULT_CLASSIFICATION_THRESHOLD,
+ top_class_only=False,
+ )
+
+ generic_tags = dummy_targets.detection_class_tags
+ expected_tags = {
+ (generic_tags[0].term.name, generic_tags[0].value, 0.15),
+ }
+ actual_tags = {
+ (pt.tag.term.name, pt.tag.value, pt.score) for pt in se_pred.tags
+ }
+ assert actual_tags == expected_tags
+
+
+def test_convert_raw_list_to_clip_basic(
+ sample_raw_predictions: list[Detection],
+ sample_clip: data.Clip,
+ dummy_targets: TargetProtocol,
+):
+ """Test converting a list of RawPredictions to a ClipPrediction."""
+ clip_pred = convert_raw_predictions_to_clip_prediction(
+ raw_predictions=sample_raw_predictions,
+ clip=sample_clip,
+ targets=dummy_targets,
+ classification_threshold=DEFAULT_CLASSIFICATION_THRESHOLD,
+ top_class_only=False,
+ )
+
+ assert isinstance(clip_pred, data.ClipPrediction)
+ assert clip_pred.clip == sample_clip
+ assert len(clip_pred.sound_events) == len(sample_raw_predictions)
+
+ assert clip_pred.sound_events[0].score == (
+ sample_raw_predictions[0].detection_score
+ )
+ assert clip_pred.sound_events[1].score == (
+ sample_raw_predictions[1].detection_score
+ )
+ assert clip_pred.sound_events[2].score == (
+ sample_raw_predictions[2].detection_score
+ )
+
+ se_pred3_tags = {
+ (pt.tag.term.name, pt.tag.value, pt.score)
+ for pt in clip_pred.sound_events[2].tags
+ }
+ generic_tags = dummy_targets.detection_class_tags
+ expected_tags3 = {
+ (generic_tags[0].term.name, generic_tags[0].value, 0.15),
+ }
+ assert se_pred3_tags == expected_tags3
+
+
+def test_convert_raw_list_to_clip_empty(sample_clip, dummy_targets):
+ """Test converting an empty list of RawPredictions."""
+ clip_pred = convert_raw_predictions_to_clip_prediction(
+ raw_predictions=[],
+ clip=sample_clip,
+ targets=dummy_targets,
+ )
+
+ assert isinstance(clip_pred, data.ClipPrediction)
+ assert clip_pred.clip == sample_clip
+ assert len(clip_pred.sound_events) == 0
+
+
+def test_convert_raw_list_to_clip_passes_args(
+ sample_raw_predictions: list[Detection],
+ sample_clip: data.Clip,
+ dummy_targets: TargetProtocol,
+):
+ """Test that arguments like top_class_only are passed through."""
+
+ clip_pred = convert_raw_predictions_to_clip_prediction(
+ raw_predictions=sample_raw_predictions,
+ clip=sample_clip,
+ targets=dummy_targets,
+ classification_threshold=DEFAULT_CLASSIFICATION_THRESHOLD,
+ top_class_only=True,
+ )
+
+ assert len(clip_pred.sound_events) == 3
+
+ se_pred1_tags = {
+ (pt.tag.term.name, pt.tag.value, pt.score)
+ for pt in clip_pred.sound_events[0].tags
+ }
+ generic_tags = dummy_targets.detection_class_tags
+ expected_tags1 = {
+ (generic_tags[0].term.name, generic_tags[0].value, 0.9),
+ ("category", "noise", 0.85),
+ }
+ assert se_pred1_tags == expected_tags1
+
+
+def test_get_generic_tags_basic(dummy_targets: TargetProtocol):
+ """Test creation of generic tags with score."""
+ detection_score = 0.75
+ generic_tags = dummy_targets.detection_class_tags
+ predicted_tags = get_generic_tags(
+ detection_score=detection_score, generic_class_tags=generic_tags
+ )
+ assert len(predicted_tags) == len(generic_tags)
+ for predicted_tag in predicted_tags:
+ assert isinstance(predicted_tag, data.PredictedTag)
+ assert predicted_tag.score == detection_score
+ assert predicted_tag.tag in generic_tags
+
+
+def test_get_prediction_features_basic():
+ """Test conversion of feature DataArray to list of Features."""
+ feature_data = xr.DataArray(
+ [1.1, 2.2, 3.3],
+ coords={"feature": ["feat1", "feat2", "feat3"]},
+ dims=["feature"],
+ )
+ features = get_prediction_features(feature_data.values)
+ assert len(features) == 3
+ for feature, feat_name, feat_value in zip(
+ features,
+ ["f0", "f1", "f2"],
+ [1.1, 2.2, 3.3],
+ strict=True,
+ ):
+ assert isinstance(feature, data.Feature)
+ assert feature.term.name == f"batdetect2:{feat_name}"
+ assert feature.value == feat_value
+
+
+def test_get_class_tags_basic(dummy_targets):
+ """Test creation of class tags based on scores and decoder."""
+ class_scores = xr.DataArray(
+ [0.6, 0.2, 0.9],
+ coords={"category": ["bat", "noise", "unknown"]},
+ dims=["category"],
+ )
+ predicted_tags = get_class_tags(
+ class_scores=class_scores.values,
+ targets=dummy_targets,
+ )
+ assert len(predicted_tags) == 3
+ tag_values = [pt.tag.value for pt in predicted_tags]
+ tag_scores = [pt.score for pt in predicted_tags]
+
+ assert "Myotis" in tag_values
+ assert "noise" in tag_values
+ assert "uncertain" in tag_values
+ assert 0.6 in tag_scores
+ assert 0.2 in tag_scores
+ assert 0.9 in tag_scores
+
+
+def test_get_class_tags_thresholding(dummy_targets):
+ """Test class tag creation with a threshold."""
+ class_scores = xr.DataArray(
+ [0.6, 0.2, 0.9],
+ coords={"category": ["bat", "noise", "unknown"]},
+ dims=["category"],
+ )
+ threshold = 0.5
+ predicted_tags = get_class_tags(
+ class_scores=class_scores.values,
+ targets=dummy_targets,
+ threshold=threshold,
+ )
+
+ assert len(predicted_tags) == 2
+ tag_values = [pt.tag.value for pt in predicted_tags]
+ assert "Myotis" in tag_values
+ assert "noise" not in tag_values
+ assert "uncertain" in tag_values
+
+
+def test_get_class_tags_top_class_only(dummy_targets):
+ """Test class tag creation with top_class_only."""
+ class_scores = xr.DataArray(
+ [0.6, 0.2, 0.9],
+ coords={"category": ["bat", "noise", "unknown"]},
+ dims=["category"],
+ )
+ predicted_tags = get_class_tags(
+ class_scores=class_scores.values,
+ targets=dummy_targets,
+ top_class_only=True,
+ )
+
+ assert len(predicted_tags) == 1
+ assert predicted_tags[0].tag.value == "uncertain"
+ assert predicted_tags[0].score == 0.9
+
+
+def test_get_class_tags_empty(dummy_targets):
+ """Test with empty class scores."""
+ class_scores = xr.DataArray([], coords={"category": []}, dims=["category"])
+ predicted_tags = get_class_tags(
+ class_scores=class_scores.values,
+ targets=dummy_targets,
+ )
+ assert len(predicted_tags) == 0
diff --git a/tests/test_postprocessing/test_extraction.py b/tests/test_postprocessing/test_extraction.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_postprocessing/test_remapping.py b/tests/test_postprocessing/test_remapping.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_preprocessing/__init__.py b/tests/test_preprocessing/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_preprocessing/test_audio.py b/tests/test_preprocessing/test_audio.py
new file mode 100644
index 00000000..a7963eef
--- /dev/null
+++ b/tests/test_preprocessing/test_audio.py
@@ -0,0 +1,203 @@
+"""Tests for audio-level preprocessing transforms.
+
+Covers :mod:`batdetect2.preprocess.audio` and the shared helper functions
+in :mod:`batdetect2.preprocess.common`.
+"""
+
+import pathlib
+import uuid
+
+import numpy as np
+import pytest
+import soundfile as sf
+import torch
+from soundevent import data
+
+from batdetect2.audio import AudioConfig
+from batdetect2.preprocess.audio import (
+ CenterAudio,
+ CenterAudioConfig,
+ FixDuration,
+ FixDurationConfig,
+ ScaleAudio,
+ ScaleAudioConfig,
+ build_audio_transform,
+)
+from batdetect2.preprocess.common import center_tensor, peak_normalize
+
+SAMPLERATE = 256_000
+
+
+def create_dummy_wave(
+ samplerate: int,
+ duration: float,
+ num_channels: int = 1,
+ freq: float = 440.0,
+ amplitude: float = 0.5,
+ dtype: type = np.float32,
+) -> np.ndarray:
+ """Generate a simple sine-wave waveform as a NumPy array."""
+ t = np.linspace(
+ 0.0, duration, int(samplerate * duration), endpoint=False, dtype=dtype
+ )
+ wave = amplitude * np.sin(2 * np.pi * freq * t)
+ if num_channels > 1:
+ wave = np.stack([wave] * num_channels, axis=0)
+ return wave.astype(dtype)
+
+
+@pytest.fixture
+def dummy_wav_path(tmp_path: pathlib.Path) -> pathlib.Path:
+ """Create a dummy 2-channel WAV file and return its path."""
+ samplerate = 48000
+ duration = 2.0
+ num_channels = 2
+ wave_data = create_dummy_wave(samplerate, duration, num_channels)
+ file_path = tmp_path / f"{uuid.uuid4()}.wav"
+ sf.write(file_path, wave_data.T, samplerate, format="WAV", subtype="FLOAT")
+ return file_path
+
+
+@pytest.fixture
+def dummy_recording(dummy_wav_path: pathlib.Path) -> data.Recording:
+ """Create a Recording object pointing to the dummy WAV file."""
+ return data.Recording.from_file(dummy_wav_path)
+
+
+@pytest.fixture
+def dummy_clip(dummy_recording: data.Recording) -> data.Clip:
+ """Create a Clip object from the dummy recording."""
+ return data.Clip(
+ recording=dummy_recording,
+ start_time=0.5,
+ end_time=1.5,
+ )
+
+
+@pytest.fixture
+def default_audio_config() -> AudioConfig:
+ return AudioConfig()
+
+
+def test_center_tensor_zero_mean():
+ """Output tensor should have a mean very close to zero."""
+ wav = torch.tensor([1.0, 2.0, 3.0, 4.0])
+ result = center_tensor(wav)
+ assert result.mean().abs().item() < 1e-5
+
+
+def test_center_tensor_preserves_shape():
+ wav = torch.randn(3, 1000)
+ result = center_tensor(wav)
+ assert result.shape == wav.shape
+
+
+def test_peak_normalize_max_is_one():
+ """After peak normalisation, the maximum absolute value should be 1."""
+ wav = torch.tensor([0.1, -0.4, 0.2, 0.8, -0.3])
+ result = peak_normalize(wav)
+ assert abs(result.abs().max().item() - 1.0) < 1e-6
+
+
+def test_peak_normalize_zero_tensor_unchanged():
+ """A zero tensor should be returned unchanged (no division by zero)."""
+ wav = torch.zeros(100)
+ result = peak_normalize(wav)
+ assert (result == 0).all()
+
+
+def test_peak_normalize_preserves_shape():
+ wav = torch.randn(2, 512)
+ result = peak_normalize(wav)
+ assert result.shape == wav.shape
+
+
+def test_center_audio_forward_zero_mean():
+ module = CenterAudio()
+ wav = torch.tensor([1.0, 3.0, 5.0])
+ result = module(wav)
+ assert result.mean().abs().item() < 1e-5
+
+
+def test_center_audio_from_config():
+ config = CenterAudioConfig()
+ module = CenterAudio.from_config(config, samplerate=SAMPLERATE)
+ assert isinstance(module, CenterAudio)
+
+
+def test_scale_audio_peak_normalises_to_one():
+ """ScaleAudio.forward should scale the peak absolute value to 1."""
+ module = ScaleAudio()
+ wav = torch.tensor([0.0, 0.25, 0.1])
+ result = module(wav)
+ assert abs(result.abs().max().item() - 1.0) < 1e-6
+
+
+def test_scale_audio_handles_zero_tensor():
+ """ScaleAudio should not raise on a zero tensor."""
+ module = ScaleAudio()
+ wav = torch.zeros(100)
+ result = module(wav)
+ assert (result == 0).all()
+
+
+def test_scale_audio_from_config():
+ config = ScaleAudioConfig()
+ module = ScaleAudio.from_config(config, samplerate=SAMPLERATE)
+ assert isinstance(module, ScaleAudio)
+
+
+def test_fix_duration_truncates_long_input():
+ """Waveform longer than target should be truncated to the target length."""
+ target_samples = int(SAMPLERATE * 0.5)
+ module = FixDuration(samplerate=SAMPLERATE, duration=0.5)
+ wav = torch.randn(target_samples + 1000)
+ result = module(wav)
+ assert result.shape[-1] == target_samples
+
+
+def test_fix_duration_pads_short_input():
+ """Waveform shorter than target should be zero-padded to the target length."""
+ target_samples = int(SAMPLERATE * 0.5)
+ module = FixDuration(samplerate=SAMPLERATE, duration=0.5)
+ short_wav = torch.randn(target_samples - 100)
+ result = module(short_wav)
+ assert result.shape[-1] == target_samples
+ # Padded region should be zero
+ assert (result[target_samples - 100 :] == 0).all()
+
+
+def test_fix_duration_passthrough_exact_length():
+ """Waveform with exactly the right length should be returned unchanged."""
+ target_samples = int(SAMPLERATE * 0.5)
+ module = FixDuration(samplerate=SAMPLERATE, duration=0.5)
+ wav = torch.randn(target_samples)
+ result = module(wav)
+ assert result.shape[-1] == target_samples
+ assert torch.equal(result, wav)
+
+
+def test_fix_duration_from_config():
+ """FixDurationConfig should produce a FixDuration with the correct length."""
+ config = FixDurationConfig(duration=0.256)
+ module = FixDuration.from_config(config, samplerate=SAMPLERATE)
+ assert isinstance(module, FixDuration)
+ assert module.length == int(SAMPLERATE * 0.256)
+
+
+def test_build_audio_transform_center_audio():
+ config = CenterAudioConfig()
+ module = build_audio_transform(config, samplerate=SAMPLERATE)
+ assert isinstance(module, CenterAudio)
+
+
+def test_build_audio_transform_scale_audio():
+ config = ScaleAudioConfig()
+ module = build_audio_transform(config, samplerate=SAMPLERATE)
+ assert isinstance(module, ScaleAudio)
+
+
+def test_build_audio_transform_fix_duration():
+ config = FixDurationConfig(duration=0.5)
+ module = build_audio_transform(config, samplerate=SAMPLERATE)
+ assert isinstance(module, FixDuration)
diff --git a/tests/test_preprocessing/test_preprocessor.py b/tests/test_preprocessing/test_preprocessor.py
new file mode 100644
index 00000000..35899d76
--- /dev/null
+++ b/tests/test_preprocessing/test_preprocessor.py
@@ -0,0 +1,199 @@
+"""Integration and unit tests for the Preprocessor pipeline.
+
+Covers :mod:`batdetect2.preprocess.preprocessor` — construction,
+pipeline output shape/dtype, the ``process_numpy`` helper, attribute
+values, output frame rate, and a round-trip YAML → config → build test.
+"""
+
+import pathlib
+
+import numpy as np
+import torch
+
+from batdetect2.preprocess.audio import FixDurationConfig
+from batdetect2.preprocess.config import PreprocessingConfig
+from batdetect2.preprocess.preprocessor import (
+ Preprocessor,
+ build_preprocessor,
+ compute_output_samplerate,
+)
+from batdetect2.preprocess.spectrogram import (
+ FrequencyConfig,
+ PcenConfig,
+ ResizeConfig,
+ SpectralMeanSubtractionConfig,
+ STFTConfig,
+)
+
+SAMPLERATE = 256_000
+CLIP_SAMPLES = int(SAMPLERATE * 0.256)
+
+
+def make_sine_wav(
+ samplerate: int = SAMPLERATE,
+ duration: float = 0.256,
+ freq: float = 40_000.0,
+) -> torch.Tensor:
+ """Return a single-channel sine-wave tensor."""
+ t = torch.linspace(0.0, duration, int(samplerate * duration))
+ return torch.sin(2 * torch.pi * freq * t)
+
+
+def test_build_preprocessor_returns_protocol():
+ """build_preprocessor should return a Preprocessor instance."""
+ preprocessor = build_preprocessor()
+ assert isinstance(preprocessor, Preprocessor)
+
+
+def test_build_preprocessor_with_default_config():
+ """build_preprocessor() with no arguments should not raise."""
+ preprocessor = build_preprocessor(input_samplerate=SAMPLERATE)
+ assert preprocessor is not None
+
+
+def test_build_preprocessor_with_explicit_config():
+ config = PreprocessingConfig(
+ stft=STFTConfig(window_duration=0.002, window_overlap=0.75),
+ frequencies=FrequencyConfig(min_freq=10_000, max_freq=120_000),
+ size=ResizeConfig(height=128, resize_factor=0.5),
+ spectrogram_transforms=[PcenConfig(), SpectralMeanSubtractionConfig()],
+ )
+ preprocessor = build_preprocessor(config, input_samplerate=SAMPLERATE)
+ assert isinstance(preprocessor, Preprocessor)
+
+
+def test_preprocessor_output_is_2d():
+ """The preprocessor output should be a 2-D tensor (freq_bins × time_frames)."""
+ preprocessor = build_preprocessor(input_samplerate=SAMPLERATE)
+ wav = make_sine_wav()
+ result = preprocessor(wav)
+ assert result.ndim == 2
+
+
+def test_preprocessor_output_height_matches_config():
+ """Output height should match the ResizeConfig.height setting."""
+ config = PreprocessingConfig(
+ size=ResizeConfig(height=64, resize_factor=0.5)
+ )
+ preprocessor = build_preprocessor(config, input_samplerate=SAMPLERATE)
+ wav = make_sine_wav()
+ result = preprocessor(wav)
+ assert result.shape[0] == 64
+
+
+def test_preprocessor_output_dtype_float32():
+ """Output tensor should be float32."""
+ preprocessor = build_preprocessor(input_samplerate=SAMPLERATE)
+ wav = make_sine_wav()
+ result = preprocessor(wav)
+ assert result.dtype == torch.float32
+
+
+def test_preprocessor_output_is_finite():
+ """Output spectrogram should contain no NaN or Inf values."""
+ preprocessor = build_preprocessor(input_samplerate=SAMPLERATE)
+ wav = make_sine_wav()
+ result = preprocessor(wav)
+ assert torch.isfinite(result).all()
+
+
+def test_preprocessor_process_numpy_accepts_ndarray():
+ """process_numpy should accept a NumPy array and return a NumPy array."""
+ preprocessor = build_preprocessor(input_samplerate=SAMPLERATE)
+ wav_np = make_sine_wav().numpy()
+ result = preprocessor.process_numpy(wav_np)
+ assert isinstance(result, np.ndarray)
+
+
+def test_preprocessor_process_numpy_matches_forward():
+ """process_numpy and forward should give numerically identical results."""
+ preprocessor = build_preprocessor(input_samplerate=SAMPLERATE)
+ wav = make_sine_wav()
+ result_pt = preprocessor(wav).numpy()
+ result_np = preprocessor.process_numpy(wav.numpy())
+ np.testing.assert_array_almost_equal(result_pt, result_np)
+
+
+def test_preprocessor_min_max_freq_attributes():
+ """min_freq and max_freq should match the FrequencyConfig values."""
+ config = PreprocessingConfig(
+ frequencies=FrequencyConfig(min_freq=15_000, max_freq=100_000)
+ )
+ preprocessor = build_preprocessor(config, input_samplerate=SAMPLERATE)
+ assert preprocessor.min_freq == 15_000
+ assert preprocessor.max_freq == 100_000
+
+
+def test_preprocessor_input_samplerate_attribute():
+ preprocessor = build_preprocessor(input_samplerate=SAMPLERATE)
+ assert preprocessor.input_samplerate == SAMPLERATE
+
+
+def test_compute_output_samplerate_defaults():
+ """At default settings, output_samplerate should equal 1000 fps."""
+ config = PreprocessingConfig()
+ rate = compute_output_samplerate(config, input_samplerate=SAMPLERATE)
+ assert abs(rate - 1000.0) < 1e-6
+
+
+def test_preprocessor_output_samplerate_attribute_matches_compute():
+ config = PreprocessingConfig()
+ preprocessor = build_preprocessor(config, input_samplerate=SAMPLERATE)
+ expected = compute_output_samplerate(config, input_samplerate=SAMPLERATE)
+ assert abs(preprocessor.output_samplerate - expected) < 1e-6
+
+
+def test_generate_spectrogram_shape():
+ """generate_spectrogram should return the full STFT without crop or resize."""
+ config = PreprocessingConfig()
+ preprocessor = build_preprocessor(config, input_samplerate=SAMPLERATE)
+ wav = make_sine_wav()
+ spec = preprocessor.generate_spectrogram(wav)
+ # Full STFT: n_fft//2 + 1 = 257 bins at defaults
+ assert spec.shape[0] == 257
+
+
+def test_generate_spectrogram_larger_than_forward():
+ """Raw spectrogram should have more frequency bins than the processed output."""
+ preprocessor = build_preprocessor(input_samplerate=SAMPLERATE)
+ wav = make_sine_wav()
+ raw = preprocessor.generate_spectrogram(wav)
+ processed = preprocessor(wav)
+ assert raw.shape[0] > processed.shape[0]
+
+
+def test_preprocessor_with_fix_duration_audio_transform():
+ """A FixDuration audio transform should produce consistent output shapes."""
+ config = PreprocessingConfig(
+ audio_transforms=[FixDurationConfig(duration=0.256)],
+ )
+ preprocessor = build_preprocessor(config, input_samplerate=SAMPLERATE)
+ for n_samples in [CLIP_SAMPLES - 1000, CLIP_SAMPLES, CLIP_SAMPLES + 1000]:
+ wav = torch.randn(n_samples)
+ result = preprocessor(wav)
+ assert result.ndim == 2
+
+
+def test_preprocessor_yaml_roundtrip(tmp_path: pathlib.Path):
+ """PreprocessingConfig serialised to YAML and reloaded should produce
+ a functionally identical preprocessor."""
+ config = PreprocessingConfig(
+ stft=STFTConfig(window_duration=0.002, window_overlap=0.75),
+ frequencies=FrequencyConfig(min_freq=10_000, max_freq=120_000),
+ size=ResizeConfig(height=128, resize_factor=0.5),
+ )
+
+ yaml_path = tmp_path / "preprocess_config.yaml"
+ yaml_path.write_text(config.to_yaml_string())
+
+ loaded_config = PreprocessingConfig.load(yaml_path)
+
+ preprocessor = build_preprocessor(
+ loaded_config, input_samplerate=SAMPLERATE
+ )
+ wav = make_sine_wav()
+ result = preprocessor(wav)
+
+ assert result.ndim == 2
+ assert result.shape[0] == 128
+ assert torch.isfinite(result).all()
diff --git a/tests/test_preprocessing/test_spectrogram.py b/tests/test_preprocessing/test_spectrogram.py
new file mode 100644
index 00000000..847010ef
--- /dev/null
+++ b/tests/test_preprocessing/test_spectrogram.py
@@ -0,0 +1,271 @@
+"""Tests for spectrogram-level preprocessing transforms.
+
+Covers :mod:`batdetect2.preprocess.spectrogram` — STFT configuration,
+frequency cropping, PCEN, spectral mean subtraction, amplitude scaling,
+peak normalisation, and resizing.
+"""
+
+import torch
+
+from batdetect2.preprocess.spectrogram import (
+ PCEN,
+ FrequencyConfig,
+ FrequencyCrop,
+ PcenConfig,
+ PeakNormalize,
+ PeakNormalizeConfig,
+ ResizeConfig,
+ ResizeSpec,
+ ScaleAmplitude,
+ ScaleAmplitudeConfig,
+ SpectralMeanSubtraction,
+ SpectralMeanSubtractionConfig,
+ STFTConfig,
+ _spec_params_from_config,
+ build_spectrogram_builder,
+ build_spectrogram_crop,
+ build_spectrogram_resizer,
+ build_spectrogram_transform,
+)
+
+SAMPLERATE = 256_000
+
+
+def test_stft_config_defaults_give_correct_params():
+ """Default STFTConfig at 256 kHz should give n_fft=512, hop_length=128."""
+ config = STFTConfig()
+ n_fft, hop_length = _spec_params_from_config(config, samplerate=SAMPLERATE)
+ assert n_fft == 512
+ assert hop_length == 128
+
+
+def test_stft_config_custom_params():
+ """Custom window duration and overlap should produce the expected sizes."""
+ config = STFTConfig(window_duration=0.004, window_overlap=0.5)
+ n_fft, hop_length = _spec_params_from_config(config, samplerate=SAMPLERATE)
+ assert n_fft == 1024
+ assert hop_length == 512
+
+
+def test_spectrogram_builder_output_shape():
+ """Builder should produce a spectrogram with the expected number of bins."""
+ config = STFTConfig()
+ n_fft, _ = _spec_params_from_config(config, samplerate=SAMPLERATE)
+ expected_freq_bins = n_fft // 2 + 1 # 257 at defaults
+
+ builder = build_spectrogram_builder(config, samplerate=SAMPLERATE)
+ n_samples = SAMPLERATE # 1 second of audio
+ wav = torch.randn(n_samples)
+ spec = builder(wav)
+
+ assert spec.ndim == 2
+ assert spec.shape[0] == expected_freq_bins
+
+
+def test_spectrogram_builder_output_is_nonnegative():
+ """Amplitude spectrogram values should all be >= 0."""
+ config = STFTConfig()
+ builder = build_spectrogram_builder(config, samplerate=SAMPLERATE)
+ wav = torch.randn(SAMPLERATE)
+ spec = builder(wav)
+ assert (spec >= 0).all()
+
+
+def test_frequency_crop_output_shape():
+ """FrequencyCrop should reduce the number of frequency bins."""
+ config = STFTConfig()
+ n_fft, _ = _spec_params_from_config(config, samplerate=SAMPLERATE)
+
+ crop = FrequencyCrop(
+ samplerate=SAMPLERATE,
+ n_fft=n_fft,
+ min_freq=10_000,
+ max_freq=120_000,
+ )
+ spec = torch.ones(n_fft // 2 + 1, 100)
+ cropped = crop(spec)
+
+ assert cropped.ndim == 2
+ # Must be smaller than the full spectrogram
+ assert cropped.shape[0] < spec.shape[0]
+ assert cropped.shape[1] == 100 # time axis unchanged
+
+
+def test_frequency_crop_build_from_config():
+ """build_spectrogram_crop should return a working FrequencyCrop."""
+ freq_config = FrequencyConfig(min_freq=10_000, max_freq=120_000)
+ stft_config = STFTConfig()
+ crop = build_spectrogram_crop(
+ freq_config, stft=stft_config, samplerate=SAMPLERATE
+ )
+ assert isinstance(crop, FrequencyCrop)
+
+
+def test_frequency_crop_no_crop_when_bounds_are_none():
+ """FrequencyCrop with no bounds should return the full spectrogram."""
+ config = STFTConfig()
+ n_fft, _ = _spec_params_from_config(config, samplerate=SAMPLERATE)
+
+ crop = FrequencyCrop(samplerate=SAMPLERATE, n_fft=n_fft)
+ spec = torch.ones(n_fft // 2 + 1, 100)
+ cropped = crop(spec)
+
+ assert cropped.shape == spec.shape
+
+
+def test_pcen_output_shape_preserved():
+ """PCEN should not change the shape of the spectrogram."""
+ config = PcenConfig()
+ pcen = PCEN.from_config(config, samplerate=SAMPLERATE)
+ spec = torch.rand(64, 200) + 1e-6 # positive values
+ result = pcen(spec)
+ assert result.shape == spec.shape
+
+
+def test_pcen_output_is_finite():
+ """PCEN applied to a well-formed spectrogram should produce no NaN or Inf."""
+ config = PcenConfig()
+ pcen = PCEN.from_config(config, samplerate=SAMPLERATE)
+ spec = torch.rand(64, 200) + 1e-6
+ result = pcen(spec)
+ assert torch.isfinite(result).all()
+
+
+def test_pcen_output_dtype_matches_input():
+ """PCEN should return a tensor with the same dtype as the input."""
+ config = PcenConfig()
+ pcen = PCEN.from_config(config, samplerate=SAMPLERATE)
+ spec = torch.rand(64, 200, dtype=torch.float32)
+ result = pcen(spec)
+ assert result.dtype == spec.dtype
+
+
+def test_spectral_mean_subtraction_output_nonnegative():
+ """SpectralMeanSubtraction clamps output to >= 0."""
+ module = SpectralMeanSubtraction()
+ spec = torch.rand(64, 200)
+ result = module(spec)
+ assert (result >= 0).all()
+
+
+def test_spectral_mean_subtraction_shape_preserved():
+ module = SpectralMeanSubtraction()
+ spec = torch.rand(64, 200)
+ result = module(spec)
+ assert result.shape == spec.shape
+
+
+def test_spectral_mean_subtraction_reduces_time_mean():
+ """A constant spectrogram should be reduced to all zeros by mean subtraction."""
+ module = SpectralMeanSubtraction()
+ # Constant spectrogram: mean subtraction should produce all zeros before clamp
+ spec = torch.ones(32, 100) * 3.0
+ result = module(spec)
+ assert (result == 0).all()
+
+
+def test_spectral_mean_subtraction_from_config():
+ config = SpectralMeanSubtractionConfig()
+ module = SpectralMeanSubtraction.from_config(config, samplerate=SAMPLERATE)
+ assert isinstance(module, SpectralMeanSubtraction)
+
+
+def test_peak_normalize_spec_max_is_one():
+ """PeakNormalize should scale the spectrogram peak to 1."""
+ module = PeakNormalize()
+ spec = torch.rand(64, 200) * 5.0
+ result = module(spec)
+ assert abs(result.abs().max().item() - 1.0) < 1e-6
+
+
+def test_peak_normalize_spec_handles_zero():
+ """PeakNormalize on a zero spectrogram should not raise."""
+ module = PeakNormalize()
+ spec = torch.zeros(64, 200)
+ result = module(spec)
+ assert (result == 0).all()
+
+
+def test_peak_normalize_from_config():
+ config = PeakNormalizeConfig()
+ module = PeakNormalize.from_config(config, samplerate=SAMPLERATE)
+ assert isinstance(module, PeakNormalize)
+
+
+def test_scale_amplitude_db_output_is_finite():
+ """AmplitudeToDB scaling should produce finite values for positive input."""
+ module = ScaleAmplitude(scale="db")
+ spec = torch.rand(64, 200) + 1e-4
+ result = module(spec)
+ assert torch.isfinite(result).all()
+
+
+def test_scale_amplitude_power_output_equals_square():
+ """ScaleAmplitude('power') should square every element."""
+ module = ScaleAmplitude(scale="power")
+ spec = torch.tensor([[2.0, 3.0], [4.0, 5.0]])
+ result = module(spec)
+ expected = spec**2
+ assert torch.allclose(result, expected)
+
+
+def test_scale_amplitude_from_config():
+ config = ScaleAmplitudeConfig(scale="db")
+ module = ScaleAmplitude.from_config(config, samplerate=SAMPLERATE)
+ assert isinstance(module, ScaleAmplitude)
+ assert module.scale == "db"
+
+
+def test_resize_spec_output_shape():
+ """ResizeSpec should produce the target height and scaled width."""
+ module = ResizeSpec(height=64, time_factor=0.5)
+ spec = torch.rand(1, 128, 200) # (batch, freq, time)
+ result = module(spec)
+ assert result.shape == (1, 64, 100)
+
+
+def test_resize_spec_2d_input():
+ """ResizeSpec should handle 2-D input (no batch or channel dimensions)."""
+ module = ResizeSpec(height=64, time_factor=0.5)
+ spec = torch.rand(128, 200)
+ result = module(spec)
+ assert result.shape == (64, 100)
+
+
+def test_resize_spec_output_is_finite():
+ module = ResizeSpec(height=128, time_factor=0.5)
+ spec = torch.rand(128, 200)
+ result = module(spec)
+ assert torch.isfinite(result).all()
+
+
+def test_resize_spec_from_config():
+ config = ResizeConfig(height=64, resize_factor=0.25)
+ module = build_spectrogram_resizer(config)
+ assert isinstance(module, ResizeSpec)
+ assert module.height == 64
+ assert module.time_factor == 0.25
+
+
+def test_build_spectrogram_transform_pcen():
+ config = PcenConfig()
+ module = build_spectrogram_transform(config, samplerate=SAMPLERATE)
+ assert isinstance(module, PCEN)
+
+
+def test_build_spectrogram_transform_spectral_mean_subtraction():
+ config = SpectralMeanSubtractionConfig()
+ module = build_spectrogram_transform(config, samplerate=SAMPLERATE)
+ assert isinstance(module, SpectralMeanSubtraction)
+
+
+def test_build_spectrogram_transform_scale_amplitude():
+ config = ScaleAmplitudeConfig(scale="db")
+ module = build_spectrogram_transform(config, samplerate=SAMPLERATE)
+ assert isinstance(module, ScaleAmplitude)
+
+
+def test_build_spectrogram_transform_peak_normalize():
+ config = PeakNormalizeConfig()
+ module = build_spectrogram_transform(config, samplerate=SAMPLERATE)
+ assert isinstance(module, PeakNormalize)
diff --git a/tests/test_targets/__init__.py b/tests/test_targets/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_targets/test_classes.py b/tests/test_targets/test_classes.py
new file mode 100644
index 00000000..c80c5ec7
--- /dev/null
+++ b/tests/test_targets/test_classes.py
@@ -0,0 +1,110 @@
+from pathlib import Path
+from typing import Callable
+from uuid import uuid4
+
+import pytest
+from soundevent import data
+from soundevent.terms import get_term
+
+from batdetect2.targets.classes import (
+ TargetClassConfig,
+ build_sound_event_decoder,
+ build_sound_event_encoder,
+ get_class_names_from_config,
+)
+
+
+@pytest.fixture
+def sample_annotation(
+ sound_event: data.SoundEvent,
+) -> data.SoundEventAnnotation:
+ """Fixture for a sample SoundEventAnnotation."""
+ return data.SoundEventAnnotation(
+ sound_event=sound_event,
+ tags=[
+ data.Tag(key="species", value="Pipistrellus pipistrellus"),
+ data.Tag(key="quality", value="Good"),
+ ],
+ )
+
+
+@pytest.fixture
+def create_temp_yaml(tmp_path: Path) -> Callable[[str], Path]:
+ """Create a temporary YAML file with the given content."""
+
+ def factory(content: str) -> Path:
+ temp_file = tmp_path / f"{uuid4()}.yaml"
+ temp_file.write_text(content)
+ return temp_file
+
+ return factory
+
+
+def test_get_class_names_from_config():
+ target_class1 = TargetClassConfig(
+ name="pippip",
+ tags=[data.Tag(key="species", value="Pipistrellus pipistrellus")],
+ )
+ target_class2 = TargetClassConfig(
+ name="myodau",
+ tags=[data.Tag(key="species", value="Myotis daubentonii")],
+ )
+ names = get_class_names_from_config([target_class1, target_class2])
+ assert names == ["pippip", "myodau"]
+
+
+def test_build_encoder_from_config(
+ sample_annotation: data.SoundEventAnnotation,
+):
+ classes = [
+ TargetClassConfig(
+ name="pippip",
+ tags=[data.Tag(key="species", value="Pipistrellus pipistrellus")],
+ )
+ ]
+ encoder = build_sound_event_encoder(classes)
+ result = encoder(sample_annotation)
+ assert result == "pippip"
+
+ classes = []
+ encoder = build_sound_event_encoder(classes)
+ result = encoder(sample_annotation)
+ assert result is None
+
+
+def test_build_decoder_from_config():
+ classes = [
+ TargetClassConfig(
+ name="pippip",
+ tags=[data.Tag(key="species", value="Pipistrellus pipistrellus")],
+ assign_tags=[data.Tag(key="call_type", value="Echolocation")],
+ )
+ ]
+ decoder = build_sound_event_decoder(classes)
+ tags = decoder("pippip")
+ assert len(tags) == 1
+ assert tags[0].term == get_term("event")
+ assert tags[0].value == "Echolocation"
+
+ # Test fallback: when assign_tags is not set, the decoder should fall back to tags
+ classes = [
+ TargetClassConfig(
+ name="pippip",
+ tags=[data.Tag(key="species", value="Pipistrellus pipistrellus")],
+ )
+ ]
+ decoder = build_sound_event_decoder(classes)
+ tags = decoder("pippip")
+ assert len(tags) == 1
+ assert tags[0].term == get_term("species")
+ assert tags[0].value == "Pipistrellus pipistrellus"
+
+ # Test raise_on_unmapped=True
+ decoder = build_sound_event_decoder(classes, raise_on_unmapped=True)
+ with pytest.raises(ValueError):
+ decoder("unknown_class")
+
+ # Test raise_on_unmapped=False
+ decoder = build_sound_event_decoder(classes, raise_on_unmapped=False)
+ tags = decoder("unknown_class")
+ assert len(tags) == 0
diff --git a/tests/test_targets/test_rois.py b/tests/test_targets/test_rois.py
new file mode 100644
index 00000000..475df00e
--- /dev/null
+++ b/tests/test_targets/test_rois.py
@@ -0,0 +1,676 @@
+from pathlib import Path
+
+import numpy as np
+import pytest
+import soundfile as sf
+from soundevent import data
+
+from batdetect2.audio import build_audio_loader
+from batdetect2.preprocess import (
+ PreprocessingConfig,
+ build_preprocessor,
+)
+from batdetect2.preprocess.spectrogram import (
+ ScaleAmplitudeConfig,
+ SpectralMeanSubtractionConfig,
+)
+from batdetect2.targets.rois import (
+ DEFAULT_ANCHOR,
+ DEFAULT_FREQUENCY_SCALE,
+ DEFAULT_TIME_SCALE,
+ SIZE_HEIGHT,
+ SIZE_WIDTH,
+ AnchorBBoxMapper,
+ AnchorBBoxMapperConfig,
+ PeakEnergyBBoxMapper,
+ PeakEnergyBBoxMapperConfig,
+ ROIMappingConfig,
+ _build_bounding_box,
+ build_roi_mapper,
+ build_roi_mapping,
+ get_peak_energy_coordinates,
+)
+
+
+@pytest.fixture
+def sample_bbox() -> data.BoundingBox:
+ """A standard bounding box for testing."""
+ return data.BoundingBox(coordinates=[10.0, 100.0, 20.0, 200.0])
+
+
+@pytest.fixture
+def sample_recording(create_recording) -> data.Recording:
+ return create_recording(duration=30, samplerate=4_000)
+
+
+@pytest.fixture
+def sample_sound_event(sample_bbox, sample_recording) -> data.SoundEvent:
+ return data.SoundEvent(geometry=sample_bbox, recording=sample_recording)
+
+
+@pytest.fixture
+def zero_bbox() -> data.BoundingBox:
+ """A bounding box with zero duration and bandwidth."""
+ return data.BoundingBox(coordinates=[15.0, 150.0, 15.0, 150.0])
+
+
+@pytest.fixture
+def zero_sound_event(zero_bbox, sample_recording) -> data.SoundEvent:
+ """A sample sound event with a zero-sized bounding box."""
+ return data.SoundEvent(geometry=zero_bbox, recording=sample_recording)
+
+
+@pytest.fixture
+def default_mapper() -> AnchorBBoxMapper:
+ """An AnchorBBoxMapper with default settings."""
+ return AnchorBBoxMapper()
+
+
+@pytest.fixture
+def custom_encoder() -> AnchorBBoxMapper:
+ """An AnchorBBoxMapper with custom settings."""
+ return AnchorBBoxMapper(
+ anchor="center", time_scale=1.0, frequency_scale=10.0
+ )
+
+
+@pytest.fixture
+def custom_mapper() -> AnchorBBoxMapper:
+ """An AnchorBBoxMapper with custom settings."""
+ return AnchorBBoxMapper(
+ anchor="center", time_scale=1.0, frequency_scale=10.0
+ )
+
+
+def test_bbox_encoder_init_defaults(default_mapper):
+ """Test AnchorBBoxMapper initialization with default arguments."""
+ assert default_mapper.anchor == DEFAULT_ANCHOR
+ assert default_mapper.time_scale == DEFAULT_TIME_SCALE
+ assert default_mapper.frequency_scale == DEFAULT_FREQUENCY_SCALE
+ assert default_mapper.dimension_names == [SIZE_WIDTH, SIZE_HEIGHT]
+
+
+def test_bbox_encoder_init_custom(custom_encoder):
+ """Test AnchorBBoxMapper initialization with custom arguments."""
+ assert custom_encoder.anchor == "center"
+ assert custom_encoder.time_scale == 1.0
+ assert custom_encoder.frequency_scale == 10.0
+ assert custom_encoder.dimension_names == [SIZE_WIDTH, SIZE_HEIGHT]
+
+
+POSITION_TEST_CASES = [
+ ("bottom-left", (10.0, 100.0)),
+ ("bottom-right", (20.0, 100.0)),
+ ("top-left", (10.0, 200.0)),
+ ("top-right", (20.0, 200.0)),
+ ("center-left", (10.0, 150.0)),
+ ("center-right", (20.0, 150.0)),
+ ("top-center", (15.0, 200.0)),
+ ("bottom-center", (15.0, 100.0)),
+ ("center", (15.0, 150.0)),
+ ("centroid", (15.0, 150.0)),
+ ("point_on_surface", (15.0, 150.0)),
+]
+
+
+@pytest.mark.parametrize("anchor, expected_pos", POSITION_TEST_CASES)
+def test_anchor_bbox_mapper_encode_position(
+ sample_sound_event, anchor, expected_pos
+):
+ """Test encode returns the correct position for various anchors."""
+ encoder = AnchorBBoxMapper(anchor=anchor)
+ actual_pos, _ = encoder.encode(sample_sound_event)
+ assert actual_pos == pytest.approx(expected_pos)
+
+
+def test_anchor_bbox_mapper_encode_defaults(
+ sample_sound_event, default_mapper
+):
+ """Test encode with default settings returns correct position and size."""
+ expected_pos = (10.0, 100.0) # bottom-left
+ expected_size = np.array(
+ [
+ 10.0 * DEFAULT_TIME_SCALE,
+ 100.0 * DEFAULT_FREQUENCY_SCALE,
+ ]
+ )
+ actual_pos, actual_size = default_mapper.encode(sample_sound_event)
+ assert actual_pos == pytest.approx(expected_pos)
+ np.testing.assert_allclose(actual_size, expected_size)
+ assert actual_size.shape == (2,)
+
+
+def test_anchor_bbox_mapper_encode_custom(sample_sound_event, custom_mapper):
+ """Test encode with custom settings returns correct position and size."""
+ expected_pos = (15.0, 150.0) # center
+ expected_size = np.array([10.0 * 1.0, 100.0 * 10.0])
+
+ actual_pos, actual_size = custom_mapper.encode(sample_sound_event)
+ assert actual_pos == pytest.approx(expected_pos)
+ np.testing.assert_allclose(actual_size, expected_size)
+ assert actual_size.shape == (2,)
+
+
+def test_anchor_bbox_mapper_encode_zero_box(zero_sound_event, default_mapper):
+ """Test encode for a zero-sized box."""
+ expected_pos = (15.0, 150.0)
+ expected_size = np.array([0.0, 0.0])
+ actual_pos, actual_size = default_mapper.encode(zero_sound_event)
+ assert actual_pos == pytest.approx(expected_pos)
+ np.testing.assert_allclose(actual_size, expected_size)
+
+
+BUILD_BOX_TEST_CASES = [
+ ("bottom-left", [50.0, 500.0, 60.0, 600.0]),
+ ("bottom-right", [40.0, 500.0, 50.0, 600.0]),
+ ("top-left", [50.0, 400.0, 60.0, 500.0]),
+ ("top-right", [40.0, 400.0, 50.0, 500.0]),
+ ("center-left", [50.0, 450.0, 60.0, 550.0]),
+ ("center-right", [40.0, 450.0, 50.0, 550.0]),
+ ("top-center", [45.0, 400.0, 55.0, 500.0]),
+ ("bottom-center", [45.0, 500.0, 55.0, 600.0]),
+ ("center", [45.0, 450.0, 55.0, 550.0]),
+ ("centroid", [45.0, 450.0, 55.0, 550.0]),
+ ("point_on_surface", [45.0, 450.0, 55.0, 550.0]),
+]
+
+
+@pytest.mark.parametrize(
+ "position_type, expected_coords", BUILD_BOX_TEST_CASES
+)
+def test_build_bounding_box(position_type, expected_coords):
+ """Test _build_bounding_box for various position types."""
+ ref_pos = (50.0, 500.0)
+ duration = 10.0
+ bandwidth = 100.0
+ bbox = _build_bounding_box(
+ ref_pos, duration, bandwidth, anchor=position_type
+ )
+ assert isinstance(bbox, data.BoundingBox)
+ np.testing.assert_allclose(bbox.coordinates, expected_coords)
+
+
+def test_build_bounding_box_invalid_anchor():
+ """Test _build_bounding_box raises error for invalid position."""
+ with pytest.raises(ValueError, match="Invalid anchor"):
+ _build_bounding_box(
+ (0, 0),
+ 1,
+ 1,
+ anchor="invalid-spot", # type: ignore
+ )
+
+
+@pytest.mark.parametrize(
+    "anchor", [anchor for anchor, _ in POSITION_TEST_CASES]
+)
+def test_anchor_bbox_mapper_encode_decode_roundtrip(
+    sample_sound_event, sample_bbox, anchor
+):
+    """Test encode-decode roundtrip reconstructs the original bbox."""
+    # Whatever anchor is used for encoding, decoding with the same mapper
+    # must reproduce the original geometry (within float tolerance).
+    mapper = AnchorBBoxMapper(anchor=anchor)
+    position, size = mapper.encode(sample_sound_event)
+    recovered_bbox = mapper.decode(position, size)
+
+    assert isinstance(recovered_bbox, data.BoundingBox)
+    np.testing.assert_allclose(
+        recovered_bbox.coordinates, sample_bbox.coordinates, atol=1e-6
+    )
+
+
+def test_anchor_bbox_mapper_roundtrip_custom_scale(
+    sample_sound_event, sample_bbox, custom_mapper
+):
+    """Test encode-decode roundtrip with custom scaling factors."""
+    # Scaling applied on encode must be undone on decode, so custom
+    # time/frequency scales should not affect roundtrip fidelity.
+    position, size = custom_mapper.encode(sample_sound_event)
+    recovered_bbox = custom_mapper.decode(position, size)
+
+    assert isinstance(recovered_bbox, data.BoundingBox)
+    np.testing.assert_allclose(
+        recovered_bbox.coordinates, sample_bbox.coordinates, atol=1e-6
+    )
+
+
+def test_anchor_bbox_mapper_roundtrip_zero_box(
+    zero_sound_event, zero_bbox, default_mapper
+):
+    """Test encode-decode roundtrip for a zero-sized box."""
+    # Degenerate (zero-area) boxes must also survive the roundtrip.
+    position, size = default_mapper.encode(zero_sound_event)
+    recovered_bbox = default_mapper.decode(position, size)
+    np.testing.assert_allclose(
+        recovered_bbox.coordinates, zero_bbox.coordinates, atol=1e-6
+    )
+
+
+def test_anchor_bbox_mapper_decode_invalid_size_shape(default_mapper):
+    """Test decode raises ValueError for incorrect size shape."""
+    ref_pos = (10, 100)
+    # The anchor mapper expects a flat (2,) size vector; too few elements,
+    # too many elements, and a 2-D array must all be rejected.
+    with pytest.raises(ValueError, match="does not have the expected shape"):
+        default_mapper.decode(ref_pos, np.array([1.0]))
+    with pytest.raises(ValueError, match="does not have the expected shape"):
+        default_mapper.decode(ref_pos, np.array([1.0, 2.0, 3.0]))
+    with pytest.raises(ValueError, match="does not have the expected shape"):
+        default_mapper.decode(ref_pos, np.array([[1.0], [2.0]]))
+
+
+def test_build_roi_mapper():
+    """Test build_roi_mapper creates a configured BBoxEncoder."""
+    # NOTE(review): overlaps heavily with test_build_roi_mapper_for_anchor_bbox
+    # later in this file — consider consolidating.
+    config = AnchorBBoxMapperConfig(
+        anchor="top-right", time_scale=2.0, frequency_scale=20.0
+    )
+    mapper = build_roi_mapper(config)
+
+    assert isinstance(mapper, AnchorBBoxMapper)
+    assert mapper.anchor == config.anchor
+    assert mapper.time_scale == config.time_scale
+    assert mapper.frequency_scale == config.frequency_scale
+
+
+def test_get_peak_energy_coordinates(generate_whistle):
+    """Peak-energy search locates a known synthetic whistle inside the ROI."""
+    whistle_time = 0.5
+    whistle_frequency = 40_000
+    duration = 1.0
+    samplerate = 256_000
+
+    # Generate a WAV file with a whistle
+    whistle_path = generate_whistle(
+        time=whistle_time,
+        frequency=whistle_frequency,
+        duration=duration,
+        samplerate=samplerate,
+        whistle_duration=0.01,
+    )
+
+    # Create a recording object from the generated WAV
+    recording = data.Recording.from_file(path=whistle_path)
+
+    # Build a preprocessor (default config should be fine for this test)
+    preprocessor = build_preprocessor()
+
+    audio_loader = build_audio_loader()
+
+    # Define a region of interest that contains the whistle
+    start_time = 0.2
+    end_time = 0.7
+    low_freq = 20_000
+    high_freq = 60_000
+
+    # Get the peak energy coordinates
+    peak_time, peak_freq = get_peak_energy_coordinates(
+        recording=recording,
+        preprocessor=preprocessor,
+        audio_loader=audio_loader,
+        start_time=start_time,
+        end_time=end_time,
+        low_freq=low_freq,
+        high_freq=high_freq,
+        loading_buffer=0.05,
+    )
+
+    # Assert that the peak coordinates are close to the expected values.
+    # Tolerances account for spectrogram time/frequency bin resolution.
+    assert peak_time == pytest.approx(whistle_time, abs=0.01)
+    assert peak_freq == pytest.approx(whistle_frequency, abs=1000)
+
+
+def test_get_peak_energy_coordinates_with_two_whistles(generate_whistle):
+    """The ROI bounds confine the search: a loud whistle outside the ROI must
+    not mask the weaker whistle inside it."""
+    # Parameters for the first (stronger) whistle
+    strong_whistle_time = 0.2
+    strong_whistle_frequency = 30_000
+    strong_whistle_amplitude = 1.0  # Full amplitude
+
+    # Parameters for the second (weaker) whistle
+    weak_whistle_time = 0.8
+    weak_whistle_frequency = 50_000
+    weak_whistle_amplitude = 0.1  # Weaker amplitude
+
+    # Recording parameters
+    duration = 1.0
+    samplerate = 256_000
+
+    # Generate WAV files for each whistle
+    strong_whistle_path = generate_whistle(
+        time=strong_whistle_time,
+        frequency=strong_whistle_frequency,
+        duration=duration,
+        samplerate=samplerate,
+        whistle_duration=0.01,
+    )
+    weak_whistle_path = generate_whistle(
+        time=weak_whistle_time,
+        frequency=weak_whistle_frequency,
+        duration=duration,
+        samplerate=samplerate,
+        whistle_duration=0.01,
+    )
+
+    # Load audio data
+    strong_audio, _ = sf.read(strong_whistle_path)
+    weak_audio, _ = sf.read(weak_whistle_path)
+
+    # Mix the audio files into a single recording containing both whistles
+    mixed_audio = (
+        strong_audio * strong_whistle_amplitude
+        + weak_audio * weak_whistle_amplitude
+    )
+    mixed_audio_path = strong_whistle_path.parent / "mixed_whistles.wav"
+    sf.write(str(mixed_audio_path), mixed_audio, samplerate)
+
+    # Create a recording object from the mixed WAV
+    recording = data.Recording.from_file(path=mixed_audio_path)
+
+    # Build a preprocessor
+    preprocessor = build_preprocessor()
+
+    # Define a region of interest that contains only the weaker whistle
+    start_time = 0.7
+    end_time = 0.9
+    low_freq = 45_000
+    high_freq = 55_000
+
+    # Get the peak energy coordinates within the bounding box
+    peak_time, peak_freq = get_peak_energy_coordinates(
+        recording=recording,
+        preprocessor=preprocessor,
+        audio_loader=build_audio_loader(),
+        start_time=start_time,
+        end_time=end_time,
+        low_freq=low_freq,
+        high_freq=high_freq,
+        loading_buffer=0.05,
+    )
+
+    # Assert that the peak coordinates are close to the weaker whistle's values
+    assert peak_time == pytest.approx(weak_whistle_time, abs=0.01)
+    assert peak_freq == pytest.approx(weak_whistle_frequency, abs=1000)
+
+
+def test_get_peak_energy_coordinates_silent_region(create_recording):
+    """On silence the returned peak must at least stay inside the ROI."""
+    # Parameters for a silent recording
+    duration = 2.0  # seconds
+    samplerate = 44_100  # Hz
+
+    # Create a silent recording
+    recording = create_recording(duration=duration, samplerate=samplerate)
+
+    # Build a preprocessor
+    preprocessor = build_preprocessor()
+
+    # Define a region of interest within the silent recording
+    start_time = 0.5
+    end_time = 1.5
+    low_freq = 10_000
+    high_freq = 20_000
+
+    # Get the peak energy coordinates from the silent region
+    peak_time, peak_freq = get_peak_energy_coordinates(
+        recording=recording,
+        preprocessor=preprocessor,
+        audio_loader=build_audio_loader(),
+        start_time=start_time,
+        end_time=end_time,
+        low_freq=low_freq,
+        high_freq=high_freq,
+        loading_buffer=0.05,
+    )
+
+    # Assert that the peak coordinates are within the defined ROI bounds
+    assert start_time <= peak_time <= end_time
+    assert low_freq <= peak_freq <= high_freq
+
+    # Since there's no actual peak, the exact values might vary depending on
+    # argmax behavior with all-zero or very low, uniform energy. We just need
+    # to ensure they are within the search bounds.
+
+
+def test_peak_energy_bbox_mapper_encode(generate_whistle):
+    """
+    Tests the 'happy path' for PeakEnergyBBoxMapper.encode.
+
+    It verifies that the method correctly identifies a known peak within a
+    bounding box and calculates the four scaled distances to the box edges.
+    """
+    # 1. SETUP
+    samplerate = 256_000
+    time_scale = 100.0
+    freq_scale = 0.1
+
+    bbox_start_time, bbox_low_freq = 1.0, 10000
+    bbox_end_time, bbox_high_freq = 2.0, 30000
+    bbox = data.BoundingBox(
+        coordinates=[
+            bbox_start_time,
+            bbox_low_freq,
+            bbox_end_time,
+            bbox_high_freq,
+        ]
+    )
+
+    # Define the known location of the peak energy inside the bbox
+    peak_time, peak_freq = 1.6, 25000
+
+    # Create a recording with a whistle at the defined peak location
+    recording_path = generate_whistle(
+        time=peak_time,
+        frequency=peak_freq,
+        duration=3.0,
+        samplerate=samplerate,
+    )
+    recording = data.Recording.from_file(path=recording_path)
+    sound_event = data.SoundEvent(geometry=bbox, recording=recording)
+
+    # Instantiate the mapper with a preprocessor. Spectrogram transforms are
+    # disabled so the raw whistle remains the unambiguous energy maximum.
+    preprocessor = build_preprocessor(
+        PreprocessingConfig(spectrogram_transforms=[])
+    )
+    mapper = PeakEnergyBBoxMapper(
+        preprocessor=preprocessor,
+        audio_loader=build_audio_loader(),
+        time_scale=time_scale,
+        frequency_scale=freq_scale,
+    )
+
+    # Encode the sound event to get the position and size
+    actual_pos, actual_size = mapper.encode(sound_event)
+
+    # Then
+    assert actual_pos[0] == pytest.approx(peak_time, abs=0.01)
+    assert actual_pos[1] == pytest.approx(peak_freq, abs=1000)
+
+    # Assert that the calculated scaled distances are correct: size is the
+    # 4-vector of scaled distances from the detected peak to each box edge,
+    # ordered (left, bottom, right, top).
+    identified_time, identified_freq = actual_pos
+    expected_left = (identified_time - bbox_start_time) * time_scale
+    expected_bottom = (identified_freq - bbox_low_freq) * freq_scale
+    expected_right = (bbox_end_time - identified_time) * time_scale
+    expected_top = (bbox_high_freq - identified_freq) * freq_scale
+    expected_size = np.array(
+        [expected_left, expected_bottom, expected_right, expected_top]
+    )
+
+    assert actual_size.shape == (4,)
+    np.testing.assert_allclose(actual_size, expected_size, rtol=1e-5)
+
+
+def test_peak_energy_bbox_mapper_decode():
+    """
+    Tests that PeakEnergyBBoxMapper.decode correctly reconstructs a BoundingBox.
+    """
+    # Given
+    time_scale = 100.0
+    freq_scale = 0.1
+
+    # Define a known peak position and scaled distances.
+    # Size order matches encode: (left, bottom, right, top) edge distances.
+    peak_position = (1.5, 15000)
+    scaled_size = np.array([50.0, 500.0, 50.0, 500.0])
+
+    mapper = PeakEnergyBBoxMapper(
+        preprocessor=build_preprocessor(),
+        audio_loader=build_audio_loader(),
+        time_scale=time_scale,
+        frequency_scale=freq_scale,
+    )
+
+    # When
+    reconstructed_bbox = mapper.decode(peak_position, scaled_size)
+
+    # Then
+    # Calculate the expected coordinates based on the decode logic:
+    # distances are un-scaled and offset from the peak position.
+    expected_start_time = peak_position[0] - scaled_size[0] / time_scale
+    expected_low_freq = peak_position[1] - scaled_size[1] / freq_scale
+    expected_end_time = peak_position[0] + scaled_size[2] / time_scale
+    expected_high_freq = peak_position[1] + scaled_size[3] / freq_scale
+
+    expected_coordinates = [
+        expected_start_time,
+        expected_low_freq,
+        expected_end_time,
+        expected_high_freq,
+    ]
+
+    assert isinstance(reconstructed_bbox, data.BoundingBox)
+    np.testing.assert_allclose(
+        reconstructed_bbox.coordinates, expected_coordinates
+    )
+
+
+def test_peak_energy_bbox_mapper_encode_decode_roundtrip(generate_whistle):
+    """
+    Tests that encoding and then decoding a SoundEvent with the
+    PeakEnergyBBoxMapper results in the original BoundingBox.
+    """
+    # Given
+    samplerate = 256_000
+
+    # Define the original geometry and the peak location within it.
+    original_bbox = data.BoundingBox(coordinates=[1.0, 10000, 2.0, 30000])
+    peak_time, peak_freq = 1.6, 25000
+
+    # Create the recording and sound event.
+    recording_path = generate_whistle(
+        time=peak_time,
+        frequency=peak_freq,
+        duration=3.0,
+        samplerate=samplerate,
+    )
+    recording = data.Recording.from_file(path=recording_path)
+    sound_event = data.SoundEvent(geometry=original_bbox, recording=recording)
+
+    # Instantiate the mapper with spectrogram transforms disabled so the
+    # whistle is the unambiguous peak; default scales are used.
+    preprocessor = build_preprocessor(
+        PreprocessingConfig(spectrogram_transforms=[])
+    )
+    audio_loader = build_audio_loader()
+    mapper = PeakEnergyBBoxMapper(
+        preprocessor=preprocessor,
+        audio_loader=audio_loader,
+    )
+
+    # When
+    # Encode the sound event, then immediately decode the result.
+    position, size = mapper.encode(sound_event)
+    reconstructed_bbox = mapper.decode(position, size)
+
+    # Then
+    # Verify the reconstructed bounding box is identical to the original.
+    np.testing.assert_allclose(
+        reconstructed_bbox.coordinates,
+        original_bbox.coordinates,
+        rtol=1e-5,
+    )
+
+
+def test_build_roi_mapper_for_anchor_bbox():
+    """Factory builds an AnchorBBoxMapper carrying the config's settings."""
+    # Given
+    config = AnchorBBoxMapperConfig(
+        anchor="center",
+        time_scale=123.0,
+        frequency_scale=456.0,
+    )
+
+    # When
+    mapper = build_roi_mapper(config)
+
+    # Then
+    assert isinstance(mapper, AnchorBBoxMapper)
+    assert mapper.anchor == "center"
+    assert mapper.time_scale == 123.0
+    assert mapper.frequency_scale == 456.0
+
+
+def test_build_roi_mapper_for_peak_energy_bbox():
+    """Factory builds a PeakEnergyBBoxMapper from its config variant."""
+    # Given
+    preproc_config = PreprocessingConfig(
+        spectrogram_transforms=[
+            ScaleAmplitudeConfig(scale="db"),
+            SpectralMeanSubtractionConfig(),
+        ]
+    )
+    config = PeakEnergyBBoxMapperConfig(
+        loading_buffer=0.99,
+        time_scale=789.0,
+        frequency_scale=123.0,
+        preprocessing=preproc_config,
+    )
+
+    # When
+    mapper = build_roi_mapper(config)
+
+    # Then
+    assert isinstance(mapper, PeakEnergyBBoxMapper)
+    assert mapper.loading_buffer == 0.99
+    assert mapper.time_scale == 789.0
+    assert mapper.frequency_scale == 123.0
+
+
+def test_build_roi_mapper_raises_error_for_unknown_name():
+    """
+    Tests that the factory raises a NotImplementedError when given a
+    config with an unrecognized mapper name.
+    """
+
+    # Given
+    # A minimal stand-in object: only the `name` attribute matters for
+    # dispatch, so a plain class suffices instead of a pydantic config.
+    class DummyConfig:
+        name = "non_existent_mapper"
+
+    # Then
+    with pytest.raises(NotImplementedError):
+        build_roi_mapper(DummyConfig())  # type: ignore
+
+
+def test_build_roi_mapping_applies_class_override():
+    """Per-class overrides select a different mapper than the default."""
+    config = ROIMappingConfig(
+        default=AnchorBBoxMapperConfig(anchor="bottom-left"),
+        overrides={
+            "myomyo": AnchorBBoxMapperConfig(anchor="top-left"),
+        },
+    )
+
+    mapper = build_roi_mapping(config=config)
+
+    geometry = data.BoundingBox(coordinates=[0.1, 12_000, 0.2, 18_000])
+    sound_event = data.SoundEvent(
+        recording=data.Recording(
+            path=Path("x.wav"),
+            samplerate=256_000,
+            channels=1,
+            duration=1.0,
+        ),
+        geometry=geometry,
+    )
+
+    # "pippip" has no override and falls back to the default mapper.
+    default_position, _ = mapper.encode(sound_event, class_name="pippip")
+    override_position, _ = mapper.encode(sound_event, class_name="myomyo")
+
+    # bottom-left anchor -> low_freq (12 kHz); top-left anchor -> high_freq.
+    assert default_position == pytest.approx((0.1, 12_000))
+    assert override_position == pytest.approx((0.1, 18_000))
+
+
+def test_build_roi_mapping_rejects_dimension_mismatch():
+    """Overrides whose size vectors differ in dimensionality are rejected.
+
+    AnchorBBoxMapper encodes a (2,) size while PeakEnergyBBoxMapper encodes
+    a (4,) size, so mixing them in one mapping must fail at build time.
+    """
+    config = ROIMappingConfig(
+        default=AnchorBBoxMapperConfig(),
+        overrides={
+            "myomyo": PeakEnergyBBoxMapperConfig(),
+        },
+    )
+
+    with pytest.raises(ValueError, match="same dimension order"):
+        build_roi_mapping(config=config)
diff --git a/tests/test_targets/test_targets.py b/tests/test_targets/test_targets.py
new file mode 100644
index 00000000..37d1f3a4
--- /dev/null
+++ b/tests/test_targets/test_targets.py
@@ -0,0 +1,175 @@
+import json
+from collections.abc import Callable
+from pathlib import Path
+
+from soundevent import data, terms
+
+from batdetect2.targets import (
+ TargetConfig,
+ Targets,
+ build_roi_mapping,
+ build_targets,
+)
+
+
+def test_targets_get_config_returns_a_json_serializable_dict() -> None:
+    """Targets.get_config yields a plain dict that json.dumps accepts."""
+    targets = build_targets(TargetConfig())
+
+    config_dict = targets.get_config()
+    assert isinstance(config_dict, dict)
+    assert json.dumps(config_dict)
+
+
+def test_targets_from_config_rebuilds_equivalent_targets() -> None:
+    """Round-tripping through get_config/from_config preserves the targets."""
+    original = build_targets(TargetConfig())
+
+    rebuilt = Targets.from_config(original.get_config())
+
+    assert rebuilt.class_names == original.class_names
+    assert rebuilt.detection_class_name == original.detection_class_name
+    assert rebuilt.detection_class_tags == original.detection_class_tags
+    assert rebuilt.get_config() == original.get_config()
+
+
+def test_can_override_default_roi_mapper_per_class(
+    create_temp_yaml: Callable[..., Path],
+    recording: data.Recording,
+):
+    """A YAML config can assign a per-class ROI anchor override.
+
+    The default anchor is bottom-left; the "myomyo" class overrides to
+    top-left, so its encoded frequency should be the box's high edge.
+    """
+    yaml_content = """
+    detection_target:
+      name: bat
+      match_if:
+        name: has_tag
+        tag:
+          key: order
+          value: Chiroptera
+      assign_tags:
+        - key: order
+          value: Chiroptera
+
+    classification_targets:
+      - name: pippip
+        tags:
+          - key: species
+            value: Pipistrellus pipistrellus
+
+      - name: myomyo
+        tags:
+          - key: species
+            value: Myotis myotis
+
+    roi:
+      default:
+        name: anchor_bbox
+        anchor: bottom-left
+      overrides:
+        myomyo:
+          name: anchor_bbox
+          anchor: top-left
+    """
+    config_path = create_temp_yaml(yaml_content)
+
+    config = TargetConfig.load(config_path)
+    targets = build_targets(config)
+    roi_mapper = build_roi_mapping(config=config.roi)
+
+    geometry = data.BoundingBox(coordinates=[0.1, 12_000, 0.2, 18_000])
+
+    species = terms.get_term("species")
+    assert species is not None
+
+    # Two annotations sharing the same geometry but tagged as different
+    # species, so they resolve to different classes (and ROI mappers).
+    se1 = data.SoundEventAnnotation(
+        sound_event=data.SoundEvent(recording=recording, geometry=geometry),
+        tags=[data.Tag(term=species, value="Pipistrellus pipistrellus")],
+    )
+
+    se2 = data.SoundEventAnnotation(
+        sound_event=data.SoundEvent(recording=recording, geometry=geometry),
+        tags=[data.Tag(term=species, value="Myotis myotis")],
+    )
+
+    class_name1 = targets.encode_class(se1)
+    class_name2 = targets.encode_class(se2)
+
+    (time1, freq1), _ = roi_mapper.encode(
+        se1.sound_event,
+        class_name=class_name1,
+    )
+    (time2, freq2), _ = roi_mapper.encode(
+        se2.sound_event,
+        class_name=class_name2,
+    )
+
+    # Same start time for both; frequency differs by anchor (low vs high edge).
+    assert time1 == time2 == 0.1
+    assert freq1 == 12_000
+    assert freq2 == 18_000
+
+
+def test_roi_roundtrip_recovers_geometry_even_with_overrides(
+    create_temp_yaml,
+    recording,
+):
+    """Encode/decode roundtrip recovers the original geometry per class.
+
+    Even when the "myomyo" class uses an overridden anchor, decoding with the
+    matching class name must reconstruct the exact original bounding box.
+    (Resolves the file's TODO: renamed from
+    ``test_roi_is_recovered_roundtrip_even_with_overriders``.)
+    """
+    yaml_content = """
+    detection_target:
+      name: bat
+      match_if:
+        name: has_tag
+        tag:
+          key: order
+          value: Chiroptera
+      assign_tags:
+        - key: order
+          value: Chiroptera
+
+    classification_targets:
+      - name: pippip
+        tags:
+          - key: species
+            value: Pipistrellus pipistrellus
+
+      - name: myomyo
+        tags:
+          - key: species
+            value: Myotis myotis
+
+    roi:
+      default:
+        name: anchor_bbox
+        anchor: bottom-left
+      overrides:
+        myomyo:
+          name: anchor_bbox
+          anchor: top-left
+    """
+    config_path = create_temp_yaml(yaml_content)
+
+    config = TargetConfig.load(config_path)
+    targets = build_targets(config)
+    roi_mapper = build_roi_mapping(config=config.roi)
+
+    geometry = data.BoundingBox(coordinates=[0.1, 12_000, 0.2, 18_000])
+
+    species = terms.get_term("species")
+    assert species is not None
+    # Same geometry, different species tags -> different classes/mappers.
+    se1 = data.SoundEventAnnotation(
+        sound_event=data.SoundEvent(recording=recording, geometry=geometry),
+        tags=[data.Tag(term=species, value="Pipistrellus pipistrellus")],
+    )
+
+    se2 = data.SoundEventAnnotation(
+        sound_event=data.SoundEvent(recording=recording, geometry=geometry),
+        tags=[data.Tag(term=species, value="Myotis myotis")],
+    )
+
+    position1, size1 = roi_mapper.encode(se1.sound_event, class_name="pippip")
+    position2, size2 = roi_mapper.encode(se2.sound_event, class_name="myomyo")
+
+    class_name1 = targets.encode_class(se1)
+    class_name2 = targets.encode_class(se2)
+
+    # Decoding with the class that was used to encode must invert exactly.
+    recovered1 = roi_mapper.decode(position1, size1, class_name=class_name1)
+    recovered2 = roi_mapper.decode(position2, size2, class_name=class_name2)
+
+    assert recovered1 == geometry
+    assert recovered2 == geometry
diff --git a/tests/test_targets/test_utils.py b/tests/test_targets/test_utils.py
new file mode 100644
index 00000000..c4b5d1ce
--- /dev/null
+++ b/tests/test_targets/test_utils.py
@@ -0,0 +1,40 @@
+from soundevent import data
+
+from batdetect2.targets import (
+ TargetClassConfig,
+ TargetConfig,
+ build_targets,
+ check_target_compatibility,
+)
+
+
+def _target_class(name: str) -> TargetClassConfig:
+    # Helper: build a minimal classification target whose single tag
+    # mirrors its name.
+    return TargetClassConfig(
+        name=name,
+        tags=[data.Tag(key="class", value=name)],
+    )
+
+
+def test_check_target_compatibility_accepts_superset_targets() -> None:
+    """Targets that cover all model classes (and more) are compatible."""
+    config = TargetConfig(
+        classification_targets=[
+            _target_class("pip35"),
+            _target_class("myo"),
+            _target_class("extra"),
+        ]
+    )
+    targets = build_targets(config)
+
+    assert check_target_compatibility(targets, ["pip35", "myo"])
+
+
+def test_check_target_compatibility_rejects_missing_model_classes() -> None:
+    """Compatibility fails when a model class is absent from the targets."""
+    config = TargetConfig(
+        classification_targets=[
+            _target_class("pip35"),
+            _target_class("myo"),
+        ]
+    )
+    targets = build_targets(config)
+
+    # "nyc" is not among the configured targets, so the check must fail.
+    assert not check_target_compatibility(targets, ["pip35", "nyc"])
diff --git a/tests/test_train/__init__.py b/tests/test_train/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_train/test_checkpoints.py b/tests/test_train/test_checkpoints.py
new file mode 100644
index 00000000..4cffa48a
--- /dev/null
+++ b/tests/test_train/test_checkpoints.py
@@ -0,0 +1,232 @@
+import sys
+import types
+from pathlib import Path
+
+import pytest
+import torch
+from soundevent import data
+
+from batdetect2.train import TrainingConfig, run_train
+from batdetect2.train.checkpoints import (
+ DEFAULT_CHECKPOINT,
+ get_bundled_checkpoint_names,
+ resolve_checkpoint_path,
+)
+
+pytestmark = pytest.mark.slow
+
+
+def _build_fast_train_config() -> TrainingConfig:
+    # Shared helper: shrink the training loop to a single batch per phase
+    # and disable augmentations so each checkpoint test runs quickly.
+    config = TrainingConfig()
+    config.trainer.limit_train_batches = 1
+    config.trainer.limit_val_batches = 1
+    config.trainer.log_every_n_steps = 1
+    config.trainer.check_val_every_n_epoch = 1
+    config.train_loader.batch_size = 1
+    config.train_loader.augmentations.enabled = False
+    return config
+
+
+def test_train_saves_checkpoint_in_requested_experiment_run_dir(
+    tmp_path: Path,
+    example_annotations: list[data.ClipAnnotation],
+) -> None:
+    """Checkpoints land under <checkpoint_dir>/<experiment>/<run>."""
+    config = _build_fast_train_config()
+
+    run_train(
+        train_annotations=example_annotations[:1],
+        val_annotations=example_annotations[:1],
+        train_config=config,
+        num_epochs=1,
+        train_workers=0,
+        val_workers=0,
+        checkpoint_dir=tmp_path,
+        experiment_name="exp_a",
+        run_name="run_b",
+        seed=0,
+    )
+
+    checkpoints = list((tmp_path / "exp_a" / "run_b").rglob("*.ckpt"))
+    assert checkpoints
+
+
+def test_train_without_validation_can_still_save_last_checkpoint(
+    tmp_path: Path,
+    example_annotations: list[data.ClipAnnotation],
+) -> None:
+    """With val_annotations=None, save_last still produces last*.ckpt."""
+    config = _build_fast_train_config()
+    config.checkpoints.save_last = True
+
+    run_train(
+        train_annotations=example_annotations[:1],
+        val_annotations=None,
+        train_config=config,
+        num_epochs=1,
+        train_workers=0,
+        val_workers=0,
+        checkpoint_dir=tmp_path,
+        seed=0,
+    )
+
+    assert list(tmp_path.rglob("last*.ckpt"))
+
+
+def test_train_controls_which_checkpoints_are_kept(
+    tmp_path: Path,
+    example_annotations: list[data.ClipAnnotation],
+) -> None:
+    """save_top_k / save_last / filename settings control retained files."""
+    config = _build_fast_train_config()
+    config.checkpoints.save_top_k = 1
+    config.checkpoints.save_last = True
+    config.checkpoints.filename = "epoch{epoch}"
+
+    run_train(
+        train_annotations=example_annotations[:1],
+        val_annotations=example_annotations[:1],
+        train_config=config,
+        num_epochs=3,
+        train_workers=0,
+        val_workers=0,
+        checkpoint_dir=tmp_path,
+        seed=0,
+    )
+
+    # Partition outputs: "last*" files come from save_last; everything else
+    # is a "best" checkpoint retained under the save_top_k budget.
+    all_checkpoints = list(tmp_path.rglob("*.ckpt"))
+    last_checkpoints = list(tmp_path.rglob("last*.ckpt"))
+    best_checkpoints = [
+        path for path in all_checkpoints if not path.name.startswith("last")
+    ]
+
+    assert last_checkpoints
+    assert len(best_checkpoints) == 1
+    assert "epoch" in best_checkpoints[0].name
+
+
+def test_train_saves_weights_only_checkpoints_by_default(
+    tmp_path: Path,
+    example_annotations: list[data.ClipAnnotation],
+) -> None:
+    """Default checkpoints carry weights/hparams but no optimizer state."""
+    config = _build_fast_train_config()
+
+    run_train(
+        train_annotations=example_annotations[:1],
+        val_annotations=example_annotations[:1],
+        train_config=config,
+        num_epochs=1,
+        train_workers=0,
+        val_workers=0,
+        checkpoint_dir=tmp_path,
+        seed=0,
+    )
+
+    checkpoint_path = next(tmp_path.rglob("*.ckpt"))
+    # weights_only=False here refers to torch.load's unpickling mode (needed
+    # to read Lightning metadata), not to what the checkpoint contains.
+    checkpoint = torch.load(
+        checkpoint_path,
+        map_location="cpu",
+        weights_only=False,
+    )
+
+    assert "state_dict" in checkpoint
+    assert "hyper_parameters" in checkpoint
+    assert "pytorch-lightning_version" in checkpoint
+    assert "optimizer_states" not in checkpoint
+    assert "lr_schedulers" not in checkpoint
+
+
+def test_resolve_checkpoint_path_returns_local_path_unchanged(
+    tmp_path: Path,
+) -> None:
+    """Existing local paths (Path or str) pass through unresolved."""
+    local_path = tmp_path / "model.ckpt"
+    local_path.write_bytes(b"checkpoint")
+
+    assert resolve_checkpoint_path(local_path) == local_path
+    assert resolve_checkpoint_path(str(local_path)) == local_path
+
+
+def test_get_bundled_checkpoint_names_lists_supported_aliases() -> None:
+    """The bundled-alias listing starts with the default checkpoint."""
+    assert get_bundled_checkpoint_names() == (
+        DEFAULT_CHECKPOINT,
+        "batdetect2_uk_same",
+    )
+
+
+def test_resolve_checkpoint_path_uses_default_bundled_alias() -> None:
+    """Calling with no argument resolves the DEFAULT_CHECKPOINT alias."""
+    resolved = resolve_checkpoint_path()
+
+    assert resolved == resolve_checkpoint_path(DEFAULT_CHECKPOINT)
+
+
+def test_resolve_checkpoint_path_accepts_bundled_alias() -> None:
+    """A bundled alias resolves to a real, shipped .ckpt file."""
+    resolved = resolve_checkpoint_path(DEFAULT_CHECKPOINT)
+
+    assert resolved.name == "batdetect2_uk_same.ckpt"
+    assert resolved.exists()
+
+
+def test_resolve_checkpoint_path_prefers_existing_local_path_over_alias(
+    tmp_path: Path,
+) -> None:
+    """A real file whose name resembles an alias is treated as a path."""
+    local_path = tmp_path / "uk_same"
+    local_path.write_bytes(b"checkpoint")
+
+    assert resolve_checkpoint_path(local_path) == local_path
+    assert resolve_checkpoint_path(str(local_path)) == local_path
+
+
+def test_resolve_checkpoint_path_downloads_huggingface_checkpoint(
+    monkeypatch: pytest.MonkeyPatch,
+    tmp_path: Path,
+) -> None:
+    """hf:// URIs are split into repo_id + filename and fetched via
+    huggingface_hub.hf_hub_download (stubbed here, no network)."""
+    expected_path = tmp_path / "downloaded.ckpt"
+
+    def fake_hf_hub_download(repo_id: str, filename: str) -> str:
+        # Verify the URI parsing inside the stub itself.
+        assert repo_id == "owner/repo"
+        assert filename == "weights/model.ckpt"
+        return str(expected_path)
+
+    class FakeHuggingFaceHub(types.ModuleType):
+        hf_hub_download = staticmethod(fake_hf_hub_download)
+
+    # Install the fake module so the lazy `import huggingface_hub` inside
+    # resolve_checkpoint_path picks it up.
+    fake_module = FakeHuggingFaceHub("huggingface_hub")
+    monkeypatch.setitem(
+        sys.modules,
+        "huggingface_hub",
+        fake_module,
+    )
+
+    resolved = resolve_checkpoint_path("hf://owner/repo/weights/model.ckpt")
+
+    assert resolved == expected_path
+
+
+def test_resolve_checkpoint_path_requires_huggingface_dependency(
+    monkeypatch: pytest.MonkeyPatch,
+) -> None:
+    """A missing huggingface_hub import surfaces as a helpful ValueError."""
+    # Remove any cached module so the import is attempted fresh.
+    monkeypatch.delitem(sys.modules, "huggingface_hub", raising=False)
+
+    import builtins
+
+    original_import = builtins.__import__
+
+    # Intercept only the huggingface_hub import; all others proceed normally.
+    def fake_import(name, globals=None, locals=None, fromlist=(), level=0):
+        if name == "huggingface_hub":
+            raise ImportError("missing")
+        return original_import(name, globals, locals, fromlist, level)
+
+    monkeypatch.setattr(builtins, "__import__", fake_import)
+
+    with pytest.raises(ValueError, match="Hugging Face checkpoint support"):
+        resolve_checkpoint_path("hf://owner/repo/weights/model.ckpt")
+
+
+def test_resolve_checkpoint_path_rejects_incomplete_huggingface_uri() -> None:
+    """An hf:// URI without a file path is rejected with usage guidance."""
+    with pytest.raises(ValueError, match="hf://owner/repo/path/to"):
+        resolve_checkpoint_path("hf://owner/repo")
+
+
+def test_resolve_checkpoint_path_rejects_missing_local_path() -> None:
+    """A non-existent path that is also not an alias raises FileNotFoundError."""
+    with pytest.raises(
+        FileNotFoundError,
+        match="checkpoint alias",
+    ):
+        resolve_checkpoint_path("missing.ckpt")
diff --git a/tests/test_train/test_config.py b/tests/test_train/test_config.py
new file mode 100644
index 00000000..547e64f5
--- /dev/null
+++ b/tests/test_train/test_config.py
@@ -0,0 +1,43 @@
+from batdetect2.audio import AudioConfig
+from batdetect2.evaluate import EvaluationConfig
+from batdetect2.inference import InferenceConfig
+from batdetect2.logging import AppLoggingConfig
+from batdetect2.models import ModelConfig
+from batdetect2.outputs import OutputsConfig
+from batdetect2.targets import TargetConfig
+from batdetect2.train import TrainingConfig
+
+
+def test_example_split_configs_are_valid(example_data_dir):
+    """Every shipped example config parses into its pydantic config class.
+
+    Guards against the example data drifting out of sync with the config
+    schemas after refactors.
+    """
+    configs_dir = example_data_dir / "configs"
+
+    assert isinstance(
+        AudioConfig.load(configs_dir / "audio.yaml"), AudioConfig
+    )
+    assert isinstance(
+        ModelConfig.load(configs_dir / "model.yaml"), ModelConfig
+    )
+    # targets.yaml lives at the example-data root, not under configs/.
+    assert isinstance(
+        TargetConfig.load(example_data_dir / "targets.yaml"),
+        TargetConfig,
+    )
+    assert isinstance(
+        TrainingConfig.load(configs_dir / "training.yaml"),
+        TrainingConfig,
+    )
+    assert isinstance(
+        EvaluationConfig.load(configs_dir / "evaluation.yaml"),
+        EvaluationConfig,
+    )
+    assert isinstance(
+        InferenceConfig.load(configs_dir / "inference.yaml"),
+        InferenceConfig,
+    )
+    assert isinstance(
+        OutputsConfig.load(configs_dir / "outputs.yaml"),
+        OutputsConfig,
+    )
+    assert isinstance(
+        AppLoggingConfig.load(configs_dir / "logging.yaml"),
+        AppLoggingConfig,
+    )
diff --git a/tests/test_train/test_labels.py b/tests/test_train/test_labels.py
new file mode 100644
index 00000000..e1de1d63
--- /dev/null
+++ b/tests/test_train/test_labels.py
@@ -0,0 +1,74 @@
+from pathlib import Path
+
+import torch
+from soundevent import data
+
+from batdetect2.targets import TargetConfig, build_roi_mapping, build_targets
+from batdetect2.targets.rois import AnchorBBoxMapperConfig, ROIMappingConfig
+from batdetect2.train.labels import generate_heatmaps
+
+# Module-level fixtures shared by the heatmap tests below. The file does not
+# need to exist: only the metadata is used for label generation.
+recording = data.Recording(
+    samplerate=256_000,
+    duration=1,
+    channels=1,
+    time_expansion=1,
+    hash="asdf98sdf",
+    path=Path("/path/to/audio.wav"),
+)
+
+# NOTE(review): clip end_time (100) exceeds the recording duration (1) —
+# presumably intentional so heatmap coordinates map 1:1 onto the 100-wide
+# tensor below, but worth confirming.
+clip = data.Clip(
+    recording=recording,
+    start_time=0,
+    end_time=100,
+)
+
+
+def test_generated_heatmap_are_non_zero_at_correct_positions(
+    sample_target_config: TargetConfig,
+    pippip_tag: data.Tag,
+    bat_tag: data.Tag,
+):
+    """Heatmaps peak at the annotated box's anchor cell with correct sizes.
+
+    Scales are set to 1 so heatmap indices correspond directly to the
+    clip's time/frequency coordinates.
+    """
+    config = sample_target_config.model_copy(
+        update=dict(
+            roi=ROIMappingConfig(
+                default=AnchorBBoxMapperConfig(
+                    time_scale=1,
+                    frequency_scale=1,
+                )
+            )
+        )
+    )
+
+    targets = build_targets(config)
+    roi_mapper = build_roi_mapping(config=config.roi)
+
+    # Single annotation: a 10x20 box anchored at (time=10, freq=10),
+    # tagged as the "pippip" class.
+    clip_annotation = data.ClipAnnotation(
+        clip=clip,
+        sound_events=[
+            data.SoundEventAnnotation(
+                sound_event=data.SoundEvent(
+                    recording=recording,
+                    geometry=data.BoundingBox(
+                        coordinates=[10, 10, 20, 30],
+                    ),
+                ),
+                tags=[pippip_tag, bat_tag],
+            )
+        ],
+    )
+
+    detection_heatmap, class_heatmap, size_heatmap = generate_heatmaps(
+        clip_annotation,
+        torch.rand([1, 100, 100]),
+        min_freq=0,
+        max_freq=100,
+        targets=targets,
+        roi_mapper=roi_mapper,
+    )
+    pippip_index = targets.class_names.index("pippip")
+    myomyo_index = targets.class_names.index("myomyo")
+    # Size channels hold duration (20-10) and bandwidth (30-10) at the anchor.
+    assert size_heatmap[0, 10, 10] == 10
+    assert size_heatmap[1, 10, 10] == 20
+    assert class_heatmap[pippip_index, 10, 10] == 1.0
+    assert class_heatmap[myomyo_index, 10, 10] == 0.0
+    assert detection_heatmap[0, 10, 10] == 1.0
diff --git a/tests/test_train/test_lightning.py b/tests/test_train/test_lightning.py
new file mode 100644
index 00000000..756329d1
--- /dev/null
+++ b/tests/test_train/test_lightning.py
@@ -0,0 +1,456 @@
+from pathlib import Path
+
+import lightning as L
+import pytest
+import torch
+from deepdiff import DeepDiff
+from soundevent import data
+from torch.optim import Adam
+from torch.optim.lr_scheduler import CosineAnnealingLR
+
+from batdetect2.api_v2 import BatDetect2API
+from batdetect2.audio.types import AudioLoader
+from batdetect2.models import (
+ ModelConfig,
+ build_model,
+ build_model_with_new_targets,
+)
+from batdetect2.targets import TargetConfig, build_roi_mapping, build_targets
+from batdetect2.train import (
+ TrainingConfig,
+ TrainingModule,
+ load_model_from_checkpoint,
+ run_train,
+)
+from batdetect2.train.logging import (
+ DatasetConfigArtifact,
+ DatasetConfigArtifactLogging,
+)
+from batdetect2.train.optimizers import AdamOptimizerConfig
+from batdetect2.train.schedulers import CosineAnnealingSchedulerConfig
+from batdetect2.train.train import build_training_module
+
+
+def build_default_module(
+ target_config: TargetConfig | None = None,
+ model_config: ModelConfig | None = None,
+ train_config: TrainingConfig | None = None,
+):
+ target_config = target_config or TargetConfig()
+ model_config = model_config or ModelConfig()
+ train_config = train_config or TrainingConfig()
+ targets = build_targets(target_config)
+ roi_mapper = build_roi_mapping(target_config.roi)
+ return build_training_module(
+ model_config=model_config,
+ class_names=targets.class_names,
+ dimension_names=roi_mapper.dimension_names,
+ train_config=train_config,
+ )
+
+
+def test_can_initialize_default_module():
+ module = build_default_module()
+ assert isinstance(module, L.LightningModule)
+
+
+@pytest.mark.slow
+def test_can_save_checkpoint(
+ tmp_path: Path,
+ clip: data.Clip,
+ sample_audio_loader: AudioLoader,
+):
+ module = build_default_module()
+ trainer = L.Trainer()
+ path = tmp_path / "example.ckpt"
+ trainer.strategy.connect(module)
+ trainer.save_checkpoint(path)
+
+ recovered = TrainingModule.load_from_checkpoint(path)
+
+ wav = torch.tensor(sample_audio_loader.load_clip(clip)).unsqueeze(0)
+
+ spec1 = module.model.preprocessor(wav)
+ spec2 = recovered.model.preprocessor(wav)
+
+ torch.testing.assert_close(spec1, spec2, rtol=0, atol=0)
+
+ output1 = module.model(wav.unsqueeze(0))
+ output2 = recovered.model(wav.unsqueeze(0))
+
+ torch.testing.assert_close(output1, output2, rtol=0, atol=0)
+
+
+def test_load_model_from_checkpoint_returns_model_and_configs(
+ tmp_path: Path,
+):
+ input_model_config = ModelConfig(samplerate=192_000)
+ expected_model_config = ModelConfig.model_validate(
+ input_model_config.model_dump(mode="json")
+ )
+ train_config = TrainingConfig()
+ targets_config = TargetConfig()
+ targets = build_targets(targets_config)
+ roi_mapper = build_roi_mapping(targets_config.roi)
+ module = build_training_module(
+ model_config=input_model_config,
+ class_names=targets.class_names,
+ dimension_names=roi_mapper.dimension_names,
+ train_config=train_config,
+ )
+ trainer = L.Trainer()
+ path = tmp_path / "example.ckpt"
+ trainer.strategy.connect(module)
+ trainer.save_checkpoint(path)
+
+ model, loaded_configs = load_model_from_checkpoint(path)
+
+ assert model is not None
+ assert loaded_configs.model.model_dump(
+ mode="json"
+ ) == expected_model_config.model_dump(mode="json")
+ assert loaded_configs.targets.model_dump(
+ mode="json"
+ ) == targets_config.model_dump(mode="json")
+ assert loaded_configs.train.model_dump(
+ mode="json"
+ ) == train_config.model_dump(mode="json")
+ assert model.class_names == targets.class_names
+ assert model.dimension_names == roi_mapper.dimension_names
+
+ recovered = TrainingModule.load_from_checkpoint(path)
+ assert recovered.train_config.model_dump(
+ mode="json"
+ ) == train_config.model_dump(mode="json")
+
+
+def test_checkpoint_stores_train_config_hyperparameters(tmp_path: Path):
+ model_config = ModelConfig(samplerate=384_000)
+ expected_model_config = ModelConfig.model_validate(
+ model_config.model_dump(mode="json")
+ )
+ train_config = TrainingConfig()
+ targets_config = TargetConfig()
+ targets = build_targets(targets_config)
+ roi_mapper = build_roi_mapping(targets_config.roi)
+ train_config.optimizer = AdamOptimizerConfig(learning_rate=5e-4)
+ train_config.scheduler = CosineAnnealingSchedulerConfig(t_max=123)
+ train_config.trainer.max_epochs = 3
+ train_config.train_loader.batch_size = 2
+
+ module = build_training_module(
+ model_config=model_config,
+ class_names=targets.class_names,
+ dimension_names=roi_mapper.dimension_names,
+ train_config=train_config,
+ )
+ trainer = L.Trainer()
+ path = tmp_path / "example.ckpt"
+ trainer.strategy.connect(module)
+ trainer.save_checkpoint(path)
+
+ _, recovered_configs = load_model_from_checkpoint(path)
+ assert not DeepDiff(
+ recovered_configs.model.model_dump(mode="json"),
+ expected_model_config.model_dump(mode="json"),
+ )
+ assert not DeepDiff(
+ recovered_configs.train.model_dump(mode="json"),
+ train_config.model_dump(mode="json"),
+ )
+
+
+def test_load_model_from_checkpoint_includes_targets_config(tmp_path: Path):
+ targets_config = TargetConfig()
+ targets = build_targets(targets_config)
+ roi_mapper = build_roi_mapping(targets_config.roi)
+ module = build_training_module(
+ model_config=ModelConfig(),
+ targets_config=targets_config,
+ class_names=targets.class_names,
+ dimension_names=roi_mapper.dimension_names,
+ train_config=TrainingConfig(),
+ )
+ trainer = L.Trainer()
+ path = tmp_path / "example.ckpt"
+ trainer.strategy.connect(module)
+ trainer.save_checkpoint(path)
+
+ _, loaded_configs = load_model_from_checkpoint(path)
+
+ assert loaded_configs.targets.model_dump(
+ mode="json"
+ ) == targets_config.model_dump(mode="json")
+
+
+def test_configure_optimizers_uses_train_config_values(tmp_path: Path):
+ model_config = ModelConfig()
+ expected_model_config = ModelConfig.model_validate(
+ model_config.model_dump(mode="json")
+ )
+ train_config = TrainingConfig()
+ targets_config = TargetConfig()
+ targets = build_targets(targets_config)
+ roi_mapper = build_roi_mapping(targets_config.roi)
+ train_config.optimizer = AdamOptimizerConfig(learning_rate=5e-4)
+ train_config.scheduler = CosineAnnealingSchedulerConfig(t_max=321)
+
+ module = build_training_module(
+ model_config=model_config,
+ class_names=targets.class_names,
+ dimension_names=roi_mapper.dimension_names,
+ train_config=train_config,
+ )
+
+ optimization_config = module.configure_optimizers()
+ optimizer = optimization_config["optimizer"]
+ scheduler = optimization_config["lr_scheduler"]["scheduler"]
+
+ assert isinstance(optimizer, Adam)
+ assert isinstance(scheduler, CosineAnnealingLR)
+ assert optimizer.param_groups[0]["lr"] == 5e-4
+ assert scheduler.T_max == 321
+
+ trainer = L.Trainer()
+ path = tmp_path / "example.ckpt"
+ trainer.strategy.connect(module)
+ trainer.save_checkpoint(path)
+
+ _, recovered_configs = load_model_from_checkpoint(path)
+ assert recovered_configs.model.model_dump(
+ mode="json"
+ ) == expected_model_config.model_dump(mode="json")
+ assert recovered_configs.train.model_dump(
+ mode="json"
+ ) == train_config.model_dump(mode="json")
+
+ recovered = TrainingModule.load_from_checkpoint(path)
+
+ loaded_optimization_config = recovered.configure_optimizers()
+ loaded_optimizer = loaded_optimization_config["optimizer"]
+ loaded_scheduler = loaded_optimization_config["lr_scheduler"]["scheduler"]
+ assert loaded_optimizer.param_groups[0]["lr"] == 5e-4
+ assert loaded_scheduler.T_max == 321
+
+
+def test_api_from_checkpoint_reconstructs_model_config(tmp_path: Path):
+ module = build_default_module()
+ trainer = L.Trainer()
+ path = tmp_path / "example.ckpt"
+ trainer.strategy.connect(module)
+ trainer.save_checkpoint(path)
+
+ _, stored_configs = load_model_from_checkpoint(path)
+ api = BatDetect2API.from_checkpoint(path)
+
+ assert api.model_config.model_dump(
+ mode="json"
+ ) == stored_configs.model.model_dump(mode="json")
+ assert api.audio_config.samplerate == stored_configs.model.samplerate
+
+
+def test_api_from_checkpoint_reconstructs_targets_from_checkpoint(
+ tmp_path: Path,
+) -> None:
+ targets_config = TargetConfig()
+ module = build_default_module(target_config=targets_config)
+ trainer = L.Trainer()
+ path = tmp_path / "example.ckpt"
+ trainer.strategy.connect(module)
+ trainer.save_checkpoint(path)
+
+ api = BatDetect2API.from_checkpoint(path)
+
+ assert api.targets.get_config() == targets_config.model_dump(mode="json")
+
+
+@pytest.mark.slow
+def test_train_smoke_produces_loadable_checkpoint(
+ tmp_path: Path,
+ example_annotations: list[data.ClipAnnotation],
+ sample_audio_loader: AudioLoader,
+):
+ # Given
+ train_config = TrainingConfig.model_validate(
+ {
+ "trainer": {
+ "limit_train_batches": 1,
+ "limit_val_batches": 1,
+ "log_every_n_steps": 1,
+ },
+ "train_loader": {
+ "batch_size": 1,
+ "augmentations": {"enabled": False},
+ },
+ }
+ )
+
+ # When
+ run_train(
+ train_annotations=example_annotations[:1],
+ val_annotations=example_annotations[:1],
+ train_config=train_config,
+ num_epochs=1,
+ train_workers=0,
+ val_workers=0,
+ checkpoint_dir=tmp_path,
+ seed=0,
+ )
+
+ # Then
+ checkpoints = list(tmp_path.rglob("*.ckpt"))
+ assert checkpoints
+
+ model, model_config = load_model_from_checkpoint(checkpoints[0])
+
+ wav = torch.tensor(
+ sample_audio_loader.load_clip(example_annotations[0].clip)
+ ).unsqueeze(0)
+ outputs = model(wav.unsqueeze(0))
+ assert outputs is not None
+
+
+def test_build_training_module_uses_provided_model() -> None:
+ targets = build_targets(TargetConfig())
+ roi_mapper = build_roi_mapping(TargetConfig().roi)
+ model = build_model(
+ ModelConfig(),
+ class_names=targets.class_names,
+ dimension_names=roi_mapper.dimension_names,
+ )
+
+ module = build_training_module(
+ model_config=ModelConfig(),
+ class_names=targets.class_names,
+ dimension_names=roi_mapper.dimension_names,
+ train_config=TrainingConfig(),
+ model=model,
+ )
+
+ assert module.model is model
+
+
+def test_build_model_with_new_targets_reuses_backbone_and_rebuilds_heads() -> (
+ None
+):
+ source_targets_config = TargetConfig()
+ source_targets = build_targets(source_targets_config)
+ source_roi_mapper = build_roi_mapping(source_targets_config.roi)
+ source_model = build_model(
+ ModelConfig(),
+ class_names=source_targets.class_names,
+ dimension_names=source_roi_mapper.dimension_names,
+ )
+
+ new_targets_config = TargetConfig.model_validate(
+ {
+ "classification_targets": [
+ {
+ "name": "single_class",
+ "tags": [{"key": "class", "value": "single_class"}],
+ }
+ ]
+ }
+ )
+ new_targets = build_targets(new_targets_config)
+ new_roi_mapper = build_roi_mapping(new_targets_config.roi)
+
+ rebuilt_model = build_model_with_new_targets(
+ model=source_model,
+ targets=new_targets,
+ roi_mapper=new_roi_mapper,
+ )
+
+ source_detector = source_model.detector
+ rebuilt_detector = rebuilt_model.detector
+
+ assert rebuilt_detector.backbone is source_detector.backbone
+ assert (
+ rebuilt_detector.classifier_head is not source_detector.classifier_head
+ )
+ assert rebuilt_detector.size_head is not source_detector.size_head
+ assert rebuilt_model.class_names == ["single_class"]
+ assert rebuilt_model.dimension_names == ["width", "height"]
+
+
+@pytest.mark.slow
+def test_run_train_logs_training_artifacts(
+ tmp_path: Path,
+ example_annotations: list[data.ClipAnnotation],
+ example_dataset,
+) -> None:
+ train_config = TrainingConfig.model_validate(
+ {
+ "trainer": {
+ "limit_train_batches": 1,
+ "limit_val_batches": 1,
+ "log_every_n_steps": 1,
+ },
+ "train_loader": {
+ "batch_size": 1,
+ "augmentations": {"enabled": False},
+ },
+ }
+ )
+
+ run_train(
+ train_annotations=example_annotations[:1],
+ val_annotations=example_annotations[:1],
+ train_config=train_config,
+ num_epochs=1,
+ train_workers=0,
+ val_workers=0,
+ checkpoint_dir=tmp_path / "checkpoints",
+ log_dir=tmp_path / "logs",
+ seed=0,
+ logging_callbacks=[
+ DatasetConfigArtifactLogging(
+ train_dataset_config=DatasetConfigArtifact(
+ filename="train_dataset.yaml",
+ config=example_dataset,
+ ),
+ val_dataset_config=DatasetConfigArtifact(
+ filename="val_dataset.yaml",
+ config=example_dataset,
+ ),
+ )
+ ],
+ )
+
+ artifact_root = next((tmp_path / "logs").rglob("training_artifacts"))
+
+ assert (artifact_root / "targets.yaml").exists()
+ assert (artifact_root / "train_dataset.yaml").exists()
+ assert (artifact_root / "val_dataset.yaml").exists()
+ assert (artifact_root / "train_class_summary.csv").exists()
+ assert (artifact_root / "val_class_summary.csv").exists()
+
+
+def test_run_train_rejects_incompatible_model_config(
+ example_annotations: list[data.ClipAnnotation],
+) -> None:
+ # Given
+ targets_config = TargetConfig()
+ targets = build_targets(targets_config)
+ roi_mapper = build_roi_mapping(targets_config.roi)
+ incompatible_config = ModelConfig()
+ incompatible_model = build_model(
+ incompatible_config,
+ class_names=targets.class_names,
+ dimension_names=[*roi_mapper.dimension_names, "extra_dim"],
+ )
+
+ # When/Then
+ with pytest.raises(
+ ValueError,
+ match="Provided model is incompatible with model_config",
+ ):
+ run_train(
+ train_annotations=example_annotations[:1],
+ val_annotations=None,
+ model=incompatible_model,
+ targets=targets,
+ roi_mapper=roi_mapper,
+ targets_config=targets_config,
+ train_config=TrainingConfig(),
+ )
diff --git a/tests/test_train/test_optimizers.py b/tests/test_train/test_optimizers.py
new file mode 100644
index 00000000..89c84bca
--- /dev/null
+++ b/tests/test_train/test_optimizers.py
@@ -0,0 +1,22 @@
+from torch import nn
+from torch.optim import SGD, Adam
+
+from batdetect2.train.optimizers import OptimizerImportConfig, build_optimizer
+
+
+def test_build_optimizer_defaults_to_adam():
+ model = nn.Linear(4, 2)
+ optimizer = build_optimizer(model.parameters())
+
+ assert isinstance(optimizer, Adam)
+
+
+def test_build_optimizer_supports_import_config():
+ model = nn.Linear(4, 2)
+ config = OptimizerImportConfig(
+ target="torch.optim.SGD",
+ arguments={"lr": 1e-3},
+ )
+
+ optimizer = build_optimizer(model.parameters(), config=config)
+ assert isinstance(optimizer, SGD)
diff --git a/tests/test_train/test_schedulers.py b/tests/test_train/test_schedulers.py
new file mode 100644
index 00000000..a489b542
--- /dev/null
+++ b/tests/test_train/test_schedulers.py
@@ -0,0 +1,35 @@
+from torch import nn
+from torch.optim import SGD
+from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR
+
+from batdetect2.train.schedulers import (
+ CosineAnnealingSchedulerConfig,
+ SchedulerImportConfig,
+ build_scheduler,
+)
+
+
+def test_build_scheduler_uses_epoch_t_max_directly():
+ model = nn.Linear(4, 2)
+ optimizer = SGD(model.parameters(), lr=1e-3)
+ scheduler = build_scheduler(
+ optimizer,
+ config=CosineAnnealingSchedulerConfig(t_max=7),
+ )
+
+ assert isinstance(scheduler, CosineAnnealingLR)
+ assert scheduler.T_max == 7
+
+
+def test_build_scheduler_supports_import_config():
+ model = nn.Linear(4, 2)
+ optimizer = SGD(model.parameters(), lr=1e-3)
+ scheduler = build_scheduler(
+ optimizer,
+ config=SchedulerImportConfig(
+ target="torch.optim.lr_scheduler.StepLR",
+ arguments={"step_size": 2},
+ ),
+ )
+
+ assert isinstance(scheduler, StepLR)
diff --git a/tests/test_utils/test_arrays.py b/tests/test_utils/test_arrays.py
new file mode 100644
index 00000000..255f600e
--- /dev/null
+++ b/tests/test_utils/test_arrays.py
@@ -0,0 +1,59 @@
+import torch
+
+from batdetect2.core.arrays import adjust_width, extend_width
+
+
+def test_extend_width():
+ array = torch.rand([1, 1, 128, 100])
+ extended = extend_width(array, 100)
+ assert extended.shape == (1, 1, 128, 200)
+
+ extended = extend_width(array, 100, axis=0)
+ assert extended.shape == (101, 1, 128, 100)
+
+ extended = extend_width(array, 100, axis=1)
+ assert extended.shape == (1, 101, 128, 100)
+
+ extended = extend_width(array, 100, axis=2)
+ assert extended.shape == (1, 1, 228, 100)
+
+ extended = extend_width(array, 100, axis=3)
+ assert extended.shape == (1, 1, 128, 200)
+
+ extended = extend_width(array, 100, axis=-2)
+ assert extended.shape == (1, 1, 228, 100)
+
+
+def test_extends_with_value():
+ array = torch.rand([1, 1, 128, 100])
+ extended = extend_width(array, 100, value=-1)
+ torch.testing.assert_close(
+ extended[:, :, :, 100:],
+ torch.ones_like(array) * -1,
+ rtol=0,
+ atol=0,
+ )
+
+
+def test_can_adjust_short_width():
+ array = torch.rand([1, 1, 128, 100])
+ extended = adjust_width(array, 512)
+ assert extended.shape == (1, 1, 128, 512)
+
+ extended = adjust_width(array, 512, axis=0)
+ assert extended.shape == (512, 1, 128, 100)
+
+ extended = adjust_width(array, 512, axis=1)
+ assert extended.shape == (1, 512, 128, 100)
+
+ extended = adjust_width(array, 512, axis=2)
+ assert extended.shape == (1, 1, 512, 100)
+
+ extended = adjust_width(array, 512, axis=3)
+ assert extended.shape == (1, 1, 128, 512)
+
+
+def test_can_adjust_long_width():
+ array = torch.rand([1, 1, 128, 512])
+ extended = adjust_width(array, 256)
+ assert extended.shape == (1, 1, 128, 256)
diff --git a/tests/test_utils/test_tensors.py b/tests/test_utils/test_tensors.py
new file mode 100644
index 00000000..5d821192
--- /dev/null
+++ b/tests/test_utils/test_tensors.py
@@ -0,0 +1,42 @@
+import numpy as np
+import torch
+
+from batdetect2.utils.tensors import adjust_width, make_width_divisible
+
+
+def test_width_is_divisible_after_adjustment():
+ tensor = torch.rand([1, 1, 128, 374])
+ adjusted = make_width_divisible(tensor, 32)
+ assert adjusted.shape[-1] % 32 == 0
+ assert adjusted.shape == (1, 1, 128, 384)
+
+
+def test_non_last_axis_is_divisible_after_adjustment():
+ tensor = torch.rand([1, 1, 77, 124])
+ adjusted = make_width_divisible(tensor, 32, axis=-2)
+ assert adjusted.shape[-2] % 32 == 0
+ assert adjusted.shape == (1, 1, 96, 124)
+
+
+def test_make_width_divisible_can_handle_numpy_array():
+ array = np.random.random([1, 1, 128, 374])
+ adjusted = make_width_divisible(array, 32)
+ assert adjusted.shape[-1] % 32 == 0
+ assert adjusted.shape == (1, 1, 128, 384)
+ assert isinstance(adjusted, torch.Tensor)
+
+
+def test_adjust_last_axis_width_by_default():
+ tensor = torch.rand([1, 1, 128, 374])
+ adjusted = adjust_width(tensor, 512)
+ assert adjusted.shape == (1, 1, 128, 512)
+ assert (tensor == adjusted[:, :, :, :374]).all()
+ assert (adjusted[:, :, :, 374:] == 0).all()
+
+
+def test_can_adjust_second_to_last_axis():
+ tensor = torch.rand([1, 1, 89, 512])
+ adjusted = adjust_width(tensor, 128, axis=-2)
+ assert adjusted.shape == (1, 1, 128, 512)
+ assert (tensor == adjusted[:, :, :89, :]).all()
+ assert (adjusted[:, :, 89:, :] == 0).all()
diff --git a/uv.lock b/uv.lock
deleted file mode 100644
index 1c7f1b37..00000000
--- a/uv.lock
+++ /dev/null
@@ -1,3737 +0,0 @@
-version = 1
-revision = 3
-requires-python = ">=3.9, <3.15"
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-
-[[package]]
-name = "attrs"
-version = "26.1.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/9a/8e/82a0fe20a541c03148528be8cac2408564a6c9a0cc7e9171802bc1d26985/attrs-26.1.0.tar.gz", hash = "sha256:d03ceb89cb322a8fd706d4fb91940737b6642aa36998fe130a9bc96c985eff32", size = 952055, upload-time = "2026-03-19T14:22:25.026Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/64/b4/17d4b0b2a2dc85a6df63d1157e028ed19f90d4cd97c36717afef2bc2f395/attrs-26.1.0-py3-none-any.whl", hash = "sha256:c647aa4a12dfbad9333ca4e71fe62ddc36f4e63b2d260a37a8b83d2f043ac309", size = 67548, upload-time = "2026-03-19T14:22:23.645Z" },
-]
-
-[[package]]
-name = "audioop-lts"
-version = "0.2.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/38/53/946db57842a50b2da2e0c1e34bd37f36f5aadba1a929a3971c5d7841dbca/audioop_lts-0.2.2.tar.gz", hash = "sha256:64d0c62d88e67b98a1a5e71987b7aa7b5bcffc7dcee65b635823dbdd0a8dbbd0", size = 30686, upload-time = "2025-08-05T16:43:17.409Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/de/d4/94d277ca941de5a507b07f0b592f199c22454eeaec8f008a286b3fbbacd6/audioop_lts-0.2.2-cp313-abi3-macosx_10_13_universal2.whl", hash = "sha256:fd3d4602dc64914d462924a08c1a9816435a2155d74f325853c1f1ac3b2d9800", size = 46523, upload-time = "2025-08-05T16:42:20.836Z" },
- { url = "https://files.pythonhosted.org/packages/f8/5a/656d1c2da4b555920ce4177167bfeb8623d98765594af59702c8873f60ec/audioop_lts-0.2.2-cp313-abi3-macosx_10_13_x86_64.whl", hash = "sha256:550c114a8df0aafe9a05442a1162dfc8fec37e9af1d625ae6060fed6e756f303", size = 27455, upload-time = "2025-08-05T16:42:22.283Z" },
- { url = "https://files.pythonhosted.org/packages/1b/83/ea581e364ce7b0d41456fb79d6ee0ad482beda61faf0cab20cbd4c63a541/audioop_lts-0.2.2-cp313-abi3-macosx_11_0_arm64.whl", hash = "sha256:9a13dc409f2564de15dd68be65b462ba0dde01b19663720c68c1140c782d1d75", size = 26997, upload-time = "2025-08-05T16:42:23.849Z" },
- { url = "https://files.pythonhosted.org/packages/b8/3b/e8964210b5e216e5041593b7d33e97ee65967f17c282e8510d19c666dab4/audioop_lts-0.2.2-cp313-abi3-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:51c916108c56aa6e426ce611946f901badac950ee2ddaf302b7ed35d9958970d", size = 85844, upload-time = "2025-08-05T16:42:25.208Z" },
- { url = "https://files.pythonhosted.org/packages/c7/2e/0a1c52faf10d51def20531a59ce4c706cb7952323b11709e10de324d6493/audioop_lts-0.2.2-cp313-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:47eba38322370347b1c47024defbd36374a211e8dd5b0dcbce7b34fdb6f8847b", size = 85056, upload-time = "2025-08-05T16:42:26.559Z" },
- { url = "https://files.pythonhosted.org/packages/75/e8/cd95eef479656cb75ab05dfece8c1f8c395d17a7c651d88f8e6e291a63ab/audioop_lts-0.2.2-cp313-abi3-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba7c3a7e5f23e215cb271516197030c32aef2e754252c4c70a50aaff7031a2c8", size = 93892, upload-time = "2025-08-05T16:42:27.902Z" },
- { url = "https://files.pythonhosted.org/packages/5c/1e/a0c42570b74f83efa5cca34905b3eef03f7ab09fe5637015df538a7f3345/audioop_lts-0.2.2-cp313-abi3-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:def246fe9e180626731b26e89816e79aae2276f825420a07b4a647abaa84becc", size = 96660, upload-time = "2025-08-05T16:42:28.9Z" },
- { url = "https://files.pythonhosted.org/packages/50/d5/8a0ae607ca07dbb34027bac8db805498ee7bfecc05fd2c148cc1ed7646e7/audioop_lts-0.2.2-cp313-abi3-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e160bf9df356d841bb6c180eeeea1834085464626dc1b68fa4e1d59070affdc3", size = 79143, upload-time = "2025-08-05T16:42:29.929Z" },
- { url = "https://files.pythonhosted.org/packages/12/17/0d28c46179e7910bfb0bb62760ccb33edb5de973052cb2230b662c14ca2e/audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:4b4cd51a57b698b2d06cb9993b7ac8dfe89a3b2878e96bc7948e9f19ff51dba6", size = 84313, upload-time = "2025-08-05T16:42:30.949Z" },
- { url = "https://files.pythonhosted.org/packages/84/ba/bd5d3806641564f2024e97ca98ea8f8811d4e01d9b9f9831474bc9e14f9e/audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_ppc64le.whl", hash = "sha256:4a53aa7c16a60a6857e6b0b165261436396ef7293f8b5c9c828a3a203147ed4a", size = 93044, upload-time = "2025-08-05T16:42:31.959Z" },
- { url = "https://files.pythonhosted.org/packages/f9/5e/435ce8d5642f1f7679540d1e73c1c42d933331c0976eb397d1717d7f01a3/audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_riscv64.whl", hash = "sha256:3fc38008969796f0f689f1453722a0f463da1b8a6fbee11987830bfbb664f623", size = 78766, upload-time = "2025-08-05T16:42:33.302Z" },
- { url = "https://files.pythonhosted.org/packages/ae/3b/b909e76b606cbfd53875693ec8c156e93e15a1366a012f0b7e4fb52d3c34/audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_s390x.whl", hash = "sha256:15ab25dd3e620790f40e9ead897f91e79c0d3ce65fe193c8ed6c26cffdd24be7", size = 87640, upload-time = "2025-08-05T16:42:34.854Z" },
- { url = "https://files.pythonhosted.org/packages/30/e7/8f1603b4572d79b775f2140d7952f200f5e6c62904585d08a01f0a70393a/audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:03f061a1915538fd96272bac9551841859dbb2e3bf73ebe4a23ef043766f5449", size = 86052, upload-time = "2025-08-05T16:42:35.839Z" },
- { url = "https://files.pythonhosted.org/packages/b5/96/c37846df657ccdda62ba1ae2b6534fa90e2e1b1742ca8dcf8ebd38c53801/audioop_lts-0.2.2-cp313-abi3-win32.whl", hash = "sha256:3bcddaaf6cc5935a300a8387c99f7a7fbbe212a11568ec6cf6e4bc458c048636", size = 26185, upload-time = "2025-08-05T16:42:37.04Z" },
- { url = "https://files.pythonhosted.org/packages/34/a5/9d78fdb5b844a83da8a71226c7bdae7cc638861085fff7a1d707cb4823fa/audioop_lts-0.2.2-cp313-abi3-win_amd64.whl", hash = "sha256:a2c2a947fae7d1062ef08c4e369e0ba2086049a5e598fda41122535557012e9e", size = 30503, upload-time = "2025-08-05T16:42:38.427Z" },
- { url = "https://files.pythonhosted.org/packages/34/25/20d8fde083123e90c61b51afb547bb0ea7e77bab50d98c0ab243d02a0e43/audioop_lts-0.2.2-cp313-abi3-win_arm64.whl", hash = "sha256:5f93a5db13927a37d2d09637ccca4b2b6b48c19cd9eda7b17a2e9f77edee6a6f", size = 24173, upload-time = "2025-08-05T16:42:39.704Z" },
- { url = "https://files.pythonhosted.org/packages/58/a7/0a764f77b5c4ac58dc13c01a580f5d32ae8c74c92020b961556a43e26d02/audioop_lts-0.2.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:73f80bf4cd5d2ca7814da30a120de1f9408ee0619cc75da87d0641273d202a09", size = 47096, upload-time = "2025-08-05T16:42:40.684Z" },
- { url = "https://files.pythonhosted.org/packages/aa/ed/ebebedde1a18848b085ad0fa54b66ceb95f1f94a3fc04f1cd1b5ccb0ed42/audioop_lts-0.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:106753a83a25ee4d6f473f2be6b0966fc1c9af7e0017192f5531a3e7463dce58", size = 27748, upload-time = "2025-08-05T16:42:41.992Z" },
- { url = "https://files.pythonhosted.org/packages/cb/6e/11ca8c21af79f15dbb1c7f8017952ee8c810c438ce4e2b25638dfef2b02c/audioop_lts-0.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fbdd522624141e40948ab3e8cdae6e04c748d78710e9f0f8d4dae2750831de19", size = 27329, upload-time = "2025-08-05T16:42:42.987Z" },
- { url = "https://files.pythonhosted.org/packages/84/52/0022f93d56d85eec5da6b9da6a958a1ef09e80c39f2cc0a590c6af81dcbb/audioop_lts-0.2.2-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:143fad0311e8209ece30a8dbddab3b65ab419cbe8c0dde6e8828da25999be911", size = 92407, upload-time = "2025-08-05T16:42:44.336Z" },
- { url = "https://files.pythonhosted.org/packages/87/1d/48a889855e67be8718adbc7a01f3c01d5743c325453a5e81cf3717664aad/audioop_lts-0.2.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dfbbc74ec68a0fd08cfec1f4b5e8cca3d3cd7de5501b01c4b5d209995033cde9", size = 91811, upload-time = "2025-08-05T16:42:45.325Z" },
- { url = "https://files.pythonhosted.org/packages/98/a6/94b7213190e8077547ffae75e13ed05edc488653c85aa5c41472c297d295/audioop_lts-0.2.2-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cfcac6aa6f42397471e4943e0feb2244549db5c5d01efcd02725b96af417f3fe", size = 100470, upload-time = "2025-08-05T16:42:46.468Z" },
- { url = "https://files.pythonhosted.org/packages/e9/e9/78450d7cb921ede0cfc33426d3a8023a3bda755883c95c868ee36db8d48d/audioop_lts-0.2.2-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:752d76472d9804ac60f0078c79cdae8b956f293177acd2316cd1e15149aee132", size = 103878, upload-time = "2025-08-05T16:42:47.576Z" },
- { url = "https://files.pythonhosted.org/packages/4f/e2/cd5439aad4f3e34ae1ee852025dc6aa8f67a82b97641e390bf7bd9891d3e/audioop_lts-0.2.2-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:83c381767e2cc10e93e40281a04852facc4cd9334550e0f392f72d1c0a9c5753", size = 84867, upload-time = "2025-08-05T16:42:49.003Z" },
- { url = "https://files.pythonhosted.org/packages/68/4b/9d853e9076c43ebba0d411e8d2aa19061083349ac695a7d082540bad64d0/audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c0022283e9556e0f3643b7c3c03f05063ca72b3063291834cca43234f20c60bb", size = 90001, upload-time = "2025-08-05T16:42:50.038Z" },
- { url = "https://files.pythonhosted.org/packages/58/26/4bae7f9d2f116ed5593989d0e521d679b0d583973d203384679323d8fa85/audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:a2d4f1513d63c795e82948e1305f31a6d530626e5f9f2605408b300ae6095093", size = 99046, upload-time = "2025-08-05T16:42:51.111Z" },
- { url = "https://files.pythonhosted.org/packages/b2/67/a9f4fb3e250dda9e9046f8866e9fa7d52664f8985e445c6b4ad6dfb55641/audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:c9c8e68d8b4a56fda8c025e538e639f8c5953f5073886b596c93ec9b620055e7", size = 84788, upload-time = "2025-08-05T16:42:52.198Z" },
- { url = "https://files.pythonhosted.org/packages/70/f7/3de86562db0121956148bcb0fe5b506615e3bcf6e63c4357a612b910765a/audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:96f19de485a2925314f5020e85911fb447ff5fbef56e8c7c6927851b95533a1c", size = 94472, upload-time = "2025-08-05T16:42:53.59Z" },
- { url = "https://files.pythonhosted.org/packages/f1/32/fd772bf9078ae1001207d2df1eef3da05bea611a87dd0e8217989b2848fa/audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e541c3ef484852ef36545f66209444c48b28661e864ccadb29daddb6a4b8e5f5", size = 92279, upload-time = "2025-08-05T16:42:54.632Z" },
- { url = "https://files.pythonhosted.org/packages/4f/41/affea7181592ab0ab560044632571a38edaf9130b84928177823fbf3176a/audioop_lts-0.2.2-cp313-cp313t-win32.whl", hash = "sha256:d5e73fa573e273e4f2e5ff96f9043858a5e9311e94ffefd88a3186a910c70917", size = 26568, upload-time = "2025-08-05T16:42:55.627Z" },
- { url = "https://files.pythonhosted.org/packages/28/2b/0372842877016641db8fc54d5c88596b542eec2f8f6c20a36fb6612bf9ee/audioop_lts-0.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9191d68659eda01e448188f60364c7763a7ca6653ed3f87ebb165822153a8547", size = 30942, upload-time = "2025-08-05T16:42:56.674Z" },
- { url = "https://files.pythonhosted.org/packages/ee/ca/baf2b9cc7e96c179bb4a54f30fcd83e6ecb340031bde68f486403f943768/audioop_lts-0.2.2-cp313-cp313t-win_arm64.whl", hash = "sha256:c174e322bb5783c099aaf87faeb240c8d210686b04bd61dfd05a8e5a83d88969", size = 24603, upload-time = "2025-08-05T16:42:57.571Z" },
- { url = "https://files.pythonhosted.org/packages/5c/73/413b5a2804091e2c7d5def1d618e4837f1cb82464e230f827226278556b7/audioop_lts-0.2.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f9ee9b52f5f857fbaf9d605a360884f034c92c1c23021fb90b2e39b8e64bede6", size = 47104, upload-time = "2025-08-05T16:42:58.518Z" },
- { url = "https://files.pythonhosted.org/packages/ae/8c/daa3308dc6593944410c2c68306a5e217f5c05b70a12e70228e7dd42dc5c/audioop_lts-0.2.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:49ee1a41738a23e98d98b937a0638357a2477bc99e61b0f768a8f654f45d9b7a", size = 27754, upload-time = "2025-08-05T16:43:00.132Z" },
- { url = "https://files.pythonhosted.org/packages/4e/86/c2e0f627168fcf61781a8f72cab06b228fe1da4b9fa4ab39cfb791b5836b/audioop_lts-0.2.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5b00be98ccd0fc123dcfad31d50030d25fcf31488cde9e61692029cd7394733b", size = 27332, upload-time = "2025-08-05T16:43:01.666Z" },
- { url = "https://files.pythonhosted.org/packages/c7/bd/35dce665255434f54e5307de39e31912a6f902d4572da7c37582809de14f/audioop_lts-0.2.2-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a6d2e0f9f7a69403e388894d4ca5ada5c47230716a03f2847cfc7bd1ecb589d6", size = 92396, upload-time = "2025-08-05T16:43:02.991Z" },
- { url = "https://files.pythonhosted.org/packages/2d/d2/deeb9f51def1437b3afa35aeb729d577c04bcd89394cb56f9239a9f50b6f/audioop_lts-0.2.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f9b0b8a03ef474f56d1a842af1a2e01398b8f7654009823c6d9e0ecff4d5cfbf", size = 91811, upload-time = "2025-08-05T16:43:04.096Z" },
- { url = "https://files.pythonhosted.org/packages/76/3b/09f8b35b227cee28cc8231e296a82759ed80c1a08e349811d69773c48426/audioop_lts-0.2.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2b267b70747d82125f1a021506565bdc5609a2b24bcb4773c16d79d2bb260bbd", size = 100483, upload-time = "2025-08-05T16:43:05.085Z" },
- { url = "https://files.pythonhosted.org/packages/0b/15/05b48a935cf3b130c248bfdbdea71ce6437f5394ee8533e0edd7cfd93d5e/audioop_lts-0.2.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0337d658f9b81f4cd0fdb1f47635070cc084871a3d4646d9de74fdf4e7c3d24a", size = 103885, upload-time = "2025-08-05T16:43:06.197Z" },
- { url = "https://files.pythonhosted.org/packages/83/80/186b7fce6d35b68d3d739f228dc31d60b3412105854edb975aa155a58339/audioop_lts-0.2.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:167d3b62586faef8b6b2275c3218796b12621a60e43f7e9d5845d627b9c9b80e", size = 84899, upload-time = "2025-08-05T16:43:07.291Z" },
- { url = "https://files.pythonhosted.org/packages/49/89/c78cc5ac6cb5828f17514fb12966e299c850bc885e80f8ad94e38d450886/audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0d9385e96f9f6da847f4d571ce3cb15b5091140edf3db97276872647ce37efd7", size = 89998, upload-time = "2025-08-05T16:43:08.335Z" },
- { url = "https://files.pythonhosted.org/packages/4c/4b/6401888d0c010e586c2ca50fce4c903d70a6bb55928b16cfbdfd957a13da/audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:48159d96962674eccdca9a3df280e864e8ac75e40a577cc97c5c42667ffabfc5", size = 99046, upload-time = "2025-08-05T16:43:09.367Z" },
- { url = "https://files.pythonhosted.org/packages/de/f8/c874ca9bb447dae0e2ef2e231f6c4c2b0c39e31ae684d2420b0f9e97ee68/audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:8fefe5868cd082db1186f2837d64cfbfa78b548ea0d0543e9b28935ccce81ce9", size = 84843, upload-time = "2025-08-05T16:43:10.749Z" },
- { url = "https://files.pythonhosted.org/packages/3e/c0/0323e66f3daebc13fd46b36b30c3be47e3fc4257eae44f1e77eb828c703f/audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:58cf54380c3884fb49fdd37dfb7a772632b6701d28edd3e2904743c5e1773602", size = 94490, upload-time = "2025-08-05T16:43:12.131Z" },
- { url = "https://files.pythonhosted.org/packages/98/6b/acc7734ac02d95ab791c10c3f17ffa3584ccb9ac5c18fd771c638ed6d1f5/audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:088327f00488cdeed296edd9215ca159f3a5a5034741465789cad403fcf4bec0", size = 92297, upload-time = "2025-08-05T16:43:13.139Z" },
- { url = "https://files.pythonhosted.org/packages/13/c3/c3dc3f564ce6877ecd2a05f8d751b9b27a8c320c2533a98b0c86349778d0/audioop_lts-0.2.2-cp314-cp314t-win32.whl", hash = "sha256:068aa17a38b4e0e7de771c62c60bbca2455924b67a8814f3b0dee92b5820c0b3", size = 27331, upload-time = "2025-08-05T16:43:14.19Z" },
- { url = "https://files.pythonhosted.org/packages/72/bb/b4608537e9ffcb86449091939d52d24a055216a36a8bf66b936af8c3e7ac/audioop_lts-0.2.2-cp314-cp314t-win_amd64.whl", hash = "sha256:a5bf613e96f49712073de86f20dbdd4014ca18efd4d34ed18c75bd808337851b", size = 31697, upload-time = "2025-08-05T16:43:15.193Z" },
- { url = "https://files.pythonhosted.org/packages/f6/22/91616fe707a5c5510de2cac9b046a30defe7007ba8a0c04f9c08f27df312/audioop_lts-0.2.2-cp314-cp314t-win_arm64.whl", hash = "sha256:b492c3b040153e68b9fdaff5913305aaaba5bb433d8a7f73d5cf6a64ed3cc1dd", size = 25206, upload-time = "2025-08-05T16:43:16.444Z" },
-]
-
-[[package]]
-name = "audioread"
-version = "3.1.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "standard-aifc", marker = "python_full_version >= '3.13'" },
- { name = "standard-sunau", marker = "python_full_version >= '3.13'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/a1/4a/874ecf9b472f998130c2b5e145dcdb9f6131e84786111489103b66772143/audioread-3.1.0.tar.gz", hash = "sha256:1c4ab2f2972764c896a8ac61ac53e261c8d29f0c6ccd652f84e18f08a4cab190", size = 20082, upload-time = "2025-10-26T19:44:13.484Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/7e/16/fbe8e1e185a45042f7cd3a282def5bb8d95bb69ab9e9ef6a5368aa17e426/audioread-3.1.0-py3-none-any.whl", hash = "sha256:b30d1df6c5d3de5dcef0fb0e256f6ea17bdcf5f979408df0297d8a408e2971b4", size = 23143, upload-time = "2025-10-26T19:44:12.016Z" },
-]
-
-[[package]]
-name = "batdetect2"
-version = "1.3.0"
-source = { editable = "." }
-dependencies = [
- { name = "click", version = "8.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
- { name = "click", version = "8.3.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" },
- { name = "librosa" },
- { name = "matplotlib", version = "3.9.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
- { name = "matplotlib", version = "3.10.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" },
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
- { name = "numpy", version = "2.4.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
- { name = "pandas", version = "2.3.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
- { name = "pandas", version = "3.0.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
- { name = "scikit-learn", version = "1.6.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
- { name = "scikit-learn", version = "1.8.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
- { name = "scipy", version = "1.13.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
- { name = "scipy", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
- { name = "setuptools", marker = "python_full_version >= '3.14'" },
- { name = "torch", version = "2.7.1+cu118", source = { registry = "https://download.pytorch.org/whl/cu118" }, marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
- { name = "torch", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "torch", version = "2.10.0+cu126", source = { registry = "https://download.pytorch.org/whl/cu126" }, marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
- { name = "torchaudio", version = "2.7.1+cu118", source = { registry = "https://download.pytorch.org/whl/cu118" }, marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
- { name = "torchaudio", version = "2.11.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "torchaudio", version = "2.11.0+cu126", source = { registry = "https://download.pytorch.org/whl/cu126" }, marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
- { name = "torchvision", version = "0.22.1+cu118", source = { registry = "https://download.pytorch.org/whl/cu118" }, marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
- { name = "torchvision", version = "0.23.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "torchvision", version = "0.25.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "torchvision", version = "0.25.0+cu126", source = { registry = "https://download.pytorch.org/whl/cu126" }, marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
-]
-
-[package.dev-dependencies]
-dev = [
- { name = "debugpy" },
- { name = "hypothesis", version = "6.141.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
- { name = "hypothesis", version = "6.151.9", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" },
- { name = "pyright" },
- { name = "pytest", version = "8.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
- { name = "pytest", version = "9.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" },
- { name = "ruff" },
-]
-
-[package.metadata]
-requires-dist = [
- { name = "click", specifier = ">=8.1.7" },
- { name = "librosa", specifier = ">=0.10.1" },
- { name = "matplotlib", specifier = ">=3.7.1" },
- { name = "numpy", specifier = ">=1.23.5" },
- { name = "pandas", specifier = ">=1.5.3" },
- { name = "scikit-learn", specifier = ">=1.2.2" },
- { name = "scipy", specifier = ">=1.10.1" },
- { name = "setuptools", marker = "python_full_version == '3.14.*'", specifier = ">=70.1.0" },
- { name = "torch", marker = "sys_platform != 'linux' and sys_platform != 'win32'", specifier = ">=1.13.1" },
- { name = "torch", marker = "(python_full_version != '3.9.*' and sys_platform == 'linux') or (python_full_version != '3.9.*' and sys_platform == 'win32')", specifier = ">=1.13.1", index = "https://download.pytorch.org/whl/cu126" },
- { name = "torch", marker = "(python_full_version == '3.9.*' and sys_platform == 'linux') or (python_full_version == '3.9.*' and sys_platform == 'win32')", specifier = ">=1.13.1", index = "https://download.pytorch.org/whl/cu118" },
- { name = "torchaudio", marker = "sys_platform != 'linux' and sys_platform != 'win32'", specifier = ">=1.13.1" },
- { name = "torchaudio", marker = "(python_full_version != '3.9.*' and sys_platform == 'linux') or (python_full_version != '3.9.*' and sys_platform == 'win32')", specifier = ">=1.13.1", index = "https://download.pytorch.org/whl/cu126" },
- { name = "torchaudio", marker = "(python_full_version == '3.9.*' and sys_platform == 'linux') or (python_full_version == '3.9.*' and sys_platform == 'win32')", specifier = ">=1.13.1", index = "https://download.pytorch.org/whl/cu118" },
- { name = "torchvision", marker = "sys_platform != 'linux' and sys_platform != 'win32'", specifier = ">=0.14.0" },
- { name = "torchvision", marker = "(python_full_version != '3.9.*' and sys_platform == 'linux') or (python_full_version != '3.9.*' and sys_platform == 'win32')", specifier = ">=0.14.0", index = "https://download.pytorch.org/whl/cu126" },
- { name = "torchvision", marker = "(python_full_version == '3.9.*' and sys_platform == 'linux') or (python_full_version == '3.9.*' and sys_platform == 'win32')", specifier = ">=0.14.0", index = "https://download.pytorch.org/whl/cu118" },
-]
-
-[package.metadata.requires-dev]
-dev = [
- { name = "debugpy", specifier = ">=1.8.8" },
- { name = "hypothesis", specifier = ">=6.118.7" },
- { name = "pyright", specifier = ">=1.1.388" },
- { name = "pytest", specifier = ">=7.2.2" },
- { name = "ruff", specifier = ">=0.7.3" },
-]
-
-[[package]]
-name = "certifi"
-version = "2026.2.25"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/af/2d/7bf41579a8986e348fa033a31cdd0e4121114f6bce2457e8876010b092dd/certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7", size = 155029, upload-time = "2026-02-25T02:54:17.342Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/9a/3c/c17fb3ca2d9c3acff52e30b309f538586f9f5b9c9cf454f3845fc9af4881/certifi-2026.2.25-py3-none-any.whl", hash = "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa", size = 153684, upload-time = "2026-02-25T02:54:15.766Z" },
-]
-
-[[package]]
-name = "cffi"
-version = "2.0.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pycparser", version = "2.23", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' and implementation_name != 'PyPy'" },
- { name = "pycparser", version = "3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' and implementation_name != 'PyPy'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/93/d7/516d984057745a6cd96575eea814fe1edd6646ee6efd552fb7b0921dec83/cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44", size = 184283, upload-time = "2025-09-08T23:22:08.01Z" },
- { url = "https://files.pythonhosted.org/packages/9e/84/ad6a0b408daa859246f57c03efd28e5dd1b33c21737c2db84cae8c237aa5/cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49", size = 180504, upload-time = "2025-09-08T23:22:10.637Z" },
- { url = "https://files.pythonhosted.org/packages/50/bd/b1a6362b80628111e6653c961f987faa55262b4002fcec42308cad1db680/cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c", size = 208811, upload-time = "2025-09-08T23:22:12.267Z" },
- { url = "https://files.pythonhosted.org/packages/4f/27/6933a8b2562d7bd1fb595074cf99cc81fc3789f6a6c05cdabb46284a3188/cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb", size = 216402, upload-time = "2025-09-08T23:22:13.455Z" },
- { url = "https://files.pythonhosted.org/packages/05/eb/b86f2a2645b62adcfff53b0dd97e8dfafb5c8aa864bd0d9a2c2049a0d551/cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0", size = 203217, upload-time = "2025-09-08T23:22:14.596Z" },
- { url = "https://files.pythonhosted.org/packages/9f/e0/6cbe77a53acf5acc7c08cc186c9928864bd7c005f9efd0d126884858a5fe/cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4", size = 203079, upload-time = "2025-09-08T23:22:15.769Z" },
- { url = "https://files.pythonhosted.org/packages/98/29/9b366e70e243eb3d14a5cb488dfd3a0b6b2f1fb001a203f653b93ccfac88/cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453", size = 216475, upload-time = "2025-09-08T23:22:17.427Z" },
- { url = "https://files.pythonhosted.org/packages/21/7a/13b24e70d2f90a322f2900c5d8e1f14fa7e2a6b3332b7309ba7b2ba51a5a/cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495", size = 218829, upload-time = "2025-09-08T23:22:19.069Z" },
- { url = "https://files.pythonhosted.org/packages/60/99/c9dc110974c59cc981b1f5b66e1d8af8af764e00f0293266824d9c4254bc/cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5", size = 211211, upload-time = "2025-09-08T23:22:20.588Z" },
- { url = "https://files.pythonhosted.org/packages/49/72/ff2d12dbf21aca1b32a40ed792ee6b40f6dc3a9cf1644bd7ef6e95e0ac5e/cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb", size = 218036, upload-time = "2025-09-08T23:22:22.143Z" },
- { url = "https://files.pythonhosted.org/packages/e2/cc/027d7fb82e58c48ea717149b03bcadcbdc293553edb283af792bd4bcbb3f/cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a", size = 172184, upload-time = "2025-09-08T23:22:23.328Z" },
- { url = "https://files.pythonhosted.org/packages/33/fa/072dd15ae27fbb4e06b437eb6e944e75b068deb09e2a2826039e49ee2045/cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739", size = 182790, upload-time = "2025-09-08T23:22:24.752Z" },
- { url = "https://files.pythonhosted.org/packages/12/4a/3dfd5f7850cbf0d06dc84ba9aa00db766b52ca38d8b86e3a38314d52498c/cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe", size = 184344, upload-time = "2025-09-08T23:22:26.456Z" },
- { url = "https://files.pythonhosted.org/packages/4f/8b/f0e4c441227ba756aafbe78f117485b25bb26b1c059d01f137fa6d14896b/cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c", size = 180560, upload-time = "2025-09-08T23:22:28.197Z" },
- { url = "https://files.pythonhosted.org/packages/b1/b7/1200d354378ef52ec227395d95c2576330fd22a869f7a70e88e1447eb234/cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92", size = 209613, upload-time = "2025-09-08T23:22:29.475Z" },
- { url = "https://files.pythonhosted.org/packages/b8/56/6033f5e86e8cc9bb629f0077ba71679508bdf54a9a5e112a3c0b91870332/cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93", size = 216476, upload-time = "2025-09-08T23:22:31.063Z" },
- { url = "https://files.pythonhosted.org/packages/dc/7f/55fecd70f7ece178db2f26128ec41430d8720f2d12ca97bf8f0a628207d5/cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5", size = 203374, upload-time = "2025-09-08T23:22:32.507Z" },
- { url = "https://files.pythonhosted.org/packages/84/ef/a7b77c8bdc0f77adc3b46888f1ad54be8f3b7821697a7b89126e829e676a/cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664", size = 202597, upload-time = "2025-09-08T23:22:34.132Z" },
- { url = "https://files.pythonhosted.org/packages/d7/91/500d892b2bf36529a75b77958edfcd5ad8e2ce4064ce2ecfeab2125d72d1/cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26", size = 215574, upload-time = "2025-09-08T23:22:35.443Z" },
- { url = "https://files.pythonhosted.org/packages/44/64/58f6255b62b101093d5df22dcb752596066c7e89dd725e0afaed242a61be/cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9", size = 218971, upload-time = "2025-09-08T23:22:36.805Z" },
- { url = "https://files.pythonhosted.org/packages/ab/49/fa72cebe2fd8a55fbe14956f9970fe8eb1ac59e5df042f603ef7c8ba0adc/cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414", size = 211972, upload-time = "2025-09-08T23:22:38.436Z" },
- { url = "https://files.pythonhosted.org/packages/0b/28/dd0967a76aab36731b6ebfe64dec4e981aff7e0608f60c2d46b46982607d/cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743", size = 217078, upload-time = "2025-09-08T23:22:39.776Z" },
- { url = "https://files.pythonhosted.org/packages/2b/c0/015b25184413d7ab0a410775fdb4a50fca20f5589b5dab1dbbfa3baad8ce/cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5", size = 172076, upload-time = "2025-09-08T23:22:40.95Z" },
- { url = "https://files.pythonhosted.org/packages/ae/8f/dc5531155e7070361eb1b7e4c1a9d896d0cb21c49f807a6c03fd63fc877e/cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5", size = 182820, upload-time = "2025-09-08T23:22:42.463Z" },
- { url = "https://files.pythonhosted.org/packages/95/5c/1b493356429f9aecfd56bc171285a4c4ac8697f76e9bbbbb105e537853a1/cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d", size = 177635, upload-time = "2025-09-08T23:22:43.623Z" },
- { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" },
- { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" },
- { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" },
- { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" },
- { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" },
- { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" },
- { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" },
- { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" },
- { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" },
- { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" },
- { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" },
- { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" },
- { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" },
- { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" },
- { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" },
- { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" },
- { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" },
- { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" },
- { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" },
- { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" },
- { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" },
- { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" },
- { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" },
- { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" },
- { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" },
- { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" },
- { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" },
- { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" },
- { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" },
- { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" },
- { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" },
- { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" },
- { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" },
- { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" },
- { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" },
- { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" },
- { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" },
- { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" },
- { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" },
- { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" },
- { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" },
- { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" },
- { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" },
- { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" },
- { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" },
- { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" },
- { url = "https://files.pythonhosted.org/packages/c0/cc/08ed5a43f2996a16b462f64a7055c6e962803534924b9b2f1371d8c00b7b/cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf", size = 184288, upload-time = "2025-09-08T23:23:48.404Z" },
- { url = "https://files.pythonhosted.org/packages/3d/de/38d9726324e127f727b4ecc376bc85e505bfe61ef130eaf3f290c6847dd4/cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7", size = 180509, upload-time = "2025-09-08T23:23:49.73Z" },
- { url = "https://files.pythonhosted.org/packages/9b/13/c92e36358fbcc39cf0962e83223c9522154ee8630e1df7c0b3a39a8124e2/cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c", size = 208813, upload-time = "2025-09-08T23:23:51.263Z" },
- { url = "https://files.pythonhosted.org/packages/15/12/a7a79bd0df4c3bff744b2d7e52cc1b68d5e7e427b384252c42366dc1ecbc/cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165", size = 216498, upload-time = "2025-09-08T23:23:52.494Z" },
- { url = "https://files.pythonhosted.org/packages/a3/ad/5c51c1c7600bdd7ed9a24a203ec255dccdd0ebf4527f7b922a0bde2fb6ed/cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534", size = 203243, upload-time = "2025-09-08T23:23:53.836Z" },
- { url = "https://files.pythonhosted.org/packages/32/f2/81b63e288295928739d715d00952c8c6034cb6c6a516b17d37e0c8be5600/cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f", size = 203158, upload-time = "2025-09-08T23:23:55.169Z" },
- { url = "https://files.pythonhosted.org/packages/1f/74/cc4096ce66f5939042ae094e2e96f53426a979864aa1f96a621ad128be27/cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63", size = 216548, upload-time = "2025-09-08T23:23:56.506Z" },
- { url = "https://files.pythonhosted.org/packages/e8/be/f6424d1dc46b1091ffcc8964fa7c0ab0cd36839dd2761b49c90481a6ba1b/cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2", size = 218897, upload-time = "2025-09-08T23:23:57.825Z" },
- { url = "https://files.pythonhosted.org/packages/f7/e0/dda537c2309817edf60109e39265f24f24aa7f050767e22c98c53fe7f48b/cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65", size = 211249, upload-time = "2025-09-08T23:23:59.139Z" },
- { url = "https://files.pythonhosted.org/packages/2b/e7/7c769804eb75e4c4b35e658dba01de1640a351a9653c3d49ca89d16ccc91/cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322", size = 218041, upload-time = "2025-09-08T23:24:00.496Z" },
- { url = "https://files.pythonhosted.org/packages/aa/d9/6218d78f920dcd7507fc16a766b5ef8f3b913cc7aa938e7fc80b9978d089/cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a", size = 172138, upload-time = "2025-09-08T23:24:01.7Z" },
- { url = "https://files.pythonhosted.org/packages/54/8f/a1e836f82d8e32a97e6b29cc8f641779181ac7363734f12df27db803ebda/cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9", size = 182794, upload-time = "2025-09-08T23:24:02.943Z" },
-]
-
-[[package]]
-name = "charset-normalizer"
-version = "3.4.6"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/7b/60/e3bec1881450851b087e301bedc3daa9377a4d45f1c26aa90b0b235e38aa/charset_normalizer-3.4.6.tar.gz", hash = "sha256:1ae6b62897110aa7c79ea2f5dd38d1abca6db663687c0b1ad9aed6f6bae3d9d6", size = 143363, upload-time = "2026-03-15T18:53:25.478Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e6/8c/2c56124c6dc53a774d435f985b5973bc592f42d437be58c0c92d65ae7296/charset_normalizer-3.4.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2e1d8ca8611099001949d1cdfaefc510cf0f212484fe7c565f735b68c78c3c95", size = 298751, upload-time = "2026-03-15T18:50:00.003Z" },
- { url = "https://files.pythonhosted.org/packages/86/2a/2a7db6b314b966a3bcad8c731c0719c60b931b931de7ae9f34b2839289ee/charset_normalizer-3.4.6-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e25369dc110d58ddf29b949377a93e0716d72a24f62bad72b2b39f155949c1fd", size = 200027, upload-time = "2026-03-15T18:50:01.702Z" },
- { url = "https://files.pythonhosted.org/packages/68/f2/0fe775c74ae25e2a3b07b01538fc162737b3e3f795bada3bc26f4d4d495c/charset_normalizer-3.4.6-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:259695e2ccc253feb2a016303543d691825e920917e31f894ca1a687982b1de4", size = 220741, upload-time = "2026-03-15T18:50:03.194Z" },
- { url = "https://files.pythonhosted.org/packages/10/98/8085596e41f00b27dd6aa1e68413d1ddda7e605f34dd546833c61fddd709/charset_normalizer-3.4.6-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:dda86aba335c902b6149a02a55b38e96287157e609200811837678214ba2b1db", size = 215802, upload-time = "2026-03-15T18:50:05.859Z" },
- { url = "https://files.pythonhosted.org/packages/fd/ce/865e4e09b041bad659d682bbd98b47fb490b8e124f9398c9448065f64fee/charset_normalizer-3.4.6-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:51fb3c322c81d20567019778cb5a4a6f2dc1c200b886bc0d636238e364848c89", size = 207908, upload-time = "2026-03-15T18:50:07.676Z" },
- { url = "https://files.pythonhosted.org/packages/a8/54/8c757f1f7349262898c2f169e0d562b39dcb977503f18fdf0814e923db78/charset_normalizer-3.4.6-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:4482481cb0572180b6fd976a4d5c72a30263e98564da68b86ec91f0fe35e8565", size = 194357, upload-time = "2026-03-15T18:50:09.327Z" },
- { url = "https://files.pythonhosted.org/packages/6f/29/e88f2fac9218907fc7a70722b393d1bbe8334c61fe9c46640dba349b6e66/charset_normalizer-3.4.6-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:39f5068d35621da2881271e5c3205125cc456f54e9030d3f723288c873a71bf9", size = 205610, upload-time = "2026-03-15T18:50:10.732Z" },
- { url = "https://files.pythonhosted.org/packages/4c/c5/21d7bb0cb415287178450171d130bed9d664211fdd59731ed2c34267b07d/charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8bea55c4eef25b0b19a0337dc4e3f9a15b00d569c77211fa8cde38684f234fb7", size = 203512, upload-time = "2026-03-15T18:50:12.535Z" },
- { url = "https://files.pythonhosted.org/packages/a4/be/ce52f3c7fdb35cc987ad38a53ebcef52eec498f4fb6c66ecfe62cfe57ba2/charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:f0cdaecd4c953bfae0b6bb64910aaaca5a424ad9c72d85cb88417bb9814f7550", size = 195398, upload-time = "2026-03-15T18:50:14.236Z" },
- { url = "https://files.pythonhosted.org/packages/81/a0/3ab5dd39d4859a3555e5dadfc8a9fa7f8352f8c183d1a65c90264517da0e/charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:150b8ce8e830eb7ccb029ec9ca36022f756986aaaa7956aad6d9ec90089338c0", size = 221772, upload-time = "2026-03-15T18:50:15.581Z" },
- { url = "https://files.pythonhosted.org/packages/04/6e/6a4e41a97ba6b2fa87f849c41e4d229449a586be85053c4d90135fe82d26/charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:e68c14b04827dd76dcbd1aeea9e604e3e4b78322d8faf2f8132c7138efa340a8", size = 205759, upload-time = "2026-03-15T18:50:17.047Z" },
- { url = "https://files.pythonhosted.org/packages/db/3b/34a712a5ee64a6957bf355b01dc17b12de457638d436fdb05d01e463cd1c/charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:3778fd7d7cd04ae8f54651f4a7a0bd6e39a0cf20f801720a4c21d80e9b7ad6b0", size = 216938, upload-time = "2026-03-15T18:50:18.44Z" },
- { url = "https://files.pythonhosted.org/packages/cb/05/5bd1e12da9ab18790af05c61aafd01a60f489778179b621ac2a305243c62/charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:dad6e0f2e481fffdcf776d10ebee25e0ef89f16d691f1e5dee4b586375fdc64b", size = 210138, upload-time = "2026-03-15T18:50:19.852Z" },
- { url = "https://files.pythonhosted.org/packages/bd/8e/3cb9e2d998ff6b21c0a1860343cb7b83eba9cdb66b91410e18fc4969d6ab/charset_normalizer-3.4.6-cp310-cp310-win32.whl", hash = "sha256:74a2e659c7ecbc73562e2a15e05039f1e22c75b7c7618b4b574a3ea9118d1557", size = 144137, upload-time = "2026-03-15T18:50:21.505Z" },
- { url = "https://files.pythonhosted.org/packages/d8/8f/78f5489ffadb0db3eb7aff53d31c24531d33eb545f0c6f6567c25f49a5ff/charset_normalizer-3.4.6-cp310-cp310-win_amd64.whl", hash = "sha256:aa9cccf4a44b9b62d8ba8b4dd06c649ba683e4bf04eea606d2e94cfc2d6ff4d6", size = 154244, upload-time = "2026-03-15T18:50:22.81Z" },
- { url = "https://files.pythonhosted.org/packages/e4/74/e472659dffb0cadb2f411282d2d76c60da1fc94076d7fffed4ae8a93ec01/charset_normalizer-3.4.6-cp310-cp310-win_arm64.whl", hash = "sha256:e985a16ff513596f217cee86c21371b8cd011c0f6f056d0920aa2d926c544058", size = 143312, upload-time = "2026-03-15T18:50:24.074Z" },
- { url = "https://files.pythonhosted.org/packages/62/28/ff6f234e628a2de61c458be2779cb182bc03f6eec12200d4a525bbfc9741/charset_normalizer-3.4.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:82060f995ab5003a2d6e0f4ad29065b7672b6593c8c63559beefe5b443242c3e", size = 293582, upload-time = "2026-03-15T18:50:25.454Z" },
- { url = "https://files.pythonhosted.org/packages/1c/b7/b1a117e5385cbdb3205f6055403c2a2a220c5ea80b8716c324eaf75c5c95/charset_normalizer-3.4.6-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:60c74963d8350241a79cb8feea80e54d518f72c26db618862a8f53e5023deaf9", size = 197240, upload-time = "2026-03-15T18:50:27.196Z" },
- { url = "https://files.pythonhosted.org/packages/a1/5f/2574f0f09f3c3bc1b2f992e20bce6546cb1f17e111c5be07308dc5427956/charset_normalizer-3.4.6-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f6e4333fb15c83f7d1482a76d45a0818897b3d33f00efd215528ff7c51b8e35d", size = 217363, upload-time = "2026-03-15T18:50:28.601Z" },
- { url = "https://files.pythonhosted.org/packages/4a/d1/0ae20ad77bc949ddd39b51bf383b6ca932f2916074c95cad34ae465ab71f/charset_normalizer-3.4.6-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:bc72863f4d9aba2e8fd9085e63548a324ba706d2ea2c83b260da08a59b9482de", size = 212994, upload-time = "2026-03-15T18:50:30.102Z" },
- { url = "https://files.pythonhosted.org/packages/60/ac/3233d262a310c1b12633536a07cde5ddd16985e6e7e238e9f3f9423d8eb9/charset_normalizer-3.4.6-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9cc4fc6c196d6a8b76629a70ddfcd4635a6898756e2d9cac5565cf0654605d73", size = 204697, upload-time = "2026-03-15T18:50:31.654Z" },
- { url = "https://files.pythonhosted.org/packages/25/3c/8a18fc411f085b82303cfb7154eed5bd49c77035eb7608d049468b53f87c/charset_normalizer-3.4.6-cp311-cp311-manylinux_2_31_armv7l.whl", hash = "sha256:0c173ce3a681f309f31b87125fecec7a5d1347261ea11ebbb856fa6006b23c8c", size = 191673, upload-time = "2026-03-15T18:50:33.433Z" },
- { url = "https://files.pythonhosted.org/packages/ff/a7/11cfe61d6c5c5c7438d6ba40919d0306ed83c9ab957f3d4da2277ff67836/charset_normalizer-3.4.6-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c907cdc8109f6c619e6254212e794d6548373cc40e1ec75e6e3823d9135d29cc", size = 201120, upload-time = "2026-03-15T18:50:35.105Z" },
- { url = "https://files.pythonhosted.org/packages/b5/10/cf491fa1abd47c02f69687046b896c950b92b6cd7337a27e6548adbec8e4/charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:404a1e552cf5b675a87f0651f8b79f5f1e6fd100ee88dc612f89aa16abd4486f", size = 200911, upload-time = "2026-03-15T18:50:36.819Z" },
- { url = "https://files.pythonhosted.org/packages/28/70/039796160b48b18ed466fde0af84c1b090c4e288fae26cd674ad04a2d703/charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:e3c701e954abf6fc03a49f7c579cc80c2c6cc52525340ca3186c41d3f33482ef", size = 192516, upload-time = "2026-03-15T18:50:38.228Z" },
- { url = "https://files.pythonhosted.org/packages/ff/34/c56f3223393d6ff3124b9e78f7de738047c2d6bc40a4f16ac0c9d7a1cb3c/charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7a6967aaf043bceabab5412ed6bd6bd26603dae84d5cb75bf8d9a74a4959d398", size = 218795, upload-time = "2026-03-15T18:50:39.664Z" },
- { url = "https://files.pythonhosted.org/packages/e8/3b/ce2d4f86c5282191a041fdc5a4ce18f1c6bd40a5bd1f74cf8625f08d51c1/charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:5feb91325bbceade6afab43eb3b508c63ee53579fe896c77137ded51c6b6958e", size = 201833, upload-time = "2026-03-15T18:50:41.552Z" },
- { url = "https://files.pythonhosted.org/packages/3b/9b/b6a9f76b0fd7c5b5ec58b228ff7e85095370282150f0bd50b3126f5506d6/charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f820f24b09e3e779fe84c3c456cb4108a7aa639b0d1f02c28046e11bfcd088ed", size = 213920, upload-time = "2026-03-15T18:50:43.33Z" },
- { url = "https://files.pythonhosted.org/packages/ae/98/7bc23513a33d8172365ed30ee3a3b3fe1ece14a395e5fc94129541fc6003/charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b35b200d6a71b9839a46b9b7fff66b6638bb52fc9658aa58796b0326595d3021", size = 206951, upload-time = "2026-03-15T18:50:44.789Z" },
- { url = "https://files.pythonhosted.org/packages/32/73/c0b86f3d1458468e11aec870e6b3feac931facbe105a894b552b0e518e79/charset_normalizer-3.4.6-cp311-cp311-win32.whl", hash = "sha256:9ca4c0b502ab399ef89248a2c84c54954f77a070f28e546a85e91da627d1301e", size = 143703, upload-time = "2026-03-15T18:50:46.103Z" },
- { url = "https://files.pythonhosted.org/packages/c6/e3/76f2facfe8eddee0bbd38d2594e709033338eae44ebf1738bcefe0a06185/charset_normalizer-3.4.6-cp311-cp311-win_amd64.whl", hash = "sha256:a9e68c9d88823b274cf1e72f28cb5dc89c990edf430b0bfd3e2fb0785bfeabf4", size = 153857, upload-time = "2026-03-15T18:50:47.563Z" },
- { url = "https://files.pythonhosted.org/packages/e2/dc/9abe19c9b27e6cd3636036b9d1b387b78c40dedbf0b47f9366737684b4b0/charset_normalizer-3.4.6-cp311-cp311-win_arm64.whl", hash = "sha256:97d0235baafca5f2b09cf332cc275f021e694e8362c6bb9c96fc9a0eb74fc316", size = 142751, upload-time = "2026-03-15T18:50:49.234Z" },
- { url = "https://files.pythonhosted.org/packages/e5/62/c0815c992c9545347aeea7859b50dc9044d147e2e7278329c6e02ac9a616/charset_normalizer-3.4.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ef7fedc7a6ecbe99969cd09632516738a97eeb8bd7258bf8a0f23114c057dab", size = 295154, upload-time = "2026-03-15T18:50:50.88Z" },
- { url = "https://files.pythonhosted.org/packages/a8/37/bdca6613c2e3c58c7421891d80cc3efa1d32e882f7c4a7ee6039c3fc951a/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a4ea868bc28109052790eb2b52a9ab33f3aa7adc02f96673526ff47419490e21", size = 199191, upload-time = "2026-03-15T18:50:52.658Z" },
- { url = "https://files.pythonhosted.org/packages/6c/92/9934d1bbd69f7f398b38c5dae1cbf9cc672e7c34a4adf7b17c0a9c17d15d/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:836ab36280f21fc1a03c99cd05c6b7af70d2697e374c7af0b61ed271401a72a2", size = 218674, upload-time = "2026-03-15T18:50:54.102Z" },
- { url = "https://files.pythonhosted.org/packages/af/90/25f6ab406659286be929fd89ab0e78e38aa183fc374e03aa3c12d730af8a/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f1ce721c8a7dfec21fcbdfe04e8f68174183cf4e8188e0645e92aa23985c57ff", size = 215259, upload-time = "2026-03-15T18:50:55.616Z" },
- { url = "https://files.pythonhosted.org/packages/4e/ef/79a463eb0fff7f96afa04c1d4c51f8fc85426f918db467854bfb6a569ce3/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e28d62a8fc7a1fa411c43bd65e346f3bce9716dc51b897fbe930c5987b402d5", size = 207276, upload-time = "2026-03-15T18:50:57.054Z" },
- { url = "https://files.pythonhosted.org/packages/f7/72/d0426afec4b71dc159fa6b4e68f868cd5a3ecd918fec5813a15d292a7d10/charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:530d548084c4a9f7a16ed4a294d459b4f229db50df689bfe92027452452943a0", size = 195161, upload-time = "2026-03-15T18:50:58.686Z" },
- { url = "https://files.pythonhosted.org/packages/bf/18/c82b06a68bfcb6ce55e508225d210c7e6a4ea122bfc0748892f3dc4e8e11/charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:30f445ae60aad5e1f8bdbb3108e39f6fbc09f4ea16c815c66578878325f8f15a", size = 203452, upload-time = "2026-03-15T18:51:00.196Z" },
- { url = "https://files.pythonhosted.org/packages/44/d6/0c25979b92f8adafdbb946160348d8d44aa60ce99afdc27df524379875cb/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ac2393c73378fea4e52aa56285a3d64be50f1a12395afef9cce47772f60334c2", size = 202272, upload-time = "2026-03-15T18:51:01.703Z" },
- { url = "https://files.pythonhosted.org/packages/2e/3d/7fea3e8fe84136bebbac715dd1221cc25c173c57a699c030ab9b8900cbb7/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:90ca27cd8da8118b18a52d5f547859cc1f8354a00cd1e8e5120df3e30d6279e5", size = 195622, upload-time = "2026-03-15T18:51:03.526Z" },
- { url = "https://files.pythonhosted.org/packages/57/8a/d6f7fd5cb96c58ef2f681424fbca01264461336d2a7fc875e4446b1f1346/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e5a94886bedca0f9b78fecd6afb6629142fd2605aa70a125d49f4edc6037ee6", size = 220056, upload-time = "2026-03-15T18:51:05.269Z" },
- { url = "https://files.pythonhosted.org/packages/16/50/478cdda782c8c9c3fb5da3cc72dd7f331f031e7f1363a893cdd6ca0f8de0/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:695f5c2823691a25f17bc5d5ffe79fa90972cc34b002ac6c843bb8a1720e950d", size = 203751, upload-time = "2026-03-15T18:51:06.858Z" },
- { url = "https://files.pythonhosted.org/packages/75/fc/cc2fcac943939c8e4d8791abfa139f685e5150cae9f94b60f12520feaa9b/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:231d4da14bcd9301310faf492051bee27df11f2bc7549bc0bb41fef11b82daa2", size = 216563, upload-time = "2026-03-15T18:51:08.564Z" },
- { url = "https://files.pythonhosted.org/packages/a8/b7/a4add1d9a5f68f3d037261aecca83abdb0ab15960a3591d340e829b37298/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a056d1ad2633548ca18ffa2f85c202cfb48b68615129143915b8dc72a806a923", size = 209265, upload-time = "2026-03-15T18:51:10.312Z" },
- { url = "https://files.pythonhosted.org/packages/6c/18/c094561b5d64a24277707698e54b7f67bd17a4f857bbfbb1072bba07c8bf/charset_normalizer-3.4.6-cp312-cp312-win32.whl", hash = "sha256:c2274ca724536f173122f36c98ce188fd24ce3dad886ec2b7af859518ce008a4", size = 144229, upload-time = "2026-03-15T18:51:11.694Z" },
- { url = "https://files.pythonhosted.org/packages/ab/20/0567efb3a8fd481b8f34f739ebddc098ed062a59fed41a8d193a61939e8f/charset_normalizer-3.4.6-cp312-cp312-win_amd64.whl", hash = "sha256:c8ae56368f8cc97c7e40a7ee18e1cedaf8e780cd8bc5ed5ac8b81f238614facb", size = 154277, upload-time = "2026-03-15T18:51:13.004Z" },
- { url = "https://files.pythonhosted.org/packages/15/57/28d79b44b51933119e21f65479d0864a8d5893e494cf5daab15df0247c17/charset_normalizer-3.4.6-cp312-cp312-win_arm64.whl", hash = "sha256:899d28f422116b08be5118ef350c292b36fc15ec2daeb9ea987c89281c7bb5c4", size = 142817, upload-time = "2026-03-15T18:51:14.408Z" },
- { url = "https://files.pythonhosted.org/packages/1e/1d/4fdabeef4e231153b6ed7567602f3b68265ec4e5b76d6024cf647d43d981/charset_normalizer-3.4.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:11afb56037cbc4b1555a34dd69151e8e069bee82e613a73bef6e714ce733585f", size = 294823, upload-time = "2026-03-15T18:51:15.755Z" },
- { url = "https://files.pythonhosted.org/packages/47/7b/20e809b89c69d37be748d98e84dce6820bf663cf19cf6b942c951a3e8f41/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:423fb7e748a08f854a08a222b983f4df1912b1daedce51a72bd24fe8f26a1843", size = 198527, upload-time = "2026-03-15T18:51:17.177Z" },
- { url = "https://files.pythonhosted.org/packages/37/a6/4f8d27527d59c039dce6f7622593cdcd3d70a8504d87d09eb11e9fdc6062/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d73beaac5e90173ac3deb9928a74763a6d230f494e4bfb422c217a0ad8e629bf", size = 218388, upload-time = "2026-03-15T18:51:18.934Z" },
- { url = "https://files.pythonhosted.org/packages/f6/9b/4770ccb3e491a9bacf1c46cc8b812214fe367c86a96353ccc6daf87b01ec/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d60377dce4511655582e300dc1e5a5f24ba0cb229005a1d5c8d0cb72bb758ab8", size = 214563, upload-time = "2026-03-15T18:51:20.374Z" },
- { url = "https://files.pythonhosted.org/packages/2b/58/a199d245894b12db0b957d627516c78e055adc3a0d978bc7f65ddaf7c399/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:530e8cebeea0d76bdcf93357aa5e41336f48c3dc709ac52da2bb167c5b8271d9", size = 206587, upload-time = "2026-03-15T18:51:21.807Z" },
- { url = "https://files.pythonhosted.org/packages/7e/70/3def227f1ec56f5c69dfc8392b8bd63b11a18ca8178d9211d7cc5e5e4f27/charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:a26611d9987b230566f24a0a125f17fe0de6a6aff9f25c9f564aaa2721a5fb88", size = 194724, upload-time = "2026-03-15T18:51:23.508Z" },
- { url = "https://files.pythonhosted.org/packages/58/ab/9318352e220c05efd31c2779a23b50969dc94b985a2efa643ed9077bfca5/charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:34315ff4fc374b285ad7f4a0bf7dcbfe769e1b104230d40f49f700d4ab6bbd84", size = 202956, upload-time = "2026-03-15T18:51:25.239Z" },
- { url = "https://files.pythonhosted.org/packages/75/13/f3550a3ac25b70f87ac98c40d3199a8503676c2f1620efbf8d42095cfc40/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ddd609f9e1af8c7bd6e2aca279c931aefecd148a14402d4e368f3171769fd", size = 201923, upload-time = "2026-03-15T18:51:26.682Z" },
- { url = "https://files.pythonhosted.org/packages/1b/db/c5c643b912740b45e8eec21de1bbab8e7fc085944d37e1e709d3dcd9d72f/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:80d0a5615143c0b3225e5e3ef22c8d5d51f3f72ce0ea6fb84c943546c7b25b6c", size = 195366, upload-time = "2026-03-15T18:51:28.129Z" },
- { url = "https://files.pythonhosted.org/packages/5a/67/3b1c62744f9b2448443e0eb160d8b001c849ec3fef591e012eda6484787c/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:92734d4d8d187a354a556626c221cd1a892a4e0802ccb2af432a1d85ec012194", size = 219752, upload-time = "2026-03-15T18:51:29.556Z" },
- { url = "https://files.pythonhosted.org/packages/f6/98/32ffbaf7f0366ffb0445930b87d103f6b406bc2c271563644bde8a2b1093/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:613f19aa6e082cf96e17e3ffd89383343d0d589abda756b7764cf78361fd41dc", size = 203296, upload-time = "2026-03-15T18:51:30.921Z" },
- { url = "https://files.pythonhosted.org/packages/41/12/5d308c1bbe60cabb0c5ef511574a647067e2a1f631bc8634fcafaccd8293/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:2b1a63e8224e401cafe7739f77efd3f9e7f5f2026bda4aead8e59afab537784f", size = 215956, upload-time = "2026-03-15T18:51:32.399Z" },
- { url = "https://files.pythonhosted.org/packages/53/e9/5f85f6c5e20669dbe56b165c67b0260547dea97dba7e187938833d791687/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6cceb5473417d28edd20c6c984ab6fee6c6267d38d906823ebfe20b03d607dc2", size = 208652, upload-time = "2026-03-15T18:51:34.214Z" },
- { url = "https://files.pythonhosted.org/packages/f1/11/897052ea6af56df3eef3ca94edafee410ca699ca0c7b87960ad19932c55e/charset_normalizer-3.4.6-cp313-cp313-win32.whl", hash = "sha256:d7de2637729c67d67cf87614b566626057e95c303bc0a55ffe391f5205e7003d", size = 143940, upload-time = "2026-03-15T18:51:36.15Z" },
- { url = "https://files.pythonhosted.org/packages/a1/5c/724b6b363603e419829f561c854b87ed7c7e31231a7908708ac086cdf3e2/charset_normalizer-3.4.6-cp313-cp313-win_amd64.whl", hash = "sha256:572d7c822caf521f0525ba1bce1a622a0b85cf47ffbdae6c9c19e3b5ac3c4389", size = 154101, upload-time = "2026-03-15T18:51:37.876Z" },
- { url = "https://files.pythonhosted.org/packages/01/a5/7abf15b4c0968e47020f9ca0935fb3274deb87cb288cd187cad92e8cdffd/charset_normalizer-3.4.6-cp313-cp313-win_arm64.whl", hash = "sha256:a4474d924a47185a06411e0064b803c68be044be2d60e50e8bddcc2649957c1f", size = 143109, upload-time = "2026-03-15T18:51:39.565Z" },
- { url = "https://files.pythonhosted.org/packages/25/6f/ffe1e1259f384594063ea1869bfb6be5cdb8bc81020fc36c3636bc8302a1/charset_normalizer-3.4.6-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:9cc6e6d9e571d2f863fa77700701dae73ed5f78881efc8b3f9a4398772ff53e8", size = 294458, upload-time = "2026-03-15T18:51:41.134Z" },
- { url = "https://files.pythonhosted.org/packages/56/60/09bb6c13a8c1016c2ed5c6a6488e4ffef506461aa5161662bd7636936fb1/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef5960d965e67165d75b7c7ffc60a83ec5abfc5c11b764ec13ea54fbef8b4421", size = 199277, upload-time = "2026-03-15T18:51:42.953Z" },
- { url = "https://files.pythonhosted.org/packages/00/50/dcfbb72a5138bbefdc3332e8d81a23494bf67998b4b100703fd15fa52d81/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b3694e3f87f8ac7ce279d4355645b3c878d24d1424581b46282f24b92f5a4ae2", size = 218758, upload-time = "2026-03-15T18:51:44.339Z" },
- { url = "https://files.pythonhosted.org/packages/03/b3/d79a9a191bb75f5aa81f3aaaa387ef29ce7cb7a9e5074ba8ea095cc073c2/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5d11595abf8dd942a77883a39d81433739b287b6aa71620f15164f8096221b30", size = 215299, upload-time = "2026-03-15T18:51:45.871Z" },
- { url = "https://files.pythonhosted.org/packages/76/7e/bc8911719f7084f72fd545f647601ea3532363927f807d296a8c88a62c0d/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7bda6eebafd42133efdca535b04ccb338ab29467b3f7bf79569883676fc628db", size = 206811, upload-time = "2026-03-15T18:51:47.308Z" },
- { url = "https://files.pythonhosted.org/packages/e2/40/c430b969d41dda0c465aa36cc7c2c068afb67177bef50905ac371b28ccc7/charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:bbc8c8650c6e51041ad1be191742b8b421d05bbd3410f43fa2a00c8db87678e8", size = 193706, upload-time = "2026-03-15T18:51:48.849Z" },
- { url = "https://files.pythonhosted.org/packages/48/15/e35e0590af254f7df984de1323640ef375df5761f615b6225ba8deb9799a/charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:22c6f0c2fbc31e76c3b8a86fba1a56eda6166e238c29cdd3d14befdb4a4e4815", size = 202706, upload-time = "2026-03-15T18:51:50.257Z" },
- { url = "https://files.pythonhosted.org/packages/5e/bd/f736f7b9cc5e93a18b794a50346bb16fbfd6b37f99e8f306f7951d27c17c/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7edbed096e4a4798710ed6bc75dcaa2a21b68b6c356553ac4823c3658d53743a", size = 202497, upload-time = "2026-03-15T18:51:52.012Z" },
- { url = "https://files.pythonhosted.org/packages/9d/ba/2cc9e3e7dfdf7760a6ed8da7446d22536f3d0ce114ac63dee2a5a3599e62/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:7f9019c9cb613f084481bd6a100b12e1547cf2efe362d873c2e31e4035a6fa43", size = 193511, upload-time = "2026-03-15T18:51:53.723Z" },
- { url = "https://files.pythonhosted.org/packages/9e/cb/5be49b5f776e5613be07298c80e1b02a2d900f7a7de807230595c85a8b2e/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:58c948d0d086229efc484fe2f30c2d382c86720f55cd9bc33591774348ad44e0", size = 220133, upload-time = "2026-03-15T18:51:55.333Z" },
- { url = "https://files.pythonhosted.org/packages/83/43/99f1b5dad345accb322c80c7821071554f791a95ee50c1c90041c157ae99/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:419a9d91bd238052642a51938af8ac05da5b3343becde08d5cdeab9046df9ee1", size = 203035, upload-time = "2026-03-15T18:51:56.736Z" },
- { url = "https://files.pythonhosted.org/packages/87/9a/62c2cb6a531483b55dddff1a68b3d891a8b498f3ca555fbcf2978e804d9d/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5273b9f0b5835ff0350c0828faea623c68bfa65b792720c453e22b25cc72930f", size = 216321, upload-time = "2026-03-15T18:51:58.17Z" },
- { url = "https://files.pythonhosted.org/packages/6e/79/94a010ff81e3aec7c293eb82c28f930918e517bc144c9906a060844462eb/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0e901eb1049fdb80f5bd11ed5ea1e498ec423102f7a9b9e4645d5b8204ff2815", size = 208973, upload-time = "2026-03-15T18:51:59.998Z" },
- { url = "https://files.pythonhosted.org/packages/2a/57/4ecff6d4ec8585342f0c71bc03efaa99cb7468f7c91a57b105bcd561cea8/charset_normalizer-3.4.6-cp314-cp314-win32.whl", hash = "sha256:b4ff1d35e8c5bd078be89349b6f3a845128e685e751b6ea1169cf2160b344c4d", size = 144610, upload-time = "2026-03-15T18:52:02.213Z" },
- { url = "https://files.pythonhosted.org/packages/80/94/8434a02d9d7f168c25767c64671fead8d599744a05d6a6c877144c754246/charset_normalizer-3.4.6-cp314-cp314-win_amd64.whl", hash = "sha256:74119174722c4349af9708993118581686f343adc1c8c9c007d59be90d077f3f", size = 154962, upload-time = "2026-03-15T18:52:03.658Z" },
- { url = "https://files.pythonhosted.org/packages/46/4c/48f2cdbfd923026503dfd67ccea45c94fd8fe988d9056b468579c66ed62b/charset_normalizer-3.4.6-cp314-cp314-win_arm64.whl", hash = "sha256:e5bcc1a1ae744e0bb59641171ae53743760130600da8db48cbb6e4918e186e4e", size = 143595, upload-time = "2026-03-15T18:52:05.123Z" },
- { url = "https://files.pythonhosted.org/packages/31/93/8878be7569f87b14f1d52032946131bcb6ebbd8af3e20446bc04053dc3f1/charset_normalizer-3.4.6-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:ad8faf8df23f0378c6d527d8b0b15ea4a2e23c89376877c598c4870d1b2c7866", size = 314828, upload-time = "2026-03-15T18:52:06.831Z" },
- { url = "https://files.pythonhosted.org/packages/06/b6/fae511ca98aac69ecc35cde828b0a3d146325dd03d99655ad38fc2cc3293/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f5ea69428fa1b49573eef0cc44a1d43bebd45ad0c611eb7d7eac760c7ae771bc", size = 208138, upload-time = "2026-03-15T18:52:08.239Z" },
- { url = "https://files.pythonhosted.org/packages/54/57/64caf6e1bf07274a1e0b7c160a55ee9e8c9ec32c46846ce59b9c333f7008/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:06a7e86163334edfc5d20fe104db92fcd666e5a5df0977cb5680a506fe26cc8e", size = 224679, upload-time = "2026-03-15T18:52:10.043Z" },
- { url = "https://files.pythonhosted.org/packages/aa/cb/9ff5a25b9273ef160861b41f6937f86fae18b0792fe0a8e75e06acb08f1d/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e1f6e2f00a6b8edb562826e4632e26d063ac10307e80f7461f7de3ad8ef3f077", size = 223475, upload-time = "2026-03-15T18:52:11.854Z" },
- { url = "https://files.pythonhosted.org/packages/fc/97/440635fc093b8d7347502a377031f9605a1039c958f3cd18dcacffb37743/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95b52c68d64c1878818687a473a10547b3292e82b6f6fe483808fb1468e2f52f", size = 215230, upload-time = "2026-03-15T18:52:13.325Z" },
- { url = "https://files.pythonhosted.org/packages/cd/24/afff630feb571a13f07c8539fbb502d2ab494019492aaffc78ef41f1d1d0/charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:7504e9b7dc05f99a9bbb4525c67a2c155073b44d720470a148b34166a69c054e", size = 199045, upload-time = "2026-03-15T18:52:14.752Z" },
- { url = "https://files.pythonhosted.org/packages/e5/17/d1399ecdaf7e0498c327433e7eefdd862b41236a7e484355b8e0e5ebd64b/charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:172985e4ff804a7ad08eebec0a1640ece87ba5041d565fff23c8f99c1f389484", size = 211658, upload-time = "2026-03-15T18:52:16.278Z" },
- { url = "https://files.pythonhosted.org/packages/b5/38/16baa0affb957b3d880e5ac2144caf3f9d7de7bc4a91842e447fbb5e8b67/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:4be9f4830ba8741527693848403e2c457c16e499100963ec711b1c6f2049b7c7", size = 210769, upload-time = "2026-03-15T18:52:17.782Z" },
- { url = "https://files.pythonhosted.org/packages/05/34/c531bc6ac4c21da9ddfddb3107be2287188b3ea4b53b70fc58f2a77ac8d8/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:79090741d842f564b1b2827c0b82d846405b744d31e84f18d7a7b41c20e473ff", size = 201328, upload-time = "2026-03-15T18:52:19.553Z" },
- { url = "https://files.pythonhosted.org/packages/fa/73/a5a1e9ca5f234519c1953608a03fe109c306b97fdfb25f09182babad51a7/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:87725cfb1a4f1f8c2fc9890ae2f42094120f4b44db9360be5d99a4c6b0e03a9e", size = 225302, upload-time = "2026-03-15T18:52:21.043Z" },
- { url = "https://files.pythonhosted.org/packages/ba/f6/cd782923d112d296294dea4bcc7af5a7ae0f86ab79f8fefbda5526b6cfc0/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:fcce033e4021347d80ed9c66dcf1e7b1546319834b74445f561d2e2221de5659", size = 211127, upload-time = "2026-03-15T18:52:22.491Z" },
- { url = "https://files.pythonhosted.org/packages/0e/c5/0b6898950627af7d6103a449b22320372c24c6feda91aa24e201a478d161/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:ca0276464d148c72defa8bb4390cce01b4a0e425f3b50d1435aa6d7a18107602", size = 222840, upload-time = "2026-03-15T18:52:24.113Z" },
- { url = "https://files.pythonhosted.org/packages/7d/25/c4bba773bef442cbdc06111d40daa3de5050a676fa26e85090fc54dd12f0/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:197c1a244a274bb016dd8b79204850144ef77fe81c5b797dc389327adb552407", size = 216890, upload-time = "2026-03-15T18:52:25.541Z" },
- { url = "https://files.pythonhosted.org/packages/35/1a/05dacadb0978da72ee287b0143097db12f2e7e8d3ffc4647da07a383b0b7/charset_normalizer-3.4.6-cp314-cp314t-win32.whl", hash = "sha256:2a24157fa36980478dd1770b585c0f30d19e18f4fb0c47c13aa568f871718579", size = 155379, upload-time = "2026-03-15T18:52:27.05Z" },
- { url = "https://files.pythonhosted.org/packages/5d/7a/d269d834cb3a76291651256f3b9a5945e81d0a49ab9f4a498964e83c0416/charset_normalizer-3.4.6-cp314-cp314t-win_amd64.whl", hash = "sha256:cd5e2801c89992ed8c0a3f0293ae83c159a60d9a5d685005383ef4caca77f2c4", size = 169043, upload-time = "2026-03-15T18:52:28.502Z" },
- { url = "https://files.pythonhosted.org/packages/23/06/28b29fba521a37a8932c6a84192175c34d49f84a6d4773fa63d05f9aff22/charset_normalizer-3.4.6-cp314-cp314t-win_arm64.whl", hash = "sha256:47955475ac79cc504ef2704b192364e51d0d473ad452caedd0002605f780101c", size = 148523, upload-time = "2026-03-15T18:52:29.956Z" },
- { url = "https://files.pythonhosted.org/packages/41/85/580dbaa12ab31041ed7df59f0bebc8893514fc21da6c05c3a1c1707d118f/charset_normalizer-3.4.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:31215157227939b4fb3d740cd23fe27be0439afef67b785a1eb78a3ae69cba9e", size = 298620, upload-time = "2026-03-15T18:52:57.332Z" },
- { url = "https://files.pythonhosted.org/packages/67/2c/1e55af3a5e2f52e44396d5c5b731e0ae4f3bb92915ff09a610fb2f4497eb/charset_normalizer-3.4.6-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ecbbd45615a6885fe3240eb9db73b9e62518b611850fdf8ab08bd56de7ad2b17", size = 200106, upload-time = "2026-03-15T18:52:59.2Z" },
- { url = "https://files.pythonhosted.org/packages/10/42/0f2f51a1d16caa45fbf384fd337d4242df1a5b313babee211381d2d39a96/charset_normalizer-3.4.6-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c45a03a4c69820a399f1dda9e1d8fbf3562eda46e7720458180302021b08f778", size = 220539, upload-time = "2026-03-15T18:53:01.019Z" },
- { url = "https://files.pythonhosted.org/packages/1c/0c/4e10996c740eec0f4ae8afbbbfa25f66e8479c4b6ee9cff1ca366a4f6c04/charset_normalizer-3.4.6-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e8aeb10fcbe92767f0fa69ad5a72deca50d0dca07fbde97848997d778a50c9fe", size = 215821, upload-time = "2026-03-15T18:53:02.621Z" },
- { url = "https://files.pythonhosted.org/packages/46/73/205ae7644ebb581a7c6fa9c3751e283606e145f0e6f066003c66aafc9973/charset_normalizer-3.4.6-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:54fae94be3d75f3e573c9a1b5402dc593de19377013c9a0e4285e3d402dd3a2a", size = 207917, upload-time = "2026-03-15T18:53:04.413Z" },
- { url = "https://files.pythonhosted.org/packages/b3/ca/18f7dcf19afdab8097aeb2feb8b3809bb4b6ee356cb720abf5263d79406a/charset_normalizer-3.4.6-cp39-cp39-manylinux_2_31_armv7l.whl", hash = "sha256:2f7fdd9b6e6c529d6a2501a2d36b240109e78a8ceaef5687cfcfa2bbe671d297", size = 194513, upload-time = "2026-03-15T18:53:06.025Z" },
- { url = "https://files.pythonhosted.org/packages/e4/6a/e7e3e204c8d79832a091e00b24595af1d5d9800d37dc1f67a6b264cc99a6/charset_normalizer-3.4.6-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4d1d02209e06550bdaef34af58e041ad71b88e624f5d825519da3a3308e22687", size = 205612, upload-time = "2026-03-15T18:53:07.494Z" },
- { url = "https://files.pythonhosted.org/packages/ff/ae/2169ebcea2851c5460c7a21993a0f87028be3c3e60899cb36251e1135cf5/charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8bc5f0687d796c05b1e28ab0d38a50e6309906ee09375dd3aff6a9c09dd6e8f4", size = 203519, upload-time = "2026-03-15T18:53:09.048Z" },
- { url = "https://files.pythonhosted.org/packages/43/a0/6a49a925b9c225fe35dffeac5c76f68996b814c637e9d7213718f96be109/charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:ee4ec14bc1680d6b0afab9aea2ef27e26d2024f18b24a2d7155a52b60da7e833", size = 195411, upload-time = "2026-03-15T18:53:10.542Z" },
- { url = "https://files.pythonhosted.org/packages/47/f7/a26b0a18e52b1a0f11f53c2c400ed062f386ac227a64ae4be4c5a64699be/charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d1a2ee9c1499fc8f86f4521f27a973c914b211ffa87322f4ee33bb35392da2c5", size = 221653, upload-time = "2026-03-15T18:53:12.394Z" },
- { url = "https://files.pythonhosted.org/packages/a7/3a/ed1d3b5bb55e3634bd5c31cedbe4fff79d0e5b8d9a062f663a757a07760d/charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:48696db7f18afb80a068821504296eb0787d9ce239b91ca15059d1d3eaacf13b", size = 205650, upload-time = "2026-03-15T18:53:13.934Z" },
- { url = "https://files.pythonhosted.org/packages/b1/27/c75819eea5ceeefc49bae329327bb91e81adc346e2a9873d9fdb9e77cde6/charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4f41da960b196ea355357285ad1316a00099f22d0929fe168343b99b254729c9", size = 216919, upload-time = "2026-03-15T18:53:15.44Z" },
- { url = "https://files.pythonhosted.org/packages/0f/42/6e91bf8b15f67b7c957091138a36057a083e60703cc27848d5e36ca1eb03/charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:802168e03fba8bbc5ce0d866d589e4b1ca751d06edee69f7f3a19c5a9fe6b597", size = 210101, upload-time = "2026-03-15T18:53:17.045Z" },
- { url = "https://files.pythonhosted.org/packages/99/ff/101af2605e66a7ee59961d7f9e1060df7c92e8ea54208a02ab881422c24e/charset_normalizer-3.4.6-cp39-cp39-win32.whl", hash = "sha256:8761ac29b6c81574724322a554605608a9960769ea83d2c73e396f3df896ad54", size = 144136, upload-time = "2026-03-15T18:53:19.152Z" },
- { url = "https://files.pythonhosted.org/packages/1d/da/de5942dfbf21f28c19e9202267dabf7bc73f195465d020a3a60054520cc5/charset_normalizer-3.4.6-cp39-cp39-win_amd64.whl", hash = "sha256:1cf0a70018692f85172348fe06d3a4b63f94ecb055e13a00c644d368eb82e5b8", size = 154210, upload-time = "2026-03-15T18:53:20.576Z" },
- { url = "https://files.pythonhosted.org/packages/06/df/1b780a25b86d22b1d736f6ac883afd38ffdf30ddc18e5dc0e82211f493f1/charset_normalizer-3.4.6-cp39-cp39-win_arm64.whl", hash = "sha256:3516bbb8d42169de9e61b8520cbeeeb716f12f4ecfe3fd30a9919aa16c806ca8", size = 143225, upload-time = "2026-03-15T18:53:22.072Z" },
- { url = "https://files.pythonhosted.org/packages/2a/68/687187c7e26cb24ccbd88e5069f5ef00eba804d36dde11d99aad0838ab45/charset_normalizer-3.4.6-py3-none-any.whl", hash = "sha256:947cf925bc916d90adba35a64c82aace04fa39b46b52d4630ece166655905a69", size = 61455, upload-time = "2026-03-15T18:53:23.833Z" },
-]
-
-[[package]]
-name = "click"
-version = "8.1.8"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "colorama", marker = "python_full_version < '3.10' and sys_platform == 'win32'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" },
-]
-
-[[package]]
-name = "click"
-version = "8.3.1"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "colorama", marker = "python_full_version >= '3.10' and sys_platform == 'win32'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" },
-]
-
-[[package]]
-name = "colorama"
-version = "0.4.6"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
-]
-
-[[package]]
-name = "contourpy"
-version = "1.3.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/f5/f6/31a8f28b4a2a4fa0e01085e542f3081ab0588eff8e589d39d775172c9792/contourpy-1.3.0.tar.gz", hash = "sha256:7ffa0db17717a8ffb127efd0c95a4362d996b892c2904db72428d5b52e1938a4", size = 13464370, upload-time = "2024-08-27T21:00:03.328Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/6c/e0/be8dcc796cfdd96708933e0e2da99ba4bb8f9b2caa9d560a50f3f09a65f3/contourpy-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:880ea32e5c774634f9fcd46504bf9f080a41ad855f4fef54f5380f5133d343c7", size = 265366, upload-time = "2024-08-27T20:50:09.947Z" },
- { url = "https://files.pythonhosted.org/packages/50/d6/c953b400219443535d412fcbbc42e7a5e823291236bc0bb88936e3cc9317/contourpy-1.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:76c905ef940a4474a6289c71d53122a4f77766eef23c03cd57016ce19d0f7b42", size = 249226, upload-time = "2024-08-27T20:50:16.1Z" },
- { url = "https://files.pythonhosted.org/packages/6f/b4/6fffdf213ffccc28483c524b9dad46bb78332851133b36ad354b856ddc7c/contourpy-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92f8557cbb07415a4d6fa191f20fd9d2d9eb9c0b61d1b2f52a8926e43c6e9af7", size = 308460, upload-time = "2024-08-27T20:50:22.536Z" },
- { url = "https://files.pythonhosted.org/packages/cf/6c/118fc917b4050f0afe07179a6dcbe4f3f4ec69b94f36c9e128c4af480fb8/contourpy-1.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36f965570cff02b874773c49bfe85562b47030805d7d8360748f3eca570f4cab", size = 347623, upload-time = "2024-08-27T20:50:28.806Z" },
- { url = "https://files.pythonhosted.org/packages/f9/a4/30ff110a81bfe3abf7b9673284d21ddce8cc1278f6f77393c91199da4c90/contourpy-1.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cacd81e2d4b6f89c9f8a5b69b86490152ff39afc58a95af002a398273e5ce589", size = 317761, upload-time = "2024-08-27T20:50:35.126Z" },
- { url = "https://files.pythonhosted.org/packages/99/e6/d11966962b1aa515f5586d3907ad019f4b812c04e4546cc19ebf62b5178e/contourpy-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69375194457ad0fad3a839b9e29aa0b0ed53bb54db1bfb6c3ae43d111c31ce41", size = 322015, upload-time = "2024-08-27T20:50:40.318Z" },
- { url = "https://files.pythonhosted.org/packages/4d/e3/182383743751d22b7b59c3c753277b6aee3637049197624f333dac5b4c80/contourpy-1.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a52040312b1a858b5e31ef28c2e865376a386c60c0e248370bbea2d3f3b760d", size = 1262672, upload-time = "2024-08-27T20:50:55.643Z" },
- { url = "https://files.pythonhosted.org/packages/78/53/974400c815b2e605f252c8fb9297e2204347d1755a5374354ee77b1ea259/contourpy-1.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3faeb2998e4fcb256542e8a926d08da08977f7f5e62cf733f3c211c2a5586223", size = 1321688, upload-time = "2024-08-27T20:51:11.293Z" },
- { url = "https://files.pythonhosted.org/packages/52/29/99f849faed5593b2926a68a31882af98afbeac39c7fdf7de491d9c85ec6a/contourpy-1.3.0-cp310-cp310-win32.whl", hash = "sha256:36e0cff201bcb17a0a8ecc7f454fe078437fa6bda730e695a92f2d9932bd507f", size = 171145, upload-time = "2024-08-27T20:51:15.2Z" },
- { url = "https://files.pythonhosted.org/packages/a9/97/3f89bba79ff6ff2b07a3cbc40aa693c360d5efa90d66e914f0ff03b95ec7/contourpy-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:87ddffef1dbe5e669b5c2440b643d3fdd8622a348fe1983fad7a0f0ccb1cd67b", size = 216019, upload-time = "2024-08-27T20:51:19.365Z" },
- { url = "https://files.pythonhosted.org/packages/b3/1f/9375917786cb39270b0ee6634536c0e22abf225825602688990d8f5c6c19/contourpy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fa4c02abe6c446ba70d96ece336e621efa4aecae43eaa9b030ae5fb92b309ad", size = 266356, upload-time = "2024-08-27T20:51:24.146Z" },
- { url = "https://files.pythonhosted.org/packages/05/46/9256dd162ea52790c127cb58cfc3b9e3413a6e3478917d1f811d420772ec/contourpy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:834e0cfe17ba12f79963861e0f908556b2cedd52e1f75e6578801febcc6a9f49", size = 250915, upload-time = "2024-08-27T20:51:28.683Z" },
- { url = "https://files.pythonhosted.org/packages/e1/5d/3056c167fa4486900dfbd7e26a2fdc2338dc58eee36d490a0ed3ddda5ded/contourpy-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbc4c3217eee163fa3984fd1567632b48d6dfd29216da3ded3d7b844a8014a66", size = 310443, upload-time = "2024-08-27T20:51:33.675Z" },
- { url = "https://files.pythonhosted.org/packages/ca/c2/1a612e475492e07f11c8e267ea5ec1ce0d89971be496c195e27afa97e14a/contourpy-1.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4865cd1d419e0c7a7bf6de1777b185eebdc51470800a9f42b9e9decf17762081", size = 348548, upload-time = "2024-08-27T20:51:39.322Z" },
- { url = "https://files.pythonhosted.org/packages/45/cf/2c2fc6bb5874158277b4faf136847f0689e1b1a1f640a36d76d52e78907c/contourpy-1.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:303c252947ab4b14c08afeb52375b26781ccd6a5ccd81abcdfc1fafd14cf93c1", size = 319118, upload-time = "2024-08-27T20:51:44.717Z" },
- { url = "https://files.pythonhosted.org/packages/03/33/003065374f38894cdf1040cef474ad0546368eea7e3a51d48b8a423961f8/contourpy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637f674226be46f6ba372fd29d9523dd977a291f66ab2a74fbeb5530bb3f445d", size = 323162, upload-time = "2024-08-27T20:51:49.683Z" },
- { url = "https://files.pythonhosted.org/packages/42/80/e637326e85e4105a802e42959f56cff2cd39a6b5ef68d5d9aee3ea5f0e4c/contourpy-1.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76a896b2f195b57db25d6b44e7e03f221d32fe318d03ede41f8b4d9ba1bff53c", size = 1265396, upload-time = "2024-08-27T20:52:04.926Z" },
- { url = "https://files.pythonhosted.org/packages/7c/3b/8cbd6416ca1bbc0202b50f9c13b2e0b922b64be888f9d9ee88e6cfabfb51/contourpy-1.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e1fd23e9d01591bab45546c089ae89d926917a66dceb3abcf01f6105d927e2cb", size = 1324297, upload-time = "2024-08-27T20:52:21.843Z" },
- { url = "https://files.pythonhosted.org/packages/4d/2c/021a7afaa52fe891f25535506cc861c30c3c4e5a1c1ce94215e04b293e72/contourpy-1.3.0-cp311-cp311-win32.whl", hash = "sha256:d402880b84df3bec6eab53cd0cf802cae6a2ef9537e70cf75e91618a3801c20c", size = 171808, upload-time = "2024-08-27T20:52:25.163Z" },
- { url = "https://files.pythonhosted.org/packages/8d/2f/804f02ff30a7fae21f98198828d0857439ec4c91a96e20cf2d6c49372966/contourpy-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:6cb6cc968059db9c62cb35fbf70248f40994dfcd7aa10444bbf8b3faeb7c2d67", size = 217181, upload-time = "2024-08-27T20:52:29.13Z" },
- { url = "https://files.pythonhosted.org/packages/c9/92/8e0bbfe6b70c0e2d3d81272b58c98ac69ff1a4329f18c73bd64824d8b12e/contourpy-1.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:570ef7cf892f0afbe5b2ee410c507ce12e15a5fa91017a0009f79f7d93a1268f", size = 267838, upload-time = "2024-08-27T20:52:33.911Z" },
- { url = "https://files.pythonhosted.org/packages/e3/04/33351c5d5108460a8ce6d512307690b023f0cfcad5899499f5c83b9d63b1/contourpy-1.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:da84c537cb8b97d153e9fb208c221c45605f73147bd4cadd23bdae915042aad6", size = 251549, upload-time = "2024-08-27T20:52:39.179Z" },
- { url = "https://files.pythonhosted.org/packages/51/3d/aa0fe6ae67e3ef9f178389e4caaaa68daf2f9024092aa3c6032e3d174670/contourpy-1.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0be4d8425bfa755e0fd76ee1e019636ccc7c29f77a7c86b4328a9eb6a26d0639", size = 303177, upload-time = "2024-08-27T20:52:44.789Z" },
- { url = "https://files.pythonhosted.org/packages/56/c3/c85a7e3e0cab635575d3b657f9535443a6f5d20fac1a1911eaa4bbe1aceb/contourpy-1.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c0da700bf58f6e0b65312d0a5e695179a71d0163957fa381bb3c1f72972537c", size = 341735, upload-time = "2024-08-27T20:52:51.05Z" },
- { url = "https://files.pythonhosted.org/packages/dd/8d/20f7a211a7be966a53f474bc90b1a8202e9844b3f1ef85f3ae45a77151ee/contourpy-1.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb8b141bb00fa977d9122636b16aa67d37fd40a3d8b52dd837e536d64b9a4d06", size = 314679, upload-time = "2024-08-27T20:52:58.473Z" },
- { url = "https://files.pythonhosted.org/packages/6e/be/524e377567defac0e21a46e2a529652d165fed130a0d8a863219303cee18/contourpy-1.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3634b5385c6716c258d0419c46d05c8aa7dc8cb70326c9a4fb66b69ad2b52e09", size = 320549, upload-time = "2024-08-27T20:53:06.593Z" },
- { url = "https://files.pythonhosted.org/packages/0f/96/fdb2552a172942d888915f3a6663812e9bc3d359d53dafd4289a0fb462f0/contourpy-1.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0dce35502151b6bd35027ac39ba6e5a44be13a68f55735c3612c568cac3805fd", size = 1263068, upload-time = "2024-08-27T20:53:23.442Z" },
- { url = "https://files.pythonhosted.org/packages/2a/25/632eab595e3140adfa92f1322bf8915f68c932bac468e89eae9974cf1c00/contourpy-1.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea348f053c645100612b333adc5983d87be69acdc6d77d3169c090d3b01dc35", size = 1322833, upload-time = "2024-08-27T20:53:39.243Z" },
- { url = "https://files.pythonhosted.org/packages/73/e3/69738782e315a1d26d29d71a550dbbe3eb6c653b028b150f70c1a5f4f229/contourpy-1.3.0-cp312-cp312-win32.whl", hash = "sha256:90f73a5116ad1ba7174341ef3ea5c3150ddf20b024b98fb0c3b29034752c8aeb", size = 172681, upload-time = "2024-08-27T20:53:43.05Z" },
- { url = "https://files.pythonhosted.org/packages/0c/89/9830ba00d88e43d15e53d64931e66b8792b46eb25e2050a88fec4a0df3d5/contourpy-1.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:b11b39aea6be6764f84360fce6c82211a9db32a7c7de8fa6dd5397cf1d079c3b", size = 218283, upload-time = "2024-08-27T20:53:47.232Z" },
- { url = "https://files.pythonhosted.org/packages/53/a1/d20415febfb2267af2d7f06338e82171824d08614084714fb2c1dac9901f/contourpy-1.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3e1c7fa44aaae40a2247e2e8e0627f4bea3dd257014764aa644f319a5f8600e3", size = 267879, upload-time = "2024-08-27T20:53:51.597Z" },
- { url = "https://files.pythonhosted.org/packages/aa/45/5a28a3570ff6218d8bdfc291a272a20d2648104815f01f0177d103d985e1/contourpy-1.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:364174c2a76057feef647c802652f00953b575723062560498dc7930fc9b1cb7", size = 251573, upload-time = "2024-08-27T20:53:55.659Z" },
- { url = "https://files.pythonhosted.org/packages/39/1c/d3f51540108e3affa84f095c8b04f0aa833bb797bc8baa218a952a98117d/contourpy-1.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32b238b3b3b649e09ce9aaf51f0c261d38644bdfa35cbaf7b263457850957a84", size = 303184, upload-time = "2024-08-27T20:54:00.225Z" },
- { url = "https://files.pythonhosted.org/packages/00/56/1348a44fb6c3a558c1a3a0cd23d329d604c99d81bf5a4b58c6b71aab328f/contourpy-1.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d51fca85f9f7ad0b65b4b9fe800406d0d77017d7270d31ec3fb1cc07358fdea0", size = 340262, upload-time = "2024-08-27T20:54:05.234Z" },
- { url = "https://files.pythonhosted.org/packages/2b/23/00d665ba67e1bb666152131da07e0f24c95c3632d7722caa97fb61470eca/contourpy-1.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:732896af21716b29ab3e988d4ce14bc5133733b85956316fb0c56355f398099b", size = 313806, upload-time = "2024-08-27T20:54:09.889Z" },
- { url = "https://files.pythonhosted.org/packages/5a/42/3cf40f7040bb8362aea19af9a5fb7b32ce420f645dd1590edcee2c657cd5/contourpy-1.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d73f659398a0904e125280836ae6f88ba9b178b2fed6884f3b1f95b989d2c8da", size = 319710, upload-time = "2024-08-27T20:54:14.536Z" },
- { url = "https://files.pythonhosted.org/packages/05/32/f3bfa3fc083b25e1a7ae09197f897476ee68e7386e10404bdf9aac7391f0/contourpy-1.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c6c7c2408b7048082932cf4e641fa3b8ca848259212f51c8c59c45aa7ac18f14", size = 1264107, upload-time = "2024-08-27T20:54:29.735Z" },
- { url = "https://files.pythonhosted.org/packages/1c/1e/1019d34473a736664f2439542b890b2dc4c6245f5c0d8cdfc0ccc2cab80c/contourpy-1.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f317576606de89da6b7e0861cf6061f6146ead3528acabff9236458a6ba467f8", size = 1322458, upload-time = "2024-08-27T20:54:45.507Z" },
- { url = "https://files.pythonhosted.org/packages/22/85/4f8bfd83972cf8909a4d36d16b177f7b8bdd942178ea4bf877d4a380a91c/contourpy-1.3.0-cp313-cp313-win32.whl", hash = "sha256:31cd3a85dbdf1fc002280c65caa7e2b5f65e4a973fcdf70dd2fdcb9868069294", size = 172643, upload-time = "2024-08-27T20:55:52.754Z" },
- { url = "https://files.pythonhosted.org/packages/cc/4a/fb3c83c1baba64ba90443626c228ca14f19a87c51975d3b1de308dd2cf08/contourpy-1.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4553c421929ec95fb07b3aaca0fae668b2eb5a5203d1217ca7c34c063c53d087", size = 218301, upload-time = "2024-08-27T20:55:56.509Z" },
- { url = "https://files.pythonhosted.org/packages/76/65/702f4064f397821fea0cb493f7d3bc95a5d703e20954dce7d6d39bacf378/contourpy-1.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:345af746d7766821d05d72cb8f3845dfd08dd137101a2cb9b24de277d716def8", size = 278972, upload-time = "2024-08-27T20:54:50.347Z" },
- { url = "https://files.pythonhosted.org/packages/80/85/21f5bba56dba75c10a45ec00ad3b8190dbac7fd9a8a8c46c6116c933e9cf/contourpy-1.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3bb3808858a9dc68f6f03d319acd5f1b8a337e6cdda197f02f4b8ff67ad2057b", size = 263375, upload-time = "2024-08-27T20:54:54.909Z" },
- { url = "https://files.pythonhosted.org/packages/0a/64/084c86ab71d43149f91ab3a4054ccf18565f0a8af36abfa92b1467813ed6/contourpy-1.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:420d39daa61aab1221567b42eecb01112908b2cab7f1b4106a52caaec8d36973", size = 307188, upload-time = "2024-08-27T20:55:00.184Z" },
- { url = "https://files.pythonhosted.org/packages/3d/ff/d61a4c288dc42da0084b8d9dc2aa219a850767165d7d9a9c364ff530b509/contourpy-1.3.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d63ee447261e963af02642ffcb864e5a2ee4cbfd78080657a9880b8b1868e18", size = 345644, upload-time = "2024-08-27T20:55:05.673Z" },
- { url = "https://files.pythonhosted.org/packages/ca/aa/00d2313d35ec03f188e8f0786c2fc61f589306e02fdc158233697546fd58/contourpy-1.3.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:167d6c890815e1dac9536dca00828b445d5d0df4d6a8c6adb4a7ec3166812fa8", size = 317141, upload-time = "2024-08-27T20:55:11.047Z" },
- { url = "https://files.pythonhosted.org/packages/8d/6a/b5242c8cb32d87f6abf4f5e3044ca397cb1a76712e3fa2424772e3ff495f/contourpy-1.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:710a26b3dc80c0e4febf04555de66f5fd17e9cf7170a7b08000601a10570bda6", size = 323469, upload-time = "2024-08-27T20:55:15.914Z" },
- { url = "https://files.pythonhosted.org/packages/6f/a6/73e929d43028a9079aca4bde107494864d54f0d72d9db508a51ff0878593/contourpy-1.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:75ee7cb1a14c617f34a51d11fa7524173e56551646828353c4af859c56b766e2", size = 1260894, upload-time = "2024-08-27T20:55:31.553Z" },
- { url = "https://files.pythonhosted.org/packages/2b/1e/1e726ba66eddf21c940821df8cf1a7d15cb165f0682d62161eaa5e93dae1/contourpy-1.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:33c92cdae89ec5135d036e7218e69b0bb2851206077251f04a6c4e0e21f03927", size = 1314829, upload-time = "2024-08-27T20:55:47.837Z" },
- { url = "https://files.pythonhosted.org/packages/b3/e3/b9f72758adb6ef7397327ceb8b9c39c75711affb220e4f53c745ea1d5a9a/contourpy-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a11077e395f67ffc2c44ec2418cfebed032cd6da3022a94fc227b6faf8e2acb8", size = 265518, upload-time = "2024-08-27T20:56:01.333Z" },
- { url = "https://files.pythonhosted.org/packages/ec/22/19f5b948367ab5260fb41d842c7a78dae645603881ea6bc39738bcfcabf6/contourpy-1.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e8134301d7e204c88ed7ab50028ba06c683000040ede1d617298611f9dc6240c", size = 249350, upload-time = "2024-08-27T20:56:05.432Z" },
- { url = "https://files.pythonhosted.org/packages/26/76/0c7d43263dd00ae21a91a24381b7e813d286a3294d95d179ef3a7b9fb1d7/contourpy-1.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e12968fdfd5bb45ffdf6192a590bd8ddd3ba9e58360b29683c6bb71a7b41edca", size = 309167, upload-time = "2024-08-27T20:56:10.034Z" },
- { url = "https://files.pythonhosted.org/packages/96/3b/cadff6773e89f2a5a492c1a8068e21d3fccaf1a1c1df7d65e7c8e3ef60ba/contourpy-1.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fd2a0fc506eccaaa7595b7e1418951f213cf8255be2600f1ea1b61e46a60c55f", size = 348279, upload-time = "2024-08-27T20:56:15.41Z" },
- { url = "https://files.pythonhosted.org/packages/e1/86/158cc43aa549d2081a955ab11c6bdccc7a22caacc2af93186d26f5f48746/contourpy-1.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4cfb5c62ce023dfc410d6059c936dcf96442ba40814aefbfa575425a3a7f19dc", size = 318519, upload-time = "2024-08-27T20:56:21.813Z" },
- { url = "https://files.pythonhosted.org/packages/05/11/57335544a3027e9b96a05948c32e566328e3a2f84b7b99a325b7a06d2b06/contourpy-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68a32389b06b82c2fdd68276148d7b9275b5f5cf13e5417e4252f6d1a34f72a2", size = 321922, upload-time = "2024-08-27T20:56:26.983Z" },
- { url = "https://files.pythonhosted.org/packages/0b/e3/02114f96543f4a1b694333b92a6dcd4f8eebbefcc3a5f3bbb1316634178f/contourpy-1.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:94e848a6b83da10898cbf1311a815f770acc9b6a3f2d646f330d57eb4e87592e", size = 1258017, upload-time = "2024-08-27T20:56:42.246Z" },
- { url = "https://files.pythonhosted.org/packages/f3/3b/bfe4c81c6d5881c1c643dde6620be0b42bf8aab155976dd644595cfab95c/contourpy-1.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d78ab28a03c854a873787a0a42254a0ccb3cb133c672f645c9f9c8f3ae9d0800", size = 1316773, upload-time = "2024-08-27T20:56:58.58Z" },
- { url = "https://files.pythonhosted.org/packages/f1/17/c52d2970784383cafb0bd918b6fb036d98d96bbf0bc1befb5d1e31a07a70/contourpy-1.3.0-cp39-cp39-win32.whl", hash = "sha256:81cb5ed4952aae6014bc9d0421dec7c5835c9c8c31cdf51910b708f548cf58e5", size = 171353, upload-time = "2024-08-27T20:57:02.718Z" },
- { url = "https://files.pythonhosted.org/packages/53/23/db9f69676308e094d3c45f20cc52e12d10d64f027541c995d89c11ad5c75/contourpy-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:14e262f67bd7e6eb6880bc564dcda30b15e351a594657e55b7eec94b6ef72843", size = 211817, upload-time = "2024-08-27T20:57:06.328Z" },
- { url = "https://files.pythonhosted.org/packages/d1/09/60e486dc2b64c94ed33e58dcfb6f808192c03dfc5574c016218b9b7680dc/contourpy-1.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:fe41b41505a5a33aeaed2a613dccaeaa74e0e3ead6dd6fd3a118fb471644fd6c", size = 261886, upload-time = "2024-08-27T20:57:10.863Z" },
- { url = "https://files.pythonhosted.org/packages/19/20/b57f9f7174fcd439a7789fb47d764974ab646fa34d1790551de386457a8e/contourpy-1.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca7e17a65f72a5133bdbec9ecf22401c62bcf4821361ef7811faee695799779", size = 311008, upload-time = "2024-08-27T20:57:15.588Z" },
- { url = "https://files.pythonhosted.org/packages/74/fc/5040d42623a1845d4f17a418e590fd7a79ae8cb2bad2b2f83de63c3bdca4/contourpy-1.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1ec4dc6bf570f5b22ed0d7efba0dfa9c5b9e0431aeea7581aa217542d9e809a4", size = 215690, upload-time = "2024-08-27T20:57:19.321Z" },
- { url = "https://files.pythonhosted.org/packages/2b/24/dc3dcd77ac7460ab7e9d2b01a618cb31406902e50e605a8d6091f0a8f7cc/contourpy-1.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:00ccd0dbaad6d804ab259820fa7cb0b8036bda0686ef844d24125d8287178ce0", size = 261894, upload-time = "2024-08-27T20:57:23.873Z" },
- { url = "https://files.pythonhosted.org/packages/b1/db/531642a01cfec39d1682e46b5457b07cf805e3c3c584ec27e2a6223f8f6c/contourpy-1.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ca947601224119117f7c19c9cdf6b3ab54c5726ef1d906aa4a69dfb6dd58102", size = 311099, upload-time = "2024-08-27T20:57:28.58Z" },
- { url = "https://files.pythonhosted.org/packages/38/1e/94bda024d629f254143a134eead69e21c836429a2a6ce82209a00ddcb79a/contourpy-1.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6ec93afeb848a0845a18989da3beca3eec2c0f852322efe21af1931147d12cb", size = 215838, upload-time = "2024-08-27T20:57:32.913Z" },
-]
-
-[[package]]
-name = "contourpy"
-version = "1.3.3"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "numpy", version = "2.4.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/58/01/1253e6698a07380cd31a736d248a3f2a50a7c88779a1813da27503cadc2a/contourpy-1.3.3.tar.gz", hash = "sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880", size = 13466174, upload-time = "2025-07-26T12:03:12.549Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/91/2e/c4390a31919d8a78b90e8ecf87cd4b4c4f05a5b48d05ec17db8e5404c6f4/contourpy-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:709a48ef9a690e1343202916450bc48b9e51c049b089c7f79a267b46cffcdaa1", size = 288773, upload-time = "2025-07-26T12:01:02.277Z" },
- { url = "https://files.pythonhosted.org/packages/0d/44/c4b0b6095fef4dc9c420e041799591e3b63e9619e3044f7f4f6c21c0ab24/contourpy-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:23416f38bfd74d5d28ab8429cc4d63fa67d5068bd711a85edb1c3fb0c3e2f381", size = 270149, upload-time = "2025-07-26T12:01:04.072Z" },
- { url = "https://files.pythonhosted.org/packages/30/2e/dd4ced42fefac8470661d7cb7e264808425e6c5d56d175291e93890cce09/contourpy-1.3.3-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:929ddf8c4c7f348e4c0a5a3a714b5c8542ffaa8c22954862a46ca1813b667ee7", size = 329222, upload-time = "2025-07-26T12:01:05.688Z" },
- { url = "https://files.pythonhosted.org/packages/f2/74/cc6ec2548e3d276c71389ea4802a774b7aa3558223b7bade3f25787fafc2/contourpy-1.3.3-cp311-cp311-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9e999574eddae35f1312c2b4b717b7885d4edd6cb46700e04f7f02db454e67c1", size = 377234, upload-time = "2025-07-26T12:01:07.054Z" },
- { url = "https://files.pythonhosted.org/packages/03/b3/64ef723029f917410f75c09da54254c5f9ea90ef89b143ccadb09df14c15/contourpy-1.3.3-cp311-cp311-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf67e0e3f482cb69779dd3061b534eb35ac9b17f163d851e2a547d56dba0a3a", size = 380555, upload-time = "2025-07-26T12:01:08.801Z" },
- { url = "https://files.pythonhosted.org/packages/5f/4b/6157f24ca425b89fe2eb7e7be642375711ab671135be21e6faa100f7448c/contourpy-1.3.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:51e79c1f7470158e838808d4a996fa9bac72c498e93d8ebe5119bc1e6becb0db", size = 355238, upload-time = "2025-07-26T12:01:10.319Z" },
- { url = "https://files.pythonhosted.org/packages/98/56/f914f0dd678480708a04cfd2206e7c382533249bc5001eb9f58aa693e200/contourpy-1.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:598c3aaece21c503615fd59c92a3598b428b2f01bfb4b8ca9c4edeecc2438620", size = 1326218, upload-time = "2025-07-26T12:01:12.659Z" },
- { url = "https://files.pythonhosted.org/packages/fb/d7/4a972334a0c971acd5172389671113ae82aa7527073980c38d5868ff1161/contourpy-1.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:322ab1c99b008dad206d406bb61d014cf0174df491ae9d9d0fac6a6fda4f977f", size = 1392867, upload-time = "2025-07-26T12:01:15.533Z" },
- { url = "https://files.pythonhosted.org/packages/75/3e/f2cc6cd56dc8cff46b1a56232eabc6feea52720083ea71ab15523daab796/contourpy-1.3.3-cp311-cp311-win32.whl", hash = "sha256:fd907ae12cd483cd83e414b12941c632a969171bf90fc937d0c9f268a31cafff", size = 183677, upload-time = "2025-07-26T12:01:17.088Z" },
- { url = "https://files.pythonhosted.org/packages/98/4b/9bd370b004b5c9d8045c6c33cf65bae018b27aca550a3f657cdc99acdbd8/contourpy-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:3519428f6be58431c56581f1694ba8e50626f2dd550af225f82fb5f5814d2a42", size = 225234, upload-time = "2025-07-26T12:01:18.256Z" },
- { url = "https://files.pythonhosted.org/packages/d9/b6/71771e02c2e004450c12b1120a5f488cad2e4d5b590b1af8bad060360fe4/contourpy-1.3.3-cp311-cp311-win_arm64.whl", hash = "sha256:15ff10bfada4bf92ec8b31c62bf7c1834c244019b4a33095a68000d7075df470", size = 193123, upload-time = "2025-07-26T12:01:19.848Z" },
- { url = "https://files.pythonhosted.org/packages/be/45/adfee365d9ea3d853550b2e735f9d66366701c65db7855cd07621732ccfc/contourpy-1.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b08a32ea2f8e42cf1d4be3169a98dd4be32bafe4f22b6c4cb4ba810fa9e5d2cb", size = 293419, upload-time = "2025-07-26T12:01:21.16Z" },
- { url = "https://files.pythonhosted.org/packages/53/3e/405b59cfa13021a56bba395a6b3aca8cec012b45bf177b0eaf7a202cde2c/contourpy-1.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:556dba8fb6f5d8742f2923fe9457dbdd51e1049c4a43fd3986a0b14a1d815fc6", size = 273979, upload-time = "2025-07-26T12:01:22.448Z" },
- { url = "https://files.pythonhosted.org/packages/d4/1c/a12359b9b2ca3a845e8f7f9ac08bdf776114eb931392fcad91743e2ea17b/contourpy-1.3.3-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92d9abc807cf7d0e047b95ca5d957cf4792fcd04e920ca70d48add15c1a90ea7", size = 332653, upload-time = "2025-07-26T12:01:24.155Z" },
- { url = "https://files.pythonhosted.org/packages/63/12/897aeebfb475b7748ea67b61e045accdfcf0d971f8a588b67108ed7f5512/contourpy-1.3.3-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b2e8faa0ed68cb29af51edd8e24798bb661eac3bd9f65420c1887b6ca89987c8", size = 379536, upload-time = "2025-07-26T12:01:25.91Z" },
- { url = "https://files.pythonhosted.org/packages/43/8a/a8c584b82deb248930ce069e71576fc09bd7174bbd35183b7943fb1064fd/contourpy-1.3.3-cp312-cp312-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:626d60935cf668e70a5ce6ff184fd713e9683fb458898e4249b63be9e28286ea", size = 384397, upload-time = "2025-07-26T12:01:27.152Z" },
- { url = "https://files.pythonhosted.org/packages/cc/8f/ec6289987824b29529d0dfda0d74a07cec60e54b9c92f3c9da4c0ac732de/contourpy-1.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d00e655fcef08aba35ec9610536bfe90267d7ab5ba944f7032549c55a146da1", size = 362601, upload-time = "2025-07-26T12:01:28.808Z" },
- { url = "https://files.pythonhosted.org/packages/05/0a/a3fe3be3ee2dceb3e615ebb4df97ae6f3828aa915d3e10549ce016302bd1/contourpy-1.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:451e71b5a7d597379ef572de31eeb909a87246974d960049a9848c3bc6c41bf7", size = 1331288, upload-time = "2025-07-26T12:01:31.198Z" },
- { url = "https://files.pythonhosted.org/packages/33/1d/acad9bd4e97f13f3e2b18a3977fe1b4a37ecf3d38d815333980c6c72e963/contourpy-1.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:459c1f020cd59fcfe6650180678a9993932d80d44ccde1fa1868977438f0b411", size = 1403386, upload-time = "2025-07-26T12:01:33.947Z" },
- { url = "https://files.pythonhosted.org/packages/cf/8f/5847f44a7fddf859704217a99a23a4f6417b10e5ab1256a179264561540e/contourpy-1.3.3-cp312-cp312-win32.whl", hash = "sha256:023b44101dfe49d7d53932be418477dba359649246075c996866106da069af69", size = 185018, upload-time = "2025-07-26T12:01:35.64Z" },
- { url = "https://files.pythonhosted.org/packages/19/e8/6026ed58a64563186a9ee3f29f41261fd1828f527dd93d33b60feca63352/contourpy-1.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:8153b8bfc11e1e4d75bcb0bff1db232f9e10b274e0929de9d608027e0d34ff8b", size = 226567, upload-time = "2025-07-26T12:01:36.804Z" },
- { url = "https://files.pythonhosted.org/packages/d1/e2/f05240d2c39a1ed228d8328a78b6f44cd695f7ef47beb3e684cf93604f86/contourpy-1.3.3-cp312-cp312-win_arm64.whl", hash = "sha256:07ce5ed73ecdc4a03ffe3e1b3e3c1166db35ae7584be76f65dbbe28a7791b0cc", size = 193655, upload-time = "2025-07-26T12:01:37.999Z" },
- { url = "https://files.pythonhosted.org/packages/68/35/0167aad910bbdb9599272bd96d01a9ec6852f36b9455cf2ca67bd4cc2d23/contourpy-1.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:177fb367556747a686509d6fef71d221a4b198a3905fe824430e5ea0fda54eb5", size = 293257, upload-time = "2025-07-26T12:01:39.367Z" },
- { url = "https://files.pythonhosted.org/packages/96/e4/7adcd9c8362745b2210728f209bfbcf7d91ba868a2c5f40d8b58f54c509b/contourpy-1.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d002b6f00d73d69333dac9d0b8d5e84d9724ff9ef044fd63c5986e62b7c9e1b1", size = 274034, upload-time = "2025-07-26T12:01:40.645Z" },
- { url = "https://files.pythonhosted.org/packages/73/23/90e31ceeed1de63058a02cb04b12f2de4b40e3bef5e082a7c18d9c8ae281/contourpy-1.3.3-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:348ac1f5d4f1d66d3322420f01d42e43122f43616e0f194fc1c9f5d830c5b286", size = 334672, upload-time = "2025-07-26T12:01:41.942Z" },
- { url = "https://files.pythonhosted.org/packages/ed/93/b43d8acbe67392e659e1d984700e79eb67e2acb2bd7f62012b583a7f1b55/contourpy-1.3.3-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:655456777ff65c2c548b7c454af9c6f33f16c8884f11083244b5819cc214f1b5", size = 381234, upload-time = "2025-07-26T12:01:43.499Z" },
- { url = "https://files.pythonhosted.org/packages/46/3b/bec82a3ea06f66711520f75a40c8fc0b113b2a75edb36aa633eb11c4f50f/contourpy-1.3.3-cp313-cp313-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:644a6853d15b2512d67881586bd03f462c7ab755db95f16f14d7e238f2852c67", size = 385169, upload-time = "2025-07-26T12:01:45.219Z" },
- { url = "https://files.pythonhosted.org/packages/4b/32/e0f13a1c5b0f8572d0ec6ae2f6c677b7991fafd95da523159c19eff0696a/contourpy-1.3.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4debd64f124ca62069f313a9cb86656ff087786016d76927ae2cf37846b006c9", size = 362859, upload-time = "2025-07-26T12:01:46.519Z" },
- { url = "https://files.pythonhosted.org/packages/33/71/e2a7945b7de4e58af42d708a219f3b2f4cff7386e6b6ab0a0fa0033c49a9/contourpy-1.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a15459b0f4615b00bbd1e91f1b9e19b7e63aea7483d03d804186f278c0af2659", size = 1332062, upload-time = "2025-07-26T12:01:48.964Z" },
- { url = "https://files.pythonhosted.org/packages/12/fc/4e87ac754220ccc0e807284f88e943d6d43b43843614f0a8afa469801db0/contourpy-1.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca0fdcd73925568ca027e0b17ab07aad764be4706d0a925b89227e447d9737b7", size = 1403932, upload-time = "2025-07-26T12:01:51.979Z" },
- { url = "https://files.pythonhosted.org/packages/a6/2e/adc197a37443f934594112222ac1aa7dc9a98faf9c3842884df9a9d8751d/contourpy-1.3.3-cp313-cp313-win32.whl", hash = "sha256:b20c7c9a3bf701366556e1b1984ed2d0cedf999903c51311417cf5f591d8c78d", size = 185024, upload-time = "2025-07-26T12:01:53.245Z" },
- { url = "https://files.pythonhosted.org/packages/18/0b/0098c214843213759692cc638fce7de5c289200a830e5035d1791d7a2338/contourpy-1.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:1cadd8b8969f060ba45ed7c1b714fe69185812ab43bd6b86a9123fe8f99c3263", size = 226578, upload-time = "2025-07-26T12:01:54.422Z" },
- { url = "https://files.pythonhosted.org/packages/8a/9a/2f6024a0c5995243cd63afdeb3651c984f0d2bc727fd98066d40e141ad73/contourpy-1.3.3-cp313-cp313-win_arm64.whl", hash = "sha256:fd914713266421b7536de2bfa8181aa8c699432b6763a0ea64195ebe28bff6a9", size = 193524, upload-time = "2025-07-26T12:01:55.73Z" },
- { url = "https://files.pythonhosted.org/packages/c0/b3/f8a1a86bd3298513f500e5b1f5fd92b69896449f6cab6a146a5d52715479/contourpy-1.3.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:88df9880d507169449d434c293467418b9f6cbe82edd19284aa0409e7fdb933d", size = 306730, upload-time = "2025-07-26T12:01:57.051Z" },
- { url = "https://files.pythonhosted.org/packages/3f/11/4780db94ae62fc0c2053909b65dc3246bd7cecfc4f8a20d957ad43aa4ad8/contourpy-1.3.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d06bb1f751ba5d417047db62bca3c8fde202b8c11fb50742ab3ab962c81e8216", size = 287897, upload-time = "2025-07-26T12:01:58.663Z" },
- { url = "https://files.pythonhosted.org/packages/ae/15/e59f5f3ffdd6f3d4daa3e47114c53daabcb18574a26c21f03dc9e4e42ff0/contourpy-1.3.3-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e4e6b05a45525357e382909a4c1600444e2a45b4795163d3b22669285591c1ae", size = 326751, upload-time = "2025-07-26T12:02:00.343Z" },
- { url = "https://files.pythonhosted.org/packages/0f/81/03b45cfad088e4770b1dcf72ea78d3802d04200009fb364d18a493857210/contourpy-1.3.3-cp313-cp313t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ab3074b48c4e2cf1a960e6bbeb7f04566bf36b1861d5c9d4d8ac04b82e38ba20", size = 375486, upload-time = "2025-07-26T12:02:02.128Z" },
- { url = "https://files.pythonhosted.org/packages/0c/ba/49923366492ffbdd4486e970d421b289a670ae8cf539c1ea9a09822b371a/contourpy-1.3.3-cp313-cp313t-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6c3d53c796f8647d6deb1abe867daeb66dcc8a97e8455efa729516b997b8ed99", size = 388106, upload-time = "2025-07-26T12:02:03.615Z" },
- { url = "https://files.pythonhosted.org/packages/9f/52/5b00ea89525f8f143651f9f03a0df371d3cbd2fccd21ca9b768c7a6500c2/contourpy-1.3.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50ed930df7289ff2a8d7afeb9603f8289e5704755c7e5c3bbd929c90c817164b", size = 352548, upload-time = "2025-07-26T12:02:05.165Z" },
- { url = "https://files.pythonhosted.org/packages/32/1d/a209ec1a3a3452d490f6b14dd92e72280c99ae3d1e73da74f8277d4ee08f/contourpy-1.3.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4feffb6537d64b84877da813a5c30f1422ea5739566abf0bd18065ac040e120a", size = 1322297, upload-time = "2025-07-26T12:02:07.379Z" },
- { url = "https://files.pythonhosted.org/packages/bc/9e/46f0e8ebdd884ca0e8877e46a3f4e633f6c9c8c4f3f6e72be3fe075994aa/contourpy-1.3.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2b7e9480ffe2b0cd2e787e4df64270e3a0440d9db8dc823312e2c940c167df7e", size = 1391023, upload-time = "2025-07-26T12:02:10.171Z" },
- { url = "https://files.pythonhosted.org/packages/b9/70/f308384a3ae9cd2209e0849f33c913f658d3326900d0ff5d378d6a1422d2/contourpy-1.3.3-cp313-cp313t-win32.whl", hash = "sha256:283edd842a01e3dcd435b1c5116798d661378d83d36d337b8dde1d16a5fc9ba3", size = 196157, upload-time = "2025-07-26T12:02:11.488Z" },
- { url = "https://files.pythonhosted.org/packages/b2/dd/880f890a6663b84d9e34a6f88cded89d78f0091e0045a284427cb6b18521/contourpy-1.3.3-cp313-cp313t-win_amd64.whl", hash = "sha256:87acf5963fc2b34825e5b6b048f40e3635dd547f590b04d2ab317c2619ef7ae8", size = 240570, upload-time = "2025-07-26T12:02:12.754Z" },
- { url = "https://files.pythonhosted.org/packages/80/99/2adc7d8ffead633234817ef8e9a87115c8a11927a94478f6bb3d3f4d4f7d/contourpy-1.3.3-cp313-cp313t-win_arm64.whl", hash = "sha256:3c30273eb2a55024ff31ba7d052dde990d7d8e5450f4bbb6e913558b3d6c2301", size = 199713, upload-time = "2025-07-26T12:02:14.4Z" },
- { url = "https://files.pythonhosted.org/packages/72/8b/4546f3ab60f78c514ffb7d01a0bd743f90de36f0019d1be84d0a708a580a/contourpy-1.3.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fde6c716d51c04b1c25d0b90364d0be954624a0ee9d60e23e850e8d48353d07a", size = 292189, upload-time = "2025-07-26T12:02:16.095Z" },
- { url = "https://files.pythonhosted.org/packages/fd/e1/3542a9cb596cadd76fcef413f19c79216e002623158befe6daa03dbfa88c/contourpy-1.3.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:cbedb772ed74ff5be440fa8eee9bd49f64f6e3fc09436d9c7d8f1c287b121d77", size = 273251, upload-time = "2025-07-26T12:02:17.524Z" },
- { url = "https://files.pythonhosted.org/packages/b1/71/f93e1e9471d189f79d0ce2497007731c1e6bf9ef6d1d61b911430c3db4e5/contourpy-1.3.3-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:22e9b1bd7a9b1d652cd77388465dc358dafcd2e217d35552424aa4f996f524f5", size = 335810, upload-time = "2025-07-26T12:02:18.9Z" },
- { url = "https://files.pythonhosted.org/packages/91/f9/e35f4c1c93f9275d4e38681a80506b5510e9327350c51f8d4a5a724d178c/contourpy-1.3.3-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a22738912262aa3e254e4f3cb079a95a67132fc5a063890e224393596902f5a4", size = 382871, upload-time = "2025-07-26T12:02:20.418Z" },
- { url = "https://files.pythonhosted.org/packages/b5/71/47b512f936f66a0a900d81c396a7e60d73419868fba959c61efed7a8ab46/contourpy-1.3.3-cp314-cp314-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:afe5a512f31ee6bd7d0dda52ec9864c984ca3d66664444f2d72e0dc4eb832e36", size = 386264, upload-time = "2025-07-26T12:02:21.916Z" },
- { url = "https://files.pythonhosted.org/packages/04/5f/9ff93450ba96b09c7c2b3f81c94de31c89f92292f1380261bd7195bea4ea/contourpy-1.3.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f64836de09927cba6f79dcd00fdd7d5329f3fccc633468507079c829ca4db4e3", size = 363819, upload-time = "2025-07-26T12:02:23.759Z" },
- { url = "https://files.pythonhosted.org/packages/3e/a6/0b185d4cc480ee494945cde102cb0149ae830b5fa17bf855b95f2e70ad13/contourpy-1.3.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1fd43c3be4c8e5fd6e4f2baeae35ae18176cf2e5cced681cca908addf1cdd53b", size = 1333650, upload-time = "2025-07-26T12:02:26.181Z" },
- { url = "https://files.pythonhosted.org/packages/43/d7/afdc95580ca56f30fbcd3060250f66cedbde69b4547028863abd8aa3b47e/contourpy-1.3.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6afc576f7b33cf00996e5c1102dc2a8f7cc89e39c0b55df93a0b78c1bd992b36", size = 1404833, upload-time = "2025-07-26T12:02:28.782Z" },
- { url = "https://files.pythonhosted.org/packages/e2/e2/366af18a6d386f41132a48f033cbd2102e9b0cf6345d35ff0826cd984566/contourpy-1.3.3-cp314-cp314-win32.whl", hash = "sha256:66c8a43a4f7b8df8b71ee1840e4211a3c8d93b214b213f590e18a1beca458f7d", size = 189692, upload-time = "2025-07-26T12:02:30.128Z" },
- { url = "https://files.pythonhosted.org/packages/7d/c2/57f54b03d0f22d4044b8afb9ca0e184f8b1afd57b4f735c2fa70883dc601/contourpy-1.3.3-cp314-cp314-win_amd64.whl", hash = "sha256:cf9022ef053f2694e31d630feaacb21ea24224be1c3ad0520b13d844274614fd", size = 232424, upload-time = "2025-07-26T12:02:31.395Z" },
- { url = "https://files.pythonhosted.org/packages/18/79/a9416650df9b525737ab521aa181ccc42d56016d2123ddcb7b58e926a42c/contourpy-1.3.3-cp314-cp314-win_arm64.whl", hash = "sha256:95b181891b4c71de4bb404c6621e7e2390745f887f2a026b2d99e92c17892339", size = 198300, upload-time = "2025-07-26T12:02:32.956Z" },
- { url = "https://files.pythonhosted.org/packages/1f/42/38c159a7d0f2b7b9c04c64ab317042bb6952b713ba875c1681529a2932fe/contourpy-1.3.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:33c82d0138c0a062380332c861387650c82e4cf1747aaa6938b9b6516762e772", size = 306769, upload-time = "2025-07-26T12:02:34.2Z" },
- { url = "https://files.pythonhosted.org/packages/c3/6c/26a8205f24bca10974e77460de68d3d7c63e282e23782f1239f226fcae6f/contourpy-1.3.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ea37e7b45949df430fe649e5de8351c423430046a2af20b1c1961cae3afcda77", size = 287892, upload-time = "2025-07-26T12:02:35.807Z" },
- { url = "https://files.pythonhosted.org/packages/66/06/8a475c8ab718ebfd7925661747dbb3c3ee9c82ac834ccb3570be49d129f4/contourpy-1.3.3-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d304906ecc71672e9c89e87c4675dc5c2645e1f4269a5063b99b0bb29f232d13", size = 326748, upload-time = "2025-07-26T12:02:37.193Z" },
- { url = "https://files.pythonhosted.org/packages/b4/a3/c5ca9f010a44c223f098fccd8b158bb1cb287378a31ac141f04730dc49be/contourpy-1.3.3-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca658cd1a680a5c9ea96dc61cdbae1e85c8f25849843aa799dfd3cb370ad4fbe", size = 375554, upload-time = "2025-07-26T12:02:38.894Z" },
- { url = "https://files.pythonhosted.org/packages/80/5b/68bd33ae63fac658a4145088c1e894405e07584a316738710b636c6d0333/contourpy-1.3.3-cp314-cp314t-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ab2fd90904c503739a75b7c8c5c01160130ba67944a7b77bbf36ef8054576e7f", size = 388118, upload-time = "2025-07-26T12:02:40.642Z" },
- { url = "https://files.pythonhosted.org/packages/40/52/4c285a6435940ae25d7410a6c36bda5145839bc3f0beb20c707cda18b9d2/contourpy-1.3.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7301b89040075c30e5768810bc96a8e8d78085b47d8be6e4c3f5a0b4ed478a0", size = 352555, upload-time = "2025-07-26T12:02:42.25Z" },
- { url = "https://files.pythonhosted.org/packages/24/ee/3e81e1dd174f5c7fefe50e85d0892de05ca4e26ef1c9a59c2a57e43b865a/contourpy-1.3.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:2a2a8b627d5cc6b7c41a4beff6c5ad5eb848c88255fda4a8745f7e901b32d8e4", size = 1322295, upload-time = "2025-07-26T12:02:44.668Z" },
- { url = "https://files.pythonhosted.org/packages/3c/b2/6d913d4d04e14379de429057cd169e5e00f6c2af3bb13e1710bcbdb5da12/contourpy-1.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fd6ec6be509c787f1caf6b247f0b1ca598bef13f4ddeaa126b7658215529ba0f", size = 1391027, upload-time = "2025-07-26T12:02:47.09Z" },
- { url = "https://files.pythonhosted.org/packages/93/8a/68a4ec5c55a2971213d29a9374913f7e9f18581945a7a31d1a39b5d2dfe5/contourpy-1.3.3-cp314-cp314t-win32.whl", hash = "sha256:e74a9a0f5e3fff48fb5a7f2fd2b9b70a3fe014a67522f79b7cca4c0c7e43c9ae", size = 202428, upload-time = "2025-07-26T12:02:48.691Z" },
- { url = "https://files.pythonhosted.org/packages/fa/96/fd9f641ffedc4fa3ace923af73b9d07e869496c9cc7a459103e6e978992f/contourpy-1.3.3-cp314-cp314t-win_amd64.whl", hash = "sha256:13b68d6a62db8eafaebb8039218921399baf6e47bf85006fd8529f2a08ef33fc", size = 250331, upload-time = "2025-07-26T12:02:50.137Z" },
- { url = "https://files.pythonhosted.org/packages/ae/8c/469afb6465b853afff216f9528ffda78a915ff880ed58813ba4faf4ba0b6/contourpy-1.3.3-cp314-cp314t-win_arm64.whl", hash = "sha256:b7448cb5a725bb1e35ce88771b86fba35ef418952474492cf7c764059933ff8b", size = 203831, upload-time = "2025-07-26T12:02:51.449Z" },
- { url = "https://files.pythonhosted.org/packages/a5/29/8dcfe16f0107943fa92388c23f6e05cff0ba58058c4c95b00280d4c75a14/contourpy-1.3.3-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cd5dfcaeb10f7b7f9dc8941717c6c2ade08f587be2226222c12b25f0483ed497", size = 278809, upload-time = "2025-07-26T12:02:52.74Z" },
- { url = "https://files.pythonhosted.org/packages/85/a9/8b37ef4f7dafeb335daee3c8254645ef5725be4d9c6aa70b50ec46ef2f7e/contourpy-1.3.3-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:0c1fc238306b35f246d61a1d416a627348b5cf0648648a031e14bb8705fcdfe8", size = 261593, upload-time = "2025-07-26T12:02:54.037Z" },
- { url = "https://files.pythonhosted.org/packages/0a/59/ebfb8c677c75605cc27f7122c90313fd2f375ff3c8d19a1694bda74aaa63/contourpy-1.3.3-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70f9aad7de812d6541d29d2bbf8feb22ff7e1c299523db288004e3157ff4674e", size = 302202, upload-time = "2025-07-26T12:02:55.947Z" },
- { url = "https://files.pythonhosted.org/packages/3c/37/21972a15834d90bfbfb009b9d004779bd5a07a0ec0234e5ba8f64d5736f4/contourpy-1.3.3-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ed3657edf08512fc3fe81b510e35c2012fbd3081d2e26160f27ca28affec989", size = 329207, upload-time = "2025-07-26T12:02:57.468Z" },
- { url = "https://files.pythonhosted.org/packages/0c/58/bd257695f39d05594ca4ad60df5bcb7e32247f9951fd09a9b8edb82d1daa/contourpy-1.3.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:3d1a3799d62d45c18bafd41c5fa05120b96a28079f2393af559b843d1a966a77", size = 225315, upload-time = "2025-07-26T12:02:58.801Z" },
-]
-
-[[package]]
-name = "cuda-bindings"
-version = "12.9.4"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "cuda-pathfinder", marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/37/31/bfcc870f69c6a017c4ad5c42316207fc7551940db6f3639aa4466ec5faf3/cuda_bindings-12.9.4-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a022c96b8bd847e8dc0675523431149a4c3e872f440e3002213dbb9e08f0331a", size = 11800959, upload-time = "2025-10-21T14:51:26.458Z" },
- { url = "https://files.pythonhosted.org/packages/7a/d8/b546104b8da3f562c1ff8ab36d130c8fe1dd6a045ced80b4f6ad74f7d4e1/cuda_bindings-12.9.4-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d3c842c2a4303b2a580fe955018e31aea30278be19795ae05226235268032e5", size = 12148218, upload-time = "2025-10-21T14:51:28.855Z" },
- { url = "https://files.pythonhosted.org/packages/a9/2b/ebcbb60aa6dba830474cd360c42e10282f7a343c0a1f58d24fbd3b7c2d77/cuda_bindings-12.9.4-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a6a429dc6c13148ff1e27c44f40a3dd23203823e637b87fd0854205195988306", size = 11840604, upload-time = "2025-10-21T14:51:34.565Z" },
- { url = "https://files.pythonhosted.org/packages/45/e7/b47792cc2d01c7e1d37c32402182524774dadd2d26339bd224e0e913832e/cuda_bindings-12.9.4-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c912a3d9e6b6651853eed8eed96d6800d69c08e94052c292fec3f282c5a817c9", size = 12210593, upload-time = "2025-10-21T14:51:36.574Z" },
- { url = "https://files.pythonhosted.org/packages/0c/c2/65bfd79292b8ff18be4dd7f7442cea37bcbc1a228c1886f1dea515c45b67/cuda_bindings-12.9.4-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:694ba35023846625ef471257e6b5a4bc8af690f961d197d77d34b1d1db393f56", size = 11760260, upload-time = "2025-10-21T14:51:40.79Z" },
- { url = "https://files.pythonhosted.org/packages/a9/c1/dabe88f52c3e3760d861401bb994df08f672ec893b8f7592dc91626adcf3/cuda_bindings-12.9.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fda147a344e8eaeca0c6ff113d2851ffca8f7dfc0a6c932374ee5c47caa649c8", size = 12151019, upload-time = "2025-10-21T14:51:43.167Z" },
- { url = "https://files.pythonhosted.org/packages/05/8b/b4b2d1c7775fa403b64333e720cfcfccef8dcb9cdeb99947061ca5a77628/cuda_bindings-12.9.4-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cf8bfaedc238f3b115d957d1fd6562b7e8435ba57f6d0e2f87d0e7149ccb2da5", size = 11570071, upload-time = "2025-10-21T14:51:47.472Z" },
- { url = "https://files.pythonhosted.org/packages/63/56/e465c31dc9111be3441a9ba7df1941fe98f4aa6e71e8788a3fb4534ce24d/cuda_bindings-12.9.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:32bdc5a76906be4c61eb98f546a6786c5773a881f3b166486449b5d141e4a39f", size = 11906628, upload-time = "2025-10-21T14:51:49.905Z" },
- { url = "https://files.pythonhosted.org/packages/ec/07/6aff13bc1e977e35aaa6b22f52b172e2890c608c6db22438cf7ed2bf43a6/cuda_bindings-12.9.4-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3adf4958dcf68ae7801a59b73fb00a8b37f8d0595060d66ceae111b1002de38d", size = 11566797, upload-time = "2025-10-21T14:51:54.581Z" },
- { url = "https://files.pythonhosted.org/packages/a3/84/1e6be415e37478070aeeee5884c2022713c1ecc735e6d82d744de0252eee/cuda_bindings-12.9.4-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:56e0043c457a99ac473ddc926fe0dc4046694d99caef633e92601ab52cbe17eb", size = 11925991, upload-time = "2025-10-21T14:51:56.535Z" },
- { url = "https://files.pythonhosted.org/packages/1e/b5/96a6696e20c4ffd2b327f54c7d0fde2259bdb998d045c25d5dedbbe30290/cuda_bindings-12.9.4-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f53a7f453d4b2643d8663d036bafe29b5ba89eb904c133180f295df6dc151e5", size = 11624530, upload-time = "2025-10-21T14:52:01.539Z" },
- { url = "https://files.pythonhosted.org/packages/d1/af/6dfd8f2ed90b1d4719bc053ff8940e494640fe4212dc3dd72f383e4992da/cuda_bindings-12.9.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8b72ee72a9cc1b531db31eebaaee5c69a8ec3500e32c6933f2d3b15297b53686", size = 11922703, upload-time = "2025-10-21T14:52:03.585Z" },
- { url = "https://files.pythonhosted.org/packages/39/73/d2fc40c043bac699c3880bf88d3cebe9d88410cd043795382826c93a89f0/cuda_bindings-12.9.4-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:20f2699d61d724de3eb3f3369d57e2b245f93085cab44fd37c3bea036cea1a6f", size = 11565056, upload-time = "2025-10-21T14:52:08.338Z" },
- { url = "https://files.pythonhosted.org/packages/6c/19/90ac264acc00f6df8a49378eedec9fd2db3061bf9263bf9f39fd3d8377c3/cuda_bindings-12.9.4-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d80bffc357df9988dca279734bc9674c3934a654cab10cadeed27ce17d8635ee", size = 11924658, upload-time = "2025-10-21T14:52:10.411Z" },
- { url = "https://files.pythonhosted.org/packages/c3/78/892671d08634f125c61c6ebc33fffced4aa5c330320f61da9605f8e9cbba/cuda_bindings-12.9.4-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:893ca68114b5b769c1d4c02583b91ed22691887c3ed513b59467d23540104db4", size = 11802431, upload-time = "2025-10-21T14:52:14.358Z" },
- { url = "https://files.pythonhosted.org/packages/53/1d/f7f2bcffe788aebd4325a34d8a976b219a0751c06707aa89c9e70355ceae/cuda_bindings-12.9.4-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9866ceec83e39337d1a1d64837864c964ad902992478caa288a0bc1be95f21aa", size = 12152579, upload-time = "2025-10-21T14:52:16.731Z" },
-]
-
-[[package]]
-name = "cuda-pathfinder"
-version = "1.5.0"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/93/66/0c02bd330e7d976f83fa68583d6198d76f23581bcbb5c0e98a6148f326e5/cuda_pathfinder-1.5.0-py3-none-any.whl", hash = "sha256:498f90a9e9de36044a7924742aecce11c50c49f735f1bc53e05aa46de9ea4110", size = 49739, upload-time = "2026-03-24T21:14:30.869Z" },
-]
-
-[[package]]
-name = "cycler"
-version = "0.12.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a9/95/a3dbbb5028f35eafb79008e7522a75244477d2838f38cbb722248dabc2a8/cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c", size = 7615, upload-time = "2023-10-07T05:32:18.335Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321, upload-time = "2023-10-07T05:32:16.783Z" },
-]
-
-[[package]]
-name = "debugpy"
-version = "1.8.20"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/e0/b7/cd8080344452e4874aae67c40d8940e2b4d47b01601a8fd9f44786c757c7/debugpy-1.8.20.tar.gz", hash = "sha256:55bc8701714969f1ab89a6d5f2f3d40c36f91b2cbe2f65d98bf8196f6a6a2c33", size = 1645207, upload-time = "2026-01-29T23:03:28.199Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/71/be/8bd693a0b9d53d48c8978fa5d889e06f3b5b03e45fd1ea1e78267b4887cb/debugpy-1.8.20-cp310-cp310-macosx_15_0_x86_64.whl", hash = "sha256:157e96ffb7f80b3ad36d808646198c90acb46fdcfd8bb1999838f0b6f2b59c64", size = 2099192, upload-time = "2026-01-29T23:03:29.707Z" },
- { url = "https://files.pythonhosted.org/packages/77/1b/85326d07432086a06361d493d2743edd0c4fc2ef62162be7f8618441ac37/debugpy-1.8.20-cp310-cp310-manylinux_2_34_x86_64.whl", hash = "sha256:c1178ae571aff42e61801a38b007af504ec8e05fde1c5c12e5a7efef21009642", size = 3088568, upload-time = "2026-01-29T23:03:31.467Z" },
- { url = "https://files.pythonhosted.org/packages/e8/60/3e08462ee3eccd10998853eb35947c416e446bfe2bc37dbb886b9044586c/debugpy-1.8.20-cp310-cp310-win32.whl", hash = "sha256:c29dd9d656c0fbd77906a6e6a82ae4881514aa3294b94c903ff99303e789b4a2", size = 5284399, upload-time = "2026-01-29T23:03:33.678Z" },
- { url = "https://files.pythonhosted.org/packages/72/43/09d49106e770fe558ced5e80df2e3c2ebee10e576eda155dcc5670473663/debugpy-1.8.20-cp310-cp310-win_amd64.whl", hash = "sha256:3ca85463f63b5dd0aa7aaa933d97cbc47c174896dcae8431695872969f981893", size = 5316388, upload-time = "2026-01-29T23:03:35.095Z" },
- { url = "https://files.pythonhosted.org/packages/51/56/c3baf5cbe4dd77427fd9aef99fcdade259ad128feeb8a786c246adb838e5/debugpy-1.8.20-cp311-cp311-macosx_15_0_universal2.whl", hash = "sha256:eada6042ad88fa1571b74bd5402ee8b86eded7a8f7b827849761700aff171f1b", size = 2208318, upload-time = "2026-01-29T23:03:36.481Z" },
- { url = "https://files.pythonhosted.org/packages/9a/7d/4fa79a57a8e69fe0d9763e98d1110320f9ecd7f1f362572e3aafd7417c9d/debugpy-1.8.20-cp311-cp311-manylinux_2_34_x86_64.whl", hash = "sha256:7de0b7dfeedc504421032afba845ae2a7bcc32ddfb07dae2c3ca5442f821c344", size = 3171493, upload-time = "2026-01-29T23:03:37.775Z" },
- { url = "https://files.pythonhosted.org/packages/7d/f2/1e8f8affe51e12a26f3a8a8a4277d6e60aa89d0a66512f63b1e799d424a4/debugpy-1.8.20-cp311-cp311-win32.whl", hash = "sha256:773e839380cf459caf73cc533ea45ec2737a5cc184cf1b3b796cd4fd98504fec", size = 5209240, upload-time = "2026-01-29T23:03:39.109Z" },
- { url = "https://files.pythonhosted.org/packages/d5/92/1cb532e88560cbee973396254b21bece8c5d7c2ece958a67afa08c9f10dc/debugpy-1.8.20-cp311-cp311-win_amd64.whl", hash = "sha256:1f7650546e0eded1902d0f6af28f787fa1f1dbdbc97ddabaf1cd963a405930cb", size = 5233481, upload-time = "2026-01-29T23:03:40.659Z" },
- { url = "https://files.pythonhosted.org/packages/14/57/7f34f4736bfb6e00f2e4c96351b07805d83c9a7b33d28580ae01374430f7/debugpy-1.8.20-cp312-cp312-macosx_15_0_universal2.whl", hash = "sha256:4ae3135e2089905a916909ef31922b2d733d756f66d87345b3e5e52b7a55f13d", size = 2550686, upload-time = "2026-01-29T23:03:42.023Z" },
- { url = "https://files.pythonhosted.org/packages/ab/78/b193a3975ca34458f6f0e24aaf5c3e3da72f5401f6054c0dfd004b41726f/debugpy-1.8.20-cp312-cp312-manylinux_2_34_x86_64.whl", hash = "sha256:88f47850a4284b88bd2bfee1f26132147d5d504e4e86c22485dfa44b97e19b4b", size = 4310588, upload-time = "2026-01-29T23:03:43.314Z" },
- { url = "https://files.pythonhosted.org/packages/c1/55/f14deb95eaf4f30f07ef4b90a8590fc05d9e04df85ee379712f6fb6736d7/debugpy-1.8.20-cp312-cp312-win32.whl", hash = "sha256:4057ac68f892064e5f98209ab582abfee3b543fb55d2e87610ddc133a954d390", size = 5331372, upload-time = "2026-01-29T23:03:45.526Z" },
- { url = "https://files.pythonhosted.org/packages/a1/39/2bef246368bd42f9bd7cba99844542b74b84dacbdbea0833e610f384fee8/debugpy-1.8.20-cp312-cp312-win_amd64.whl", hash = "sha256:a1a8f851e7cf171330679ef6997e9c579ef6dd33c9098458bd9986a0f4ca52e3", size = 5372835, upload-time = "2026-01-29T23:03:47.245Z" },
- { url = "https://files.pythonhosted.org/packages/15/e2/fc500524cc6f104a9d049abc85a0a8b3f0d14c0a39b9c140511c61e5b40b/debugpy-1.8.20-cp313-cp313-macosx_15_0_universal2.whl", hash = "sha256:5dff4bb27027821fdfcc9e8f87309a28988231165147c31730128b1c983e282a", size = 2539560, upload-time = "2026-01-29T23:03:48.738Z" },
- { url = "https://files.pythonhosted.org/packages/90/83/fb33dcea789ed6018f8da20c5a9bc9d82adc65c0c990faed43f7c955da46/debugpy-1.8.20-cp313-cp313-manylinux_2_34_x86_64.whl", hash = "sha256:84562982dd7cf5ebebfdea667ca20a064e096099997b175fe204e86817f64eaf", size = 4293272, upload-time = "2026-01-29T23:03:50.169Z" },
- { url = "https://files.pythonhosted.org/packages/a6/25/b1e4a01bfb824d79a6af24b99ef291e24189080c93576dfd9b1a2815cd0f/debugpy-1.8.20-cp313-cp313-win32.whl", hash = "sha256:da11dea6447b2cadbf8ce2bec59ecea87cc18d2c574980f643f2d2dfe4862393", size = 5331208, upload-time = "2026-01-29T23:03:51.547Z" },
- { url = "https://files.pythonhosted.org/packages/13/f7/a0b368ce54ffff9e9028c098bd2d28cfc5b54f9f6c186929083d4c60ba58/debugpy-1.8.20-cp313-cp313-win_amd64.whl", hash = "sha256:eb506e45943cab2efb7c6eafdd65b842f3ae779f020c82221f55aca9de135ed7", size = 5372930, upload-time = "2026-01-29T23:03:53.585Z" },
- { url = "https://files.pythonhosted.org/packages/33/2e/f6cb9a8a13f5058f0a20fe09711a7b726232cd5a78c6a7c05b2ec726cff9/debugpy-1.8.20-cp314-cp314-macosx_15_0_universal2.whl", hash = "sha256:9c74df62fc064cd5e5eaca1353a3ef5a5d50da5eb8058fcef63106f7bebe6173", size = 2538066, upload-time = "2026-01-29T23:03:54.999Z" },
- { url = "https://files.pythonhosted.org/packages/c5/56/6ddca50b53624e1ca3ce1d1e49ff22db46c47ea5fb4c0cc5c9b90a616364/debugpy-1.8.20-cp314-cp314-manylinux_2_34_x86_64.whl", hash = "sha256:077a7447589ee9bc1ff0cdf443566d0ecf540ac8aa7333b775ebcb8ce9f4ecad", size = 4269425, upload-time = "2026-01-29T23:03:56.518Z" },
- { url = "https://files.pythonhosted.org/packages/c5/d9/d64199c14a0d4c476df46c82470a3ce45c8d183a6796cfb5e66533b3663c/debugpy-1.8.20-cp314-cp314-win32.whl", hash = "sha256:352036a99dd35053b37b7803f748efc456076f929c6a895556932eaf2d23b07f", size = 5331407, upload-time = "2026-01-29T23:03:58.481Z" },
- { url = "https://files.pythonhosted.org/packages/e0/d9/1f07395b54413432624d61524dfd98c1a7c7827d2abfdb8829ac92638205/debugpy-1.8.20-cp314-cp314-win_amd64.whl", hash = "sha256:a98eec61135465b062846112e5ecf2eebb855305acc1dfbae43b72903b8ab5be", size = 5372521, upload-time = "2026-01-29T23:03:59.864Z" },
- { url = "https://files.pythonhosted.org/packages/2b/6b/668f21567e3250463beb6a401e7d598baa2a0907224000d7f68b9442c243/debugpy-1.8.20-cp39-cp39-macosx_15_0_x86_64.whl", hash = "sha256:bff8990f040dacb4c314864da95f7168c5a58a30a66e0eea0fb85e2586a92cd6", size = 2100484, upload-time = "2026-01-29T23:04:09.929Z" },
- { url = "https://files.pythonhosted.org/packages/cf/49/223143d1da586b891f35a45515f152742ad85bfc10d2e02e697f65c83b32/debugpy-1.8.20-cp39-cp39-manylinux_2_34_x86_64.whl", hash = "sha256:70ad9ae09b98ac307b82c16c151d27ee9d68ae007a2e7843ba621b5ce65333b5", size = 3081272, upload-time = "2026-01-29T23:04:11.664Z" },
- { url = "https://files.pythonhosted.org/packages/b1/24/9f219c9290fe8bee4f63f9af8ebac440c802e6181d7f39a79abcb5fdff2f/debugpy-1.8.20-cp39-cp39-win32.whl", hash = "sha256:9eeed9f953f9a23850c85d440bf51e3c56ed5d25f8560eeb29add815bd32f7ee", size = 5285196, upload-time = "2026-01-29T23:04:13.105Z" },
- { url = "https://files.pythonhosted.org/packages/ba/f3/4a12d7b1b09e3b79ba6e3edfa0c677b8b8bdf110bc4b3607e0f29fb4e8b3/debugpy-1.8.20-cp39-cp39-win_amd64.whl", hash = "sha256:760813b4fff517c75bfe7923033c107104e76acfef7bda011ffea8736e9a66f8", size = 5317163, upload-time = "2026-01-29T23:04:15.264Z" },
- { url = "https://files.pythonhosted.org/packages/e0/c3/7f67dea8ccf8fdcb9c99033bbe3e90b9e7395415843accb81428c441be2d/debugpy-1.8.20-py2.py3-none-any.whl", hash = "sha256:5be9bed9ae3be00665a06acaa48f8329d2b9632f15fd09f6a9a8c8d9907e54d7", size = 5337658, upload-time = "2026-01-29T23:04:17.404Z" },
-]
-
-[[package]]
-name = "decorator"
-version = "5.2.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" },
-]
-
-[[package]]
-name = "exceptiongroup"
-version = "1.3.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "typing-extensions", marker = "python_full_version < '3.11'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" },
-]
-
-[[package]]
-name = "filelock"
-version = "3.19.1"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/40/bb/0ab3e58d22305b6f5440629d20683af28959bf793d98d11950e305c1c326/filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58", size = 17687, upload-time = "2025-08-14T16:56:03.016Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/42/14/42b2651a2f46b022ccd948bca9f2d5af0fd8929c4eec235b8d6d844fbe67/filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d", size = 15988, upload-time = "2025-08-14T16:56:01.633Z" },
-]
-
-[[package]]
-name = "filelock"
-version = "3.25.2"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/94/b8/00651a0f559862f3bb7d6f7477b192afe3f583cc5e26403b44e59a55ab34/filelock-3.25.2.tar.gz", hash = "sha256:b64ece2b38f4ca29dd3e810287aa8c48182bbecd1ae6e9ae126c9b35f1382694", size = 40480, upload-time = "2026-03-11T20:45:38.487Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a4/a5/842ae8f0c08b61d6484b52f99a03510a3a72d23141942d216ebe81fefbce/filelock-3.25.2-py3-none-any.whl", hash = "sha256:ca8afb0da15f229774c9ad1b455ed96e85a81373065fb10446672f64444ddf70", size = 26759, upload-time = "2026-03-11T20:45:37.437Z" },
-]
-
-[[package]]
-name = "fonttools"
-version = "4.60.2"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/3e/c4/db6a7b5eb0656534c3aa2596c2c5e18830d74f1b9aa5aa8a7dff63a0b11d/fonttools-4.60.2.tar.gz", hash = "sha256:d29552e6b155ebfc685b0aecf8d429cb76c14ab734c22ef5d3dea6fdf800c92c", size = 3562254, upload-time = "2025-12-09T13:38:11.835Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ab/de/9e10a99fb3070accb8884886a41a4ce54e49bf2fa4fc63f48a6cf2061713/fonttools-4.60.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4e36fadcf7e8ca6e34d490eef86ed638d6fd9c55d2f514b05687622cfc4a7050", size = 2850403, upload-time = "2025-12-09T13:35:53.14Z" },
- { url = "https://files.pythonhosted.org/packages/e4/40/d5b369d1073b134f600a94a287e13b5bdea2191ba6347d813fa3da00e94a/fonttools-4.60.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6e500fc9c04bee749ceabfc20cb4903f6981c2139050d85720ea7ada61b75d5c", size = 2398629, upload-time = "2025-12-09T13:35:56.471Z" },
- { url = "https://files.pythonhosted.org/packages/7c/b5/123819369aaf99d1e4dc49f1de1925d4edc7379114d15a56a7dd2e9d56e6/fonttools-4.60.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:22efea5e784e1d1cd8d7b856c198e360a979383ebc6dea4604743b56da1cbc34", size = 4893471, upload-time = "2025-12-09T13:35:58.927Z" },
- { url = "https://files.pythonhosted.org/packages/24/29/f8f8acccb9716b899be4be45e9ce770d6aa76327573863e68448183091b0/fonttools-4.60.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:677aa92d84d335e4d301d8ba04afca6f575316bc647b6782cb0921943fcb6343", size = 4854686, upload-time = "2025-12-09T13:36:01.767Z" },
- { url = "https://files.pythonhosted.org/packages/5a/0d/f3f51d7519f44f2dd5c9a60d7cd41185ebcee4348f073e515a3a93af15ff/fonttools-4.60.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:edd49d3defbf35476e78b61ff737ff5efea811acff68d44233a95a5a48252334", size = 4871233, upload-time = "2025-12-09T13:36:06.094Z" },
- { url = "https://files.pythonhosted.org/packages/cc/3f/4d4fd47d3bc40ab4d76718555185f8adffb5602ea572eac4bbf200c47d22/fonttools-4.60.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:126839492b69cecc5baf2bddcde60caab2ffafd867bbae2a88463fce6078ca3a", size = 4988936, upload-time = "2025-12-09T13:36:08.42Z" },
- { url = "https://files.pythonhosted.org/packages/01/6f/83bbdefa43f2c3ae206fd8c4b9a481f3c913eef871b1ce9a453069239e39/fonttools-4.60.2-cp310-cp310-win32.whl", hash = "sha256:ffcab6f5537136046ca902ed2491ab081ba271b07591b916289b7c27ff845f96", size = 2278044, upload-time = "2025-12-09T13:36:10.641Z" },
- { url = "https://files.pythonhosted.org/packages/d4/04/7d9a137e919d6c9ef26704b7f7b2580d9cfc5139597588227aacebc0e3b7/fonttools-4.60.2-cp310-cp310-win_amd64.whl", hash = "sha256:9c68b287c7ffcd29dd83b5f961004b2a54a862a88825d52ea219c6220309ba45", size = 2326522, upload-time = "2025-12-09T13:36:12.981Z" },
- { url = "https://files.pythonhosted.org/packages/e0/80/b7693d37c02417e162cc83cdd0b19a4f58be82c638b5d4ce4de2dae050c4/fonttools-4.60.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a2aed0a7931401b3875265717a24c726f87ecfedbb7b3426c2ca4d2812e281ae", size = 2847809, upload-time = "2025-12-09T13:36:14.884Z" },
- { url = "https://files.pythonhosted.org/packages/f9/9a/9c2c13bf8a6496ac21607d704e74e9cc68ebf23892cf924c9a8b5c7566b9/fonttools-4.60.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dea6868e9d2b816c9076cfea77754686f3c19149873bdbc5acde437631c15df1", size = 2397302, upload-time = "2025-12-09T13:36:17.151Z" },
- { url = "https://files.pythonhosted.org/packages/56/f6/ce38ff6b2d2d58f6fd981d32f3942365bfa30eadf2b47d93b2d48bf6097f/fonttools-4.60.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2fa27f34950aa1fe0f0b1abe25eed04770a3b3b34ad94e5ace82cc341589678a", size = 5054418, upload-time = "2025-12-09T13:36:19.062Z" },
- { url = "https://files.pythonhosted.org/packages/88/06/5353bea128ff39e857c31de3dd605725b4add956badae0b31bc9a50d4c8e/fonttools-4.60.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:13a53d479d187b09bfaa4a35ffcbc334fc494ff355f0a587386099cb66674f1e", size = 5031652, upload-time = "2025-12-09T13:36:21.206Z" },
- { url = "https://files.pythonhosted.org/packages/71/05/ebca836437f6ebd57edd6428e7eff584e683ff0556ddb17d62e3b731f46c/fonttools-4.60.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fac5e921d3bd0ca3bb8517dced2784f0742bc8ca28579a68b139f04ea323a779", size = 5030321, upload-time = "2025-12-09T13:36:23.515Z" },
- { url = "https://files.pythonhosted.org/packages/57/f9/eb9d2a2ce30c99f840c1cc3940729a970923cf39d770caf88909d98d516b/fonttools-4.60.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:648f4f9186fd7f1f3cd57dbf00d67a583720d5011feca67a5e88b3a491952cfb", size = 5154255, upload-time = "2025-12-09T13:36:25.879Z" },
- { url = "https://files.pythonhosted.org/packages/08/a2/088b6ceba8272a9abb629d3c08f9c1e35e5ce42db0ccfe0c1f9f03e60d1d/fonttools-4.60.2-cp311-cp311-win32.whl", hash = "sha256:3274e15fad871bead5453d5ce02658f6d0c7bc7e7021e2a5b8b04e2f9e40da1a", size = 2276300, upload-time = "2025-12-09T13:36:27.772Z" },
- { url = "https://files.pythonhosted.org/packages/de/2f/8e4c3d908cc5dade7bb1316ce48589f6a24460c1056fd4b8db51f1fa309a/fonttools-4.60.2-cp311-cp311-win_amd64.whl", hash = "sha256:91d058d5a483a1525b367803abb69de0923fbd45e1f82ebd000f5c8aa65bc78e", size = 2327574, upload-time = "2025-12-09T13:36:30.89Z" },
- { url = "https://files.pythonhosted.org/packages/c0/30/530c9eddcd1c39219dc0aaede2b5a4c8ab80e0bb88d1b3ffc12944c4aac3/fonttools-4.60.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e0164b7609d2b5c5dd4e044b8085b7bd7ca7363ef8c269a4ab5b5d4885a426b2", size = 2847196, upload-time = "2025-12-09T13:36:33.262Z" },
- { url = "https://files.pythonhosted.org/packages/19/2f/4077a482836d5bbe3bc9dac1c004d02ee227cf04ed62b0a2dfc41d4f0dfd/fonttools-4.60.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1dd3d9574fc595c1e97faccae0f264dc88784ddf7fbf54c939528378bacc0033", size = 2395842, upload-time = "2025-12-09T13:36:35.47Z" },
- { url = "https://files.pythonhosted.org/packages/dd/05/aae5bb99c5398f8ed4a8b784f023fd9dd3568f0bd5d5b21e35b282550f11/fonttools-4.60.2-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:98d0719f1b11c2817307d2da2e94296a3b2a3503f8d6252a101dca3ee663b917", size = 4949713, upload-time = "2025-12-09T13:36:37.874Z" },
- { url = "https://files.pythonhosted.org/packages/b4/37/49067349fc78ff0efbf09fadefe80ddf41473ca8f8a25400e3770da38328/fonttools-4.60.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9d3ea26957dd07209f207b4fff64c702efe5496de153a54d3b91007ec28904dd", size = 4999907, upload-time = "2025-12-09T13:36:39.853Z" },
- { url = "https://files.pythonhosted.org/packages/16/31/d0f11c758bd0db36b664c92a0f9dfdcc2d7313749aa7d6629805c6946f21/fonttools-4.60.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1ee301273b0850f3a515299f212898f37421f42ff9adfc341702582ca5073c13", size = 4939717, upload-time = "2025-12-09T13:36:43.075Z" },
- { url = "https://files.pythonhosted.org/packages/d9/bc/1cff0d69522e561bf1b99bee7c3911c08c25e919584827c3454a64651ce9/fonttools-4.60.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c6eb4694cc3b9c03b7c01d65a9cf35b577f21aa6abdbeeb08d3114b842a58153", size = 5089205, upload-time = "2025-12-09T13:36:45.468Z" },
- { url = "https://files.pythonhosted.org/packages/05/e6/fb174f0069b7122e19828c551298bfd34fdf9480535d2a6ac2ed37afacd3/fonttools-4.60.2-cp312-cp312-win32.whl", hash = "sha256:57f07b616c69c244cc1a5a51072eeef07dddda5ebef9ca5c6e9cf6d59ae65b70", size = 2264674, upload-time = "2025-12-09T13:36:49.238Z" },
- { url = "https://files.pythonhosted.org/packages/75/57/6552ffd6b582d3e6a9f01780c5275e6dfff1e70ca146101733aa1c12a129/fonttools-4.60.2-cp312-cp312-win_amd64.whl", hash = "sha256:310035802392f1fe5a7cf43d76f6ff4a24c919e4c72c0352e7b8176e2584b8a0", size = 2314701, upload-time = "2025-12-09T13:36:51.09Z" },
- { url = "https://files.pythonhosted.org/packages/2e/e4/8381d0ca6b6c6c484660b03517ec5b5b81feeefca3808726dece36c652a9/fonttools-4.60.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2bb5fd231e56ccd7403212636dcccffc96c5ae0d6f9e4721fa0a32cb2e3ca432", size = 2842063, upload-time = "2025-12-09T13:36:53.468Z" },
- { url = "https://files.pythonhosted.org/packages/b4/2c/4367117ee8ff4f4374787a1222da0bd413d80cf3522111f727a7b8f80d1d/fonttools-4.60.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:536b5fab7b6fec78ccf59b5c59489189d9d0a8b0d3a77ed1858be59afb096696", size = 2393792, upload-time = "2025-12-09T13:36:55.742Z" },
- { url = "https://files.pythonhosted.org/packages/49/b7/a76b6dffa193869e54e32ca2f9abb0d0e66784bc8a24e6f86eb093015481/fonttools-4.60.2-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6b9288fc38252ac86a9570f19313ecbc9ff678982e0f27c757a85f1f284d3400", size = 4924020, upload-time = "2025-12-09T13:36:58.229Z" },
- { url = "https://files.pythonhosted.org/packages/bd/4e/0078200e2259f0061c86a74075f507d64c43dd2ab38971956a5c0012d344/fonttools-4.60.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93fcb420791d839ef592eada2b69997c445d0ce9c969b5190f2e16828ec10607", size = 4980070, upload-time = "2025-12-09T13:37:00.311Z" },
- { url = "https://files.pythonhosted.org/packages/85/1f/d87c85a11cb84852c975251581862681e4a0c1c3bd456c648792203f311b/fonttools-4.60.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7916a381b094db4052ac284255186aebf74c5440248b78860cb41e300036f598", size = 4921411, upload-time = "2025-12-09T13:37:02.345Z" },
- { url = "https://files.pythonhosted.org/packages/75/c0/7efad650f5ed8e317c2633133ef3c64917e7adf2e4e2940c798f5d57ec6e/fonttools-4.60.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:58c8c393d5e16b15662cfc2d988491940458aa87894c662154f50c7b49440bef", size = 5063465, upload-time = "2025-12-09T13:37:04.836Z" },
- { url = "https://files.pythonhosted.org/packages/18/a8/750518c4f8cdd79393b386bc81226047ade80239e58c6c9f5dbe1fdd8ea1/fonttools-4.60.2-cp313-cp313-win32.whl", hash = "sha256:19c6e0afd8b02008caa0aa08ab896dfce5d0bcb510c49b2c499541d5cb95a963", size = 2263443, upload-time = "2025-12-09T13:37:06.762Z" },
- { url = "https://files.pythonhosted.org/packages/b8/22/026c60376f165981f80a0e90bd98a79ae3334e9d89a3d046c4d2e265c724/fonttools-4.60.2-cp313-cp313-win_amd64.whl", hash = "sha256:6a500dc59e11b2338c2dba1f8cf11a4ae8be35ec24af8b2628b8759a61457b76", size = 2313800, upload-time = "2025-12-09T13:37:08.713Z" },
- { url = "https://files.pythonhosted.org/packages/7e/ab/7cf1f5204e1366ddf9dc5cdc2789b571feb9eebcee0e3463c3f457df5f52/fonttools-4.60.2-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:9387c532acbe323bbf2a920f132bce3c408a609d5f9dcfc6532fbc7e37f8ccbb", size = 2841690, upload-time = "2025-12-09T13:37:10.696Z" },
- { url = "https://files.pythonhosted.org/packages/00/3c/0bf83c6f863cc8b934952567fa2bf737cfcec8fc4ffb59b3f93820095f89/fonttools-4.60.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e6f1c824185b5b8fb681297f315f26ae55abb0d560c2579242feea8236b1cfef", size = 2392191, upload-time = "2025-12-09T13:37:12.954Z" },
- { url = "https://files.pythonhosted.org/packages/00/f0/40090d148b8907fbea12e9bdf1ff149f30cdf1769e3b2c3e0dbf5106b88d/fonttools-4.60.2-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:55a3129d1e4030b1a30260f1b32fe76781b585fb2111d04a988e141c09eb6403", size = 4873503, upload-time = "2025-12-09T13:37:15.142Z" },
- { url = "https://files.pythonhosted.org/packages/dc/e0/d8b13f99e58b8c293781288ba62fe634f1f0697c9c4c0ae104d3215f3a10/fonttools-4.60.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b196e63753abc33b3b97a6fd6de4b7c4fef5552c0a5ba5e562be214d1e9668e0", size = 4968493, upload-time = "2025-12-09T13:37:18.272Z" },
- { url = "https://files.pythonhosted.org/packages/46/c5/960764d12c92bc225f02401d3067048cb7b282293d9e48e39fe2b0ec38a9/fonttools-4.60.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:de76c8d740fb55745f3b154f0470c56db92ae3be27af8ad6c2e88f1458260c9a", size = 4920015, upload-time = "2025-12-09T13:37:20.334Z" },
- { url = "https://files.pythonhosted.org/packages/4b/ab/839d8caf253d1eef3653ef4d34427d0326d17a53efaec9eb04056b670fff/fonttools-4.60.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6ba6303225c95998c9fda2d410aa792c3d2c1390a09df58d194b03e17583fa25", size = 5031165, upload-time = "2025-12-09T13:37:23.57Z" },
- { url = "https://files.pythonhosted.org/packages/de/bf/3bc862796a6841cbe0725bb5512d272239b809dba631a4b0301df885e62d/fonttools-4.60.2-cp314-cp314-win32.whl", hash = "sha256:0a89728ce10d7c816fedaa5380c06d2793e7a8a634d7ce16810e536c22047384", size = 2267526, upload-time = "2025-12-09T13:37:25.821Z" },
- { url = "https://files.pythonhosted.org/packages/fc/a1/c1909cacf00c76dc37b4743451561fbaaf7db4172c22a6d9394081d114c3/fonttools-4.60.2-cp314-cp314-win_amd64.whl", hash = "sha256:fa8446e6ab8bd778b82cb1077058a2addba86f30de27ab9cc18ed32b34bc8667", size = 2319096, upload-time = "2025-12-09T13:37:28.058Z" },
- { url = "https://files.pythonhosted.org/packages/29/b3/f66e71433f08e3a931b2b31a665aeed17fcc5e6911fc73529c70a232e421/fonttools-4.60.2-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:4063bc81ac5a4137642865cb63dd270e37b3cd1f55a07c0d6e41d072699ccca2", size = 2925167, upload-time = "2025-12-09T13:37:30.348Z" },
- { url = "https://files.pythonhosted.org/packages/2e/13/eeb491ff743594bbd0bee6e49422c03a59fe9c49002d3cc60eeb77414285/fonttools-4.60.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:ebfdb66fa69732ed604ab8e2a0431e6deff35e933a11d73418cbc7823d03b8e1", size = 2430923, upload-time = "2025-12-09T13:37:32.817Z" },
- { url = "https://files.pythonhosted.org/packages/b2/e5/db609f785e460796e53c4dbc3874a5f4948477f27beceb5e2d24b2537666/fonttools-4.60.2-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50b10b3b1a72d1d54c61b0e59239e1a94c0958f4a06a1febf97ce75388dd91a4", size = 4877729, upload-time = "2025-12-09T13:37:35.858Z" },
- { url = "https://files.pythonhosted.org/packages/5f/d6/85e4484dd4bfb03fee7bd370d65888cccbd3dee2681ee48c869dd5ccb23f/fonttools-4.60.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:beae16891a13b4a2ddec9b39b4de76092a3025e4d1c82362e3042b62295d5e4d", size = 5096003, upload-time = "2025-12-09T13:37:37.862Z" },
- { url = "https://files.pythonhosted.org/packages/30/49/1a98e44b71030b83d2046f981373b80571868259d98e6dae7bc20099dac6/fonttools-4.60.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:522f017fdb3766fd5d2d321774ef351cc6ce88ad4e6ac9efe643e4a2b9d528db", size = 4974410, upload-time = "2025-12-09T13:37:40.166Z" },
- { url = "https://files.pythonhosted.org/packages/42/07/d6f775d950ee8a841012472c7303f8819423d8cc3b4530915de7265ebfa2/fonttools-4.60.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:82cceceaf9c09a965a75b84a4b240dd3768e596ffb65ef53852681606fe7c9ba", size = 5002036, upload-time = "2025-12-09T13:37:42.639Z" },
- { url = "https://files.pythonhosted.org/packages/73/f6/ba6458f83ce1a9f8c3b17bd8f7b8a2205a126aac1055796b7e7cfebbd38f/fonttools-4.60.2-cp314-cp314t-win32.whl", hash = "sha256:bbfbc918a75437fe7e6d64d1b1e1f713237df1cf00f3a36dedae910b2ba01cee", size = 2330985, upload-time = "2025-12-09T13:37:45.157Z" },
- { url = "https://files.pythonhosted.org/packages/91/24/fea0ba4d3a32d4ed1103a1098bfd99dc78b5fe3bb97202920744a37b73dc/fonttools-4.60.2-cp314-cp314t-win_amd64.whl", hash = "sha256:0e5cd9b0830f6550d58c84f3ab151a9892b50c4f9d538c5603c0ce6fff2eb3f1", size = 2396226, upload-time = "2025-12-09T13:37:47.355Z" },
- { url = "https://files.pythonhosted.org/packages/55/ae/a6d9446cb258d3fe87e311c2d7bacf8e8da3e5809fbdc3a8306db4f6b14e/fonttools-4.60.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a3c75b8b42f7f93906bdba9eb1197bb76aecbe9a0a7cf6feec75f7605b5e8008", size = 2857184, upload-time = "2025-12-09T13:37:49.96Z" },
- { url = "https://files.pythonhosted.org/packages/3a/f3/1b41d0b6a8b908aa07f652111155dd653ebbf0b3385e66562556c5206685/fonttools-4.60.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0f86c8c37bc0ec0b9c141d5e90c717ff614e93c187f06d80f18c7057097f71bc", size = 2401877, upload-time = "2025-12-09T13:37:52.307Z" },
- { url = "https://files.pythonhosted.org/packages/71/57/048fd781680c38b05c5463657d0d95d5f2391a51972176e175c01de29d42/fonttools-4.60.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fe905403fe59683b0e9a45f234af2866834376b8821f34633b1c76fb731b6311", size = 4878073, upload-time = "2025-12-09T13:37:56.477Z" },
- { url = "https://files.pythonhosted.org/packages/45/bb/363364f052a893cebd3d449588b21244a9d873620fda03ad92702d2e1bc7/fonttools-4.60.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:38ce703b60a906e421e12d9e3a7f064883f5e61bb23e8961f4be33cfe578500b", size = 4835385, upload-time = "2025-12-09T13:37:58.882Z" },
- { url = "https://files.pythonhosted.org/packages/1c/38/e392bb930b2436287e6021672345db26441bf1f85f1e98f8b9784334e41d/fonttools-4.60.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9e810c06f3e79185cecf120e58b343ea5a89b54dd695fd644446bcf8c026da5e", size = 4853084, upload-time = "2025-12-09T13:38:01.578Z" },
- { url = "https://files.pythonhosted.org/packages/65/60/0d77faeaecf7a3276a8a6dc49e2274357e6b3ed6a1774e2fdb2a7f142db0/fonttools-4.60.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:38faec8cc1d12122599814d15a402183f5123fb7608dac956121e7c6742aebc5", size = 4971144, upload-time = "2025-12-09T13:38:03.748Z" },
- { url = "https://files.pythonhosted.org/packages/ba/c7/6d3ac3afbcd598631bce24c3ecb919e7d0644a82fea8ddc4454312fc0be6/fonttools-4.60.2-cp39-cp39-win32.whl", hash = "sha256:80a45cf7bf659acb7b36578f300231873daba67bd3ca8cce181c73f861f14a37", size = 1499411, upload-time = "2025-12-09T13:38:05.586Z" },
- { url = "https://files.pythonhosted.org/packages/5a/1c/9dedf6420e23f9fa630bb97941839dddd2e1e57d1b2b85a902378dbe0bd2/fonttools-4.60.2-cp39-cp39-win_amd64.whl", hash = "sha256:c355d5972071938e1b1e0f5a1df001f68ecf1a62f34a3407dc8e0beccf052501", size = 1547943, upload-time = "2025-12-09T13:38:07.604Z" },
- { url = "https://files.pythonhosted.org/packages/79/6c/10280af05b44fafd1dff69422805061fa1af29270bc52dce031ac69540bf/fonttools-4.60.2-py3-none-any.whl", hash = "sha256:73cf92eeda67cf6ff10c8af56fc8f4f07c1647d989a979be9e388a49be26552a", size = 1144610, upload-time = "2025-12-09T13:38:09.5Z" },
-]
-
-[[package]]
-name = "fonttools"
-version = "4.62.1"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/9a/08/7012b00a9a5874311b639c3920270c36ee0c445b69d9989a85e5c92ebcb0/fonttools-4.62.1.tar.gz", hash = "sha256:e54c75fd6041f1122476776880f7c3c3295ffa31962dc6ebe2543c00dca58b5d", size = 3580737, upload-time = "2026-03-13T13:54:25.52Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/5a/ff/532ed43808b469c807e8cb6b21358da3fe6fd51486b3a8c93db0bb5d957f/fonttools-4.62.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ad5cca75776cd453b1b035b530e943334957ae152a36a88a320e779d61fc980c", size = 2873740, upload-time = "2026-03-13T13:52:11.822Z" },
- { url = "https://files.pythonhosted.org/packages/85/e4/2318d2b430562da7227010fb2bb029d2fa54d7b46443ae8942bab224e2a0/fonttools-4.62.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0b3ae47e8636156a9accff64c02c0924cbebad62854c4a6dbdc110cd5b4b341a", size = 2417649, upload-time = "2026-03-13T13:52:14.605Z" },
- { url = "https://files.pythonhosted.org/packages/4c/28/40f15523b5188598018e7956899fed94eb7debec89e2dd70cb4a8df90492/fonttools-4.62.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9b9e288b4da2f64fd6180644221749de651703e8d0c16bd4b719533a3a7d6e3", size = 4935213, upload-time = "2026-03-13T13:52:17.399Z" },
- { url = "https://files.pythonhosted.org/packages/42/09/7dbe3d7023f57d9b580cfa832109d521988112fd59dddfda3fddda8218f9/fonttools-4.62.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7bca7a1c1faf235ffe25d4f2e555246b4750220b38de8261d94ebc5ce8a23c23", size = 4892374, upload-time = "2026-03-13T13:52:20.175Z" },
- { url = "https://files.pythonhosted.org/packages/d1/2d/84509a2e32cb925371560ef5431365d8da2183c11d98e5b4b8b4e42426a5/fonttools-4.62.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b4e0fcf265ad26e487c56cb12a42dffe7162de708762db951e1b3f755319507d", size = 4911856, upload-time = "2026-03-13T13:52:22.777Z" },
- { url = "https://files.pythonhosted.org/packages/a5/80/df28131379eed93d9e6e6fccd3bf6e3d077bebbfe98cc83f21bbcd83ed02/fonttools-4.62.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2d850f66830a27b0d498ee05adb13a3781637b1826982cd7e2b3789ef0cc71ae", size = 5031712, upload-time = "2026-03-13T13:52:25.14Z" },
- { url = "https://files.pythonhosted.org/packages/3d/03/3c8f09aad64230cd6d921ae7a19f9603c36f70930b00459f112706f6769a/fonttools-4.62.1-cp310-cp310-win32.whl", hash = "sha256:486f32c8047ccd05652aba17e4a8819a3a9d78570eb8a0e3b4503142947880ed", size = 1507878, upload-time = "2026-03-13T13:52:28.149Z" },
- { url = "https://files.pythonhosted.org/packages/dd/ec/f53f626f8f3e89f4cadd8fc08f3452c8fd182c951ad5caa35efac22b29ab/fonttools-4.62.1-cp310-cp310-win_amd64.whl", hash = "sha256:5a648bde915fba9da05ae98856987ca91ba832949a9e2888b48c47ef8b96c5a9", size = 1556766, upload-time = "2026-03-13T13:52:30.814Z" },
- { url = "https://files.pythonhosted.org/packages/88/39/23ff32561ec8d45a4d48578b4d241369d9270dc50926c017570e60893701/fonttools-4.62.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:40975849bac44fb0b9253d77420c6d8b523ac4dcdcefeff6e4d706838a5b80f7", size = 2871039, upload-time = "2026-03-13T13:52:33.127Z" },
- { url = "https://files.pythonhosted.org/packages/24/7f/66d3f8a9338a9b67fe6e1739f47e1cd5cee78bd3bc1206ef9b0b982289a5/fonttools-4.62.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9dde91633f77fa576879a0c76b1d89de373cae751a98ddf0109d54e173b40f14", size = 2416346, upload-time = "2026-03-13T13:52:35.676Z" },
- { url = "https://files.pythonhosted.org/packages/aa/53/5276ceba7bff95da7793a07c5284e1da901cf00341ce5e2f3273056c0cca/fonttools-4.62.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6acb4109f8bee00fec985c8c7afb02299e35e9c94b57287f3ea542f28bd0b0a7", size = 5100897, upload-time = "2026-03-13T13:52:38.102Z" },
- { url = "https://files.pythonhosted.org/packages/cc/a1/40a5c4d8e28b0851d53a8eeeb46fbd73c325a2a9a165f290a5ed90e6c597/fonttools-4.62.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1c5c25671ce8805e0d080e2ffdeca7f1e86778c5cbfbeae86d7f866d8830517b", size = 5071078, upload-time = "2026-03-13T13:52:41.305Z" },
- { url = "https://files.pythonhosted.org/packages/e3/be/d378fca4c65ea1956fee6d90ace6e861776809cbbc5af22388a090c3c092/fonttools-4.62.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a5d8825e1140f04e6c99bb7d37a9e31c172f3bc208afbe02175339e699c710e1", size = 5076908, upload-time = "2026-03-13T13:52:44.122Z" },
- { url = "https://files.pythonhosted.org/packages/f8/d9/ae6a1d0693a4185a84605679c8a1f719a55df87b9c6e8e817bfdd9ef5936/fonttools-4.62.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:268abb1cb221e66c014acc234e872b7870d8b5d4657a83a8f4205094c32d2416", size = 5202275, upload-time = "2026-03-13T13:52:46.591Z" },
- { url = "https://files.pythonhosted.org/packages/54/6c/af95d9c4efb15cabff22642b608342f2bd67137eea6107202d91b5b03184/fonttools-4.62.1-cp311-cp311-win32.whl", hash = "sha256:942b03094d7edbb99bdf1ae7e9090898cad7bf9030b3d21f33d7072dbcb51a53", size = 2293075, upload-time = "2026-03-13T13:52:48.711Z" },
- { url = "https://files.pythonhosted.org/packages/d3/97/bf54c5b3f2be34e1f143e6db838dfdc54f2ffa3e68c738934c82f3b2a08d/fonttools-4.62.1-cp311-cp311-win_amd64.whl", hash = "sha256:e8514f4924375f77084e81467e63238b095abda5107620f49421c368a6017ed2", size = 2344593, upload-time = "2026-03-13T13:52:50.725Z" },
- { url = "https://files.pythonhosted.org/packages/47/d4/dbacced3953544b9a93088cc10ef2b596d348c983d5c67a404fa41ec51ba/fonttools-4.62.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:90365821debbd7db678809c7491ca4acd1e0779b9624cdc6ddaf1f31992bf974", size = 2870219, upload-time = "2026-03-13T13:52:53.664Z" },
- { url = "https://files.pythonhosted.org/packages/66/9e/a769c8e99b81e5a87ab7e5e7236684de4e96246aae17274e5347d11ebd78/fonttools-4.62.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:12859ff0b47dd20f110804c3e0d0970f7b832f561630cd879969011541a464a9", size = 2414891, upload-time = "2026-03-13T13:52:56.493Z" },
- { url = "https://files.pythonhosted.org/packages/69/64/f19a9e3911968c37e1e620e14dfc5778299e1474f72f4e57c5ec771d9489/fonttools-4.62.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c125ffa00c3d9003cdaaf7f2c79e6e535628093e14b5de1dccb08859b680936", size = 5033197, upload-time = "2026-03-13T13:52:59.179Z" },
- { url = "https://files.pythonhosted.org/packages/9b/8a/99c8b3c3888c5c474c08dbfd7c8899786de9604b727fcefb055b42c84bba/fonttools-4.62.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:149f7d84afca659d1a97e39a4778794a2f83bf344c5ee5134e09995086cc2392", size = 4988768, upload-time = "2026-03-13T13:53:02.761Z" },
- { url = "https://files.pythonhosted.org/packages/d1/c6/0f904540d3e6ab463c1243a0d803504826a11604c72dd58c2949796a1762/fonttools-4.62.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0aa72c43a601cfa9273bb1ae0518f1acadc01ee181a6fc60cd758d7fdadffc04", size = 4971512, upload-time = "2026-03-13T13:53:05.678Z" },
- { url = "https://files.pythonhosted.org/packages/29/0b/5cbef6588dc9bd6b5c9ad6a4d5a8ca384d0cea089da31711bbeb4f9654a6/fonttools-4.62.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:19177c8d96c7c36359266e571c5173bcee9157b59cfc8cb0153c5673dc5a3a7d", size = 5122723, upload-time = "2026-03-13T13:53:08.662Z" },
- { url = "https://files.pythonhosted.org/packages/4a/47/b3a5342d381595ef439adec67848bed561ab7fdb1019fa522e82101b7d9c/fonttools-4.62.1-cp312-cp312-win32.whl", hash = "sha256:a24decd24d60744ee8b4679d38e88b8303d86772053afc29b19d23bb8207803c", size = 2281278, upload-time = "2026-03-13T13:53:10.998Z" },
- { url = "https://files.pythonhosted.org/packages/28/b1/0c2ab56a16f409c6c8a68816e6af707827ad5d629634691ff60a52879792/fonttools-4.62.1-cp312-cp312-win_amd64.whl", hash = "sha256:9e7863e10b3de72376280b515d35b14f5eeed639d1aa7824f4cf06779ec65e42", size = 2331414, upload-time = "2026-03-13T13:53:13.992Z" },
- { url = "https://files.pythonhosted.org/packages/3b/56/6f389de21c49555553d6a5aeed5ac9767631497ac836c4f076273d15bd72/fonttools-4.62.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c22b1014017111c401469e3acc5433e6acf6ebcc6aa9efb538a533c800971c79", size = 2865155, upload-time = "2026-03-13T13:53:16.132Z" },
- { url = "https://files.pythonhosted.org/packages/03/c5/0e3966edd5ec668d41dfe418787726752bc07e2f5fd8c8f208615e61fa89/fonttools-4.62.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:68959f5fc58ed4599b44aad161c2837477d7f35f5f79402d97439974faebfebe", size = 2412802, upload-time = "2026-03-13T13:53:18.878Z" },
- { url = "https://files.pythonhosted.org/packages/52/94/e6ac4b44026de7786fe46e3bfa0c87e51d5d70a841054065d49cd62bb909/fonttools-4.62.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef46db46c9447103b8f3ff91e8ba009d5fe181b1920a83757a5762551e32bb68", size = 5013926, upload-time = "2026-03-13T13:53:21.379Z" },
- { url = "https://files.pythonhosted.org/packages/e2/98/8b1e801939839d405f1f122e7d175cebe9aeb4e114f95bfc45e3152af9a7/fonttools-4.62.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6706d1cb1d5e6251a97ad3c1b9347505c5615c112e66047abbef0f8545fa30d1", size = 4964575, upload-time = "2026-03-13T13:53:23.857Z" },
- { url = "https://files.pythonhosted.org/packages/46/76/7d051671e938b1881670528fec69cc4044315edd71a229c7fd712eaa5119/fonttools-4.62.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2e7abd2b1e11736f58c1de27819e1955a53267c21732e78243fa2fa2e5c1e069", size = 4953693, upload-time = "2026-03-13T13:53:26.569Z" },
- { url = "https://files.pythonhosted.org/packages/1f/ae/b41f8628ec0be3c1b934fc12b84f4576a5c646119db4d3bdd76a217c90b5/fonttools-4.62.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:403d28ce06ebfc547fbcb0cb8b7f7cc2f7a2d3e1a67ba9a34b14632df9e080f9", size = 5094920, upload-time = "2026-03-13T13:53:29.329Z" },
- { url = "https://files.pythonhosted.org/packages/f2/f6/53a1e9469331a23dcc400970a27a4caa3d9f6edbf5baab0260285238b884/fonttools-4.62.1-cp313-cp313-win32.whl", hash = "sha256:93c316e0f5301b2adbe6a5f658634307c096fd5aae60a5b3412e4f3e1728ab24", size = 2279928, upload-time = "2026-03-13T13:53:32.352Z" },
- { url = "https://files.pythonhosted.org/packages/38/60/35186529de1db3c01f5ad625bde07c1f576305eab6d86bbda4c58445f721/fonttools-4.62.1-cp313-cp313-win_amd64.whl", hash = "sha256:7aa21ff53e28a9c2157acbc44e5b401149d3c9178107130e82d74ceb500e5056", size = 2330514, upload-time = "2026-03-13T13:53:34.991Z" },
- { url = "https://files.pythonhosted.org/packages/36/f0/2888cdac391807d68d90dcb16ef858ddc1b5309bfc6966195a459dd326e2/fonttools-4.62.1-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:fa1d16210b6b10a826d71bed68dd9ec24a9e218d5a5e2797f37c573e7ec215ca", size = 2864442, upload-time = "2026-03-13T13:53:37.509Z" },
- { url = "https://files.pythonhosted.org/packages/4b/b2/e521803081f8dc35990816b82da6360fa668a21b44da4b53fc9e77efcd62/fonttools-4.62.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:aa69d10ed420d8121118e628ad47d86e4caa79ba37f968597b958f6cceab7eca", size = 2410901, upload-time = "2026-03-13T13:53:40.55Z" },
- { url = "https://files.pythonhosted.org/packages/00/a4/8c3511ff06e53110039358dbbdc1a65d72157a054638387aa2ada300a8b8/fonttools-4.62.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bd13b7999d59c5eb1c2b442eb2d0c427cb517a0b7a1f5798fc5c9e003f5ff782", size = 4999608, upload-time = "2026-03-13T13:53:42.798Z" },
- { url = "https://files.pythonhosted.org/packages/28/63/cd0c3b26afe60995a5295f37c246a93d454023726c3261cfbb3559969bb9/fonttools-4.62.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8d337fdd49a79b0d51c4da87bc38169d21c3abbf0c1aa9367eff5c6656fb6dae", size = 4912726, upload-time = "2026-03-13T13:53:45.405Z" },
- { url = "https://files.pythonhosted.org/packages/70/b9/ac677cb07c24c685cf34f64e140617d58789d67a3dd524164b63648c6114/fonttools-4.62.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d241cdc4a67b5431c6d7f115fdf63335222414995e3a1df1a41e1182acd4bcc7", size = 4951422, upload-time = "2026-03-13T13:53:48.326Z" },
- { url = "https://files.pythonhosted.org/packages/e6/10/11c08419a14b85b7ca9a9faca321accccc8842dd9e0b1c8a72908de05945/fonttools-4.62.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c05557a78f8fa514da0f869556eeda40887a8abc77c76ee3f74cf241778afd5a", size = 5060979, upload-time = "2026-03-13T13:53:51.366Z" },
- { url = "https://files.pythonhosted.org/packages/4e/3c/12eea4a4cf054e7ab058ed5ceada43b46809fce2bf319017c4d63ae55bb4/fonttools-4.62.1-cp314-cp314-win32.whl", hash = "sha256:49a445d2f544ce4a69338694cad575ba97b9a75fff02720da0882d1a73f12800", size = 2283733, upload-time = "2026-03-13T13:53:53.606Z" },
- { url = "https://files.pythonhosted.org/packages/6b/67/74b070029043186b5dd13462c958cb7c7f811be0d2e634309d9a1ffb1505/fonttools-4.62.1-cp314-cp314-win_amd64.whl", hash = "sha256:1eecc128c86c552fb963fe846ca4e011b1be053728f798185a1687502f6d398e", size = 2335663, upload-time = "2026-03-13T13:53:56.23Z" },
- { url = "https://files.pythonhosted.org/packages/42/c5/4d2ed3ca6e33617fc5624467da353337f06e7f637707478903c785bd8e20/fonttools-4.62.1-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:1596aeaddf7f78e21e68293c011316a25267b3effdaccaf4d59bc9159d681b82", size = 2947288, upload-time = "2026-03-13T13:53:59.397Z" },
- { url = "https://files.pythonhosted.org/packages/1f/e9/7ab11ddfda48ed0f89b13380e5595ba572619c27077be0b2c447a63ff351/fonttools-4.62.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:8f8fca95d3bb3208f59626a4b0ea6e526ee51f5a8ad5d91821c165903e8d9260", size = 2449023, upload-time = "2026-03-13T13:54:01.642Z" },
- { url = "https://files.pythonhosted.org/packages/b2/10/a800fa090b5e8819942e54e19b55fc7c21fe14a08757c3aa3ca8db358939/fonttools-4.62.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee91628c08e76f77b533d65feb3fbe6d9dad699f95be51cf0d022db94089cdc4", size = 5137599, upload-time = "2026-03-13T13:54:04.495Z" },
- { url = "https://files.pythonhosted.org/packages/37/dc/8ccd45033fffd74deb6912fa1ca524643f584b94c87a16036855b498a1ed/fonttools-4.62.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5f37df1cac61d906e7b836abe356bc2f34c99d4477467755c216b72aa3dc748b", size = 4920933, upload-time = "2026-03-13T13:54:07.557Z" },
- { url = "https://files.pythonhosted.org/packages/99/eb/e618adefb839598d25ac8136cd577925d6c513dc0d931d93b8af956210f0/fonttools-4.62.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:92bb00a947e666169c99b43753c4305fc95a890a60ef3aeb2a6963e07902cc87", size = 5016232, upload-time = "2026-03-13T13:54:10.611Z" },
- { url = "https://files.pythonhosted.org/packages/d9/5f/9b5c9bfaa8ec82def8d8168c4f13615990d6ce5996fe52bd49bfb5e05134/fonttools-4.62.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:bdfe592802ef939a0e33106ea4a318eeb17822c7ee168c290273cbd5fabd746c", size = 5042987, upload-time = "2026-03-13T13:54:13.569Z" },
- { url = "https://files.pythonhosted.org/packages/90/aa/dfbbe24c6a6afc5c203d90cc0343e24bcbb09e76d67c4d6eef8c2558d7ba/fonttools-4.62.1-cp314-cp314t-win32.whl", hash = "sha256:b820fcb92d4655513d8402d5b219f94481c4443d825b4372c75a2072aa4b357a", size = 2348021, upload-time = "2026-03-13T13:54:16.98Z" },
- { url = "https://files.pythonhosted.org/packages/13/6f/ae9c4e4dd417948407b680855c2c7790efb52add6009aaecff1e3bc50e8e/fonttools-4.62.1-cp314-cp314t-win_amd64.whl", hash = "sha256:59b372b4f0e113d3746b88985f1c796e7bf830dd54b28374cd85c2b8acd7583e", size = 2414147, upload-time = "2026-03-13T13:54:19.416Z" },
- { url = "https://files.pythonhosted.org/packages/fd/ba/56147c165442cc5ba7e82ecf301c9a68353cede498185869e6e02b4c264f/fonttools-4.62.1-py3-none-any.whl", hash = "sha256:7487782e2113861f4ddcc07c3436450659e3caa5e470b27dc2177cade2d8e7fd", size = 1152647, upload-time = "2026-03-13T13:54:22.735Z" },
-]
-
-[[package]]
-name = "fsspec"
-version = "2025.10.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/24/7f/2747c0d332b9acfa75dc84447a066fdf812b5a6b8d30472b74d309bfe8cb/fsspec-2025.10.0.tar.gz", hash = "sha256:b6789427626f068f9a83ca4e8a3cc050850b6c0f71f99ddb4f542b8266a26a59", size = 309285, upload-time = "2025-10-30T14:58:44.036Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/eb/02/a6b21098b1d5d6249b7c5ab69dde30108a71e4e819d4a9778f1de1d5b70d/fsspec-2025.10.0-py3-none-any.whl", hash = "sha256:7c7712353ae7d875407f97715f0e1ffcc21e33d5b24556cb1e090ae9409ec61d", size = 200966, upload-time = "2025-10-30T14:58:42.53Z" },
-]
-
-[[package]]
-name = "fsspec"
-version = "2026.2.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/51/7c/f60c259dcbf4f0c47cc4ddb8f7720d2dcdc8888c8e5ad84c73ea4531cc5b/fsspec-2026.2.0.tar.gz", hash = "sha256:6544e34b16869f5aacd5b90bdf1a71acb37792ea3ddf6125ee69a22a53fb8bff", size = 313441, upload-time = "2026-02-05T21:50:53.743Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e6/ab/fb21f4c939bb440104cc2b396d3be1d9b7a9fd3c6c2a53d98c45b3d7c954/fsspec-2026.2.0-py3-none-any.whl", hash = "sha256:98de475b5cb3bd66bedd5c4679e87b4fdfe1a3bf4d707b151b3c07e58c9a2437", size = 202505, upload-time = "2026-02-05T21:50:51.819Z" },
-]
-
-[[package]]
-name = "hypothesis"
-version = "6.141.1"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "attrs", marker = "python_full_version < '3.10'" },
- { name = "exceptiongroup", marker = "python_full_version < '3.10'" },
- { name = "sortedcontainers", marker = "python_full_version < '3.10'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/85/20/8aa62b3e69fea68bb30d35d50be5395c98979013acd8152d64dc927e4cdb/hypothesis-6.141.1.tar.gz", hash = "sha256:8ef356e1e18fbeaa8015aab3c805303b7fe4b868e5b506e87ad83c0bf951f46f", size = 467389, upload-time = "2025-10-15T19:12:25.262Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/bc/9a/f901858f139694dd669776983781b08a7c1717911025da6720e526bd8ce3/hypothesis-6.141.1-py3-none-any.whl", hash = "sha256:a5b3c39c16d98b7b4c3c5c8d4262e511e3b2255e6814ced8023af49087ad60b3", size = 535000, upload-time = "2025-10-15T19:12:21.659Z" },
-]
-
-[[package]]
-name = "hypothesis"
-version = "6.151.9"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "exceptiongroup", marker = "python_full_version == '3.10.*'" },
- { name = "sortedcontainers", marker = "python_full_version >= '3.10'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/19/e1/ef365ff480903b929d28e057f57b76cae51a30375943e33374ec9a165d9c/hypothesis-6.151.9.tar.gz", hash = "sha256:2f284428dda6c3c48c580de0e18470ff9c7f5ef628a647ee8002f38c3f9097ca", size = 463534, upload-time = "2026-02-16T22:59:23.09Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c4/f7/5cc291d701094754a1d327b44d80a44971e13962881d9a400235726171da/hypothesis-6.151.9-py3-none-any.whl", hash = "sha256:7b7220585c67759b1b1ef839b1e6e9e3d82ed468cfc1ece43c67184848d7edd9", size = 529307, upload-time = "2026-02-16T22:59:20.443Z" },
-]
-
-[[package]]
-name = "idna"
-version = "3.11"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" },
-]
-
-[[package]]
-name = "importlib-resources"
-version = "6.5.2"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "zipp", marker = "python_full_version < '3.10'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/cf/8c/f834fbf984f691b4f7ff60f50b514cc3de5cc08abfc3295564dd89c5e2e7/importlib_resources-6.5.2.tar.gz", hash = "sha256:185f87adef5bcc288449d98fb4fba07cea78bc036455dd44c5fc4a2fe78fed2c", size = 44693, upload-time = "2025-01-03T18:51:56.698Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a4/ed/1f1afb2e9e7f38a545d628f864d562a5ae64fe6f7a10e28ffb9b185b4e89/importlib_resources-6.5.2-py3-none-any.whl", hash = "sha256:789cfdc3ed28c78b67a06acb8126751ced69a3d5f79c095a98298cd8a760ccec", size = 37461, upload-time = "2025-01-03T18:51:54.306Z" },
-]
-
-[[package]]
-name = "iniconfig"
-version = "2.1.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" },
-]
-
-[[package]]
-name = "iniconfig"
-version = "2.3.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
-]
-
-[[package]]
-name = "jinja2"
-version = "3.1.6"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "markupsafe" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" },
-]
-
-[[package]]
-name = "joblib"
-version = "1.5.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/41/f2/d34e8b3a08a9cc79a50b2208a93dce981fe615b64d5a4d4abee421d898df/joblib-1.5.3.tar.gz", hash = "sha256:8561a3269e6801106863fd0d6d84bb737be9e7631e33aaed3fb9ce5953688da3", size = 331603, upload-time = "2025-12-15T08:41:46.427Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl", hash = "sha256:5fc3c5039fc5ca8c0276333a188bbd59d6b7ab37fe6632daa76bc7f9ec18e713", size = 309071, upload-time = "2025-12-15T08:41:44.973Z" },
-]
-
-[[package]]
-name = "kiwisolver"
-version = "1.4.7"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/85/4d/2255e1c76304cbd60b48cee302b66d1dde4468dc5b1160e4b7cb43778f2a/kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60", size = 97286, upload-time = "2024-09-04T09:39:44.302Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/97/14/fc943dd65268a96347472b4fbe5dcc2f6f55034516f80576cd0dd3a8930f/kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6", size = 122440, upload-time = "2024-09-04T09:03:44.9Z" },
- { url = "https://files.pythonhosted.org/packages/1e/46/e68fed66236b69dd02fcdb506218c05ac0e39745d696d22709498896875d/kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17", size = 65758, upload-time = "2024-09-04T09:03:46.582Z" },
- { url = "https://files.pythonhosted.org/packages/ef/fa/65de49c85838681fc9cb05de2a68067a683717321e01ddafb5b8024286f0/kiwisolver-1.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aa0abdf853e09aff551db11fce173e2177d00786c688203f52c87ad7fcd91ef9", size = 64311, upload-time = "2024-09-04T09:03:47.973Z" },
- { url = "https://files.pythonhosted.org/packages/42/9c/cc8d90f6ef550f65443bad5872ffa68f3dee36de4974768628bea7c14979/kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8d53103597a252fb3ab8b5845af04c7a26d5e7ea8122303dd7a021176a87e8b9", size = 1637109, upload-time = "2024-09-04T09:03:49.281Z" },
- { url = "https://files.pythonhosted.org/packages/55/91/0a57ce324caf2ff5403edab71c508dd8f648094b18cfbb4c8cc0fde4a6ac/kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f17c5ffa8e9462fb79f62746428dd57b46eb931698e42e990ad63103f35e6c", size = 1617814, upload-time = "2024-09-04T09:03:51.444Z" },
- { url = "https://files.pythonhosted.org/packages/12/5d/c36140313f2510e20207708adf36ae4919416d697ee0236b0ddfb6fd1050/kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a9ca9c710d598fd75ee5de59d5bda2684d9db36a9f50b6125eaea3969c2599", size = 1400881, upload-time = "2024-09-04T09:03:53.357Z" },
- { url = "https://files.pythonhosted.org/packages/56/d0/786e524f9ed648324a466ca8df86298780ef2b29c25313d9a4f16992d3cf/kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4d742cb7af1c28303a51b7a27aaee540e71bb8e24f68c736f6f2ffc82f2bf05", size = 1512972, upload-time = "2024-09-04T09:03:55.082Z" },
- { url = "https://files.pythonhosted.org/packages/67/5a/77851f2f201e6141d63c10a0708e996a1363efaf9e1609ad0441b343763b/kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28c7fea2196bf4c2f8d46a0415c77a1c480cc0724722f23d7410ffe9842c407", size = 1444787, upload-time = "2024-09-04T09:03:56.588Z" },
- { url = "https://files.pythonhosted.org/packages/06/5f/1f5eaab84355885e224a6fc8d73089e8713dc7e91c121f00b9a1c58a2195/kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e968b84db54f9d42046cf154e02911e39c0435c9801681e3fc9ce8a3c4130278", size = 2199212, upload-time = "2024-09-04T09:03:58.557Z" },
- { url = "https://files.pythonhosted.org/packages/b5/28/9152a3bfe976a0ae21d445415defc9d1cd8614b2910b7614b30b27a47270/kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c18ec74c0472de033e1bebb2911c3c310eef5649133dd0bedf2a169a1b269e5", size = 2346399, upload-time = "2024-09-04T09:04:00.178Z" },
- { url = "https://files.pythonhosted.org/packages/26/f6/453d1904c52ac3b400f4d5e240ac5fec25263716723e44be65f4d7149d13/kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8f0ea6da6d393d8b2e187e6a5e3fb81f5862010a40c3945e2c6d12ae45cfb2ad", size = 2308688, upload-time = "2024-09-04T09:04:02.216Z" },
- { url = "https://files.pythonhosted.org/packages/5a/9a/d4968499441b9ae187e81745e3277a8b4d7c60840a52dc9d535a7909fac3/kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f106407dda69ae456dd1227966bf445b157ccc80ba0dff3802bb63f30b74e895", size = 2445493, upload-time = "2024-09-04T09:04:04.571Z" },
- { url = "https://files.pythonhosted.org/packages/07/c9/032267192e7828520dacb64dfdb1d74f292765f179e467c1cba97687f17d/kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84ec80df401cfee1457063732d90022f93951944b5b58975d34ab56bb150dfb3", size = 2262191, upload-time = "2024-09-04T09:04:05.969Z" },
- { url = "https://files.pythonhosted.org/packages/6c/ad/db0aedb638a58b2951da46ddaeecf204be8b4f5454df020d850c7fa8dca8/kiwisolver-1.4.7-cp310-cp310-win32.whl", hash = "sha256:71bb308552200fb2c195e35ef05de12f0c878c07fc91c270eb3d6e41698c3bcc", size = 46644, upload-time = "2024-09-04T09:04:07.408Z" },
- { url = "https://files.pythonhosted.org/packages/12/ca/d0f7b7ffbb0be1e7c2258b53554efec1fd652921f10d7d85045aff93ab61/kiwisolver-1.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:44756f9fd339de0fb6ee4f8c1696cfd19b2422e0d70b4cefc1cc7f1f64045a8c", size = 55877, upload-time = "2024-09-04T09:04:08.869Z" },
- { url = "https://files.pythonhosted.org/packages/97/6c/cfcc128672f47a3e3c0d918ecb67830600078b025bfc32d858f2e2d5c6a4/kiwisolver-1.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:78a42513018c41c2ffd262eb676442315cbfe3c44eed82385c2ed043bc63210a", size = 48347, upload-time = "2024-09-04T09:04:10.106Z" },
- { url = "https://files.pythonhosted.org/packages/e9/44/77429fa0a58f941d6e1c58da9efe08597d2e86bf2b2cce6626834f49d07b/kiwisolver-1.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d2b0e12a42fb4e72d509fc994713d099cbb15ebf1103545e8a45f14da2dfca54", size = 122442, upload-time = "2024-09-04T09:04:11.432Z" },
- { url = "https://files.pythonhosted.org/packages/e5/20/8c75caed8f2462d63c7fd65e16c832b8f76cda331ac9e615e914ee80bac9/kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a8781ac3edc42ea4b90bc23e7d37b665d89423818e26eb6df90698aa2287c95", size = 65762, upload-time = "2024-09-04T09:04:12.468Z" },
- { url = "https://files.pythonhosted.org/packages/f4/98/fe010f15dc7230f45bc4cf367b012d651367fd203caaa992fd1f5963560e/kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46707a10836894b559e04b0fd143e343945c97fd170d69a2d26d640b4e297935", size = 64319, upload-time = "2024-09-04T09:04:13.635Z" },
- { url = "https://files.pythonhosted.org/packages/8b/1b/b5d618f4e58c0675654c1e5051bcf42c776703edb21c02b8c74135541f60/kiwisolver-1.4.7-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef97b8df011141c9b0f6caf23b29379f87dd13183c978a30a3c546d2c47314cb", size = 1334260, upload-time = "2024-09-04T09:04:14.878Z" },
- { url = "https://files.pythonhosted.org/packages/b8/01/946852b13057a162a8c32c4c8d2e9ed79f0bb5d86569a40c0b5fb103e373/kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab58c12a2cd0fc769089e6d38466c46d7f76aced0a1f54c77652446733d2d02", size = 1426589, upload-time = "2024-09-04T09:04:16.514Z" },
- { url = "https://files.pythonhosted.org/packages/70/d1/c9f96df26b459e15cf8a965304e6e6f4eb291e0f7a9460b4ad97b047561e/kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:803b8e1459341c1bb56d1c5c010406d5edec8a0713a0945851290a7930679b51", size = 1541080, upload-time = "2024-09-04T09:04:18.322Z" },
- { url = "https://files.pythonhosted.org/packages/d3/73/2686990eb8b02d05f3de759d6a23a4ee7d491e659007dd4c075fede4b5d0/kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9a9e8a507420fe35992ee9ecb302dab68550dedc0da9e2880dd88071c5fb052", size = 1470049, upload-time = "2024-09-04T09:04:20.266Z" },
- { url = "https://files.pythonhosted.org/packages/a7/4b/2db7af3ed3af7c35f388d5f53c28e155cd402a55432d800c543dc6deb731/kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18077b53dc3bb490e330669a99920c5e6a496889ae8c63b58fbc57c3d7f33a18", size = 1426376, upload-time = "2024-09-04T09:04:22.419Z" },
- { url = "https://files.pythonhosted.org/packages/05/83/2857317d04ea46dc5d115f0df7e676997bbd968ced8e2bd6f7f19cfc8d7f/kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6af936f79086a89b3680a280c47ea90b4df7047b5bdf3aa5c524bbedddb9e545", size = 2222231, upload-time = "2024-09-04T09:04:24.526Z" },
- { url = "https://files.pythonhosted.org/packages/0d/b5/866f86f5897cd4ab6d25d22e403404766a123f138bd6a02ecb2cdde52c18/kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3abc5b19d24af4b77d1598a585b8a719beb8569a71568b66f4ebe1fb0449460b", size = 2368634, upload-time = "2024-09-04T09:04:25.899Z" },
- { url = "https://files.pythonhosted.org/packages/c1/ee/73de8385403faba55f782a41260210528fe3273d0cddcf6d51648202d6d0/kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:933d4de052939d90afbe6e9d5273ae05fb836cc86c15b686edd4b3560cc0ee36", size = 2329024, upload-time = "2024-09-04T09:04:28.523Z" },
- { url = "https://files.pythonhosted.org/packages/a1/e7/cd101d8cd2cdfaa42dc06c433df17c8303d31129c9fdd16c0ea37672af91/kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:65e720d2ab2b53f1f72fb5da5fb477455905ce2c88aaa671ff0a447c2c80e8e3", size = 2468484, upload-time = "2024-09-04T09:04:30.547Z" },
- { url = "https://files.pythonhosted.org/packages/e1/72/84f09d45a10bc57a40bb58b81b99d8f22b58b2040c912b7eb97ebf625bf2/kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3bf1ed55088f214ba6427484c59553123fdd9b218a42bbc8c6496d6754b1e523", size = 2284078, upload-time = "2024-09-04T09:04:33.218Z" },
- { url = "https://files.pythonhosted.org/packages/d2/d4/71828f32b956612dc36efd7be1788980cb1e66bfb3706e6dec9acad9b4f9/kiwisolver-1.4.7-cp311-cp311-win32.whl", hash = "sha256:4c00336b9dd5ad96d0a558fd18a8b6f711b7449acce4c157e7343ba92dd0cf3d", size = 46645, upload-time = "2024-09-04T09:04:34.371Z" },
- { url = "https://files.pythonhosted.org/packages/a1/65/d43e9a20aabcf2e798ad1aff6c143ae3a42cf506754bcb6a7ed8259c8425/kiwisolver-1.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:929e294c1ac1e9f615c62a4e4313ca1823ba37326c164ec720a803287c4c499b", size = 56022, upload-time = "2024-09-04T09:04:35.786Z" },
- { url = "https://files.pythonhosted.org/packages/35/b3/9f75a2e06f1b4ca00b2b192bc2b739334127d27f1d0625627ff8479302ba/kiwisolver-1.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:e33e8fbd440c917106b237ef1a2f1449dfbb9b6f6e1ce17c94cd6a1e0d438376", size = 48536, upload-time = "2024-09-04T09:04:37.525Z" },
- { url = "https://files.pythonhosted.org/packages/97/9c/0a11c714cf8b6ef91001c8212c4ef207f772dd84540104952c45c1f0a249/kiwisolver-1.4.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5360cc32706dab3931f738d3079652d20982511f7c0ac5711483e6eab08efff2", size = 121808, upload-time = "2024-09-04T09:04:38.637Z" },
- { url = "https://files.pythonhosted.org/packages/f2/d8/0fe8c5f5d35878ddd135f44f2af0e4e1d379e1c7b0716f97cdcb88d4fd27/kiwisolver-1.4.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942216596dc64ddb25adb215c3c783215b23626f8d84e8eff8d6d45c3f29f75a", size = 65531, upload-time = "2024-09-04T09:04:39.694Z" },
- { url = "https://files.pythonhosted.org/packages/80/c5/57fa58276dfdfa612241d640a64ca2f76adc6ffcebdbd135b4ef60095098/kiwisolver-1.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48b571ecd8bae15702e4f22d3ff6a0f13e54d3d00cd25216d5e7f658242065ee", size = 63894, upload-time = "2024-09-04T09:04:41.6Z" },
- { url = "https://files.pythonhosted.org/packages/8b/e9/26d3edd4c4ad1c5b891d8747a4f81b1b0aba9fb9721de6600a4adc09773b/kiwisolver-1.4.7-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad42ba922c67c5f219097b28fae965e10045ddf145d2928bfac2eb2e17673640", size = 1369296, upload-time = "2024-09-04T09:04:42.886Z" },
- { url = "https://files.pythonhosted.org/packages/b6/67/3f4850b5e6cffb75ec40577ddf54f7b82b15269cc5097ff2e968ee32ea7d/kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:612a10bdae23404a72941a0fc8fa2660c6ea1217c4ce0dbcab8a8f6543ea9e7f", size = 1461450, upload-time = "2024-09-04T09:04:46.284Z" },
- { url = "https://files.pythonhosted.org/packages/52/be/86cbb9c9a315e98a8dc6b1d23c43cffd91d97d49318854f9c37b0e41cd68/kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e838bba3a3bac0fe06d849d29772eb1afb9745a59710762e4ba3f4cb8424483", size = 1579168, upload-time = "2024-09-04T09:04:47.91Z" },
- { url = "https://files.pythonhosted.org/packages/0f/00/65061acf64bd5fd34c1f4ae53f20b43b0a017a541f242a60b135b9d1e301/kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22f499f6157236c19f4bbbd472fa55b063db77a16cd74d49afe28992dff8c258", size = 1507308, upload-time = "2024-09-04T09:04:49.465Z" },
- { url = "https://files.pythonhosted.org/packages/21/e4/c0b6746fd2eb62fe702118b3ca0cb384ce95e1261cfada58ff693aeec08a/kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693902d433cf585133699972b6d7c42a8b9f8f826ebcaf0132ff55200afc599e", size = 1464186, upload-time = "2024-09-04T09:04:50.949Z" },
- { url = "https://files.pythonhosted.org/packages/0a/0f/529d0a9fffb4d514f2782c829b0b4b371f7f441d61aa55f1de1c614c4ef3/kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4e77f2126c3e0b0d055f44513ed349038ac180371ed9b52fe96a32aa071a5107", size = 2247877, upload-time = "2024-09-04T09:04:52.388Z" },
- { url = "https://files.pythonhosted.org/packages/d1/e1/66603ad779258843036d45adcbe1af0d1a889a07af4635f8b4ec7dccda35/kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:657a05857bda581c3656bfc3b20e353c232e9193eb167766ad2dc58b56504948", size = 2404204, upload-time = "2024-09-04T09:04:54.385Z" },
- { url = "https://files.pythonhosted.org/packages/8d/61/de5fb1ca7ad1f9ab7970e340a5b833d735df24689047de6ae71ab9d8d0e7/kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4bfa75a048c056a411f9705856abfc872558e33c055d80af6a380e3658766038", size = 2352461, upload-time = "2024-09-04T09:04:56.307Z" },
- { url = "https://files.pythonhosted.org/packages/ba/d2/0edc00a852e369827f7e05fd008275f550353f1f9bcd55db9363d779fc63/kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:34ea1de54beef1c104422d210c47c7d2a4999bdecf42c7b5718fbe59a4cac383", size = 2501358, upload-time = "2024-09-04T09:04:57.922Z" },
- { url = "https://files.pythonhosted.org/packages/84/15/adc15a483506aec6986c01fb7f237c3aec4d9ed4ac10b756e98a76835933/kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:90da3b5f694b85231cf93586dad5e90e2d71b9428f9aad96952c99055582f520", size = 2314119, upload-time = "2024-09-04T09:04:59.332Z" },
- { url = "https://files.pythonhosted.org/packages/36/08/3a5bb2c53c89660863a5aa1ee236912269f2af8762af04a2e11df851d7b2/kiwisolver-1.4.7-cp312-cp312-win32.whl", hash = "sha256:18e0cca3e008e17fe9b164b55735a325140a5a35faad8de92dd80265cd5eb80b", size = 46367, upload-time = "2024-09-04T09:05:00.804Z" },
- { url = "https://files.pythonhosted.org/packages/19/93/c05f0a6d825c643779fc3c70876bff1ac221f0e31e6f701f0e9578690d70/kiwisolver-1.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:58cb20602b18f86f83a5c87d3ee1c766a79c0d452f8def86d925e6c60fbf7bfb", size = 55884, upload-time = "2024-09-04T09:05:01.924Z" },
- { url = "https://files.pythonhosted.org/packages/d2/f9/3828d8f21b6de4279f0667fb50a9f5215e6fe57d5ec0d61905914f5b6099/kiwisolver-1.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:f5a8b53bdc0b3961f8b6125e198617c40aeed638b387913bf1ce78afb1b0be2a", size = 48528, upload-time = "2024-09-04T09:05:02.983Z" },
- { url = "https://files.pythonhosted.org/packages/c4/06/7da99b04259b0f18b557a4effd1b9c901a747f7fdd84cf834ccf520cb0b2/kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e", size = 121913, upload-time = "2024-09-04T09:05:04.072Z" },
- { url = "https://files.pythonhosted.org/packages/97/f5/b8a370d1aa593c17882af0a6f6755aaecd643640c0ed72dcfd2eafc388b9/kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6", size = 65627, upload-time = "2024-09-04T09:05:05.119Z" },
- { url = "https://files.pythonhosted.org/packages/2a/fc/6c0374f7503522539e2d4d1b497f5ebad3f8ed07ab51aed2af988dd0fb65/kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750", size = 63888, upload-time = "2024-09-04T09:05:06.191Z" },
- { url = "https://files.pythonhosted.org/packages/bf/3e/0b7172793d0f41cae5c923492da89a2ffcd1adf764c16159ca047463ebd3/kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d", size = 1369145, upload-time = "2024-09-04T09:05:07.919Z" },
- { url = "https://files.pythonhosted.org/packages/77/92/47d050d6f6aced2d634258123f2688fbfef8ded3c5baf2c79d94d91f1f58/kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379", size = 1461448, upload-time = "2024-09-04T09:05:10.01Z" },
- { url = "https://files.pythonhosted.org/packages/9c/1b/8f80b18e20b3b294546a1adb41701e79ae21915f4175f311a90d042301cf/kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c", size = 1578750, upload-time = "2024-09-04T09:05:11.598Z" },
- { url = "https://files.pythonhosted.org/packages/a4/fe/fe8e72f3be0a844f257cadd72689c0848c6d5c51bc1d60429e2d14ad776e/kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34", size = 1507175, upload-time = "2024-09-04T09:05:13.22Z" },
- { url = "https://files.pythonhosted.org/packages/39/fa/cdc0b6105d90eadc3bee525fecc9179e2b41e1ce0293caaf49cb631a6aaf/kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1", size = 1463963, upload-time = "2024-09-04T09:05:15.925Z" },
- { url = "https://files.pythonhosted.org/packages/6e/5c/0c03c4e542720c6177d4f408e56d1c8315899db72d46261a4e15b8b33a41/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f", size = 2248220, upload-time = "2024-09-04T09:05:17.434Z" },
- { url = "https://files.pythonhosted.org/packages/3d/ee/55ef86d5a574f4e767df7da3a3a7ff4954c996e12d4fbe9c408170cd7dcc/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b", size = 2404463, upload-time = "2024-09-04T09:05:18.997Z" },
- { url = "https://files.pythonhosted.org/packages/0f/6d/73ad36170b4bff4825dc588acf4f3e6319cb97cd1fb3eb04d9faa6b6f212/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27", size = 2352842, upload-time = "2024-09-04T09:05:21.299Z" },
- { url = "https://files.pythonhosted.org/packages/0b/16/fa531ff9199d3b6473bb4d0f47416cdb08d556c03b8bc1cccf04e756b56d/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a", size = 2501635, upload-time = "2024-09-04T09:05:23.588Z" },
- { url = "https://files.pythonhosted.org/packages/78/7e/aa9422e78419db0cbe75fb86d8e72b433818f2e62e2e394992d23d23a583/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee", size = 2314556, upload-time = "2024-09-04T09:05:25.907Z" },
- { url = "https://files.pythonhosted.org/packages/a8/b2/15f7f556df0a6e5b3772a1e076a9d9f6c538ce5f05bd590eca8106508e06/kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07", size = 46364, upload-time = "2024-09-04T09:05:27.184Z" },
- { url = "https://files.pythonhosted.org/packages/0b/db/32e897e43a330eee8e4770bfd2737a9584b23e33587a0812b8e20aac38f7/kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76", size = 55887, upload-time = "2024-09-04T09:05:28.372Z" },
- { url = "https://files.pythonhosted.org/packages/c8/a4/df2bdca5270ca85fd25253049eb6708d4127be2ed0e5c2650217450b59e9/kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650", size = 48530, upload-time = "2024-09-04T09:05:30.225Z" },
- { url = "https://files.pythonhosted.org/packages/11/88/37ea0ea64512997b13d69772db8dcdc3bfca5442cda3a5e4bb943652ee3e/kiwisolver-1.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f9362ecfca44c863569d3d3c033dbe8ba452ff8eed6f6b5806382741a1334bd", size = 122449, upload-time = "2024-09-04T09:05:55.311Z" },
- { url = "https://files.pythonhosted.org/packages/4e/45/5a5c46078362cb3882dcacad687c503089263c017ca1241e0483857791eb/kiwisolver-1.4.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8df2eb9b2bac43ef8b082e06f750350fbbaf2887534a5be97f6cf07b19d9583", size = 65757, upload-time = "2024-09-04T09:05:56.906Z" },
- { url = "https://files.pythonhosted.org/packages/8a/be/a6ae58978772f685d48dd2e84460937761c53c4bbd84e42b0336473d9775/kiwisolver-1.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f32d6edbc638cde7652bd690c3e728b25332acbadd7cad670cc4a02558d9c417", size = 64312, upload-time = "2024-09-04T09:05:58.384Z" },
- { url = "https://files.pythonhosted.org/packages/f4/04/18ef6f452d311e1e1eb180c9bf5589187fa1f042db877e6fe443ef10099c/kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e2e6c39bd7b9372b0be21456caab138e8e69cc0fc1190a9dfa92bd45a1e6e904", size = 1626966, upload-time = "2024-09-04T09:05:59.855Z" },
- { url = "https://files.pythonhosted.org/packages/21/b1/40655f6c3fa11ce740e8a964fa8e4c0479c87d6a7944b95af799c7a55dfe/kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dda56c24d869b1193fcc763f1284b9126550eaf84b88bbc7256e15028f19188a", size = 1607044, upload-time = "2024-09-04T09:06:02.16Z" },
- { url = "https://files.pythonhosted.org/packages/fd/93/af67dbcfb9b3323bbd2c2db1385a7139d8f77630e4a37bb945b57188eb2d/kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79849239c39b5e1fd906556c474d9b0439ea6792b637511f3fe3a41158d89ca8", size = 1391879, upload-time = "2024-09-04T09:06:03.908Z" },
- { url = "https://files.pythonhosted.org/packages/40/6f/d60770ef98e77b365d96061d090c0cd9e23418121c55fff188fa4bdf0b54/kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5e3bc157fed2a4c02ec468de4ecd12a6e22818d4f09cde2c31ee3226ffbefab2", size = 1504751, upload-time = "2024-09-04T09:06:05.58Z" },
- { url = "https://files.pythonhosted.org/packages/fa/3a/5f38667d313e983c432f3fcd86932177519ed8790c724e07d77d1de0188a/kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3da53da805b71e41053dc670f9a820d1157aae77b6b944e08024d17bcd51ef88", size = 1436990, upload-time = "2024-09-04T09:06:08.126Z" },
- { url = "https://files.pythonhosted.org/packages/cb/3b/1520301a47326e6a6043b502647e42892be33b3f051e9791cc8bb43f1a32/kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8705f17dfeb43139a692298cb6637ee2e59c0194538153e83e9ee0c75c2eddde", size = 2191122, upload-time = "2024-09-04T09:06:10.345Z" },
- { url = "https://files.pythonhosted.org/packages/cf/c4/eb52da300c166239a2233f1f9c4a1b767dfab98fae27681bfb7ea4873cb6/kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:82a5c2f4b87c26bb1a0ef3d16b5c4753434633b83d365cc0ddf2770c93829e3c", size = 2338126, upload-time = "2024-09-04T09:06:12.321Z" },
- { url = "https://files.pythonhosted.org/packages/1a/cb/42b92fd5eadd708dd9107c089e817945500685f3437ce1fd387efebc6d6e/kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce8be0466f4c0d585cdb6c1e2ed07232221df101a4c6f28821d2aa754ca2d9e2", size = 2298313, upload-time = "2024-09-04T09:06:14.562Z" },
- { url = "https://files.pythonhosted.org/packages/4f/eb/be25aa791fe5fc75a8b1e0c965e00f942496bc04635c9aae8035f6b76dcd/kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:409afdfe1e2e90e6ee7fc896f3df9a7fec8e793e58bfa0d052c8a82f99c37abb", size = 2437784, upload-time = "2024-09-04T09:06:16.767Z" },
- { url = "https://files.pythonhosted.org/packages/c5/22/30a66be7f3368d76ff95689e1c2e28d382383952964ab15330a15d8bfd03/kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5b9c3f4ee0b9a439d2415012bd1b1cc2df59e4d6a9939f4d669241d30b414327", size = 2253988, upload-time = "2024-09-04T09:06:18.705Z" },
- { url = "https://files.pythonhosted.org/packages/35/d3/5f2ecb94b5211c8a04f218a76133cc8d6d153b0f9cd0b45fad79907f0689/kiwisolver-1.4.7-cp39-cp39-win32.whl", hash = "sha256:a79ae34384df2b615eefca647a2873842ac3b596418032bef9a7283675962644", size = 46980, upload-time = "2024-09-04T09:06:20.106Z" },
- { url = "https://files.pythonhosted.org/packages/ef/17/cd10d020578764ea91740204edc6b3236ed8106228a46f568d716b11feb2/kiwisolver-1.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:cf0438b42121a66a3a667de17e779330fc0f20b0d97d59d2f2121e182b0505e4", size = 55847, upload-time = "2024-09-04T09:06:21.407Z" },
- { url = "https://files.pythonhosted.org/packages/91/84/32232502020bd78d1d12be7afde15811c64a95ed1f606c10456db4e4c3ac/kiwisolver-1.4.7-cp39-cp39-win_arm64.whl", hash = "sha256:764202cc7e70f767dab49e8df52c7455e8de0df5d858fa801a11aa0d882ccf3f", size = 48494, upload-time = "2024-09-04T09:06:22.648Z" },
- { url = "https://files.pythonhosted.org/packages/ac/59/741b79775d67ab67ced9bb38552da688c0305c16e7ee24bba7a2be253fb7/kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:94252291e3fe68001b1dd747b4c0b3be12582839b95ad4d1b641924d68fd4643", size = 59491, upload-time = "2024-09-04T09:06:24.188Z" },
- { url = "https://files.pythonhosted.org/packages/58/cc/fb239294c29a5656e99e3527f7369b174dd9cc7c3ef2dea7cb3c54a8737b/kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b7dfa3b546da08a9f622bb6becdb14b3e24aaa30adba66749d38f3cc7ea9706", size = 57648, upload-time = "2024-09-04T09:06:25.559Z" },
- { url = "https://files.pythonhosted.org/packages/3b/ef/2f009ac1f7aab9f81efb2d837301d255279d618d27b6015780115ac64bdd/kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3de6481f4ed8b734da5df134cd5a6a64fe32124fe83dde1e5b5f29fe30b1e6", size = 84257, upload-time = "2024-09-04T09:06:27.038Z" },
- { url = "https://files.pythonhosted.org/packages/81/e1/c64f50987f85b68b1c52b464bb5bf73e71570c0f7782d626d1eb283ad620/kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91b5f9f1205845d488c928e8570dcb62b893372f63b8b6e98b863ebd2368ff2", size = 80906, upload-time = "2024-09-04T09:06:28.48Z" },
- { url = "https://files.pythonhosted.org/packages/fd/71/1687c5c0a0be2cee39a5c9c389e546f9c6e215e46b691d00d9f646892083/kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40fa14dbd66b8b8f470d5fc79c089a66185619d31645f9b0773b88b19f7223c4", size = 79951, upload-time = "2024-09-04T09:06:29.966Z" },
- { url = "https://files.pythonhosted.org/packages/ea/8b/d7497df4a1cae9367adf21665dd1f896c2a7aeb8769ad77b662c5e2bcce7/kiwisolver-1.4.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:eb542fe7933aa09d8d8f9d9097ef37532a7df6497819d16efe4359890a2f417a", size = 55715, upload-time = "2024-09-04T09:06:31.489Z" },
- { url = "https://files.pythonhosted.org/packages/d5/df/ce37d9b26f07ab90880923c94d12a6ff4d27447096b4c849bfc4339ccfdf/kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8b01aac285f91ca889c800042c35ad3b239e704b150cfd3382adfc9dcc780e39", size = 58666, upload-time = "2024-09-04T09:06:43.756Z" },
- { url = "https://files.pythonhosted.org/packages/b0/d3/e4b04f43bc629ac8e186b77b2b1a251cdfa5b7610fa189dc0db622672ce6/kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48be928f59a1f5c8207154f935334d374e79f2b5d212826307d072595ad76a2e", size = 57088, upload-time = "2024-09-04T09:06:45.406Z" },
- { url = "https://files.pythonhosted.org/packages/30/1c/752df58e2d339e670a535514d2db4fe8c842ce459776b8080fbe08ebb98e/kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f37cfe618a117e50d8c240555331160d73d0411422b59b5ee217843d7b693608", size = 84321, upload-time = "2024-09-04T09:06:47.557Z" },
- { url = "https://files.pythonhosted.org/packages/f0/f8/fe6484e847bc6e238ec9f9828089fb2c0bb53f2f5f3a79351fde5b565e4f/kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:599b5c873c63a1f6ed7eead644a8a380cfbdf5db91dcb6f85707aaab213b1674", size = 80776, upload-time = "2024-09-04T09:06:49.235Z" },
- { url = "https://files.pythonhosted.org/packages/9b/57/d7163c0379f250ef763aba85330a19feefb5ce6cb541ade853aaba881524/kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:801fa7802e5cfabe3ab0c81a34c323a319b097dfb5004be950482d882f3d7225", size = 79984, upload-time = "2024-09-04T09:06:51.336Z" },
- { url = "https://files.pythonhosted.org/packages/8c/95/4a103776c265d13b3d2cd24fb0494d4e04ea435a8ef97e1b2c026d43250b/kiwisolver-1.4.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0c6c43471bc764fad4bc99c5c2d6d16a676b1abf844ca7c8702bdae92df01ee0", size = 55811, upload-time = "2024-09-04T09:06:53.078Z" },
-]
-
-[[package]]
-name = "kiwisolver"
-version = "1.5.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/d0/67/9c61eccb13f0bdca9307614e782fec49ffdde0f7a2314935d489fa93cd9c/kiwisolver-1.5.0.tar.gz", hash = "sha256:d4193f3d9dc3f6f79aaed0e5637f45d98850ebf01f7ca20e69457f3e8946b66a", size = 103482, upload-time = "2026-03-09T13:15:53.382Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ac/f8/06549565caa026e540b7e7bab5c5a90eb7ca986015f4c48dace243cd24d9/kiwisolver-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:32cc0a5365239a6ea0c6ed461e8838d053b57e397443c0ca894dcc8e388d4374", size = 122802, upload-time = "2026-03-09T13:12:37.515Z" },
- { url = "https://files.pythonhosted.org/packages/84/eb/8476a0818850c563ff343ea7c9c05dcdcbd689a38e01aa31657df01f91fa/kiwisolver-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cc0b66c1eec9021353a4b4483afb12dfd50e3669ffbb9152d6842eb34c7e29fd", size = 66216, upload-time = "2026-03-09T13:12:38.812Z" },
- { url = "https://files.pythonhosted.org/packages/f3/c4/f9c8a6b4c21aed4198566e45923512986d6cef530e7263b3a5f823546561/kiwisolver-1.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:86e0287879f75621ae85197b0877ed2f8b7aa57b511c7331dce2eb6f4de7d476", size = 63917, upload-time = "2026-03-09T13:12:40.053Z" },
- { url = "https://files.pythonhosted.org/packages/f1/0e/ba4ae25d03722f64de8b2c13e80d82ab537a06b30fc7065183c6439357e3/kiwisolver-1.5.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:62f59da443c4f4849f73a51a193b1d9d258dcad0c41bc4d1b8fb2bcc04bfeb22", size = 1628776, upload-time = "2026-03-09T13:12:41.976Z" },
- { url = "https://files.pythonhosted.org/packages/8a/e4/3f43a011bc8a0860d1c96f84d32fa87439d3feedf66e672fef03bf5e8bac/kiwisolver-1.5.0-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9190426b7aa26c5229501fa297b8d0653cfd3f5a36f7990c264e157cbf886b3b", size = 1228164, upload-time = "2026-03-09T13:12:44.002Z" },
- { url = "https://files.pythonhosted.org/packages/4b/34/3a901559a1e0c218404f9a61a93be82d45cb8f44453ba43088644980f033/kiwisolver-1.5.0-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c8277104ded0a51e699c8c3aff63ce2c56d4ed5519a5f73e0fd7057f959a2b9e", size = 1246656, upload-time = "2026-03-09T13:12:45.557Z" },
- { url = "https://files.pythonhosted.org/packages/87/9e/f78c466ea20527822b95ad38f141f2de1dcd7f23fb8716b002b0d91bbe59/kiwisolver-1.5.0-cp310-cp310-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8f9baf6f0a6e7571c45c8863010b45e837c3ee1c2c77fcd6ef423be91b21fedb", size = 1295562, upload-time = "2026-03-09T13:12:47.562Z" },
- { url = "https://files.pythonhosted.org/packages/0a/66/fd0e4a612e3a286c24e6d6f3a5428d11258ed1909bc530ba3b59807fd980/kiwisolver-1.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cff8e5383db4989311f99e814feeb90c4723eb4edca425b9d5d9c3fefcdd9537", size = 2178473, upload-time = "2026-03-09T13:12:50.254Z" },
- { url = "https://files.pythonhosted.org/packages/dc/8e/6cac929e0049539e5ee25c1ee937556f379ba5204840d03008363ced662d/kiwisolver-1.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ebae99ed6764f2b5771c522477b311be313e8841d2e0376db2b10922daebbba4", size = 2274035, upload-time = "2026-03-09T13:12:51.785Z" },
- { url = "https://files.pythonhosted.org/packages/ca/d3/9d0c18f1b52ea8074b792452cf17f1f5a56bd0302a85191f405cfbf9da16/kiwisolver-1.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:d5cd5189fc2b6a538b75ae45433140c4823463918f7b1617c31e68b085c0022c", size = 2443217, upload-time = "2026-03-09T13:12:53.329Z" },
- { url = "https://files.pythonhosted.org/packages/45/2a/6e19368803a038b2a90857bf4ee9e3c7b667216d045866bf22d3439fd75e/kiwisolver-1.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f42c23db5d1521218a3276bb08666dcb662896a0be7347cba864eca45ff64ede", size = 2249196, upload-time = "2026-03-09T13:12:55.057Z" },
- { url = "https://files.pythonhosted.org/packages/75/2b/3f641dfcbe72e222175d626bacf2f72c3b34312afec949dd1c50afa400f5/kiwisolver-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:94eff26096eb5395136634622515b234ecb6c9979824c1f5004c6e3c3c85ccd2", size = 73389, upload-time = "2026-03-09T13:12:56.496Z" },
- { url = "https://files.pythonhosted.org/packages/da/88/299b137b9e0025d8982e03d2d52c123b0a2b159e84b0ef1501ef446339cf/kiwisolver-1.5.0-cp310-cp310-win_arm64.whl", hash = "sha256:dd952e03bfbb096cfe2dd35cd9e00f269969b67536cb4370994afc20ff2d0875", size = 64782, upload-time = "2026-03-09T13:12:57.609Z" },
- { url = "https://files.pythonhosted.org/packages/12/dd/a495a9c104be1c476f0386e714252caf2b7eca883915422a64c50b88c6f5/kiwisolver-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9eed0f7edbb274413b6ee781cca50541c8c0facd3d6fd289779e494340a2b85c", size = 122798, upload-time = "2026-03-09T13:12:58.963Z" },
- { url = "https://files.pythonhosted.org/packages/11/60/37b4047a2af0cf5ef6d8b4b26e91829ae6fc6a2d1f74524bcb0e7cd28a32/kiwisolver-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c4923e404d6bcd91b6779c009542e5647fef32e4a5d75e115e3bbac6f2335eb", size = 66216, upload-time = "2026-03-09T13:13:00.155Z" },
- { url = "https://files.pythonhosted.org/packages/0a/aa/510dc933d87767584abfe03efa445889996c70c2990f6f87c3ebaa0a18c5/kiwisolver-1.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0df54df7e686afa55e6f21fb86195224a6d9beb71d637e8d7920c95cf0f89aac", size = 63911, upload-time = "2026-03-09T13:13:01.671Z" },
- { url = "https://files.pythonhosted.org/packages/80/46/bddc13df6c2a40741e0cc7865bb1c9ed4796b6760bd04ce5fae3928ef917/kiwisolver-1.5.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2517e24d7315eb51c10664cdb865195df38ab74456c677df67bb47f12d088a27", size = 1438209, upload-time = "2026-03-09T13:13:03.385Z" },
- { url = "https://files.pythonhosted.org/packages/fd/d6/76621246f5165e5372f02f5e6f3f48ea336a8f9e96e43997d45b240ed8cd/kiwisolver-1.5.0-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ff710414307fefa903e0d9bdf300972f892c23477829f49504e59834f4195398", size = 1248888, upload-time = "2026-03-09T13:13:05.231Z" },
- { url = "https://files.pythonhosted.org/packages/b2/c1/31559ec6fb39a5b48035ce29bb63ade628f321785f38c384dee3e2c08bc1/kiwisolver-1.5.0-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6176c1811d9d5a04fa391c490cc44f451e240697a16977f11c6f722efb9041db", size = 1266304, upload-time = "2026-03-09T13:13:06.743Z" },
- { url = "https://files.pythonhosted.org/packages/5e/ef/1cb8276f2d29cc6a41e0a042f27946ca347d3a4a75acf85d0a16aa6dcc82/kiwisolver-1.5.0-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:50847dca5d197fcbd389c805aa1a1cf32f25d2e7273dc47ab181a517666b68cc", size = 1319650, upload-time = "2026-03-09T13:13:08.607Z" },
- { url = "https://files.pythonhosted.org/packages/4c/e4/5ba3cecd7ce6236ae4a80f67e5d5531287337d0e1f076ca87a5abe4cd5d0/kiwisolver-1.5.0-cp311-cp311-manylinux_2_39_riscv64.whl", hash = "sha256:01808c6d15f4c3e8559595d6d1fe6411c68e4a3822b4b9972b44473b24f4e679", size = 970949, upload-time = "2026-03-09T13:13:10.299Z" },
- { url = "https://files.pythonhosted.org/packages/5a/69/dc61f7ae9a2f071f26004ced87f078235b5507ab6e5acd78f40365655034/kiwisolver-1.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f1f9f4121ec58628c96baa3de1a55a4e3a333c5102c8e94b64e23bf7b2083309", size = 2199125, upload-time = "2026-03-09T13:13:11.841Z" },
- { url = "https://files.pythonhosted.org/packages/e5/7b/abbe0f1b5afa85f8d084b73e90e5f801c0939eba16ac2e49af7c61a6c28d/kiwisolver-1.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b7d335370ae48a780c6e6a6bbfa97342f563744c39c35562f3f367665f5c1de2", size = 2293783, upload-time = "2026-03-09T13:13:14.399Z" },
- { url = "https://files.pythonhosted.org/packages/8a/80/5908ae149d96d81580d604c7f8aefd0e98f4fd728cf172f477e9f2a81744/kiwisolver-1.5.0-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:800ee55980c18545af444d93fdd60c56b580db5cc54867d8cbf8a1dc0829938c", size = 1960726, upload-time = "2026-03-09T13:13:16.047Z" },
- { url = "https://files.pythonhosted.org/packages/84/08/a78cb776f8c085b7143142ce479859cfec086bd09ee638a317040b6ef420/kiwisolver-1.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:c438f6ca858697c9ab67eb28246c92508af972e114cac34e57a6d4ba17a3ac08", size = 2464738, upload-time = "2026-03-09T13:13:17.897Z" },
- { url = "https://files.pythonhosted.org/packages/b1/e1/65584da5356ed6cb12c63791a10b208860ac40a83de165cb6a6751a686e3/kiwisolver-1.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8c63c91f95173f9c2a67c7c526b2cea976828a0e7fced9cdcead2802dc10f8a4", size = 2270718, upload-time = "2026-03-09T13:13:19.421Z" },
- { url = "https://files.pythonhosted.org/packages/be/6c/28f17390b62b8f2f520e2915095b3c94d88681ecf0041e75389d9667f202/kiwisolver-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:beb7f344487cdcb9e1efe4b7a29681b74d34c08f0043a327a74da852a6749e7b", size = 73480, upload-time = "2026-03-09T13:13:20.818Z" },
- { url = "https://files.pythonhosted.org/packages/d8/0e/2ee5debc4f77a625778fec5501ff3e8036fe361b7ee28ae402a485bb9694/kiwisolver-1.5.0-cp311-cp311-win_arm64.whl", hash = "sha256:ad4ae4ffd1ee9cd11357b4c66b612da9888f4f4daf2f36995eda64bd45370cac", size = 64930, upload-time = "2026-03-09T13:13:21.997Z" },
- { url = "https://files.pythonhosted.org/packages/4d/b2/818b74ebea34dabe6d0c51cb1c572e046730e64844da6ed646d5298c40ce/kiwisolver-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:4e9750bc21b886308024f8a54ccb9a2cc38ac9fa813bf4348434e3d54f337ff9", size = 123158, upload-time = "2026-03-09T13:13:23.127Z" },
- { url = "https://files.pythonhosted.org/packages/bf/d9/405320f8077e8e1c5c4bd6adc45e1e6edf6d727b6da7f2e2533cf58bff71/kiwisolver-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:72ec46b7eba5b395e0a7b63025490d3214c11013f4aacb4f5e8d6c3041829588", size = 66388, upload-time = "2026-03-09T13:13:24.765Z" },
- { url = "https://files.pythonhosted.org/packages/99/9f/795fedf35634f746151ca8839d05681ceb6287fbed6cc1c9bf235f7887c2/kiwisolver-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ed3a984b31da7481b103f68776f7128a89ef26ed40f4dc41a2223cda7fb24819", size = 64068, upload-time = "2026-03-09T13:13:25.878Z" },
- { url = "https://files.pythonhosted.org/packages/c4/13/680c54afe3e65767bed7ec1a15571e1a2f1257128733851ade24abcefbcc/kiwisolver-1.5.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bb5136fb5352d3f422df33f0c879a1b0c204004324150cc3b5e3c4f310c9049f", size = 1477934, upload-time = "2026-03-09T13:13:27.166Z" },
- { url = "https://files.pythonhosted.org/packages/c8/2f/cebfcdb60fd6a9b0f6b47a9337198bcbad6fbe15e68189b7011fd914911f/kiwisolver-1.5.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b2af221f268f5af85e776a73d62b0845fc8baf8ef0abfae79d29c77d0e776aaf", size = 1278537, upload-time = "2026-03-09T13:13:28.707Z" },
- { url = "https://files.pythonhosted.org/packages/f2/0d/9b782923aada3fafb1d6b84e13121954515c669b18af0c26e7d21f579855/kiwisolver-1.5.0-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b0f172dc8ffaccb8522d7c5d899de00133f2f1ca7b0a49b7da98e901de87bf2d", size = 1296685, upload-time = "2026-03-09T13:13:30.528Z" },
- { url = "https://files.pythonhosted.org/packages/27/70/83241b6634b04fe44e892688d5208332bde130f38e610c0418f9ede47ded/kiwisolver-1.5.0-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6ab8ba9152203feec73758dad83af9a0bbe05001eb4639e547207c40cfb52083", size = 1346024, upload-time = "2026-03-09T13:13:32.818Z" },
- { url = "https://files.pythonhosted.org/packages/e4/db/30ed226fb271ae1a6431fc0fe0edffb2efe23cadb01e798caeb9f2ceae8f/kiwisolver-1.5.0-cp312-cp312-manylinux_2_39_riscv64.whl", hash = "sha256:cdee07c4d7f6d72008d3f73b9bf027f4e11550224c7c50d8df1ae4a37c1402a6", size = 987241, upload-time = "2026-03-09T13:13:34.435Z" },
- { url = "https://files.pythonhosted.org/packages/ec/bd/c314595208e4c9587652d50959ead9e461995389664e490f4dce7ff0f782/kiwisolver-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7c60d3c9b06fb23bd9c6139281ccbdc384297579ae037f08ae90c69f6845c0b1", size = 2227742, upload-time = "2026-03-09T13:13:36.4Z" },
- { url = "https://files.pythonhosted.org/packages/c1/43/0499cec932d935229b5543d073c2b87c9c22846aab48881e9d8d6e742a2d/kiwisolver-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:e315e5ec90d88e140f57696ff85b484ff68bb311e36f2c414aa4286293e6dee0", size = 2323966, upload-time = "2026-03-09T13:13:38.204Z" },
- { url = "https://files.pythonhosted.org/packages/3d/6f/79b0d760907965acfd9d61826a3d41f8f093c538f55cd2633d3f0db269f6/kiwisolver-1.5.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:1465387ac63576c3e125e5337a6892b9e99e0627d52317f3ca79e6930d889d15", size = 1977417, upload-time = "2026-03-09T13:13:39.966Z" },
- { url = "https://files.pythonhosted.org/packages/ab/31/01d0537c41cb75a551a438c3c7a80d0c60d60b81f694dac83dd436aec0d0/kiwisolver-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:530a3fd64c87cffa844d4b6b9768774763d9caa299e9b75d8eca6a4423b31314", size = 2491238, upload-time = "2026-03-09T13:13:41.698Z" },
- { url = "https://files.pythonhosted.org/packages/e4/34/8aefdd0be9cfd00a44509251ba864f5caf2991e36772e61c408007e7f417/kiwisolver-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1d9daea4ea6b9be74fe2f01f7fbade8d6ffab263e781274cffca0dba9be9eec9", size = 2294947, upload-time = "2026-03-09T13:13:43.343Z" },
- { url = "https://files.pythonhosted.org/packages/ad/cf/0348374369ca588f8fe9c338fae49fa4e16eeb10ffb3d012f23a54578a9e/kiwisolver-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:f18c2d9782259a6dc132fdc7a63c168cbc74b35284b6d75c673958982a378384", size = 73569, upload-time = "2026-03-09T13:13:45.792Z" },
- { url = "https://files.pythonhosted.org/packages/28/26/192b26196e2316e2bd29deef67e37cdf9870d9af8e085e521afff0fed526/kiwisolver-1.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:f7c7553b13f69c1b29a5bde08ddc6d9d0c8bfb84f9ed01c30db25944aeb852a7", size = 64997, upload-time = "2026-03-09T13:13:46.878Z" },
- { url = "https://files.pythonhosted.org/packages/9d/69/024d6711d5ba575aa65d5538042e99964104e97fa153a9f10bc369182bc2/kiwisolver-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:fd40bb9cd0891c4c3cb1ddf83f8bbfa15731a248fdc8162669405451e2724b09", size = 123166, upload-time = "2026-03-09T13:13:48.032Z" },
- { url = "https://files.pythonhosted.org/packages/ce/48/adbb40df306f587054a348831220812b9b1d787aff714cfbc8556e38fccd/kiwisolver-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c0e1403fd7c26d77c1f03e096dc58a5c726503fa0db0456678b8668f76f521e3", size = 66395, upload-time = "2026-03-09T13:13:49.365Z" },
- { url = "https://files.pythonhosted.org/packages/a8/3a/d0a972b34e1c63e2409413104216cd1caa02c5a37cb668d1687d466c1c45/kiwisolver-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dda366d548e89a90d88a86c692377d18d8bd64b39c1fb2b92cb31370e2896bbd", size = 64065, upload-time = "2026-03-09T13:13:50.562Z" },
- { url = "https://files.pythonhosted.org/packages/2b/0a/7b98e1e119878a27ba8618ca1e18b14f992ff1eda40f47bccccf4de44121/kiwisolver-1.5.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:332b4f0145c30b5f5ad9374881133e5aa64320428a57c2c2b61e9d891a51c2f3", size = 1477903, upload-time = "2026-03-09T13:13:52.084Z" },
- { url = "https://files.pythonhosted.org/packages/18/d8/55638d89ffd27799d5cc3d8aa28e12f4ce7a64d67b285114dbedc8ea4136/kiwisolver-1.5.0-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c50b89ffd3e1a911c69a1dd3de7173c0cd10b130f56222e57898683841e4f96", size = 1278751, upload-time = "2026-03-09T13:13:54.673Z" },
- { url = "https://files.pythonhosted.org/packages/b8/97/b4c8d0d18421ecceba20ad8701358453b88e32414e6f6950b5a4bad54e65/kiwisolver-1.5.0-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4db576bb8c3ef9365f8b40fe0f671644de6736ae2c27a2c62d7d8a1b4329f099", size = 1296793, upload-time = "2026-03-09T13:13:56.287Z" },
- { url = "https://files.pythonhosted.org/packages/c4/10/f862f94b6389d8957448ec9df59450b81bec4abb318805375c401a1e6892/kiwisolver-1.5.0-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0b85aad90cea8ac6797a53b5d5f2e967334fa4d1149f031c4537569972596cb8", size = 1346041, upload-time = "2026-03-09T13:13:58.269Z" },
- { url = "https://files.pythonhosted.org/packages/a3/6a/f1650af35821eaf09de398ec0bc2aefc8f211f0cda50204c9f1673741ba9/kiwisolver-1.5.0-cp313-cp313-manylinux_2_39_riscv64.whl", hash = "sha256:d36ca54cb4c6c4686f7cbb7b817f66f5911c12ddb519450bbe86707155028f87", size = 987292, upload-time = "2026-03-09T13:13:59.871Z" },
- { url = "https://files.pythonhosted.org/packages/de/19/d7fb82984b9238115fe629c915007be608ebd23dc8629703d917dbfaffd4/kiwisolver-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:38f4a703656f493b0ad185211ccfca7f0386120f022066b018eb5296d8613e23", size = 2227865, upload-time = "2026-03-09T13:14:01.401Z" },
- { url = "https://files.pythonhosted.org/packages/7f/b9/46b7f386589fd222dac9e9de9c956ce5bcefe2ee73b4e79891381dda8654/kiwisolver-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3ac2360e93cb41be81121755c6462cff3beaa9967188c866e5fce5cf13170859", size = 2324369, upload-time = "2026-03-09T13:14:02.972Z" },
- { url = "https://files.pythonhosted.org/packages/92/8b/95e237cf3d9c642960153c769ddcbe278f182c8affb20cecc1cc983e7cc5/kiwisolver-1.5.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c95cab08d1965db3d84a121f1c7ce7479bdd4072c9b3dafd8fecce48a2e6b902", size = 1977989, upload-time = "2026-03-09T13:14:04.503Z" },
- { url = "https://files.pythonhosted.org/packages/1b/95/980c9df53501892784997820136c01f62bc1865e31b82b9560f980c0e649/kiwisolver-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fc20894c3d21194d8041a28b65622d5b86db786da6e3cfe73f0c762951a61167", size = 2491645, upload-time = "2026-03-09T13:14:06.106Z" },
- { url = "https://files.pythonhosted.org/packages/cb/32/900647fd0840abebe1561792c6b31e6a7c0e278fc3973d30572a965ca14c/kiwisolver-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7a32f72973f0f950c1920475d5c5ea3d971b81b6f0ec53b8d0a956cc965f22e0", size = 2295237, upload-time = "2026-03-09T13:14:08.891Z" },
- { url = "https://files.pythonhosted.org/packages/be/8a/be60e3bbcf513cc5a50f4a3e88e1dcecebb79c1ad607a7222877becaa101/kiwisolver-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bf3acf1419fa93064a4c2189ac0b58e3be7872bf6ee6177b0d4c63dc4cea276", size = 73573, upload-time = "2026-03-09T13:14:12.327Z" },
- { url = "https://files.pythonhosted.org/packages/4d/d2/64be2e429eb4fca7f7e1c52a91b12663aeaf25de3895e5cca0f47ef2a8d0/kiwisolver-1.5.0-cp313-cp313-win_arm64.whl", hash = "sha256:fa8eb9ecdb7efb0b226acec134e0d709e87a909fa4971a54c0c4f6e88635484c", size = 64998, upload-time = "2026-03-09T13:14:13.469Z" },
- { url = "https://files.pythonhosted.org/packages/b0/69/ce68dd0c85755ae2de490bf015b62f2cea5f6b14ff00a463f9d0774449ff/kiwisolver-1.5.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:db485b3847d182b908b483b2ed133c66d88d49cacf98fd278fadafe11b4478d1", size = 125700, upload-time = "2026-03-09T13:14:14.636Z" },
- { url = "https://files.pythonhosted.org/packages/74/aa/937aac021cf9d4349990d47eb319309a51355ed1dbdc9c077cdc9224cb11/kiwisolver-1.5.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:be12f931839a3bdfe28b584db0e640a65a8bcbc24560ae3fdb025a449b3d754e", size = 67537, upload-time = "2026-03-09T13:14:15.808Z" },
- { url = "https://files.pythonhosted.org/packages/ee/20/3a87fbece2c40ad0f6f0aefa93542559159c5f99831d596050e8afae7a9f/kiwisolver-1.5.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:16b85d37c2cbb3253226d26e64663f755d88a03439a9c47df6246b35defbdfb7", size = 65514, upload-time = "2026-03-09T13:14:18.035Z" },
- { url = "https://files.pythonhosted.org/packages/f0/7f/f943879cda9007c45e1f7dba216d705c3a18d6b35830e488b6c6a4e7cdf0/kiwisolver-1.5.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4432b835675f0ea7414aab3d37d119f7226d24869b7a829caeab49ebda407b0c", size = 1584848, upload-time = "2026-03-09T13:14:19.745Z" },
- { url = "https://files.pythonhosted.org/packages/37/f8/4d4f85cc1870c127c88d950913370dd76138482161cd07eabbc450deff01/kiwisolver-1.5.0-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b0feb50971481a2cc44d94e88bdb02cdd497618252ae226b8eb1201b957e368", size = 1391542, upload-time = "2026-03-09T13:14:21.54Z" },
- { url = "https://files.pythonhosted.org/packages/04/0b/65dd2916c84d252b244bd405303220f729e7c17c9d7d33dca6feeff9ffc4/kiwisolver-1.5.0-cp313-cp313t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:56fa888f10d0f367155e76ce849fa1166fc9730d13bd2d65a2aa13b6f5424489", size = 1404447, upload-time = "2026-03-09T13:14:23.205Z" },
- { url = "https://files.pythonhosted.org/packages/39/5c/2606a373247babce9b1d056c03a04b65f3cf5290a8eac5d7bdead0a17e21/kiwisolver-1.5.0-cp313-cp313t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:940dda65d5e764406b9fb92761cbf462e4e63f712ab60ed98f70552e496f3bf1", size = 1455918, upload-time = "2026-03-09T13:14:24.74Z" },
- { url = "https://files.pythonhosted.org/packages/d5/d1/c6078b5756670658e9192a2ef11e939c92918833d2745f85cd14a6004bdf/kiwisolver-1.5.0-cp313-cp313t-manylinux_2_39_riscv64.whl", hash = "sha256:89fc958c702ee9a745e4700378f5d23fddbc46ff89e8fdbf5395c24d5c1452a3", size = 1072856, upload-time = "2026-03-09T13:14:26.597Z" },
- { url = "https://files.pythonhosted.org/packages/cb/c8/7def6ddf16eb2b3741d8b172bdaa9af882b03c78e9b0772975408801fa63/kiwisolver-1.5.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9027d773c4ff81487181a925945743413f6069634d0b122d0b37684ccf4f1e18", size = 2333580, upload-time = "2026-03-09T13:14:28.237Z" },
- { url = "https://files.pythonhosted.org/packages/9e/87/2ac1fce0eb1e616fcd3c35caa23e665e9b1948bb984f4764790924594128/kiwisolver-1.5.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:5b233ea3e165e43e35dba1d2b8ecc21cf070b45b65ae17dd2747d2713d942021", size = 2423018, upload-time = "2026-03-09T13:14:30.018Z" },
- { url = "https://files.pythonhosted.org/packages/67/13/c6700ccc6cc218716bfcda4935e4b2997039869b4ad8a94f364c5a3b8e63/kiwisolver-1.5.0-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:ce9bf03dad3b46408c08649c6fbd6ca28a9fce0eb32fdfffa6775a13103b5310", size = 2062804, upload-time = "2026-03-09T13:14:32.888Z" },
- { url = "https://files.pythonhosted.org/packages/1b/bd/877056304626943ff0f1f44c08f584300c199b887cb3176cd7e34f1515f1/kiwisolver-1.5.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:fc4d3f1fb9ca0ae9f97b095963bc6326f1dbfd3779d6679a1e016b9baaa153d3", size = 2597482, upload-time = "2026-03-09T13:14:34.971Z" },
- { url = "https://files.pythonhosted.org/packages/75/19/c60626c47bf0f8ac5dcf72c6c98e266d714f2fbbfd50cf6dab5ede3aaa50/kiwisolver-1.5.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f443b4825c50a51ee68585522ab4a1d1257fac65896f282b4c6763337ac9f5d2", size = 2394328, upload-time = "2026-03-09T13:14:36.816Z" },
- { url = "https://files.pythonhosted.org/packages/47/84/6a6d5e5bb8273756c27b7d810d47f7ef2f1f9b9fd23c9ee9a3f8c75c9cef/kiwisolver-1.5.0-cp313-cp313t-win_arm64.whl", hash = "sha256:893ff3a711d1b515ba9da14ee090519bad4610ed1962fbe298a434e8c5f8db53", size = 68410, upload-time = "2026-03-09T13:14:38.695Z" },
- { url = "https://files.pythonhosted.org/packages/e4/d7/060f45052f2a01ad5762c8fdecd6d7a752b43400dc29ff75cd47225a40fd/kiwisolver-1.5.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8df31fe574b8b3993cc61764f40941111b25c2d9fea13d3ce24a49907cd2d615", size = 123231, upload-time = "2026-03-09T13:14:41.323Z" },
- { url = "https://files.pythonhosted.org/packages/c2/a7/78da680eadd06ff35edef6ef68a1ad273bad3e2a0936c9a885103230aece/kiwisolver-1.5.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:1d49a49ac4cbfb7c1375301cd1ec90169dfeae55ff84710d782260ce77a75a02", size = 66489, upload-time = "2026-03-09T13:14:42.534Z" },
- { url = "https://files.pythonhosted.org/packages/49/b2/97980f3ad4fae37dd7fe31626e2bf75fbf8bdf5d303950ec1fab39a12da8/kiwisolver-1.5.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0cbe94b69b819209a62cb27bdfa5dc2a8977d8de2f89dfd97ba4f53ed3af754e", size = 64063, upload-time = "2026-03-09T13:14:44.759Z" },
- { url = "https://files.pythonhosted.org/packages/e7/f9/b06c934a6aa8bc91f566bd2a214fd04c30506c2d9e2b6b171953216a65b6/kiwisolver-1.5.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:80aa065ffd378ff784822a6d7c3212f2d5f5e9c3589614b5c228b311fd3063ac", size = 1475913, upload-time = "2026-03-09T13:14:46.247Z" },
- { url = "https://files.pythonhosted.org/packages/6b/f0/f768ae564a710135630672981231320bc403cf9152b5596ec5289de0f106/kiwisolver-1.5.0-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e7f886f47ab881692f278ae901039a234e4025a68e6dfab514263a0b1c4ae05", size = 1282782, upload-time = "2026-03-09T13:14:48.458Z" },
- { url = "https://files.pythonhosted.org/packages/e2/9f/1de7aad00697325f05238a5f2eafbd487fb637cc27a558b5367a5f37fb7f/kiwisolver-1.5.0-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5060731cc3ed12ca3a8b57acd4aeca5bbc2f49216dd0bec1650a1acd89486bcd", size = 1300815, upload-time = "2026-03-09T13:14:50.721Z" },
- { url = "https://files.pythonhosted.org/packages/5a/c2/297f25141d2e468e0ce7f7a7b92e0cf8918143a0cbd3422c1ad627e85a06/kiwisolver-1.5.0-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7a4aa69609f40fce3cbc3f87b2061f042eee32f94b8f11db707b66a26461591a", size = 1347925, upload-time = "2026-03-09T13:14:52.304Z" },
- { url = "https://files.pythonhosted.org/packages/b9/d3/f4c73a02eb41520c47610207b21afa8cdd18fdbf64ffd94674ae21c4812d/kiwisolver-1.5.0-cp314-cp314-manylinux_2_39_riscv64.whl", hash = "sha256:d168fda2dbff7b9b5f38e693182d792a938c31db4dac3a80a4888de603c99554", size = 991322, upload-time = "2026-03-09T13:14:54.637Z" },
- { url = "https://files.pythonhosted.org/packages/7b/46/d3f2efef7732fcda98d22bf4ad5d3d71d545167a852ca710a494f4c15343/kiwisolver-1.5.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:413b820229730d358efd838ecbab79902fe97094565fdc80ddb6b0a18c18a581", size = 2232857, upload-time = "2026-03-09T13:14:56.471Z" },
- { url = "https://files.pythonhosted.org/packages/3f/ec/2d9756bf2b6d26ae4349b8d3662fb3993f16d80c1f971c179ce862b9dbae/kiwisolver-1.5.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:5124d1ea754509b09e53738ec185584cc609aae4a3b510aaf4ed6aa047ef9303", size = 2329376, upload-time = "2026-03-09T13:14:58.072Z" },
- { url = "https://files.pythonhosted.org/packages/8f/9f/876a0a0f2260f1bde92e002b3019a5fabc35e0939c7d945e0fa66185eb20/kiwisolver-1.5.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:e4415a8db000bf49a6dd1c478bf70062eaacff0f462b92b0ba68791a905861f9", size = 1982549, upload-time = "2026-03-09T13:14:59.668Z" },
- { url = "https://files.pythonhosted.org/packages/6c/4f/ba3624dfac23a64d54ac4179832860cb537c1b0af06024936e82ca4154a0/kiwisolver-1.5.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:d618fd27420381a4f6044faa71f46d8bfd911bd077c555f7138ed88729bfbe79", size = 2494680, upload-time = "2026-03-09T13:15:01.364Z" },
- { url = "https://files.pythonhosted.org/packages/39/b7/97716b190ab98911b20d10bf92eca469121ec483b8ce0edd314f51bc85af/kiwisolver-1.5.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5092eb5b1172947f57d6ea7d89b2f29650414e4293c47707eb499ec07a0ac796", size = 2297905, upload-time = "2026-03-09T13:15:03.925Z" },
- { url = "https://files.pythonhosted.org/packages/a3/36/4e551e8aa55c9188bca9abb5096805edbf7431072b76e2298e34fd3a3008/kiwisolver-1.5.0-cp314-cp314-win_amd64.whl", hash = "sha256:d76e2d8c75051d58177e762164d2e9ab92886534e3a12e795f103524f221dd8e", size = 75086, upload-time = "2026-03-09T13:15:07.775Z" },
- { url = "https://files.pythonhosted.org/packages/70/15/9b90f7df0e31a003c71649cf66ef61c3c1b862f48c81007fa2383c8bd8d7/kiwisolver-1.5.0-cp314-cp314-win_arm64.whl", hash = "sha256:fa6248cd194edff41d7ea9425ced8ca3a6f838bfb295f6f1d6e6bb694a8518df", size = 66577, upload-time = "2026-03-09T13:15:09.139Z" },
- { url = "https://files.pythonhosted.org/packages/17/01/7dc8c5443ff42b38e72731643ed7cf1ed9bf01691ae5cdca98501999ed83/kiwisolver-1.5.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:d1ffeb80b5676463d7a7d56acbe8e37a20ce725570e09549fe738e02ca6b7e1e", size = 125794, upload-time = "2026-03-09T13:15:10.525Z" },
- { url = "https://files.pythonhosted.org/packages/46/8a/b4ebe46ebaac6a303417fab10c2e165c557ddaff558f9699d302b256bc53/kiwisolver-1.5.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:bc4d8e252f532ab46a1de9349e2d27b91fce46736a9eedaa37beaca66f574ed4", size = 67646, upload-time = "2026-03-09T13:15:12.016Z" },
- { url = "https://files.pythonhosted.org/packages/60/35/10a844afc5f19d6f567359bf4789e26661755a2f36200d5d1ed8ad0126e5/kiwisolver-1.5.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6783e069732715ad0c3ce96dbf21dbc2235ab0593f2baf6338101f70371f4028", size = 65511, upload-time = "2026-03-09T13:15:13.311Z" },
- { url = "https://files.pythonhosted.org/packages/f8/8a/685b297052dd041dcebce8e8787b58923b6e78acc6115a0dc9189011c44b/kiwisolver-1.5.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e7c4c09a490dc4d4a7f8cbee56c606a320f9dc28cf92a7157a39d1ce7676a657", size = 1584858, upload-time = "2026-03-09T13:15:15.103Z" },
- { url = "https://files.pythonhosted.org/packages/9e/80/04865e3d4638ac5bddec28908916df4a3075b8c6cc101786a96803188b96/kiwisolver-1.5.0-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2a075bd7bd19c70cf67c8badfa36cf7c5d8de3c9ddb8420c51e10d9c50e94920", size = 1392539, upload-time = "2026-03-09T13:15:16.661Z" },
- { url = "https://files.pythonhosted.org/packages/ba/01/77a19cacc0893fa13fafa46d1bba06fb4dc2360b3292baf4b56d8e067b24/kiwisolver-1.5.0-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:bdd3e53429ff02aa319ba59dfe4ceeec345bf46cf180ec2cf6fd5b942e7975e9", size = 1405310, upload-time = "2026-03-09T13:15:18.229Z" },
- { url = "https://files.pythonhosted.org/packages/53/39/bcaf5d0cca50e604cfa9b4e3ae1d64b50ca1ae5b754122396084599ef903/kiwisolver-1.5.0-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cdcb35dc9d807259c981a85531048ede628eabcffb3239adf3d17463518992d", size = 1456244, upload-time = "2026-03-09T13:15:20.444Z" },
- { url = "https://files.pythonhosted.org/packages/d0/7a/72c187abc6975f6978c3e39b7cf67aeb8b3c0a8f9790aa7fd412855e9e1f/kiwisolver-1.5.0-cp314-cp314t-manylinux_2_39_riscv64.whl", hash = "sha256:70d593af6a6ca332d1df73d519fddb5148edb15cd90d5f0155e3746a6d4fcc65", size = 1073154, upload-time = "2026-03-09T13:15:22.039Z" },
- { url = "https://files.pythonhosted.org/packages/c7/ca/cf5b25783ebbd59143b4371ed0c8428a278abe68d6d0104b01865b1bbd0f/kiwisolver-1.5.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:377815a8616074cabbf3f53354e1d040c35815a134e01d7614b7692e4bf8acfa", size = 2334377, upload-time = "2026-03-09T13:15:23.741Z" },
- { url = "https://files.pythonhosted.org/packages/4a/e5/b1f492adc516796e88751282276745340e2a72dcd0d36cf7173e0daf3210/kiwisolver-1.5.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:0255a027391d52944eae1dbb5d4cc5903f57092f3674e8e544cdd2622826b3f0", size = 2425288, upload-time = "2026-03-09T13:15:25.789Z" },
- { url = "https://files.pythonhosted.org/packages/e6/e5/9b21fbe91a61b8f409d74a26498706e97a48008bfcd1864373d32a6ba31c/kiwisolver-1.5.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:012b1eb16e28718fa782b5e61dc6f2da1f0792ca73bd05d54de6cb9561665fc9", size = 2063158, upload-time = "2026-03-09T13:15:27.63Z" },
- { url = "https://files.pythonhosted.org/packages/b1/02/83f47986138310f95ea95531f851b2a62227c11cbc3e690ae1374fe49f0f/kiwisolver-1.5.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:0e3aafb33aed7479377e5e9a82e9d4bf87063741fc99fc7ae48b0f16e32bdd6f", size = 2597260, upload-time = "2026-03-09T13:15:29.421Z" },
- { url = "https://files.pythonhosted.org/packages/07/18/43a5f24608d8c313dd189cf838c8e68d75b115567c6279de7796197cfb6a/kiwisolver-1.5.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e7a116ae737f0000343218c4edf5bd45893bfeaff0993c0b215d7124c9f77646", size = 2394403, upload-time = "2026-03-09T13:15:31.517Z" },
- { url = "https://files.pythonhosted.org/packages/3b/b5/98222136d839b8afabcaa943b09bd05888c2d36355b7e448550211d1fca4/kiwisolver-1.5.0-cp314-cp314t-win_amd64.whl", hash = "sha256:1dd9b0b119a350976a6d781e7278ec7aca0b201e1a9e2d23d9804afecb6ca681", size = 79687, upload-time = "2026-03-09T13:15:33.204Z" },
- { url = "https://files.pythonhosted.org/packages/99/a2/ca7dc962848040befed12732dff6acae7fb3c4f6fc4272b3f6c9a30b8713/kiwisolver-1.5.0-cp314-cp314t-win_arm64.whl", hash = "sha256:58f812017cd2985c21fbffb4864d59174d4903dd66fa23815e74bbc7a0e2dd57", size = 70032, upload-time = "2026-03-09T13:15:34.411Z" },
- { url = "https://files.pythonhosted.org/packages/1c/fa/2910df836372d8761bb6eff7d8bdcb1613b5c2e03f260efe7abe34d388a7/kiwisolver-1.5.0-graalpy312-graalpy250_312_native-macosx_10_13_x86_64.whl", hash = "sha256:5ae8e62c147495b01a0f4765c878e9bfdf843412446a247e28df59936e99e797", size = 130262, upload-time = "2026-03-09T13:15:35.629Z" },
- { url = "https://files.pythonhosted.org/packages/0f/41/c5f71f9f00aabcc71fee8b7475e3f64747282580c2fe748961ba29b18385/kiwisolver-1.5.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:f6764a4ccab3078db14a632420930f6186058750df066b8ea2a7106df91d3203", size = 138036, upload-time = "2026-03-09T13:15:36.894Z" },
- { url = "https://files.pythonhosted.org/packages/fa/06/7399a607f434119c6e1fdc8ec89a8d51ccccadf3341dee4ead6bd14caaf5/kiwisolver-1.5.0-graalpy312-graalpy250_312_native-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c31c13da98624f957b0fb1b5bae5383b2333c2c3f6793d9825dd5ce79b525cb7", size = 194295, upload-time = "2026-03-09T13:15:38.22Z" },
- { url = "https://files.pythonhosted.org/packages/b5/91/53255615acd2a1eaca307ede3c90eb550bae9c94581f8c00081b6b1c8f44/kiwisolver-1.5.0-graalpy312-graalpy250_312_native-win_amd64.whl", hash = "sha256:1f1489f769582498610e015a8ef2d36f28f505ab3096d0e16b4858a9ec214f57", size = 75987, upload-time = "2026-03-09T13:15:39.65Z" },
- { url = "https://files.pythonhosted.org/packages/17/6f/6fd4f690a40c2582fa34b97d2678f718acf3706b91d270c65ecb455d0a06/kiwisolver-1.5.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:295d9ffe712caa9f8a3081de8d32fc60191b4b51c76f02f951fd8407253528f4", size = 59606, upload-time = "2026-03-09T13:15:40.81Z" },
- { url = "https://files.pythonhosted.org/packages/82/a0/2355d5e3b338f13ce63f361abb181e3b6ea5fffdb73f739b3e80efa76159/kiwisolver-1.5.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:51e8c4084897de9f05898c2c2a39af6318044ae969d46ff7a34ed3f96274adca", size = 57537, upload-time = "2026-03-09T13:15:42.071Z" },
- { url = "https://files.pythonhosted.org/packages/c8/b9/1d50e610ecadebe205b71d6728fd224ce0e0ca6aba7b9cbe1da049203ac5/kiwisolver-1.5.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b83af57bdddef03c01a9138034c6ff03181a3028d9a1003b301eb1a55e161a3f", size = 79888, upload-time = "2026-03-09T13:15:43.317Z" },
- { url = "https://files.pythonhosted.org/packages/cd/ee/b85ffcd75afed0357d74f0e6fc02a4507da441165de1ca4760b9f496390d/kiwisolver-1.5.0-pp310-pypy310_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf4679a3d71012a7c2bf360e5cd878fbd5e4fcac0896b56393dec239d81529ed", size = 77584, upload-time = "2026-03-09T13:15:44.605Z" },
- { url = "https://files.pythonhosted.org/packages/6b/dd/644d0dde6010a8583b4cd66dd41c5f83f5325464d15c4f490b3340ab73b4/kiwisolver-1.5.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:41024ed50e44ab1a60d3fe0a9d15a4ccc9f5f2b1d814ff283c8d01134d5b81bc", size = 73390, upload-time = "2026-03-09T13:15:45.832Z" },
- { url = "https://files.pythonhosted.org/packages/e9/eb/5fcbbbf9a0e2c3a35effb88831a483345326bbc3a030a3b5b69aee647f84/kiwisolver-1.5.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ec4c85dc4b687c7f7f15f553ff26a98bfe8c58f5f7f0ac8905f0ba4c7be60232", size = 59532, upload-time = "2026-03-09T13:15:47.047Z" },
- { url = "https://files.pythonhosted.org/packages/c3/9b/e17104555bb4db148fd52327feea1e96be4b88e8e008b029002c281a21ab/kiwisolver-1.5.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:12e91c215a96e39f57989c8912ae761286ac5a9584d04030ceb3368a357f017a", size = 57420, upload-time = "2026-03-09T13:15:48.199Z" },
- { url = "https://files.pythonhosted.org/packages/48/44/2b5b95b7aa39fb2d8d9d956e0f3d5d45aef2ae1d942d4c3ffac2f9cfed1a/kiwisolver-1.5.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:be4a51a55833dc29ab5d7503e7bcb3b3af3402d266018137127450005cdfe737", size = 79892, upload-time = "2026-03-09T13:15:49.694Z" },
- { url = "https://files.pythonhosted.org/packages/52/7d/7157f9bba6b455cfb4632ed411e199fc8b8977642c2b12082e1bd9e6d173/kiwisolver-1.5.0-pp311-pypy311_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:daae526907e262de627d8f70058a0f64acc9e2641c164c99c8f594b34a799a16", size = 77603, upload-time = "2026-03-09T13:15:50.945Z" },
- { url = "https://files.pythonhosted.org/packages/0a/dd/8050c947d435c8d4bc94e3252f4d8bb8a76cfb424f043a8680be637a57f1/kiwisolver-1.5.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:59cd8683f575d96df5bb48f6add94afc055012c29e28124fcae2b63661b9efb1", size = 73558, upload-time = "2026-03-09T13:15:52.112Z" },
-]
-
-[[package]]
-name = "lazy-loader"
-version = "0.5"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "packaging" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/49/ac/21a1f8aa3777f5658576777ea76bfb124b702c520bbe90edf4ae9915eafa/lazy_loader-0.5.tar.gz", hash = "sha256:717f9179a0dbed357012ddad50a5ad3d5e4d9a0b8712680d4e687f5e6e6ed9b3", size = 15294, upload-time = "2026-03-06T15:45:09.054Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/8a/a1/8d812e53a5da1687abb10445275d41a8b13adb781bbf7196ddbcf8d88505/lazy_loader-0.5-py3-none-any.whl", hash = "sha256:ab0ea149e9c554d4ffeeb21105ac60bed7f3b4fd69b1d2360a4add51b170b005", size = 8044, upload-time = "2026-03-06T15:45:07.668Z" },
-]
-
-[[package]]
-name = "librosa"
-version = "0.11.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "audioread" },
- { name = "decorator" },
- { name = "joblib" },
- { name = "lazy-loader" },
- { name = "msgpack" },
- { name = "numba", version = "0.60.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
- { name = "numba", version = "0.64.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" },
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
- { name = "numpy", version = "2.4.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
- { name = "pooch" },
- { name = "scikit-learn", version = "1.6.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
- { name = "scikit-learn", version = "1.8.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
- { name = "scipy", version = "1.13.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
- { name = "scipy", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
- { name = "soundfile" },
- { name = "soxr" },
- { name = "standard-aifc", marker = "python_full_version >= '3.13'" },
- { name = "standard-sunau", marker = "python_full_version >= '3.13'" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/64/36/360b5aafa0238e29758729e9486c6ed92a6f37fa403b7875e06c115cdf4a/librosa-0.11.0.tar.gz", hash = "sha256:f5ed951ca189b375bbe2e33b2abd7e040ceeee302b9bbaeeffdfddb8d0ace908", size = 327001, upload-time = "2025-03-11T15:09:54.884Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/b5/ba/c63c5786dfee4c3417094c4b00966e61e4a63efecee22cb7b4c0387dda83/librosa-0.11.0-py3-none-any.whl", hash = "sha256:0b6415c4fd68bff4c29288abe67c6d80b587e0e1e2cfb0aad23e4559504a7fa1", size = 260749, upload-time = "2025-03-11T15:09:52.982Z" },
-]
-
-[[package]]
-name = "llvmlite"
-version = "0.43.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/9f/3d/f513755f285db51ab363a53e898b85562e950f79a2e6767a364530c2f645/llvmlite-0.43.0.tar.gz", hash = "sha256:ae2b5b5c3ef67354824fb75517c8db5fbe93bc02cd9671f3c62271626bc041d5", size = 157069, upload-time = "2024-06-13T18:09:32.641Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/23/ff/6ca7e98998b573b4bd6566f15c35e5c8bea829663a6df0c7aa55ab559da9/llvmlite-0.43.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a289af9a1687c6cf463478f0fa8e8aa3b6fb813317b0d70bf1ed0759eab6f761", size = 31064408, upload-time = "2024-06-13T18:08:13.462Z" },
- { url = "https://files.pythonhosted.org/packages/ca/5c/a27f9257f86f0cda3f764ff21d9f4217b9f6a0d45e7a39ecfa7905f524ce/llvmlite-0.43.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d4fd101f571a31acb1559ae1af30f30b1dc4b3186669f92ad780e17c81e91bc", size = 28793153, upload-time = "2024-06-13T18:08:17.336Z" },
- { url = "https://files.pythonhosted.org/packages/7e/3c/4410f670ad0a911227ea2ecfcba9f672a77cf1924df5280c4562032ec32d/llvmlite-0.43.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d434ec7e2ce3cc8f452d1cd9a28591745de022f931d67be688a737320dfcead", size = 42857276, upload-time = "2024-06-13T18:08:21.071Z" },
- { url = "https://files.pythonhosted.org/packages/c6/21/2ffbab5714e72f2483207b4a1de79b2eecd9debbf666ff4e7067bcc5c134/llvmlite-0.43.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6912a87782acdff6eb8bf01675ed01d60ca1f2551f8176a300a886f09e836a6a", size = 43871781, upload-time = "2024-06-13T18:08:26.32Z" },
- { url = "https://files.pythonhosted.org/packages/f2/26/b5478037c453554a61625ef1125f7e12bb1429ae11c6376f47beba9b0179/llvmlite-0.43.0-cp310-cp310-win_amd64.whl", hash = "sha256:14f0e4bf2fd2d9a75a3534111e8ebeb08eda2f33e9bdd6dfa13282afacdde0ed", size = 28123487, upload-time = "2024-06-13T18:08:30.348Z" },
- { url = "https://files.pythonhosted.org/packages/95/8c/de3276d773ab6ce3ad676df5fab5aac19696b2956319d65d7dd88fb10f19/llvmlite-0.43.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3e8d0618cb9bfe40ac38a9633f2493d4d4e9fcc2f438d39a4e854f39cc0f5f98", size = 31064409, upload-time = "2024-06-13T18:08:34.006Z" },
- { url = "https://files.pythonhosted.org/packages/ee/e1/38deed89ced4cf378c61e232265cfe933ccde56ae83c901aa68b477d14b1/llvmlite-0.43.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0a9a1a39d4bf3517f2af9d23d479b4175ead205c592ceeb8b89af48a327ea57", size = 28793149, upload-time = "2024-06-13T18:08:37.42Z" },
- { url = "https://files.pythonhosted.org/packages/2f/b2/4429433eb2dc8379e2cb582502dca074c23837f8fd009907f78a24de4c25/llvmlite-0.43.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1da416ab53e4f7f3bc8d4eeba36d801cc1894b9fbfbf2022b29b6bad34a7df2", size = 42857277, upload-time = "2024-06-13T18:08:40.822Z" },
- { url = "https://files.pythonhosted.org/packages/6b/99/5d00a7d671b1ba1751fc9f19d3b36f3300774c6eebe2bcdb5f6191763eb4/llvmlite-0.43.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977525a1e5f4059316b183fb4fd34fa858c9eade31f165427a3977c95e3ee749", size = 43871781, upload-time = "2024-06-13T18:08:46.41Z" },
- { url = "https://files.pythonhosted.org/packages/20/ab/ed5ed3688c6ba4f0b8d789da19fd8e30a9cf7fc5852effe311bc5aefe73e/llvmlite-0.43.0-cp311-cp311-win_amd64.whl", hash = "sha256:d5bd550001d26450bd90777736c69d68c487d17bf371438f975229b2b8241a91", size = 28107433, upload-time = "2024-06-13T18:08:50.834Z" },
- { url = "https://files.pythonhosted.org/packages/0b/67/9443509e5d2b6d8587bae3ede5598fa8bd586b1c7701696663ea8af15b5b/llvmlite-0.43.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f99b600aa7f65235a5a05d0b9a9f31150c390f31261f2a0ba678e26823ec38f7", size = 31064409, upload-time = "2024-06-13T18:08:54.375Z" },
- { url = "https://files.pythonhosted.org/packages/a2/9c/24139d3712d2d352e300c39c0e00d167472c08b3bd350c3c33d72c88ff8d/llvmlite-0.43.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:35d80d61d0cda2d767f72de99450766250560399edc309da16937b93d3b676e7", size = 28793145, upload-time = "2024-06-13T18:08:57.953Z" },
- { url = "https://files.pythonhosted.org/packages/bf/f1/4c205a48488e574ee9f6505d50e84370a978c90f08dab41a42d8f2c576b6/llvmlite-0.43.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eccce86bba940bae0d8d48ed925f21dbb813519169246e2ab292b5092aba121f", size = 42857276, upload-time = "2024-06-13T18:09:02.067Z" },
- { url = "https://files.pythonhosted.org/packages/00/5f/323c4d56e8401c50185fd0e875fcf06b71bf825a863699be1eb10aa2a9cb/llvmlite-0.43.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df6509e1507ca0760787a199d19439cc887bfd82226f5af746d6977bd9f66844", size = 43871781, upload-time = "2024-06-13T18:09:06.667Z" },
- { url = "https://files.pythonhosted.org/packages/c6/94/dea10e263655ce78d777e78d904903faae39d1fc440762be4a9dc46bed49/llvmlite-0.43.0-cp312-cp312-win_amd64.whl", hash = "sha256:7a2872ee80dcf6b5dbdc838763d26554c2a18aa833d31a2635bff16aafefb9c9", size = 28107442, upload-time = "2024-06-13T18:09:10.709Z" },
- { url = "https://files.pythonhosted.org/packages/2a/73/12925b1bbb3c2beb6d96f892ef5b4d742c34f00ddb9f4a125e9e87b22f52/llvmlite-0.43.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9cd2a7376f7b3367019b664c21f0c61766219faa3b03731113ead75107f3b66c", size = 31064410, upload-time = "2024-06-13T18:09:14.091Z" },
- { url = "https://files.pythonhosted.org/packages/cc/61/58c70aa0808a8cba825a7d98cc65bef4801b99328fba80837bfcb5fc767f/llvmlite-0.43.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18e9953c748b105668487b7c81a3e97b046d8abf95c4ddc0cd3c94f4e4651ae8", size = 28793145, upload-time = "2024-06-13T18:09:17.531Z" },
- { url = "https://files.pythonhosted.org/packages/c8/c6/9324eb5de2ba9d99cbed853d85ba7a318652a48e077797bec27cf40f911d/llvmlite-0.43.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74937acd22dc11b33946b67dca7680e6d103d6e90eeaaaf932603bec6fe7b03a", size = 42857276, upload-time = "2024-06-13T18:09:21.377Z" },
- { url = "https://files.pythonhosted.org/packages/e0/d0/889e9705107db7b1ec0767b03f15d7b95b4c4f9fdf91928ab1c7e9ffacf6/llvmlite-0.43.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc9efc739cc6ed760f795806f67889923f7274276f0eb45092a1473e40d9b867", size = 43871777, upload-time = "2024-06-13T18:09:25.76Z" },
- { url = "https://files.pythonhosted.org/packages/df/41/73cc26a2634b538cfe813f618c91e7e9960b8c163f8f0c94a2b0f008b9da/llvmlite-0.43.0-cp39-cp39-win_amd64.whl", hash = "sha256:47e147cdda9037f94b399bf03bfd8a6b6b1f2f90be94a454e3386f006455a9b4", size = 28123489, upload-time = "2024-06-13T18:09:29.78Z" },
-]
-
-[[package]]
-name = "llvmlite"
-version = "0.46.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/74/cd/08ae687ba099c7e3d21fe2ea536500563ef1943c5105bf6ab4ee3829f68e/llvmlite-0.46.0.tar.gz", hash = "sha256:227c9fd6d09dce2783c18b754b7cd9d9b3b3515210c46acc2d3c5badd9870ceb", size = 193456, upload-time = "2025-12-08T18:15:36.295Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/3d/a4/3959e1c61c5ca9db7921e5fd115b344c29b9d57a5dadd87bef97963ca1a5/llvmlite-0.46.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4323177e936d61ae0f73e653e2e614284d97d14d5dd12579adc92b6c2b0597b0", size = 37232766, upload-time = "2025-12-08T18:14:34.765Z" },
- { url = "https://files.pythonhosted.org/packages/c2/a5/a4d916f1015106e1da876028606a8e87fd5d5c840f98c87bc2d5153b6a2f/llvmlite-0.46.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0a2d461cb89537b7c20feb04c46c32e12d5ad4f0896c9dfc0f60336219ff248e", size = 56275176, upload-time = "2025-12-08T18:14:37.944Z" },
- { url = "https://files.pythonhosted.org/packages/79/7f/a7f2028805dac8c1a6fae7bda4e739b7ebbcd45b29e15bf6d21556fcd3d5/llvmlite-0.46.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b1f6595a35b7b39c3518b85a28bf18f45e075264e4b2dce3f0c2a4f232b4a910", size = 55128629, upload-time = "2025-12-08T18:14:41.674Z" },
- { url = "https://files.pythonhosted.org/packages/b2/bc/4689e1ba0c073c196b594471eb21be0aa51d9e64b911728aa13cd85ef0ae/llvmlite-0.46.0-cp310-cp310-win_amd64.whl", hash = "sha256:e7a34d4aa6f9a97ee006b504be6d2b8cb7f755b80ab2f344dda1ef992f828559", size = 38138651, upload-time = "2025-12-08T18:14:45.845Z" },
- { url = "https://files.pythonhosted.org/packages/7a/a1/2ad4b2367915faeebe8447f0a057861f646dbf5fbbb3561db42c65659cf3/llvmlite-0.46.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:82f3d39b16f19aa1a56d5fe625883a6ab600d5cc9ea8906cca70ce94cabba067", size = 37232766, upload-time = "2025-12-08T18:14:48.836Z" },
- { url = "https://files.pythonhosted.org/packages/12/b5/99cf8772fdd846c07da4fd70f07812a3c8fd17ea2409522c946bb0f2b277/llvmlite-0.46.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a3df43900119803bbc52720e758c76f316a9a0f34612a886862dfe0a5591a17e", size = 56275175, upload-time = "2025-12-08T18:14:51.604Z" },
- { url = "https://files.pythonhosted.org/packages/38/f2/ed806f9c003563732da156139c45d970ee435bd0bfa5ed8de87ba972b452/llvmlite-0.46.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de183fefc8022d21b0aa37fc3e90410bc3524aed8617f0ff76732fc6c3af5361", size = 55128630, upload-time = "2025-12-08T18:14:55.107Z" },
- { url = "https://files.pythonhosted.org/packages/19/0c/8f5a37a65fc9b7b17408508145edd5f86263ad69c19d3574e818f533a0eb/llvmlite-0.46.0-cp311-cp311-win_amd64.whl", hash = "sha256:e8b10bc585c58bdffec9e0c309bb7d51be1f2f15e169a4b4d42f2389e431eb93", size = 38138652, upload-time = "2025-12-08T18:14:58.171Z" },
- { url = "https://files.pythonhosted.org/packages/2b/f8/4db016a5e547d4e054ff2f3b99203d63a497465f81ab78ec8eb2ff7b2304/llvmlite-0.46.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6b9588ad4c63b4f0175a3984b85494f0c927c6b001e3a246a3a7fb3920d9a137", size = 37232767, upload-time = "2025-12-08T18:15:00.737Z" },
- { url = "https://files.pythonhosted.org/packages/aa/85/4890a7c14b4fa54400945cb52ac3cd88545bbdb973c440f98ca41591cdc5/llvmlite-0.46.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3535bd2bb6a2d7ae4012681ac228e5132cdb75fefb1bcb24e33f2f3e0c865ed4", size = 56275176, upload-time = "2025-12-08T18:15:03.936Z" },
- { url = "https://files.pythonhosted.org/packages/6a/07/3d31d39c1a1a08cd5337e78299fca77e6aebc07c059fbd0033e3edfab45c/llvmlite-0.46.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4cbfd366e60ff87ea6cc62f50bc4cd800ebb13ed4c149466f50cf2163a473d1e", size = 55128630, upload-time = "2025-12-08T18:15:07.196Z" },
- { url = "https://files.pythonhosted.org/packages/2a/6b/d139535d7590a1bba1ceb68751bef22fadaa5b815bbdf0e858e3875726b2/llvmlite-0.46.0-cp312-cp312-win_amd64.whl", hash = "sha256:398b39db462c39563a97b912d4f2866cd37cba60537975a09679b28fbbc0fb38", size = 38138940, upload-time = "2025-12-08T18:15:10.162Z" },
- { url = "https://files.pythonhosted.org/packages/e6/ff/3eba7eb0aed4b6fca37125387cd417e8c458e750621fce56d2c541f67fa8/llvmlite-0.46.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:30b60892d034bc560e0ec6654737aaa74e5ca327bd8114d82136aa071d611172", size = 37232767, upload-time = "2025-12-08T18:15:13.22Z" },
- { url = "https://files.pythonhosted.org/packages/0e/54/737755c0a91558364b9200702c3c9c15d70ed63f9b98a2c32f1c2aa1f3ba/llvmlite-0.46.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6cc19b051753368a9c9f31dc041299059ee91aceec81bd57b0e385e5d5bf1a54", size = 56275176, upload-time = "2025-12-08T18:15:16.339Z" },
- { url = "https://files.pythonhosted.org/packages/e6/91/14f32e1d70905c1c0aa4e6609ab5d705c3183116ca02ac6df2091868413a/llvmlite-0.46.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bca185892908f9ede48c0acd547fe4dc1bafefb8a4967d47db6cf664f9332d12", size = 55128629, upload-time = "2025-12-08T18:15:19.493Z" },
- { url = "https://files.pythonhosted.org/packages/4a/a7/d526ae86708cea531935ae777b6dbcabe7db52718e6401e0fb9c5edea80e/llvmlite-0.46.0-cp313-cp313-win_amd64.whl", hash = "sha256:67438fd30e12349ebb054d86a5a1a57fd5e87d264d2451bcfafbbbaa25b82a35", size = 38138941, upload-time = "2025-12-08T18:15:22.536Z" },
- { url = "https://files.pythonhosted.org/packages/95/ae/af0ffb724814cc2ea64445acad05f71cff5f799bb7efb22e47ee99340dbc/llvmlite-0.46.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:d252edfb9f4ac1fcf20652258e3f102b26b03eef738dc8a6ffdab7d7d341d547", size = 37232768, upload-time = "2025-12-08T18:15:25.055Z" },
- { url = "https://files.pythonhosted.org/packages/c9/19/5018e5352019be753b7b07f7759cdabb69ca5779fea2494be8839270df4c/llvmlite-0.46.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:379fdd1c59badeff8982cb47e4694a6143bec3bb49aa10a466e095410522064d", size = 56275173, upload-time = "2025-12-08T18:15:28.109Z" },
- { url = "https://files.pythonhosted.org/packages/9f/c9/d57877759d707e84c082163c543853245f91b70c804115a5010532890f18/llvmlite-0.46.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2e8cbfff7f6db0fa2c771ad24154e2a7e457c2444d7673e6de06b8b698c3b269", size = 55128628, upload-time = "2025-12-08T18:15:31.098Z" },
- { url = "https://files.pythonhosted.org/packages/30/a8/e61a8c2b3cc7a597073d9cde1fcbb567e9d827f1db30c93cf80422eac70d/llvmlite-0.46.0-cp314-cp314-win_amd64.whl", hash = "sha256:7821eda3ec1f18050f981819756631d60b6d7ab1a6cf806d9efefbe3f4082d61", size = 39153056, upload-time = "2025-12-08T18:15:33.938Z" },
-]
-
-[[package]]
-name = "markupsafe"
-version = "3.0.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e8/4b/3541d44f3937ba468b75da9eebcae497dcf67adb65caa16760b0a6807ebb/markupsafe-3.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559", size = 11631, upload-time = "2025-09-27T18:36:05.558Z" },
- { url = "https://files.pythonhosted.org/packages/98/1b/fbd8eed11021cabd9226c37342fa6ca4e8a98d8188a8d9b66740494960e4/markupsafe-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419", size = 12057, upload-time = "2025-09-27T18:36:07.165Z" },
- { url = "https://files.pythonhosted.org/packages/40/01/e560d658dc0bb8ab762670ece35281dec7b6c1b33f5fbc09ebb57a185519/markupsafe-3.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695", size = 22050, upload-time = "2025-09-27T18:36:08.005Z" },
- { url = "https://files.pythonhosted.org/packages/af/cd/ce6e848bbf2c32314c9b237839119c5a564a59725b53157c856e90937b7a/markupsafe-3.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591", size = 20681, upload-time = "2025-09-27T18:36:08.881Z" },
- { url = "https://files.pythonhosted.org/packages/c9/2a/b5c12c809f1c3045c4d580b035a743d12fcde53cf685dbc44660826308da/markupsafe-3.0.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c", size = 20705, upload-time = "2025-09-27T18:36:10.131Z" },
- { url = "https://files.pythonhosted.org/packages/cf/e3/9427a68c82728d0a88c50f890d0fc072a1484de2f3ac1ad0bfc1a7214fd5/markupsafe-3.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f", size = 21524, upload-time = "2025-09-27T18:36:11.324Z" },
- { url = "https://files.pythonhosted.org/packages/bc/36/23578f29e9e582a4d0278e009b38081dbe363c5e7165113fad546918a232/markupsafe-3.0.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6", size = 20282, upload-time = "2025-09-27T18:36:12.573Z" },
- { url = "https://files.pythonhosted.org/packages/56/21/dca11354e756ebd03e036bd8ad58d6d7168c80ce1fe5e75218e4945cbab7/markupsafe-3.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1", size = 20745, upload-time = "2025-09-27T18:36:13.504Z" },
- { url = "https://files.pythonhosted.org/packages/87/99/faba9369a7ad6e4d10b6a5fbf71fa2a188fe4a593b15f0963b73859a1bbd/markupsafe-3.0.3-cp310-cp310-win32.whl", hash = "sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa", size = 14571, upload-time = "2025-09-27T18:36:14.779Z" },
- { url = "https://files.pythonhosted.org/packages/d6/25/55dc3ab959917602c96985cb1253efaa4ff42f71194bddeb61eb7278b8be/markupsafe-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8", size = 15056, upload-time = "2025-09-27T18:36:16.125Z" },
- { url = "https://files.pythonhosted.org/packages/d0/9e/0a02226640c255d1da0b8d12e24ac2aa6734da68bff14c05dd53b94a0fc3/markupsafe-3.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1", size = 13932, upload-time = "2025-09-27T18:36:17.311Z" },
- { url = "https://files.pythonhosted.org/packages/08/db/fefacb2136439fc8dd20e797950e749aa1f4997ed584c62cfb8ef7c2be0e/markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad", size = 11631, upload-time = "2025-09-27T18:36:18.185Z" },
- { url = "https://files.pythonhosted.org/packages/e1/2e/5898933336b61975ce9dc04decbc0a7f2fee78c30353c5efba7f2d6ff27a/markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a", size = 12058, upload-time = "2025-09-27T18:36:19.444Z" },
- { url = "https://files.pythonhosted.org/packages/1d/09/adf2df3699d87d1d8184038df46a9c80d78c0148492323f4693df54e17bb/markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50", size = 24287, upload-time = "2025-09-27T18:36:20.768Z" },
- { url = "https://files.pythonhosted.org/packages/30/ac/0273f6fcb5f42e314c6d8cd99effae6a5354604d461b8d392b5ec9530a54/markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf", size = 22940, upload-time = "2025-09-27T18:36:22.249Z" },
- { url = "https://files.pythonhosted.org/packages/19/ae/31c1be199ef767124c042c6c3e904da327a2f7f0cd63a0337e1eca2967a8/markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f", size = 21887, upload-time = "2025-09-27T18:36:23.535Z" },
- { url = "https://files.pythonhosted.org/packages/b2/76/7edcab99d5349a4532a459e1fe64f0b0467a3365056ae550d3bcf3f79e1e/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a", size = 23692, upload-time = "2025-09-27T18:36:24.823Z" },
- { url = "https://files.pythonhosted.org/packages/a4/28/6e74cdd26d7514849143d69f0bf2399f929c37dc2b31e6829fd2045b2765/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115", size = 21471, upload-time = "2025-09-27T18:36:25.95Z" },
- { url = "https://files.pythonhosted.org/packages/62/7e/a145f36a5c2945673e590850a6f8014318d5577ed7e5920a4b3448e0865d/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a", size = 22923, upload-time = "2025-09-27T18:36:27.109Z" },
- { url = "https://files.pythonhosted.org/packages/0f/62/d9c46a7f5c9adbeeeda52f5b8d802e1094e9717705a645efc71b0913a0a8/markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19", size = 14572, upload-time = "2025-09-27T18:36:28.045Z" },
- { url = "https://files.pythonhosted.org/packages/83/8a/4414c03d3f891739326e1783338e48fb49781cc915b2e0ee052aa490d586/markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01", size = 15077, upload-time = "2025-09-27T18:36:29.025Z" },
- { url = "https://files.pythonhosted.org/packages/35/73/893072b42e6862f319b5207adc9ae06070f095b358655f077f69a35601f0/markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c", size = 13876, upload-time = "2025-09-27T18:36:29.954Z" },
- { url = "https://files.pythonhosted.org/packages/5a/72/147da192e38635ada20e0a2e1a51cf8823d2119ce8883f7053879c2199b5/markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e", size = 11615, upload-time = "2025-09-27T18:36:30.854Z" },
- { url = "https://files.pythonhosted.org/packages/9a/81/7e4e08678a1f98521201c3079f77db69fb552acd56067661f8c2f534a718/markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce", size = 12020, upload-time = "2025-09-27T18:36:31.971Z" },
- { url = "https://files.pythonhosted.org/packages/1e/2c/799f4742efc39633a1b54a92eec4082e4f815314869865d876824c257c1e/markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d", size = 24332, upload-time = "2025-09-27T18:36:32.813Z" },
- { url = "https://files.pythonhosted.org/packages/3c/2e/8d0c2ab90a8c1d9a24f0399058ab8519a3279d1bd4289511d74e909f060e/markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d", size = 22947, upload-time = "2025-09-27T18:36:33.86Z" },
- { url = "https://files.pythonhosted.org/packages/2c/54/887f3092a85238093a0b2154bd629c89444f395618842e8b0c41783898ea/markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a", size = 21962, upload-time = "2025-09-27T18:36:35.099Z" },
- { url = "https://files.pythonhosted.org/packages/c9/2f/336b8c7b6f4a4d95e91119dc8521402461b74a485558d8f238a68312f11c/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b", size = 23760, upload-time = "2025-09-27T18:36:36.001Z" },
- { url = "https://files.pythonhosted.org/packages/32/43/67935f2b7e4982ffb50a4d169b724d74b62a3964bc1a9a527f5ac4f1ee2b/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f", size = 21529, upload-time = "2025-09-27T18:36:36.906Z" },
- { url = "https://files.pythonhosted.org/packages/89/e0/4486f11e51bbba8b0c041098859e869e304d1c261e59244baa3d295d47b7/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b", size = 23015, upload-time = "2025-09-27T18:36:37.868Z" },
- { url = "https://files.pythonhosted.org/packages/2f/e1/78ee7a023dac597a5825441ebd17170785a9dab23de95d2c7508ade94e0e/markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d", size = 14540, upload-time = "2025-09-27T18:36:38.761Z" },
- { url = "https://files.pythonhosted.org/packages/aa/5b/bec5aa9bbbb2c946ca2733ef9c4ca91c91b6a24580193e891b5f7dbe8e1e/markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c", size = 15105, upload-time = "2025-09-27T18:36:39.701Z" },
- { url = "https://files.pythonhosted.org/packages/e5/f1/216fc1bbfd74011693a4fd837e7026152e89c4bcf3e77b6692fba9923123/markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f", size = 13906, upload-time = "2025-09-27T18:36:40.689Z" },
- { url = "https://files.pythonhosted.org/packages/38/2f/907b9c7bbba283e68f20259574b13d005c121a0fa4c175f9bed27c4597ff/markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795", size = 11622, upload-time = "2025-09-27T18:36:41.777Z" },
- { url = "https://files.pythonhosted.org/packages/9c/d9/5f7756922cdd676869eca1c4e3c0cd0df60ed30199ffd775e319089cb3ed/markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219", size = 12029, upload-time = "2025-09-27T18:36:43.257Z" },
- { url = "https://files.pythonhosted.org/packages/00/07/575a68c754943058c78f30db02ee03a64b3c638586fba6a6dd56830b30a3/markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6", size = 24374, upload-time = "2025-09-27T18:36:44.508Z" },
- { url = "https://files.pythonhosted.org/packages/a9/21/9b05698b46f218fc0e118e1f8168395c65c8a2c750ae2bab54fc4bd4e0e8/markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676", size = 22980, upload-time = "2025-09-27T18:36:45.385Z" },
- { url = "https://files.pythonhosted.org/packages/7f/71/544260864f893f18b6827315b988c146b559391e6e7e8f7252839b1b846a/markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9", size = 21990, upload-time = "2025-09-27T18:36:46.916Z" },
- { url = "https://files.pythonhosted.org/packages/c2/28/b50fc2f74d1ad761af2f5dcce7492648b983d00a65b8c0e0cb457c82ebbe/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1", size = 23784, upload-time = "2025-09-27T18:36:47.884Z" },
- { url = "https://files.pythonhosted.org/packages/ed/76/104b2aa106a208da8b17a2fb72e033a5a9d7073c68f7e508b94916ed47a9/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc", size = 21588, upload-time = "2025-09-27T18:36:48.82Z" },
- { url = "https://files.pythonhosted.org/packages/b5/99/16a5eb2d140087ebd97180d95249b00a03aa87e29cc224056274f2e45fd6/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12", size = 23041, upload-time = "2025-09-27T18:36:49.797Z" },
- { url = "https://files.pythonhosted.org/packages/19/bc/e7140ed90c5d61d77cea142eed9f9c303f4c4806f60a1044c13e3f1471d0/markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed", size = 14543, upload-time = "2025-09-27T18:36:51.584Z" },
- { url = "https://files.pythonhosted.org/packages/05/73/c4abe620b841b6b791f2edc248f556900667a5a1cf023a6646967ae98335/markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5", size = 15113, upload-time = "2025-09-27T18:36:52.537Z" },
- { url = "https://files.pythonhosted.org/packages/f0/3a/fa34a0f7cfef23cf9500d68cb7c32dd64ffd58a12b09225fb03dd37d5b80/markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485", size = 13911, upload-time = "2025-09-27T18:36:53.513Z" },
- { url = "https://files.pythonhosted.org/packages/e4/d7/e05cd7efe43a88a17a37b3ae96e79a19e846f3f456fe79c57ca61356ef01/markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73", size = 11658, upload-time = "2025-09-27T18:36:54.819Z" },
- { url = "https://files.pythonhosted.org/packages/99/9e/e412117548182ce2148bdeacdda3bb494260c0b0184360fe0d56389b523b/markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37", size = 12066, upload-time = "2025-09-27T18:36:55.714Z" },
- { url = "https://files.pythonhosted.org/packages/bc/e6/fa0ffcda717ef64a5108eaa7b4f5ed28d56122c9a6d70ab8b72f9f715c80/markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19", size = 25639, upload-time = "2025-09-27T18:36:56.908Z" },
- { url = "https://files.pythonhosted.org/packages/96/ec/2102e881fe9d25fc16cb4b25d5f5cde50970967ffa5dddafdb771237062d/markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025", size = 23569, upload-time = "2025-09-27T18:36:57.913Z" },
- { url = "https://files.pythonhosted.org/packages/4b/30/6f2fce1f1f205fc9323255b216ca8a235b15860c34b6798f810f05828e32/markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6", size = 23284, upload-time = "2025-09-27T18:36:58.833Z" },
- { url = "https://files.pythonhosted.org/packages/58/47/4a0ccea4ab9f5dcb6f79c0236d954acb382202721e704223a8aafa38b5c8/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f", size = 24801, upload-time = "2025-09-27T18:36:59.739Z" },
- { url = "https://files.pythonhosted.org/packages/6a/70/3780e9b72180b6fecb83a4814d84c3bf4b4ae4bf0b19c27196104149734c/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb", size = 22769, upload-time = "2025-09-27T18:37:00.719Z" },
- { url = "https://files.pythonhosted.org/packages/98/c5/c03c7f4125180fc215220c035beac6b9cb684bc7a067c84fc69414d315f5/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009", size = 23642, upload-time = "2025-09-27T18:37:01.673Z" },
- { url = "https://files.pythonhosted.org/packages/80/d6/2d1b89f6ca4bff1036499b1e29a1d02d282259f3681540e16563f27ebc23/markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354", size = 14612, upload-time = "2025-09-27T18:37:02.639Z" },
- { url = "https://files.pythonhosted.org/packages/2b/98/e48a4bfba0a0ffcf9925fe2d69240bfaa19c6f7507b8cd09c70684a53c1e/markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218", size = 15200, upload-time = "2025-09-27T18:37:03.582Z" },
- { url = "https://files.pythonhosted.org/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973, upload-time = "2025-09-27T18:37:04.929Z" },
- { url = "https://files.pythonhosted.org/packages/33/8a/8e42d4838cd89b7dde187011e97fe6c3af66d8c044997d2183fbd6d31352/markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe", size = 11619, upload-time = "2025-09-27T18:37:06.342Z" },
- { url = "https://files.pythonhosted.org/packages/b5/64/7660f8a4a8e53c924d0fa05dc3a55c9cee10bbd82b11c5afb27d44b096ce/markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026", size = 12029, upload-time = "2025-09-27T18:37:07.213Z" },
- { url = "https://files.pythonhosted.org/packages/da/ef/e648bfd021127bef5fa12e1720ffed0c6cbb8310c8d9bea7266337ff06de/markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737", size = 24408, upload-time = "2025-09-27T18:37:09.572Z" },
- { url = "https://files.pythonhosted.org/packages/41/3c/a36c2450754618e62008bf7435ccb0f88053e07592e6028a34776213d877/markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97", size = 23005, upload-time = "2025-09-27T18:37:10.58Z" },
- { url = "https://files.pythonhosted.org/packages/bc/20/b7fdf89a8456b099837cd1dc21974632a02a999ec9bf7ca3e490aacd98e7/markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d", size = 22048, upload-time = "2025-09-27T18:37:11.547Z" },
- { url = "https://files.pythonhosted.org/packages/9a/a7/591f592afdc734f47db08a75793a55d7fbcc6902a723ae4cfbab61010cc5/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda", size = 23821, upload-time = "2025-09-27T18:37:12.48Z" },
- { url = "https://files.pythonhosted.org/packages/7d/33/45b24e4f44195b26521bc6f1a82197118f74df348556594bd2262bda1038/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf", size = 21606, upload-time = "2025-09-27T18:37:13.485Z" },
- { url = "https://files.pythonhosted.org/packages/ff/0e/53dfaca23a69fbfbbf17a4b64072090e70717344c52eaaaa9c5ddff1e5f0/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe", size = 23043, upload-time = "2025-09-27T18:37:14.408Z" },
- { url = "https://files.pythonhosted.org/packages/46/11/f333a06fc16236d5238bfe74daccbca41459dcd8d1fa952e8fbd5dccfb70/markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9", size = 14747, upload-time = "2025-09-27T18:37:15.36Z" },
- { url = "https://files.pythonhosted.org/packages/28/52/182836104b33b444e400b14f797212f720cbc9ed6ba34c800639d154e821/markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581", size = 15341, upload-time = "2025-09-27T18:37:16.496Z" },
- { url = "https://files.pythonhosted.org/packages/6f/18/acf23e91bd94fd7b3031558b1f013adfa21a8e407a3fdb32745538730382/markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4", size = 14073, upload-time = "2025-09-27T18:37:17.476Z" },
- { url = "https://files.pythonhosted.org/packages/3c/f0/57689aa4076e1b43b15fdfa646b04653969d50cf30c32a102762be2485da/markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab", size = 11661, upload-time = "2025-09-27T18:37:18.453Z" },
- { url = "https://files.pythonhosted.org/packages/89/c3/2e67a7ca217c6912985ec766c6393b636fb0c2344443ff9d91404dc4c79f/markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175", size = 12069, upload-time = "2025-09-27T18:37:19.332Z" },
- { url = "https://files.pythonhosted.org/packages/f0/00/be561dce4e6ca66b15276e184ce4b8aec61fe83662cce2f7d72bd3249d28/markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634", size = 25670, upload-time = "2025-09-27T18:37:20.245Z" },
- { url = "https://files.pythonhosted.org/packages/50/09/c419f6f5a92e5fadde27efd190eca90f05e1261b10dbd8cbcb39cd8ea1dc/markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50", size = 23598, upload-time = "2025-09-27T18:37:21.177Z" },
- { url = "https://files.pythonhosted.org/packages/22/44/a0681611106e0b2921b3033fc19bc53323e0b50bc70cffdd19f7d679bb66/markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e", size = 23261, upload-time = "2025-09-27T18:37:22.167Z" },
- { url = "https://files.pythonhosted.org/packages/5f/57/1b0b3f100259dc9fffe780cfb60d4be71375510e435efec3d116b6436d43/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5", size = 24835, upload-time = "2025-09-27T18:37:23.296Z" },
- { url = "https://files.pythonhosted.org/packages/26/6a/4bf6d0c97c4920f1597cc14dd720705eca0bf7c787aebc6bb4d1bead5388/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523", size = 22733, upload-time = "2025-09-27T18:37:24.237Z" },
- { url = "https://files.pythonhosted.org/packages/14/c7/ca723101509b518797fedc2fdf79ba57f886b4aca8a7d31857ba3ee8281f/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc", size = 23672, upload-time = "2025-09-27T18:37:25.271Z" },
- { url = "https://files.pythonhosted.org/packages/fb/df/5bd7a48c256faecd1d36edc13133e51397e41b73bb77e1a69deab746ebac/markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d", size = 14819, upload-time = "2025-09-27T18:37:26.285Z" },
- { url = "https://files.pythonhosted.org/packages/1a/8a/0402ba61a2f16038b48b39bccca271134be00c5c9f0f623208399333c448/markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9", size = 15426, upload-time = "2025-09-27T18:37:27.316Z" },
- { url = "https://files.pythonhosted.org/packages/70/bc/6f1c2f612465f5fa89b95bead1f44dcb607670fd42891d8fdcd5d039f4f4/markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa", size = 14146, upload-time = "2025-09-27T18:37:28.327Z" },
- { url = "https://files.pythonhosted.org/packages/56/23/0d8c13a44bde9154821586520840643467aee574d8ce79a17da539ee7fed/markupsafe-3.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26", size = 11623, upload-time = "2025-09-27T18:37:29.296Z" },
- { url = "https://files.pythonhosted.org/packages/fd/23/07a2cb9a8045d5f3f0890a8c3bc0859d7a47bfd9a560b563899bec7b72ed/markupsafe-3.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc", size = 12049, upload-time = "2025-09-27T18:37:30.234Z" },
- { url = "https://files.pythonhosted.org/packages/bc/e4/6be85eb81503f8e11b61c0b6369b6e077dcf0a74adbd9ebf6b349937b4e9/markupsafe-3.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c", size = 21923, upload-time = "2025-09-27T18:37:31.177Z" },
- { url = "https://files.pythonhosted.org/packages/6f/bc/4dc914ead3fe6ddaef035341fee0fc956949bbd27335b611829292b89ee2/markupsafe-3.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42", size = 20543, upload-time = "2025-09-27T18:37:32.168Z" },
- { url = "https://files.pythonhosted.org/packages/89/6e/5fe81fbcfba4aef4093d5f856e5c774ec2057946052d18d168219b7bd9f9/markupsafe-3.0.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b", size = 20585, upload-time = "2025-09-27T18:37:33.166Z" },
- { url = "https://files.pythonhosted.org/packages/f6/f6/e0e5a3d3ae9c4020f696cd055f940ef86b64fe88de26f3a0308b9d3d048c/markupsafe-3.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758", size = 21387, upload-time = "2025-09-27T18:37:34.185Z" },
- { url = "https://files.pythonhosted.org/packages/c8/25/651753ef4dea08ea790f4fbb65146a9a44a014986996ca40102e237aa49a/markupsafe-3.0.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2", size = 20133, upload-time = "2025-09-27T18:37:35.138Z" },
- { url = "https://files.pythonhosted.org/packages/dc/0a/c3cf2b4fef5f0426e8a6d7fce3cb966a17817c568ce59d76b92a233fdbec/markupsafe-3.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d", size = 20588, upload-time = "2025-09-27T18:37:36.096Z" },
- { url = "https://files.pythonhosted.org/packages/cd/1b/a7782984844bd519ad4ffdbebbba2671ec5d0ebbeac34736c15fb86399e8/markupsafe-3.0.3-cp39-cp39-win32.whl", hash = "sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7", size = 14566, upload-time = "2025-09-27T18:37:37.09Z" },
- { url = "https://files.pythonhosted.org/packages/18/1f/8d9c20e1c9440e215a44be5ab64359e207fcb4f675543f1cf9a2a7f648d0/markupsafe-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e", size = 15053, upload-time = "2025-09-27T18:37:38.054Z" },
- { url = "https://files.pythonhosted.org/packages/4e/d3/fe08482b5cd995033556d45041a4f4e76e7f0521112a9c9991d40d39825f/markupsafe-3.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8", size = 13928, upload-time = "2025-09-27T18:37:39.037Z" },
-]
-
-[[package]]
-name = "matplotlib"
-version = "3.9.4"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "contourpy", version = "1.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
- { name = "cycler", marker = "python_full_version < '3.10'" },
- { name = "fonttools", version = "4.60.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
- { name = "importlib-resources", marker = "python_full_version < '3.10'" },
- { name = "kiwisolver", version = "1.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
- { name = "packaging", marker = "python_full_version < '3.10'" },
- { name = "pillow", version = "11.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
- { name = "pyparsing", marker = "python_full_version < '3.10'" },
- { name = "python-dateutil", marker = "python_full_version < '3.10'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/df/17/1747b4154034befd0ed33b52538f5eb7752d05bb51c5e2a31470c3bc7d52/matplotlib-3.9.4.tar.gz", hash = "sha256:1e00e8be7393cbdc6fedfa8a6fba02cf3e83814b285db1c60b906a023ba41bc3", size = 36106529, upload-time = "2024-12-13T05:56:34.184Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/7e/94/27d2e2c30d54b56c7b764acc1874a909e34d1965a427fc7092bb6a588b63/matplotlib-3.9.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c5fdd7abfb706dfa8d307af64a87f1a862879ec3cd8d0ec8637458f0885b9c50", size = 7885089, upload-time = "2024-12-13T05:54:24.224Z" },
- { url = "https://files.pythonhosted.org/packages/c6/25/828273307e40a68eb8e9df832b6b2aaad075864fdc1de4b1b81e40b09e48/matplotlib-3.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d89bc4e85e40a71d1477780366c27fb7c6494d293e1617788986f74e2a03d7ff", size = 7770600, upload-time = "2024-12-13T05:54:27.214Z" },
- { url = "https://files.pythonhosted.org/packages/f2/65/f841a422ec994da5123368d76b126acf4fc02ea7459b6e37c4891b555b83/matplotlib-3.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ddf9f3c26aae695c5daafbf6b94e4c1a30d6cd617ba594bbbded3b33a1fcfa26", size = 8200138, upload-time = "2024-12-13T05:54:29.497Z" },
- { url = "https://files.pythonhosted.org/packages/07/06/272aca07a38804d93b6050813de41ca7ab0e29ba7a9dd098e12037c919a9/matplotlib-3.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18ebcf248030173b59a868fda1fe42397253f6698995b55e81e1f57431d85e50", size = 8312711, upload-time = "2024-12-13T05:54:34.396Z" },
- { url = "https://files.pythonhosted.org/packages/98/37/f13e23b233c526b7e27ad61be0a771894a079e0f7494a10d8d81557e0e9a/matplotlib-3.9.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:974896ec43c672ec23f3f8c648981e8bc880ee163146e0312a9b8def2fac66f5", size = 9090622, upload-time = "2024-12-13T05:54:36.808Z" },
- { url = "https://files.pythonhosted.org/packages/4f/8c/b1f5bd2bd70e60f93b1b54c4d5ba7a992312021d0ddddf572f9a1a6d9348/matplotlib-3.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:4598c394ae9711cec135639374e70871fa36b56afae17bdf032a345be552a88d", size = 7828211, upload-time = "2024-12-13T05:54:40.596Z" },
- { url = "https://files.pythonhosted.org/packages/74/4b/65be7959a8fa118a3929b49a842de5b78bb55475236fcf64f3e308ff74a0/matplotlib-3.9.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d4dd29641d9fb8bc4492420c5480398dd40a09afd73aebe4eb9d0071a05fbe0c", size = 7894430, upload-time = "2024-12-13T05:54:44.049Z" },
- { url = "https://files.pythonhosted.org/packages/e9/18/80f70d91896e0a517b4a051c3fd540daa131630fd75e02e250365353b253/matplotlib-3.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30e5b22e8bcfb95442bf7d48b0d7f3bdf4a450cbf68986ea45fca3d11ae9d099", size = 7780045, upload-time = "2024-12-13T05:54:46.414Z" },
- { url = "https://files.pythonhosted.org/packages/a2/73/ccb381026e3238c5c25c3609ba4157b2d1a617ec98d65a8b4ee4e1e74d02/matplotlib-3.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bb0030d1d447fd56dcc23b4c64a26e44e898f0416276cac1ebc25522e0ac249", size = 8209906, upload-time = "2024-12-13T05:54:49.459Z" },
- { url = "https://files.pythonhosted.org/packages/ab/33/1648da77b74741c89f5ea95cbf42a291b4b364f2660b316318811404ed97/matplotlib-3.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aca90ed222ac3565d2752b83dbb27627480d27662671e4d39da72e97f657a423", size = 8322873, upload-time = "2024-12-13T05:54:53.066Z" },
- { url = "https://files.pythonhosted.org/packages/57/d3/8447ba78bc6593c9044c372d1609f8ea10fb1e071e7a9e0747bea74fc16c/matplotlib-3.9.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a181b2aa2906c608fcae72f977a4a2d76e385578939891b91c2550c39ecf361e", size = 9099566, upload-time = "2024-12-13T05:54:55.522Z" },
- { url = "https://files.pythonhosted.org/packages/23/e1/4f0e237bf349c02ff9d1b6e7109f1a17f745263809b9714a8576dc17752b/matplotlib-3.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:1f6882828231eca17f501c4dcd98a05abb3f03d157fbc0769c6911fe08b6cfd3", size = 7838065, upload-time = "2024-12-13T05:54:58.337Z" },
- { url = "https://files.pythonhosted.org/packages/1a/2b/c918bf6c19d6445d1cefe3d2e42cb740fb997e14ab19d4daeb6a7ab8a157/matplotlib-3.9.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:dfc48d67e6661378a21c2983200a654b72b5c5cdbd5d2cf6e5e1ece860f0cc70", size = 7891131, upload-time = "2024-12-13T05:55:02.837Z" },
- { url = "https://files.pythonhosted.org/packages/c1/e5/b4e8fc601ca302afeeabf45f30e706a445c7979a180e3a978b78b2b681a4/matplotlib-3.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:47aef0fab8332d02d68e786eba8113ffd6f862182ea2999379dec9e237b7e483", size = 7776365, upload-time = "2024-12-13T05:55:05.158Z" },
- { url = "https://files.pythonhosted.org/packages/99/06/b991886c506506476e5d83625c5970c656a491b9f80161458fed94597808/matplotlib-3.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fba1f52c6b7dc764097f52fd9ab627b90db452c9feb653a59945de16752e965f", size = 8200707, upload-time = "2024-12-13T05:55:09.48Z" },
- { url = "https://files.pythonhosted.org/packages/c3/e2/556b627498cb27e61026f2d1ba86a78ad1b836fef0996bef5440e8bc9559/matplotlib-3.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:173ac3748acaac21afcc3fa1633924609ba1b87749006bc25051c52c422a5d00", size = 8313761, upload-time = "2024-12-13T05:55:12.95Z" },
- { url = "https://files.pythonhosted.org/packages/58/ff/165af33ec766ff818306ea88e91f9f60d2a6ed543be1eb122a98acbf3b0d/matplotlib-3.9.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320edea0cadc07007765e33f878b13b3738ffa9745c5f707705692df70ffe0e0", size = 9095284, upload-time = "2024-12-13T05:55:16.199Z" },
- { url = "https://files.pythonhosted.org/packages/9f/8b/3d0c7a002db3b1ed702731c2a9a06d78d035f1f2fb0fb936a8e43cc1e9f4/matplotlib-3.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a4a4cfc82330b27042a7169533da7991e8789d180dd5b3daeaee57d75cd5a03b", size = 7841160, upload-time = "2024-12-13T05:55:19.991Z" },
- { url = "https://files.pythonhosted.org/packages/49/b1/999f89a7556d101b23a2f0b54f1b6e140d73f56804da1398f2f0bc0924bc/matplotlib-3.9.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:37eeffeeca3c940985b80f5b9a7b95ea35671e0e7405001f249848d2b62351b6", size = 7891499, upload-time = "2024-12-13T05:55:22.142Z" },
- { url = "https://files.pythonhosted.org/packages/87/7b/06a32b13a684977653396a1bfcd34d4e7539c5d55c8cbfaa8ae04d47e4a9/matplotlib-3.9.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3e7465ac859ee4abcb0d836137cd8414e7bb7ad330d905abced457217d4f0f45", size = 7776802, upload-time = "2024-12-13T05:55:25.947Z" },
- { url = "https://files.pythonhosted.org/packages/65/87/ac498451aff739e515891bbb92e566f3c7ef31891aaa878402a71f9b0910/matplotlib-3.9.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4c12302c34afa0cf061bea23b331e747e5e554b0fa595c96e01c7b75bc3b858", size = 8200802, upload-time = "2024-12-13T05:55:28.461Z" },
- { url = "https://files.pythonhosted.org/packages/f8/6b/9eb761c00e1cb838f6c92e5f25dcda3f56a87a52f6cb8fdfa561e6cf6a13/matplotlib-3.9.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b8c97917f21b75e72108b97707ba3d48f171541a74aa2a56df7a40626bafc64", size = 8313880, upload-time = "2024-12-13T05:55:30.965Z" },
- { url = "https://files.pythonhosted.org/packages/d7/a2/c8eaa600e2085eec7e38cbbcc58a30fc78f8224939d31d3152bdafc01fd1/matplotlib-3.9.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0229803bd7e19271b03cb09f27db76c918c467aa4ce2ae168171bc67c3f508df", size = 9094637, upload-time = "2024-12-13T05:55:33.701Z" },
- { url = "https://files.pythonhosted.org/packages/71/1f/c6e1daea55b7bfeb3d84c6cb1abc449f6a02b181e7e2a5e4db34c3afb793/matplotlib-3.9.4-cp313-cp313-win_amd64.whl", hash = "sha256:7c0d8ef442ebf56ff5e206f8083d08252ee738e04f3dc88ea882853a05488799", size = 7841311, upload-time = "2024-12-13T05:55:36.737Z" },
- { url = "https://files.pythonhosted.org/packages/c0/3a/2757d3f7d388b14dd48f5a83bea65b6d69f000e86b8f28f74d86e0d375bd/matplotlib-3.9.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a04c3b00066a688834356d196136349cb32f5e1003c55ac419e91585168b88fb", size = 7919989, upload-time = "2024-12-13T05:55:39.024Z" },
- { url = "https://files.pythonhosted.org/packages/24/28/f5077c79a4f521589a37fe1062d6a6ea3534e068213f7357e7cfffc2e17a/matplotlib-3.9.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:04c519587f6c210626741a1e9a68eefc05966ede24205db8982841826af5871a", size = 7809417, upload-time = "2024-12-13T05:55:42.412Z" },
- { url = "https://files.pythonhosted.org/packages/36/c8/c523fd2963156692916a8eb7d4069084cf729359f7955cf09075deddfeaf/matplotlib-3.9.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308afbf1a228b8b525fcd5cec17f246bbbb63b175a3ef6eb7b4d33287ca0cf0c", size = 8226258, upload-time = "2024-12-13T05:55:47.259Z" },
- { url = "https://files.pythonhosted.org/packages/f6/88/499bf4b8fa9349b6f5c0cf4cead0ebe5da9d67769129f1b5651e5ac51fbc/matplotlib-3.9.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddb3b02246ddcffd3ce98e88fed5b238bc5faff10dbbaa42090ea13241d15764", size = 8335849, upload-time = "2024-12-13T05:55:49.763Z" },
- { url = "https://files.pythonhosted.org/packages/b8/9f/20a4156b9726188646a030774ee337d5ff695a965be45ce4dbcb9312c170/matplotlib-3.9.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8a75287e9cb9eee48cb79ec1d806f75b29c0fde978cb7223a1f4c5848d696041", size = 9102152, upload-time = "2024-12-13T05:55:51.997Z" },
- { url = "https://files.pythonhosted.org/packages/10/11/237f9c3a4e8d810b1759b67ff2da7c32c04f9c80aa475e7beb36ed43a8fb/matplotlib-3.9.4-cp313-cp313t-win_amd64.whl", hash = "sha256:488deb7af140f0ba86da003e66e10d55ff915e152c78b4b66d231638400b1965", size = 7896987, upload-time = "2024-12-13T05:55:55.941Z" },
- { url = "https://files.pythonhosted.org/packages/56/eb/501b465c9fef28f158e414ea3a417913dc2ac748564c7ed41535f23445b4/matplotlib-3.9.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3c3724d89a387ddf78ff88d2a30ca78ac2b4c89cf37f2db4bd453c34799e933c", size = 7885919, upload-time = "2024-12-13T05:55:59.66Z" },
- { url = "https://files.pythonhosted.org/packages/da/36/236fbd868b6c91309a5206bd90c3f881f4f44b2d997cd1d6239ef652f878/matplotlib-3.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d5f0a8430ffe23d7e32cfd86445864ccad141797f7d25b7c41759a5b5d17cfd7", size = 7771486, upload-time = "2024-12-13T05:56:04.264Z" },
- { url = "https://files.pythonhosted.org/packages/e0/4b/105caf2d54d5ed11d9f4335398f5103001a03515f2126c936a752ccf1461/matplotlib-3.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bb0141a21aef3b64b633dc4d16cbd5fc538b727e4958be82a0e1c92a234160e", size = 8201838, upload-time = "2024-12-13T05:56:06.792Z" },
- { url = "https://files.pythonhosted.org/packages/5d/a7/bb01188fb4013d34d274caf44a2f8091255b0497438e8b6c0a7c1710c692/matplotlib-3.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57aa235109e9eed52e2c2949db17da185383fa71083c00c6c143a60e07e0888c", size = 8314492, upload-time = "2024-12-13T05:56:09.964Z" },
- { url = "https://files.pythonhosted.org/packages/33/19/02e1a37f7141fc605b193e927d0a9cdf9dc124a20b9e68793f4ffea19695/matplotlib-3.9.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b18c600061477ccfdd1e6fd050c33d8be82431700f3452b297a56d9ed7037abb", size = 9092500, upload-time = "2024-12-13T05:56:13.55Z" },
- { url = "https://files.pythonhosted.org/packages/57/68/c2feb4667adbf882ffa4b3e0ac9967f848980d9f8b5bebd86644aa67ce6a/matplotlib-3.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:ef5f2d1b67d2d2145ff75e10f8c008bfbf71d45137c4b648c87193e7dd053eac", size = 7822962, upload-time = "2024-12-13T05:56:16.358Z" },
- { url = "https://files.pythonhosted.org/packages/0c/22/2ef6a364cd3f565442b0b055e0599744f1e4314ec7326cdaaa48a4d864d7/matplotlib-3.9.4-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:44e0ed786d769d85bc787b0606a53f2d8d2d1d3c8a2608237365e9121c1a338c", size = 7877995, upload-time = "2024-12-13T05:56:18.805Z" },
- { url = "https://files.pythonhosted.org/packages/87/b8/2737456e566e9f4d94ae76b8aa0d953d9acb847714f9a7ad80184474f5be/matplotlib-3.9.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:09debb9ce941eb23ecdbe7eab972b1c3e0276dcf01688073faff7b0f61d6c6ca", size = 7769300, upload-time = "2024-12-13T05:56:21.315Z" },
- { url = "https://files.pythonhosted.org/packages/b2/1f/e709c6ec7b5321e6568769baa288c7178e60a93a9da9e682b39450da0e29/matplotlib-3.9.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc53cf157a657bfd03afab14774d54ba73aa84d42cfe2480c91bd94873952db", size = 8313423, upload-time = "2024-12-13T05:56:26.719Z" },
- { url = "https://files.pythonhosted.org/packages/5e/b6/5a1f868782cd13f053a679984e222007ecff654a9bfbac6b27a65f4eeb05/matplotlib-3.9.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ad45da51be7ad02387801fd154ef74d942f49fe3fcd26a64c94842ba7ec0d865", size = 7854624, upload-time = "2024-12-13T05:56:29.359Z" },
-]
-
-[[package]]
-name = "matplotlib"
-version = "3.10.8"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "contourpy", version = "1.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" },
- { name = "contourpy", version = "1.3.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
- { name = "cycler", marker = "python_full_version >= '3.10'" },
- { name = "fonttools", version = "4.62.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" },
- { name = "kiwisolver", version = "1.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" },
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" },
- { name = "numpy", version = "2.4.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
- { name = "packaging", marker = "python_full_version >= '3.10'" },
- { name = "pillow", version = "12.1.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" },
- { name = "pyparsing", marker = "python_full_version >= '3.10'" },
- { name = "python-dateutil", marker = "python_full_version >= '3.10'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/8a/76/d3c6e3a13fe484ebe7718d14e269c9569c4eb0020a968a327acb3b9a8fe6/matplotlib-3.10.8.tar.gz", hash = "sha256:2299372c19d56bcd35cf05a2738308758d32b9eaed2371898d8f5bd33f084aa3", size = 34806269, upload-time = "2025-12-10T22:56:51.155Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/58/be/a30bd917018ad220c400169fba298f2bb7003c8ccbc0c3e24ae2aacad1e8/matplotlib-3.10.8-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:00270d217d6b20d14b584c521f810d60c5c78406dc289859776550df837dcda7", size = 8239828, upload-time = "2025-12-10T22:55:02.313Z" },
- { url = "https://files.pythonhosted.org/packages/58/27/ca01e043c4841078e82cf6e80a6993dfecd315c3d79f5f3153afbb8e1ec6/matplotlib-3.10.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37b3c1cc42aa184b3f738cfa18c1c1d72fd496d85467a6cf7b807936d39aa656", size = 8128050, upload-time = "2025-12-10T22:55:04.997Z" },
- { url = "https://files.pythonhosted.org/packages/cb/aa/7ab67f2b729ae6a91bcf9dcac0affb95fb8c56f7fd2b2af894ae0b0cf6fa/matplotlib-3.10.8-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ee40c27c795bda6a5292e9cff9890189d32f7e3a0bf04e0e3c9430c4a00c37df", size = 8700452, upload-time = "2025-12-10T22:55:07.47Z" },
- { url = "https://files.pythonhosted.org/packages/73/ae/2d5817b0acee3c49b7e7ccfbf5b273f284957cc8e270adf36375db353190/matplotlib-3.10.8-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a48f2b74020919552ea25d222d5cc6af9ca3f4eb43a93e14d068457f545c2a17", size = 9534928, upload-time = "2025-12-10T22:55:10.566Z" },
- { url = "https://files.pythonhosted.org/packages/c9/5b/8e66653e9f7c39cb2e5cab25fce4810daffa2bff02cbf5f3077cea9e942c/matplotlib-3.10.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f254d118d14a7f99d616271d6c3c27922c092dac11112670b157798b89bf4933", size = 9586377, upload-time = "2025-12-10T22:55:12.362Z" },
- { url = "https://files.pythonhosted.org/packages/e2/e2/fd0bbadf837f81edb0d208ba8f8cb552874c3b16e27cb91a31977d90875d/matplotlib-3.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:f9b587c9c7274c1613a30afabf65a272114cd6cdbe67b3406f818c79d7ab2e2a", size = 8128127, upload-time = "2025-12-10T22:55:14.436Z" },
- { url = "https://files.pythonhosted.org/packages/f8/86/de7e3a1cdcfc941483af70609edc06b83e7c8a0e0dc9ac325200a3f4d220/matplotlib-3.10.8-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6be43b667360fef5c754dda5d25a32e6307a03c204f3c0fc5468b78fa87b4160", size = 8251215, upload-time = "2025-12-10T22:55:16.175Z" },
- { url = "https://files.pythonhosted.org/packages/fd/14/baad3222f424b19ce6ad243c71de1ad9ec6b2e4eb1e458a48fdc6d120401/matplotlib-3.10.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2b336e2d91a3d7006864e0990c83b216fcdca64b5a6484912902cef87313d78", size = 8139625, upload-time = "2025-12-10T22:55:17.712Z" },
- { url = "https://files.pythonhosted.org/packages/8f/a0/7024215e95d456de5883e6732e708d8187d9753a21d32f8ddb3befc0c445/matplotlib-3.10.8-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:efb30e3baaea72ce5928e32bab719ab4770099079d66726a62b11b1ef7273be4", size = 8712614, upload-time = "2025-12-10T22:55:20.8Z" },
- { url = "https://files.pythonhosted.org/packages/5a/f4/b8347351da9a5b3f41e26cf547252d861f685c6867d179a7c9d60ad50189/matplotlib-3.10.8-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d56a1efd5bfd61486c8bc968fa18734464556f0fb8e51690f4ac25d85cbbbbc2", size = 9540997, upload-time = "2025-12-10T22:55:23.258Z" },
- { url = "https://files.pythonhosted.org/packages/9e/c0/c7b914e297efe0bc36917bf216b2acb91044b91e930e878ae12981e461e5/matplotlib-3.10.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:238b7ce5717600615c895050239ec955d91f321c209dd110db988500558e70d6", size = 9596825, upload-time = "2025-12-10T22:55:25.217Z" },
- { url = "https://files.pythonhosted.org/packages/6f/d3/a4bbc01c237ab710a1f22b4da72f4ff6d77eb4c7735ea9811a94ae239067/matplotlib-3.10.8-cp311-cp311-win_amd64.whl", hash = "sha256:18821ace09c763ec93aef5eeff087ee493a24051936d7b9ebcad9662f66501f9", size = 8135090, upload-time = "2025-12-10T22:55:27.162Z" },
- { url = "https://files.pythonhosted.org/packages/89/dd/a0b6588f102beab33ca6f5218b31725216577b2a24172f327eaf6417d5c9/matplotlib-3.10.8-cp311-cp311-win_arm64.whl", hash = "sha256:bab485bcf8b1c7d2060b4fcb6fc368a9e6f4cd754c9c2fea281f4be21df394a2", size = 8012377, upload-time = "2025-12-10T22:55:29.185Z" },
- { url = "https://files.pythonhosted.org/packages/9e/67/f997cdcbb514012eb0d10cd2b4b332667997fb5ebe26b8d41d04962fa0e6/matplotlib-3.10.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:64fcc24778ca0404ce0cb7b6b77ae1f4c7231cdd60e6778f999ee05cbd581b9a", size = 8260453, upload-time = "2025-12-10T22:55:30.709Z" },
- { url = "https://files.pythonhosted.org/packages/7e/65/07d5f5c7f7c994f12c768708bd2e17a4f01a2b0f44a1c9eccad872433e2e/matplotlib-3.10.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b9a5ca4ac220a0cdd1ba6bcba3608547117d30468fefce49bb26f55c1a3d5c58", size = 8148321, upload-time = "2025-12-10T22:55:33.265Z" },
- { url = "https://files.pythonhosted.org/packages/3e/f3/c5195b1ae57ef85339fd7285dfb603b22c8b4e79114bae5f4f0fcf688677/matplotlib-3.10.8-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3ab4aabc72de4ff77b3ec33a6d78a68227bf1123465887f9905ba79184a1cc04", size = 8716944, upload-time = "2025-12-10T22:55:34.922Z" },
- { url = "https://files.pythonhosted.org/packages/00/f9/7638f5cc82ec8a7aa005de48622eecc3ed7c9854b96ba15bd76b7fd27574/matplotlib-3.10.8-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:24d50994d8c5816ddc35411e50a86ab05f575e2530c02752e02538122613371f", size = 9550099, upload-time = "2025-12-10T22:55:36.789Z" },
- { url = "https://files.pythonhosted.org/packages/57/61/78cd5920d35b29fd2a0fe894de8adf672ff52939d2e9b43cb83cd5ce1bc7/matplotlib-3.10.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:99eefd13c0dc3b3c1b4d561c1169e65fe47aab7b8158754d7c084088e2329466", size = 9613040, upload-time = "2025-12-10T22:55:38.715Z" },
- { url = "https://files.pythonhosted.org/packages/30/4e/c10f171b6e2f44d9e3a2b96efa38b1677439d79c99357600a62cc1e9594e/matplotlib-3.10.8-cp312-cp312-win_amd64.whl", hash = "sha256:dd80ecb295460a5d9d260df63c43f4afbdd832d725a531f008dad1664f458adf", size = 8142717, upload-time = "2025-12-10T22:55:41.103Z" },
- { url = "https://files.pythonhosted.org/packages/f1/76/934db220026b5fef85f45d51a738b91dea7d70207581063cd9bd8fafcf74/matplotlib-3.10.8-cp312-cp312-win_arm64.whl", hash = "sha256:3c624e43ed56313651bc18a47f838b60d7b8032ed348911c54906b130b20071b", size = 8012751, upload-time = "2025-12-10T22:55:42.684Z" },
- { url = "https://files.pythonhosted.org/packages/3d/b9/15fd5541ef4f5b9a17eefd379356cf12175fe577424e7b1d80676516031a/matplotlib-3.10.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3f2e409836d7f5ac2f1c013110a4d50b9f7edc26328c108915f9075d7d7a91b6", size = 8261076, upload-time = "2025-12-10T22:55:44.648Z" },
- { url = "https://files.pythonhosted.org/packages/8d/a0/2ba3473c1b66b9c74dc7107c67e9008cb1782edbe896d4c899d39ae9cf78/matplotlib-3.10.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:56271f3dac49a88d7fca5060f004d9d22b865f743a12a23b1e937a0be4818ee1", size = 8148794, upload-time = "2025-12-10T22:55:46.252Z" },
- { url = "https://files.pythonhosted.org/packages/75/97/a471f1c3eb1fd6f6c24a31a5858f443891d5127e63a7788678d14e249aea/matplotlib-3.10.8-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a0a7f52498f72f13d4a25ea70f35f4cb60642b466cbb0a9be951b5bc3f45a486", size = 8718474, upload-time = "2025-12-10T22:55:47.864Z" },
- { url = "https://files.pythonhosted.org/packages/01/be/cd478f4b66f48256f42927d0acbcd63a26a893136456cd079c0cc24fbabf/matplotlib-3.10.8-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:646d95230efb9ca614a7a594d4fcacde0ac61d25e37dd51710b36477594963ce", size = 9549637, upload-time = "2025-12-10T22:55:50.048Z" },
- { url = "https://files.pythonhosted.org/packages/5d/7c/8dc289776eae5109e268c4fb92baf870678dc048a25d4ac903683b86d5bf/matplotlib-3.10.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f89c151aab2e2e23cb3fe0acad1e8b82841fd265379c4cecd0f3fcb34c15e0f6", size = 9613678, upload-time = "2025-12-10T22:55:52.21Z" },
- { url = "https://files.pythonhosted.org/packages/64/40/37612487cc8a437d4dd261b32ca21fe2d79510fe74af74e1f42becb1bdb8/matplotlib-3.10.8-cp313-cp313-win_amd64.whl", hash = "sha256:e8ea3e2d4066083e264e75c829078f9e149fa119d27e19acd503de65e0b13149", size = 8142686, upload-time = "2025-12-10T22:55:54.253Z" },
- { url = "https://files.pythonhosted.org/packages/66/52/8d8a8730e968185514680c2a6625943f70269509c3dcfc0dcf7d75928cb8/matplotlib-3.10.8-cp313-cp313-win_arm64.whl", hash = "sha256:c108a1d6fa78a50646029cb6d49808ff0fc1330fda87fa6f6250c6b5369b6645", size = 8012917, upload-time = "2025-12-10T22:55:56.268Z" },
- { url = "https://files.pythonhosted.org/packages/b5/27/51fe26e1062f298af5ef66343d8ef460e090a27fea73036c76c35821df04/matplotlib-3.10.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ad3d9833a64cf48cc4300f2b406c3d0f4f4724a91c0bd5640678a6ba7c102077", size = 8305679, upload-time = "2025-12-10T22:55:57.856Z" },
- { url = "https://files.pythonhosted.org/packages/2c/1e/4de865bc591ac8e3062e835f42dd7fe7a93168d519557837f0e37513f629/matplotlib-3.10.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:eb3823f11823deade26ce3b9f40dcb4a213da7a670013929f31d5f5ed1055b22", size = 8198336, upload-time = "2025-12-10T22:55:59.371Z" },
- { url = "https://files.pythonhosted.org/packages/c6/cb/2f7b6e75fb4dce87ef91f60cac4f6e34f4c145ab036a22318ec837971300/matplotlib-3.10.8-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d9050fee89a89ed57b4fb2c1bfac9a3d0c57a0d55aed95949eedbc42070fea39", size = 8731653, upload-time = "2025-12-10T22:56:01.032Z" },
- { url = "https://files.pythonhosted.org/packages/46/b3/bd9c57d6ba670a37ab31fb87ec3e8691b947134b201f881665b28cc039ff/matplotlib-3.10.8-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b44d07310e404ba95f8c25aa5536f154c0a8ec473303535949e52eb71d0a1565", size = 9561356, upload-time = "2025-12-10T22:56:02.95Z" },
- { url = "https://files.pythonhosted.org/packages/c0/3d/8b94a481456dfc9dfe6e39e93b5ab376e50998cddfd23f4ae3b431708f16/matplotlib-3.10.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0a33deb84c15ede243aead39f77e990469fff93ad1521163305095b77b72ce4a", size = 9614000, upload-time = "2025-12-10T22:56:05.411Z" },
- { url = "https://files.pythonhosted.org/packages/bd/cd/bc06149fe5585ba800b189a6a654a75f1f127e8aab02fd2be10df7fa500c/matplotlib-3.10.8-cp313-cp313t-win_amd64.whl", hash = "sha256:3a48a78d2786784cc2413e57397981fb45c79e968d99656706018d6e62e57958", size = 8220043, upload-time = "2025-12-10T22:56:07.551Z" },
- { url = "https://files.pythonhosted.org/packages/e3/de/b22cf255abec916562cc04eef457c13e58a1990048de0c0c3604d082355e/matplotlib-3.10.8-cp313-cp313t-win_arm64.whl", hash = "sha256:15d30132718972c2c074cd14638c7f4592bd98719e2308bccea40e0538bc0cb5", size = 8062075, upload-time = "2025-12-10T22:56:09.178Z" },
- { url = "https://files.pythonhosted.org/packages/3c/43/9c0ff7a2f11615e516c3b058e1e6e8f9614ddeca53faca06da267c48345d/matplotlib-3.10.8-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b53285e65d4fa4c86399979e956235deb900be5baa7fc1218ea67fbfaeaadd6f", size = 8262481, upload-time = "2025-12-10T22:56:10.885Z" },
- { url = "https://files.pythonhosted.org/packages/6f/ca/e8ae28649fcdf039fda5ef554b40a95f50592a3c47e6f7270c9561c12b07/matplotlib-3.10.8-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:32f8dce744be5569bebe789e46727946041199030db8aeb2954d26013a0eb26b", size = 8151473, upload-time = "2025-12-10T22:56:12.377Z" },
- { url = "https://files.pythonhosted.org/packages/f1/6f/009d129ae70b75e88cbe7e503a12a4c0670e08ed748a902c2568909e9eb5/matplotlib-3.10.8-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4cf267add95b1c88300d96ca837833d4112756045364f5c734a2276038dae27d", size = 9553896, upload-time = "2025-12-10T22:56:14.432Z" },
- { url = "https://files.pythonhosted.org/packages/f5/26/4221a741eb97967bc1fd5e4c52b9aa5a91b2f4ec05b59f6def4d820f9df9/matplotlib-3.10.8-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2cf5bd12cecf46908f286d7838b2abc6c91cda506c0445b8223a7c19a00df008", size = 9824193, upload-time = "2025-12-10T22:56:16.29Z" },
- { url = "https://files.pythonhosted.org/packages/1f/f3/3abf75f38605772cf48a9daf5821cd4f563472f38b4b828c6fba6fa6d06e/matplotlib-3.10.8-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:41703cc95688f2516b480f7f339d8851a6035f18e100ee6a32bc0b8536a12a9c", size = 9615444, upload-time = "2025-12-10T22:56:18.155Z" },
- { url = "https://files.pythonhosted.org/packages/93/a5/de89ac80f10b8dc615807ee1133cd99ac74082581196d4d9590bea10690d/matplotlib-3.10.8-cp314-cp314-win_amd64.whl", hash = "sha256:83d282364ea9f3e52363da262ce32a09dfe241e4080dcedda3c0db059d3c1f11", size = 8272719, upload-time = "2025-12-10T22:56:20.366Z" },
- { url = "https://files.pythonhosted.org/packages/69/ce/b006495c19ccc0a137b48083168a37bd056392dee02f87dba0472f2797fe/matplotlib-3.10.8-cp314-cp314-win_arm64.whl", hash = "sha256:2c1998e92cd5999e295a731bcb2911c75f597d937341f3030cc24ef2733d78a8", size = 8144205, upload-time = "2025-12-10T22:56:22.239Z" },
- { url = "https://files.pythonhosted.org/packages/68/d9/b31116a3a855bd313c6fcdb7226926d59b041f26061c6c5b1be66a08c826/matplotlib-3.10.8-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:b5a2b97dbdc7d4f353ebf343744f1d1f1cca8aa8bfddb4262fcf4306c3761d50", size = 8305785, upload-time = "2025-12-10T22:56:24.218Z" },
- { url = "https://files.pythonhosted.org/packages/1e/90/6effe8103f0272685767ba5f094f453784057072f49b393e3ea178fe70a5/matplotlib-3.10.8-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:3f5c3e4da343bba819f0234186b9004faba952cc420fbc522dc4e103c1985908", size = 8198361, upload-time = "2025-12-10T22:56:26.787Z" },
- { url = "https://files.pythonhosted.org/packages/d7/65/a73188711bea603615fc0baecca1061429ac16940e2385433cc778a9d8e7/matplotlib-3.10.8-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f62550b9a30afde8c1c3ae450e5eb547d579dd69b25c2fc7a1c67f934c1717a", size = 9561357, upload-time = "2025-12-10T22:56:28.953Z" },
- { url = "https://files.pythonhosted.org/packages/f4/3d/b5c5d5d5be8ce63292567f0e2c43dde9953d3ed86ac2de0a72e93c8f07a1/matplotlib-3.10.8-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:495672de149445ec1b772ff2c9ede9b769e3cb4f0d0aa7fa730d7f59e2d4e1c1", size = 9823610, upload-time = "2025-12-10T22:56:31.455Z" },
- { url = "https://files.pythonhosted.org/packages/4d/4b/e7beb6bbd49f6bae727a12b270a2654d13c397576d25bd6786e47033300f/matplotlib-3.10.8-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:595ba4d8fe983b88f0eec8c26a241e16d6376fe1979086232f481f8f3f67494c", size = 9614011, upload-time = "2025-12-10T22:56:33.85Z" },
- { url = "https://files.pythonhosted.org/packages/7c/e6/76f2813d31f032e65f6f797e3f2f6e4aab95b65015924b1c51370395c28a/matplotlib-3.10.8-cp314-cp314t-win_amd64.whl", hash = "sha256:25d380fe8b1dc32cf8f0b1b448470a77afb195438bafdf1d858bfb876f3edf7b", size = 8362801, upload-time = "2025-12-10T22:56:36.107Z" },
- { url = "https://files.pythonhosted.org/packages/5d/49/d651878698a0b67f23aa28e17f45a6d6dd3d3f933fa29087fa4ce5947b5a/matplotlib-3.10.8-cp314-cp314t-win_arm64.whl", hash = "sha256:113bb52413ea508ce954a02c10ffd0d565f9c3bc7f2eddc27dfe1731e71c7b5f", size = 8192560, upload-time = "2025-12-10T22:56:38.008Z" },
- { url = "https://files.pythonhosted.org/packages/f5/43/31d59500bb950b0d188e149a2e552040528c13d6e3d6e84d0cccac593dcd/matplotlib-3.10.8-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f97aeb209c3d2511443f8797e3e5a569aebb040d4f8bc79aa3ee78a8fb9e3dd8", size = 8237252, upload-time = "2025-12-10T22:56:39.529Z" },
- { url = "https://files.pythonhosted.org/packages/0c/2c/615c09984f3c5f907f51c886538ad785cf72e0e11a3225de2c0f9442aecc/matplotlib-3.10.8-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fb061f596dad3a0f52b60dc6a5dec4a0c300dec41e058a7efe09256188d170b7", size = 8124693, upload-time = "2025-12-10T22:56:41.758Z" },
- { url = "https://files.pythonhosted.org/packages/91/e1/2757277a1c56041e1fc104b51a0f7b9a4afc8eb737865d63cababe30bc61/matplotlib-3.10.8-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:12d90df9183093fcd479f4172ac26b322b1248b15729cb57f42f71f24c7e37a3", size = 8702205, upload-time = "2025-12-10T22:56:43.415Z" },
- { url = "https://files.pythonhosted.org/packages/04/30/3afaa31c757f34b7725ab9d2ba8b48b5e89c2019c003e7d0ead143aabc5a/matplotlib-3.10.8-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:6da7c2ce169267d0d066adcf63758f0604aa6c3eebf67458930f9d9b79ad1db1", size = 8249198, upload-time = "2025-12-10T22:56:45.584Z" },
- { url = "https://files.pythonhosted.org/packages/48/2f/6334aec331f57485a642a7c8be03cb286f29111ae71c46c38b363230063c/matplotlib-3.10.8-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9153c3292705be9f9c64498a8872118540c3f4123d1a1c840172edf262c8be4a", size = 8136817, upload-time = "2025-12-10T22:56:47.339Z" },
- { url = "https://files.pythonhosted.org/packages/73/e4/6d6f14b2a759c622f191b2d67e9075a3f56aaccb3be4bb9bb6890030d0a0/matplotlib-3.10.8-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ae029229a57cd1e8fe542485f27e7ca7b23aa9e8944ddb4985d0bc444f1eca2", size = 8713867, upload-time = "2025-12-10T22:56:48.954Z" },
-]
-
-[[package]]
-name = "mpmath"
-version = "1.3.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" },
-]
-
-[[package]]
-name = "msgpack"
-version = "1.1.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/4d/f2/bfb55a6236ed8725a96b0aa3acbd0ec17588e6a2c3b62a93eb513ed8783f/msgpack-1.1.2.tar.gz", hash = "sha256:3b60763c1373dd60f398488069bcdc703cd08a711477b5d480eecc9f9626f47e", size = 173581, upload-time = "2025-10-08T09:15:56.596Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/f5/a2/3b68a9e769db68668b25c6108444a35f9bd163bb848c0650d516761a59c0/msgpack-1.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0051fffef5a37ca2cd16978ae4f0aef92f164df86823871b5162812bebecd8e2", size = 81318, upload-time = "2025-10-08T09:14:38.722Z" },
- { url = "https://files.pythonhosted.org/packages/5b/e1/2b720cc341325c00be44e1ed59e7cfeae2678329fbf5aa68f5bda57fe728/msgpack-1.1.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a605409040f2da88676e9c9e5853b3449ba8011973616189ea5ee55ddbc5bc87", size = 83786, upload-time = "2025-10-08T09:14:40.082Z" },
- { url = "https://files.pythonhosted.org/packages/71/e5/c2241de64bfceac456b140737812a2ab310b10538a7b34a1d393b748e095/msgpack-1.1.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b696e83c9f1532b4af884045ba7f3aa741a63b2bc22617293a2c6a7c645f251", size = 398240, upload-time = "2025-10-08T09:14:41.151Z" },
- { url = "https://files.pythonhosted.org/packages/b7/09/2a06956383c0fdebaef5aa9246e2356776f12ea6f2a44bd1368abf0e46c4/msgpack-1.1.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:365c0bbe981a27d8932da71af63ef86acc59ed5c01ad929e09a0b88c6294e28a", size = 406070, upload-time = "2025-10-08T09:14:42.821Z" },
- { url = "https://files.pythonhosted.org/packages/0e/74/2957703f0e1ef20637d6aead4fbb314330c26f39aa046b348c7edcf6ca6b/msgpack-1.1.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:41d1a5d875680166d3ac5c38573896453bbbea7092936d2e107214daf43b1d4f", size = 393403, upload-time = "2025-10-08T09:14:44.38Z" },
- { url = "https://files.pythonhosted.org/packages/a5/09/3bfc12aa90f77b37322fc33e7a8a7c29ba7c8edeadfa27664451801b9860/msgpack-1.1.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:354e81bcdebaab427c3df4281187edc765d5d76bfb3a7c125af9da7a27e8458f", size = 398947, upload-time = "2025-10-08T09:14:45.56Z" },
- { url = "https://files.pythonhosted.org/packages/4b/4f/05fcebd3b4977cb3d840f7ef6b77c51f8582086de5e642f3fefee35c86fc/msgpack-1.1.2-cp310-cp310-win32.whl", hash = "sha256:e64c8d2f5e5d5fda7b842f55dec6133260ea8f53c4257d64494c534f306bf7a9", size = 64769, upload-time = "2025-10-08T09:14:47.334Z" },
- { url = "https://files.pythonhosted.org/packages/d0/3e/b4547e3a34210956382eed1c85935fff7e0f9b98be3106b3745d7dec9c5e/msgpack-1.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:db6192777d943bdaaafb6ba66d44bf65aa0e9c5616fa1d2da9bb08828c6b39aa", size = 71293, upload-time = "2025-10-08T09:14:48.665Z" },
- { url = "https://files.pythonhosted.org/packages/2c/97/560d11202bcd537abca693fd85d81cebe2107ba17301de42b01ac1677b69/msgpack-1.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2e86a607e558d22985d856948c12a3fa7b42efad264dca8a3ebbcfa2735d786c", size = 82271, upload-time = "2025-10-08T09:14:49.967Z" },
- { url = "https://files.pythonhosted.org/packages/83/04/28a41024ccbd67467380b6fb440ae916c1e4f25e2cd4c63abe6835ac566e/msgpack-1.1.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:283ae72fc89da59aa004ba147e8fc2f766647b1251500182fac0350d8af299c0", size = 84914, upload-time = "2025-10-08T09:14:50.958Z" },
- { url = "https://files.pythonhosted.org/packages/71/46/b817349db6886d79e57a966346cf0902a426375aadc1e8e7a86a75e22f19/msgpack-1.1.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:61c8aa3bd513d87c72ed0b37b53dd5c5a0f58f2ff9f26e1555d3bd7948fb7296", size = 416962, upload-time = "2025-10-08T09:14:51.997Z" },
- { url = "https://files.pythonhosted.org/packages/da/e0/6cc2e852837cd6086fe7d8406af4294e66827a60a4cf60b86575a4a65ca8/msgpack-1.1.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:454e29e186285d2ebe65be34629fa0e8605202c60fbc7c4c650ccd41870896ef", size = 426183, upload-time = "2025-10-08T09:14:53.477Z" },
- { url = "https://files.pythonhosted.org/packages/25/98/6a19f030b3d2ea906696cedd1eb251708e50a5891d0978b012cb6107234c/msgpack-1.1.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7bc8813f88417599564fafa59fd6f95be417179f76b40325b500b3c98409757c", size = 411454, upload-time = "2025-10-08T09:14:54.648Z" },
- { url = "https://files.pythonhosted.org/packages/b7/cd/9098fcb6adb32187a70b7ecaabf6339da50553351558f37600e53a4a2a23/msgpack-1.1.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bafca952dc13907bdfdedfc6a5f579bf4f292bdd506fadb38389afa3ac5b208e", size = 422341, upload-time = "2025-10-08T09:14:56.328Z" },
- { url = "https://files.pythonhosted.org/packages/e6/ae/270cecbcf36c1dc85ec086b33a51a4d7d08fc4f404bdbc15b582255d05ff/msgpack-1.1.2-cp311-cp311-win32.whl", hash = "sha256:602b6740e95ffc55bfb078172d279de3773d7b7db1f703b2f1323566b878b90e", size = 64747, upload-time = "2025-10-08T09:14:57.882Z" },
- { url = "https://files.pythonhosted.org/packages/2a/79/309d0e637f6f37e83c711f547308b91af02b72d2326ddd860b966080ef29/msgpack-1.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:d198d275222dc54244bf3327eb8cbe00307d220241d9cec4d306d49a44e85f68", size = 71633, upload-time = "2025-10-08T09:14:59.177Z" },
- { url = "https://files.pythonhosted.org/packages/73/4d/7c4e2b3d9b1106cd0aa6cb56cc57c6267f59fa8bfab7d91df5adc802c847/msgpack-1.1.2-cp311-cp311-win_arm64.whl", hash = "sha256:86f8136dfa5c116365a8a651a7d7484b65b13339731dd6faebb9a0242151c406", size = 64755, upload-time = "2025-10-08T09:15:00.48Z" },
- { url = "https://files.pythonhosted.org/packages/ad/bd/8b0d01c756203fbab65d265859749860682ccd2a59594609aeec3a144efa/msgpack-1.1.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:70a0dff9d1f8da25179ffcf880e10cf1aad55fdb63cd59c9a49a1b82290062aa", size = 81939, upload-time = "2025-10-08T09:15:01.472Z" },
- { url = "https://files.pythonhosted.org/packages/34/68/ba4f155f793a74c1483d4bdef136e1023f7bcba557f0db4ef3db3c665cf1/msgpack-1.1.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:446abdd8b94b55c800ac34b102dffd2f6aa0ce643c55dfc017ad89347db3dbdb", size = 85064, upload-time = "2025-10-08T09:15:03.764Z" },
- { url = "https://files.pythonhosted.org/packages/f2/60/a064b0345fc36c4c3d2c743c82d9100c40388d77f0b48b2f04d6041dbec1/msgpack-1.1.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c63eea553c69ab05b6747901b97d620bb2a690633c77f23feb0c6a947a8a7b8f", size = 417131, upload-time = "2025-10-08T09:15:05.136Z" },
- { url = "https://files.pythonhosted.org/packages/65/92/a5100f7185a800a5d29f8d14041f61475b9de465ffcc0f3b9fba606e4505/msgpack-1.1.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:372839311ccf6bdaf39b00b61288e0557916c3729529b301c52c2d88842add42", size = 427556, upload-time = "2025-10-08T09:15:06.837Z" },
- { url = "https://files.pythonhosted.org/packages/f5/87/ffe21d1bf7d9991354ad93949286f643b2bb6ddbeab66373922b44c3b8cc/msgpack-1.1.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2929af52106ca73fcb28576218476ffbb531a036c2adbcf54a3664de124303e9", size = 404920, upload-time = "2025-10-08T09:15:08.179Z" },
- { url = "https://files.pythonhosted.org/packages/ff/41/8543ed2b8604f7c0d89ce066f42007faac1eaa7d79a81555f206a5cdb889/msgpack-1.1.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:be52a8fc79e45b0364210eef5234a7cf8d330836d0a64dfbb878efa903d84620", size = 415013, upload-time = "2025-10-08T09:15:09.83Z" },
- { url = "https://files.pythonhosted.org/packages/41/0d/2ddfaa8b7e1cee6c490d46cb0a39742b19e2481600a7a0e96537e9c22f43/msgpack-1.1.2-cp312-cp312-win32.whl", hash = "sha256:1fff3d825d7859ac888b0fbda39a42d59193543920eda9d9bea44d958a878029", size = 65096, upload-time = "2025-10-08T09:15:11.11Z" },
- { url = "https://files.pythonhosted.org/packages/8c/ec/d431eb7941fb55a31dd6ca3404d41fbb52d99172df2e7707754488390910/msgpack-1.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:1de460f0403172cff81169a30b9a92b260cb809c4cb7e2fc79ae8d0510c78b6b", size = 72708, upload-time = "2025-10-08T09:15:12.554Z" },
- { url = "https://files.pythonhosted.org/packages/c5/31/5b1a1f70eb0e87d1678e9624908f86317787b536060641d6798e3cf70ace/msgpack-1.1.2-cp312-cp312-win_arm64.whl", hash = "sha256:be5980f3ee0e6bd44f3a9e9dea01054f175b50c3e6cdb692bc9424c0bbb8bf69", size = 64119, upload-time = "2025-10-08T09:15:13.589Z" },
- { url = "https://files.pythonhosted.org/packages/6b/31/b46518ecc604d7edf3a4f94cb3bf021fc62aa301f0cb849936968164ef23/msgpack-1.1.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4efd7b5979ccb539c221a4c4e16aac1a533efc97f3b759bb5a5ac9f6d10383bf", size = 81212, upload-time = "2025-10-08T09:15:14.552Z" },
- { url = "https://files.pythonhosted.org/packages/92/dc/c385f38f2c2433333345a82926c6bfa5ecfff3ef787201614317b58dd8be/msgpack-1.1.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:42eefe2c3e2af97ed470eec850facbe1b5ad1d6eacdbadc42ec98e7dcf68b4b7", size = 84315, upload-time = "2025-10-08T09:15:15.543Z" },
- { url = "https://files.pythonhosted.org/packages/d3/68/93180dce57f684a61a88a45ed13047558ded2be46f03acb8dec6d7c513af/msgpack-1.1.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1fdf7d83102bf09e7ce3357de96c59b627395352a4024f6e2458501f158bf999", size = 412721, upload-time = "2025-10-08T09:15:16.567Z" },
- { url = "https://files.pythonhosted.org/packages/5d/ba/459f18c16f2b3fc1a1ca871f72f07d70c07bf768ad0a507a698b8052ac58/msgpack-1.1.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fac4be746328f90caa3cd4bc67e6fe36ca2bf61d5c6eb6d895b6527e3f05071e", size = 424657, upload-time = "2025-10-08T09:15:17.825Z" },
- { url = "https://files.pythonhosted.org/packages/38/f8/4398c46863b093252fe67368b44edc6c13b17f4e6b0e4929dbf0bdb13f23/msgpack-1.1.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fffee09044073e69f2bad787071aeec727183e7580443dfeb8556cbf1978d162", size = 402668, upload-time = "2025-10-08T09:15:19.003Z" },
- { url = "https://files.pythonhosted.org/packages/28/ce/698c1eff75626e4124b4d78e21cca0b4cc90043afb80a507626ea354ab52/msgpack-1.1.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5928604de9b032bc17f5099496417f113c45bc6bc21b5c6920caf34b3c428794", size = 419040, upload-time = "2025-10-08T09:15:20.183Z" },
- { url = "https://files.pythonhosted.org/packages/67/32/f3cd1667028424fa7001d82e10ee35386eea1408b93d399b09fb0aa7875f/msgpack-1.1.2-cp313-cp313-win32.whl", hash = "sha256:a7787d353595c7c7e145e2331abf8b7ff1e6673a6b974ded96e6d4ec09f00c8c", size = 65037, upload-time = "2025-10-08T09:15:21.416Z" },
- { url = "https://files.pythonhosted.org/packages/74/07/1ed8277f8653c40ebc65985180b007879f6a836c525b3885dcc6448ae6cb/msgpack-1.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:a465f0dceb8e13a487e54c07d04ae3ba131c7c5b95e2612596eafde1dccf64a9", size = 72631, upload-time = "2025-10-08T09:15:22.431Z" },
- { url = "https://files.pythonhosted.org/packages/e5/db/0314e4e2db56ebcf450f277904ffd84a7988b9e5da8d0d61ab2d057df2b6/msgpack-1.1.2-cp313-cp313-win_arm64.whl", hash = "sha256:e69b39f8c0aa5ec24b57737ebee40be647035158f14ed4b40e6f150077e21a84", size = 64118, upload-time = "2025-10-08T09:15:23.402Z" },
- { url = "https://files.pythonhosted.org/packages/22/71/201105712d0a2ff07b7873ed3c220292fb2ea5120603c00c4b634bcdafb3/msgpack-1.1.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e23ce8d5f7aa6ea6d2a2b326b4ba46c985dbb204523759984430db7114f8aa00", size = 81127, upload-time = "2025-10-08T09:15:24.408Z" },
- { url = "https://files.pythonhosted.org/packages/1b/9f/38ff9e57a2eade7bf9dfee5eae17f39fc0e998658050279cbb14d97d36d9/msgpack-1.1.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:6c15b7d74c939ebe620dd8e559384be806204d73b4f9356320632d783d1f7939", size = 84981, upload-time = "2025-10-08T09:15:25.812Z" },
- { url = "https://files.pythonhosted.org/packages/8e/a9/3536e385167b88c2cc8f4424c49e28d49a6fc35206d4a8060f136e71f94c/msgpack-1.1.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99e2cb7b9031568a2a5c73aa077180f93dd2e95b4f8d3b8e14a73ae94a9e667e", size = 411885, upload-time = "2025-10-08T09:15:27.22Z" },
- { url = "https://files.pythonhosted.org/packages/2f/40/dc34d1a8d5f1e51fc64640b62b191684da52ca469da9cd74e84936ffa4a6/msgpack-1.1.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:180759d89a057eab503cf62eeec0aa61c4ea1200dee709f3a8e9397dbb3b6931", size = 419658, upload-time = "2025-10-08T09:15:28.4Z" },
- { url = "https://files.pythonhosted.org/packages/3b/ef/2b92e286366500a09a67e03496ee8b8ba00562797a52f3c117aa2b29514b/msgpack-1.1.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:04fb995247a6e83830b62f0b07bf36540c213f6eac8e851166d8d86d83cbd014", size = 403290, upload-time = "2025-10-08T09:15:29.764Z" },
- { url = "https://files.pythonhosted.org/packages/78/90/e0ea7990abea5764e4655b8177aa7c63cdfa89945b6e7641055800f6c16b/msgpack-1.1.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8e22ab046fa7ede9e36eeb4cfad44d46450f37bb05d5ec482b02868f451c95e2", size = 415234, upload-time = "2025-10-08T09:15:31.022Z" },
- { url = "https://files.pythonhosted.org/packages/72/4e/9390aed5db983a2310818cd7d3ec0aecad45e1f7007e0cda79c79507bb0d/msgpack-1.1.2-cp314-cp314-win32.whl", hash = "sha256:80a0ff7d4abf5fecb995fcf235d4064b9a9a8a40a3ab80999e6ac1e30b702717", size = 66391, upload-time = "2025-10-08T09:15:32.265Z" },
- { url = "https://files.pythonhosted.org/packages/6e/f1/abd09c2ae91228c5f3998dbd7f41353def9eac64253de3c8105efa2082f7/msgpack-1.1.2-cp314-cp314-win_amd64.whl", hash = "sha256:9ade919fac6a3e7260b7f64cea89df6bec59104987cbea34d34a2fa15d74310b", size = 73787, upload-time = "2025-10-08T09:15:33.219Z" },
- { url = "https://files.pythonhosted.org/packages/6a/b0/9d9f667ab48b16ad4115c1935d94023b82b3198064cb84a123e97f7466c1/msgpack-1.1.2-cp314-cp314-win_arm64.whl", hash = "sha256:59415c6076b1e30e563eb732e23b994a61c159cec44deaf584e5cc1dd662f2af", size = 66453, upload-time = "2025-10-08T09:15:34.225Z" },
- { url = "https://files.pythonhosted.org/packages/16/67/93f80545eb1792b61a217fa7f06d5e5cb9e0055bed867f43e2b8e012e137/msgpack-1.1.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:897c478140877e5307760b0ea66e0932738879e7aa68144d9b78ea4c8302a84a", size = 85264, upload-time = "2025-10-08T09:15:35.61Z" },
- { url = "https://files.pythonhosted.org/packages/87/1c/33c8a24959cf193966ef11a6f6a2995a65eb066bd681fd085afd519a57ce/msgpack-1.1.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a668204fa43e6d02f89dbe79a30b0d67238d9ec4c5bd8a940fc3a004a47b721b", size = 89076, upload-time = "2025-10-08T09:15:36.619Z" },
- { url = "https://files.pythonhosted.org/packages/fc/6b/62e85ff7193663fbea5c0254ef32f0c77134b4059f8da89b958beb7696f3/msgpack-1.1.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5559d03930d3aa0f3aacb4c42c776af1a2ace2611871c84a75afe436695e6245", size = 435242, upload-time = "2025-10-08T09:15:37.647Z" },
- { url = "https://files.pythonhosted.org/packages/c1/47/5c74ecb4cc277cf09f64e913947871682ffa82b3b93c8dad68083112f412/msgpack-1.1.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:70c5a7a9fea7f036b716191c29047374c10721c389c21e9ffafad04df8c52c90", size = 432509, upload-time = "2025-10-08T09:15:38.794Z" },
- { url = "https://files.pythonhosted.org/packages/24/a4/e98ccdb56dc4e98c929a3f150de1799831c0a800583cde9fa022fa90602d/msgpack-1.1.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f2cb069d8b981abc72b41aea1c580ce92d57c673ec61af4c500153a626cb9e20", size = 415957, upload-time = "2025-10-08T09:15:40.238Z" },
- { url = "https://files.pythonhosted.org/packages/da/28/6951f7fb67bc0a4e184a6b38ab71a92d9ba58080b27a77d3e2fb0be5998f/msgpack-1.1.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d62ce1f483f355f61adb5433ebfd8868c5f078d1a52d042b0a998682b4fa8c27", size = 422910, upload-time = "2025-10-08T09:15:41.505Z" },
- { url = "https://files.pythonhosted.org/packages/f0/03/42106dcded51f0a0b5284d3ce30a671e7bd3f7318d122b2ead66ad289fed/msgpack-1.1.2-cp314-cp314t-win32.whl", hash = "sha256:1d1418482b1ee984625d88aa9585db570180c286d942da463533b238b98b812b", size = 75197, upload-time = "2025-10-08T09:15:42.954Z" },
- { url = "https://files.pythonhosted.org/packages/15/86/d0071e94987f8db59d4eeb386ddc64d0bb9b10820a8d82bcd3e53eeb2da6/msgpack-1.1.2-cp314-cp314t-win_amd64.whl", hash = "sha256:5a46bf7e831d09470ad92dff02b8b1ac92175ca36b087f904a0519857c6be3ff", size = 85772, upload-time = "2025-10-08T09:15:43.954Z" },
- { url = "https://files.pythonhosted.org/packages/81/f2/08ace4142eb281c12701fc3b93a10795e4d4dc7f753911d836675050f886/msgpack-1.1.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d99ef64f349d5ec3293688e91486c5fdb925ed03807f64d98d205d2713c60b46", size = 70868, upload-time = "2025-10-08T09:15:44.959Z" },
- { url = "https://files.pythonhosted.org/packages/46/73/85469b4aa71d25e5949fee50d3c2cf46f69cea619fe97cfe309058080f75/msgpack-1.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ea5405c46e690122a76531ab97a079e184c0daf491e588592d6a23d3e32af99e", size = 81529, upload-time = "2025-10-08T09:15:46.069Z" },
- { url = "https://files.pythonhosted.org/packages/6c/3a/7d4077e8ae720b29d2b299a9591969f0d105146960681ea6f4121e6d0f8d/msgpack-1.1.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9fba231af7a933400238cb357ecccf8ab5d51535ea95d94fc35b7806218ff844", size = 84106, upload-time = "2025-10-08T09:15:47.064Z" },
- { url = "https://files.pythonhosted.org/packages/df/c0/da451c74746ed9388dca1b4ec647c82945f4e2f8ce242c25fb7c0e12181f/msgpack-1.1.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a8f6e7d30253714751aa0b0c84ae28948e852ee7fb0524082e6716769124bc23", size = 396656, upload-time = "2025-10-08T09:15:48.118Z" },
- { url = "https://files.pythonhosted.org/packages/e5/a1/20486c29a31ec9f0f88377fdf7eb7a67f30bcb5e0f89b7550f6f16d9373b/msgpack-1.1.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:94fd7dc7d8cb0a54432f296f2246bc39474e017204ca6f4ff345941d4ed285a7", size = 404722, upload-time = "2025-10-08T09:15:49.328Z" },
- { url = "https://files.pythonhosted.org/packages/ad/ae/e613b0a526d54ce85447d9665c2ff8c3210a784378d50573321d43d324b8/msgpack-1.1.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:350ad5353a467d9e3b126d8d1b90fe05ad081e2e1cef5753f8c345217c37e7b8", size = 391838, upload-time = "2025-10-08T09:15:50.517Z" },
- { url = "https://files.pythonhosted.org/packages/49/6a/07f3e10ed4503045b882ef7bf8512d01d8a9e25056950a977bd5f50df1c2/msgpack-1.1.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6bde749afe671dc44893f8d08e83bf475a1a14570d67c4bb5cec5573463c8833", size = 397516, upload-time = "2025-10-08T09:15:51.646Z" },
- { url = "https://files.pythonhosted.org/packages/76/9b/a86828e75986c12a3809c1e5062f5eba8e0cae3dfa2bf724ed2b1bb72b4c/msgpack-1.1.2-cp39-cp39-win32.whl", hash = "sha256:ad09b984828d6b7bb52d1d1d0c9be68ad781fa004ca39216c8a1e63c0f34ba3c", size = 64863, upload-time = "2025-10-08T09:15:53.118Z" },
- { url = "https://files.pythonhosted.org/packages/14/a7/b1992b4fb3da3b413f5fb78a63bad42f256c3be2352eb69273c3789c2c96/msgpack-1.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:67016ae8c8965124fdede9d3769528ad8284f14d635337ffa6a713a580f6c030", size = 71540, upload-time = "2025-10-08T09:15:55.573Z" },
-]
-
-[[package]]
-name = "networkx"
-version = "3.2.1"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/c4/80/a84676339aaae2f1cfdf9f418701dd634aef9cc76f708ef55c36ff39c3ca/networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6", size = 2073928, upload-time = "2023-10-28T08:41:39.364Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/d5/f0/8fbc882ca80cf077f1b246c0e3c3465f7f415439bdea6b899f6b19f61f70/networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2", size = 1647772, upload-time = "2023-10-28T08:41:36.945Z" },
-]
-
-[[package]]
-name = "networkx"
-version = "3.6.1"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/6a/51/63fe664f3908c97be9d2e4f1158eb633317598cfa6e1fc14af5383f17512/networkx-3.6.1.tar.gz", hash = "sha256:26b7c357accc0c8cde558ad486283728b65b6a95d85ee1cd66bafab4c8168509", size = 2517025, upload-time = "2025-12-08T17:02:39.908Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/9e/c9/b2622292ea83fbb4ec318f5b9ab867d0a28ab43c5717bb85b0a5f6b3b0a4/networkx-3.6.1-py3-none-any.whl", hash = "sha256:d47fbf302e7d9cbbb9e2555a0d267983d2aa476bac30e90dfbe5669bd57f3762", size = 2068504, upload-time = "2025-12-08T17:02:38.159Z" },
-]
-
-[[package]]
-name = "nodeenv"
-version = "1.10.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" },
-]
-
-[[package]]
-name = "numba"
-version = "0.60.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "llvmlite", version = "0.43.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/3c/93/2849300a9184775ba274aba6f82f303343669b0592b7bb0849ea713dabb0/numba-0.60.0.tar.gz", hash = "sha256:5df6158e5584eece5fc83294b949fd30b9f1125df7708862205217e068aabf16", size = 2702171, upload-time = "2024-06-13T18:11:19.869Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/f7/cf/baa13a7e3556d73d9e38021e6d6aa4aeb30d8b94545aa8b70d0f24a1ccc4/numba-0.60.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d761de835cd38fb400d2c26bb103a2726f548dc30368853121d66201672e651", size = 2647627, upload-time = "2024-06-13T18:10:29.857Z" },
- { url = "https://files.pythonhosted.org/packages/ac/ba/4b57fa498564457c3cc9fc9e570a6b08e6086c74220f24baaf04e54b995f/numba-0.60.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:159e618ef213fba758837f9837fb402bbe65326e60ba0633dbe6c7f274d42c1b", size = 2650322, upload-time = "2024-06-13T18:10:32.849Z" },
- { url = "https://files.pythonhosted.org/packages/28/98/7ea97ee75870a54f938a8c70f7e0be4495ba5349c5f9db09d467c4a5d5b7/numba-0.60.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1527dc578b95c7c4ff248792ec33d097ba6bef9eda466c948b68dfc995c25781", size = 3407390, upload-time = "2024-06-13T18:10:34.741Z" },
- { url = "https://files.pythonhosted.org/packages/79/58/cb4ac5b8f7ec64200460aef1fed88258fb872ceef504ab1f989d2ff0f684/numba-0.60.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe0b28abb8d70f8160798f4de9d486143200f34458d34c4a214114e445d7124e", size = 3699694, upload-time = "2024-06-13T18:10:37.295Z" },
- { url = "https://files.pythonhosted.org/packages/1c/b0/c61a93ca947d12233ff45de506ddbf52af3f752066a0b8be4d27426e16da/numba-0.60.0-cp310-cp310-win_amd64.whl", hash = "sha256:19407ced081d7e2e4b8d8c36aa57b7452e0283871c296e12d798852bc7d7f198", size = 2687030, upload-time = "2024-06-13T18:10:39.47Z" },
- { url = "https://files.pythonhosted.org/packages/98/ad/df18d492a8f00d29a30db307904b9b296e37507034eedb523876f3a2e13e/numba-0.60.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a17b70fc9e380ee29c42717e8cc0bfaa5556c416d94f9aa96ba13acb41bdece8", size = 2647254, upload-time = "2024-06-13T18:10:41.69Z" },
- { url = "https://files.pythonhosted.org/packages/9a/51/a4dc2c01ce7a850b8e56ff6d5381d047a5daea83d12bad08aa071d34b2ee/numba-0.60.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3fb02b344a2a80efa6f677aa5c40cd5dd452e1b35f8d1c2af0dfd9ada9978e4b", size = 2649970, upload-time = "2024-06-13T18:10:44.682Z" },
- { url = "https://files.pythonhosted.org/packages/f9/4c/8889ac94c0b33dca80bed11564b8c6d9ea14d7f094e674c58e5c5b05859b/numba-0.60.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f4fde652ea604ea3c86508a3fb31556a6157b2c76c8b51b1d45eb40c8598703", size = 3412492, upload-time = "2024-06-13T18:10:47.1Z" },
- { url = "https://files.pythonhosted.org/packages/57/03/2b4245b05b71c0cee667e6a0b51606dfa7f4157c9093d71c6b208385a611/numba-0.60.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4142d7ac0210cc86432b818338a2bc368dc773a2f5cf1e32ff7c5b378bd63ee8", size = 3705018, upload-time = "2024-06-13T18:10:49.539Z" },
- { url = "https://files.pythonhosted.org/packages/79/89/2d924ca60dbf949f18a6fec223a2445f5f428d9a5f97a6b29c2122319015/numba-0.60.0-cp311-cp311-win_amd64.whl", hash = "sha256:cac02c041e9b5bc8cf8f2034ff6f0dbafccd1ae9590dc146b3a02a45e53af4e2", size = 2686920, upload-time = "2024-06-13T18:10:51.937Z" },
- { url = "https://files.pythonhosted.org/packages/eb/5c/b5ec752c475e78a6c3676b67c514220dbde2725896bbb0b6ec6ea54b2738/numba-0.60.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7da4098db31182fc5ffe4bc42c6f24cd7d1cb8a14b59fd755bfee32e34b8404", size = 2647866, upload-time = "2024-06-13T18:10:54.453Z" },
- { url = "https://files.pythonhosted.org/packages/65/42/39559664b2e7c15689a638c2a38b3b74c6e69a04e2b3019b9f7742479188/numba-0.60.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:38d6ea4c1f56417076ecf8fc327c831ae793282e0ff51080c5094cb726507b1c", size = 2650208, upload-time = "2024-06-13T18:10:56.779Z" },
- { url = "https://files.pythonhosted.org/packages/67/88/c4459ccc05674ef02119abf2888ccd3e2fed12a323f52255f4982fc95876/numba-0.60.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:62908d29fb6a3229c242e981ca27e32a6e606cc253fc9e8faeb0e48760de241e", size = 3466946, upload-time = "2024-06-13T18:10:58.961Z" },
- { url = "https://files.pythonhosted.org/packages/8b/41/ac11cf33524def12aa5bd698226ae196a1185831c05ed29dc0c56eaa308b/numba-0.60.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0ebaa91538e996f708f1ab30ef4d3ddc344b64b5227b67a57aa74f401bb68b9d", size = 3761463, upload-time = "2024-06-13T18:11:01.657Z" },
- { url = "https://files.pythonhosted.org/packages/ca/bd/0fe29fcd1b6a8de479a4ed25c6e56470e467e3611c079d55869ceef2b6d1/numba-0.60.0-cp312-cp312-win_amd64.whl", hash = "sha256:f75262e8fe7fa96db1dca93d53a194a38c46da28b112b8a4aca168f0df860347", size = 2707588, upload-time = "2024-06-13T18:11:04.261Z" },
- { url = "https://files.pythonhosted.org/packages/68/1a/87c53f836cdf557083248c3f47212271f220280ff766538795e77c8c6bbf/numba-0.60.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:01ef4cd7d83abe087d644eaa3d95831b777aa21d441a23703d649e06b8e06b74", size = 2647186, upload-time = "2024-06-13T18:11:06.753Z" },
- { url = "https://files.pythonhosted.org/packages/28/14/a5baa1f2edea7b49afa4dc1bb1b126645198cf1075186853b5b497be826e/numba-0.60.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:819a3dfd4630d95fd574036f99e47212a1af41cbcb019bf8afac63ff56834449", size = 2650038, upload-time = "2024-06-13T18:11:10.869Z" },
- { url = "https://files.pythonhosted.org/packages/3b/bd/f1985719ff34e37e07bb18f9d3acd17e5a21da255f550c8eae031e2ddf5f/numba-0.60.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b983bd6ad82fe868493012487f34eae8bf7dd94654951404114f23c3466d34b", size = 3403010, upload-time = "2024-06-13T18:11:13.057Z" },
- { url = "https://files.pythonhosted.org/packages/54/9b/cd73d3f6617ddc8398a63ef97d8dc9139a9879b9ca8a7ca4b8789056ea46/numba-0.60.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c151748cd269ddeab66334bd754817ffc0cabd9433acb0f551697e5151917d25", size = 3695086, upload-time = "2024-06-13T18:11:15.497Z" },
- { url = "https://files.pythonhosted.org/packages/01/01/8b7b670c77c5ea0e47e283d82332969bf672ab6410d0b2610cac5b7a3ded/numba-0.60.0-cp39-cp39-win_amd64.whl", hash = "sha256:3031547a015710140e8c87226b4cfe927cac199835e5bf7d4fe5cb64e814e3ab", size = 2686978, upload-time = "2024-06-13T18:11:17.765Z" },
-]
-
-[[package]]
-name = "numba"
-version = "0.64.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "llvmlite", version = "0.46.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" },
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" },
- { name = "numpy", version = "2.4.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/23/c9/a0fb41787d01d621046138da30f6c2100d80857bf34b3390dd68040f27a3/numba-0.64.0.tar.gz", hash = "sha256:95e7300af648baa3308127b1955b52ce6d11889d16e8cfe637b4f85d2fca52b1", size = 2765679, upload-time = "2026-02-18T18:41:20.974Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/4c/5e/604fed821cd7e3426bb3bc99a7ed6ac0bcb489f4cd93052256437d082f95/numba-0.64.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc09b79440952e3098eeebea4bf6e8d2355fb7f12734fcd9fc5039f0dca90727", size = 2683250, upload-time = "2026-02-18T18:40:45.829Z" },
- { url = "https://files.pythonhosted.org/packages/4f/9f/9275a723d050b5f1a9b1c7fb7dbfce324fef301a8e50c5f88338569db06c/numba-0.64.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1afe3a80b8c2f376b211fb7a49e536ef9eafc92436afc95a2f41ea5392f8cc65", size = 3742168, upload-time = "2026-02-18T18:40:48.066Z" },
- { url = "https://files.pythonhosted.org/packages/e2/d1/97ca7dddaa36b16f4c46319bdb6b4913ba15d0245317d0d8ccde7b2d7d92/numba-0.64.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:23804194b93b8cd416c6444b5fbc4956082a45fed2d25436ef49c594666e7f7e", size = 3449103, upload-time = "2026-02-18T18:40:49.905Z" },
- { url = "https://files.pythonhosted.org/packages/52/0a/b9e137ad78415373e3353564500e8bf29dbce3c0d73633bb384d4e5d7537/numba-0.64.0-cp310-cp310-win_amd64.whl", hash = "sha256:e2a9fe998bb2cf848960b34db02c2c3b5e02cf82c07a26d9eef3494069740278", size = 2749950, upload-time = "2026-02-18T18:40:51.536Z" },
- { url = "https://files.pythonhosted.org/packages/89/a3/1a4286a1c16136c8896d8e2090d950e79b3ec626d3a8dc9620f6234d5a38/numba-0.64.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:766156ee4b8afeeb2b2e23c81307c5d19031f18d5ce76ae2c5fb1429e72fa92b", size = 2682938, upload-time = "2026-02-18T18:40:52.897Z" },
- { url = "https://files.pythonhosted.org/packages/19/16/aa6e3ba3cd45435c117d1101b278b646444ed05b7c712af631b91353f573/numba-0.64.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d17071b4ffc9d39b75d8e6c101a36f0c81b646123859898c9799cb31807c8f78", size = 3747376, upload-time = "2026-02-18T18:40:54.925Z" },
- { url = "https://files.pythonhosted.org/packages/c0/f1/dd2f25e18d75fdf897f730b78c5a7b00cc4450f2405564dbebfaf359f21f/numba-0.64.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4ead5630434133bac87fa67526eacb264535e4e9a2d5ec780e0b4fc381a7d275", size = 3453292, upload-time = "2026-02-18T18:40:56.818Z" },
- { url = "https://files.pythonhosted.org/packages/31/29/e09d5630578a50a2b3fa154990b6b839cf95327aa0709e2d50d0b6816cd1/numba-0.64.0-cp311-cp311-win_amd64.whl", hash = "sha256:f2b1fd93e7aaac07d6fbaed059c00679f591f2423885c206d8c1b55d65ca3f2d", size = 2749824, upload-time = "2026-02-18T18:40:58.392Z" },
- { url = "https://files.pythonhosted.org/packages/70/a6/9fc52cb4f0d5e6d8b5f4d81615bc01012e3cf24e1052a60f17a68deb8092/numba-0.64.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:69440a8e8bc1a81028446f06b363e28635aa67bd51b1e498023f03b812e0ce68", size = 2683418, upload-time = "2026-02-18T18:40:59.886Z" },
- { url = "https://files.pythonhosted.org/packages/9b/89/1a74ea99b180b7a5587b0301ed1b183a2937c4b4b67f7994689b5d36fc34/numba-0.64.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f13721011f693ba558b8dd4e4db7f2640462bba1b855bdc804be45bbeb55031a", size = 3804087, upload-time = "2026-02-18T18:41:01.699Z" },
- { url = "https://files.pythonhosted.org/packages/91/e1/583c647404b15f807410510fec1eb9b80cb8474165940b7749f026f21cbc/numba-0.64.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e0b180b1133f2b5d8b3f09d96b6d7a9e51a7da5dda3c09e998b5bcfac85d222c", size = 3504309, upload-time = "2026-02-18T18:41:03.252Z" },
- { url = "https://files.pythonhosted.org/packages/85/23/0fce5789b8a5035e7ace21216a468143f3144e02013252116616c58339aa/numba-0.64.0-cp312-cp312-win_amd64.whl", hash = "sha256:e63dc94023b47894849b8b106db28ccb98b49d5498b98878fac1a38f83ac007a", size = 2752740, upload-time = "2026-02-18T18:41:05.097Z" },
- { url = "https://files.pythonhosted.org/packages/52/80/2734de90f9300a6e2503b35ee50d9599926b90cbb7ac54f9e40074cd07f1/numba-0.64.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:3bab2c872194dcd985f1153b70782ec0fbbe348fffef340264eacd3a76d59fd6", size = 2683392, upload-time = "2026-02-18T18:41:06.563Z" },
- { url = "https://files.pythonhosted.org/packages/42/e8/14b5853ebefd5b37723ef365c5318a30ce0702d39057eaa8d7d76392859d/numba-0.64.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:703a246c60832cad231d2e73c1182f25bf3cc8b699759ec8fe58a2dbc689a70c", size = 3812245, upload-time = "2026-02-18T18:41:07.963Z" },
- { url = "https://files.pythonhosted.org/packages/8a/a2/f60dc6c96d19b7185144265a5fbf01c14993d37ff4cd324b09d0212aa7ce/numba-0.64.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e2e49a7900ee971d32af7609adc0cfe6aa7477c6f6cccdf6d8138538cf7756f", size = 3511328, upload-time = "2026-02-18T18:41:09.504Z" },
- { url = "https://files.pythonhosted.org/packages/9c/2a/fe7003ea7e7237ee7014f8eaeeb7b0d228a2db22572ca85bab2648cf52cb/numba-0.64.0-cp313-cp313-win_amd64.whl", hash = "sha256:396f43c3f77e78d7ec84cdfc6b04969c78f8f169351b3c4db814b97e7acf4245", size = 2752668, upload-time = "2026-02-18T18:41:11.455Z" },
- { url = "https://files.pythonhosted.org/packages/3d/8a/77d26afe0988c592dd97cb8d4e80bfb3dfc7dbdacfca7d74a7c5c81dd8c2/numba-0.64.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:f565d55eaeff382cbc86c63c8c610347453af3d1e7afb2b6569aac1c9b5c93ce", size = 2683590, upload-time = "2026-02-18T18:41:12.897Z" },
- { url = "https://files.pythonhosted.org/packages/8e/4b/600b8b7cdbc7f9cebee9ea3d13bb70052a79baf28944024ffcb59f0712e3/numba-0.64.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9b55169b18892c783f85e9ad9e6f5297a6d12967e4414e6b71361086025ff0bb", size = 3781163, upload-time = "2026-02-18T18:41:15.377Z" },
- { url = "https://files.pythonhosted.org/packages/ff/73/53f2d32bfa45b7175e9944f6b816d8c32840178c3eee9325033db5bf838e/numba-0.64.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:196bcafa02c9dd1707e068434f6d5cedde0feb787e3432f7f1f0e993cc336c4c", size = 3481172, upload-time = "2026-02-18T18:41:17.281Z" },
- { url = "https://files.pythonhosted.org/packages/b5/00/aebd2f7f1e11e38814bb96e95a27580817a7b340608d3ac085fdbab83174/numba-0.64.0-cp314-cp314-win_amd64.whl", hash = "sha256:213e9acbe7f1c05090592e79020315c1749dd52517b90e94c517dca3f014d4a1", size = 2754700, upload-time = "2026-02-18T18:41:19.277Z" },
-]
-
-[[package]]
-name = "numpy"
-version = "2.0.2"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/a9/75/10dd1f8116a8b796cb2c737b674e02d02e80454bda953fa7e65d8c12b016/numpy-2.0.2.tar.gz", hash = "sha256:883c987dee1880e2a864ab0dc9892292582510604156762362d9326444636e78", size = 18902015, upload-time = "2024-08-26T20:19:40.945Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/21/91/3495b3237510f79f5d81f2508f9f13fea78ebfdf07538fc7444badda173d/numpy-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:51129a29dbe56f9ca83438b706e2e69a39892b5eda6cedcb6b0c9fdc9b0d3ece", size = 21165245, upload-time = "2024-08-26T20:04:14.625Z" },
- { url = "https://files.pythonhosted.org/packages/05/33/26178c7d437a87082d11019292dce6d3fe6f0e9026b7b2309cbf3e489b1d/numpy-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f15975dfec0cf2239224d80e32c3170b1d168335eaedee69da84fbe9f1f9cd04", size = 13738540, upload-time = "2024-08-26T20:04:36.784Z" },
- { url = "https://files.pythonhosted.org/packages/ec/31/cc46e13bf07644efc7a4bf68df2df5fb2a1a88d0cd0da9ddc84dc0033e51/numpy-2.0.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8c5713284ce4e282544c68d1c3b2c7161d38c256d2eefc93c1d683cf47683e66", size = 5300623, upload-time = "2024-08-26T20:04:46.491Z" },
- { url = "https://files.pythonhosted.org/packages/6e/16/7bfcebf27bb4f9d7ec67332ffebee4d1bf085c84246552d52dbb548600e7/numpy-2.0.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:becfae3ddd30736fe1889a37f1f580e245ba79a5855bff5f2a29cb3ccc22dd7b", size = 6901774, upload-time = "2024-08-26T20:04:58.173Z" },
- { url = "https://files.pythonhosted.org/packages/f9/a3/561c531c0e8bf082c5bef509d00d56f82e0ea7e1e3e3a7fc8fa78742a6e5/numpy-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2da5960c3cf0df7eafefd806d4e612c5e19358de82cb3c343631188991566ccd", size = 13907081, upload-time = "2024-08-26T20:05:19.098Z" },
- { url = "https://files.pythonhosted.org/packages/fa/66/f7177ab331876200ac7563a580140643d1179c8b4b6a6b0fc9838de2a9b8/numpy-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:496f71341824ed9f3d2fd36cf3ac57ae2e0165c143b55c3a035ee219413f3318", size = 19523451, upload-time = "2024-08-26T20:05:47.479Z" },
- { url = "https://files.pythonhosted.org/packages/25/7f/0b209498009ad6453e4efc2c65bcdf0ae08a182b2b7877d7ab38a92dc542/numpy-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a61ec659f68ae254e4d237816e33171497e978140353c0c2038d46e63282d0c8", size = 19927572, upload-time = "2024-08-26T20:06:17.137Z" },
- { url = "https://files.pythonhosted.org/packages/3e/df/2619393b1e1b565cd2d4c4403bdd979621e2c4dea1f8532754b2598ed63b/numpy-2.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d731a1c6116ba289c1e9ee714b08a8ff882944d4ad631fd411106a30f083c326", size = 14400722, upload-time = "2024-08-26T20:06:39.16Z" },
- { url = "https://files.pythonhosted.org/packages/22/ad/77e921b9f256d5da36424ffb711ae79ca3f451ff8489eeca544d0701d74a/numpy-2.0.2-cp310-cp310-win32.whl", hash = "sha256:984d96121c9f9616cd33fbd0618b7f08e0cfc9600a7ee1d6fd9b239186d19d97", size = 6472170, upload-time = "2024-08-26T20:06:50.361Z" },
- { url = "https://files.pythonhosted.org/packages/10/05/3442317535028bc29cf0c0dd4c191a4481e8376e9f0db6bcf29703cadae6/numpy-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:c7b0be4ef08607dd04da4092faee0b86607f111d5ae68036f16cc787e250a131", size = 15905558, upload-time = "2024-08-26T20:07:13.881Z" },
- { url = "https://files.pythonhosted.org/packages/8b/cf/034500fb83041aa0286e0fb16e7c76e5c8b67c0711bb6e9e9737a717d5fe/numpy-2.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:49ca4decb342d66018b01932139c0961a8f9ddc7589611158cb3c27cbcf76448", size = 21169137, upload-time = "2024-08-26T20:07:45.345Z" },
- { url = "https://files.pythonhosted.org/packages/4a/d9/32de45561811a4b87fbdee23b5797394e3d1504b4a7cf40c10199848893e/numpy-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:11a76c372d1d37437857280aa142086476136a8c0f373b2e648ab2c8f18fb195", size = 13703552, upload-time = "2024-08-26T20:08:06.666Z" },
- { url = "https://files.pythonhosted.org/packages/c1/ca/2f384720020c7b244d22508cb7ab23d95f179fcfff33c31a6eeba8d6c512/numpy-2.0.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:807ec44583fd708a21d4a11d94aedf2f4f3c3719035c76a2bbe1fe8e217bdc57", size = 5298957, upload-time = "2024-08-26T20:08:15.83Z" },
- { url = "https://files.pythonhosted.org/packages/0e/78/a3e4f9fb6aa4e6fdca0c5428e8ba039408514388cf62d89651aade838269/numpy-2.0.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8cafab480740e22f8d833acefed5cc87ce276f4ece12fdaa2e8903db2f82897a", size = 6905573, upload-time = "2024-08-26T20:08:27.185Z" },
- { url = "https://files.pythonhosted.org/packages/a0/72/cfc3a1beb2caf4efc9d0b38a15fe34025230da27e1c08cc2eb9bfb1c7231/numpy-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a15f476a45e6e5a3a79d8a14e62161d27ad897381fecfa4a09ed5322f2085669", size = 13914330, upload-time = "2024-08-26T20:08:48.058Z" },
- { url = "https://files.pythonhosted.org/packages/ba/a8/c17acf65a931ce551fee11b72e8de63bf7e8a6f0e21add4c937c83563538/numpy-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13e689d772146140a252c3a28501da66dfecd77490b498b168b501835041f951", size = 19534895, upload-time = "2024-08-26T20:09:16.536Z" },
- { url = "https://files.pythonhosted.org/packages/ba/86/8767f3d54f6ae0165749f84648da9dcc8cd78ab65d415494962c86fac80f/numpy-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9ea91dfb7c3d1c56a0e55657c0afb38cf1eeae4544c208dc465c3c9f3a7c09f9", size = 19937253, upload-time = "2024-08-26T20:09:46.263Z" },
- { url = "https://files.pythonhosted.org/packages/df/87/f76450e6e1c14e5bb1eae6836478b1028e096fd02e85c1c37674606ab752/numpy-2.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c1c9307701fec8f3f7a1e6711f9089c06e6284b3afbbcd259f7791282d660a15", size = 14414074, upload-time = "2024-08-26T20:10:08.483Z" },
- { url = "https://files.pythonhosted.org/packages/5c/ca/0f0f328e1e59f73754f06e1adfb909de43726d4f24c6a3f8805f34f2b0fa/numpy-2.0.2-cp311-cp311-win32.whl", hash = "sha256:a392a68bd329eafac5817e5aefeb39038c48b671afd242710b451e76090e81f4", size = 6470640, upload-time = "2024-08-26T20:10:19.732Z" },
- { url = "https://files.pythonhosted.org/packages/eb/57/3a3f14d3a759dcf9bf6e9eda905794726b758819df4663f217d658a58695/numpy-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:286cd40ce2b7d652a6f22efdfc6d1edf879440e53e76a75955bc0c826c7e64dc", size = 15910230, upload-time = "2024-08-26T20:10:43.413Z" },
- { url = "https://files.pythonhosted.org/packages/45/40/2e117be60ec50d98fa08c2f8c48e09b3edea93cfcabd5a9ff6925d54b1c2/numpy-2.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:df55d490dea7934f330006d0f81e8551ba6010a5bf035a249ef61a94f21c500b", size = 20895803, upload-time = "2024-08-26T20:11:13.916Z" },
- { url = "https://files.pythonhosted.org/packages/46/92/1b8b8dee833f53cef3e0a3f69b2374467789e0bb7399689582314df02651/numpy-2.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8df823f570d9adf0978347d1f926b2a867d5608f434a7cff7f7908c6570dcf5e", size = 13471835, upload-time = "2024-08-26T20:11:34.779Z" },
- { url = "https://files.pythonhosted.org/packages/7f/19/e2793bde475f1edaea6945be141aef6c8b4c669b90c90a300a8954d08f0a/numpy-2.0.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9a92ae5c14811e390f3767053ff54eaee3bf84576d99a2456391401323f4ec2c", size = 5038499, upload-time = "2024-08-26T20:11:43.902Z" },
- { url = "https://files.pythonhosted.org/packages/e3/ff/ddf6dac2ff0dd50a7327bcdba45cb0264d0e96bb44d33324853f781a8f3c/numpy-2.0.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a842d573724391493a97a62ebbb8e731f8a5dcc5d285dfc99141ca15a3302d0c", size = 6633497, upload-time = "2024-08-26T20:11:55.09Z" },
- { url = "https://files.pythonhosted.org/packages/72/21/67f36eac8e2d2cd652a2e69595a54128297cdcb1ff3931cfc87838874bd4/numpy-2.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05e238064fc0610c840d1cf6a13bf63d7e391717d247f1bf0318172e759e692", size = 13621158, upload-time = "2024-08-26T20:12:14.95Z" },
- { url = "https://files.pythonhosted.org/packages/39/68/e9f1126d757653496dbc096cb429014347a36b228f5a991dae2c6b6cfd40/numpy-2.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0123ffdaa88fa4ab64835dcbde75dcdf89c453c922f18dced6e27c90d1d0ec5a", size = 19236173, upload-time = "2024-08-26T20:12:44.049Z" },
- { url = "https://files.pythonhosted.org/packages/d1/e9/1f5333281e4ebf483ba1c888b1d61ba7e78d7e910fdd8e6499667041cc35/numpy-2.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:96a55f64139912d61de9137f11bf39a55ec8faec288c75a54f93dfd39f7eb40c", size = 19634174, upload-time = "2024-08-26T20:13:13.634Z" },
- { url = "https://files.pythonhosted.org/packages/71/af/a469674070c8d8408384e3012e064299f7a2de540738a8e414dcfd639996/numpy-2.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec9852fb39354b5a45a80bdab5ac02dd02b15f44b3804e9f00c556bf24b4bded", size = 14099701, upload-time = "2024-08-26T20:13:34.851Z" },
- { url = "https://files.pythonhosted.org/packages/d0/3d/08ea9f239d0e0e939b6ca52ad403c84a2bce1bde301a8eb4888c1c1543f1/numpy-2.0.2-cp312-cp312-win32.whl", hash = "sha256:671bec6496f83202ed2d3c8fdc486a8fc86942f2e69ff0e986140339a63bcbe5", size = 6174313, upload-time = "2024-08-26T20:13:45.653Z" },
- { url = "https://files.pythonhosted.org/packages/b2/b5/4ac39baebf1fdb2e72585c8352c56d063b6126be9fc95bd2bb5ef5770c20/numpy-2.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:cfd41e13fdc257aa5778496b8caa5e856dc4896d4ccf01841daee1d96465467a", size = 15606179, upload-time = "2024-08-26T20:14:08.786Z" },
- { url = "https://files.pythonhosted.org/packages/43/c1/41c8f6df3162b0c6ffd4437d729115704bd43363de0090c7f913cfbc2d89/numpy-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9059e10581ce4093f735ed23f3b9d283b9d517ff46009ddd485f1747eb22653c", size = 21169942, upload-time = "2024-08-26T20:14:40.108Z" },
- { url = "https://files.pythonhosted.org/packages/39/bc/fd298f308dcd232b56a4031fd6ddf11c43f9917fbc937e53762f7b5a3bb1/numpy-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:423e89b23490805d2a5a96fe40ec507407b8ee786d66f7328be214f9679df6dd", size = 13711512, upload-time = "2024-08-26T20:15:00.985Z" },
- { url = "https://files.pythonhosted.org/packages/96/ff/06d1aa3eeb1c614eda245c1ba4fb88c483bee6520d361641331872ac4b82/numpy-2.0.2-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:2b2955fa6f11907cf7a70dab0d0755159bca87755e831e47932367fc8f2f2d0b", size = 5306976, upload-time = "2024-08-26T20:15:10.876Z" },
- { url = "https://files.pythonhosted.org/packages/2d/98/121996dcfb10a6087a05e54453e28e58694a7db62c5a5a29cee14c6e047b/numpy-2.0.2-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:97032a27bd9d8988b9a97a8c4d2c9f2c15a81f61e2f21404d7e8ef00cb5be729", size = 6906494, upload-time = "2024-08-26T20:15:22.055Z" },
- { url = "https://files.pythonhosted.org/packages/15/31/9dffc70da6b9bbf7968f6551967fc21156207366272c2a40b4ed6008dc9b/numpy-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e795a8be3ddbac43274f18588329c72939870a16cae810c2b73461c40718ab1", size = 13912596, upload-time = "2024-08-26T20:15:42.452Z" },
- { url = "https://files.pythonhosted.org/packages/b9/14/78635daab4b07c0930c919d451b8bf8c164774e6a3413aed04a6d95758ce/numpy-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b258c385842546006213344c50655ff1555a9338e2e5e02a0756dc3e803dd", size = 19526099, upload-time = "2024-08-26T20:16:11.048Z" },
- { url = "https://files.pythonhosted.org/packages/26/4c/0eeca4614003077f68bfe7aac8b7496f04221865b3a5e7cb230c9d055afd/numpy-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fec9451a7789926bcf7c2b8d187292c9f93ea30284802a0ab3f5be8ab36865d", size = 19932823, upload-time = "2024-08-26T20:16:40.171Z" },
- { url = "https://files.pythonhosted.org/packages/f1/46/ea25b98b13dccaebddf1a803f8c748680d972e00507cd9bc6dcdb5aa2ac1/numpy-2.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9189427407d88ff25ecf8f12469d4d39d35bee1db5d39fc5c168c6f088a6956d", size = 14404424, upload-time = "2024-08-26T20:17:02.604Z" },
- { url = "https://files.pythonhosted.org/packages/c8/a6/177dd88d95ecf07e722d21008b1b40e681a929eb9e329684d449c36586b2/numpy-2.0.2-cp39-cp39-win32.whl", hash = "sha256:905d16e0c60200656500c95b6b8dca5d109e23cb24abc701d41c02d74c6b3afa", size = 6476809, upload-time = "2024-08-26T20:17:13.553Z" },
- { url = "https://files.pythonhosted.org/packages/ea/2b/7fc9f4e7ae5b507c1a3a21f0f15ed03e794c1242ea8a242ac158beb56034/numpy-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:a3f4ab0caa7f053f6797fcd4e1e25caee367db3112ef2b6ef82d749530768c73", size = 15911314, upload-time = "2024-08-26T20:17:36.72Z" },
- { url = "https://files.pythonhosted.org/packages/8f/3b/df5a870ac6a3be3a86856ce195ef42eec7ae50d2a202be1f5a4b3b340e14/numpy-2.0.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7f0a0c6f12e07fa94133c8a67404322845220c06a9e80e85999afe727f7438b8", size = 21025288, upload-time = "2024-08-26T20:18:07.732Z" },
- { url = "https://files.pythonhosted.org/packages/2c/97/51af92f18d6f6f2d9ad8b482a99fb74e142d71372da5d834b3a2747a446e/numpy-2.0.2-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:312950fdd060354350ed123c0e25a71327d3711584beaef30cdaa93320c392d4", size = 6762793, upload-time = "2024-08-26T20:18:19.125Z" },
- { url = "https://files.pythonhosted.org/packages/12/46/de1fbd0c1b5ccaa7f9a005b66761533e2f6a3e560096682683a223631fe9/numpy-2.0.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26df23238872200f63518dd2aa984cfca675d82469535dc7162dc2ee52d9dd5c", size = 19334885, upload-time = "2024-08-26T20:18:47.237Z" },
- { url = "https://files.pythonhosted.org/packages/cc/dc/d330a6faefd92b446ec0f0dfea4c3207bb1fef3c4771d19cf4543efd2c78/numpy-2.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a46288ec55ebbd58947d31d72be2c63cbf839f0a63b49cb755022310792a3385", size = 15828784, upload-time = "2024-08-26T20:19:11.19Z" },
-]
-
-[[package]]
-name = "numpy"
-version = "2.4.3"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/10/8b/c265f4823726ab832de836cdd184d0986dcf94480f81e8739692a7ac7af2/numpy-2.4.3.tar.gz", hash = "sha256:483a201202b73495f00dbc83796c6ae63137a9bdade074f7648b3e32613412dd", size = 20727743, upload-time = "2026-03-09T07:58:53.426Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/f9/51/5093a2df15c4dc19da3f79d1021e891f5dcf1d9d1db6ba38891d5590f3fe/numpy-2.4.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:33b3bf58ee84b172c067f56aeadc7ee9ab6de69c5e800ab5b10295d54c581adb", size = 16957183, upload-time = "2026-03-09T07:55:57.774Z" },
- { url = "https://files.pythonhosted.org/packages/b5/7c/c061f3de0630941073d2598dc271ac2f6cbcf5c83c74a5870fea07488333/numpy-2.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8ba7b51e71c05aa1f9bc3641463cd82308eab40ce0d5c7e1fd4038cbf9938147", size = 14968734, upload-time = "2026-03-09T07:56:00.494Z" },
- { url = "https://files.pythonhosted.org/packages/ef/27/d26c85cbcd86b26e4f125b0668e7a7c0542d19dd7d23ee12e87b550e95b5/numpy-2.4.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:a1988292870c7cb9d0ebb4cc96b4d447513a9644801de54606dc7aabf2b7d920", size = 5475288, upload-time = "2026-03-09T07:56:02.857Z" },
- { url = "https://files.pythonhosted.org/packages/2b/09/3c4abbc1dcd8010bf1a611d174c7aa689fc505585ec806111b4406f6f1b1/numpy-2.4.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:23b46bb6d8ecb68b58c09944483c135ae5f0e9b8d8858ece5e4ead783771d2a9", size = 6805253, upload-time = "2026-03-09T07:56:04.53Z" },
- { url = "https://files.pythonhosted.org/packages/21/bc/e7aa3f6817e40c3f517d407742337cbb8e6fc4b83ce0b55ab780c829243b/numpy-2.4.3-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a016db5c5dba78fa8fe9f5d80d6708f9c42ab087a739803c0ac83a43d686a470", size = 15969479, upload-time = "2026-03-09T07:56:06.638Z" },
- { url = "https://files.pythonhosted.org/packages/78/51/9f5d7a41f0b51649ddf2f2320595e15e122a40610b233d51928dd6c92353/numpy-2.4.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:715de7f82e192e8cae5a507a347d97ad17598f8e026152ca97233e3666daaa71", size = 16901035, upload-time = "2026-03-09T07:56:09.405Z" },
- { url = "https://files.pythonhosted.org/packages/64/6e/b221dd847d7181bc5ee4857bfb026182ef69499f9305eb1371cbb1aea626/numpy-2.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2ddb7919366ee468342b91dea2352824c25b55814a987847b6c52003a7c97f15", size = 17325657, upload-time = "2026-03-09T07:56:12.067Z" },
- { url = "https://files.pythonhosted.org/packages/eb/b8/8f3fd2da596e1063964b758b5e3c970aed1949a05200d7e3d46a9d46d643/numpy-2.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a315e5234d88067f2d97e1f2ef670a7569df445d55400f1e33d117418d008d52", size = 18635512, upload-time = "2026-03-09T07:56:14.629Z" },
- { url = "https://files.pythonhosted.org/packages/5c/24/2993b775c37e39d2f8ab4125b44337ab0b2ba106c100980b7c274a22bee7/numpy-2.4.3-cp311-cp311-win32.whl", hash = "sha256:2b3f8d2c4589b1a2028d2a770b0fc4d1f332fb5e01521f4de3199a896d158ddd", size = 6238100, upload-time = "2026-03-09T07:56:17.243Z" },
- { url = "https://files.pythonhosted.org/packages/76/1d/edccf27adedb754db7c4511d5eac8b83f004ae948fe2d3509e8b78097d4c/numpy-2.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:77e76d932c49a75617c6d13464e41203cd410956614d0a0e999b25e9e8d27eec", size = 12609816, upload-time = "2026-03-09T07:56:19.089Z" },
- { url = "https://files.pythonhosted.org/packages/92/82/190b99153480076c8dce85f4cfe7d53ea84444145ffa54cb58dcd460d66b/numpy-2.4.3-cp311-cp311-win_arm64.whl", hash = "sha256:eb610595dd91560905c132c709412b512135a60f1851ccbd2c959e136431ff67", size = 10485757, upload-time = "2026-03-09T07:56:21.753Z" },
- { url = "https://files.pythonhosted.org/packages/a9/ed/6388632536f9788cea23a3a1b629f25b43eaacd7d7377e5d6bc7b9deb69b/numpy-2.4.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:61b0cbabbb6126c8df63b9a3a0c4b1f44ebca5e12ff6997b80fcf267fb3150ef", size = 16669628, upload-time = "2026-03-09T07:56:24.252Z" },
- { url = "https://files.pythonhosted.org/packages/74/1b/ee2abfc68e1ce728b2958b6ba831d65c62e1b13ce3017c13943f8f9b5b2e/numpy-2.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7395e69ff32526710748f92cd8c9849b361830968ea3e24a676f272653e8983e", size = 14696872, upload-time = "2026-03-09T07:56:26.991Z" },
- { url = "https://files.pythonhosted.org/packages/ba/d1/780400e915ff5638166f11ca9dc2c5815189f3d7cf6f8759a1685e586413/numpy-2.4.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:abdce0f71dcb4a00e4e77f3faf05e4616ceccfe72ccaa07f47ee79cda3b7b0f4", size = 5203489, upload-time = "2026-03-09T07:56:29.414Z" },
- { url = "https://files.pythonhosted.org/packages/0b/bb/baffa907e9da4cc34a6e556d6d90e032f6d7a75ea47968ea92b4858826c4/numpy-2.4.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:48da3a4ee1336454b07497ff7ec83903efa5505792c4e6d9bf83d99dc07a1e18", size = 6550814, upload-time = "2026-03-09T07:56:32.225Z" },
- { url = "https://files.pythonhosted.org/packages/7b/12/8c9f0c6c95f76aeb20fc4a699c33e9f827fa0d0f857747c73bb7b17af945/numpy-2.4.3-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:32e3bef222ad6b052280311d1d60db8e259e4947052c3ae7dd6817451fc8a4c5", size = 15666601, upload-time = "2026-03-09T07:56:34.461Z" },
- { url = "https://files.pythonhosted.org/packages/bd/79/cc665495e4d57d0aa6fbcc0aa57aa82671dfc78fbf95fe733ed86d98f52a/numpy-2.4.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e7dd01a46700b1967487141a66ac1a3cf0dd8ebf1f08db37d46389401512ca97", size = 16621358, upload-time = "2026-03-09T07:56:36.852Z" },
- { url = "https://files.pythonhosted.org/packages/a8/40/b4ecb7224af1065c3539f5ecfff879d090de09608ad1008f02c05c770cb3/numpy-2.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:76f0f283506c28b12bba319c0fab98217e9f9b54e6160e9c79e9f7348ba32e9c", size = 17016135, upload-time = "2026-03-09T07:56:39.337Z" },
- { url = "https://files.pythonhosted.org/packages/f7/b1/6a88e888052eed951afed7a142dcdf3b149a030ca59b4c71eef085858e43/numpy-2.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:737f630a337364665aba3b5a77e56a68cc42d350edd010c345d65a3efa3addcc", size = 18345816, upload-time = "2026-03-09T07:56:42.31Z" },
- { url = "https://files.pythonhosted.org/packages/f3/8f/103a60c5f8c3d7fc678c19cd7b2476110da689ccb80bc18050efbaeae183/numpy-2.4.3-cp312-cp312-win32.whl", hash = "sha256:26952e18d82a1dbbc2f008d402021baa8d6fc8e84347a2072a25e08b46d698b9", size = 5960132, upload-time = "2026-03-09T07:56:44.851Z" },
- { url = "https://files.pythonhosted.org/packages/d7/7c/f5ee1bf6ed888494978046a809df2882aad35d414b622893322df7286879/numpy-2.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:65f3c2455188f09678355f5cae1f959a06b778bc66d535da07bf2ef20cd319d5", size = 12316144, upload-time = "2026-03-09T07:56:47.057Z" },
- { url = "https://files.pythonhosted.org/packages/71/46/8d1cb3f7a00f2fb6394140e7e6623696e54c6318a9d9691bb4904672cf42/numpy-2.4.3-cp312-cp312-win_arm64.whl", hash = "sha256:2abad5c7fef172b3377502bde47892439bae394a71bc329f31df0fd829b41a9e", size = 10220364, upload-time = "2026-03-09T07:56:49.849Z" },
- { url = "https://files.pythonhosted.org/packages/b6/d0/1fe47a98ce0df229238b77611340aff92d52691bcbc10583303181abf7fc/numpy-2.4.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b346845443716c8e542d54112966383b448f4a3ba5c66409771b8c0889485dd3", size = 16665297, upload-time = "2026-03-09T07:56:52.296Z" },
- { url = "https://files.pythonhosted.org/packages/27/d9/4e7c3f0e68dfa91f21c6fb6cf839bc829ec920688b1ce7ec722b1a6202fb/numpy-2.4.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2629289168f4897a3c4e23dc98d6f1731f0fc0fe52fb9db19f974041e4cc12b9", size = 14691853, upload-time = "2026-03-09T07:56:54.992Z" },
- { url = "https://files.pythonhosted.org/packages/3a/66/bd096b13a87549683812b53ab211e6d413497f84e794fb3c39191948da97/numpy-2.4.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:bb2e3cf95854233799013779216c57e153c1ee67a0bf92138acca0e429aefaee", size = 5198435, upload-time = "2026-03-09T07:56:57.184Z" },
- { url = "https://files.pythonhosted.org/packages/a2/2f/687722910b5a5601de2135c891108f51dfc873d8e43c8ed9f4ebb440b4a2/numpy-2.4.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:7f3408ff897f8ab07a07fbe2823d7aee6ff644c097cc1f90382511fe982f647f", size = 6546347, upload-time = "2026-03-09T07:56:59.531Z" },
- { url = "https://files.pythonhosted.org/packages/bf/ec/7971c4e98d86c564750393fab8d7d83d0a9432a9d78bb8a163a6dc59967a/numpy-2.4.3-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:decb0eb8a53c3b009b0962378065589685d66b23467ef5dac16cbe818afde27f", size = 15664626, upload-time = "2026-03-09T07:57:01.385Z" },
- { url = "https://files.pythonhosted.org/packages/7e/eb/7daecbea84ec935b7fc732e18f532073064a3816f0932a40a17f3349185f/numpy-2.4.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5f51900414fc9204a0e0da158ba2ac52b75656e7dce7e77fb9f84bfa343b4cc", size = 16608916, upload-time = "2026-03-09T07:57:04.008Z" },
- { url = "https://files.pythonhosted.org/packages/df/58/2a2b4a817ffd7472dca4421d9f0776898b364154e30c95f42195041dc03b/numpy-2.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6bd06731541f89cdc01b261ba2c9e037f1543df7472517836b78dfb15bd6e476", size = 17015824, upload-time = "2026-03-09T07:57:06.347Z" },
- { url = "https://files.pythonhosted.org/packages/4a/ca/627a828d44e78a418c55f82dd4caea8ea4a8ef24e5144d9e71016e52fb40/numpy-2.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:22654fe6be0e5206f553a9250762c653d3698e46686eee53b399ab90da59bd92", size = 18334581, upload-time = "2026-03-09T07:57:09.114Z" },
- { url = "https://files.pythonhosted.org/packages/cd/c0/76f93962fc79955fcba30a429b62304332345f22d4daec1cb33653425643/numpy-2.4.3-cp313-cp313-win32.whl", hash = "sha256:d71e379452a2f670ccb689ec801b1218cd3983e253105d6e83780967e899d687", size = 5958618, upload-time = "2026-03-09T07:57:11.432Z" },
- { url = "https://files.pythonhosted.org/packages/b1/3c/88af0040119209b9b5cb59485fa48b76f372c73068dbf9254784b975ac53/numpy-2.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:0a60e17a14d640f49146cb38e3f105f571318db7826d9b6fef7e4dce758faecd", size = 12312824, upload-time = "2026-03-09T07:57:13.586Z" },
- { url = "https://files.pythonhosted.org/packages/58/ce/3d07743aced3d173f877c3ef6a454c2174ba42b584ab0b7e6d99374f51ed/numpy-2.4.3-cp313-cp313-win_arm64.whl", hash = "sha256:c9619741e9da2059cd9c3f206110b97583c7152c1dc9f8aafd4beb450ac1c89d", size = 10221218, upload-time = "2026-03-09T07:57:16.183Z" },
- { url = "https://files.pythonhosted.org/packages/62/09/d96b02a91d09e9d97862f4fc8bfebf5400f567d8eb1fe4b0cc4795679c15/numpy-2.4.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7aa4e54f6469300ebca1d9eb80acd5253cdfa36f2c03d79a35883687da430875", size = 14819570, upload-time = "2026-03-09T07:57:18.564Z" },
- { url = "https://files.pythonhosted.org/packages/b5/ca/0b1aba3905fdfa3373d523b2b15b19029f4f3031c87f4066bd9d20ef6c6b/numpy-2.4.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d1b90d840b25874cf5cd20c219af10bac3667db3876d9a495609273ebe679070", size = 5326113, upload-time = "2026-03-09T07:57:21.052Z" },
- { url = "https://files.pythonhosted.org/packages/c0/63/406e0fd32fcaeb94180fd6a4c41e55736d676c54346b7efbce548b94a914/numpy-2.4.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a749547700de0a20a6718293396ec237bb38218049cfce788e08fcb716e8cf73", size = 6646370, upload-time = "2026-03-09T07:57:22.804Z" },
- { url = "https://files.pythonhosted.org/packages/b6/d0/10f7dc157d4b37af92720a196be6f54f889e90dcd30dce9dc657ed92c257/numpy-2.4.3-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94f3c4a151a2e529adf49c1d54f0f57ff8f9b233ee4d44af623a81553ab86368", size = 15723499, upload-time = "2026-03-09T07:57:24.693Z" },
- { url = "https://files.pythonhosted.org/packages/66/f1/d1c2bf1161396629701bc284d958dc1efa3a5a542aab83cf11ee6eb4cba5/numpy-2.4.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22c31dc07025123aedf7f2db9e91783df13f1776dc52c6b22c620870dc0fab22", size = 16657164, upload-time = "2026-03-09T07:57:27.676Z" },
- { url = "https://files.pythonhosted.org/packages/1a/be/cca19230b740af199ac47331a21c71e7a3d0ba59661350483c1600d28c37/numpy-2.4.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:148d59127ac95979d6f07e4d460f934ebdd6eed641db9c0db6c73026f2b2101a", size = 17081544, upload-time = "2026-03-09T07:57:30.664Z" },
- { url = "https://files.pythonhosted.org/packages/b9/c5/9602b0cbb703a0936fb40f8a95407e8171935b15846de2f0776e08af04c7/numpy-2.4.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a97cbf7e905c435865c2d939af3d93f99d18eaaa3cabe4256f4304fb51604349", size = 18380290, upload-time = "2026-03-09T07:57:33.763Z" },
- { url = "https://files.pythonhosted.org/packages/ed/81/9f24708953cd30be9ee36ec4778f4b112b45165812f2ada4cc5ea1c1f254/numpy-2.4.3-cp313-cp313t-win32.whl", hash = "sha256:be3b8487d725a77acccc9924f65fd8bce9af7fac8c9820df1049424a2115af6c", size = 6082814, upload-time = "2026-03-09T07:57:36.491Z" },
- { url = "https://files.pythonhosted.org/packages/e2/9e/52f6eaa13e1a799f0ab79066c17f7016a4a8ae0c1aefa58c82b4dab690b4/numpy-2.4.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1ec84fd7c8e652b0f4aaaf2e6e9cc8eaa9b1b80a537e06b2e3a2fb176eedcb26", size = 12452673, upload-time = "2026-03-09T07:57:38.281Z" },
- { url = "https://files.pythonhosted.org/packages/c4/04/b8cece6ead0b30c9fbd99bb835ad7ea0112ac5f39f069788c5558e3b1ab2/numpy-2.4.3-cp313-cp313t-win_arm64.whl", hash = "sha256:120df8c0a81ebbf5b9020c91439fccd85f5e018a927a39f624845be194a2be02", size = 10290907, upload-time = "2026-03-09T07:57:40.747Z" },
- { url = "https://files.pythonhosted.org/packages/70/ae/3936f79adebf8caf81bd7a599b90a561334a658be4dcc7b6329ebf4ee8de/numpy-2.4.3-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:5884ce5c7acfae1e4e1b6fde43797d10aa506074d25b531b4f54bde33c0c31d4", size = 16664563, upload-time = "2026-03-09T07:57:43.817Z" },
- { url = "https://files.pythonhosted.org/packages/9b/62/760f2b55866b496bb1fa7da2a6db076bef908110e568b02fcfc1422e2a3a/numpy-2.4.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:297837823f5bc572c5f9379b0c9f3a3365f08492cbdc33bcc3af174372ebb168", size = 14702161, upload-time = "2026-03-09T07:57:46.169Z" },
- { url = "https://files.pythonhosted.org/packages/32/af/a7a39464e2c0a21526fb4fb76e346fb172ebc92f6d1c7a07c2c139cc17b1/numpy-2.4.3-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:a111698b4a3f8dcbe54c64a7708f049355abd603e619013c346553c1fd4ca90b", size = 5208738, upload-time = "2026-03-09T07:57:48.506Z" },
- { url = "https://files.pythonhosted.org/packages/29/8c/2a0cf86a59558fa078d83805589c2de490f29ed4fb336c14313a161d358a/numpy-2.4.3-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:4bd4741a6a676770e0e97fe9ab2e51de01183df3dcbcec591d26d331a40de950", size = 6543618, upload-time = "2026-03-09T07:57:50.591Z" },
- { url = "https://files.pythonhosted.org/packages/aa/b8/612ce010c0728b1c363fa4ea3aa4c22fe1c5da1de008486f8c2f5cb92fae/numpy-2.4.3-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:54f29b877279d51e210e0c80709ee14ccbbad647810e8f3d375561c45ef613dd", size = 15680676, upload-time = "2026-03-09T07:57:52.34Z" },
- { url = "https://files.pythonhosted.org/packages/a9/7e/4f120ecc54ba26ddf3dc348eeb9eb063f421de65c05fc961941798feea18/numpy-2.4.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:679f2a834bae9020f81534671c56fd0cc76dd7e5182f57131478e23d0dc59e24", size = 16613492, upload-time = "2026-03-09T07:57:54.91Z" },
- { url = "https://files.pythonhosted.org/packages/2c/86/1b6020db73be330c4b45d5c6ee4295d59cfeef0e3ea323959d053e5a6909/numpy-2.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d84f0f881cb2225c2dfd7f78a10a5645d487a496c6668d6cc39f0f114164f3d0", size = 17031789, upload-time = "2026-03-09T07:57:57.641Z" },
- { url = "https://files.pythonhosted.org/packages/07/3a/3b90463bf41ebc21d1b7e06079f03070334374208c0f9a1f05e4ae8455e7/numpy-2.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d213c7e6e8d211888cc359bab7199670a00f5b82c0978b9d1c75baf1eddbeac0", size = 18339941, upload-time = "2026-03-09T07:58:00.577Z" },
- { url = "https://files.pythonhosted.org/packages/a8/74/6d736c4cd962259fd8bae9be27363eb4883a2f9069763747347544c2a487/numpy-2.4.3-cp314-cp314-win32.whl", hash = "sha256:52077feedeff7c76ed7c9f1a0428558e50825347b7545bbb8523da2cd55c547a", size = 6007503, upload-time = "2026-03-09T07:58:03.331Z" },
- { url = "https://files.pythonhosted.org/packages/48/39/c56ef87af669364356bb011922ef0734fc49dad51964568634c72a009488/numpy-2.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:0448e7f9caefb34b4b7dd2b77f21e8906e5d6f0365ad525f9f4f530b13df2afc", size = 12444915, upload-time = "2026-03-09T07:58:06.353Z" },
- { url = "https://files.pythonhosted.org/packages/9d/1f/ab8528e38d295fd349310807496fabb7cf9fe2e1f70b97bc20a483ea9d4a/numpy-2.4.3-cp314-cp314-win_arm64.whl", hash = "sha256:b44fd60341c4d9783039598efadd03617fa28d041fc37d22b62d08f2027fa0e7", size = 10494875, upload-time = "2026-03-09T07:58:08.734Z" },
- { url = "https://files.pythonhosted.org/packages/e6/ef/b7c35e4d5ef141b836658ab21a66d1a573e15b335b1d111d31f26c8ef80f/numpy-2.4.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0a195f4216be9305a73c0e91c9b026a35f2161237cf1c6de9b681637772ea657", size = 14822225, upload-time = "2026-03-09T07:58:11.034Z" },
- { url = "https://files.pythonhosted.org/packages/cd/8d/7730fa9278cf6648639946cc816e7cc89f0d891602584697923375f801ed/numpy-2.4.3-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:cd32fbacb9fd1bf041bf8e89e4576b6f00b895f06d00914820ae06a616bdfef7", size = 5328769, upload-time = "2026-03-09T07:58:13.67Z" },
- { url = "https://files.pythonhosted.org/packages/47/01/d2a137317c958b074d338807c1b6a383406cdf8b8e53b075d804cc3d211d/numpy-2.4.3-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:2e03c05abaee1f672e9d67bc858f300b5ccba1c21397211e8d77d98350972093", size = 6649461, upload-time = "2026-03-09T07:58:15.912Z" },
- { url = "https://files.pythonhosted.org/packages/5c/34/812ce12bc0f00272a4b0ec0d713cd237cb390666eb6206323d1cc9cedbb2/numpy-2.4.3-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7d1ce23cce91fcea443320a9d0ece9b9305d4368875bab09538f7a5b4131938a", size = 15725809, upload-time = "2026-03-09T07:58:17.787Z" },
- { url = "https://files.pythonhosted.org/packages/25/c0/2aed473a4823e905e765fee3dc2cbf504bd3e68ccb1150fbdabd5c39f527/numpy-2.4.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c59020932feb24ed49ffd03704fbab89f22aa9c0d4b180ff45542fe8918f5611", size = 16655242, upload-time = "2026-03-09T07:58:20.476Z" },
- { url = "https://files.pythonhosted.org/packages/f2/c8/7e052b2fc87aa0e86de23f20e2c42bd261c624748aa8efd2c78f7bb8d8c6/numpy-2.4.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:9684823a78a6cd6ad7511fc5e25b07947d1d5b5e2812c93fe99d7d4195130720", size = 17080660, upload-time = "2026-03-09T07:58:23.067Z" },
- { url = "https://files.pythonhosted.org/packages/f3/3d/0876746044db2adcb11549f214d104f2e1be00f07a67edbb4e2812094847/numpy-2.4.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0200b25c687033316fb39f0ff4e3e690e8957a2c3c8d22499891ec58c37a3eb5", size = 18380384, upload-time = "2026-03-09T07:58:25.839Z" },
- { url = "https://files.pythonhosted.org/packages/07/12/8160bea39da3335737b10308df4f484235fd297f556745f13092aa039d3b/numpy-2.4.3-cp314-cp314t-win32.whl", hash = "sha256:5e10da9e93247e554bb1d22f8edc51847ddd7dde52d85ce31024c1b4312bfba0", size = 6154547, upload-time = "2026-03-09T07:58:28.289Z" },
- { url = "https://files.pythonhosted.org/packages/42/f3/76534f61f80d74cc9cdf2e570d3d4eeb92c2280a27c39b0aaf471eda7b48/numpy-2.4.3-cp314-cp314t-win_amd64.whl", hash = "sha256:45f003dbdffb997a03da2d1d0cb41fbd24a87507fb41605c0420a3db5bd4667b", size = 12633645, upload-time = "2026-03-09T07:58:30.384Z" },
- { url = "https://files.pythonhosted.org/packages/1f/b6/7c0d4334c15983cec7f92a69e8ce9b1e6f31857e5ee3a413ac424e6bd63d/numpy-2.4.3-cp314-cp314t-win_arm64.whl", hash = "sha256:4d382735cecd7bcf090172489a525cd7d4087bc331f7df9f60ddc9a296cf208e", size = 10565454, upload-time = "2026-03-09T07:58:33.031Z" },
- { url = "https://files.pythonhosted.org/packages/64/e4/4dab9fb43c83719c29241c535d9e07be73bea4bc0c6686c5816d8e1b6689/numpy-2.4.3-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c6b124bfcafb9e8d3ed09130dbee44848c20b3e758b6bbf006e641778927c028", size = 16834892, upload-time = "2026-03-09T07:58:35.334Z" },
- { url = "https://files.pythonhosted.org/packages/c9/29/f8b6d4af90fed3dfda84ebc0df06c9833d38880c79ce954e5b661758aa31/numpy-2.4.3-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:76dbb9d4e43c16cf9aa711fcd8de1e2eeb27539dcefb60a1d5e9f12fae1d1ed8", size = 14893070, upload-time = "2026-03-09T07:58:37.7Z" },
- { url = "https://files.pythonhosted.org/packages/9a/04/a19b3c91dbec0a49269407f15d5753673a09832daed40c45e8150e6fa558/numpy-2.4.3-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:29363fbfa6f8ee855d7569c96ce524845e3d726d6c19b29eceec7dd555dab152", size = 5399609, upload-time = "2026-03-09T07:58:39.853Z" },
- { url = "https://files.pythonhosted.org/packages/79/34/4d73603f5420eab89ea8a67097b31364bf7c30f811d4dd84b1659c7476d9/numpy-2.4.3-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:bc71942c789ef415a37f0d4eab90341425a00d538cd0642445d30b41023d3395", size = 6714355, upload-time = "2026-03-09T07:58:42.365Z" },
- { url = "https://files.pythonhosted.org/packages/58/ad/1100d7229bb248394939a12a8074d485b655e8ed44207d328fdd7fcebc7b/numpy-2.4.3-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e58765ad74dcebd3ef0208a5078fba32dc8ec3578fe84a604432950cd043d79", size = 15800434, upload-time = "2026-03-09T07:58:44.837Z" },
- { url = "https://files.pythonhosted.org/packages/0c/fd/16d710c085d28ba4feaf29ac60c936c9d662e390344f94a6beaa2ac9899b/numpy-2.4.3-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8e236dbda4e1d319d681afcbb136c0c4a8e0f1a5c58ceec2adebb547357fe857", size = 16729409, upload-time = "2026-03-09T07:58:47.972Z" },
- { url = "https://files.pythonhosted.org/packages/57/a7/b35835e278c18b85206834b3aa3abe68e77a98769c59233d1f6300284781/numpy-2.4.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:4b42639cdde6d24e732ff823a3fa5b701d8acad89c4142bc1d0bd6dc85200ba5", size = 12504685, upload-time = "2026-03-09T07:58:50.525Z" },
-]
-
-[[package]]
-name = "nvidia-cublas-cu11"
-version = "11.11.3.6"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/46/be/c222e33e60d28ecd496a46fc4d78ccae0ee28e1fd7dc705b6288b4cad27e/nvidia_cublas_cu11-11.11.3.6-py3-none-manylinux1_x86_64.whl", hash = "sha256:39fb40e8f486dd8a2ddb8fdeefe1d5b28f5b99df01c87ab3676f057a74a5a6f3", size = 417870452, upload-time = "2022-10-18T21:17:48.638Z" },
- { url = "https://files.pythonhosted.org/packages/ea/2e/9d99c60771d275ecf6c914a612e9a577f740a615bc826bec132368e1d3ae/nvidia_cublas_cu11-11.11.3.6-py3-none-manylinux2014_x86_64.whl", hash = "sha256:60252822adea5d0b10cd990a7dc7bedf7435f30ae40083c7a624a85a43225abc", size = 417870460, upload-time = "2024-08-17T00:00:26.889Z" },
-]
-
-[[package]]
-name = "nvidia-cublas-cu12"
-version = "12.6.4.1"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/af/eb/ff4b8c503fa1f1796679dce648854d58751982426e4e4b37d6fce49d259c/nvidia_cublas_cu12-12.6.4.1-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:08ed2686e9875d01b58e3cb379c6896df8e76c75e0d4a7f7dace3d7b6d9ef8eb", size = 393138322, upload-time = "2024-11-20T17:40:25.65Z" },
- { url = "https://files.pythonhosted.org/packages/97/0d/f1f0cadbf69d5b9ef2e4f744c9466cb0a850741d08350736dfdb4aa89569/nvidia_cublas_cu12-12.6.4.1-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:235f728d6e2a409eddf1df58d5b0921cf80cfa9e72b9f2775ccb7b4a87984668", size = 390794615, upload-time = "2024-11-20T17:39:52.715Z" },
-]
-
-[[package]]
-name = "nvidia-cuda-cupti-cu11"
-version = "11.8.87"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/27/c9/b4b15f709a694ea9f84871c6c4fbeeb54bab225962d852665a2c6f77f90d/nvidia_cuda_cupti_cu11-11.8.87-py3-none-manylinux1_x86_64.whl", hash = "sha256:0e50c707df56c75a2c0703dc6b886f3c97a22f37d6f63839f75b7418ba672a8d", size = 13093657, upload-time = "2022-10-03T21:46:12.544Z" },
- { url = "https://files.pythonhosted.org/packages/74/42/9f5c5cc084ce6f3073048c4f6806f45ba4c8c73f227c9587215d9c372e05/nvidia_cuda_cupti_cu11-11.8.87-py3-none-manylinux2014_x86_64.whl", hash = "sha256:4191a17913a706b5098681280cd089cd7d8d3df209a6f5cb79384974a96d24f2", size = 13093662, upload-time = "2024-08-16T23:56:38.082Z" },
-]
-
-[[package]]
-name = "nvidia-cuda-cupti-cu12"
-version = "12.6.80"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e6/8b/2f6230cb715646c3a9425636e513227ce5c93c4d65823a734f4bb86d43c3/nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:166ee35a3ff1587f2490364f90eeeb8da06cd867bd5b701bf7f9a02b78bc63fc", size = 8236764, upload-time = "2024-11-20T17:35:41.03Z" },
- { url = "https://files.pythonhosted.org/packages/25/0f/acb326ac8fd26e13c799e0b4f3b2751543e1834f04d62e729485872198d4/nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_aarch64.whl", hash = "sha256:358b4a1d35370353d52e12f0a7d1769fc01ff74a191689d3870b2123156184c4", size = 8236756, upload-time = "2024-10-01T16:57:45.507Z" },
- { url = "https://files.pythonhosted.org/packages/49/60/7b6497946d74bcf1de852a21824d63baad12cd417db4195fc1bfe59db953/nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6768bad6cab4f19e8292125e5f1ac8aa7d1718704012a0e3272a6f61c4bce132", size = 8917980, upload-time = "2024-11-20T17:36:04.019Z" },
- { url = "https://files.pythonhosted.org/packages/a5/24/120ee57b218d9952c379d1e026c4479c9ece9997a4fb46303611ee48f038/nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a3eff6cdfcc6a4c35db968a06fcadb061cbc7d6dde548609a941ff8701b98b73", size = 8917972, upload-time = "2024-10-01T16:58:06.036Z" },
-]
-
-[[package]]
-name = "nvidia-cuda-nvrtc-cu11"
-version = "11.8.89"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/83/08/a9833e4e9f9165bedb7f36033b47aa399b053b9cb2eaf7b84d1e28705cf7/nvidia_cuda_nvrtc_cu11-11.8.89-py3-none-manylinux1_x86_64.whl", hash = "sha256:1f27d67b0f72902e9065ae568b4f6268dfe49ba3ed269c9a3da99bb86d1d2008", size = 23173264, upload-time = "2022-10-03T21:47:00.705Z" },
- { url = "https://files.pythonhosted.org/packages/60/44/202e027c224c26e15a53f01c5c7604c7f6b4fd368882d3164ea08fead207/nvidia_cuda_nvrtc_cu11-11.8.89-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a8d02f3cba345be56b1ffc3e74d8f61f02bb758dd31b0f20e12277a5a244f756", size = 23173745, upload-time = "2024-08-16T23:58:16.539Z" },
-]
-
-[[package]]
-name = "nvidia-cuda-nvrtc-cu12"
-version = "12.6.77"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/f4/2f/72df534873235983cc0a5371c3661bebef7c4682760c275590b972c7b0f9/nvidia_cuda_nvrtc_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5847f1d6e5b757f1d2b3991a01082a44aad6f10ab3c5c0213fa3e25bddc25a13", size = 23162955, upload-time = "2024-10-01T16:59:50.922Z" },
- { url = "https://files.pythonhosted.org/packages/75/2e/46030320b5a80661e88039f59060d1790298b4718944a65a7f2aeda3d9e9/nvidia_cuda_nvrtc_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:35b0cc6ee3a9636d5409133e79273ce1f3fd087abb0532d2d2e8fff1fe9efc53", size = 23650380, upload-time = "2024-10-01T17:00:14.643Z" },
-]
-
-[[package]]
-name = "nvidia-cuda-runtime-cu11"
-version = "11.8.89"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/45/3e/84db02be49fe6d6df6e42f69fd64501c22d0f9ada9c9877f885612085d20/nvidia_cuda_runtime_cu11-11.8.89-py3-none-manylinux1_x86_64.whl", hash = "sha256:f587bd726eb2f7612cf77ce38a2c1e65cf23251ff49437f6161ce0d647f64f7c", size = 875585, upload-time = "2022-10-03T21:46:03.05Z" },
- { url = "https://files.pythonhosted.org/packages/a6/ec/a540f28b31de7bc1ed49eecc72035d4cb77db88ead1d42f7bfa5ae407ac6/nvidia_cuda_runtime_cu11-11.8.89-py3-none-manylinux2014_x86_64.whl", hash = "sha256:92d04069a987e1fbc9213f8376d265df0f7bb42617d44f5eda1f496acea7f2d1", size = 875592, upload-time = "2024-08-16T23:56:18.774Z" },
-]
-
-[[package]]
-name = "nvidia-cuda-runtime-cu12"
-version = "12.6.77"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/8f/ea/590b2ac00d772a8abd1c387a92b46486d2679ca6622fd25c18ff76265663/nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6116fad3e049e04791c0256a9778c16237837c08b27ed8c8401e2e45de8d60cd", size = 908052, upload-time = "2024-11-20T17:35:19.905Z" },
- { url = "https://files.pythonhosted.org/packages/b7/3d/159023799677126e20c8fd580cca09eeb28d5c5a624adc7f793b9aa8bbfa/nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d461264ecb429c84c8879a7153499ddc7b19b5f8d84c204307491989a365588e", size = 908040, upload-time = "2024-10-01T16:57:22.221Z" },
- { url = "https://files.pythonhosted.org/packages/e1/23/e717c5ac26d26cf39a27fbc076240fad2e3b817e5889d671b67f4f9f49c5/nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ba3b56a4f896141e25e19ab287cd71e52a6a0f4b29d0d31609f60e3b4d5219b7", size = 897690, upload-time = "2024-11-20T17:35:30.697Z" },
- { url = "https://files.pythonhosted.org/packages/f0/62/65c05e161eeddbafeca24dc461f47de550d9fa8a7e04eb213e32b55cfd99/nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a84d15d5e1da416dd4774cb42edf5e954a3e60cc945698dc1d5be02321c44dc8", size = 897678, upload-time = "2024-10-01T16:57:33.821Z" },
-]
-
-[[package]]
-name = "nvidia-cudnn-cu11"
-version = "9.1.0.70"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "nvidia-cublas-cu11", marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/00/3b/0b776f04e364cd99e4cf152c2a9eadb5934c67c9a91429da55169a9447fd/nvidia_cudnn_cu11-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:e6135ac63fe9d5b0b89cfb35c3fc1c1349f2b995becadf2e9dc21bca89d9633d", size = 663919573, upload-time = "2024-04-22T15:20:24.839Z" },
-]
-
-[[package]]
-name = "nvidia-cudnn-cu12"
-version = "9.10.2.21"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "nvidia-cublas-cu12", marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/fa/41/e79269ce215c857c935fd86bcfe91a451a584dfc27f1e068f568b9ad1ab7/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:c9132cc3f8958447b4910a1720036d9eff5928cc3179b0a51fb6d167c6cc87d8", size = 705026878, upload-time = "2025-06-06T21:52:51.348Z" },
- { url = "https://files.pythonhosted.org/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8", size = 706758467, upload-time = "2025-06-06T21:54:08.597Z" },
-]
-
-[[package]]
-name = "nvidia-cufft-cu11"
-version = "10.9.0.58"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/74/79/b912a77e38e41f15a0581a59f5c3548d1ddfdda3225936fb67c342719e7a/nvidia_cufft_cu11-10.9.0.58-py3-none-manylinux1_x86_64.whl", hash = "sha256:222f9da70c80384632fd6035e4c3f16762d64ea7a843829cb278f98b3cb7dd81", size = 168405414, upload-time = "2022-10-03T23:29:47.505Z" },
- { url = "https://files.pythonhosted.org/packages/64/c8/133717b43182ba063803e983e7680a94826a9f4ff5734af0ca315803f1b3/nvidia_cufft_cu11-10.9.0.58-py3-none-manylinux2014_x86_64.whl", hash = "sha256:e21037259995243cc370dd63c430d77ae9280bedb68d5b5a18226bfc92e5d748", size = 168405419, upload-time = "2024-08-17T00:02:03.562Z" },
-]
-
-[[package]]
-name = "nvidia-cufft-cu12"
-version = "11.3.0.4"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "nvidia-nvjitlink-cu12", marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/1f/37/c50d2b2f2c07e146776389e3080f4faf70bcc4fa6e19d65bb54ca174ebc3/nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d16079550df460376455cba121db6564089176d9bac9e4f360493ca4741b22a6", size = 200164144, upload-time = "2024-11-20T17:40:58.288Z" },
- { url = "https://files.pythonhosted.org/packages/ce/f5/188566814b7339e893f8d210d3a5332352b1409815908dad6a363dcceac1/nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8510990de9f96c803a051822618d42bf6cb8f069ff3f48d93a8486efdacb48fb", size = 200164135, upload-time = "2024-10-01T17:03:24.212Z" },
- { url = "https://files.pythonhosted.org/packages/8f/16/73727675941ab8e6ffd86ca3a4b7b47065edcca7a997920b831f8147c99d/nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ccba62eb9cef5559abd5e0d54ceed2d9934030f51163df018532142a8ec533e5", size = 200221632, upload-time = "2024-11-20T17:41:32.357Z" },
- { url = "https://files.pythonhosted.org/packages/60/de/99ec247a07ea40c969d904fc14f3a356b3e2a704121675b75c366b694ee1/nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_x86_64.whl", hash = "sha256:768160ac89f6f7b459bee747e8d175dbf53619cfe74b2a5636264163138013ca", size = 200221622, upload-time = "2024-10-01T17:03:58.79Z" },
-]
-
-[[package]]
-name = "nvidia-cufile-cu12"
-version = "1.11.1.6"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/b2/66/cc9876340ac68ae71b15c743ddb13f8b30d5244af344ec8322b449e35426/nvidia_cufile_cu12-1.11.1.6-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc23469d1c7e52ce6c1d55253273d32c565dd22068647f3aa59b3c6b005bf159", size = 1142103, upload-time = "2024-11-20T17:42:11.83Z" },
- { url = "https://files.pythonhosted.org/packages/17/bf/cc834147263b929229ce4aadd62869f0b195e98569d4c28b23edc72b85d9/nvidia_cufile_cu12-1.11.1.6-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:8f57a0051dcf2543f6dc2b98a98cb2719c37d3cee1baba8965d57f3bbc90d4db", size = 1066155, upload-time = "2024-11-20T17:41:49.376Z" },
-]
-
-[[package]]
-name = "nvidia-curand-cu11"
-version = "10.3.0.86"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/49/28/c47f8e2439ddbcbeae3cf74d43ed572b651d630ea72863d5357f3759eb66/nvidia_curand_cu11-10.3.0.86-py3-none-manylinux1_x86_64.whl", hash = "sha256:ac439548c88580269a1eb6aeb602a5aed32f0dbb20809a31d9ed7d01d77f6bf5", size = 58124493, upload-time = "2022-10-03T23:30:05.413Z" },
- { url = "https://files.pythonhosted.org/packages/58/e5/ce5806afc48a6e4e0dddd25316ac60b6fa94fd1791bdbf4ca17bf52696ea/nvidia_curand_cu11-10.3.0.86-py3-none-manylinux2014_x86_64.whl", hash = "sha256:cd4cffbf78bb06580206b4814d5dc696d1161c902aae37b2bba00056832379e6", size = 58124497, upload-time = "2024-08-17T00:03:01.833Z" },
-]
-
-[[package]]
-name = "nvidia-curand-cu12"
-version = "10.3.7.77"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/42/ac/36543605358a355632f1a6faa3e2d5dfb91eab1e4bc7d552040e0383c335/nvidia_curand_cu12-10.3.7.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:6e82df077060ea28e37f48a3ec442a8f47690c7499bff392a5938614b56c98d8", size = 56289881, upload-time = "2024-10-01T17:04:18.981Z" },
- { url = "https://files.pythonhosted.org/packages/73/1b/44a01c4e70933637c93e6e1a8063d1e998b50213a6b65ac5a9169c47e98e/nvidia_curand_cu12-10.3.7.77-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a42cd1344297f70b9e39a1e4f467a4e1c10f1da54ff7a85c12197f6c652c8bdf", size = 56279010, upload-time = "2024-11-20T17:42:50.958Z" },
- { url = "https://files.pythonhosted.org/packages/4a/aa/2c7ff0b5ee02eaef890c0ce7d4f74bc30901871c5e45dee1ae6d0083cd80/nvidia_curand_cu12-10.3.7.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:99f1a32f1ac2bd134897fc7a203f779303261268a65762a623bf30cc9fe79117", size = 56279000, upload-time = "2024-10-01T17:04:45.274Z" },
- { url = "https://files.pythonhosted.org/packages/a6/02/5362a9396f23f7de1dd8a64369e87c85ffff8216fc8194ace0fa45ba27a5/nvidia_curand_cu12-10.3.7.77-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:7b2ed8e95595c3591d984ea3603dd66fe6ce6812b886d59049988a712ed06b6e", size = 56289882, upload-time = "2024-11-20T17:42:25.222Z" },
-]
-
-[[package]]
-name = "nvidia-cusolver-cu11"
-version = "11.4.1.48"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "nvidia-cublas-cu11", marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/55/ee/939ff0104991dd7bdabb4c9767994c612ba0e1c9a55672a1ddd42f5e5b16/nvidia_cusolver_cu11-11.4.1.48-py3-none-manylinux1_x86_64.whl", hash = "sha256:ca538f545645b7e6629140786d3127fe067b3d5a085bd794cde5bfe877c8926f", size = 128240842, upload-time = "2022-10-03T23:30:24.348Z" },
- { url = "https://files.pythonhosted.org/packages/52/fe/866e87e6e6a1b0a5fcf8524a058042656702f2057e22bfdb8899a7c38e10/nvidia_cusolver_cu11-11.4.1.48-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ea9fb1ad8c644ca9ed55af13cc39af3b7ba4c3eb5aef18471fe1fe77d94383cb", size = 128246438, upload-time = "2024-08-17T00:03:52.432Z" },
-]
-
-[[package]]
-name = "nvidia-cusolver-cu12"
-version = "11.7.1.2"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "nvidia-cublas-cu12", marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
- { name = "nvidia-cusparse-cu12", marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
- { name = "nvidia-nvjitlink-cu12", marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/93/17/dbe1aa865e4fdc7b6d4d0dd308fdd5aaab60f939abfc0ea1954eac4fb113/nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0ce237ef60acde1efc457335a2ddadfd7610b892d94efee7b776c64bb1cac9e0", size = 157833628, upload-time = "2024-10-01T17:05:05.591Z" },
- { url = "https://files.pythonhosted.org/packages/f0/6e/c2cf12c9ff8b872e92b4a5740701e51ff17689c4d726fca91875b07f655d/nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e9e49843a7707e42022babb9bcfa33c29857a93b88020c4e4434656a655b698c", size = 158229790, upload-time = "2024-11-20T17:43:43.211Z" },
- { url = "https://files.pythonhosted.org/packages/9f/81/baba53585da791d043c10084cf9553e074548408e04ae884cfe9193bd484/nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6cf28f17f64107a0c4d7802be5ff5537b2130bfc112f25d5a30df227058ca0e6", size = 158229780, upload-time = "2024-10-01T17:05:39.875Z" },
- { url = "https://files.pythonhosted.org/packages/7c/5f/07d0ba3b7f19be5a5ec32a8679fc9384cfd9fc6c869825e93be9f28d6690/nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:dbbe4fc38ec1289c7e5230e16248365e375c3673c9c8bac5796e2e20db07f56e", size = 157833630, upload-time = "2024-11-20T17:43:16.77Z" },
-]
-
-[[package]]
-name = "nvidia-cusparse-cu11"
-version = "11.7.5.86"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c1/e0/21b829c535d569831835a4ca5d049a19ba00d3e91f3e12ab4ad27bd7385f/nvidia_cusparse_cu11-11.7.5.86-py3-none-manylinux1_x86_64.whl", hash = "sha256:4ae709fe78d3f23f60acaba8c54b8ad556cf16ca486e0cc1aa92dca7555d2d2b", size = 204126221, upload-time = "2022-10-18T21:19:28.04Z" },
- { url = "https://files.pythonhosted.org/packages/ed/5c/b0333b07c51ced77397c2fb0d9826072cea0da9d421aa7e792aa0f8ecc72/nvidia_cusparse_cu11-11.7.5.86-py3-none-manylinux2014_x86_64.whl", hash = "sha256:8d7cf1628fd8d462b5d2ba6678fae34733a48ecb80495b9c68672ec6a6dde5ef", size = 204126227, upload-time = "2024-08-17T00:05:20.798Z" },
-]
-
-[[package]]
-name = "nvidia-cusparse-cu12"
-version = "12.5.4.2"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "nvidia-nvjitlink-cu12", marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/eb/eb/6681efd0aa7df96b4f8067b3ce7246833dd36830bb4cec8896182773db7d/nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d25b62fb18751758fe3c93a4a08eff08effedfe4edf1c6bb5afd0890fe88f887", size = 216451147, upload-time = "2024-11-20T17:44:18.055Z" },
- { url = "https://files.pythonhosted.org/packages/d3/56/3af21e43014eb40134dea004e8d0f1ef19d9596a39e4d497d5a7de01669f/nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7aa32fa5470cf754f72d1116c7cbc300b4e638d3ae5304cfa4a638a5b87161b1", size = 216451135, upload-time = "2024-10-01T17:06:03.826Z" },
- { url = "https://files.pythonhosted.org/packages/06/1e/b8b7c2f4099a37b96af5c9bb158632ea9e5d9d27d7391d7eb8fc45236674/nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7556d9eca156e18184b94947ade0fba5bb47d69cec46bf8660fd2c71a4b48b73", size = 216561367, upload-time = "2024-11-20T17:44:54.824Z" },
- { url = "https://files.pythonhosted.org/packages/43/ac/64c4316ba163e8217a99680c7605f779accffc6a4bcd0c778c12948d3707/nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:23749a6571191a215cb74d1cdbff4a86e7b19f1200c071b3fcf844a5bea23a2f", size = 216561357, upload-time = "2024-10-01T17:06:29.861Z" },
-]
-
-[[package]]
-name = "nvidia-cusparselt-cu12"
-version = "0.7.1"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/73/b9/598f6ff36faaece4b3c50d26f50e38661499ff34346f00e057760b35cc9d/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8878dce784d0fac90131b6817b607e803c36e629ba34dc5b433471382196b6a5", size = 283835557, upload-time = "2025-02-26T00:16:54.265Z" },
- { url = "https://files.pythonhosted.org/packages/56/79/12978b96bd44274fe38b5dde5cfb660b1d114f70a65ef962bcbbed99b549/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623", size = 287193691, upload-time = "2025-02-26T00:15:44.104Z" },
-]
-
-[[package]]
-name = "nvidia-nccl-cu11"
-version = "2.21.5"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ac/9a/8b6a28b3b87d5fddab0e92cd835339eb8fbddaa71ae67518c8c1b3d05bae/nvidia_nccl_cu11-2.21.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:49d8350629c7888701d1fd200934942671cb5c728f49acc5a0b3a768820bed29", size = 147811630, upload-time = "2024-04-03T15:33:12.879Z" },
-]
-
-[[package]]
-name = "nvidia-nccl-cu12"
-version = "2.27.5"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/bb/1c/857979db0ef194ca5e21478a0612bcdbbe59458d7694361882279947b349/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:31432ad4d1fb1004eb0c56203dc9bc2178a1ba69d1d9e02d64a6938ab5e40e7a", size = 322400625, upload-time = "2025-06-26T04:11:04.496Z" },
- { url = "https://files.pythonhosted.org/packages/6e/89/f7a07dc961b60645dbbf42e80f2bc85ade7feb9a491b11a1e973aa00071f/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ad730cf15cb5d25fe849c6e6ca9eb5b76db16a80f13f425ac68d8e2e55624457", size = 322348229, upload-time = "2025-06-26T04:11:28.385Z" },
-]
-
-[[package]]
-name = "nvidia-nvjitlink-cu12"
-version = "12.6.85"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/9d/d7/c5383e47c7e9bf1c99d5bd2a8c935af2b6d705ad831a7ec5c97db4d82f4f/nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:eedc36df9e88b682efe4309aa16b5b4e78c2407eac59e8c10a6a47535164369a", size = 19744971, upload-time = "2024-11-20T17:46:53.366Z" },
- { url = "https://files.pythonhosted.org/packages/31/db/dc71113d441f208cdfe7ae10d4983884e13f464a6252450693365e166dcf/nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf4eaa7d4b6b543ffd69d6abfb11efdeb2db48270d94dfd3a452c24150829e41", size = 19270338, upload-time = "2024-11-20T17:46:29.758Z" },
-]
-
-[[package]]
-name = "nvidia-nvshmem-cu12"
-version = "3.4.5"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/1d/6a/03aa43cc9bd3ad91553a88b5f6fb25ed6a3752ae86ce2180221962bc2aa5/nvidia_nvshmem_cu12-3.4.5-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b48363fc6964dede448029434c6abed6c5e37f823cb43c3bcde7ecfc0457e15", size = 138936938, upload-time = "2025-09-06T00:32:05.589Z" },
- { url = "https://files.pythonhosted.org/packages/b5/09/6ea3ea725f82e1e76684f0708bbedd871fc96da89945adeba65c3835a64c/nvidia_nvshmem_cu12-3.4.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:042f2500f24c021db8a06c5eec2539027d57460e1c1a762055a6554f72c369bd", size = 139103095, upload-time = "2025-09-06T00:32:31.266Z" },
-]
-
-[[package]]
-name = "nvidia-nvtx-cu11"
-version = "11.8.86"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/d5/a2/23214c23118784dc2189ac2d2e48190df3e4206e2f73eb17d47140797a2b/nvidia_nvtx_cu11-11.8.86-py3-none-manylinux1_x86_64.whl", hash = "sha256:890656d8bd9b4e280231c832e1f0d03459200ba4824ddda3dcb59b1e1989b9f5", size = 99125, upload-time = "2022-10-03T21:47:19.565Z" },
- { url = "https://files.pythonhosted.org/packages/b5/ad/973a187b137a3d45dc3faac421ef1275fb41fc169fd3889e2d5ceb0daa54/nvidia_nvtx_cu11-11.8.86-py3-none-manylinux2014_x86_64.whl", hash = "sha256:979f5b2aef5da164c5c53c64c85c3dfa61b8b4704f4f963bb568bf98fa8472e8", size = 99130, upload-time = "2024-08-16T23:58:33.479Z" },
-]
-
-[[package]]
-name = "nvidia-nvtx-cu12"
-version = "12.6.77"
-source = { registry = "https://pypi.org/simple" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/b9/93/80f8a520375af9d7ee44571a6544653a176e53c2b8ccce85b97b83c2491b/nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f44f8d86bb7d5629988d61c8d3ae61dddb2015dee142740536bc7481b022fe4b", size = 90549, upload-time = "2024-11-20T17:38:17.387Z" },
- { url = "https://files.pythonhosted.org/packages/2b/53/36e2fd6c7068997169b49ffc8c12d5af5e5ff209df6e1a2c4d373b3a638f/nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:adcaabb9d436c9761fca2b13959a2d237c5f9fd406c8e4b723c695409ff88059", size = 90539, upload-time = "2024-10-01T17:00:27.179Z" },
- { url = "https://files.pythonhosted.org/packages/56/9a/fff8376f8e3d084cd1530e1ef7b879bb7d6d265620c95c1b322725c694f4/nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b90bed3df379fa79afbd21be8e04a0314336b8ae16768b58f2d34cb1d04cd7d2", size = 89276, upload-time = "2024-11-20T17:38:27.621Z" },
- { url = "https://files.pythonhosted.org/packages/9e/4e/0d0c945463719429b7bd21dece907ad0bde437a2ff12b9b12fee94722ab0/nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6574241a3ec5fdc9334353ab8c479fe75841dbe8f4532a8fc97ce63503330ba1", size = 89265, upload-time = "2024-10-01T17:00:38.172Z" },
-]
-
-[[package]]
-name = "packaging"
-version = "26.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" },
-]
-
-[[package]]
-name = "pandas"
-version = "2.3.3"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
- { name = "python-dateutil", marker = "python_full_version < '3.11'" },
- { name = "pytz", marker = "python_full_version < '3.11'" },
- { name = "tzdata", marker = "python_full_version < '3.11'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/33/01/d40b85317f86cf08d853a4f495195c73815fdf205eef3993821720274518/pandas-2.3.3.tar.gz", hash = "sha256:e05e1af93b977f7eafa636d043f9f94c7ee3ac81af99c13508215942e64c993b", size = 4495223, upload-time = "2025-09-29T23:34:51.853Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/3d/f7/f425a00df4fcc22b292c6895c6831c0c8ae1d9fac1e024d16f98a9ce8749/pandas-2.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:376c6446ae31770764215a6c937f72d917f214b43560603cd60da6408f183b6c", size = 11555763, upload-time = "2025-09-29T23:16:53.287Z" },
- { url = "https://files.pythonhosted.org/packages/13/4f/66d99628ff8ce7857aca52fed8f0066ce209f96be2fede6cef9f84e8d04f/pandas-2.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e19d192383eab2f4ceb30b412b22ea30690c9e618f78870357ae1d682912015a", size = 10801217, upload-time = "2025-09-29T23:17:04.522Z" },
- { url = "https://files.pythonhosted.org/packages/1d/03/3fc4a529a7710f890a239cc496fc6d50ad4a0995657dccc1d64695adb9f4/pandas-2.3.3-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5caf26f64126b6c7aec964f74266f435afef1c1b13da3b0636c7518a1fa3e2b1", size = 12148791, upload-time = "2025-09-29T23:17:18.444Z" },
- { url = "https://files.pythonhosted.org/packages/40/a8/4dac1f8f8235e5d25b9955d02ff6f29396191d4e665d71122c3722ca83c5/pandas-2.3.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dd7478f1463441ae4ca7308a70e90b33470fa593429f9d4c578dd00d1fa78838", size = 12769373, upload-time = "2025-09-29T23:17:35.846Z" },
- { url = "https://files.pythonhosted.org/packages/df/91/82cc5169b6b25440a7fc0ef3a694582418d875c8e3ebf796a6d6470aa578/pandas-2.3.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4793891684806ae50d1288c9bae9330293ab4e083ccd1c5e383c34549c6e4250", size = 13200444, upload-time = "2025-09-29T23:17:49.341Z" },
- { url = "https://files.pythonhosted.org/packages/10/ae/89b3283800ab58f7af2952704078555fa60c807fff764395bb57ea0b0dbd/pandas-2.3.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:28083c648d9a99a5dd035ec125d42439c6c1c525098c58af0fc38dd1a7a1b3d4", size = 13858459, upload-time = "2025-09-29T23:18:03.722Z" },
- { url = "https://files.pythonhosted.org/packages/85/72/530900610650f54a35a19476eca5104f38555afccda1aa11a92ee14cb21d/pandas-2.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:503cf027cf9940d2ceaa1a93cfb5f8c8c7e6e90720a2850378f0b3f3b1e06826", size = 11346086, upload-time = "2025-09-29T23:18:18.505Z" },
- { url = "https://files.pythonhosted.org/packages/c1/fa/7ac648108144a095b4fb6aa3de1954689f7af60a14cf25583f4960ecb878/pandas-2.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:602b8615ebcc4a0c1751e71840428ddebeb142ec02c786e8ad6b1ce3c8dec523", size = 11578790, upload-time = "2025-09-29T23:18:30.065Z" },
- { url = "https://files.pythonhosted.org/packages/9b/35/74442388c6cf008882d4d4bdfc4109be87e9b8b7ccd097ad1e7f006e2e95/pandas-2.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8fe25fc7b623b0ef6b5009149627e34d2a4657e880948ec3c840e9402e5c1b45", size = 10833831, upload-time = "2025-09-29T23:38:56.071Z" },
- { url = "https://files.pythonhosted.org/packages/fe/e4/de154cbfeee13383ad58d23017da99390b91d73f8c11856f2095e813201b/pandas-2.3.3-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b468d3dad6ff947df92dcb32ede5b7bd41a9b3cceef0a30ed925f6d01fb8fa66", size = 12199267, upload-time = "2025-09-29T23:18:41.627Z" },
- { url = "https://files.pythonhosted.org/packages/bf/c9/63f8d545568d9ab91476b1818b4741f521646cbdd151c6efebf40d6de6f7/pandas-2.3.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b98560e98cb334799c0b07ca7967ac361a47326e9b4e5a7dfb5ab2b1c9d35a1b", size = 12789281, upload-time = "2025-09-29T23:18:56.834Z" },
- { url = "https://files.pythonhosted.org/packages/f2/00/a5ac8c7a0e67fd1a6059e40aa08fa1c52cc00709077d2300e210c3ce0322/pandas-2.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37b5848ba49824e5c30bedb9c830ab9b7751fd049bc7914533e01c65f79791", size = 13240453, upload-time = "2025-09-29T23:19:09.247Z" },
- { url = "https://files.pythonhosted.org/packages/27/4d/5c23a5bc7bd209231618dd9e606ce076272c9bc4f12023a70e03a86b4067/pandas-2.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:db4301b2d1f926ae677a751eb2bd0e8c5f5319c9cb3f88b0becbbb0b07b34151", size = 13890361, upload-time = "2025-09-29T23:19:25.342Z" },
- { url = "https://files.pythonhosted.org/packages/8e/59/712db1d7040520de7a4965df15b774348980e6df45c129b8c64d0dbe74ef/pandas-2.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:f086f6fe114e19d92014a1966f43a3e62285109afe874f067f5abbdcbb10e59c", size = 11348702, upload-time = "2025-09-29T23:19:38.296Z" },
- { url = "https://files.pythonhosted.org/packages/9c/fb/231d89e8637c808b997d172b18e9d4a4bc7bf31296196c260526055d1ea0/pandas-2.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d21f6d74eb1725c2efaa71a2bfc661a0689579b58e9c0ca58a739ff0b002b53", size = 11597846, upload-time = "2025-09-29T23:19:48.856Z" },
- { url = "https://files.pythonhosted.org/packages/5c/bd/bf8064d9cfa214294356c2d6702b716d3cf3bb24be59287a6a21e24cae6b/pandas-2.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3fd2f887589c7aa868e02632612ba39acb0b8948faf5cc58f0850e165bd46f35", size = 10729618, upload-time = "2025-09-29T23:39:08.659Z" },
- { url = "https://files.pythonhosted.org/packages/57/56/cf2dbe1a3f5271370669475ead12ce77c61726ffd19a35546e31aa8edf4e/pandas-2.3.3-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ecaf1e12bdc03c86ad4a7ea848d66c685cb6851d807a26aa245ca3d2017a1908", size = 11737212, upload-time = "2025-09-29T23:19:59.765Z" },
- { url = "https://files.pythonhosted.org/packages/e5/63/cd7d615331b328e287d8233ba9fdf191a9c2d11b6af0c7a59cfcec23de68/pandas-2.3.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b3d11d2fda7eb164ef27ffc14b4fcab16a80e1ce67e9f57e19ec0afaf715ba89", size = 12362693, upload-time = "2025-09-29T23:20:14.098Z" },
- { url = "https://files.pythonhosted.org/packages/a6/de/8b1895b107277d52f2b42d3a6806e69cfef0d5cf1d0ba343470b9d8e0a04/pandas-2.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a68e15f780eddf2b07d242e17a04aa187a7ee12b40b930bfdd78070556550e98", size = 12771002, upload-time = "2025-09-29T23:20:26.76Z" },
- { url = "https://files.pythonhosted.org/packages/87/21/84072af3187a677c5893b170ba2c8fbe450a6ff911234916da889b698220/pandas-2.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:371a4ab48e950033bcf52b6527eccb564f52dc826c02afd9a1bc0ab731bba084", size = 13450971, upload-time = "2025-09-29T23:20:41.344Z" },
- { url = "https://files.pythonhosted.org/packages/86/41/585a168330ff063014880a80d744219dbf1dd7a1c706e75ab3425a987384/pandas-2.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:a16dcec078a01eeef8ee61bf64074b4e524a2a3f4b3be9326420cabe59c4778b", size = 10992722, upload-time = "2025-09-29T23:20:54.139Z" },
- { url = "https://files.pythonhosted.org/packages/cd/4b/18b035ee18f97c1040d94debd8f2e737000ad70ccc8f5513f4eefad75f4b/pandas-2.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:56851a737e3470de7fa88e6131f41281ed440d29a9268dcbf0002da5ac366713", size = 11544671, upload-time = "2025-09-29T23:21:05.024Z" },
- { url = "https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bdcd9d1167f4885211e401b3036c0c8d9e274eee67ea8d0758a256d60704cfe8", size = 10680807, upload-time = "2025-09-29T23:21:15.979Z" },
- { url = "https://files.pythonhosted.org/packages/16/87/9472cf4a487d848476865321de18cc8c920b8cab98453ab79dbbc98db63a/pandas-2.3.3-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e32e7cc9af0f1cc15548288a51a3b681cc2a219faa838e995f7dc53dbab1062d", size = 11709872, upload-time = "2025-09-29T23:21:27.165Z" },
- { url = "https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:318d77e0e42a628c04dc56bcef4b40de67918f7041c2b061af1da41dcff670ac", size = 12306371, upload-time = "2025-09-29T23:21:40.532Z" },
- { url = "https://files.pythonhosted.org/packages/33/81/a3afc88fca4aa925804a27d2676d22dcd2031c2ebe08aabd0ae55b9ff282/pandas-2.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4e0a175408804d566144e170d0476b15d78458795bb18f1304fb94160cabf40c", size = 12765333, upload-time = "2025-09-29T23:21:55.77Z" },
- { url = "https://files.pythonhosted.org/packages/8d/0f/b4d4ae743a83742f1153464cf1a8ecfafc3ac59722a0b5c8602310cb7158/pandas-2.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:93c2d9ab0fc11822b5eece72ec9587e172f63cff87c00b062f6e37448ced4493", size = 13418120, upload-time = "2025-09-29T23:22:10.109Z" },
- { url = "https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:f8bfc0e12dc78f777f323f55c58649591b2cd0c43534e8355c51d3fede5f4dee", size = 10993991, upload-time = "2025-09-29T23:25:04.889Z" },
- { url = "https://files.pythonhosted.org/packages/f9/ca/3f8d4f49740799189e1395812f3bf23b5e8fc7c190827d55a610da72ce55/pandas-2.3.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:75ea25f9529fdec2d2e93a42c523962261e567d250b0013b16210e1d40d7c2e5", size = 12048227, upload-time = "2025-09-29T23:22:24.343Z" },
- { url = "https://files.pythonhosted.org/packages/0e/5a/f43efec3e8c0cc92c4663ccad372dbdff72b60bdb56b2749f04aa1d07d7e/pandas-2.3.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74ecdf1d301e812db96a465a525952f4dde225fdb6d8e5a521d47e1f42041e21", size = 11411056, upload-time = "2025-09-29T23:22:37.762Z" },
- { url = "https://files.pythonhosted.org/packages/46/b1/85331edfc591208c9d1a63a06baa67b21d332e63b7a591a5ba42a10bb507/pandas-2.3.3-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6435cb949cb34ec11cc9860246ccb2fdc9ecd742c12d3304989017d53f039a78", size = 11645189, upload-time = "2025-09-29T23:22:51.688Z" },
- { url = "https://files.pythonhosted.org/packages/44/23/78d645adc35d94d1ac4f2a3c4112ab6f5b8999f4898b8cdf01252f8df4a9/pandas-2.3.3-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:900f47d8f20860de523a1ac881c4c36d65efcb2eb850e6948140fa781736e110", size = 12121912, upload-time = "2025-09-29T23:23:05.042Z" },
- { url = "https://files.pythonhosted.org/packages/53/da/d10013df5e6aaef6b425aa0c32e1fc1f3e431e4bcabd420517dceadce354/pandas-2.3.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a45c765238e2ed7d7c608fc5bc4a6f88b642f2f01e70c0c23d2224dd21829d86", size = 12712160, upload-time = "2025-09-29T23:23:28.57Z" },
- { url = "https://files.pythonhosted.org/packages/bd/17/e756653095a083d8a37cbd816cb87148debcfcd920129b25f99dd8d04271/pandas-2.3.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c4fc4c21971a1a9f4bdb4c73978c7f7256caa3e62b323f70d6cb80db583350bc", size = 13199233, upload-time = "2025-09-29T23:24:24.876Z" },
- { url = "https://files.pythonhosted.org/packages/04/fd/74903979833db8390b73b3a8a7d30d146d710bd32703724dd9083950386f/pandas-2.3.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:ee15f284898e7b246df8087fc82b87b01686f98ee67d85a17b7ab44143a3a9a0", size = 11540635, upload-time = "2025-09-29T23:25:52.486Z" },
- { url = "https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1611aedd912e1ff81ff41c745822980c49ce4a7907537be8692c8dbc31924593", size = 10759079, upload-time = "2025-09-29T23:26:33.204Z" },
- { url = "https://files.pythonhosted.org/packages/ca/05/d01ef80a7a3a12b2f8bbf16daba1e17c98a2f039cbc8e2f77a2c5a63d382/pandas-2.3.3-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6d2cefc361461662ac48810cb14365a365ce864afe85ef1f447ff5a1e99ea81c", size = 11814049, upload-time = "2025-09-29T23:27:15.384Z" },
- { url = "https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ee67acbbf05014ea6c763beb097e03cd629961c8a632075eeb34247120abcb4b", size = 12332638, upload-time = "2025-09-29T23:27:51.625Z" },
- { url = "https://files.pythonhosted.org/packages/c5/33/dd70400631b62b9b29c3c93d2feee1d0964dc2bae2e5ad7a6c73a7f25325/pandas-2.3.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c46467899aaa4da076d5abc11084634e2d197e9460643dd455ac3db5856b24d6", size = 12886834, upload-time = "2025-09-29T23:28:21.289Z" },
- { url = "https://files.pythonhosted.org/packages/d3/18/b5d48f55821228d0d2692b34fd5034bb185e854bdb592e9c640f6290e012/pandas-2.3.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6253c72c6a1d990a410bc7de641d34053364ef8bcd3126f7e7450125887dffe3", size = 13409925, upload-time = "2025-09-29T23:28:58.261Z" },
- { url = "https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl", hash = "sha256:1b07204a219b3b7350abaae088f451860223a52cfb8a6c53358e7948735158e5", size = 11109071, upload-time = "2025-09-29T23:32:27.484Z" },
- { url = "https://files.pythonhosted.org/packages/89/9c/0e21c895c38a157e0faa1fb64587a9226d6dd46452cac4532d80c3c4a244/pandas-2.3.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2462b1a365b6109d275250baaae7b760fd25c726aaca0054649286bcfbb3e8ec", size = 12048504, upload-time = "2025-09-29T23:29:31.47Z" },
- { url = "https://files.pythonhosted.org/packages/d7/82/b69a1c95df796858777b68fbe6a81d37443a33319761d7c652ce77797475/pandas-2.3.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0242fe9a49aa8b4d78a4fa03acb397a58833ef6199e9aa40a95f027bb3a1b6e7", size = 11410702, upload-time = "2025-09-29T23:29:54.591Z" },
- { url = "https://files.pythonhosted.org/packages/f9/88/702bde3ba0a94b8c73a0181e05144b10f13f29ebfc2150c3a79062a8195d/pandas-2.3.3-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a21d830e78df0a515db2b3d2f5570610f5e6bd2e27749770e8bb7b524b89b450", size = 11634535, upload-time = "2025-09-29T23:30:21.003Z" },
- { url = "https://files.pythonhosted.org/packages/a4/1e/1bac1a839d12e6a82ec6cb40cda2edde64a2013a66963293696bbf31fbbb/pandas-2.3.3-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2e3ebdb170b5ef78f19bfb71b0dc5dc58775032361fa188e814959b74d726dd5", size = 12121582, upload-time = "2025-09-29T23:30:43.391Z" },
- { url = "https://files.pythonhosted.org/packages/44/91/483de934193e12a3b1d6ae7c8645d083ff88dec75f46e827562f1e4b4da6/pandas-2.3.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d051c0e065b94b7a3cea50eb1ec32e912cd96dba41647eb24104b6c6c14c5788", size = 12699963, upload-time = "2025-09-29T23:31:10.009Z" },
- { url = "https://files.pythonhosted.org/packages/70/44/5191d2e4026f86a2a109053e194d3ba7a31a2d10a9c2348368c63ed4e85a/pandas-2.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3869faf4bd07b3b66a9f462417d0ca3a9df29a9f6abd5d0d0dbab15dac7abe87", size = 13202175, upload-time = "2025-09-29T23:31:59.173Z" },
- { url = "https://files.pythonhosted.org/packages/56/b4/52eeb530a99e2a4c55ffcd352772b599ed4473a0f892d127f4147cf0f88e/pandas-2.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c503ba5216814e295f40711470446bc3fd00f0faea8a086cbc688808e26f92a2", size = 11567720, upload-time = "2025-09-29T23:33:06.209Z" },
- { url = "https://files.pythonhosted.org/packages/48/4a/2d8b67632a021bced649ba940455ed441ca854e57d6e7658a6024587b083/pandas-2.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a637c5cdfa04b6d6e2ecedcb81fc52ffb0fd78ce2ebccc9ea964df9f658de8c8", size = 10810302, upload-time = "2025-09-29T23:33:35.846Z" },
- { url = "https://files.pythonhosted.org/packages/13/e6/d2465010ee0569a245c975dc6967b801887068bc893e908239b1f4b6c1ac/pandas-2.3.3-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:854d00d556406bffe66a4c0802f334c9ad5a96b4f1f868adf036a21b11ef13ff", size = 12154874, upload-time = "2025-09-29T23:33:49.939Z" },
- { url = "https://files.pythonhosted.org/packages/1f/18/aae8c0aa69a386a3255940e9317f793808ea79d0a525a97a903366bb2569/pandas-2.3.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bf1f8a81d04ca90e32a0aceb819d34dbd378a98bf923b6398b9a3ec0bf44de29", size = 12790141, upload-time = "2025-09-29T23:34:05.655Z" },
- { url = "https://files.pythonhosted.org/packages/f7/26/617f98de789de00c2a444fbe6301bb19e66556ac78cff933d2c98f62f2b4/pandas-2.3.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:23ebd657a4d38268c7dfbdf089fbc31ea709d82e4923c5ffd4fbd5747133ce73", size = 13208697, upload-time = "2025-09-29T23:34:21.835Z" },
- { url = "https://files.pythonhosted.org/packages/b9/fb/25709afa4552042bd0e15717c75e9b4a2294c3dc4f7e6ea50f03c5136600/pandas-2.3.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5554c929ccc317d41a5e3d1234f3be588248e61f08a74dd17c9eabb535777dc9", size = 13879233, upload-time = "2025-09-29T23:34:35.079Z" },
- { url = "https://files.pythonhosted.org/packages/98/af/7be05277859a7bc399da8ba68b88c96b27b48740b6cf49688899c6eb4176/pandas-2.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:d3e28b3e83862ccf4d85ff19cf8c20b2ae7e503881711ff2d534dc8f761131aa", size = 11359119, upload-time = "2025-09-29T23:34:46.339Z" },
-]
-
-[[package]]
-name = "pandas"
-version = "3.0.1"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "numpy", version = "2.4.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
- { name = "python-dateutil", marker = "python_full_version >= '3.11'" },
- { name = "tzdata", marker = "(python_full_version >= '3.11' and sys_platform == 'emscripten') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/2e/0c/b28ed414f080ee0ad153f848586d61d1878f91689950f037f976ce15f6c8/pandas-3.0.1.tar.gz", hash = "sha256:4186a699674af418f655dbd420ed87f50d56b4cd6603784279d9eef6627823c8", size = 4641901, upload-time = "2026-02-17T22:20:16.434Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ff/07/c7087e003ceee9b9a82539b40414ec557aa795b584a1a346e89180853d79/pandas-3.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:de09668c1bf3b925c07e5762291602f0d789eca1b3a781f99c1c78f6cac0e7ea", size = 10323380, upload-time = "2026-02-17T22:18:16.133Z" },
- { url = "https://files.pythonhosted.org/packages/c1/27/90683c7122febeefe84a56f2cde86a9f05f68d53885cebcc473298dfc33e/pandas-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:24ba315ba3d6e5806063ac6eb717504e499ce30bd8c236d8693a5fd3f084c796", size = 9923455, upload-time = "2026-02-17T22:18:19.13Z" },
- { url = "https://files.pythonhosted.org/packages/0e/f1/ed17d927f9950643bc7631aa4c99ff0cc83a37864470bc419345b656a41f/pandas-3.0.1-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:406ce835c55bac912f2a0dcfaf27c06d73c6b04a5dde45f1fd3169ce31337389", size = 10753464, upload-time = "2026-02-17T22:18:21.134Z" },
- { url = "https://files.pythonhosted.org/packages/2e/7c/870c7e7daec2a6c7ff2ac9e33b23317230d4e4e954b35112759ea4a924a7/pandas-3.0.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:830994d7e1f31dd7e790045235605ab61cff6c94defc774547e8b7fdfbff3dc7", size = 11255234, upload-time = "2026-02-17T22:18:24.175Z" },
- { url = "https://files.pythonhosted.org/packages/5c/39/3653fe59af68606282b989c23d1a543ceba6e8099cbcc5f1d506a7bae2aa/pandas-3.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a64ce8b0f2de1d2efd2ae40b0abe7f8ae6b29fbfb3812098ed5a6f8e235ad9bf", size = 11767299, upload-time = "2026-02-17T22:18:26.824Z" },
- { url = "https://files.pythonhosted.org/packages/9b/31/1daf3c0c94a849c7a8dab8a69697b36d313b229918002ba3e409265c7888/pandas-3.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9832c2c69da24b602c32e0c7b1b508a03949c18ba08d4d9f1c1033426685b447", size = 12333292, upload-time = "2026-02-17T22:18:28.996Z" },
- { url = "https://files.pythonhosted.org/packages/1f/67/af63f83cd6ca603a00fe8530c10a60f0879265b8be00b5930e8e78c5b30b/pandas-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:84f0904a69e7365f79a0c77d3cdfccbfb05bf87847e3a51a41e1426b0edb9c79", size = 9892176, upload-time = "2026-02-17T22:18:31.79Z" },
- { url = "https://files.pythonhosted.org/packages/79/ab/9c776b14ac4b7b4140788eca18468ea39894bc7340a408f1d1e379856a6b/pandas-3.0.1-cp311-cp311-win_arm64.whl", hash = "sha256:4a68773d5a778afb31d12e34f7dd4612ab90de8c6fb1d8ffe5d4a03b955082a1", size = 9151328, upload-time = "2026-02-17T22:18:35.721Z" },
- { url = "https://files.pythonhosted.org/packages/37/51/b467209c08dae2c624873d7491ea47d2b47336e5403309d433ea79c38571/pandas-3.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:476f84f8c20c9f5bc47252b66b4bb25e1a9fc2fa98cead96744d8116cb85771d", size = 10344357, upload-time = "2026-02-17T22:18:38.262Z" },
- { url = "https://files.pythonhosted.org/packages/7c/f1/e2567ffc8951ab371db2e40b2fe068e36b81d8cf3260f06ae508700e5504/pandas-3.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0ab749dfba921edf641d4036c4c21c0b3ea70fea478165cb98a998fb2a261955", size = 9884543, upload-time = "2026-02-17T22:18:41.476Z" },
- { url = "https://files.pythonhosted.org/packages/d7/39/327802e0b6d693182403c144edacbc27eb82907b57062f23ef5a4c4a5ea7/pandas-3.0.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8e36891080b87823aff3640c78649b91b8ff6eea3c0d70aeabd72ea43ab069b", size = 10396030, upload-time = "2026-02-17T22:18:43.822Z" },
- { url = "https://files.pythonhosted.org/packages/3d/fe/89d77e424365280b79d99b3e1e7d606f5165af2f2ecfaf0c6d24c799d607/pandas-3.0.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:532527a701281b9dd371e2f582ed9094f4c12dd9ffb82c0c54ee28d8ac9520c4", size = 10876435, upload-time = "2026-02-17T22:18:45.954Z" },
- { url = "https://files.pythonhosted.org/packages/b5/a6/2a75320849dd154a793f69c951db759aedb8d1dd3939eeacda9bdcfa1629/pandas-3.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:356e5c055ed9b0da1580d465657bc7d00635af4fd47f30afb23025352ba764d1", size = 11405133, upload-time = "2026-02-17T22:18:48.533Z" },
- { url = "https://files.pythonhosted.org/packages/58/53/1d68fafb2e02d7881df66aa53be4cd748d25cbe311f3b3c85c93ea5d30ca/pandas-3.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9d810036895f9ad6345b8f2a338dd6998a74e8483847403582cab67745bff821", size = 11932065, upload-time = "2026-02-17T22:18:50.837Z" },
- { url = "https://files.pythonhosted.org/packages/75/08/67cc404b3a966b6df27b38370ddd96b3b023030b572283d035181854aac5/pandas-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:536232a5fe26dd989bd633e7a0c450705fdc86a207fec7254a55e9a22950fe43", size = 9741627, upload-time = "2026-02-17T22:18:53.905Z" },
- { url = "https://files.pythonhosted.org/packages/86/4f/caf9952948fb00d23795f09b893d11f1cacb384e666854d87249530f7cbe/pandas-3.0.1-cp312-cp312-win_arm64.whl", hash = "sha256:0f463ebfd8de7f326d38037c7363c6dacb857c5881ab8961fb387804d6daf2f7", size = 9052483, upload-time = "2026-02-17T22:18:57.31Z" },
- { url = "https://files.pythonhosted.org/packages/0b/48/aad6ec4f8d007534c091e9a7172b3ec1b1ee6d99a9cbb936b5eab6c6cf58/pandas-3.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5272627187b5d9c20e55d27caf5f2cd23e286aba25cadf73c8590e432e2b7262", size = 10317509, upload-time = "2026-02-17T22:18:59.498Z" },
- { url = "https://files.pythonhosted.org/packages/a8/14/5990826f779f79148ae9d3a2c39593dc04d61d5d90541e71b5749f35af95/pandas-3.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:661e0f665932af88c7877f31da0dc743fe9c8f2524bdffe23d24fdcb67ef9d56", size = 9860561, upload-time = "2026-02-17T22:19:02.265Z" },
- { url = "https://files.pythonhosted.org/packages/fa/80/f01ff54664b6d70fed71475543d108a9b7c888e923ad210795bef04ffb7d/pandas-3.0.1-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:75e6e292ff898679e47a2199172593d9f6107fd2dd3617c22c2946e97d5df46e", size = 10365506, upload-time = "2026-02-17T22:19:05.017Z" },
- { url = "https://files.pythonhosted.org/packages/f2/85/ab6d04733a7d6ff32bfc8382bf1b07078228f5d6ebec5266b91bfc5c4ff7/pandas-3.0.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1ff8cf1d2896e34343197685f432450ec99a85ba8d90cce2030c5eee2ef98791", size = 10873196, upload-time = "2026-02-17T22:19:07.204Z" },
- { url = "https://files.pythonhosted.org/packages/48/a9/9301c83d0b47c23ac5deab91c6b39fd98d5b5db4d93b25df8d381451828f/pandas-3.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eca8b4510f6763f3d37359c2105df03a7a221a508f30e396a51d0713d462e68a", size = 11370859, upload-time = "2026-02-17T22:19:09.436Z" },
- { url = "https://files.pythonhosted.org/packages/59/fe/0c1fc5bd2d29c7db2ab372330063ad555fb83e08422829c785f5ec2176ca/pandas-3.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:06aff2ad6f0b94a17822cf8b83bbb563b090ed82ff4fe7712db2ce57cd50d9b8", size = 11924584, upload-time = "2026-02-17T22:19:11.562Z" },
- { url = "https://files.pythonhosted.org/packages/d6/7d/216a1588b65a7aa5f4535570418a599d943c85afb1d95b0876fc00aa1468/pandas-3.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:9fea306c783e28884c29057a1d9baa11a349bbf99538ec1da44c8476563d1b25", size = 9742769, upload-time = "2026-02-17T22:19:13.926Z" },
- { url = "https://files.pythonhosted.org/packages/c4/cb/810a22a6af9a4e97c8ab1c946b47f3489c5bca5adc483ce0ffc84c9cc768/pandas-3.0.1-cp313-cp313-win_arm64.whl", hash = "sha256:a8d37a43c52917427e897cb2e429f67a449327394396a81034a4449b99afda59", size = 9043855, upload-time = "2026-02-17T22:19:16.09Z" },
- { url = "https://files.pythonhosted.org/packages/92/fa/423c89086cca1f039cf1253c3ff5b90f157b5b3757314aa635f6bf3e30aa/pandas-3.0.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d54855f04f8246ed7b6fc96b05d4871591143c46c0b6f4af874764ed0d2d6f06", size = 10752673, upload-time = "2026-02-17T22:19:18.304Z" },
- { url = "https://files.pythonhosted.org/packages/22/23/b5a08ec1f40020397f0faba72f1e2c11f7596a6169c7b3e800abff0e433f/pandas-3.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4e1b677accee34a09e0dc2ce5624e4a58a1870ffe56fc021e9caf7f23cd7668f", size = 10404967, upload-time = "2026-02-17T22:19:20.726Z" },
- { url = "https://files.pythonhosted.org/packages/5c/81/94841f1bb4afdc2b52a99daa895ac2c61600bb72e26525ecc9543d453ebc/pandas-3.0.1-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a9cabbdcd03f1b6cd254d6dda8ae09b0252524be1592594c00b7895916cb1324", size = 10320575, upload-time = "2026-02-17T22:19:24.919Z" },
- { url = "https://files.pythonhosted.org/packages/0a/8b/2ae37d66a5342a83adadfd0cb0b4bf9c3c7925424dd5f40d15d6cfaa35ee/pandas-3.0.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ae2ab1f166668b41e770650101e7090824fd34d17915dd9cd479f5c5e0065e9", size = 10710921, upload-time = "2026-02-17T22:19:27.181Z" },
- { url = "https://files.pythonhosted.org/packages/a2/61/772b2e2757855e232b7ccf7cb8079a5711becb3a97f291c953def15a833f/pandas-3.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6bf0603c2e30e2cafac32807b06435f28741135cb8697eae8b28c7d492fc7d76", size = 11334191, upload-time = "2026-02-17T22:19:29.411Z" },
- { url = "https://files.pythonhosted.org/packages/1b/08/b16c6df3ef555d8495d1d265a7963b65be166785d28f06a350913a4fac78/pandas-3.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6c426422973973cae1f4a23e51d4ae85974f44871b24844e4f7de752dd877098", size = 11782256, upload-time = "2026-02-17T22:19:32.34Z" },
- { url = "https://files.pythonhosted.org/packages/55/80/178af0594890dee17e239fca96d3d8670ba0f5ff59b7d0439850924a9c09/pandas-3.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b03f91ae8c10a85c1613102c7bef5229b5379f343030a3ccefeca8a33414cf35", size = 10485047, upload-time = "2026-02-17T22:19:34.605Z" },
- { url = "https://files.pythonhosted.org/packages/bb/8b/4bb774a998b97e6c2fd62a9e6cfdaae133b636fd1c468f92afb4ae9a447a/pandas-3.0.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:99d0f92ed92d3083d140bf6b97774f9f13863924cf3f52a70711f4e7588f9d0a", size = 10322465, upload-time = "2026-02-17T22:19:36.803Z" },
- { url = "https://files.pythonhosted.org/packages/72/3a/5b39b51c64159f470f1ca3b1c2a87da290657ca022f7cd11442606f607d1/pandas-3.0.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3b66857e983208654294bb6477b8a63dee26b37bdd0eb34d010556e91261784f", size = 9910632, upload-time = "2026-02-17T22:19:39.001Z" },
- { url = "https://files.pythonhosted.org/packages/4e/f7/b449ffb3f68c11da12fc06fbf6d2fa3a41c41e17d0284d23a79e1c13a7e4/pandas-3.0.1-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56cf59638bf24dc9bdf2154c81e248b3289f9a09a6d04e63608c159022352749", size = 10440535, upload-time = "2026-02-17T22:19:41.157Z" },
- { url = "https://files.pythonhosted.org/packages/55/77/6ea82043db22cb0f2bbfe7198da3544000ddaadb12d26be36e19b03a2dc5/pandas-3.0.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c1a9f55e0f46951874b863d1f3906dcb57df2d9be5c5847ba4dfb55b2c815249", size = 10893940, upload-time = "2026-02-17T22:19:43.493Z" },
- { url = "https://files.pythonhosted.org/packages/03/30/f1b502a72468c89412c1b882a08f6eed8a4ee9dc033f35f65d0663df6081/pandas-3.0.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1849f0bba9c8a2fb0f691d492b834cc8dadf617e29015c66e989448d58d011ee", size = 11442711, upload-time = "2026-02-17T22:19:46.074Z" },
- { url = "https://files.pythonhosted.org/packages/0d/f0/ebb6ddd8fc049e98cabac5c2924d14d1dda26a20adb70d41ea2e428d3ec4/pandas-3.0.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c3d288439e11b5325b02ae6e9cc83e6805a62c40c5a6220bea9beb899c073b1c", size = 11963918, upload-time = "2026-02-17T22:19:48.838Z" },
- { url = "https://files.pythonhosted.org/packages/09/f8/8ce132104074f977f907442790eaae24e27bce3b3b454e82faa3237ff098/pandas-3.0.1-cp314-cp314-win_amd64.whl", hash = "sha256:93325b0fe372d192965f4cca88d97667f49557398bbf94abdda3bf1b591dbe66", size = 9862099, upload-time = "2026-02-17T22:19:51.081Z" },
- { url = "https://files.pythonhosted.org/packages/e6/b7/6af9aac41ef2456b768ef0ae60acf8abcebb450a52043d030a65b4b7c9bd/pandas-3.0.1-cp314-cp314-win_arm64.whl", hash = "sha256:97ca08674e3287c7148f4858b01136f8bdfe7202ad25ad04fec602dd1d29d132", size = 9185333, upload-time = "2026-02-17T22:19:53.266Z" },
- { url = "https://files.pythonhosted.org/packages/66/fc/848bb6710bc6061cb0c5badd65b92ff75c81302e0e31e496d00029fe4953/pandas-3.0.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:58eeb1b2e0fb322befcf2bbc9ba0af41e616abadb3d3414a6bc7167f6cbfce32", size = 10772664, upload-time = "2026-02-17T22:19:55.806Z" },
- { url = "https://files.pythonhosted.org/packages/69/5c/866a9bbd0f79263b4b0db6ec1a341be13a1473323f05c122388e0f15b21d/pandas-3.0.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:cd9af1276b5ca9e298bd79a26bda32fa9cc87ed095b2a9a60978d2ca058eaf87", size = 10421286, upload-time = "2026-02-17T22:19:58.091Z" },
- { url = "https://files.pythonhosted.org/packages/51/a4/2058fb84fb1cfbfb2d4a6d485e1940bb4ad5716e539d779852494479c580/pandas-3.0.1-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94f87a04984d6b63788327cd9f79dda62b7f9043909d2440ceccf709249ca988", size = 10342050, upload-time = "2026-02-17T22:20:01.376Z" },
- { url = "https://files.pythonhosted.org/packages/22/1b/674e89996cc4be74db3c4eb09240c4bb549865c9c3f5d9b086ff8fcfbf00/pandas-3.0.1-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85fe4c4df62e1e20f9db6ebfb88c844b092c22cd5324bdcf94bfa2fc1b391221", size = 10740055, upload-time = "2026-02-17T22:20:04.328Z" },
- { url = "https://files.pythonhosted.org/packages/d0/f8/e954b750764298c22fa4614376531fe63c521ef517e7059a51f062b87dca/pandas-3.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:331ca75a2f8672c365ae25c0b29e46f5ac0c6551fdace8eec4cd65e4fac271ff", size = 11357632, upload-time = "2026-02-17T22:20:06.647Z" },
- { url = "https://files.pythonhosted.org/packages/6d/02/c6e04b694ffd68568297abd03588b6d30295265176a5c01b7459d3bc35a3/pandas-3.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:15860b1fdb1973fffade772fdb931ccf9b2f400a3f5665aef94a00445d7d8dd5", size = 11810974, upload-time = "2026-02-17T22:20:08.946Z" },
- { url = "https://files.pythonhosted.org/packages/89/41/d7dfb63d2407f12055215070c42fc6ac41b66e90a2946cdc5e759058398b/pandas-3.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:44f1364411d5670efa692b146c748f4ed013df91ee91e9bec5677fb1fd58b937", size = 10884622, upload-time = "2026-02-17T22:20:11.711Z" },
- { url = "https://files.pythonhosted.org/packages/68/b0/34937815889fa982613775e4b97fddd13250f11012d769949c5465af2150/pandas-3.0.1-cp314-cp314t-win_arm64.whl", hash = "sha256:108dd1790337a494aa80e38def654ca3f0968cf4f362c85f44c15e471667102d", size = 9452085, upload-time = "2026-02-17T22:20:14.331Z" },
-]
-
-[[package]]
-name = "pillow"
-version = "11.3.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/4c/5d/45a3553a253ac8763f3561371432a90bdbe6000fbdcf1397ffe502aa206c/pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860", size = 5316554, upload-time = "2025-07-01T09:13:39.342Z" },
- { url = "https://files.pythonhosted.org/packages/7c/c8/67c12ab069ef586a25a4a79ced553586748fad100c77c0ce59bb4983ac98/pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad", size = 4686548, upload-time = "2025-07-01T09:13:41.835Z" },
- { url = "https://files.pythonhosted.org/packages/2f/bd/6741ebd56263390b382ae4c5de02979af7f8bd9807346d068700dd6d5cf9/pillow-11.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0", size = 5859742, upload-time = "2025-07-03T13:09:47.439Z" },
- { url = "https://files.pythonhosted.org/packages/ca/0b/c412a9e27e1e6a829e6ab6c2dca52dd563efbedf4c9c6aa453d9a9b77359/pillow-11.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b", size = 7633087, upload-time = "2025-07-03T13:09:51.796Z" },
- { url = "https://files.pythonhosted.org/packages/59/9d/9b7076aaf30f5dd17e5e5589b2d2f5a5d7e30ff67a171eb686e4eecc2adf/pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50", size = 5963350, upload-time = "2025-07-01T09:13:43.865Z" },
- { url = "https://files.pythonhosted.org/packages/f0/16/1a6bf01fb622fb9cf5c91683823f073f053005c849b1f52ed613afcf8dae/pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae", size = 6631840, upload-time = "2025-07-01T09:13:46.161Z" },
- { url = "https://files.pythonhosted.org/packages/7b/e6/6ff7077077eb47fde78739e7d570bdcd7c10495666b6afcd23ab56b19a43/pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9", size = 6074005, upload-time = "2025-07-01T09:13:47.829Z" },
- { url = "https://files.pythonhosted.org/packages/c3/3a/b13f36832ea6d279a697231658199e0a03cd87ef12048016bdcc84131601/pillow-11.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:040a5b691b0713e1f6cbe222e0f4f74cd233421e105850ae3b3c0ceda520f42e", size = 6708372, upload-time = "2025-07-01T09:13:52.145Z" },
- { url = "https://files.pythonhosted.org/packages/6c/e4/61b2e1a7528740efbc70b3d581f33937e38e98ef3d50b05007267a55bcb2/pillow-11.3.0-cp310-cp310-win32.whl", hash = "sha256:89bd777bc6624fe4115e9fac3352c79ed60f3bb18651420635f26e643e3dd1f6", size = 6277090, upload-time = "2025-07-01T09:13:53.915Z" },
- { url = "https://files.pythonhosted.org/packages/a9/d3/60c781c83a785d6afbd6a326ed4d759d141de43aa7365725cbcd65ce5e54/pillow-11.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:19d2ff547c75b8e3ff46f4d9ef969a06c30ab2d4263a9e287733aa8b2429ce8f", size = 6985988, upload-time = "2025-07-01T09:13:55.699Z" },
- { url = "https://files.pythonhosted.org/packages/9f/28/4f4a0203165eefb3763939c6789ba31013a2e90adffb456610f30f613850/pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f", size = 2422899, upload-time = "2025-07-01T09:13:57.497Z" },
- { url = "https://files.pythonhosted.org/packages/db/26/77f8ed17ca4ffd60e1dcd220a6ec6d71210ba398cfa33a13a1cd614c5613/pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722", size = 5316531, upload-time = "2025-07-01T09:13:59.203Z" },
- { url = "https://files.pythonhosted.org/packages/cb/39/ee475903197ce709322a17a866892efb560f57900d9af2e55f86db51b0a5/pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288", size = 4686560, upload-time = "2025-07-01T09:14:01.101Z" },
- { url = "https://files.pythonhosted.org/packages/d5/90/442068a160fd179938ba55ec8c97050a612426fae5ec0a764e345839f76d/pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d", size = 5870978, upload-time = "2025-07-03T13:09:55.638Z" },
- { url = "https://files.pythonhosted.org/packages/13/92/dcdd147ab02daf405387f0218dcf792dc6dd5b14d2573d40b4caeef01059/pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494", size = 7641168, upload-time = "2025-07-03T13:10:00.37Z" },
- { url = "https://files.pythonhosted.org/packages/6e/db/839d6ba7fd38b51af641aa904e2960e7a5644d60ec754c046b7d2aee00e5/pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58", size = 5973053, upload-time = "2025-07-01T09:14:04.491Z" },
- { url = "https://files.pythonhosted.org/packages/f2/2f/d7675ecae6c43e9f12aa8d58b6012683b20b6edfbdac7abcb4e6af7a3784/pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f", size = 6640273, upload-time = "2025-07-01T09:14:06.235Z" },
- { url = "https://files.pythonhosted.org/packages/45/ad/931694675ede172e15b2ff03c8144a0ddaea1d87adb72bb07655eaffb654/pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e", size = 6082043, upload-time = "2025-07-01T09:14:07.978Z" },
- { url = "https://files.pythonhosted.org/packages/3a/04/ba8f2b11fc80d2dd462d7abec16351b45ec99cbbaea4387648a44190351a/pillow-11.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94", size = 6715516, upload-time = "2025-07-01T09:14:10.233Z" },
- { url = "https://files.pythonhosted.org/packages/48/59/8cd06d7f3944cc7d892e8533c56b0acb68399f640786313275faec1e3b6f/pillow-11.3.0-cp311-cp311-win32.whl", hash = "sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0", size = 6274768, upload-time = "2025-07-01T09:14:11.921Z" },
- { url = "https://files.pythonhosted.org/packages/f1/cc/29c0f5d64ab8eae20f3232da8f8571660aa0ab4b8f1331da5c2f5f9a938e/pillow-11.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac", size = 6986055, upload-time = "2025-07-01T09:14:13.623Z" },
- { url = "https://files.pythonhosted.org/packages/c6/df/90bd886fabd544c25addd63e5ca6932c86f2b701d5da6c7839387a076b4a/pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd", size = 2423079, upload-time = "2025-07-01T09:14:15.268Z" },
- { url = "https://files.pythonhosted.org/packages/40/fe/1bc9b3ee13f68487a99ac9529968035cca2f0a51ec36892060edcc51d06a/pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4", size = 5278800, upload-time = "2025-07-01T09:14:17.648Z" },
- { url = "https://files.pythonhosted.org/packages/2c/32/7e2ac19b5713657384cec55f89065fb306b06af008cfd87e572035b27119/pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69", size = 4686296, upload-time = "2025-07-01T09:14:19.828Z" },
- { url = "https://files.pythonhosted.org/packages/8e/1e/b9e12bbe6e4c2220effebc09ea0923a07a6da1e1f1bfbc8d7d29a01ce32b/pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d", size = 5871726, upload-time = "2025-07-03T13:10:04.448Z" },
- { url = "https://files.pythonhosted.org/packages/8d/33/e9200d2bd7ba00dc3ddb78df1198a6e80d7669cce6c2bdbeb2530a74ec58/pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6", size = 7644652, upload-time = "2025-07-03T13:10:10.391Z" },
- { url = "https://files.pythonhosted.org/packages/41/f1/6f2427a26fc683e00d985bc391bdd76d8dd4e92fac33d841127eb8fb2313/pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7", size = 5977787, upload-time = "2025-07-01T09:14:21.63Z" },
- { url = "https://files.pythonhosted.org/packages/e4/c9/06dd4a38974e24f932ff5f98ea3c546ce3f8c995d3f0985f8e5ba48bba19/pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024", size = 6645236, upload-time = "2025-07-01T09:14:23.321Z" },
- { url = "https://files.pythonhosted.org/packages/40/e7/848f69fb79843b3d91241bad658e9c14f39a32f71a301bcd1d139416d1be/pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809", size = 6086950, upload-time = "2025-07-01T09:14:25.237Z" },
- { url = "https://files.pythonhosted.org/packages/0b/1a/7cff92e695a2a29ac1958c2a0fe4c0b2393b60aac13b04a4fe2735cad52d/pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d", size = 6723358, upload-time = "2025-07-01T09:14:27.053Z" },
- { url = "https://files.pythonhosted.org/packages/26/7d/73699ad77895f69edff76b0f332acc3d497f22f5d75e5360f78cbcaff248/pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149", size = 6275079, upload-time = "2025-07-01T09:14:30.104Z" },
- { url = "https://files.pythonhosted.org/packages/8c/ce/e7dfc873bdd9828f3b6e5c2bbb74e47a98ec23cc5c74fc4e54462f0d9204/pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d", size = 6986324, upload-time = "2025-07-01T09:14:31.899Z" },
- { url = "https://files.pythonhosted.org/packages/16/8f/b13447d1bf0b1f7467ce7d86f6e6edf66c0ad7cf44cf5c87a37f9bed9936/pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542", size = 2423067, upload-time = "2025-07-01T09:14:33.709Z" },
- { url = "https://files.pythonhosted.org/packages/1e/93/0952f2ed8db3a5a4c7a11f91965d6184ebc8cd7cbb7941a260d5f018cd2d/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd", size = 2128328, upload-time = "2025-07-01T09:14:35.276Z" },
- { url = "https://files.pythonhosted.org/packages/4b/e8/100c3d114b1a0bf4042f27e0f87d2f25e857e838034e98ca98fe7b8c0a9c/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8", size = 2170652, upload-time = "2025-07-01T09:14:37.203Z" },
- { url = "https://files.pythonhosted.org/packages/aa/86/3f758a28a6e381758545f7cdb4942e1cb79abd271bea932998fc0db93cb6/pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f", size = 2227443, upload-time = "2025-07-01T09:14:39.344Z" },
- { url = "https://files.pythonhosted.org/packages/01/f4/91d5b3ffa718df2f53b0dc109877993e511f4fd055d7e9508682e8aba092/pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c", size = 5278474, upload-time = "2025-07-01T09:14:41.843Z" },
- { url = "https://files.pythonhosted.org/packages/f9/0e/37d7d3eca6c879fbd9dba21268427dffda1ab00d4eb05b32923d4fbe3b12/pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd", size = 4686038, upload-time = "2025-07-01T09:14:44.008Z" },
- { url = "https://files.pythonhosted.org/packages/ff/b0/3426e5c7f6565e752d81221af9d3676fdbb4f352317ceafd42899aaf5d8a/pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e", size = 5864407, upload-time = "2025-07-03T13:10:15.628Z" },
- { url = "https://files.pythonhosted.org/packages/fc/c1/c6c423134229f2a221ee53f838d4be9d82bab86f7e2f8e75e47b6bf6cd77/pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1", size = 7639094, upload-time = "2025-07-03T13:10:21.857Z" },
- { url = "https://files.pythonhosted.org/packages/ba/c9/09e6746630fe6372c67c648ff9deae52a2bc20897d51fa293571977ceb5d/pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805", size = 5973503, upload-time = "2025-07-01T09:14:45.698Z" },
- { url = "https://files.pythonhosted.org/packages/d5/1c/a2a29649c0b1983d3ef57ee87a66487fdeb45132df66ab30dd37f7dbe162/pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8", size = 6642574, upload-time = "2025-07-01T09:14:47.415Z" },
- { url = "https://files.pythonhosted.org/packages/36/de/d5cc31cc4b055b6c6fd990e3e7f0f8aaf36229a2698501bcb0cdf67c7146/pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2", size = 6084060, upload-time = "2025-07-01T09:14:49.636Z" },
- { url = "https://files.pythonhosted.org/packages/d5/ea/502d938cbaeec836ac28a9b730193716f0114c41325db428e6b280513f09/pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b", size = 6721407, upload-time = "2025-07-01T09:14:51.962Z" },
- { url = "https://files.pythonhosted.org/packages/45/9c/9c5e2a73f125f6cbc59cc7087c8f2d649a7ae453f83bd0362ff7c9e2aee2/pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3", size = 6273841, upload-time = "2025-07-01T09:14:54.142Z" },
- { url = "https://files.pythonhosted.org/packages/23/85/397c73524e0cd212067e0c969aa245b01d50183439550d24d9f55781b776/pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51", size = 6978450, upload-time = "2025-07-01T09:14:56.436Z" },
- { url = "https://files.pythonhosted.org/packages/17/d2/622f4547f69cd173955194b78e4d19ca4935a1b0f03a302d655c9f6aae65/pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580", size = 2423055, upload-time = "2025-07-01T09:14:58.072Z" },
- { url = "https://files.pythonhosted.org/packages/dd/80/a8a2ac21dda2e82480852978416cfacd439a4b490a501a288ecf4fe2532d/pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e", size = 5281110, upload-time = "2025-07-01T09:14:59.79Z" },
- { url = "https://files.pythonhosted.org/packages/44/d6/b79754ca790f315918732e18f82a8146d33bcd7f4494380457ea89eb883d/pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d", size = 4689547, upload-time = "2025-07-01T09:15:01.648Z" },
- { url = "https://files.pythonhosted.org/packages/49/20/716b8717d331150cb00f7fdd78169c01e8e0c219732a78b0e59b6bdb2fd6/pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced", size = 5901554, upload-time = "2025-07-03T13:10:27.018Z" },
- { url = "https://files.pythonhosted.org/packages/74/cf/a9f3a2514a65bb071075063a96f0a5cf949c2f2fce683c15ccc83b1c1cab/pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c", size = 7669132, upload-time = "2025-07-03T13:10:33.01Z" },
- { url = "https://files.pythonhosted.org/packages/98/3c/da78805cbdbee9cb43efe8261dd7cc0b4b93f2ac79b676c03159e9db2187/pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8", size = 6005001, upload-time = "2025-07-01T09:15:03.365Z" },
- { url = "https://files.pythonhosted.org/packages/6c/fa/ce044b91faecf30e635321351bba32bab5a7e034c60187fe9698191aef4f/pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59", size = 6668814, upload-time = "2025-07-01T09:15:05.655Z" },
- { url = "https://files.pythonhosted.org/packages/7b/51/90f9291406d09bf93686434f9183aba27b831c10c87746ff49f127ee80cb/pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe", size = 6113124, upload-time = "2025-07-01T09:15:07.358Z" },
- { url = "https://files.pythonhosted.org/packages/cd/5a/6fec59b1dfb619234f7636d4157d11fb4e196caeee220232a8d2ec48488d/pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c", size = 6747186, upload-time = "2025-07-01T09:15:09.317Z" },
- { url = "https://files.pythonhosted.org/packages/49/6b/00187a044f98255225f172de653941e61da37104a9ea60e4f6887717e2b5/pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788", size = 6277546, upload-time = "2025-07-01T09:15:11.311Z" },
- { url = "https://files.pythonhosted.org/packages/e8/5c/6caaba7e261c0d75bab23be79f1d06b5ad2a2ae49f028ccec801b0e853d6/pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31", size = 6985102, upload-time = "2025-07-01T09:15:13.164Z" },
- { url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" },
- { url = "https://files.pythonhosted.org/packages/73/f4/04905af42837292ed86cb1b1dabe03dce1edc008ef14c473c5c7e1443c5d/pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12", size = 5278520, upload-time = "2025-07-01T09:15:17.429Z" },
- { url = "https://files.pythonhosted.org/packages/41/b0/33d79e377a336247df6348a54e6d2a2b85d644ca202555e3faa0cf811ecc/pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a", size = 4686116, upload-time = "2025-07-01T09:15:19.423Z" },
- { url = "https://files.pythonhosted.org/packages/49/2d/ed8bc0ab219ae8768f529597d9509d184fe8a6c4741a6864fea334d25f3f/pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632", size = 5864597, upload-time = "2025-07-03T13:10:38.404Z" },
- { url = "https://files.pythonhosted.org/packages/b5/3d/b932bb4225c80b58dfadaca9d42d08d0b7064d2d1791b6a237f87f661834/pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673", size = 7638246, upload-time = "2025-07-03T13:10:44.987Z" },
- { url = "https://files.pythonhosted.org/packages/09/b5/0487044b7c096f1b48f0d7ad416472c02e0e4bf6919541b111efd3cae690/pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027", size = 5973336, upload-time = "2025-07-01T09:15:21.237Z" },
- { url = "https://files.pythonhosted.org/packages/a8/2d/524f9318f6cbfcc79fbc004801ea6b607ec3f843977652fdee4857a7568b/pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77", size = 6642699, upload-time = "2025-07-01T09:15:23.186Z" },
- { url = "https://files.pythonhosted.org/packages/6f/d2/a9a4f280c6aefedce1e8f615baaa5474e0701d86dd6f1dede66726462bbd/pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874", size = 6083789, upload-time = "2025-07-01T09:15:25.1Z" },
- { url = "https://files.pythonhosted.org/packages/fe/54/86b0cd9dbb683a9d5e960b66c7379e821a19be4ac5810e2e5a715c09a0c0/pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a", size = 6720386, upload-time = "2025-07-01T09:15:27.378Z" },
- { url = "https://files.pythonhosted.org/packages/e7/95/88efcaf384c3588e24259c4203b909cbe3e3c2d887af9e938c2022c9dd48/pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214", size = 6370911, upload-time = "2025-07-01T09:15:29.294Z" },
- { url = "https://files.pythonhosted.org/packages/2e/cc/934e5820850ec5eb107e7b1a72dd278140731c669f396110ebc326f2a503/pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635", size = 7117383, upload-time = "2025-07-01T09:15:31.128Z" },
- { url = "https://files.pythonhosted.org/packages/d6/e9/9c0a616a71da2a5d163aa37405e8aced9a906d574b4a214bede134e731bc/pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6", size = 2511385, upload-time = "2025-07-01T09:15:33.328Z" },
- { url = "https://files.pythonhosted.org/packages/1a/33/c88376898aff369658b225262cd4f2659b13e8178e7534df9e6e1fa289f6/pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae", size = 5281129, upload-time = "2025-07-01T09:15:35.194Z" },
- { url = "https://files.pythonhosted.org/packages/1f/70/d376247fb36f1844b42910911c83a02d5544ebd2a8bad9efcc0f707ea774/pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653", size = 4689580, upload-time = "2025-07-01T09:15:37.114Z" },
- { url = "https://files.pythonhosted.org/packages/eb/1c/537e930496149fbac69efd2fc4329035bbe2e5475b4165439e3be9cb183b/pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6", size = 5902860, upload-time = "2025-07-03T13:10:50.248Z" },
- { url = "https://files.pythonhosted.org/packages/bd/57/80f53264954dcefeebcf9dae6e3eb1daea1b488f0be8b8fef12f79a3eb10/pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36", size = 7670694, upload-time = "2025-07-03T13:10:56.432Z" },
- { url = "https://files.pythonhosted.org/packages/70/ff/4727d3b71a8578b4587d9c276e90efad2d6fe0335fd76742a6da08132e8c/pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b", size = 6005888, upload-time = "2025-07-01T09:15:39.436Z" },
- { url = "https://files.pythonhosted.org/packages/05/ae/716592277934f85d3be51d7256f3636672d7b1abfafdc42cf3f8cbd4b4c8/pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477", size = 6670330, upload-time = "2025-07-01T09:15:41.269Z" },
- { url = "https://files.pythonhosted.org/packages/e7/bb/7fe6cddcc8827b01b1a9766f5fdeb7418680744f9082035bdbabecf1d57f/pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50", size = 6114089, upload-time = "2025-07-01T09:15:43.13Z" },
- { url = "https://files.pythonhosted.org/packages/8b/f5/06bfaa444c8e80f1a8e4bff98da9c83b37b5be3b1deaa43d27a0db37ef84/pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b", size = 6748206, upload-time = "2025-07-01T09:15:44.937Z" },
- { url = "https://files.pythonhosted.org/packages/f0/77/bc6f92a3e8e6e46c0ca78abfffec0037845800ea38c73483760362804c41/pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12", size = 6377370, upload-time = "2025-07-01T09:15:46.673Z" },
- { url = "https://files.pythonhosted.org/packages/4a/82/3a721f7d69dca802befb8af08b7c79ebcab461007ce1c18bd91a5d5896f9/pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db", size = 7121500, upload-time = "2025-07-01T09:15:48.512Z" },
- { url = "https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" },
- { url = "https://files.pythonhosted.org/packages/9e/8e/9c089f01677d1264ab8648352dcb7773f37da6ad002542760c80107da816/pillow-11.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:48d254f8a4c776de343051023eb61ffe818299eeac478da55227d96e241de53f", size = 5316478, upload-time = "2025-07-01T09:15:52.209Z" },
- { url = "https://files.pythonhosted.org/packages/b5/a9/5749930caf674695867eb56a581e78eb5f524b7583ff10b01b6e5048acb3/pillow-11.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7aee118e30a4cf54fdd873bd3a29de51e29105ab11f9aad8c32123f58c8f8081", size = 4686522, upload-time = "2025-07-01T09:15:54.162Z" },
- { url = "https://files.pythonhosted.org/packages/43/46/0b85b763eb292b691030795f9f6bb6fcaf8948c39413c81696a01c3577f7/pillow-11.3.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:23cff760a9049c502721bdb743a7cb3e03365fafcdfc2ef9784610714166e5a4", size = 5853376, upload-time = "2025-07-03T13:11:01.066Z" },
- { url = "https://files.pythonhosted.org/packages/5e/c6/1a230ec0067243cbd60bc2dad5dc3ab46a8a41e21c15f5c9b52b26873069/pillow-11.3.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6359a3bc43f57d5b375d1ad54a0074318a0844d11b76abccf478c37c986d3cfc", size = 7626020, upload-time = "2025-07-03T13:11:06.479Z" },
- { url = "https://files.pythonhosted.org/packages/63/dd/f296c27ffba447bfad76c6a0c44c1ea97a90cb9472b9304c94a732e8dbfb/pillow-11.3.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:092c80c76635f5ecb10f3f83d76716165c96f5229addbd1ec2bdbbda7d496e06", size = 5956732, upload-time = "2025-07-01T09:15:56.111Z" },
- { url = "https://files.pythonhosted.org/packages/a5/a0/98a3630f0b57f77bae67716562513d3032ae70414fcaf02750279c389a9e/pillow-11.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cadc9e0ea0a2431124cde7e1697106471fc4c1da01530e679b2391c37d3fbb3a", size = 6624404, upload-time = "2025-07-01T09:15:58.245Z" },
- { url = "https://files.pythonhosted.org/packages/de/e6/83dfba5646a290edd9a21964da07674409e410579c341fc5b8f7abd81620/pillow-11.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6a418691000f2a418c9135a7cf0d797c1bb7d9a485e61fe8e7722845b95ef978", size = 6067760, upload-time = "2025-07-01T09:16:00.003Z" },
- { url = "https://files.pythonhosted.org/packages/bc/41/15ab268fe6ee9a2bc7391e2bbb20a98d3974304ab1a406a992dcb297a370/pillow-11.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:97afb3a00b65cc0804d1c7abddbf090a81eaac02768af58cbdcaaa0a931e0b6d", size = 6700534, upload-time = "2025-07-01T09:16:02.29Z" },
- { url = "https://files.pythonhosted.org/packages/64/79/6d4f638b288300bed727ff29f2a3cb63db054b33518a95f27724915e3fbc/pillow-11.3.0-cp39-cp39-win32.whl", hash = "sha256:ea944117a7974ae78059fcc1800e5d3295172bb97035c0c1d9345fca1419da71", size = 6277091, upload-time = "2025-07-01T09:16:04.4Z" },
- { url = "https://files.pythonhosted.org/packages/46/05/4106422f45a05716fd34ed21763f8ec182e8ea00af6e9cb05b93a247361a/pillow-11.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:e5c5858ad8ec655450a7c7df532e9842cf8df7cc349df7225c60d5d348c8aada", size = 6986091, upload-time = "2025-07-01T09:16:06.342Z" },
- { url = "https://files.pythonhosted.org/packages/63/c6/287fd55c2c12761d0591549d48885187579b7c257bef0c6660755b0b59ae/pillow-11.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:6abdbfd3aea42be05702a8dd98832329c167ee84400a1d1f61ab11437f1717eb", size = 2422632, upload-time = "2025-07-01T09:16:08.142Z" },
- { url = "https://files.pythonhosted.org/packages/6f/8b/209bd6b62ce8367f47e68a218bffac88888fdf2c9fcf1ecadc6c3ec1ebc7/pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967", size = 5270556, upload-time = "2025-07-01T09:16:09.961Z" },
- { url = "https://files.pythonhosted.org/packages/2e/e6/231a0b76070c2cfd9e260a7a5b504fb72da0a95279410fa7afd99d9751d6/pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe", size = 4654625, upload-time = "2025-07-01T09:16:11.913Z" },
- { url = "https://files.pythonhosted.org/packages/13/f4/10cf94fda33cb12765f2397fc285fa6d8eb9c29de7f3185165b702fc7386/pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c", size = 4874207, upload-time = "2025-07-03T13:11:10.201Z" },
- { url = "https://files.pythonhosted.org/packages/72/c9/583821097dc691880c92892e8e2d41fe0a5a3d6021f4963371d2f6d57250/pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25", size = 6583939, upload-time = "2025-07-03T13:11:15.68Z" },
- { url = "https://files.pythonhosted.org/packages/3b/8e/5c9d410f9217b12320efc7c413e72693f48468979a013ad17fd690397b9a/pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27", size = 4957166, upload-time = "2025-07-01T09:16:13.74Z" },
- { url = "https://files.pythonhosted.org/packages/62/bb/78347dbe13219991877ffb3a91bf09da8317fbfcd4b5f9140aeae020ad71/pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a", size = 5581482, upload-time = "2025-07-01T09:16:16.107Z" },
- { url = "https://files.pythonhosted.org/packages/d9/28/1000353d5e61498aaeaaf7f1e4b49ddb05f2c6575f9d4f9f914a3538b6e1/pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f", size = 6984596, upload-time = "2025-07-01T09:16:18.07Z" },
- { url = "https://files.pythonhosted.org/packages/9e/e3/6fa84033758276fb31da12e5fb66ad747ae83b93c67af17f8c6ff4cc8f34/pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6", size = 5270566, upload-time = "2025-07-01T09:16:19.801Z" },
- { url = "https://files.pythonhosted.org/packages/5b/ee/e8d2e1ab4892970b561e1ba96cbd59c0d28cf66737fc44abb2aec3795a4e/pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438", size = 4654618, upload-time = "2025-07-01T09:16:21.818Z" },
- { url = "https://files.pythonhosted.org/packages/f2/6d/17f80f4e1f0761f02160fc433abd4109fa1548dcfdca46cfdadaf9efa565/pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3", size = 4874248, upload-time = "2025-07-03T13:11:20.738Z" },
- { url = "https://files.pythonhosted.org/packages/de/5f/c22340acd61cef960130585bbe2120e2fd8434c214802f07e8c03596b17e/pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c", size = 6583963, upload-time = "2025-07-03T13:11:26.283Z" },
- { url = "https://files.pythonhosted.org/packages/31/5e/03966aedfbfcbb4d5f8aa042452d3361f325b963ebbadddac05b122e47dd/pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361", size = 4957170, upload-time = "2025-07-01T09:16:23.762Z" },
- { url = "https://files.pythonhosted.org/packages/cc/2d/e082982aacc927fc2cab48e1e731bdb1643a1406acace8bed0900a61464e/pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7", size = 5581505, upload-time = "2025-07-01T09:16:25.593Z" },
- { url = "https://files.pythonhosted.org/packages/34/e7/ae39f538fd6844e982063c3a5e4598b8ced43b9633baa3a85ef33af8c05c/pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8", size = 6984598, upload-time = "2025-07-01T09:16:27.732Z" },
-]
-
-[[package]]
-name = "pillow"
-version = "12.1.1"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/1f/42/5c74462b4fd957fcd7b13b04fb3205ff8349236ea74c7c375766d6c82288/pillow-12.1.1.tar.gz", hash = "sha256:9ad8fa5937ab05218e2b6a4cff30295ad35afd2f83ac592e68c0d871bb0fdbc4", size = 46980264, upload-time = "2026-02-11T04:23:07.146Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/1d/30/5bd3d794762481f8c8ae9c80e7b76ecea73b916959eb587521358ef0b2f9/pillow-12.1.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f1625b72740fdda5d77b4def688eb8fd6490975d06b909fd19f13f391e077e0", size = 5304099, upload-time = "2026-02-11T04:20:06.13Z" },
- { url = "https://files.pythonhosted.org/packages/bd/c1/aab9e8f3eeb4490180e357955e15c2ef74b31f64790ff356c06fb6cf6d84/pillow-12.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:178aa072084bd88ec759052feca8e56cbb14a60b39322b99a049e58090479713", size = 4657880, upload-time = "2026-02-11T04:20:09.291Z" },
- { url = "https://files.pythonhosted.org/packages/f1/0a/9879e30d56815ad529d3985aeff5af4964202425c27261a6ada10f7cbf53/pillow-12.1.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b66e95d05ba806247aaa1561f080abc7975daf715c30780ff92a20e4ec546e1b", size = 6222587, upload-time = "2026-02-11T04:20:10.82Z" },
- { url = "https://files.pythonhosted.org/packages/5a/5f/a1b72ff7139e4f89014e8d451442c74a774d5c43cd938fb0a9f878576b37/pillow-12.1.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:89c7e895002bbe49cdc5426150377cbbc04767d7547ed145473f496dfa40408b", size = 8027678, upload-time = "2026-02-11T04:20:12.455Z" },
- { url = "https://files.pythonhosted.org/packages/e2/c2/c7cb187dac79a3d22c3ebeae727abee01e077c8c7d930791dc592f335153/pillow-12.1.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a5cbdcddad0af3da87cb16b60d23648bc3b51967eb07223e9fed77a82b457c4", size = 6335777, upload-time = "2026-02-11T04:20:14.441Z" },
- { url = "https://files.pythonhosted.org/packages/0c/7b/f9b09a7804ec7336effb96c26d37c29d27225783dc1501b7d62dcef6ae25/pillow-12.1.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9f51079765661884a486727f0729d29054242f74b46186026582b4e4769918e4", size = 7027140, upload-time = "2026-02-11T04:20:16.387Z" },
- { url = "https://files.pythonhosted.org/packages/98/b2/2fa3c391550bd421b10849d1a2144c44abcd966daadd2f7c12e19ea988c4/pillow-12.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:99c1506ea77c11531d75e3a412832a13a71c7ebc8192ab9e4b2e355555920e3e", size = 6449855, upload-time = "2026-02-11T04:20:18.554Z" },
- { url = "https://files.pythonhosted.org/packages/96/ff/9caf4b5b950c669263c39e96c78c0d74a342c71c4f43fd031bb5cb7ceac9/pillow-12.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:36341d06738a9f66c8287cf8b876d24b18db9bd8740fa0672c74e259ad408cff", size = 7151329, upload-time = "2026-02-11T04:20:20.646Z" },
- { url = "https://files.pythonhosted.org/packages/7b/f8/4b24841f582704da675ca535935bccb32b00a6da1226820845fac4a71136/pillow-12.1.1-cp310-cp310-win32.whl", hash = "sha256:6c52f062424c523d6c4db85518774cc3d50f5539dd6eed32b8f6229b26f24d40", size = 6325574, upload-time = "2026-02-11T04:20:22.43Z" },
- { url = "https://files.pythonhosted.org/packages/f8/f9/9f6b01c0881d7036063aa6612ef04c0e2cad96be21325a1e92d0203f8e91/pillow-12.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:c6008de247150668a705a6338156efb92334113421ceecf7438a12c9a12dab23", size = 7032347, upload-time = "2026-02-11T04:20:23.932Z" },
- { url = "https://files.pythonhosted.org/packages/79/13/c7922edded3dcdaf10c59297540b72785620abc0538872c819915746757d/pillow-12.1.1-cp310-cp310-win_arm64.whl", hash = "sha256:1a9b0ee305220b392e1124a764ee4265bd063e54a751a6b62eff69992f457fa9", size = 2453457, upload-time = "2026-02-11T04:20:25.392Z" },
- { url = "https://files.pythonhosted.org/packages/2b/46/5da1ec4a5171ee7bf1a0efa064aba70ba3d6e0788ce3f5acd1375d23c8c0/pillow-12.1.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e879bb6cd5c73848ef3b2b48b8af9ff08c5b71ecda8048b7dd22d8a33f60be32", size = 5304084, upload-time = "2026-02-11T04:20:27.501Z" },
- { url = "https://files.pythonhosted.org/packages/78/93/a29e9bc02d1cf557a834da780ceccd54e02421627200696fcf805ebdc3fb/pillow-12.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:365b10bb9417dd4498c0e3b128018c4a624dc11c7b97d8cc54effe3b096f4c38", size = 4657866, upload-time = "2026-02-11T04:20:29.827Z" },
- { url = "https://files.pythonhosted.org/packages/13/84/583a4558d492a179d31e4aae32eadce94b9acf49c0337c4ce0b70e0a01f2/pillow-12.1.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d4ce8e329c93845720cd2014659ca67eac35f6433fd3050393d85f3ecef0dad5", size = 6232148, upload-time = "2026-02-11T04:20:31.329Z" },
- { url = "https://files.pythonhosted.org/packages/d5/e2/53c43334bbbb2d3b938978532fbda8e62bb6e0b23a26ce8592f36bcc4987/pillow-12.1.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc354a04072b765eccf2204f588a7a532c9511e8b9c7f900e1b64e3e33487090", size = 8038007, upload-time = "2026-02-11T04:20:34.225Z" },
- { url = "https://files.pythonhosted.org/packages/b8/a6/3d0e79c8a9d58150dd98e199d7c1c56861027f3829a3a60b3c2784190180/pillow-12.1.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e7976bf1910a8116b523b9f9f58bf410f3e8aa330cd9a2bb2953f9266ab49af", size = 6345418, upload-time = "2026-02-11T04:20:35.858Z" },
- { url = "https://files.pythonhosted.org/packages/a2/c8/46dfeac5825e600579157eea177be43e2f7ff4a99da9d0d0a49533509ac5/pillow-12.1.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:597bd9c8419bc7c6af5604e55847789b69123bbe25d65cc6ad3012b4f3c98d8b", size = 7034590, upload-time = "2026-02-11T04:20:37.91Z" },
- { url = "https://files.pythonhosted.org/packages/af/bf/e6f65d3db8a8bbfeaf9e13cc0417813f6319863a73de934f14b2229ada18/pillow-12.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2c1fc0f2ca5f96a3c8407e41cca26a16e46b21060fe6d5b099d2cb01412222f5", size = 6458655, upload-time = "2026-02-11T04:20:39.496Z" },
- { url = "https://files.pythonhosted.org/packages/f9/c2/66091f3f34a25894ca129362e510b956ef26f8fb67a0e6417bc5744e56f1/pillow-12.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:578510d88c6229d735855e1f278aa305270438d36a05031dfaae5067cc8eb04d", size = 7159286, upload-time = "2026-02-11T04:20:41.139Z" },
- { url = "https://files.pythonhosted.org/packages/7b/5a/24bc8eb526a22f957d0cec6243146744966d40857e3d8deb68f7902ca6c1/pillow-12.1.1-cp311-cp311-win32.whl", hash = "sha256:7311c0a0dcadb89b36b7025dfd8326ecfa36964e29913074d47382706e516a7c", size = 6328663, upload-time = "2026-02-11T04:20:43.184Z" },
- { url = "https://files.pythonhosted.org/packages/31/03/bef822e4f2d8f9d7448c133d0a18185d3cce3e70472774fffefe8b0ed562/pillow-12.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:fbfa2a7c10cc2623f412753cddf391c7f971c52ca40a3f65dc5039b2939e8563", size = 7031448, upload-time = "2026-02-11T04:20:44.696Z" },
- { url = "https://files.pythonhosted.org/packages/49/70/f76296f53610bd17b2e7d31728b8b7825e3ac3b5b3688b51f52eab7c0818/pillow-12.1.1-cp311-cp311-win_arm64.whl", hash = "sha256:b81b5e3511211631b3f672a595e3221252c90af017e399056d0faabb9538aa80", size = 2453651, upload-time = "2026-02-11T04:20:46.243Z" },
- { url = "https://files.pythonhosted.org/packages/07/d3/8df65da0d4df36b094351dce696f2989bec731d4f10e743b1c5f4da4d3bf/pillow-12.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab323b787d6e18b3d91a72fc99b1a2c28651e4358749842b8f8dfacd28ef2052", size = 5262803, upload-time = "2026-02-11T04:20:47.653Z" },
- { url = "https://files.pythonhosted.org/packages/d6/71/5026395b290ff404b836e636f51d7297e6c83beceaa87c592718747e670f/pillow-12.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adebb5bee0f0af4909c30db0d890c773d1a92ffe83da908e2e9e720f8edf3984", size = 4657601, upload-time = "2026-02-11T04:20:49.328Z" },
- { url = "https://files.pythonhosted.org/packages/b1/2e/1001613d941c67442f745aff0f7cc66dd8df9a9c084eb497e6a543ee6f7e/pillow-12.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb66b7cc26f50977108790e2456b7921e773f23db5630261102233eb355a3b79", size = 6234995, upload-time = "2026-02-11T04:20:51.032Z" },
- { url = "https://files.pythonhosted.org/packages/07/26/246ab11455b2549b9233dbd44d358d033a2f780fa9007b61a913c5b2d24e/pillow-12.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aee2810642b2898bb187ced9b349e95d2a7272930796e022efaf12e99dccd293", size = 8045012, upload-time = "2026-02-11T04:20:52.882Z" },
- { url = "https://files.pythonhosted.org/packages/b2/8b/07587069c27be7535ac1fe33874e32de118fbd34e2a73b7f83436a88368c/pillow-12.1.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a0b1cd6232e2b618adcc54d9882e4e662a089d5768cd188f7c245b4c8c44a397", size = 6349638, upload-time = "2026-02-11T04:20:54.444Z" },
- { url = "https://files.pythonhosted.org/packages/ff/79/6df7b2ee763d619cda2fb4fea498e5f79d984dae304d45a8999b80d6cf5c/pillow-12.1.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7aac39bcf8d4770d089588a2e1dd111cbaa42df5a94be3114222057d68336bd0", size = 7041540, upload-time = "2026-02-11T04:20:55.97Z" },
- { url = "https://files.pythonhosted.org/packages/2c/5e/2ba19e7e7236d7529f4d873bdaf317a318896bac289abebd4bb00ef247f0/pillow-12.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ab174cd7d29a62dd139c44bf74b698039328f45cb03b4596c43473a46656b2f3", size = 6462613, upload-time = "2026-02-11T04:20:57.542Z" },
- { url = "https://files.pythonhosted.org/packages/03/03/31216ec124bb5c3dacd74ce8efff4cc7f52643653bad4825f8f08c697743/pillow-12.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:339ffdcb7cbeaa08221cd401d517d4b1fe7a9ed5d400e4a8039719238620ca35", size = 7166745, upload-time = "2026-02-11T04:20:59.196Z" },
- { url = "https://files.pythonhosted.org/packages/1f/e7/7c4552d80052337eb28653b617eafdef39adfb137c49dd7e831b8dc13bc5/pillow-12.1.1-cp312-cp312-win32.whl", hash = "sha256:5d1f9575a12bed9e9eedd9a4972834b08c97a352bd17955ccdebfeca5913fa0a", size = 6328823, upload-time = "2026-02-11T04:21:01.385Z" },
- { url = "https://files.pythonhosted.org/packages/3d/17/688626d192d7261bbbf98846fc98995726bddc2c945344b65bec3a29d731/pillow-12.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:21329ec8c96c6e979cd0dfd29406c40c1d52521a90544463057d2aaa937d66a6", size = 7033367, upload-time = "2026-02-11T04:21:03.536Z" },
- { url = "https://files.pythonhosted.org/packages/ed/fe/a0ef1f73f939b0eca03ee2c108d0043a87468664770612602c63266a43c4/pillow-12.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:af9a332e572978f0218686636610555ae3defd1633597be015ed50289a03c523", size = 2453811, upload-time = "2026-02-11T04:21:05.116Z" },
- { url = "https://files.pythonhosted.org/packages/d5/11/6db24d4bd7685583caeae54b7009584e38da3c3d4488ed4cd25b439de486/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:d242e8ac078781f1de88bf823d70c1a9b3c7950a44cdf4b7c012e22ccbcd8e4e", size = 4062689, upload-time = "2026-02-11T04:21:06.804Z" },
- { url = "https://files.pythonhosted.org/packages/33/c0/ce6d3b1fe190f0021203e0d9b5b99e57843e345f15f9ef22fcd43842fd21/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:02f84dfad02693676692746df05b89cf25597560db2857363a208e393429f5e9", size = 4138535, upload-time = "2026-02-11T04:21:08.452Z" },
- { url = "https://files.pythonhosted.org/packages/a0/c6/d5eb6a4fb32a3f9c21a8c7613ec706534ea1cf9f4b3663e99f0d83f6fca8/pillow-12.1.1-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:e65498daf4b583091ccbb2556c7000abf0f3349fcd57ef7adc9a84a394ed29f6", size = 3601364, upload-time = "2026-02-11T04:21:10.194Z" },
- { url = "https://files.pythonhosted.org/packages/14/a1/16c4b823838ba4c9c52c0e6bbda903a3fe5a1bdbf1b8eb4fff7156f3e318/pillow-12.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c6db3b84c87d48d0088943bf33440e0c42370b99b1c2a7989216f7b42eede60", size = 5262561, upload-time = "2026-02-11T04:21:11.742Z" },
- { url = "https://files.pythonhosted.org/packages/bb/ad/ad9dc98ff24f485008aa5cdedaf1a219876f6f6c42a4626c08bc4e80b120/pillow-12.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b7e5304e34942bf62e15184219a7b5ad4ff7f3bb5cca4d984f37df1a0e1aee2", size = 4657460, upload-time = "2026-02-11T04:21:13.786Z" },
- { url = "https://files.pythonhosted.org/packages/9e/1b/f1a4ea9a895b5732152789326202a82464d5254759fbacae4deea3069334/pillow-12.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5bddd742a44b7e6b1e773ab5db102bd7a94c32555ba656e76d319d19c3850", size = 6232698, upload-time = "2026-02-11T04:21:15.949Z" },
- { url = "https://files.pythonhosted.org/packages/95/f4/86f51b8745070daf21fd2e5b1fe0eb35d4db9ca26e6d58366562fb56a743/pillow-12.1.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc44ef1f3de4f45b50ccf9136999d71abb99dca7706bc75d222ed350b9fd2289", size = 8041706, upload-time = "2026-02-11T04:21:17.723Z" },
- { url = "https://files.pythonhosted.org/packages/29/9b/d6ecd956bb1266dd1045e995cce9b8d77759e740953a1c9aad9502a0461e/pillow-12.1.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a8eb7ed8d4198bccbd07058416eeec51686b498e784eda166395a23eb99138e", size = 6346621, upload-time = "2026-02-11T04:21:19.547Z" },
- { url = "https://files.pythonhosted.org/packages/71/24/538bff45bde96535d7d998c6fed1a751c75ac7c53c37c90dc2601b243893/pillow-12.1.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47b94983da0c642de92ced1702c5b6c292a84bd3a8e1d1702ff923f183594717", size = 7038069, upload-time = "2026-02-11T04:21:21.378Z" },
- { url = "https://files.pythonhosted.org/packages/94/0e/58cb1a6bc48f746bc4cb3adb8cabff73e2742c92b3bf7a220b7cf69b9177/pillow-12.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:518a48c2aab7ce596d3bf79d0e275661b846e86e4d0e7dec34712c30fe07f02a", size = 6460040, upload-time = "2026-02-11T04:21:23.148Z" },
- { url = "https://files.pythonhosted.org/packages/6c/57/9045cb3ff11eeb6c1adce3b2d60d7d299d7b273a2e6c8381a524abfdc474/pillow-12.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a550ae29b95c6dc13cf69e2c9dc5747f814c54eeb2e32d683e5e93af56caa029", size = 7164523, upload-time = "2026-02-11T04:21:25.01Z" },
- { url = "https://files.pythonhosted.org/packages/73/f2/9be9cb99f2175f0d4dbadd6616ce1bf068ee54a28277ea1bf1fbf729c250/pillow-12.1.1-cp313-cp313-win32.whl", hash = "sha256:a003d7422449f6d1e3a34e3dd4110c22148336918ddbfc6a32581cd54b2e0b2b", size = 6332552, upload-time = "2026-02-11T04:21:27.238Z" },
- { url = "https://files.pythonhosted.org/packages/3f/eb/b0834ad8b583d7d9d42b80becff092082a1c3c156bb582590fcc973f1c7c/pillow-12.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:344cf1e3dab3be4b1fa08e449323d98a2a3f819ad20f4b22e77a0ede31f0faa1", size = 7040108, upload-time = "2026-02-11T04:21:29.462Z" },
- { url = "https://files.pythonhosted.org/packages/d5/7d/fc09634e2aabdd0feabaff4a32f4a7d97789223e7c2042fd805ea4b4d2c2/pillow-12.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:5c0dd1636633e7e6a0afe7bf6a51a14992b7f8e60de5789018ebbdfae55b040a", size = 2453712, upload-time = "2026-02-11T04:21:31.072Z" },
- { url = "https://files.pythonhosted.org/packages/19/2a/b9d62794fc8a0dd14c1943df68347badbd5511103e0d04c035ffe5cf2255/pillow-12.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0330d233c1a0ead844fc097a7d16c0abff4c12e856c0b325f231820fee1f39da", size = 5264880, upload-time = "2026-02-11T04:21:32.865Z" },
- { url = "https://files.pythonhosted.org/packages/26/9d/e03d857d1347fa5ed9247e123fcd2a97b6220e15e9cb73ca0a8d91702c6e/pillow-12.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dae5f21afb91322f2ff791895ddd8889e5e947ff59f71b46041c8ce6db790bc", size = 4660616, upload-time = "2026-02-11T04:21:34.97Z" },
- { url = "https://files.pythonhosted.org/packages/f7/ec/8a6d22afd02570d30954e043f09c32772bfe143ba9285e2fdb11284952cd/pillow-12.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e0c664be47252947d870ac0d327fea7e63985a08794758aa8af5b6cb6ec0c9c", size = 6269008, upload-time = "2026-02-11T04:21:36.623Z" },
- { url = "https://files.pythonhosted.org/packages/3d/1d/6d875422c9f28a4a361f495a5f68d9de4a66941dc2c619103ca335fa6446/pillow-12.1.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:691ab2ac363b8217f7d31b3497108fb1f50faab2f75dfb03284ec2f217e87bf8", size = 8073226, upload-time = "2026-02-11T04:21:38.585Z" },
- { url = "https://files.pythonhosted.org/packages/a1/cd/134b0b6ee5eda6dc09e25e24b40fdafe11a520bc725c1d0bbaa5e00bf95b/pillow-12.1.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9e8064fb1cc019296958595f6db671fba95209e3ceb0c4734c9baf97de04b20", size = 6380136, upload-time = "2026-02-11T04:21:40.562Z" },
- { url = "https://files.pythonhosted.org/packages/7a/a9/7628f013f18f001c1b98d8fffe3452f306a70dc6aba7d931019e0492f45e/pillow-12.1.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:472a8d7ded663e6162dafdf20015c486a7009483ca671cece7a9279b512fcb13", size = 7067129, upload-time = "2026-02-11T04:21:42.521Z" },
- { url = "https://files.pythonhosted.org/packages/1e/f8/66ab30a2193b277785601e82ee2d49f68ea575d9637e5e234faaa98efa4c/pillow-12.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:89b54027a766529136a06cfebeecb3a04900397a3590fd252160b888479517bf", size = 6491807, upload-time = "2026-02-11T04:21:44.22Z" },
- { url = "https://files.pythonhosted.org/packages/da/0b/a877a6627dc8318fdb84e357c5e1a758c0941ab1ddffdafd231983788579/pillow-12.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:86172b0831b82ce4f7877f280055892b31179e1576aa00d0df3bb1bbf8c3e524", size = 7190954, upload-time = "2026-02-11T04:21:46.114Z" },
- { url = "https://files.pythonhosted.org/packages/83/43/6f732ff85743cf746b1361b91665d9f5155e1483817f693f8d57ea93147f/pillow-12.1.1-cp313-cp313t-win32.whl", hash = "sha256:44ce27545b6efcf0fdbdceb31c9a5bdea9333e664cda58a7e674bb74608b3986", size = 6336441, upload-time = "2026-02-11T04:21:48.22Z" },
- { url = "https://files.pythonhosted.org/packages/3b/44/e865ef3986611bb75bfabdf94a590016ea327833f434558801122979cd0e/pillow-12.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a285e3eb7a5a45a2ff504e31f4a8d1b12ef62e84e5411c6804a42197c1cf586c", size = 7045383, upload-time = "2026-02-11T04:21:50.015Z" },
- { url = "https://files.pythonhosted.org/packages/a8/c6/f4fb24268d0c6908b9f04143697ea18b0379490cb74ba9e8d41b898bd005/pillow-12.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cc7d296b5ea4d29e6570dabeaed58d31c3fea35a633a69679fb03d7664f43fb3", size = 2456104, upload-time = "2026-02-11T04:21:51.633Z" },
- { url = "https://files.pythonhosted.org/packages/03/d0/bebb3ffbf31c5a8e97241476c4cf8b9828954693ce6744b4a2326af3e16b/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:417423db963cb4be8bac3fc1204fe61610f6abeed1580a7a2cbb2fbda20f12af", size = 4062652, upload-time = "2026-02-11T04:21:53.19Z" },
- { url = "https://files.pythonhosted.org/packages/2d/c0/0e16fb0addda4851445c28f8350d8c512f09de27bbb0d6d0bbf8b6709605/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:b957b71c6b2387610f556a7eb0828afbe40b4a98036fc0d2acfa5a44a0c2036f", size = 4138823, upload-time = "2026-02-11T04:22:03.088Z" },
- { url = "https://files.pythonhosted.org/packages/6b/fb/6170ec655d6f6bb6630a013dd7cf7bc218423d7b5fa9071bf63dc32175ae/pillow-12.1.1-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:097690ba1f2efdeb165a20469d59d8bb03c55fb6621eb2041a060ae8ea3e9642", size = 3601143, upload-time = "2026-02-11T04:22:04.909Z" },
- { url = "https://files.pythonhosted.org/packages/59/04/dc5c3f297510ba9a6837cbb318b87dd2b8f73eb41a43cc63767f65cb599c/pillow-12.1.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2815a87ab27848db0321fb78c7f0b2c8649dee134b7f2b80c6a45c6831d75ccd", size = 5266254, upload-time = "2026-02-11T04:22:07.656Z" },
- { url = "https://files.pythonhosted.org/packages/05/30/5db1236b0d6313f03ebf97f5e17cda9ca060f524b2fcc875149a8360b21c/pillow-12.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f7ed2c6543bad5a7d5530eb9e78c53132f93dfa44a28492db88b41cdab885202", size = 4657499, upload-time = "2026-02-11T04:22:09.613Z" },
- { url = "https://files.pythonhosted.org/packages/6f/18/008d2ca0eb612e81968e8be0bbae5051efba24d52debf930126d7eaacbba/pillow-12.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:652a2c9ccfb556235b2b501a3a7cf3742148cd22e04b5625c5fe057ea3e3191f", size = 6232137, upload-time = "2026-02-11T04:22:11.434Z" },
- { url = "https://files.pythonhosted.org/packages/70/f1/f14d5b8eeb4b2cd62b9f9f847eb6605f103df89ef619ac68f92f748614ea/pillow-12.1.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d6e4571eedf43af33d0fc233a382a76e849badbccdf1ac438841308652a08e1f", size = 8042721, upload-time = "2026-02-11T04:22:13.321Z" },
- { url = "https://files.pythonhosted.org/packages/5a/d6/17824509146e4babbdabf04d8171491fa9d776f7061ff6e727522df9bd03/pillow-12.1.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b574c51cf7d5d62e9be37ba446224b59a2da26dc4c1bb2ecbe936a4fb1a7cb7f", size = 6347798, upload-time = "2026-02-11T04:22:15.449Z" },
- { url = "https://files.pythonhosted.org/packages/d1/ee/c85a38a9ab92037a75615aba572c85ea51e605265036e00c5b67dfafbfe2/pillow-12.1.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a37691702ed687799de29a518d63d4682d9016932db66d4e90c345831b02fb4e", size = 7039315, upload-time = "2026-02-11T04:22:17.24Z" },
- { url = "https://files.pythonhosted.org/packages/ec/f3/bc8ccc6e08a148290d7523bde4d9a0d6c981db34631390dc6e6ec34cacf6/pillow-12.1.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f95c00d5d6700b2b890479664a06e754974848afaae5e21beb4d83c106923fd0", size = 6462360, upload-time = "2026-02-11T04:22:19.111Z" },
- { url = "https://files.pythonhosted.org/packages/f6/ab/69a42656adb1d0665ab051eec58a41f169ad295cf81ad45406963105408f/pillow-12.1.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:559b38da23606e68681337ad74622c4dbba02254fc9cb4488a305dd5975c7eeb", size = 7165438, upload-time = "2026-02-11T04:22:21.041Z" },
- { url = "https://files.pythonhosted.org/packages/02/46/81f7aa8941873f0f01d4b55cc543b0a3d03ec2ee30d617a0448bf6bd6dec/pillow-12.1.1-cp314-cp314-win32.whl", hash = "sha256:03edcc34d688572014ff223c125a3f77fb08091e4607e7745002fc214070b35f", size = 6431503, upload-time = "2026-02-11T04:22:22.833Z" },
- { url = "https://files.pythonhosted.org/packages/40/72/4c245f7d1044b67affc7f134a09ea619d4895333d35322b775b928180044/pillow-12.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:50480dcd74fa63b8e78235957d302d98d98d82ccbfac4c7e12108ba9ecbdba15", size = 7176748, upload-time = "2026-02-11T04:22:24.64Z" },
- { url = "https://files.pythonhosted.org/packages/e4/ad/8a87bdbe038c5c698736e3348af5c2194ffb872ea52f11894c95f9305435/pillow-12.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:5cb1785d97b0c3d1d1a16bc1d710c4a0049daefc4935f3a8f31f827f4d3d2e7f", size = 2544314, upload-time = "2026-02-11T04:22:26.685Z" },
- { url = "https://files.pythonhosted.org/packages/6c/9d/efd18493f9de13b87ede7c47e69184b9e859e4427225ea962e32e56a49bc/pillow-12.1.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1f90cff8aa76835cba5769f0b3121a22bd4eb9e6884cfe338216e557a9a548b8", size = 5268612, upload-time = "2026-02-11T04:22:29.884Z" },
- { url = "https://files.pythonhosted.org/packages/f8/f1/4f42eb2b388eb2ffc660dcb7f7b556c1015c53ebd5f7f754965ef997585b/pillow-12.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1f1be78ce9466a7ee64bfda57bdba0f7cc499d9794d518b854816c41bf0aa4e9", size = 4660567, upload-time = "2026-02-11T04:22:31.799Z" },
- { url = "https://files.pythonhosted.org/packages/01/54/df6ef130fa43e4b82e32624a7b821a2be1c5653a5fdad8469687a7db4e00/pillow-12.1.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:42fc1f4677106188ad9a55562bbade416f8b55456f522430fadab3cef7cd4e60", size = 6269951, upload-time = "2026-02-11T04:22:33.921Z" },
- { url = "https://files.pythonhosted.org/packages/a9/48/618752d06cc44bb4aae8ce0cd4e6426871929ed7b46215638088270d9b34/pillow-12.1.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:98edb152429ab62a1818039744d8fbb3ccab98a7c29fc3d5fcef158f3f1f68b7", size = 8074769, upload-time = "2026-02-11T04:22:35.877Z" },
- { url = "https://files.pythonhosted.org/packages/c3/bd/f1d71eb39a72fa088d938655afba3e00b38018d052752f435838961127d8/pillow-12.1.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d470ab1178551dd17fdba0fef463359c41aaa613cdcd7ff8373f54be629f9f8f", size = 6381358, upload-time = "2026-02-11T04:22:37.698Z" },
- { url = "https://files.pythonhosted.org/packages/64/ef/c784e20b96674ed36a5af839305f55616f8b4f8aa8eeccf8531a6e312243/pillow-12.1.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6408a7b064595afcab0a49393a413732a35788f2a5092fdc6266952ed67de586", size = 7068558, upload-time = "2026-02-11T04:22:39.597Z" },
- { url = "https://files.pythonhosted.org/packages/73/cb/8059688b74422ae61278202c4e1ad992e8a2e7375227be0a21c6b87ca8d5/pillow-12.1.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5d8c41325b382c07799a3682c1c258469ea2ff97103c53717b7893862d0c98ce", size = 6493028, upload-time = "2026-02-11T04:22:42.73Z" },
- { url = "https://files.pythonhosted.org/packages/c6/da/e3c008ed7d2dd1f905b15949325934510b9d1931e5df999bb15972756818/pillow-12.1.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c7697918b5be27424e9ce568193efd13d925c4481dd364e43f5dff72d33e10f8", size = 7191940, upload-time = "2026-02-11T04:22:44.543Z" },
- { url = "https://files.pythonhosted.org/packages/01/4a/9202e8d11714c1fc5951f2e1ef362f2d7fbc595e1f6717971d5dd750e969/pillow-12.1.1-cp314-cp314t-win32.whl", hash = "sha256:d2912fd8114fc5545aa3a4b5576512f64c55a03f3ebcca4c10194d593d43ea36", size = 6438736, upload-time = "2026-02-11T04:22:46.347Z" },
- { url = "https://files.pythonhosted.org/packages/f3/ca/cbce2327eb9885476b3957b2e82eb12c866a8b16ad77392864ad601022ce/pillow-12.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:4ceb838d4bd9dab43e06c363cab2eebf63846d6a4aeaea283bbdfd8f1a8ed58b", size = 7182894, upload-time = "2026-02-11T04:22:48.114Z" },
- { url = "https://files.pythonhosted.org/packages/ec/d2/de599c95ba0a973b94410477f8bf0b6f0b5e67360eb89bcb1ad365258beb/pillow-12.1.1-cp314-cp314t-win_arm64.whl", hash = "sha256:7b03048319bfc6170e93bd60728a1af51d3dd7704935feb228c4d4faab35d334", size = 2546446, upload-time = "2026-02-11T04:22:50.342Z" },
- { url = "https://files.pythonhosted.org/packages/56/11/5d43209aa4cb58e0cc80127956ff1796a68b928e6324bbf06ef4db34367b/pillow-12.1.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:600fd103672b925fe62ed08e0d874ea34d692474df6f4bf7ebe148b30f89f39f", size = 5228606, upload-time = "2026-02-11T04:22:52.106Z" },
- { url = "https://files.pythonhosted.org/packages/5f/d5/3b005b4e4fda6698b371fa6c21b097d4707585d7db99e98d9b0b87ac612a/pillow-12.1.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:665e1b916b043cef294bc54d47bf02d87e13f769bc4bc5fa225a24b3a6c5aca9", size = 4622321, upload-time = "2026-02-11T04:22:53.827Z" },
- { url = "https://files.pythonhosted.org/packages/df/36/ed3ea2d594356fd8037e5a01f6156c74bc8d92dbb0fa60746cc96cabb6e8/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:495c302af3aad1ca67420ddd5c7bd480c8867ad173528767d906428057a11f0e", size = 5247579, upload-time = "2026-02-11T04:22:56.094Z" },
- { url = "https://files.pythonhosted.org/packages/54/9a/9cc3e029683cf6d20ae5085da0dafc63148e3252c2f13328e553aaa13cfb/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8fd420ef0c52c88b5a035a0886f367748c72147b2b8f384c9d12656678dfdfa9", size = 6989094, upload-time = "2026-02-11T04:22:58.288Z" },
- { url = "https://files.pythonhosted.org/packages/00/98/fc53ab36da80b88df0967896b6c4b4cd948a0dc5aa40a754266aa3ae48b3/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f975aa7ef9684ce7e2c18a3aa8f8e2106ce1e46b94ab713d156b2898811651d3", size = 5313850, upload-time = "2026-02-11T04:23:00.554Z" },
- { url = "https://files.pythonhosted.org/packages/30/02/00fa585abfd9fe9d73e5f6e554dc36cc2b842898cbfc46d70353dae227f8/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8089c852a56c2966cf18835db62d9b34fef7ba74c726ad943928d494fa7f4735", size = 5963343, upload-time = "2026-02-11T04:23:02.934Z" },
- { url = "https://files.pythonhosted.org/packages/f2/26/c56ce33ca856e358d27fda9676c055395abddb82c35ac0f593877ed4562e/pillow-12.1.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:cb9bb857b2d057c6dfc72ac5f3b44836924ba15721882ef103cecb40d002d80e", size = 7029880, upload-time = "2026-02-11T04:23:04.783Z" },
-]
-
-[[package]]
-name = "platformdirs"
-version = "4.4.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/23/e8/21db9c9987b0e728855bd57bff6984f67952bea55d6f75e055c46b5383e8/platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf", size = 21634, upload-time = "2025-08-26T14:32:04.268Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/40/4b/2028861e724d3bd36227adfa20d3fd24c3fc6d52032f4a93c133be5d17ce/platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85", size = 18654, upload-time = "2025-08-26T14:32:02.735Z" },
-]
-
-[[package]]
-name = "platformdirs"
-version = "4.9.4"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/19/56/8d4c30c8a1d07013911a8fdbd8f89440ef9f08d07a1b50ab8ca8be5a20f9/platformdirs-4.9.4.tar.gz", hash = "sha256:1ec356301b7dc906d83f371c8f487070e99d3ccf9e501686456394622a01a934", size = 28737, upload-time = "2026-03-05T18:34:13.271Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/63/d7/97f7e3a6abb67d8080dd406fd4df842c2be0efaf712d1c899c32a075027c/platformdirs-4.9.4-py3-none-any.whl", hash = "sha256:68a9a4619a666ea6439f2ff250c12a853cd1cbd5158d258bd824a7df6be2f868", size = 21216, upload-time = "2026-03-05T18:34:12.172Z" },
-]
-
-[[package]]
-name = "pluggy"
-version = "1.6.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
-]
-
-[[package]]
-name = "pooch"
-version = "1.9.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "packaging" },
- { name = "platformdirs", version = "4.4.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
- { name = "platformdirs", version = "4.9.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" },
- { name = "requests" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/83/43/85ef45e8b36c6a48546af7b266592dc32d7f67837a6514d111bced6d7d75/pooch-1.9.0.tar.gz", hash = "sha256:de46729579b9857ffd3e741987a2f6d5e0e03219892c167c6578c0091fb511ed", size = 61788, upload-time = "2026-01-30T19:15:09.649Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/2a/2d/d4bf65e47cea8ff2c794a600c4fd1273a7902f268757c531e0ee9f18aa58/pooch-1.9.0-py3-none-any.whl", hash = "sha256:f265597baa9f760d25ceb29d0beb8186c243d6607b0f60b83ecf14078dbc703b", size = 67175, upload-time = "2026-01-30T19:15:08.36Z" },
-]
-
-[[package]]
-name = "pycparser"
-version = "2.23"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" },
-]
-
-[[package]]
-name = "pycparser"
-version = "3.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-sdist = { url = "https://files.pythonhosted.org/packages/1b/7d/92392ff7815c21062bea51aa7b87d45576f649f16458d78b7cf94b9ab2e6/pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29", size = 103492, upload-time = "2026-01-21T14:26:51.89Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/0c/c3/44f3fbbfa403ea2a7c779186dc20772604442dde72947e7d01069cbe98e3/pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992", size = 48172, upload-time = "2026-01-21T14:26:50.693Z" },
-]
-
-[[package]]
-name = "pygments"
-version = "2.19.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
-]
-
-[[package]]
-name = "pyparsing"
-version = "3.3.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/f3/91/9c6ee907786a473bf81c5f53cf703ba0957b23ab84c264080fb5a450416f/pyparsing-3.3.2.tar.gz", hash = "sha256:c777f4d763f140633dcb6d8a3eda953bf7a214dc4eff598413c070bcdc117cbc", size = 6851574, upload-time = "2026-01-21T03:57:59.36Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/10/bd/c038d7cc38edc1aa5bf91ab8068b63d4308c66c4c8bb3cbba7dfbc049f9c/pyparsing-3.3.2-py3-none-any.whl", hash = "sha256:850ba148bd908d7e2411587e247a1e4f0327839c40e2e5e6d05a007ecc69911d", size = 122781, upload-time = "2026-01-21T03:57:55.912Z" },
-]
-
-[[package]]
-name = "pyright"
-version = "1.1.408"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "nodeenv" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/74/b2/5db700e52554b8f025faa9c3c624c59f1f6c8841ba81ab97641b54322f16/pyright-1.1.408.tar.gz", hash = "sha256:f28f2321f96852fa50b5829ea492f6adb0e6954568d1caa3f3af3a5f555eb684", size = 4400578, upload-time = "2026-01-08T08:07:38.795Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/0c/82/a2c93e32800940d9573fb28c346772a14778b84ba7524e691b324620ab89/pyright-1.1.408-py3-none-any.whl", hash = "sha256:090b32865f4fdb1e0e6cd82bf5618480d48eecd2eb2e70f960982a3d9a4c17c1", size = 6399144, upload-time = "2026-01-08T08:07:37.082Z" },
-]
-
-[[package]]
-name = "pytest"
-version = "8.4.2"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "colorama", marker = "python_full_version < '3.10' and sys_platform == 'win32'" },
- { name = "exceptiongroup", marker = "python_full_version < '3.10'" },
- { name = "iniconfig", version = "2.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
- { name = "packaging", marker = "python_full_version < '3.10'" },
- { name = "pluggy", marker = "python_full_version < '3.10'" },
- { name = "pygments", marker = "python_full_version < '3.10'" },
- { name = "tomli", marker = "python_full_version < '3.10'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" },
-]
-
-[[package]]
-name = "pytest"
-version = "9.0.2"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "colorama", marker = "python_full_version >= '3.10' and sys_platform == 'win32'" },
- { name = "exceptiongroup", marker = "python_full_version == '3.10.*'" },
- { name = "iniconfig", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" },
- { name = "packaging", marker = "python_full_version >= '3.10'" },
- { name = "pluggy", marker = "python_full_version >= '3.10'" },
- { name = "pygments", marker = "python_full_version >= '3.10'" },
- { name = "tomli", marker = "python_full_version == '3.10.*'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" },
-]
-
-[[package]]
-name = "python-dateutil"
-version = "2.9.0.post0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "six" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
-]
-
-[[package]]
-name = "pytz"
-version = "2026.1.post1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/56/db/b8721d71d945e6a8ac63c0fc900b2067181dbb50805958d4d4661cf7d277/pytz-2026.1.post1.tar.gz", hash = "sha256:3378dde6a0c3d26719182142c56e60c7f9af7e968076f31aae569d72a0358ee1", size = 321088, upload-time = "2026-03-03T07:47:50.683Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/10/99/781fe0c827be2742bcc775efefccb3b048a3a9c6ce9aec0cbf4a101677e5/pytz-2026.1.post1-py2.py3-none-any.whl", hash = "sha256:f2fd16142fda348286a75e1a524be810bb05d444e5a081f37f7affc635035f7a", size = 510489, upload-time = "2026-03-03T07:47:49.167Z" },
-]
-
-[[package]]
-name = "requests"
-version = "2.32.5"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "certifi" },
- { name = "charset-normalizer" },
- { name = "idna" },
- { name = "urllib3" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
-]
-
-[[package]]
-name = "ruff"
-version = "0.15.7"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a1/22/9e4f66ee588588dc6c9af6a994e12d26e19efbe874d1a909d09a6dac7a59/ruff-0.15.7.tar.gz", hash = "sha256:04f1ae61fc20fe0b148617c324d9d009b5f63412c0b16474f3d5f1a1a665f7ac", size = 4601277, upload-time = "2026-03-19T16:26:22.605Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/41/2f/0b08ced94412af091807b6119ca03755d651d3d93a242682bf020189db94/ruff-0.15.7-py3-none-linux_armv6l.whl", hash = "sha256:a81cc5b6910fb7dfc7c32d20652e50fa05963f6e13ead3c5915c41ac5d16668e", size = 10489037, upload-time = "2026-03-19T16:26:32.47Z" },
- { url = "https://files.pythonhosted.org/packages/91/4a/82e0fa632e5c8b1eba5ee86ecd929e8ff327bbdbfb3c6ac5d81631bef605/ruff-0.15.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:722d165bd52403f3bdabc0ce9e41fc47070ac56d7a91b4e0d097b516a53a3477", size = 10955433, upload-time = "2026-03-19T16:27:00.205Z" },
- { url = "https://files.pythonhosted.org/packages/ab/10/12586735d0ff42526ad78c049bf51d7428618c8b5c467e72508c694119df/ruff-0.15.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7fbc2448094262552146cbe1b9643a92f66559d3761f1ad0656d4991491af49e", size = 10269302, upload-time = "2026-03-19T16:26:26.183Z" },
- { url = "https://files.pythonhosted.org/packages/eb/5d/32b5c44ccf149a26623671df49cbfbd0a0ae511ff3df9d9d2426966a8d57/ruff-0.15.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b39329b60eba44156d138275323cc726bbfbddcec3063da57caa8a8b1d50adf", size = 10607625, upload-time = "2026-03-19T16:27:03.263Z" },
- { url = "https://files.pythonhosted.org/packages/5d/f1/f0001cabe86173aaacb6eb9bb734aa0605f9a6aa6fa7d43cb49cbc4af9c9/ruff-0.15.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:87768c151808505f2bfc93ae44e5f9e7c8518943e5074f76ac21558ef5627c85", size = 10324743, upload-time = "2026-03-19T16:27:09.791Z" },
- { url = "https://files.pythonhosted.org/packages/7a/87/b8a8f3d56b8d848008559e7c9d8bf367934d5367f6d932ba779456e2f73b/ruff-0.15.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb0511670002c6c529ec66c0e30641c976c8963de26a113f3a30456b702468b0", size = 11138536, upload-time = "2026-03-19T16:27:06.101Z" },
- { url = "https://files.pythonhosted.org/packages/e4/f2/4fd0d05aab0c5934b2e1464784f85ba2eab9d54bffc53fb5430d1ed8b829/ruff-0.15.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0d19644f801849229db8345180a71bee5407b429dd217f853ec515e968a6912", size = 11994292, upload-time = "2026-03-19T16:26:48.718Z" },
- { url = "https://files.pythonhosted.org/packages/64/22/fc4483871e767e5e95d1622ad83dad5ebb830f762ed0420fde7dfa9d9b08/ruff-0.15.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4806d8e09ef5e84eb19ba833d0442f7e300b23fe3f0981cae159a248a10f0036", size = 11398981, upload-time = "2026-03-19T16:26:54.513Z" },
- { url = "https://files.pythonhosted.org/packages/b0/99/66f0343176d5eab02c3f7fcd2de7a8e0dd7a41f0d982bee56cd1c24db62b/ruff-0.15.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dce0896488562f09a27b9c91b1f58a097457143931f3c4d519690dea54e624c5", size = 11242422, upload-time = "2026-03-19T16:26:29.277Z" },
- { url = "https://files.pythonhosted.org/packages/5d/3a/a7060f145bfdcce4c987ea27788b30c60e2c81d6e9a65157ca8afe646328/ruff-0.15.7-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:1852ce241d2bc89e5dc823e03cff4ce73d816b5c6cdadd27dbfe7b03217d2a12", size = 11232158, upload-time = "2026-03-19T16:26:42.321Z" },
- { url = "https://files.pythonhosted.org/packages/a7/53/90fbb9e08b29c048c403558d3cdd0adf2668b02ce9d50602452e187cd4af/ruff-0.15.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5f3e4b221fb4bd293f79912fc5e93a9063ebd6d0dcbd528f91b89172a9b8436c", size = 10577861, upload-time = "2026-03-19T16:26:57.459Z" },
- { url = "https://files.pythonhosted.org/packages/2f/aa/5f486226538fe4d0f0439e2da1716e1acf895e2a232b26f2459c55f8ddad/ruff-0.15.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:b15e48602c9c1d9bdc504b472e90b90c97dc7d46c7028011ae67f3861ceba7b4", size = 10327310, upload-time = "2026-03-19T16:26:35.909Z" },
- { url = "https://files.pythonhosted.org/packages/99/9e/271afdffb81fe7bfc8c43ba079e9d96238f674380099457a74ccb3863857/ruff-0.15.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:1b4705e0e85cedc74b0a23cf6a179dbb3df184cb227761979cc76c0440b5ab0d", size = 10840752, upload-time = "2026-03-19T16:26:45.723Z" },
- { url = "https://files.pythonhosted.org/packages/bf/29/a4ae78394f76c7759953c47884eb44de271b03a66634148d9f7d11e721bd/ruff-0.15.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:112c1fa316a558bb34319282c1200a8bf0495f1b735aeb78bfcb2991e6087580", size = 11336961, upload-time = "2026-03-19T16:26:39.076Z" },
- { url = "https://files.pythonhosted.org/packages/26/6b/8786ba5736562220d588a2f6653e6c17e90c59ced34a2d7b512ef8956103/ruff-0.15.7-py3-none-win32.whl", hash = "sha256:6d39e2d3505b082323352f733599f28169d12e891f7dd407f2d4f54b4c2886de", size = 10582538, upload-time = "2026-03-19T16:26:15.992Z" },
- { url = "https://files.pythonhosted.org/packages/2b/e9/346d4d3fffc6871125e877dae8d9a1966b254fbd92a50f8561078b88b099/ruff-0.15.7-py3-none-win_amd64.whl", hash = "sha256:4d53d712ddebcd7dace1bc395367aec12c057aacfe9adbb6d832302575f4d3a1", size = 11755839, upload-time = "2026-03-19T16:26:19.897Z" },
- { url = "https://files.pythonhosted.org/packages/8f/e8/726643a3ea68c727da31570bde48c7a10f1aa60eddd628d94078fec586ff/ruff-0.15.7-py3-none-win_arm64.whl", hash = "sha256:18e8d73f1c3fdf27931497972250340f92e8c861722161a9caeb89a58ead6ed2", size = 11023304, upload-time = "2026-03-19T16:26:51.669Z" },
-]
-
-[[package]]
-name = "scikit-learn"
-version = "1.6.1"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "joblib", marker = "python_full_version < '3.11'" },
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
- { name = "scipy", version = "1.13.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
- { name = "threadpoolctl", marker = "python_full_version < '3.11'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/9e/a5/4ae3b3a0755f7b35a280ac90b28817d1f380318973cff14075ab41ef50d9/scikit_learn-1.6.1.tar.gz", hash = "sha256:b4fc2525eca2c69a59260f583c56a7557c6ccdf8deafdba6e060f94c1c59738e", size = 7068312, upload-time = "2025-01-10T08:07:55.348Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/2e/3a/f4597eb41049110b21ebcbb0bcb43e4035017545daa5eedcfeb45c08b9c5/scikit_learn-1.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d056391530ccd1e501056160e3c9673b4da4805eb67eb2bdf4e983e1f9c9204e", size = 12067702, upload-time = "2025-01-10T08:05:56.515Z" },
- { url = "https://files.pythonhosted.org/packages/37/19/0423e5e1fd1c6ec5be2352ba05a537a473c1677f8188b9306097d684b327/scikit_learn-1.6.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:0c8d036eb937dbb568c6242fa598d551d88fb4399c0344d95c001980ec1c7d36", size = 11112765, upload-time = "2025-01-10T08:06:00.272Z" },
- { url = "https://files.pythonhosted.org/packages/70/95/d5cb2297a835b0f5fc9a77042b0a2d029866379091ab8b3f52cc62277808/scikit_learn-1.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8634c4bd21a2a813e0a7e3900464e6d593162a29dd35d25bdf0103b3fce60ed5", size = 12643991, upload-time = "2025-01-10T08:06:04.813Z" },
- { url = "https://files.pythonhosted.org/packages/b7/91/ab3c697188f224d658969f678be86b0968ccc52774c8ab4a86a07be13c25/scikit_learn-1.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:775da975a471c4f6f467725dff0ced5c7ac7bda5e9316b260225b48475279a1b", size = 13497182, upload-time = "2025-01-10T08:06:08.42Z" },
- { url = "https://files.pythonhosted.org/packages/17/04/d5d556b6c88886c092cc989433b2bab62488e0f0dafe616a1d5c9cb0efb1/scikit_learn-1.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:8a600c31592bd7dab31e1c61b9bbd6dea1b3433e67d264d17ce1017dbdce8002", size = 11125517, upload-time = "2025-01-10T08:06:12.783Z" },
- { url = "https://files.pythonhosted.org/packages/6c/2a/e291c29670795406a824567d1dfc91db7b699799a002fdaa452bceea8f6e/scikit_learn-1.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:72abc587c75234935e97d09aa4913a82f7b03ee0b74111dcc2881cba3c5a7b33", size = 12102620, upload-time = "2025-01-10T08:06:16.675Z" },
- { url = "https://files.pythonhosted.org/packages/25/92/ee1d7a00bb6b8c55755d4984fd82608603a3cc59959245068ce32e7fb808/scikit_learn-1.6.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:b3b00cdc8f1317b5f33191df1386c0befd16625f49d979fe77a8d44cae82410d", size = 11116234, upload-time = "2025-01-10T08:06:21.83Z" },
- { url = "https://files.pythonhosted.org/packages/30/cd/ed4399485ef364bb25f388ab438e3724e60dc218c547a407b6e90ccccaef/scikit_learn-1.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc4765af3386811c3ca21638f63b9cf5ecf66261cc4815c1db3f1e7dc7b79db2", size = 12592155, upload-time = "2025-01-10T08:06:27.309Z" },
- { url = "https://files.pythonhosted.org/packages/a8/f3/62fc9a5a659bb58a03cdd7e258956a5824bdc9b4bb3c5d932f55880be569/scikit_learn-1.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25fc636bdaf1cc2f4a124a116312d837148b5e10872147bdaf4887926b8c03d8", size = 13497069, upload-time = "2025-01-10T08:06:32.515Z" },
- { url = "https://files.pythonhosted.org/packages/a1/a6/c5b78606743a1f28eae8f11973de6613a5ee87366796583fb74c67d54939/scikit_learn-1.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:fa909b1a36e000a03c382aade0bd2063fd5680ff8b8e501660c0f59f021a6415", size = 11139809, upload-time = "2025-01-10T08:06:35.514Z" },
- { url = "https://files.pythonhosted.org/packages/0a/18/c797c9b8c10380d05616db3bfb48e2a3358c767affd0857d56c2eb501caa/scikit_learn-1.6.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:926f207c804104677af4857b2c609940b743d04c4c35ce0ddc8ff4f053cddc1b", size = 12104516, upload-time = "2025-01-10T08:06:40.009Z" },
- { url = "https://files.pythonhosted.org/packages/c4/b7/2e35f8e289ab70108f8cbb2e7a2208f0575dc704749721286519dcf35f6f/scikit_learn-1.6.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2c2cae262064e6a9b77eee1c8e768fc46aa0b8338c6a8297b9b6759720ec0ff2", size = 11167837, upload-time = "2025-01-10T08:06:43.305Z" },
- { url = "https://files.pythonhosted.org/packages/a4/f6/ff7beaeb644bcad72bcfd5a03ff36d32ee4e53a8b29a639f11bcb65d06cd/scikit_learn-1.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1061b7c028a8663fb9a1a1baf9317b64a257fcb036dae5c8752b2abef31d136f", size = 12253728, upload-time = "2025-01-10T08:06:47.618Z" },
- { url = "https://files.pythonhosted.org/packages/29/7a/8bce8968883e9465de20be15542f4c7e221952441727c4dad24d534c6d99/scikit_learn-1.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e69fab4ebfc9c9b580a7a80111b43d214ab06250f8a7ef590a4edf72464dd86", size = 13147700, upload-time = "2025-01-10T08:06:50.888Z" },
- { url = "https://files.pythonhosted.org/packages/62/27/585859e72e117fe861c2079bcba35591a84f801e21bc1ab85bce6ce60305/scikit_learn-1.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:70b1d7e85b1c96383f872a519b3375f92f14731e279a7b4c6cfd650cf5dffc52", size = 11110613, upload-time = "2025-01-10T08:06:54.115Z" },
- { url = "https://files.pythonhosted.org/packages/2e/59/8eb1872ca87009bdcdb7f3cdc679ad557b992c12f4b61f9250659e592c63/scikit_learn-1.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ffa1e9e25b3d93990e74a4be2c2fc61ee5af85811562f1288d5d055880c4322", size = 12010001, upload-time = "2025-01-10T08:06:58.613Z" },
- { url = "https://files.pythonhosted.org/packages/9d/05/f2fc4effc5b32e525408524c982c468c29d22f828834f0625c5ef3d601be/scikit_learn-1.6.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:dc5cf3d68c5a20ad6d571584c0750ec641cc46aeef1c1507be51300e6003a7e1", size = 11096360, upload-time = "2025-01-10T08:07:01.556Z" },
- { url = "https://files.pythonhosted.org/packages/c8/e4/4195d52cf4f113573fb8ebc44ed5a81bd511a92c0228889125fac2f4c3d1/scikit_learn-1.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c06beb2e839ecc641366000ca84f3cf6fa9faa1777e29cf0c04be6e4d096a348", size = 12209004, upload-time = "2025-01-10T08:07:06.931Z" },
- { url = "https://files.pythonhosted.org/packages/94/be/47e16cdd1e7fcf97d95b3cb08bde1abb13e627861af427a3651fcb80b517/scikit_learn-1.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8ca8cb270fee8f1f76fa9bfd5c3507d60c6438bbee5687f81042e2bb98e5a97", size = 13171776, upload-time = "2025-01-10T08:07:11.715Z" },
- { url = "https://files.pythonhosted.org/packages/34/b0/ca92b90859070a1487827dbc672f998da95ce83edce1270fc23f96f1f61a/scikit_learn-1.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:7a1c43c8ec9fde528d664d947dc4c0789be4077a3647f232869f41d9bf50e0fb", size = 11071865, upload-time = "2025-01-10T08:07:16.088Z" },
- { url = "https://files.pythonhosted.org/packages/12/ae/993b0fb24a356e71e9a894e42b8a9eec528d4c70217353a1cd7a48bc25d4/scikit_learn-1.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a17c1dea1d56dcda2fac315712f3651a1fea86565b64b48fa1bc090249cbf236", size = 11955804, upload-time = "2025-01-10T08:07:20.385Z" },
- { url = "https://files.pythonhosted.org/packages/d6/54/32fa2ee591af44507eac86406fa6bba968d1eb22831494470d0a2e4a1eb1/scikit_learn-1.6.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6a7aa5f9908f0f28f4edaa6963c0a6183f1911e63a69aa03782f0d924c830a35", size = 11100530, upload-time = "2025-01-10T08:07:23.675Z" },
- { url = "https://files.pythonhosted.org/packages/3f/58/55856da1adec655bdce77b502e94a267bf40a8c0b89f8622837f89503b5a/scikit_learn-1.6.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0650e730afb87402baa88afbf31c07b84c98272622aaba002559b614600ca691", size = 12433852, upload-time = "2025-01-10T08:07:26.817Z" },
- { url = "https://files.pythonhosted.org/packages/ff/4f/c83853af13901a574f8f13b645467285a48940f185b690936bb700a50863/scikit_learn-1.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:3f59fe08dc03ea158605170eb52b22a105f238a5d512c4470ddeca71feae8e5f", size = 11337256, upload-time = "2025-01-10T08:07:31.084Z" },
- { url = "https://files.pythonhosted.org/packages/d2/37/b305b759cc65829fe1b8853ff3e308b12cdd9d8884aa27840835560f2b42/scikit_learn-1.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6849dd3234e87f55dce1db34c89a810b489ead832aaf4d4550b7ea85628be6c1", size = 12101868, upload-time = "2025-01-10T08:07:34.189Z" },
- { url = "https://files.pythonhosted.org/packages/83/74/f64379a4ed5879d9db744fe37cfe1978c07c66684d2439c3060d19a536d8/scikit_learn-1.6.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e7be3fa5d2eb9be7d77c3734ff1d599151bb523674be9b834e8da6abe132f44e", size = 11144062, upload-time = "2025-01-10T08:07:37.67Z" },
- { url = "https://files.pythonhosted.org/packages/fd/dc/d5457e03dc9c971ce2b0d750e33148dd060fefb8b7dc71acd6054e4bb51b/scikit_learn-1.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44a17798172df1d3c1065e8fcf9019183f06c87609b49a124ebdf57ae6cb0107", size = 12693173, upload-time = "2025-01-10T08:07:42.713Z" },
- { url = "https://files.pythonhosted.org/packages/79/35/b1d2188967c3204c78fa79c9263668cf1b98060e8e58d1a730fe5b2317bb/scikit_learn-1.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8b7a3b86e411e4bce21186e1c180d792f3d99223dcfa3b4f597ecc92fa1a422", size = 13518605, upload-time = "2025-01-10T08:07:46.551Z" },
- { url = "https://files.pythonhosted.org/packages/fb/d8/8d603bdd26601f4b07e2363032b8565ab82eb857f93d86d0f7956fcf4523/scikit_learn-1.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:7a73d457070e3318e32bdb3aa79a8d990474f19035464dfd8bede2883ab5dc3b", size = 11155078, upload-time = "2025-01-10T08:07:51.376Z" },
-]
-
-[[package]]
-name = "scikit-learn"
-version = "1.8.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "joblib", marker = "python_full_version >= '3.11'" },
- { name = "numpy", version = "2.4.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
- { name = "scipy", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
- { name = "threadpoolctl", marker = "python_full_version >= '3.11'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/0e/d4/40988bf3b8e34feec1d0e6a051446b1f66225f8529b9309becaeef62b6c4/scikit_learn-1.8.0.tar.gz", hash = "sha256:9bccbb3b40e3de10351f8f5068e105d0f4083b1a65fa07b6634fbc401a6287fd", size = 7335585, upload-time = "2025-12-10T07:08:53.618Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c9/92/53ea2181da8ac6bf27170191028aee7251f8f841f8d3edbfdcaf2008fde9/scikit_learn-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:146b4d36f800c013d267b29168813f7a03a43ecd2895d04861f1240b564421da", size = 8595835, upload-time = "2025-12-10T07:07:39.385Z" },
- { url = "https://files.pythonhosted.org/packages/01/18/d154dc1638803adf987910cdd07097d9c526663a55666a97c124d09fb96a/scikit_learn-1.8.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:f984ca4b14914e6b4094c5d52a32ea16b49832c03bd17a110f004db3c223e8e1", size = 8080381, upload-time = "2025-12-10T07:07:41.93Z" },
- { url = "https://files.pythonhosted.org/packages/8a/44/226142fcb7b7101e64fdee5f49dbe6288d4c7af8abf593237b70fca080a4/scikit_learn-1.8.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5e30adb87f0cc81c7690a84f7932dd66be5bac57cfe16b91cb9151683a4a2d3b", size = 8799632, upload-time = "2025-12-10T07:07:43.899Z" },
- { url = "https://files.pythonhosted.org/packages/36/4d/4a67f30778a45d542bbea5db2dbfa1e9e100bf9ba64aefe34215ba9f11f6/scikit_learn-1.8.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ada8121bcb4dac28d930febc791a69f7cb1673c8495e5eee274190b73a4559c1", size = 9103788, upload-time = "2025-12-10T07:07:45.982Z" },
- { url = "https://files.pythonhosted.org/packages/89/3c/45c352094cfa60050bcbb967b1faf246b22e93cb459f2f907b600f2ceda5/scikit_learn-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:c57b1b610bd1f40ba43970e11ce62821c2e6569e4d74023db19c6b26f246cb3b", size = 8081706, upload-time = "2025-12-10T07:07:48.111Z" },
- { url = "https://files.pythonhosted.org/packages/3d/46/5416595bb395757f754feb20c3d776553a386b661658fb21b7c814e89efe/scikit_learn-1.8.0-cp311-cp311-win_arm64.whl", hash = "sha256:2838551e011a64e3053ad7618dda9310175f7515f1742fa2d756f7c874c05961", size = 7688451, upload-time = "2025-12-10T07:07:49.873Z" },
- { url = "https://files.pythonhosted.org/packages/90/74/e6a7cc4b820e95cc38cf36cd74d5aa2b42e8ffc2d21fe5a9a9c45c1c7630/scikit_learn-1.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5fb63362b5a7ddab88e52b6dbb47dac3fd7dafeee740dc6c8d8a446ddedade8e", size = 8548242, upload-time = "2025-12-10T07:07:51.568Z" },
- { url = "https://files.pythonhosted.org/packages/49/d8/9be608c6024d021041c7f0b3928d4749a706f4e2c3832bbede4fb4f58c95/scikit_learn-1.8.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:5025ce924beccb28298246e589c691fe1b8c1c96507e6d27d12c5fadd85bfd76", size = 8079075, upload-time = "2025-12-10T07:07:53.697Z" },
- { url = "https://files.pythonhosted.org/packages/dd/47/f187b4636ff80cc63f21cd40b7b2d177134acaa10f6bb73746130ee8c2e5/scikit_learn-1.8.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4496bb2cf7a43ce1a2d7524a79e40bc5da45cf598dbf9545b7e8316ccba47bb4", size = 8660492, upload-time = "2025-12-10T07:07:55.574Z" },
- { url = "https://files.pythonhosted.org/packages/97/74/b7a304feb2b49df9fafa9382d4d09061a96ee9a9449a7cbea7988dda0828/scikit_learn-1.8.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0bcfe4d0d14aec44921545fd2af2338c7471de9cb701f1da4c9d85906ab847a", size = 8931904, upload-time = "2025-12-10T07:07:57.666Z" },
- { url = "https://files.pythonhosted.org/packages/9f/c4/0ab22726a04ede56f689476b760f98f8f46607caecff993017ac1b64aa5d/scikit_learn-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:35c007dedb2ffe38fe3ee7d201ebac4a2deccd2408e8621d53067733e3c74809", size = 8019359, upload-time = "2025-12-10T07:07:59.838Z" },
- { url = "https://files.pythonhosted.org/packages/24/90/344a67811cfd561d7335c1b96ca21455e7e472d281c3c279c4d3f2300236/scikit_learn-1.8.0-cp312-cp312-win_arm64.whl", hash = "sha256:8c497fff237d7b4e07e9ef1a640887fa4fb765647f86fbe00f969ff6280ce2bb", size = 7641898, upload-time = "2025-12-10T07:08:01.36Z" },
- { url = "https://files.pythonhosted.org/packages/03/aa/e22e0768512ce9255eba34775be2e85c2048da73da1193e841707f8f039c/scikit_learn-1.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0d6ae97234d5d7079dc0040990a6f7aeb97cb7fa7e8945f1999a429b23569e0a", size = 8513770, upload-time = "2025-12-10T07:08:03.251Z" },
- { url = "https://files.pythonhosted.org/packages/58/37/31b83b2594105f61a381fc74ca19e8780ee923be2d496fcd8d2e1147bd99/scikit_learn-1.8.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:edec98c5e7c128328124a029bceb09eda2d526997780fef8d65e9a69eead963e", size = 8044458, upload-time = "2025-12-10T07:08:05.336Z" },
- { url = "https://files.pythonhosted.org/packages/2d/5a/3f1caed8765f33eabb723596666da4ebbf43d11e96550fb18bdec42b467b/scikit_learn-1.8.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:74b66d8689d52ed04c271e1329f0c61635bcaf5b926db9b12d58914cdc01fe57", size = 8610341, upload-time = "2025-12-10T07:08:07.732Z" },
- { url = "https://files.pythonhosted.org/packages/38/cf/06896db3f71c75902a8e9943b444a56e727418f6b4b4a90c98c934f51ed4/scikit_learn-1.8.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8fdf95767f989b0cfedb85f7ed8ca215d4be728031f56ff5a519ee1e3276dc2e", size = 8900022, upload-time = "2025-12-10T07:08:09.862Z" },
- { url = "https://files.pythonhosted.org/packages/1c/f9/9b7563caf3ec8873e17a31401858efab6b39a882daf6c1bfa88879c0aa11/scikit_learn-1.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:2de443b9373b3b615aec1bb57f9baa6bb3a9bd093f1269ba95c17d870422b271", size = 7989409, upload-time = "2025-12-10T07:08:12.028Z" },
- { url = "https://files.pythonhosted.org/packages/49/bd/1f4001503650e72c4f6009ac0c4413cb17d2d601cef6f71c0453da2732fc/scikit_learn-1.8.0-cp313-cp313-win_arm64.whl", hash = "sha256:eddde82a035681427cbedded4e6eff5e57fa59216c2e3e90b10b19ab1d0a65c3", size = 7619760, upload-time = "2025-12-10T07:08:13.688Z" },
- { url = "https://files.pythonhosted.org/packages/d2/7d/a630359fc9dcc95496588c8d8e3245cc8fd81980251079bc09c70d41d951/scikit_learn-1.8.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7cc267b6108f0a1499a734167282c00c4ebf61328566b55ef262d48e9849c735", size = 8826045, upload-time = "2025-12-10T07:08:15.215Z" },
- { url = "https://files.pythonhosted.org/packages/cc/56/a0c86f6930cfcd1c7054a2bc417e26960bb88d32444fe7f71d5c2cfae891/scikit_learn-1.8.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:fe1c011a640a9f0791146011dfd3c7d9669785f9fed2b2a5f9e207536cf5c2fd", size = 8420324, upload-time = "2025-12-10T07:08:17.561Z" },
- { url = "https://files.pythonhosted.org/packages/46/1e/05962ea1cebc1cf3876667ecb14c283ef755bf409993c5946ade3b77e303/scikit_learn-1.8.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:72358cce49465d140cc4e7792015bb1f0296a9742d5622c67e31399b75468b9e", size = 8680651, upload-time = "2025-12-10T07:08:19.952Z" },
- { url = "https://files.pythonhosted.org/packages/fe/56/a85473cd75f200c9759e3a5f0bcab2d116c92a8a02ee08ccd73b870f8bb4/scikit_learn-1.8.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:80832434a6cc114f5219211eec13dcbc16c2bac0e31ef64c6d346cde3cf054cb", size = 8925045, upload-time = "2025-12-10T07:08:22.11Z" },
- { url = "https://files.pythonhosted.org/packages/cc/b7/64d8cfa896c64435ae57f4917a548d7ac7a44762ff9802f75a79b77cb633/scikit_learn-1.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ee787491dbfe082d9c3013f01f5991658b0f38aa8177e4cd4bf434c58f551702", size = 8507994, upload-time = "2025-12-10T07:08:23.943Z" },
- { url = "https://files.pythonhosted.org/packages/5e/37/e192ea709551799379958b4c4771ec507347027bb7c942662c7fbeba31cb/scikit_learn-1.8.0-cp313-cp313t-win_arm64.whl", hash = "sha256:bf97c10a3f5a7543f9b88cbf488d33d175e9146115a451ae34568597ba33dcde", size = 7869518, upload-time = "2025-12-10T07:08:25.71Z" },
- { url = "https://files.pythonhosted.org/packages/24/05/1af2c186174cc92dcab2233f327336058c077d38f6fe2aceb08e6ab4d509/scikit_learn-1.8.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:c22a2da7a198c28dd1a6e1136f19c830beab7fdca5b3e5c8bba8394f8a5c45b3", size = 8528667, upload-time = "2025-12-10T07:08:27.541Z" },
- { url = "https://files.pythonhosted.org/packages/a8/25/01c0af38fe969473fb292bba9dc2b8f9b451f3112ff242c647fee3d0dfe7/scikit_learn-1.8.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:6b595b07a03069a2b1740dc08c2299993850ea81cce4fe19b2421e0c970de6b7", size = 8066524, upload-time = "2025-12-10T07:08:29.822Z" },
- { url = "https://files.pythonhosted.org/packages/be/ce/a0623350aa0b68647333940ee46fe45086c6060ec604874e38e9ab7d8e6c/scikit_learn-1.8.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:29ffc74089f3d5e87dfca4c2c8450f88bdc61b0fc6ed5d267f3988f19a1309f6", size = 8657133, upload-time = "2025-12-10T07:08:31.865Z" },
- { url = "https://files.pythonhosted.org/packages/b8/cb/861b41341d6f1245e6ca80b1c1a8c4dfce43255b03df034429089ca2a2c5/scikit_learn-1.8.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fb65db5d7531bccf3a4f6bec3462223bea71384e2cda41da0f10b7c292b9e7c4", size = 8923223, upload-time = "2025-12-10T07:08:34.166Z" },
- { url = "https://files.pythonhosted.org/packages/76/18/a8def8f91b18cd1ba6e05dbe02540168cb24d47e8dcf69e8d00b7da42a08/scikit_learn-1.8.0-cp314-cp314-win_amd64.whl", hash = "sha256:56079a99c20d230e873ea40753102102734c5953366972a71d5cb39a32bc40c6", size = 8096518, upload-time = "2025-12-10T07:08:36.339Z" },
- { url = "https://files.pythonhosted.org/packages/d1/77/482076a678458307f0deb44e29891d6022617b2a64c840c725495bee343f/scikit_learn-1.8.0-cp314-cp314-win_arm64.whl", hash = "sha256:3bad7565bc9cf37ce19a7c0d107742b320c1285df7aab1a6e2d28780df167242", size = 7754546, upload-time = "2025-12-10T07:08:38.128Z" },
- { url = "https://files.pythonhosted.org/packages/2d/d1/ef294ca754826daa043b2a104e59960abfab4cf653891037d19dd5b6f3cf/scikit_learn-1.8.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:4511be56637e46c25721e83d1a9cea9614e7badc7040c4d573d75fbe257d6fd7", size = 8848305, upload-time = "2025-12-10T07:08:41.013Z" },
- { url = "https://files.pythonhosted.org/packages/5b/e2/b1f8b05138ee813b8e1a4149f2f0d289547e60851fd1bb268886915adbda/scikit_learn-1.8.0-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:a69525355a641bf8ef136a7fa447672fb54fe8d60cab5538d9eb7c6438543fb9", size = 8432257, upload-time = "2025-12-10T07:08:42.873Z" },
- { url = "https://files.pythonhosted.org/packages/26/11/c32b2138a85dcb0c99f6afd13a70a951bfdff8a6ab42d8160522542fb647/scikit_learn-1.8.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c2656924ec73e5939c76ac4c8b026fc203b83d8900362eb2599d8aee80e4880f", size = 8678673, upload-time = "2025-12-10T07:08:45.362Z" },
- { url = "https://files.pythonhosted.org/packages/c7/57/51f2384575bdec454f4fe4e7a919d696c9ebce914590abf3e52d47607ab8/scikit_learn-1.8.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15fc3b5d19cc2be65404786857f2e13c70c83dd4782676dd6814e3b89dc8f5b9", size = 8922467, upload-time = "2025-12-10T07:08:47.408Z" },
- { url = "https://files.pythonhosted.org/packages/35/4d/748c9e2872637a57981a04adc038dacaa16ba8ca887b23e34953f0b3f742/scikit_learn-1.8.0-cp314-cp314t-win_amd64.whl", hash = "sha256:00d6f1d66fbcf4eba6e356e1420d33cc06c70a45bb1363cd6f6a8e4ebbbdece2", size = 8774395, upload-time = "2025-12-10T07:08:49.337Z" },
- { url = "https://files.pythonhosted.org/packages/60/22/d7b2ebe4704a5e50790ba089d5c2ae308ab6bb852719e6c3bd4f04c3a363/scikit_learn-1.8.0-cp314-cp314t-win_arm64.whl", hash = "sha256:f28dd15c6bb0b66ba09728cf09fd8736c304be29409bd8445a080c1280619e8c", size = 8002647, upload-time = "2025-12-10T07:08:51.601Z" },
-]
-
-[[package]]
-name = "scipy"
-version = "1.13.1"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/ae/00/48c2f661e2816ccf2ecd77982f6605b2950afe60f60a52b4cbbc2504aa8f/scipy-1.13.1.tar.gz", hash = "sha256:095a87a0312b08dfd6a6155cbbd310a8c51800fc931b8c0b84003014b874ed3c", size = 57210720, upload-time = "2024-05-23T03:29:26.079Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/33/59/41b2529908c002ade869623b87eecff3e11e3ce62e996d0bdcb536984187/scipy-1.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:20335853b85e9a49ff7572ab453794298bcf0354d8068c5f6775a0eabf350aca", size = 39328076, upload-time = "2024-05-23T03:19:01.687Z" },
- { url = "https://files.pythonhosted.org/packages/d5/33/f1307601f492f764062ce7dd471a14750f3360e33cd0f8c614dae208492c/scipy-1.13.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d605e9c23906d1994f55ace80e0125c587f96c020037ea6aa98d01b4bd2e222f", size = 30306232, upload-time = "2024-05-23T03:19:09.089Z" },
- { url = "https://files.pythonhosted.org/packages/c0/66/9cd4f501dd5ea03e4a4572ecd874936d0da296bd04d1c45ae1a4a75d9c3a/scipy-1.13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfa31f1def5c819b19ecc3a8b52d28ffdcc7ed52bb20c9a7589669dd3c250989", size = 33743202, upload-time = "2024-05-23T03:19:15.138Z" },
- { url = "https://files.pythonhosted.org/packages/a3/ba/7255e5dc82a65adbe83771c72f384d99c43063648456796436c9a5585ec3/scipy-1.13.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26264b282b9da0952a024ae34710c2aff7d27480ee91a2e82b7b7073c24722f", size = 38577335, upload-time = "2024-05-23T03:19:21.984Z" },
- { url = "https://files.pythonhosted.org/packages/49/a5/bb9ded8326e9f0cdfdc412eeda1054b914dfea952bda2097d174f8832cc0/scipy-1.13.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:eccfa1906eacc02de42d70ef4aecea45415f5be17e72b61bafcfd329bdc52e94", size = 38820728, upload-time = "2024-05-23T03:19:28.225Z" },
- { url = "https://files.pythonhosted.org/packages/12/30/df7a8fcc08f9b4a83f5f27cfaaa7d43f9a2d2ad0b6562cced433e5b04e31/scipy-1.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:2831f0dc9c5ea9edd6e51e6e769b655f08ec6db6e2e10f86ef39bd32eb11da54", size = 46210588, upload-time = "2024-05-23T03:19:35.661Z" },
- { url = "https://files.pythonhosted.org/packages/b4/15/4a4bb1b15bbd2cd2786c4f46e76b871b28799b67891f23f455323a0cdcfb/scipy-1.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:27e52b09c0d3a1d5b63e1105f24177e544a222b43611aaf5bc44d4a0979e32f9", size = 39333805, upload-time = "2024-05-23T03:19:43.081Z" },
- { url = "https://files.pythonhosted.org/packages/ba/92/42476de1af309c27710004f5cdebc27bec62c204db42e05b23a302cb0c9a/scipy-1.13.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:54f430b00f0133e2224c3ba42b805bfd0086fe488835effa33fa291561932326", size = 30317687, upload-time = "2024-05-23T03:19:48.799Z" },
- { url = "https://files.pythonhosted.org/packages/80/ba/8be64fe225360a4beb6840f3cbee494c107c0887f33350d0a47d55400b01/scipy-1.13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e89369d27f9e7b0884ae559a3a956e77c02114cc60a6058b4e5011572eea9299", size = 33694638, upload-time = "2024-05-23T03:19:55.104Z" },
- { url = "https://files.pythonhosted.org/packages/36/07/035d22ff9795129c5a847c64cb43c1fa9188826b59344fee28a3ab02e283/scipy-1.13.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a78b4b3345f1b6f68a763c6e25c0c9a23a9fd0f39f5f3d200efe8feda560a5fa", size = 38569931, upload-time = "2024-05-23T03:20:01.82Z" },
- { url = "https://files.pythonhosted.org/packages/d9/10/f9b43de37e5ed91facc0cfff31d45ed0104f359e4f9a68416cbf4e790241/scipy-1.13.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:45484bee6d65633752c490404513b9ef02475b4284c4cfab0ef946def50b3f59", size = 38838145, upload-time = "2024-05-23T03:20:09.173Z" },
- { url = "https://files.pythonhosted.org/packages/4a/48/4513a1a5623a23e95f94abd675ed91cfb19989c58e9f6f7d03990f6caf3d/scipy-1.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:5713f62f781eebd8d597eb3f88b8bf9274e79eeabf63afb4a737abc6c84ad37b", size = 46196227, upload-time = "2024-05-23T03:20:16.433Z" },
- { url = "https://files.pythonhosted.org/packages/f2/7b/fb6b46fbee30fc7051913068758414f2721003a89dd9a707ad49174e3843/scipy-1.13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5d72782f39716b2b3509cd7c33cdc08c96f2f4d2b06d51e52fb45a19ca0c86a1", size = 39357301, upload-time = "2024-05-23T03:20:23.538Z" },
- { url = "https://files.pythonhosted.org/packages/dc/5a/2043a3bde1443d94014aaa41e0b50c39d046dda8360abd3b2a1d3f79907d/scipy-1.13.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:017367484ce5498445aade74b1d5ab377acdc65e27095155e448c88497755a5d", size = 30363348, upload-time = "2024-05-23T03:20:29.885Z" },
- { url = "https://files.pythonhosted.org/packages/e7/cb/26e4a47364bbfdb3b7fb3363be6d8a1c543bcd70a7753ab397350f5f189a/scipy-1.13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:949ae67db5fa78a86e8fa644b9a6b07252f449dcf74247108c50e1d20d2b4627", size = 33406062, upload-time = "2024-05-23T03:20:36.012Z" },
- { url = "https://files.pythonhosted.org/packages/88/ab/6ecdc526d509d33814835447bbbeedbebdec7cca46ef495a61b00a35b4bf/scipy-1.13.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de3ade0e53bc1f21358aa74ff4830235d716211d7d077e340c7349bc3542e884", size = 38218311, upload-time = "2024-05-23T03:20:42.086Z" },
- { url = "https://files.pythonhosted.org/packages/0b/00/9f54554f0f8318100a71515122d8f4f503b1a2c4b4cfab3b4b68c0eb08fa/scipy-1.13.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2ac65fb503dad64218c228e2dc2d0a0193f7904747db43014645ae139c8fad16", size = 38442493, upload-time = "2024-05-23T03:20:48.292Z" },
- { url = "https://files.pythonhosted.org/packages/3e/df/963384e90733e08eac978cd103c34df181d1fec424de383cdc443f418dd4/scipy-1.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:cdd7dacfb95fea358916410ec61bbc20440f7860333aee6d882bb8046264e949", size = 45910955, upload-time = "2024-05-23T03:20:55.091Z" },
- { url = "https://files.pythonhosted.org/packages/7f/29/c2ea58c9731b9ecb30b6738113a95d147e83922986b34c685b8f6eefde21/scipy-1.13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:436bbb42a94a8aeef855d755ce5a465479c721e9d684de76bf61a62e7c2b81d5", size = 39352927, upload-time = "2024-05-23T03:21:01.95Z" },
- { url = "https://files.pythonhosted.org/packages/5c/c0/e71b94b20ccf9effb38d7147c0064c08c622309fd487b1b677771a97d18c/scipy-1.13.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:8335549ebbca860c52bf3d02f80784e91a004b71b059e3eea9678ba994796a24", size = 30324538, upload-time = "2024-05-23T03:21:07.634Z" },
- { url = "https://files.pythonhosted.org/packages/6d/0f/aaa55b06d474817cea311e7b10aab2ea1fd5d43bc6a2861ccc9caec9f418/scipy-1.13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d533654b7d221a6a97304ab63c41c96473ff04459e404b83275b60aa8f4b7004", size = 33732190, upload-time = "2024-05-23T03:21:14.41Z" },
- { url = "https://files.pythonhosted.org/packages/35/f5/d0ad1a96f80962ba65e2ce1de6a1e59edecd1f0a7b55990ed208848012e0/scipy-1.13.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637e98dcf185ba7f8e663e122ebf908c4702420477ae52a04f9908707456ba4d", size = 38612244, upload-time = "2024-05-23T03:21:21.827Z" },
- { url = "https://files.pythonhosted.org/packages/8d/02/1165905f14962174e6569076bcc3315809ae1291ed14de6448cc151eedfd/scipy-1.13.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a014c2b3697bde71724244f63de2476925596c24285c7a637364761f8710891c", size = 38845637, upload-time = "2024-05-23T03:21:28.729Z" },
- { url = "https://files.pythonhosted.org/packages/3e/77/dab54fe647a08ee4253963bcd8f9cf17509c8ca64d6335141422fe2e2114/scipy-1.13.1-cp39-cp39-win_amd64.whl", hash = "sha256:392e4ec766654852c25ebad4f64e4e584cf19820b980bc04960bca0b0cd6eaa2", size = 46227440, upload-time = "2024-05-23T03:21:35.888Z" },
-]
-
-[[package]]
-name = "scipy"
-version = "1.17.1"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "numpy", version = "2.4.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/7a/97/5a3609c4f8d58b039179648e62dd220f89864f56f7357f5d4f45c29eb2cc/scipy-1.17.1.tar.gz", hash = "sha256:95d8e012d8cb8816c226aef832200b1d45109ed4464303e997c5b13122b297c0", size = 30573822, upload-time = "2026-02-23T00:26:24.851Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/df/75/b4ce781849931fef6fd529afa6b63711d5a733065722d0c3e2724af9e40a/scipy-1.17.1-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:1f95b894f13729334fb990162e911c9e5dc1ab390c58aa6cbecb389c5b5e28ec", size = 31613675, upload-time = "2026-02-23T00:16:00.13Z" },
- { url = "https://files.pythonhosted.org/packages/f7/58/bccc2861b305abdd1b8663d6130c0b3d7cc22e8d86663edbc8401bfd40d4/scipy-1.17.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:e18f12c6b0bc5a592ed23d3f7b891f68fd7f8241d69b7883769eb5d5dfb52696", size = 28162057, upload-time = "2026-02-23T00:16:09.456Z" },
- { url = "https://files.pythonhosted.org/packages/6d/ee/18146b7757ed4976276b9c9819108adbc73c5aad636e5353e20746b73069/scipy-1.17.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:a3472cfbca0a54177d0faa68f697d8ba4c80bbdc19908c3465556d9f7efce9ee", size = 20334032, upload-time = "2026-02-23T00:16:17.358Z" },
- { url = "https://files.pythonhosted.org/packages/ec/e6/cef1cf3557f0c54954198554a10016b6a03b2ec9e22a4e1df734936bd99c/scipy-1.17.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:766e0dc5a616d026a3a1cffa379af959671729083882f50307e18175797b3dfd", size = 22709533, upload-time = "2026-02-23T00:16:25.791Z" },
- { url = "https://files.pythonhosted.org/packages/4d/60/8804678875fc59362b0fb759ab3ecce1f09c10a735680318ac30da8cd76b/scipy-1.17.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:744b2bf3640d907b79f3fd7874efe432d1cf171ee721243e350f55234b4cec4c", size = 33062057, upload-time = "2026-02-23T00:16:36.931Z" },
- { url = "https://files.pythonhosted.org/packages/09/7d/af933f0f6e0767995b4e2d705a0665e454d1c19402aa7e895de3951ebb04/scipy-1.17.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43af8d1f3bea642559019edfe64e9b11192a8978efbd1539d7bc2aaa23d92de4", size = 35349300, upload-time = "2026-02-23T00:16:49.108Z" },
- { url = "https://files.pythonhosted.org/packages/b4/3d/7ccbbdcbb54c8fdc20d3b6930137c782a163fa626f0aef920349873421ba/scipy-1.17.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd96a1898c0a47be4520327e01f874acfd61fb48a9420f8aa9f6483412ffa444", size = 35127333, upload-time = "2026-02-23T00:17:01.293Z" },
- { url = "https://files.pythonhosted.org/packages/e8/19/f926cb11c42b15ba08e3a71e376d816ac08614f769b4f47e06c3580c836a/scipy-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4eb6c25dd62ee8d5edf68a8e1c171dd71c292fdae95d8aeb3dd7d7de4c364082", size = 37741314, upload-time = "2026-02-23T00:17:12.576Z" },
- { url = "https://files.pythonhosted.org/packages/95/da/0d1df507cf574b3f224ccc3d45244c9a1d732c81dcb26b1e8a766ae271a8/scipy-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:d30e57c72013c2a4fe441c2fcb8e77b14e152ad48b5464858e07e2ad9fbfceff", size = 36607512, upload-time = "2026-02-23T00:17:23.424Z" },
- { url = "https://files.pythonhosted.org/packages/68/7f/bdd79ceaad24b671543ffe0ef61ed8e659440eb683b66f033454dcee90eb/scipy-1.17.1-cp311-cp311-win_arm64.whl", hash = "sha256:9ecb4efb1cd6e8c4afea0daa91a87fbddbce1b99d2895d151596716c0b2e859d", size = 24599248, upload-time = "2026-02-23T00:17:34.561Z" },
- { url = "https://files.pythonhosted.org/packages/35/48/b992b488d6f299dbe3f11a20b24d3dda3d46f1a635ede1c46b5b17a7b163/scipy-1.17.1-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:35c3a56d2ef83efc372eaec584314bd0ef2e2f0d2adb21c55e6ad5b344c0dcb8", size = 31610954, upload-time = "2026-02-23T00:17:49.855Z" },
- { url = "https://files.pythonhosted.org/packages/b2/02/cf107b01494c19dc100f1d0b7ac3cc08666e96ba2d64db7626066cee895e/scipy-1.17.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:fcb310ddb270a06114bb64bbe53c94926b943f5b7f0842194d585c65eb4edd76", size = 28172662, upload-time = "2026-02-23T00:18:01.64Z" },
- { url = "https://files.pythonhosted.org/packages/cf/a9/599c28631bad314d219cf9ffd40e985b24d603fc8a2f4ccc5ae8419a535b/scipy-1.17.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:cc90d2e9c7e5c7f1a482c9875007c095c3194b1cfedca3c2f3291cdc2bc7c086", size = 20344366, upload-time = "2026-02-23T00:18:12.015Z" },
- { url = "https://files.pythonhosted.org/packages/35/f5/906eda513271c8deb5af284e5ef0206d17a96239af79f9fa0aebfe0e36b4/scipy-1.17.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:c80be5ede8f3f8eded4eff73cc99a25c388ce98e555b17d31da05287015ffa5b", size = 22704017, upload-time = "2026-02-23T00:18:21.502Z" },
- { url = "https://files.pythonhosted.org/packages/da/34/16f10e3042d2f1d6b66e0428308ab52224b6a23049cb2f5c1756f713815f/scipy-1.17.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e19ebea31758fac5893a2ac360fedd00116cbb7628e650842a6691ba7ca28a21", size = 32927842, upload-time = "2026-02-23T00:18:35.367Z" },
- { url = "https://files.pythonhosted.org/packages/01/8e/1e35281b8ab6d5d72ebe9911edcdffa3f36b04ed9d51dec6dd140396e220/scipy-1.17.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02ae3b274fde71c5e92ac4d54bc06c42d80e399fec704383dcd99b301df37458", size = 35235890, upload-time = "2026-02-23T00:18:49.188Z" },
- { url = "https://files.pythonhosted.org/packages/c5/5c/9d7f4c88bea6e0d5a4f1bc0506a53a00e9fcb198de372bfe4d3652cef482/scipy-1.17.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8a604bae87c6195d8b1045eddece0514d041604b14f2727bbc2b3020172045eb", size = 35003557, upload-time = "2026-02-23T00:18:54.74Z" },
- { url = "https://files.pythonhosted.org/packages/65/94/7698add8f276dbab7a9de9fb6b0e02fc13ee61d51c7c3f85ac28b65e1239/scipy-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f590cd684941912d10becc07325a3eeb77886fe981415660d9265c4c418d0bea", size = 37625856, upload-time = "2026-02-23T00:19:00.307Z" },
- { url = "https://files.pythonhosted.org/packages/a2/84/dc08d77fbf3d87d3ee27f6a0c6dcce1de5829a64f2eae85a0ecc1f0daa73/scipy-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:41b71f4a3a4cab9d366cd9065b288efc4d4f3c0b37a91a8e0947fb5bd7f31d87", size = 36549682, upload-time = "2026-02-23T00:19:07.67Z" },
- { url = "https://files.pythonhosted.org/packages/bc/98/fe9ae9ffb3b54b62559f52dedaebe204b408db8109a8c66fdd04869e6424/scipy-1.17.1-cp312-cp312-win_arm64.whl", hash = "sha256:f4115102802df98b2b0db3cce5cb9b92572633a1197c77b7553e5203f284a5b3", size = 24547340, upload-time = "2026-02-23T00:19:12.024Z" },
- { url = "https://files.pythonhosted.org/packages/76/27/07ee1b57b65e92645f219b37148a7e7928b82e2b5dbeccecb4dff7c64f0b/scipy-1.17.1-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:5e3c5c011904115f88a39308379c17f91546f77c1667cea98739fe0fccea804c", size = 31590199, upload-time = "2026-02-23T00:19:17.192Z" },
- { url = "https://files.pythonhosted.org/packages/ec/ae/db19f8ab842e9b724bf5dbb7db29302a91f1e55bc4d04b1025d6d605a2c5/scipy-1.17.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:6fac755ca3d2c3edcb22f479fceaa241704111414831ddd3bc6056e18516892f", size = 28154001, upload-time = "2026-02-23T00:19:22.241Z" },
- { url = "https://files.pythonhosted.org/packages/5b/58/3ce96251560107b381cbd6e8413c483bbb1228a6b919fa8652b0d4090e7f/scipy-1.17.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:7ff200bf9d24f2e4d5dc6ee8c3ac64d739d3a89e2326ba68aaf6c4a2b838fd7d", size = 20325719, upload-time = "2026-02-23T00:19:26.329Z" },
- { url = "https://files.pythonhosted.org/packages/b2/83/15087d945e0e4d48ce2377498abf5ad171ae013232ae31d06f336e64c999/scipy-1.17.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:4b400bdc6f79fa02a4d86640310dde87a21fba0c979efff5248908c6f15fad1b", size = 22683595, upload-time = "2026-02-23T00:19:30.304Z" },
- { url = "https://files.pythonhosted.org/packages/b4/e0/e58fbde4a1a594c8be8114eb4aac1a55bcd6587047efc18a61eb1f5c0d30/scipy-1.17.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2b64ca7d4aee0102a97f3ba22124052b4bd2152522355073580bf4845e2550b6", size = 32896429, upload-time = "2026-02-23T00:19:35.536Z" },
- { url = "https://files.pythonhosted.org/packages/f5/5f/f17563f28ff03c7b6799c50d01d5d856a1d55f2676f537ca8d28c7f627cd/scipy-1.17.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:581b2264fc0aa555f3f435a5944da7504ea3a065d7029ad60e7c3d1ae09c5464", size = 35203952, upload-time = "2026-02-23T00:19:42.259Z" },
- { url = "https://files.pythonhosted.org/packages/8d/a5/9afd17de24f657fdfe4df9a3f1ea049b39aef7c06000c13db1530d81ccca/scipy-1.17.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:beeda3d4ae615106d7094f7e7cef6218392e4465cc95d25f900bebabfded0950", size = 34979063, upload-time = "2026-02-23T00:19:47.547Z" },
- { url = "https://files.pythonhosted.org/packages/8b/13/88b1d2384b424bf7c924f2038c1c409f8d88bb2a8d49d097861dd64a57b2/scipy-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6609bc224e9568f65064cfa72edc0f24ee6655b47575954ec6339534b2798369", size = 37598449, upload-time = "2026-02-23T00:19:53.238Z" },
- { url = "https://files.pythonhosted.org/packages/35/e5/d6d0e51fc888f692a35134336866341c08655d92614f492c6860dc45bb2c/scipy-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:37425bc9175607b0268f493d79a292c39f9d001a357bebb6b88fdfaff13f6448", size = 36510943, upload-time = "2026-02-23T00:20:50.89Z" },
- { url = "https://files.pythonhosted.org/packages/2a/fd/3be73c564e2a01e690e19cc618811540ba5354c67c8680dce3281123fb79/scipy-1.17.1-cp313-cp313-win_arm64.whl", hash = "sha256:5cf36e801231b6a2059bf354720274b7558746f3b1a4efb43fcf557ccd484a87", size = 24545621, upload-time = "2026-02-23T00:20:55.871Z" },
- { url = "https://files.pythonhosted.org/packages/6f/6b/17787db8b8114933a66f9dcc479a8272e4b4da75fe03b0c282f7b0ade8cd/scipy-1.17.1-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:d59c30000a16d8edc7e64152e30220bfbd724c9bbb08368c054e24c651314f0a", size = 31936708, upload-time = "2026-02-23T00:19:58.694Z" },
- { url = "https://files.pythonhosted.org/packages/38/2e/524405c2b6392765ab1e2b722a41d5da33dc5c7b7278184a8ad29b6cb206/scipy-1.17.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:010f4333c96c9bb1a4516269e33cb5917b08ef2166d5556ca2fd9f082a9e6ea0", size = 28570135, upload-time = "2026-02-23T00:20:03.934Z" },
- { url = "https://files.pythonhosted.org/packages/fd/c3/5bd7199f4ea8556c0c8e39f04ccb014ac37d1468e6cfa6a95c6b3562b76e/scipy-1.17.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:2ceb2d3e01c5f1d83c4189737a42d9cb2fc38a6eeed225e7515eef71ad301dce", size = 20741977, upload-time = "2026-02-23T00:20:07.935Z" },
- { url = "https://files.pythonhosted.org/packages/d9/b8/8ccd9b766ad14c78386599708eb745f6b44f08400a5fd0ade7cf89b6fc93/scipy-1.17.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:844e165636711ef41f80b4103ed234181646b98a53c8f05da12ca5ca289134f6", size = 23029601, upload-time = "2026-02-23T00:20:12.161Z" },
- { url = "https://files.pythonhosted.org/packages/6d/a0/3cb6f4d2fb3e17428ad2880333cac878909ad1a89f678527b5328b93c1d4/scipy-1.17.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:158dd96d2207e21c966063e1635b1063cd7787b627b6f07305315dd73d9c679e", size = 33019667, upload-time = "2026-02-23T00:20:17.208Z" },
- { url = "https://files.pythonhosted.org/packages/f3/c3/2d834a5ac7bf3a0c806ad1508efc02dda3c8c61472a56132d7894c312dea/scipy-1.17.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:74cbb80d93260fe2ffa334efa24cb8f2f0f622a9b9febf8b483c0b865bfb3475", size = 35264159, upload-time = "2026-02-23T00:20:23.087Z" },
- { url = "https://files.pythonhosted.org/packages/4d/77/d3ed4becfdbd217c52062fafe35a72388d1bd82c2d0ba5ca19d6fcc93e11/scipy-1.17.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:dbc12c9f3d185f5c737d801da555fb74b3dcfa1a50b66a1a93e09190f41fab50", size = 35102771, upload-time = "2026-02-23T00:20:28.636Z" },
- { url = "https://files.pythonhosted.org/packages/bd/12/d19da97efde68ca1ee5538bb261d5d2c062f0c055575128f11a2730e3ac1/scipy-1.17.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:94055a11dfebe37c656e70317e1996dc197e1a15bbcc351bcdd4610e128fe1ca", size = 37665910, upload-time = "2026-02-23T00:20:34.743Z" },
- { url = "https://files.pythonhosted.org/packages/06/1c/1172a88d507a4baaf72c5a09bb6c018fe2ae0ab622e5830b703a46cc9e44/scipy-1.17.1-cp313-cp313t-win_amd64.whl", hash = "sha256:e30bdeaa5deed6bc27b4cc490823cd0347d7dae09119b8803ae576ea0ce52e4c", size = 36562980, upload-time = "2026-02-23T00:20:40.575Z" },
- { url = "https://files.pythonhosted.org/packages/70/b0/eb757336e5a76dfa7911f63252e3b7d1de00935d7705cf772db5b45ec238/scipy-1.17.1-cp313-cp313t-win_arm64.whl", hash = "sha256:a720477885a9d2411f94a93d16f9d89bad0f28ca23c3f8daa521e2dcc3f44d49", size = 24856543, upload-time = "2026-02-23T00:20:45.313Z" },
- { url = "https://files.pythonhosted.org/packages/cf/83/333afb452af6f0fd70414dc04f898647ee1423979ce02efa75c3b0f2c28e/scipy-1.17.1-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:a48a72c77a310327f6a3a920092fa2b8fd03d7deaa60f093038f22d98e096717", size = 31584510, upload-time = "2026-02-23T00:21:01.015Z" },
- { url = "https://files.pythonhosted.org/packages/ed/a6/d05a85fd51daeb2e4ea71d102f15b34fedca8e931af02594193ae4fd25f7/scipy-1.17.1-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:45abad819184f07240d8a696117a7aacd39787af9e0b719d00285549ed19a1e9", size = 28170131, upload-time = "2026-02-23T00:21:05.888Z" },
- { url = "https://files.pythonhosted.org/packages/db/7b/8624a203326675d7746a254083a187398090a179335b2e4a20e2ddc46e83/scipy-1.17.1-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:3fd1fcdab3ea951b610dc4cef356d416d5802991e7e32b5254828d342f7b7e0b", size = 20342032, upload-time = "2026-02-23T00:21:09.904Z" },
- { url = "https://files.pythonhosted.org/packages/c9/35/2c342897c00775d688d8ff3987aced3426858fd89d5a0e26e020b660b301/scipy-1.17.1-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:7bdf2da170b67fdf10bca777614b1c7d96ae3ca5794fd9587dce41eb2966e866", size = 22678766, upload-time = "2026-02-23T00:21:14.313Z" },
- { url = "https://files.pythonhosted.org/packages/ef/f2/7cdb8eb308a1a6ae1e19f945913c82c23c0c442a462a46480ce487fdc0ac/scipy-1.17.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:adb2642e060a6549c343603a3851ba76ef0b74cc8c079a9a58121c7ec9fe2350", size = 32957007, upload-time = "2026-02-23T00:21:19.663Z" },
- { url = "https://files.pythonhosted.org/packages/0b/2e/7eea398450457ecb54e18e9d10110993fa65561c4f3add5e8eccd2b9cd41/scipy-1.17.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eee2cfda04c00a857206a4330f0c5e3e56535494e30ca445eb19ec624ae75118", size = 35221333, upload-time = "2026-02-23T00:21:25.278Z" },
- { url = "https://files.pythonhosted.org/packages/d9/77/5b8509d03b77f093a0d52e606d3c4f79e8b06d1d38c441dacb1e26cacf46/scipy-1.17.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d2650c1fb97e184d12d8ba010493ee7b322864f7d3d00d3f9bb97d9c21de4068", size = 35042066, upload-time = "2026-02-23T00:21:31.358Z" },
- { url = "https://files.pythonhosted.org/packages/f9/df/18f80fb99df40b4070328d5ae5c596f2f00fffb50167e31439e932f29e7d/scipy-1.17.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:08b900519463543aa604a06bec02461558a6e1cef8fdbb8098f77a48a83c8118", size = 37612763, upload-time = "2026-02-23T00:21:37.247Z" },
- { url = "https://files.pythonhosted.org/packages/4b/39/f0e8ea762a764a9dc52aa7dabcfad51a354819de1f0d4652b6a1122424d6/scipy-1.17.1-cp314-cp314-win_amd64.whl", hash = "sha256:3877ac408e14da24a6196de0ddcace62092bfc12a83823e92e49e40747e52c19", size = 37290984, upload-time = "2026-02-23T00:22:35.023Z" },
- { url = "https://files.pythonhosted.org/packages/7c/56/fe201e3b0f93d1a8bcf75d3379affd228a63d7e2d80ab45467a74b494947/scipy-1.17.1-cp314-cp314-win_arm64.whl", hash = "sha256:f8885db0bc2bffa59d5c1b72fad7a6a92d3e80e7257f967dd81abb553a90d293", size = 25192877, upload-time = "2026-02-23T00:22:39.798Z" },
- { url = "https://files.pythonhosted.org/packages/96/ad/f8c414e121f82e02d76f310f16db9899c4fcde36710329502a6b2a3c0392/scipy-1.17.1-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:1cc682cea2ae55524432f3cdff9e9a3be743d52a7443d0cba9017c23c87ae2f6", size = 31949750, upload-time = "2026-02-23T00:21:42.289Z" },
- { url = "https://files.pythonhosted.org/packages/7c/b0/c741e8865d61b67c81e255f4f0a832846c064e426636cd7de84e74d209be/scipy-1.17.1-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:2040ad4d1795a0ae89bfc7e8429677f365d45aa9fd5e4587cf1ea737f927b4a1", size = 28585858, upload-time = "2026-02-23T00:21:47.706Z" },
- { url = "https://files.pythonhosted.org/packages/ed/1b/3985219c6177866628fa7c2595bfd23f193ceebbe472c98a08824b9466ff/scipy-1.17.1-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:131f5aaea57602008f9822e2115029b55d4b5f7c070287699fe45c661d051e39", size = 20757723, upload-time = "2026-02-23T00:21:52.039Z" },
- { url = "https://files.pythonhosted.org/packages/c0/19/2a04aa25050d656d6f7b9e7b685cc83d6957fb101665bfd9369ca6534563/scipy-1.17.1-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:9cdc1a2fcfd5c52cfb3045feb399f7b3ce822abdde3a193a6b9a60b3cb5854ca", size = 23043098, upload-time = "2026-02-23T00:21:56.185Z" },
- { url = "https://files.pythonhosted.org/packages/86/f1/3383beb9b5d0dbddd030335bf8a8b32d4317185efe495374f134d8be6cce/scipy-1.17.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6e3dcd57ab780c741fde8dc68619de988b966db759a3c3152e8e9142c26295ad", size = 33030397, upload-time = "2026-02-23T00:22:01.404Z" },
- { url = "https://files.pythonhosted.org/packages/41/68/8f21e8a65a5a03f25a79165ec9d2b28c00e66dc80546cf5eb803aeeff35b/scipy-1.17.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a9956e4d4f4a301ebf6cde39850333a6b6110799d470dbbb1e25326ac447f52a", size = 35281163, upload-time = "2026-02-23T00:22:07.024Z" },
- { url = "https://files.pythonhosted.org/packages/84/8d/c8a5e19479554007a5632ed7529e665c315ae7492b4f946b0deb39870e39/scipy-1.17.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:a4328d245944d09fd639771de275701ccadf5f781ba0ff092ad141e017eccda4", size = 35116291, upload-time = "2026-02-23T00:22:12.585Z" },
- { url = "https://files.pythonhosted.org/packages/52/52/e57eceff0e342a1f50e274264ed47497b59e6a4e3118808ee58ddda7b74a/scipy-1.17.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a77cbd07b940d326d39a1d1b37817e2ee4d79cb30e7338f3d0cddffae70fcaa2", size = 37682317, upload-time = "2026-02-23T00:22:18.513Z" },
- { url = "https://files.pythonhosted.org/packages/11/2f/b29eafe4a3fbc3d6de9662b36e028d5f039e72d345e05c250e121a230dd4/scipy-1.17.1-cp314-cp314t-win_amd64.whl", hash = "sha256:eb092099205ef62cd1782b006658db09e2fed75bffcae7cc0d44052d8aa0f484", size = 37345327, upload-time = "2026-02-23T00:22:24.442Z" },
- { url = "https://files.pythonhosted.org/packages/07/39/338d9219c4e87f3e708f18857ecd24d22a0c3094752393319553096b98af/scipy-1.17.1-cp314-cp314t-win_arm64.whl", hash = "sha256:200e1050faffacc162be6a486a984a0497866ec54149a01270adc8a59b7c7d21", size = 25489165, upload-time = "2026-02-23T00:22:29.563Z" },
-]
-
-[[package]]
-name = "setuptools"
-version = "82.0.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/4f/db/cfac1baf10650ab4d1c111714410d2fbb77ac5a616db26775db562c8fab2/setuptools-82.0.1.tar.gz", hash = "sha256:7d872682c5d01cfde07da7bccc7b65469d3dca203318515ada1de5eda35efbf9", size = 1152316, upload-time = "2026-03-09T12:47:17.221Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/9d/76/f789f7a86709c6b087c5a2f52f911838cad707cc613162401badc665acfe/setuptools-82.0.1-py3-none-any.whl", hash = "sha256:a59e362652f08dcd477c78bb6e7bd9d80a7995bc73ce773050228a348ce2e5bb", size = 1006223, upload-time = "2026-03-09T12:47:15.026Z" },
-]
-
-[[package]]
-name = "six"
-version = "1.17.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
-]
-
-[[package]]
-name = "sortedcontainers"
-version = "2.4.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/e8/c4/ba2f8066cceb6f23394729afe52f3bf7adec04bf9ed2c820b39e19299111/sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", size = 30594, upload-time = "2021-05-16T22:03:42.897Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/32/46/9cb0e58b2deb7f82b84065f37f3bffeb12413f947f9388e4cac22c4621ce/sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0", size = 29575, upload-time = "2021-05-16T22:03:41.177Z" },
-]
-
-[[package]]
-name = "soundfile"
-version = "0.13.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "cffi" },
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
- { name = "numpy", version = "2.4.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/e1/41/9b873a8c055582859b239be17902a85339bec6a30ad162f98c9b0288a2cc/soundfile-0.13.1.tar.gz", hash = "sha256:b2c68dab1e30297317080a5b43df57e302584c49e2942defdde0acccc53f0e5b", size = 46156, upload-time = "2025-01-25T09:17:04.831Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/64/28/e2a36573ccbcf3d57c00626a21fe51989380636e821b341d36ccca0c1c3a/soundfile-0.13.1-py2.py3-none-any.whl", hash = "sha256:a23c717560da2cf4c7b5ae1142514e0fd82d6bbd9dfc93a50423447142f2c445", size = 25751, upload-time = "2025-01-25T09:16:44.235Z" },
- { url = "https://files.pythonhosted.org/packages/ea/ab/73e97a5b3cc46bba7ff8650a1504348fa1863a6f9d57d7001c6b67c5f20e/soundfile-0.13.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:82dc664d19831933fe59adad199bf3945ad06d84bc111a5b4c0d3089a5b9ec33", size = 1142250, upload-time = "2025-01-25T09:16:47.583Z" },
- { url = "https://files.pythonhosted.org/packages/a0/e5/58fd1a8d7b26fc113af244f966ee3aecf03cb9293cb935daaddc1e455e18/soundfile-0.13.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:743f12c12c4054921e15736c6be09ac26b3b3d603aef6fd69f9dde68748f2593", size = 1101406, upload-time = "2025-01-25T09:16:49.662Z" },
- { url = "https://files.pythonhosted.org/packages/58/ae/c0e4a53d77cf6e9a04179535766b3321b0b9ced5f70522e4caf9329f0046/soundfile-0.13.1-py2.py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:9c9e855f5a4d06ce4213f31918653ab7de0c5a8d8107cd2427e44b42df547deb", size = 1235729, upload-time = "2025-01-25T09:16:53.018Z" },
- { url = "https://files.pythonhosted.org/packages/57/5e/70bdd9579b35003a489fc850b5047beeda26328053ebadc1fb60f320f7db/soundfile-0.13.1-py2.py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:03267c4e493315294834a0870f31dbb3b28a95561b80b134f0bd3cf2d5f0e618", size = 1313646, upload-time = "2025-01-25T09:16:54.872Z" },
- { url = "https://files.pythonhosted.org/packages/fe/df/8c11dc4dfceda14e3003bb81a0d0edcaaf0796dd7b4f826ea3e532146bba/soundfile-0.13.1-py2.py3-none-win32.whl", hash = "sha256:c734564fab7c5ddf8e9be5bf70bab68042cd17e9c214c06e365e20d64f9a69d5", size = 899881, upload-time = "2025-01-25T09:16:56.663Z" },
- { url = "https://files.pythonhosted.org/packages/14/e9/6b761de83277f2f02ded7e7ea6f07828ec78e4b229b80e4ca55dd205b9dc/soundfile-0.13.1-py2.py3-none-win_amd64.whl", hash = "sha256:1e70a05a0626524a69e9f0f4dd2ec174b4e9567f4d8b6c11d38b5c289be36ee9", size = 1019162, upload-time = "2025-01-25T09:16:59.573Z" },
-]
-
-[[package]]
-name = "soxr"
-version = "1.0.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
- { name = "numpy", version = "2.4.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/42/7e/f4b461944662ad75036df65277d6130f9411002bfb79e9df7dff40a31db9/soxr-1.0.0.tar.gz", hash = "sha256:e07ee6c1d659bc6957034f4800c60cb8b98de798823e34d2a2bba1caa85a4509", size = 171415, upload-time = "2025-09-07T13:22:21.317Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/1e/a7/11c36d71595b52fe84a220040ace679035953acf06b83bf2c7117c565d2c/soxr-1.0.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:b876a3156f67c76aef0cff1084eaf4088d9ca584bb569cb993f89a52ec5f399f", size = 206459, upload-time = "2025-09-07T13:21:46.904Z" },
- { url = "https://files.pythonhosted.org/packages/43/5e/8962f2aeea7777d2a6e65a24a2b83c6aea1a28badeda027fd328f7f03bb7/soxr-1.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4d3b957a7b0cc19ae6aa45d40b2181474e53a8dd00efd7bce6bcf4e60e020892", size = 164808, upload-time = "2025-09-07T13:21:48.83Z" },
- { url = "https://files.pythonhosted.org/packages/fc/91/00384166f110a3888ea8efd44523ba7168dd2dc39e3e43c931cc2d069fa9/soxr-1.0.0-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89685faedebc45af71f08f9957b61cc6143bc94ba43fe38e97067f81e272969", size = 208586, upload-time = "2025-09-07T13:21:50.341Z" },
- { url = "https://files.pythonhosted.org/packages/75/34/e18f1003e242aabed44ed8902534814d3e64209e4d1d874f5b9b67d73cde/soxr-1.0.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d255741b2f0084fd02d4a2ddd77cd495be9e7e7b6f9dba1c9494f86afefac65b", size = 242310, upload-time = "2025-09-07T13:21:51.56Z" },
- { url = "https://files.pythonhosted.org/packages/61/9c/a1c5ed106b40cc1e2e12cd58831b7f1b61c5fbdb8eceeca4b3a0b0dbef6c/soxr-1.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:158a4a9055958c4b95ef91dbbe280cabb00946b5423b25a9b0ce31bd9e0a271e", size = 173561, upload-time = "2025-09-07T13:21:53.03Z" },
- { url = "https://files.pythonhosted.org/packages/65/ce/a3262bc8733d3a4ce5f660ed88c3d97f4b12658b0909e71334cba1721dcb/soxr-1.0.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:28e19d74a5ef45c0d7000f3c70ec1719e89077379df2a1215058914d9603d2d8", size = 206739, upload-time = "2025-09-07T13:21:54.572Z" },
- { url = "https://files.pythonhosted.org/packages/64/dc/e8cbd100b652697cc9865dbed08832e7e135ff533f453eb6db9e6168d153/soxr-1.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8dc69fc18884e53b72f6141fdf9d80997edbb4fec9dc2942edcb63abbe0d023", size = 165233, upload-time = "2025-09-07T13:21:55.887Z" },
- { url = "https://files.pythonhosted.org/packages/75/12/4b49611c9ba5e9fe6f807d0a83352516808e8e573f8b4e712fc0c17f3363/soxr-1.0.0-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3f15450e6f65f22f02fcd4c5a9219c873b1e583a73e232805ff160c759a6b586", size = 208867, upload-time = "2025-09-07T13:21:57.076Z" },
- { url = "https://files.pythonhosted.org/packages/cc/70/92146ab970a3ef8c43ac160035b1e52fde5417f89adb10572f7e788d9596/soxr-1.0.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f73f57452f9df37b4de7a4052789fcbd474a5b28f38bba43278ae4b489d4384", size = 242633, upload-time = "2025-09-07T13:21:58.621Z" },
- { url = "https://files.pythonhosted.org/packages/b5/a7/628479336206959463d08260bffed87905e7ba9e3bd83ca6b405a0736e94/soxr-1.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:9f417c3d69236051cf5a1a7bad7c4bff04eb3d8fcaa24ac1cb06e26c8d48d8dc", size = 173814, upload-time = "2025-09-07T13:21:59.798Z" },
- { url = "https://files.pythonhosted.org/packages/c5/c7/f92b81f1a151c13afb114f57799b86da9330bec844ea5a0d3fe6a8732678/soxr-1.0.0-cp312-abi3-macosx_10_14_x86_64.whl", hash = "sha256:abecf4e39017f3fadb5e051637c272ae5778d838e5c3926a35db36a53e3a607f", size = 205508, upload-time = "2025-09-07T13:22:01.252Z" },
- { url = "https://files.pythonhosted.org/packages/ff/1d/c945fea9d83ea1f2be9d116b3674dbaef26ed090374a77c394b31e3b083b/soxr-1.0.0-cp312-abi3-macosx_11_0_arm64.whl", hash = "sha256:e973d487ee46aa8023ca00a139db6e09af053a37a032fe22f9ff0cc2e19c94b4", size = 163568, upload-time = "2025-09-07T13:22:03.558Z" },
- { url = "https://files.pythonhosted.org/packages/b5/80/10640970998a1d2199bef6c4d92205f36968cddaf3e4d0e9fe35ddd405bd/soxr-1.0.0-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e8ce273cca101aff3d8c387db5a5a41001ba76ef1837883438d3c652507a9ccc", size = 204707, upload-time = "2025-09-07T13:22:05.125Z" },
- { url = "https://files.pythonhosted.org/packages/b1/87/2726603c13c2126cb8ded9e57381b7377f4f0df6ba4408e1af5ddbfdc3dd/soxr-1.0.0-cp312-abi3-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8f2a69686f2856d37823bbb7b78c3d44904f311fe70ba49b893af11d6b6047b", size = 238032, upload-time = "2025-09-07T13:22:06.428Z" },
- { url = "https://files.pythonhosted.org/packages/ce/04/530252227f4d0721a5524a936336485dfb429bb206a66baf8e470384f4a2/soxr-1.0.0-cp312-abi3-win_amd64.whl", hash = "sha256:2a3b77b115ae7c478eecdbd060ed4f61beda542dfb70639177ac263aceda42a2", size = 172070, upload-time = "2025-09-07T13:22:07.62Z" },
- { url = "https://files.pythonhosted.org/packages/99/77/d3b3c25b4f1b1aa4a73f669355edcaee7a52179d0c50407697200a0e55b9/soxr-1.0.0-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:392a5c70c04eb939c9c176bd6f654dec9a0eaa9ba33d8f1024ed63cf68cdba0a", size = 209509, upload-time = "2025-09-07T13:22:08.773Z" },
- { url = "https://files.pythonhosted.org/packages/8a/ee/3ca73e18781bb2aff92b809f1c17c356dfb9a1870652004bd432e79afbfa/soxr-1.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fdc41a1027ba46777186f26a8fba7893be913383414135577522da2fcc684490", size = 167690, upload-time = "2025-09-07T13:22:10.259Z" },
- { url = "https://files.pythonhosted.org/packages/bd/f0/eea8b5f587a2531657dc5081d2543a5a845f271a3bea1c0fdee5cebde021/soxr-1.0.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:449acd1dfaf10f0ce6dfd75c7e2ef984890df94008765a6742dafb42061c1a24", size = 209541, upload-time = "2025-09-07T13:22:11.739Z" },
- { url = "https://files.pythonhosted.org/packages/64/59/2430a48c705565eb09e78346950b586f253a11bd5313426ced3ecd9b0feb/soxr-1.0.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:38b35c99e408b8f440c9376a5e1dd48014857cd977c117bdaa4304865ae0edd0", size = 243025, upload-time = "2025-09-07T13:22:12.877Z" },
- { url = "https://files.pythonhosted.org/packages/3c/1b/f84a2570a74094e921bbad5450b2a22a85d58585916e131d9b98029c3e69/soxr-1.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:a39b519acca2364aa726b24a6fd55acf29e4c8909102e0b858c23013c38328e5", size = 184850, upload-time = "2025-09-07T13:22:14.068Z" },
- { url = "https://files.pythonhosted.org/packages/5f/d8/2a979590cc53d9c8fa63010b80018c2aa522fcd4e132067be4b130b9f4ce/soxr-1.0.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:c120775b7d0ef9e974a5797a4695861e88653f7ecd0a2a532f089bc4452ba130", size = 206652, upload-time = "2025-09-07T13:22:15.229Z" },
- { url = "https://files.pythonhosted.org/packages/d6/65/cc7d4c172afae668a9861d0c0e221830ccb3a88640148b37d5d5343e2d35/soxr-1.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4e59e5f648bd6144e79a6e0596aa486218876293f5ddce3ca84b9d8f8aa34d6d", size = 165050, upload-time = "2025-09-07T13:22:16.303Z" },
- { url = "https://files.pythonhosted.org/packages/64/e4/449c59781d9d942bfa88f691099935b41b207f39704515cc7d368c017f9f/soxr-1.0.0-cp39-cp39-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bb86c342862697dbd4a44043f275e5196f2d2c49dca374c78f19b7893988675d", size = 208806, upload-time = "2025-09-07T13:22:17.487Z" },
- { url = "https://files.pythonhosted.org/packages/e8/89/9af231937ba3bc0c6d42955431290eeab0efcf37987bf0d69f5734bda78a/soxr-1.0.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3d2a4fadd88207c2991fb08c29fc189e7b2e298b598a94ea1747e42c8acb7a01", size = 242642, upload-time = "2025-09-07T13:22:18.727Z" },
- { url = "https://files.pythonhosted.org/packages/d9/34/b707da1fce2ccd60477bc47f0149e94e49fb98f20f8670a1dbf67b93a4a1/soxr-1.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:c7f5ace8f04f924b21caedeeb69f2a7b3d83d2d436639498c08b2cebe181af14", size = 174063, upload-time = "2025-09-07T13:22:20.173Z" },
-]
-
-[[package]]
-name = "standard-aifc"
-version = "3.13.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "audioop-lts", marker = "python_full_version >= '3.13'" },
- { name = "standard-chunk", marker = "python_full_version >= '3.13'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/c4/53/6050dc3dde1671eb3db592c13b55a8005e5040131f7509cef0215212cb84/standard_aifc-3.13.0.tar.gz", hash = "sha256:64e249c7cb4b3daf2fdba4e95721f811bde8bdfc43ad9f936589b7bb2fae2e43", size = 15240, upload-time = "2024-10-30T16:01:31.772Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c3/52/5fbb203394cc852334d1575cc020f6bcec768d2265355984dfd361968f36/standard_aifc-3.13.0-py3-none-any.whl", hash = "sha256:f7ae09cc57de1224a0dd8e3eb8f73830be7c3d0bc485de4c1f82b4a7f645ac66", size = 10492, upload-time = "2024-10-30T16:01:07.071Z" },
-]
-
-[[package]]
-name = "standard-chunk"
-version = "3.13.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/43/06/ce1bb165c1f111c7d23a1ad17204d67224baa69725bb6857a264db61beaf/standard_chunk-3.13.0.tar.gz", hash = "sha256:4ac345d37d7e686d2755e01836b8d98eda0d1a3ee90375e597ae43aaf064d654", size = 4672, upload-time = "2024-10-30T16:18:28.326Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/7a/90/a5c1084d87767d787a6caba615aa50dc587229646308d9420c960cb5e4c0/standard_chunk-3.13.0-py3-none-any.whl", hash = "sha256:17880a26c285189c644bd5bd8f8ed2bdb795d216e3293e6dbe55bbd848e2982c", size = 4944, upload-time = "2024-10-30T16:18:26.694Z" },
-]
-
-[[package]]
-name = "standard-sunau"
-version = "3.13.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "audioop-lts", marker = "python_full_version >= '3.13'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/66/e3/ce8d38cb2d70e05ffeddc28bb09bad77cfef979eb0a299c9117f7ed4e6a9/standard_sunau-3.13.0.tar.gz", hash = "sha256:b319a1ac95a09a2378a8442f403c66f4fd4b36616d6df6ae82b8e536ee790908", size = 9368, upload-time = "2024-10-30T16:01:41.626Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/34/ae/e3707f6c1bc6f7aa0df600ba8075bfb8a19252140cd595335be60e25f9ee/standard_sunau-3.13.0-py3-none-any.whl", hash = "sha256:53af624a9529c41062f4c2fd33837f297f3baa196b0cfceffea6555654602622", size = 7364, upload-time = "2024-10-30T16:01:28.003Z" },
-]
-
-[[package]]
-name = "sympy"
-version = "1.14.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "mpmath" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921, upload-time = "2025-04-27T18:05:01.611Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" },
-]
-
-[[package]]
-name = "threadpoolctl"
-version = "3.6.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/b7/4d/08c89e34946fce2aec4fbb45c9016efd5f4d7f24af8e5d93296e935631d8/threadpoolctl-3.6.0.tar.gz", hash = "sha256:8ab8b4aa3491d812b623328249fab5302a68d2d71745c8a4c719a2fcaba9f44e", size = 21274, upload-time = "2025-03-13T13:49:23.031Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/32/d5/f9a850d79b0851d1d4ef6456097579a9005b31fea68726a4ae5f2d82ddd9/threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb", size = 18638, upload-time = "2025-03-13T13:49:21.846Z" },
-]
-
-[[package]]
-name = "tomli"
-version = "2.4.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size = 17477, upload-time = "2026-01-11T11:22:38.165Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size = 153663, upload-time = "2026-01-11T11:21:45.27Z" },
- { url = "https://files.pythonhosted.org/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size = 148469, upload-time = "2026-01-11T11:21:46.873Z" },
- { url = "https://files.pythonhosted.org/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size = 236039, upload-time = "2026-01-11T11:21:48.503Z" },
- { url = "https://files.pythonhosted.org/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size = 243007, upload-time = "2026-01-11T11:21:49.456Z" },
- { url = "https://files.pythonhosted.org/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size = 240875, upload-time = "2026-01-11T11:21:50.755Z" },
- { url = "https://files.pythonhosted.org/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size = 246271, upload-time = "2026-01-11T11:21:51.81Z" },
- { url = "https://files.pythonhosted.org/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size = 96770, upload-time = "2026-01-11T11:21:52.647Z" },
- { url = "https://files.pythonhosted.org/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size = 107626, upload-time = "2026-01-11T11:21:53.459Z" },
- { url = "https://files.pythonhosted.org/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size = 94842, upload-time = "2026-01-11T11:21:54.831Z" },
- { url = "https://files.pythonhosted.org/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size = 154894, upload-time = "2026-01-11T11:21:56.07Z" },
- { url = "https://files.pythonhosted.org/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size = 149053, upload-time = "2026-01-11T11:21:57.467Z" },
- { url = "https://files.pythonhosted.org/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size = 243481, upload-time = "2026-01-11T11:21:58.661Z" },
- { url = "https://files.pythonhosted.org/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size = 251720, upload-time = "2026-01-11T11:22:00.178Z" },
- { url = "https://files.pythonhosted.org/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size = 247014, upload-time = "2026-01-11T11:22:01.238Z" },
- { url = "https://files.pythonhosted.org/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size = 251820, upload-time = "2026-01-11T11:22:02.727Z" },
- { url = "https://files.pythonhosted.org/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size = 97712, upload-time = "2026-01-11T11:22:03.777Z" },
- { url = "https://files.pythonhosted.org/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", size = 108296, upload-time = "2026-01-11T11:22:04.86Z" },
- { url = "https://files.pythonhosted.org/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size = 94553, upload-time = "2026-01-11T11:22:05.854Z" },
- { url = "https://files.pythonhosted.org/packages/34/91/7f65f9809f2936e1f4ce6268ae1903074563603b2a2bd969ebbda802744f/tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0", size = 154915, upload-time = "2026-01-11T11:22:06.703Z" },
- { url = "https://files.pythonhosted.org/packages/20/aa/64dd73a5a849c2e8f216b755599c511badde80e91e9bc2271baa7b2cdbb1/tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e", size = 149038, upload-time = "2026-01-11T11:22:07.56Z" },
- { url = "https://files.pythonhosted.org/packages/9e/8a/6d38870bd3d52c8d1505ce054469a73f73a0fe62c0eaf5dddf61447e32fa/tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4", size = 242245, upload-time = "2026-01-11T11:22:08.344Z" },
- { url = "https://files.pythonhosted.org/packages/59/bb/8002fadefb64ab2669e5b977df3f5e444febea60e717e755b38bb7c41029/tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e", size = 250335, upload-time = "2026-01-11T11:22:09.951Z" },
- { url = "https://files.pythonhosted.org/packages/a5/3d/4cdb6f791682b2ea916af2de96121b3cb1284d7c203d97d92d6003e91c8d/tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c", size = 245962, upload-time = "2026-01-11T11:22:11.27Z" },
- { url = "https://files.pythonhosted.org/packages/f2/4a/5f25789f9a460bd858ba9756ff52d0830d825b458e13f754952dd15fb7bb/tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f", size = 250396, upload-time = "2026-01-11T11:22:12.325Z" },
- { url = "https://files.pythonhosted.org/packages/aa/2f/b73a36fea58dfa08e8b3a268750e6853a6aac2a349241a905ebd86f3047a/tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86", size = 97530, upload-time = "2026-01-11T11:22:13.865Z" },
- { url = "https://files.pythonhosted.org/packages/3b/af/ca18c134b5d75de7e8dc551c5234eaba2e8e951f6b30139599b53de9c187/tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87", size = 108227, upload-time = "2026-01-11T11:22:15.224Z" },
- { url = "https://files.pythonhosted.org/packages/22/c3/b386b832f209fee8073c8138ec50f27b4460db2fdae9ffe022df89a57f9b/tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132", size = 94748, upload-time = "2026-01-11T11:22:16.009Z" },
- { url = "https://files.pythonhosted.org/packages/f3/c4/84047a97eb1004418bc10bdbcfebda209fca6338002eba2dc27cc6d13563/tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6", size = 154725, upload-time = "2026-01-11T11:22:17.269Z" },
- { url = "https://files.pythonhosted.org/packages/a8/5d/d39038e646060b9d76274078cddf146ced86dc2b9e8bbf737ad5983609a0/tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc", size = 148901, upload-time = "2026-01-11T11:22:18.287Z" },
- { url = "https://files.pythonhosted.org/packages/73/e5/383be1724cb30f4ce44983d249645684a48c435e1cd4f8b5cded8a816d3c/tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66", size = 243375, upload-time = "2026-01-11T11:22:19.154Z" },
- { url = "https://files.pythonhosted.org/packages/31/f0/bea80c17971c8d16d3cc109dc3585b0f2ce1036b5f4a8a183789023574f2/tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d", size = 250639, upload-time = "2026-01-11T11:22:20.168Z" },
- { url = "https://files.pythonhosted.org/packages/2c/8f/2853c36abbb7608e3f945d8a74e32ed3a74ee3a1f468f1ffc7d1cb3abba6/tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702", size = 246897, upload-time = "2026-01-11T11:22:21.544Z" },
- { url = "https://files.pythonhosted.org/packages/49/f0/6c05e3196ed5337b9fe7ea003e95fd3819a840b7a0f2bf5a408ef1dad8ed/tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8", size = 254697, upload-time = "2026-01-11T11:22:23.058Z" },
- { url = "https://files.pythonhosted.org/packages/f3/f5/2922ef29c9f2951883525def7429967fc4d8208494e5ab524234f06b688b/tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776", size = 98567, upload-time = "2026-01-11T11:22:24.033Z" },
- { url = "https://files.pythonhosted.org/packages/7b/31/22b52e2e06dd2a5fdbc3ee73226d763b184ff21fc24e20316a44ccc4d96b/tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475", size = 108556, upload-time = "2026-01-11T11:22:25.378Z" },
- { url = "https://files.pythonhosted.org/packages/48/3d/5058dff3255a3d01b705413f64f4306a141a8fd7a251e5a495e3f192a998/tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2", size = 96014, upload-time = "2026-01-11T11:22:26.138Z" },
- { url = "https://files.pythonhosted.org/packages/b8/4e/75dab8586e268424202d3a1997ef6014919c941b50642a1682df43204c22/tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9", size = 163339, upload-time = "2026-01-11T11:22:27.143Z" },
- { url = "https://files.pythonhosted.org/packages/06/e3/b904d9ab1016829a776d97f163f183a48be6a4deb87304d1e0116a349519/tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0", size = 159490, upload-time = "2026-01-11T11:22:28.399Z" },
- { url = "https://files.pythonhosted.org/packages/e3/5a/fc3622c8b1ad823e8ea98a35e3c632ee316d48f66f80f9708ceb4f2a0322/tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df", size = 269398, upload-time = "2026-01-11T11:22:29.345Z" },
- { url = "https://files.pythonhosted.org/packages/fd/33/62bd6152c8bdd4c305ad9faca48f51d3acb2df1f8791b1477d46ff86e7f8/tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d", size = 276515, upload-time = "2026-01-11T11:22:30.327Z" },
- { url = "https://files.pythonhosted.org/packages/4b/ff/ae53619499f5235ee4211e62a8d7982ba9e439a0fb4f2f351a93d67c1dd2/tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f", size = 273806, upload-time = "2026-01-11T11:22:32.56Z" },
- { url = "https://files.pythonhosted.org/packages/47/71/cbca7787fa68d4d0a9f7072821980b39fbb1b6faeb5f5cf02f4a5559fa28/tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b", size = 281340, upload-time = "2026-01-11T11:22:33.505Z" },
- { url = "https://files.pythonhosted.org/packages/f5/00/d595c120963ad42474cf6ee7771ad0d0e8a49d0f01e29576ee9195d9ecdf/tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087", size = 108106, upload-time = "2026-01-11T11:22:34.451Z" },
- { url = "https://files.pythonhosted.org/packages/de/69/9aa0c6a505c2f80e519b43764f8b4ba93b5a0bbd2d9a9de6e2b24271b9a5/tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd", size = 120504, upload-time = "2026-01-11T11:22:35.764Z" },
- { url = "https://files.pythonhosted.org/packages/b3/9f/f1668c281c58cfae01482f7114a4b88d345e4c140386241a1a24dcc9e7bc/tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4", size = 99561, upload-time = "2026-01-11T11:22:36.624Z" },
- { url = "https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477, upload-time = "2026-01-11T11:22:37.446Z" },
-]
-
-[[package]]
-name = "torch"
-version = "2.7.1+cu118"
-source = { registry = "https://download.pytorch.org/whl/cu118" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
-]
-dependencies = [
- { name = "filelock", version = "3.19.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
- { name = "fsspec", version = "2025.10.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
- { name = "jinja2", marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
- { name = "networkx", version = "3.2.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
- { name = "nvidia-cublas-cu11", marker = "python_full_version < '3.10' and platform_machine == 'x86_64' and sys_platform == 'linux'" },
- { name = "nvidia-cuda-cupti-cu11", marker = "python_full_version < '3.10' and platform_machine == 'x86_64' and sys_platform == 'linux'" },
- { name = "nvidia-cuda-nvrtc-cu11", marker = "python_full_version < '3.10' and platform_machine == 'x86_64' and sys_platform == 'linux'" },
- { name = "nvidia-cuda-runtime-cu11", marker = "python_full_version < '3.10' and platform_machine == 'x86_64' and sys_platform == 'linux'" },
- { name = "nvidia-cudnn-cu11", marker = "python_full_version < '3.10' and platform_machine == 'x86_64' and sys_platform == 'linux'" },
- { name = "nvidia-cufft-cu11", marker = "python_full_version < '3.10' and platform_machine == 'x86_64' and sys_platform == 'linux'" },
- { name = "nvidia-curand-cu11", marker = "python_full_version < '3.10' and platform_machine == 'x86_64' and sys_platform == 'linux'" },
- { name = "nvidia-cusolver-cu11", marker = "python_full_version < '3.10' and platform_machine == 'x86_64' and sys_platform == 'linux'" },
- { name = "nvidia-cusparse-cu11", marker = "python_full_version < '3.10' and platform_machine == 'x86_64' and sys_platform == 'linux'" },
- { name = "nvidia-nccl-cu11", marker = "python_full_version < '3.10' and platform_machine == 'x86_64' and sys_platform == 'linux'" },
- { name = "nvidia-nvtx-cu11", marker = "python_full_version < '3.10' and platform_machine == 'x86_64' and sys_platform == 'linux'" },
- { name = "sympy", marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
- { name = "triton", version = "3.3.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' and platform_machine == 'x86_64' and sys_platform == 'linux'" },
- { name = "typing-extensions", marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
-]
-wheels = [
- { url = "https://download.pytorch.org/whl/cu118/torch-2.7.1%2Bcu118-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:89433c62b02ec802d4c0887c867d935887ae8f00d7cc549ecf1c2640d096bd4c" },
- { url = "https://download.pytorch.org/whl/cu118/torch-2.7.1%2Bcu118-cp310-cp310-win_amd64.whl", hash = "sha256:af4833e36a8e964681a4dad7775f559cf043bd42c9d0c0b5e0619f9d0e44cb56" },
- { url = "https://download.pytorch.org/whl/cu118/torch-2.7.1%2Bcu118-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a596d91c747d1fa601724e85b9c8797c8d7c62140aa1acf245773e911254bc45" },
- { url = "https://download.pytorch.org/whl/cu118/torch-2.7.1%2Bcu118-cp311-cp311-win_amd64.whl", hash = "sha256:584e5ee99d29286b93be2fba3b3f1f5b9d7a4b9055a288eb31b33100a1f09ed9" },
- { url = "https://download.pytorch.org/whl/cu118/torch-2.7.1%2Bcu118-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:91454dcfdb81f181fdf216d6d6d9912fbd8795578b90384b3b8b8132737072bb" },
- { url = "https://download.pytorch.org/whl/cu118/torch-2.7.1%2Bcu118-cp312-cp312-win_amd64.whl", hash = "sha256:80855ec840b7b06372ff43535d01393a8ec101842618d1f9ed629572b52aed71" },
- { url = "https://download.pytorch.org/whl/cu118/torch-2.7.1%2Bcu118-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:a3f02b2795165eaf6dfe18c963519049a45a9c588488795cebc5015dac77ab46" },
- { url = "https://download.pytorch.org/whl/cu118/torch-2.7.1%2Bcu118-cp313-cp313-win_amd64.whl", hash = "sha256:3122e59a5fe4e9ee991e7ad4e7002afa549b2873e421759df6454f20f53a6c74" },
- { url = "https://download.pytorch.org/whl/cu118/torch-2.7.1%2Bcu118-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:627b7248b429d97b3955f1d0375aad1192b8f20f37556384848b6c622e491eb5" },
- { url = "https://download.pytorch.org/whl/cu118/torch-2.7.1%2Bcu118-cp313-cp313t-win_amd64.whl", hash = "sha256:e06a205f15b3a045924d72f788af0664ca5f20e610eaac7162189721cf31a771" },
- { url = "https://download.pytorch.org/whl/cu118/torch-2.7.1%2Bcu118-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:27fa1fc8d1bd14d55abece300fea978bd02ec6af933779627e22d6336133e29f" },
- { url = "https://download.pytorch.org/whl/cu118/torch-2.7.1%2Bcu118-cp39-cp39-win_amd64.whl", hash = "sha256:6cdd52fe299bf7a0557fa52d63c7657a59178aaed6fe729864003fd974870ae7" },
-]
-
-[[package]]
-name = "torch"
-version = "2.8.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "filelock", version = "3.19.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "fsspec", version = "2025.10.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "jinja2", marker = "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "networkx", version = "3.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "sympy", marker = "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "typing-extensions", marker = "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ef/d6/e6d4c57e61c2b2175d3aafbfb779926a2cfd7c32eeda7c543925dceec923/torch-2.8.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:a3f16a58a9a800f589b26d47ee15aca3acf065546137fc2af039876135f4c760", size = 73611154, upload-time = "2025-08-06T14:53:10.919Z" },
- { url = "https://files.pythonhosted.org/packages/a4/5e/05a5c46085d9b97e928f3f037081d3d2b87fb4b4195030fc099aaec5effc/torch-2.8.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:5ae0524688fb6707c57a530c2325e13bb0090b745ba7b4a2cd6a3ce262572916", size = 73621174, upload-time = "2025-08-06T14:53:25.44Z" },
- { url = "https://files.pythonhosted.org/packages/be/66/5c9a321b325aaecb92d4d1855421e3a055abd77903b7dab6575ca07796db/torch-2.8.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:619c2869db3ada2c0105487ba21b5008defcc472d23f8b80ed91ac4a380283b0", size = 73630478, upload-time = "2025-08-06T14:53:57.144Z" },
- { url = "https://files.pythonhosted.org/packages/de/69/8b7b13bba430f5e21d77708b616f767683629fc4f8037564a177d20f90ed/torch-2.8.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:1a62a1ec4b0498930e2543535cf70b1bef8c777713de7ceb84cd79115f553767", size = 73915128, upload-time = "2025-08-06T14:54:34.769Z" },
- { url = "https://files.pythonhosted.org/packages/04/6e/650bb7f28f771af0cb791b02348db8b7f5f64f40f6829ee82aa6ce99aabe/torch-2.8.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:7b677e17f5a3e69fdef7eb3b9da72622f8d322692930297e4ccb52fefc6c8211", size = 73632395, upload-time = "2025-08-06T14:55:28.645Z" },
- { url = "https://files.pythonhosted.org/packages/ae/95/ae26263aceb3d57b821179f827d0e321373ed49423e603dd5906ab14a730/torch-2.8.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:e9f071f5b52a9f6970dc8a919694b27a91ae9dc08898b2b988abbef5eddfd1ae", size = 73610795, upload-time = "2025-08-06T14:57:11.513Z" },
-]
-
-[[package]]
-name = "torch"
-version = "2.10.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "filelock", version = "3.25.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "fsspec", version = "2026.2.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "jinja2", marker = "python_full_version >= '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "networkx", version = "3.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "networkx", version = "3.6.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "setuptools", marker = "python_full_version >= '3.12' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "sympy", marker = "python_full_version >= '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "typing-extensions", marker = "python_full_version >= '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/5b/30/bfebdd8ec77db9a79775121789992d6b3b75ee5494971294d7b4b7c999bc/torch-2.10.0-2-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:2b980edd8d7c0a68c4e951ee1856334a43193f98730d97408fbd148c1a933313", size = 79411457, upload-time = "2026-02-10T21:44:59.189Z" },
- { url = "https://files.pythonhosted.org/packages/0f/8b/4b61d6e13f7108f36910df9ab4b58fd389cc2520d54d81b88660804aad99/torch-2.10.0-2-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:418997cb02d0a0f1497cf6a09f63166f9f5df9f3e16c8a716ab76a72127c714f", size = 79423467, upload-time = "2026-02-10T21:44:48.711Z" },
- { url = "https://files.pythonhosted.org/packages/d3/54/a2ba279afcca44bbd320d4e73675b282fcee3d81400ea1b53934efca6462/torch-2.10.0-2-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:13ec4add8c3faaed8d13e0574f5cd4a323c11655546f91fbe6afa77b57423574", size = 79498202, upload-time = "2026-02-10T21:44:52.603Z" },
- { url = "https://files.pythonhosted.org/packages/ec/23/2c9fe0c9c27f7f6cb865abcea8a4568f29f00acaeadfc6a37f6801f84cb4/torch-2.10.0-2-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:e521c9f030a3774ed770a9c011751fb47c4d12029a3d6522116e48431f2ff89e", size = 79498254, upload-time = "2026-02-10T21:44:44.095Z" },
- { url = "https://files.pythonhosted.org/packages/76/bb/d820f90e69cda6c8169b32a0c6a3ab7b17bf7990b8f2c680077c24a3c14c/torch-2.10.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:35e407430795c8d3edb07a1d711c41cc1f9eaddc8b2f1cc0a165a6767a8fb73d", size = 79411450, upload-time = "2026-01-21T16:25:30.692Z" },
- { url = "https://files.pythonhosted.org/packages/61/d8/15b9d9d3a6b0c01b883787bd056acbe5cc321090d4b216d3ea89a8fcfdf3/torch-2.10.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:b7bd80f3477b830dd166c707c5b0b82a898e7b16f59a7d9d42778dd058272e8b", size = 79423461, upload-time = "2026-01-21T16:24:50.266Z" },
- { url = "https://files.pythonhosted.org/packages/c9/5c/dee910b87c4d5c0fcb41b50839ae04df87c1cfc663cf1b5fca7ea565eeaa/torch-2.10.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:6d3707a61863d1c4d6ebba7be4ca320f42b869ee657e9b2c21c736bf17000294", size = 79498198, upload-time = "2026-01-21T16:24:34.704Z" },
- { url = "https://files.pythonhosted.org/packages/1a/0b/39929b148f4824bc3ad6f9f72a29d4ad865bcf7ebfc2fa67584773e083d2/torch-2.10.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:3202429f58309b9fa96a614885eace4b7995729f44beb54d3e4a47773649d382", size = 79851305, upload-time = "2026-01-21T16:24:09.209Z" },
- { url = "https://files.pythonhosted.org/packages/0e/13/e76b4d9c160e89fff48bf16b449ea324bda84745d2ab30294c37c2434c0d/torch-2.10.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:cdf2a523d699b70d613243211ecaac14fe9c5df8a0b0a9c02add60fb2a413e0f", size = 79498248, upload-time = "2026-01-21T16:23:09.315Z" },
- { url = "https://files.pythonhosted.org/packages/4f/93/716b5ac0155f1be70ed81bacc21269c3ece8dba0c249b9994094110bfc51/torch-2.10.0-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:bf0d9ff448b0218e0433aeb198805192346c4fd659c852370d5cc245f602a06a", size = 79464992, upload-time = "2026-01-21T16:23:05.162Z" },
- { url = "https://files.pythonhosted.org/packages/d8/94/71994e7d0d5238393df9732fdab607e37e2b56d26a746cb59fdb415f8966/torch-2.10.0-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:f5ab4ba32383061be0fb74bda772d470140a12c1c3b58a0cfbf3dae94d164c28", size = 79850324, upload-time = "2026-01-21T16:22:09.494Z" },
-]
-
-[[package]]
-name = "torch"
-version = "2.10.0+cu126"
-source = { registry = "https://download.pytorch.org/whl/cu126" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
-]
-dependencies = [
- { name = "cuda-bindings", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "filelock", version = "3.25.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
- { name = "fsspec", version = "2026.2.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
- { name = "jinja2", marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
- { name = "networkx", version = "3.2.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')" },
- { name = "networkx", version = "3.6.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
- { name = "nvidia-cublas-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-cuda-cupti-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-cuda-nvrtc-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-cuda-runtime-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-cudnn-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-cufft-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-cufile-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-curand-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-cusolver-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-cusparse-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-cusparselt-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-nccl-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-nvjitlink-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-nvshmem-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "nvidia-nvtx-cu12", marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "setuptools", marker = "(python_full_version >= '3.12' and sys_platform == 'linux') or (python_full_version >= '3.12' and sys_platform == 'win32')" },
- { name = "sympy", marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
- { name = "triton", version = "3.6.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' and sys_platform == 'linux'" },
- { name = "typing-extensions", marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
-]
-wheels = [
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:dae63a4756c9c455f299309b7b093f1b7c3460e63b53769cab10543b51a1d827" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a256b51e8ca00770a47fe7ab865e3211d2a080d4f1cdc814cdcfb073b36cf1a1" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp310-cp310-win_amd64.whl", hash = "sha256:b91012be20b6c0370800ed7c153fd5b51582495f00f7341c38fa0cb6b9c9a968" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:3a5fb967ffb53ffa0d2579c9819491cfc36c557040de6fdeabcfcfb45df019bc" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a9a9ba3b2baf23c044499ffbcbed88e04b6e38b94189c7dc42dd2cfcdd8c55c0" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp311-cp311-win_amd64.whl", hash = "sha256:4749cd32e32ed55179ff2ff0407e0ae5077fe4d332bfa49258f4578d09eccb40" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:81264238b3d8840276dd30c31f393e325b8f5da6390d18ac2a80dacecfd693ea" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:2a7a569206f07965eff69b28e147676540bb0ba6e1a39410802b6e4708cb8356" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp312-cp312-win_amd64.whl", hash = "sha256:95d8409b8a15191de4c2958e86ca47f3ea8f9739b994ee4ca0e7586f37336413" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:9ffbf240bc193841ba0a79976510aa9ec14c95a57699257b581bc782316b592f" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:8568052253534abe27b3ac56d301f69d35ef5ce16479e6a3d7808fb052310919" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp313-cp313-win_amd64.whl", hash = "sha256:91e21e7ad572bf0136e5b7f192714f120c8abde8e128f1a0759f158951643822" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:c3480edd0ecc95df5f3418687f584037c072392646f94f5181d32bba5446724f" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:270918b7a7ae46951fae6150bee9fcbd6a908242a1acc8d7e73de1194a041902" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp313-cp313t-win_amd64.whl", hash = "sha256:06335b76cbaae9ee94071e69dd79ecfadab76a48edd4ef79a95de0fbf1bc04b4" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:6935902d55007b3031a1e1ce74f9d0e1a6780cb02990818133a868560197dfa6" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:4cf597403f339a5068ad5a96fac562a2664a7cc584f24689d3136bf3deb0d07e" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp314-cp314-win_amd64.whl", hash = "sha256:ef8d62917bf7886929f6b3d8fbab372f8ac660b61cca47c19e0354c23fb860cf" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:55f4639ea3d0f232281bbe8acce7e04f53e6789594ff354aff7560b22e2d8241" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1aba08d6a66cb7577afc13fcc2bec8b15133438098a5acd512cee920c40c16a8" },
- { url = "https://download.pytorch.org/whl/cu126/torch-2.10.0%2Bcu126-cp314-cp314t-win_amd64.whl", hash = "sha256:78bc0feb3357037b902562a8c0b72ca78cef65e2d2b782c214c7892df87b96a3" },
-]
-
-[[package]]
-name = "torchaudio"
-version = "2.7.1+cu118"
-source = { registry = "https://download.pytorch.org/whl/cu118" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
-]
-dependencies = [
- { name = "torch", version = "2.7.1+cu118", source = { registry = "https://download.pytorch.org/whl/cu118" }, marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
-]
-wheels = [
- { url = "https://download-r2.pytorch.org/whl/cu118/torchaudio-2.7.1%2Bcu118-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:effef2c79a95830d11b481f6fbe461312988b49b4c0b3dbe893d0b56d5ce032f" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchaudio-2.7.1%2Bcu118-cp310-cp310-win_amd64.whl", hash = "sha256:da55035d325d4523981c48413da8e34321ef6d9306fa71b04b96dc04b3ed8318" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchaudio-2.7.1%2Bcu118-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76629345b5e095b5b9dd6abbeb1919e50dd8f3c2082d89b54ad5cd7d25158de9" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchaudio-2.7.1%2Bcu118-cp311-cp311-win_amd64.whl", hash = "sha256:c33d68f7a0c35810dbd74150f8994ef850c54813d0f8c5b7b1e5d71f2b6c7159" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchaudio-2.7.1%2Bcu118-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6b64a313866b2a5bb43d4c6906e8820d3dc844f89a97c00dc5c8b92e6b70fe17" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchaudio-2.7.1%2Bcu118-cp312-cp312-win_amd64.whl", hash = "sha256:e214b04cd40be5e733f5915879f8cbe2de889661ca179fd86998bbadf3740f44" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchaudio-2.7.1%2Bcu118-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:550f0c97348d02e5e18d708a4b29f54b63920d5263b56467f0592ab7544b1772" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchaudio-2.7.1%2Bcu118-cp313-cp313-win_amd64.whl", hash = "sha256:15a3bc02bd0ecf6a3a90c7aa06b54f2b89862deac220eac2421794347eaf9737" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchaudio-2.7.1%2Bcu118-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:29da4e463bc2f94dcf2b46c0ba05dee3751efa7f880278cbfdf38b147e359a3d" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchaudio-2.7.1%2Bcu118-cp313-cp313t-win_amd64.whl", hash = "sha256:28b31c05d75b0b35e34e8627409f30def4bc8f2ebebd2dc75956999e690ed007" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchaudio-2.7.1%2Bcu118-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7571361f1e431610e1e8317fabf9bcf121de635a173162b21f46afbf6fe88aca" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchaudio-2.7.1%2Bcu118-cp39-cp39-win_amd64.whl", hash = "sha256:697082f870d81222850f627e56fc4a12ed1aa49d85b4a6f89cbf098e158a0268" },
-]
-
-[[package]]
-name = "torchaudio"
-version = "2.11.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/8c/d9/357eb5fe4e19a861e6fa1af4d9f535e8fa8692336e6cf436e8a21262e054/torchaudio-2.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ebb59c694909eccb5d61b7cc199d297692012c43286e36d92983aa7bad7586d", size = 684145, upload-time = "2026-03-23T18:13:46.671Z" },
- { url = "https://files.pythonhosted.org/packages/94/77/0eec7f175d88f312296bd5b11c23bd58da37c1021f53da3db4df449ce3ee/torchaudio-2.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:492dd64645e9d0bb843e94f1d9a4d1e31426262ffc594fafecc1697df9df5eb9", size = 684142, upload-time = "2026-03-23T18:13:36.805Z" },
- { url = "https://files.pythonhosted.org/packages/f1/b1/77658817acacd01a72b714440c62f419efc4d90170e704e8e7a2c0918988/torchaudio-2.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a1cf1acc883bee9cb906a933572fed6a8a933f86ef34e9ea7d803f72317e8c1b", size = 684226, upload-time = "2026-03-23T18:13:40.023Z" },
- { url = "https://files.pythonhosted.org/packages/fb/9e/f76fcd9877c8c78f258ee34e0fb8291fdb91e6218d582d9ca66b1e4bd4ae/torchaudio-2.11.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:e3f9696a9ef1d49acc452159b052370c636406d072e9d8f10895fda87b591ea9", size = 679904, upload-time = "2026-03-23T18:13:28.329Z" },
- { url = "https://files.pythonhosted.org/packages/fe/ce/52c652d30af7d6e96c8f1735d26131e94708e3f38d852b8fa97958804dd8/torchaudio-2.11.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:bda09ea630ae7207384fb0f28c35e4f8c0d82dd6eba020b6b335ad0caa9fed49", size = 680814, upload-time = "2026-03-23T18:13:17.08Z" },
- { url = "https://files.pythonhosted.org/packages/39/fe/ffa618b4f0d9732d7df7a2fa2bd48657d896599bc224e5af3c70d46c546b/torchaudio-2.11.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:cc09cd1f6015b8549e7fe255fb1be5346b57e7fee06541d3f3dbb012d8c4715f", size = 679901, upload-time = "2026-03-23T18:13:25.472Z" },
- { url = "https://files.pythonhosted.org/packages/60/84/1c792b0b700eac9a96772cfd9f96c097b17bca3234a2fde3c64b8063660d/torchaudio-2.11.0-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:da2725e250866da42a12934c9a6552f65a18b7187fd7a6221387f0e605fb3b96", size = 679926, upload-time = "2026-03-23T18:13:24.452Z" },
-]
-
-[[package]]
-name = "torchaudio"
-version = "2.11.0+cu126"
-source = { registry = "https://download.pytorch.org/whl/cu126" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
-]
-wheels = [
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:60898c30cf8a7c4837aecae9522e5e4a0e942fd9a37a4ea7c40726b78cc8ddec" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:8352b8d541ebb4abe2bbb9b4523c6ba10c9736a702d265dff54a3bfe2f29265d" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp310-cp310-win_amd64.whl", hash = "sha256:a3a4d763f582bccda1e1b44ccc2cf059040f5dcbdd897ef673644e812668594b" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:8a35d88d4a96edc2d7e5038b359a637026ba7441fa6011fd217453f0f8698149" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:233dca0d0048bc86ca0505c57fee08ff7c019d11dd14db397050d15d8fa94520" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp311-cp311-win_amd64.whl", hash = "sha256:bb5f58154b78c618006716d1eebbe38a161149e6b46f88a0b1b12ce6ede0acd3" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:2ccbbe4555d17c6a7d9bddf76a9c608d6e4ed9b9ecb63cc02f4c4bcf4ae8f05b" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:fe8ba48c690cacc58087603c4d853580006e3d1c0ebe22c0d75f99a004bc981a" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp312-cp312-win_amd64.whl", hash = "sha256:ca5b7815c6952c79c65dce9a78eb96be8b73a8b291f82ca473812a910cdc9fbc" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:6899f8f5f2e4cb046417da52ef0fa10f19551327d8ba4b5f874ad64883860be5" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:641f84f2dd1aebf7f1cc6043a70ebae51182b216b5a7a251c6d51949cee762b5" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp313-cp313-win_amd64.whl", hash = "sha256:bb5d55842b8b52ab7a67a6bd0ec23b56dfca4f163e8d2e053d6a24a8192f46c0" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:aa117dcaab59e3dd295a54b6423430d0bfba4011b80e4cff65fb03086abc0a15" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:67e8f380d0d2c5861e5fc135780c2939c9f40b02431ebcb61d996562d8f57682" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp313-cp313t-win_amd64.whl", hash = "sha256:7ab11a044dd5cdf72e9ed90be1874fa31460dfd120cfb475addc8a92abc50c4b" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:218948655e79cd5cbdba4d6763d29465405401b95e52aee2edb56b05b0428c93" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:08800408b1ab0a1992aea704b788103285703d009e232228886f9efa3cbc6a1b" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp314-cp314-win_amd64.whl", hash = "sha256:6c13443d27de1297ebbf37b84c2b6a1f0cfe36459fc7740fe28cced68264b1ee" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:8aa73ae00e0e0bbf901015089141c25e4080216f56bb9b33367ac209890af7d3" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:7fb725b644927fff30b0b23cbd2de155016b759382efd7b02a33ec77372072f5" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchaudio-2.11.0%2Bcu126-cp314-cp314t-win_amd64.whl", hash = "sha256:589a66aacbb183db71b64ed05c46f08b3ce3b15ceefd51e3935ffab16905ef11" },
-]
-
-[[package]]
-name = "torchvision"
-version = "0.22.1+cu118"
-source = { registry = "https://download.pytorch.org/whl/cu118" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
-]
-dependencies = [
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
- { name = "pillow", version = "11.3.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
- { name = "torch", version = "2.7.1+cu118", source = { registry = "https://download.pytorch.org/whl/cu118" }, marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
-]
-wheels = [
- { url = "https://download-r2.pytorch.org/whl/cu118/torchvision-0.22.1%2Bcu118-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:64954400493caf614b5aebded9b5a63eda8753cea9f8a1e32752eb2adc6ed383" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchvision-0.22.1%2Bcu118-cp310-cp310-win_amd64.whl", hash = "sha256:608ea6cb93eb66cdf273cab41122f83191c198c3c4ff583609f0e6e184f52865" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchvision-0.22.1%2Bcu118-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:6dd3d825fb4a75eae887665d1da812a360d69273118bfa17616c836bfb466627" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchvision-0.22.1%2Bcu118-cp311-cp311-win_amd64.whl", hash = "sha256:98a626661670cd96faeef05b33a140a09f9f134b0ab38d127b6ebbcf73d42bf5" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchvision-0.22.1%2Bcu118-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:e4a8696261b8c64297a2683656f4ee64418d7ee391627702fe5c06b72ce81123" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchvision-0.22.1%2Bcu118-cp312-cp312-win_amd64.whl", hash = "sha256:3e927a3b0b08c7582cfa09e5f16b35435de390a612cfe76eed1418ab7b68d6b6" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchvision-0.22.1%2Bcu118-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:9e0306a2f0452ea454a5e8b9e89b331c2d7fed6d4c4dcc1358b4f8cc21fc130e" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchvision-0.22.1%2Bcu118-cp313-cp313-win_amd64.whl", hash = "sha256:2dd71de1013c1929ea404832b863f769f3fa8b466cc7653b567becf16eedc91b" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchvision-0.22.1%2Bcu118-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:c8a1e3862f5f7a0c0446211db467be247efe03c46889e1eecbae760f96d2b98e" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchvision-0.22.1%2Bcu118-cp313-cp313t-win_amd64.whl", hash = "sha256:ab848efeb898a7844d7a245e62d2a7982b3d56daf0619283103d8fff6b694d3c" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchvision-0.22.1%2Bcu118-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:11d1b97a236bd58bafcb8ce067b676de5f7793402e3869c832eeb8d30d0542fc" },
- { url = "https://download-r2.pytorch.org/whl/cu118/torchvision-0.22.1%2Bcu118-cp39-cp39-win_amd64.whl", hash = "sha256:6f62a2d9f4d2ac69e83108f9375e03e31350c57db82d017db4a57c9fbb41fd61" },
-]
-
-[[package]]
-name = "torchvision"
-version = "0.23.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "pillow", version = "11.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "torch", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/4d/49/5ad5c3ff4920be0adee9eb4339b4fb3b023a0fc55b9ed8dbc73df92946b8/torchvision-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7266871daca00ad46d1c073e55d972179d12a58fa5c9adec9a3db9bbed71284a", size = 1856885, upload-time = "2025-08-06T14:57:55.024Z" },
- { url = "https://files.pythonhosted.org/packages/f0/d7/15d3d7bd8d0239211b21673d1bac7bc345a4ad904a8e25bb3fd8a9cf1fbc/torchvision-0.23.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:49aa20e21f0c2bd458c71d7b449776cbd5f16693dd5807195a820612b8a229b7", size = 1856884, upload-time = "2025-08-06T14:58:00.237Z" },
- { url = "https://files.pythonhosted.org/packages/df/1d/0ea0b34bde92a86d42620f29baa6dcbb5c2fc85990316df5cb8f7abb8ea2/torchvision-0.23.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e0e2c04a91403e8dd3af9756c6a024a1d9c0ed9c0d592a8314ded8f4fe30d440", size = 1856885, upload-time = "2025-08-06T14:58:06.503Z" },
- { url = "https://files.pythonhosted.org/packages/91/37/45a5b9407a7900f71d61b2b2f62db4b7c632debca397f205fdcacb502780/torchvision-0.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1c37e325e09a184b730c3ef51424f383ec5745378dc0eca244520aca29722600", size = 1856886, upload-time = "2025-08-06T14:58:05.491Z" },
- { url = "https://files.pythonhosted.org/packages/05/35/72f91ad9ac7c19a849dedf083d347dc1123f0adeb401f53974f84f1d04c8/torchvision-0.23.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:2df618e1143805a7673aaf82cb5720dd9112d4e771983156aaf2ffff692eebf9", size = 2047192, upload-time = "2025-08-06T14:58:11.813Z" },
- { url = "https://files.pythonhosted.org/packages/d5/3e/f1f3bb3dd452b98ec2eba4820d777440abceb3d3a428a6c8243006fe47e5/torchvision-0.23.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b190db205f90206c230fc2f91cbdfd5733334babc0e0d19bddb90a40b8cf26c2", size = 1856927, upload-time = "2025-08-06T14:58:18.919Z" },
-]
-
-[[package]]
-name = "torchvision"
-version = "0.25.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "python_full_version >= '3.14' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.13.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.12.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.11.*' and sys_platform != 'linux' and sys_platform != 'win32'",
- "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'",
-]
-dependencies = [
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "numpy", version = "2.4.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "pillow", version = "12.1.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
- { name = "torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' and sys_platform != 'linux' and sys_platform != 'win32'" },
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/50/ae/cbf727421eb73f1cf907fbe5788326a08f111b3f6b6ddca15426b53fec9a/torchvision-0.25.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a95c47abb817d4e90ea1a8e57bd0d728e3e6b533b3495ae77d84d883c4d11f56", size = 1874919, upload-time = "2026-01-21T16:27:47.617Z" },
- { url = "https://files.pythonhosted.org/packages/3e/be/c704bceaf11c4f6b19d64337a34a877fcdfe3bd68160a8c9ae9bea4a35a3/torchvision-0.25.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:db74a551946b75d19f9996c419a799ffdf6a223ecf17c656f90da011f1d75b20", size = 1874923, upload-time = "2026-01-21T16:27:46.574Z" },
- { url = "https://files.pythonhosted.org/packages/56/3a/6ea0d73f49a9bef38a1b3a92e8dd455cea58470985d25635beab93841748/torchvision-0.25.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c2abe430c90b1d5e552680037d68da4eb80a5852ebb1c811b2b89d299b10573b", size = 1874920, upload-time = "2026-01-21T16:27:45.348Z" },
- { url = "https://files.pythonhosted.org/packages/f5/5b/1562a04a6a5a4cf8cf40016a0cdeda91ede75d6962cff7f809a85ae966a5/torchvision-0.25.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:24e11199e4d84ba9c5ee7825ebdf1cd37ce8deec225117f10243cae984ced3ec", size = 1874918, upload-time = "2026-01-21T16:27:39.02Z" },
- { url = "https://files.pythonhosted.org/packages/52/99/dca81ed21ebaeff2b67cc9f815a20fdaa418b69f5f9ea4c6ed71721470db/torchvision-0.25.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a8f8061284395ce31bcd460f2169013382ccf411148ceb2ee38e718e9860f5a7", size = 1896209, upload-time = "2026-01-21T16:27:32.159Z" },
- { url = "https://files.pythonhosted.org/packages/9e/1f/fa839532660e2602b7e704d65010787c5bb296258b44fa8b9c1cd6175e7d/torchvision-0.25.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:620a236288d594dcec7634c754484542dc0a5c1b0e0b83a34bda5e91e9b7c3a1", size = 1896193, upload-time = "2026-01-21T16:27:24.785Z" },
- { url = "https://files.pythonhosted.org/packages/97/36/96374a4c7ab50dea9787ce987815614ccfe988a42e10ac1a2e3e5b60319a/torchvision-0.25.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ad9a8a5877782944d99186e4502a614770fe906626d76e9cd32446a0ac3075f2", size = 1896207, upload-time = "2026-01-21T16:27:23.383Z" },
-]
-
-[[package]]
-name = "torchvision"
-version = "0.25.0+cu126"
-source = { registry = "https://download.pytorch.org/whl/cu126" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
-]
-dependencies = [
- { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')" },
- { name = "numpy", version = "2.4.3", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" },
- { name = "pillow", version = "12.1.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
- { name = "torch", version = "2.10.0+cu126", source = { registry = "https://download.pytorch.org/whl/cu126" }, marker = "(python_full_version >= '3.10' and sys_platform == 'linux') or (python_full_version >= '3.10' and sys_platform == 'win32')" },
-]
-wheels = [
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:74cb9b5cf8ea52d5d6837356ad4efd87f7a208f45b6c50bc1b75cab00c071ab3" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:02525b6a66df95c2b2a8ef9fdfc006e979ae4673e02fb8ecb492d4bdb40ce5c9" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp310-cp310-win_amd64.whl", hash = "sha256:a2e560743e6c8f0c2627accc47f33f0713734ca34de5866db5ac68da0826b0b8" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:1bacf3a2c4c5d77615be7fa77b39976aace0dfeeb580a549776cae192790401d" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:09a1c80106b4cbf750af8af4ef0cde98a03ddd963dcf0f843b89e03a061959ae" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp311-cp311-win_amd64.whl", hash = "sha256:3476ee36355960b9559beee84491b8bd3d062e63ef612dd84a54b3c127eaa5d8" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:968f7dea6a16c8127d23416a55845dffcfe1b08dee7f50cb8cdeb950683a6752" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:5831326b6710366b44c0124ce197b416b7b896efa27340c235081cc7f52870e5" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp312-cp312-win_amd64.whl", hash = "sha256:57a8a103814c344d91c32425ba38a53daac4bf4aab074aaaea7b6bab4c22fb7b" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:96e7bf1662ed63fa8c10ba5b3e63bc86e2099464bf2cce958e3f281b69c1165c" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:7440fa4db8971e795e1788f639a581aa4ba3e5bf36b27e62670f5db0f0675283" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp313-cp313-win_amd64.whl", hash = "sha256:6f7994a27b7fe26f6b4828df20421af004986d01091b98ddbc9e763e384ce60a" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:984bbe9df94d6c948d7ed1d571292bad5a4bb09290f1a66df829eb51d7ed7c9f" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:6952a6f8ae8c428f23f779fa811439f6e57e882a71a25b3132f2d8294917a58c" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp313-cp313t-win_amd64.whl", hash = "sha256:76c2ee4de6b20539a13a45da7b6f5c8be6134131d32b84f342e91ae58112e50e" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:f4cdfc5cec5b3171448644e90986080b929d1a85cab3d425183f3df88443d169" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:8c5805d64a2d07af6c3fda4a5e3feefd6e56b5427a7e0889d8f5e3b35e9db64d" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp314-cp314-win_amd64.whl", hash = "sha256:de1ef298132e23f407ced636e0e849c712ece18849b2762eb3dc14815e1a24bf" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:a3668931bed85a8b6a2942499708b6a1aa2b494b6a5a5a192c03eb57beddd804" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:103dd7793228623f530277abf74d37d083ede9b151cd672530c387ad923def2f" },
- { url = "https://download-r2.pytorch.org/whl/cu126/torchvision-0.25.0%2Bcu126-cp314-cp314t-win_amd64.whl", hash = "sha256:534d3876a834a2dea807402a5c4981b31236a8796f2045869963ee8df87e6633" },
-]
-
-[[package]]
-name = "triton"
-version = "3.3.1"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')",
-]
-dependencies = [
- { name = "setuptools", marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'win32')" },
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/8d/a9/549e51e9b1b2c9b854fd761a1d23df0ba2fbc60bd0c13b489ffa518cfcb7/triton-3.3.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b74db445b1c562844d3cfad6e9679c72e93fdfb1a90a24052b03bb5c49d1242e", size = 155600257, upload-time = "2025-05-29T23:39:36.085Z" },
- { url = "https://files.pythonhosted.org/packages/21/2f/3e56ea7b58f80ff68899b1dbe810ff257c9d177d288c6b0f55bf2fe4eb50/triton-3.3.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b31e3aa26f8cb3cc5bf4e187bf737cbacf17311e1112b781d4a059353dfd731b", size = 155689937, upload-time = "2025-05-29T23:39:44.182Z" },
- { url = "https://files.pythonhosted.org/packages/24/5f/950fb373bf9c01ad4eb5a8cd5eaf32cdf9e238c02f9293557a2129b9c4ac/triton-3.3.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9999e83aba21e1a78c1f36f21bce621b77bcaa530277a50484a7cb4a822f6e43", size = 155669138, upload-time = "2025-05-29T23:39:51.771Z" },
- { url = "https://files.pythonhosted.org/packages/74/1f/dfb531f90a2d367d914adfee771babbd3f1a5b26c3f5fbc458dee21daa78/triton-3.3.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b89d846b5a4198317fec27a5d3a609ea96b6d557ff44b56c23176546023c4240", size = 155673035, upload-time = "2025-05-29T23:40:02.468Z" },
- { url = "https://files.pythonhosted.org/packages/28/71/bd20ffcb7a64c753dc2463489a61bf69d531f308e390ad06390268c4ea04/triton-3.3.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3198adb9d78b77818a5388bff89fa72ff36f9da0bc689db2f0a651a67ce6a42", size = 155735832, upload-time = "2025-05-29T23:40:10.522Z" },
- { url = "https://files.pythonhosted.org/packages/6d/81/ac4d50af22f594c4cb7c84fd2ad5ba1e0c03e2a83fe3483ddd79edcd7ec7/triton-3.3.1-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f6139aeb04a146b0b8e0fbbd89ad1e65861c57cfed881f21d62d3cb94a36bab7", size = 155596799, upload-time = "2025-05-29T23:40:18.949Z" },
-]
-
-[[package]]
-name = "triton"
-version = "3.6.0"
-source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
- "(python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')",
- "(python_full_version == '3.13.*' and sys_platform == 'linux') or (python_full_version == '3.13.*' and sys_platform == 'win32')",
- "(python_full_version == '3.12.*' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'win32')",
- "(python_full_version == '3.11.*' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'win32')",
- "(python_full_version == '3.10.*' and sys_platform == 'linux') or (python_full_version == '3.10.*' and sys_platform == 'win32')",
-]
-wheels = [
- { url = "https://files.pythonhosted.org/packages/44/ba/b1b04f4b291a3205d95ebd24465de0e5bf010a2df27a4e58a9b5f039d8f2/triton-3.6.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6c723cfb12f6842a0ae94ac307dba7e7a44741d720a40cf0e270ed4a4e3be781", size = 175972180, upload-time = "2026-01-20T16:15:53.664Z" },
- { url = "https://files.pythonhosted.org/packages/8c/f7/f1c9d3424ab199ac53c2da567b859bcddbb9c9e7154805119f8bd95ec36f/triton-3.6.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a6550fae429e0667e397e5de64b332d1e5695b73650ee75a6146e2e902770bea", size = 188105201, upload-time = "2026-01-20T16:00:29.272Z" },
- { url = "https://files.pythonhosted.org/packages/0f/2c/96f92f3c60387e14cc45aed49487f3486f89ea27106c1b1376913c62abe4/triton-3.6.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49df5ef37379c0c2b5c0012286f80174fcf0e073e5ade1ca9a86c36814553651", size = 176081190, upload-time = "2026-01-20T16:16:00.523Z" },
- { url = "https://files.pythonhosted.org/packages/e0/12/b05ba554d2c623bffa59922b94b0775673de251f468a9609bc9e45de95e9/triton-3.6.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8e323d608e3a9bfcc2d9efcc90ceefb764a82b99dea12a86d643c72539ad5d3", size = 188214640, upload-time = "2026-01-20T16:00:35.869Z" },
- { url = "https://files.pythonhosted.org/packages/17/5d/08201db32823bdf77a0e2b9039540080b2e5c23a20706ddba942924ebcd6/triton-3.6.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:374f52c11a711fd062b4bfbb201fd9ac0a5febd28a96fb41b4a0f51dde3157f4", size = 176128243, upload-time = "2026-01-20T16:16:07.857Z" },
- { url = "https://files.pythonhosted.org/packages/ab/a8/cdf8b3e4c98132f965f88c2313a4b493266832ad47fb52f23d14d4f86bb5/triton-3.6.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:74caf5e34b66d9f3a429af689c1c7128daba1d8208df60e81106b115c00d6fca", size = 188266850, upload-time = "2026-01-20T16:00:43.041Z" },
- { url = "https://files.pythonhosted.org/packages/3c/12/34d71b350e89a204c2c7777a9bba0dcf2f19a5bfdd70b57c4dbc5ffd7154/triton-3.6.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:448e02fe6dc898e9e5aa89cf0ee5c371e99df5aa5e8ad976a80b93334f3494fd", size = 176133521, upload-time = "2026-01-20T16:16:13.321Z" },
- { url = "https://files.pythonhosted.org/packages/f9/0b/37d991d8c130ce81a8728ae3c25b6e60935838e9be1b58791f5997b24a54/triton-3.6.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10c7f76c6e72d2ef08df639e3d0d30729112f47a56b0c81672edc05ee5116ac9", size = 188289450, upload-time = "2026-01-20T16:00:49.136Z" },
- { url = "https://files.pythonhosted.org/packages/ce/4e/41b0c8033b503fd3cfcd12392cdd256945026a91ff02452bef40ec34bee7/triton-3.6.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1722e172d34e32abc3eb7711d0025bb69d7959ebea84e3b7f7a341cd7ed694d6", size = 176276087, upload-time = "2026-01-20T16:16:18.989Z" },
- { url = "https://files.pythonhosted.org/packages/35/f8/9c66bfc55361ec6d0e4040a0337fb5924ceb23de4648b8a81ae9d33b2b38/triton-3.6.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d002e07d7180fd65e622134fbd980c9a3d4211fb85224b56a0a0efbd422ab72f", size = 188400296, upload-time = "2026-01-20T16:00:56.042Z" },
- { url = "https://files.pythonhosted.org/packages/49/55/5ecf0dcaa0f2fbbd4420f7ef227ee3cb172e91e5fede9d0ecaddc43363b4/triton-3.6.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef5523241e7d1abca00f1d240949eebdd7c673b005edbbce0aca95b8191f1d43", size = 176138577, upload-time = "2026-01-20T16:16:25.426Z" },
- { url = "https://files.pythonhosted.org/packages/df/3d/9e7eee57b37c80cec63322c0231bb6da3cfe535a91d7a4d64896fcb89357/triton-3.6.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a17a5d5985f0ac494ed8a8e54568f092f7057ef60e1b0fa09d3fd1512064e803", size = 188273063, upload-time = "2026-01-20T16:01:07.278Z" },
- { url = "https://files.pythonhosted.org/packages/48/db/56ee649cab5eaff4757541325aca81f52d02d4a7cd3506776cad2451e060/triton-3.6.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b3a97e8ed304dfa9bd23bb41ca04cdf6b2e617d5e782a8653d616037a5d537d", size = 176274804, upload-time = "2026-01-20T16:16:31.528Z" },
- { url = "https://files.pythonhosted.org/packages/f6/56/6113c23ff46c00aae423333eb58b3e60bdfe9179d542781955a5e1514cb3/triton-3.6.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:46bd1c1af4b6704e554cad2eeb3b0a6513a980d470ccfa63189737340c7746a7", size = 188397994, upload-time = "2026-01-20T16:01:14.236Z" },
-]
-
-[[package]]
-name = "typing-extensions"
-version = "4.15.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
-]
-
-[[package]]
-name = "tzdata"
-version = "2025.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/5e/a7/c202b344c5ca7daf398f3b8a477eeb205cf3b6f32e7ec3a6bac0629ca975/tzdata-2025.3.tar.gz", hash = "sha256:de39c2ca5dc7b0344f2eba86f49d614019d29f060fc4ebc8a417896a620b56a7", size = 196772, upload-time = "2025-12-13T17:45:35.667Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c7/b0/003792df09decd6849a5e39c28b513c06e84436a54440380862b5aeff25d/tzdata-2025.3-py2.py3-none-any.whl", hash = "sha256:06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1", size = 348521, upload-time = "2025-12-13T17:45:33.889Z" },
-]
-
-[[package]]
-name = "urllib3"
-version = "2.6.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" },
-]
-
-[[package]]
-name = "zipp"
-version = "3.23.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" },
-]