diff --git a/.github/workflows/nextflow-ci.yml b/.github/workflows/nextflow-ci.yml index 239a8a6c..02b5b968 100644 --- a/.github/workflows/nextflow-ci.yml +++ b/.github/workflows/nextflow-ci.yml @@ -5,19 +5,21 @@ on: branches: ["main", "dev"] paths: - "workflows/**" + - ".github/workflows/nextflow-ci.yml" pull_request: branches: ["main"] paths: - "workflows/**" + - ".github/workflows/nextflow-ci.yml" jobs: lint-and-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - - uses: actions/setup-java@v4 + - uses: actions/setup-java@v5.2.0 with: distribution: temurin java-version: 17 @@ -30,7 +32,7 @@ jobs: - name: Install nf-test run: | - NF_TEST_VERSION=$(curl -s https://api.github.com/repos/askimed/nf-test/releases/latest | jq -r .tag_name | sed 's/^v//') + NF_TEST_VERSION=0.9.5 wget -qO nf-test.tar.gz "https://github.com/askimed/nf-test/releases/download/v${NF_TEST_VERSION}/nf-test-${NF_TEST_VERSION}.tar.gz" tar -xzf nf-test.tar.gz mkdir -p "$HOME/.nf-test" @@ -39,11 +41,26 @@ jobs: sudo chmod +x /usr/local/bin/nf-test - name: Lint Nextflow files + # `nextflow lint` does not load Groovy classes from `lib/`, so it + # falsely reports `Helpers` as undefined wherever the workflow calls + # `Helpers.X(...)`. We strip ANSI colour codes from the output, drop + # those known false-positives, and re-derive the exit status. run: | + tmp=$(mktemp) nextflow lint \ workflows/preproc/preproc_rawtiles.nf \ workflows/reconst_3d/soct_3d_reconst.nf \ - workflows/reconst_2.5d/soct_2.5d_reconst.nf + workflows/reconst_2.5d/soct_2.5d_reconst.nf \ + > "$tmp" 2>&1 || true + sed 's/\x1b\[[0-9;]*[a-zA-Z]//g' "$tmp" \ + | awk '/`Helpers` is not defined/ { skip = 3; next } skip > 0 { skip--; next } { print }' \ + > "$tmp.filt" + cat "$tmp.filt" + if grep -qE '^Error ' "$tmp.filt"; then + echo "::error::nextflow lint reported errors (excluding Helpers false-positives)" + exit 1 + fi + echo "Lint passed (Helpers false-positives ignored: lib/ classes are loaded at runtime but not by 'nextflow lint')." - name: Validate Nextflow configs run: | diff --git a/.gitignore b/.gitignore index 8a38dc35..11725bae 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,8 @@ eggs/ .eggs/ lib/ lib64/ +!workflows/**/lib/ +!workflows/**/lib/** parts/ sdist/ var/ @@ -184,6 +186,13 @@ workflows/work/ *.nii.gz *.sif +# Test stub-data placeholders. The `*.zarr` rule above ignores zarr directories +# wholesale; for nf-test stub runs we still need the empty directory markers +# (`.gitkeep`) tracked so the workflow's input glob finds at least one +# `mosaic_grid*.ome.zarr` directory. Re-include the directory and its keepers. 
+!workflows/**/tests/data/*.ome.zarr +!workflows/**/tests/data/*.ome.zarr/.gitkeep + # Config files *.ini @@ -196,4 +205,6 @@ draft.py # Exculde subject data folders (symlinks) sub-* -.python-version \ No newline at end of file +.python-version +# Sphinx auto-generated API docs (autoapi) +docs/api/ diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 575f578e..fe730638 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -1,23 +1,23 @@ # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details -# Required version: 2 -# Set the OS, Python version, and other tools you might need build: os: ubuntu-24.04 tools: - python: "3.13" + python: "3.12" + jobs: + create_environment: + - asdf plugin add uv + - asdf install uv latest + - asdf global uv latest + - uv venv "${READTHEDOCS_VIRTUALENV_PATH}" + install: + - UV_PROJECT_ENVIRONMENT="${READTHEDOCS_VIRTUALENV_PATH}" uv sync --extra docs --frozen + build: + html: + - UV_PROJECT_ENVIRONMENT="${READTHEDOCS_VIRTUALENV_PATH}" uv run sphinx-build -T --keep-going -b html docs $READTHEDOCS_OUTPUT/html -# Build documentation in the "docs/" directory with Sphinx sphinx: - configuration: docs/conf.py - -# Optionally, but recommended, -# declare the Python requirements required to build your documentation -# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html -# python: -# install: -# - requirements: docs/requirements.txt - + configuration: docs/conf.py diff --git a/docs/AI_DOCUMENTATION_INDEX.md b/docs/AI_DOCUMENTATION_INDEX.md deleted file mode 100644 index c4bb681c..00000000 --- a/docs/AI_DOCUMENTATION_INDEX.md +++ /dev/null @@ -1,123 +0,0 @@ -# Documentation Index - -> **Note**: This documentation was generated with AI assistance (Claude, Anthropic) - ---- - -## Overview - -Complete documentation for the linumpy microscopy processing library. The library provides tools for processing Serial Optical Coherence Tomography (S-OCT) data into reconstructed 3D volumes. - ---- - -## Pipeline Documentation - -### Workflow Guides - -1. **[PIPELINE_OVERVIEW.md](PIPELINE_OVERVIEW.md)** - Complete overview of preprocessing and 3D reconstruction pipelines -2. **[NEXTFLOW_WORKFLOWS.md](NEXTFLOW_WORKFLOWS.md)** - Nextflow workflow configuration and execution guide -3. **[RECONST_2_5D_WORKFLOW.md](RECONST_2_5D_WORKFLOW.md)** - Legacy 2.5D reconstruction workflow (TIFF mosaic grids) - -### Data Formats - -4. **[MOSAIC_GRID_FORMAT.md](MOSAIC_GRID_FORMAT.md)** - OME-Zarr mosaic grid format specification -5. **[SHIFTS_FILE_FORMAT.md](SHIFTS_FILE_FORMAT.md)** - XY shifts CSV file format and usage - ---- - -## Feature Documentation - -6. **[SLICE_CONFIG_FEATURE.md](SLICE_CONFIG_FEATURE.md)** - Slice selection and filtering system -7. **[SLICE_INTERPOLATION_FEATURE.md](SLICE_INTERPOLATION_FEATURE.md)** - Missing slice reconstruction using registration-based morphing -8. **[GPU_ACCELERATION.md](GPU_ACCELERATION.md)** - GPU acceleration using NVIDIA CUDA/CuPy - ---- - -## Reference - -9. **[SCRIPTS_REFERENCE.md](SCRIPTS_REFERENCE.md)** - Command-line scripts reference guide -10. **[LIBRARY_MODULES.md](LIBRARY_MODULES.md)** - Python library module documentation -11. **[RECONSTRUCTION_DIAGNOSTICS.md](RECONSTRUCTION_DIAGNOSTICS.md)** - Diagnostic tools for troubleshooting reconstruction artifacts -12. **[PIPELINE_PERFORMANCE_ANALYSIS.md](PIPELINE_PERFORMANCE_ANALYSIS.md)** - Pipeline performance benchmarks and optimization guide - ---- - -## Contributing - -13. 
**[CONTRIBUTING.md](CONTRIBUTING.md)** - Contribution guidelines - ---- - -## Source Code Structure - -``` -linumpy/ -├── _thread_config.py # CPU thread management (NumPy/SciPy/Dask/JAX/ITK) -├── io/ # Input/output modules -│ ├── allen.py # Allen Brain Atlas integration -│ ├── data_io.py # Legacy slicer / NIfTI readers -│ ├── npz.py # NPZ file handling -│ ├── slice_config.py # slice_config.csv read/write/stamp helpers -│ ├── test_data.py # Test dataset access -│ ├── thorlabs.py # Thorlabs microscope support -│ └── zarr.py # OME-Zarr I/O -├── gpu/ # GPU acceleration -│ ├── __init__.py # GPU detection & utilities -│ ├── array_ops.py # Per-pixel operations -│ ├── corrections.py # Galvo detection -│ ├── cuda_env.py # CUDA environment utilities -│ ├── fft_ops.py # FFT & phase correlation -│ ├── image_quality.py # GPU image quality assessment -│ ├── interpolation.py # Resampling & transforms -│ ├── morphology.py # Binary operations & filtering -│ └── registration.py # Hybrid GPU/CPU registration -├── microscope/ # Microscope-specific modules -│ └── oct.py # OCT tile reading -├── preproc/ # Preprocessing modules -│ ├── icorr.py # Illumination correction -│ ├── normalization.py # Intensity normalization -│ ├── resampling.py # Mosaic grid resampling utilities -│ └── xyzcorr.py # XYZ correction & galvo shift detection -├── psf/ # Point spread function -│ └── psf_estimator.py # PSF estimation -├── shifts/ # XY shift utilities (cumulative shifts, unit detection) -│ └── utils.py -├── stitching/ # Image stitching -│ ├── FileUtils.py # File handling utilities -│ ├── interpolation.py # Missing-slice interpolation (zmorph) -│ ├── manual_registration.py # GUI-based manual registration -│ ├── mosaic_grid.py # MosaicGrid class + diffusion blending -│ ├── motor.py # Motor-position-based tile placement -│ ├── registration.py # Image registration -│ ├── stacking.py # 3D slice stacking utilities -│ ├── stitch_utils.py # Stitching utilities -│ └── topology.py # Mosaic topology -├── utils/ # Utility modules -│ ├── image_quality.py # Image quality assessment (SSIM, edge, variance) -│ ├── io.py # CLI argument helpers -│ ├── metrics.py # Quality metrics collection -│ ├── orientation.py # Volume orientation codes & RAS transforms -│ └── visualization.py # Orthogonal view screenshots -├── reconstruction.py # Core reconstruction -├── segmentation.py # Segmentation tools -└── utils_images.py # Image utilities -``` - ---- - -## Workflow Files - -``` -workflows/ -├── preproc/ -│ ├── nextflow.config # Preprocessing config -│ └── preproc_rawtiles.nf # Raw tiles → mosaic grids -├── reconst_3d/ -│ ├── diagnostics.nf # Optional diagnostic processes -│ ├── nextflow.config # 3D reconstruction config -│ └── soct_3d_reconst.nf # Mosaic grids → 3D volume -└── reconst_2.5d/ - ├── soct_2.5d_reconst.nf # 2.5D reconstruction workflow - ├── soct_2.5d_reconst_beluga.config # Beluga HPC cluster config - └── soct_2.5d_reconst_docker.config # Docker container config -``` diff --git a/docs/GPU_ACCELERATION.md b/docs/GPU_ACCELERATION.md index 9ad7e082..c41d871f 100644 --- a/docs/GPU_ACCELERATION.md +++ b/docs/GPU_ACCELERATION.md @@ -1,8 +1,6 @@ # GPU Acceleration ---- - ## Overview linumpy supports GPU acceleration for compute-intensive operations using NVIDIA CUDA via CuPy. 
GPU acceleration is **optional** - all functions automatically fall back to CPU (NumPy/SciPy) if:
@@ -11,6 +9,21 @@ linumpy supports GPU acceleration for compute-intensive operations using NVIDIA
 - No CUDA-capable GPU is available
 - GPU memory is insufficient
+```mermaid
+flowchart TD
+    CALL[GPU-aware function called<br/>backend='auto' default] --> CHK1{CuPy installed?}
+    CHK1 -->|no| CPU[Run on CPU<br/>
NumPy / SciPy / SimpleITK] + CHK1 -->|yes| CHK2{CUDA device available?} + CHK2 -->|no| CPU + CHK2 -->|yes| PICK[Auto-select least-loaded GPU] + PICK --> RUN[Run CuPy kernel on device] + RUN -->|OOM / runtime error| CPU + RUN -->|success| OUT([Result]) + CPU --> OUT +``` + +backend selection is per-call (`backend="cpu" | "gpu" | "auto"`); the auto path is the safe default and is what the Nextflow workflows use when `use_gpu=true`. + --- ## Quick Start @@ -21,8 +34,7 @@ nvidia-smi | grep "CUDA Version" # Install linumpy with GPU support (choose your CUDA version) uv pip install 'linumpy[gpu]' # CUDA 12.x (default) -uv pip install 'linumpy[gpu-cuda11]' # CUDA 11.x -uv pip install 'linumpy[gpu-cuda13]' # CUDA 13.x (requires extra setup for JAX) +uv pip install 'linumpy[gpu-cuda13]' # CUDA 13.x # Verify GPU linum_gpu_info.py @@ -43,97 +55,32 @@ linum_diagnose_pipeline.py --benchmark ### CuPy Version Reference -| CUDA Version | CuPy Package | -|--------------|--------------| -| CUDA 11.x | `cupy-cuda11x` | -| CUDA 12.x | `cupy-cuda12x` | -| CUDA 13.x | `cupy-cuda13x` | +| CUDA Version | CuPy Package | linumpy extra | +|--------------|--------------|---------------| +| CUDA 12.x | `cupy-cuda12x` | `linumpy[gpu]` | +| CUDA 13.x | `cupy-cuda13x` | `linumpy[gpu-cuda13]` | --- -## JAX GPU for BaSiCPy (fix_illumination) - -The `fix_illumination` step uses BaSiCPy which is built on JAX. JAX GPU requires additional setup. - -### Important: JAX 0.4.23 Library Requirements - -BaSiCPy requires `jax<=0.4.23`. JAX 0.4.23 was compiled against specific library versions: -- cuSOLVER 11 (libcusolver.so.11) -- cuSPARSE 12 (libcusparse.so.12) -- cuFFT 11 (libcufft.so.11) -- cuBLAS 12 (libcublas.so.12) -- cuDNN 8 (libcudnn.so.8) - -These exact versions are only available in **specific pinned versions** of the `nvidia-xxx-cu12` packages. Newer versions of these packages have different `.so` versions that are **incompatible**. +## BaSiCPy (fix_illumination) -### Automated Setup (Recommended) +The `fix_illumination` step uses BaSiCPy 2.x, which now ships with a +**PyTorch backend** (no JAX). BaSiCPy will use a CUDA-enabled PyTorch wheel +automatically when one is installed; otherwise it runs on CPU. -```bash -# Run the fix script - handles everything -source shell_scripts/fix_jax_cuda_plugin.sh -``` - -This script: -1. Removes conflicting nvidia packages -2. Installs JAX 0.4.23 with **pinned nvidia package versions**: - - `nvidia-cublas-cu12==12.3.4.1` - - `nvidia-cudnn-cu12==8.9.7.29` - - `nvidia-cusolver-cu12==11.5.4.101` - - etc. -3. Applies patchelf fix (required for Linux 6.x+ kernels) -4. Sets up LD_LIBRARY_PATH -5. Tests JAX CUDA with SVD operation - -### Manual Setup - -If you prefer manual setup: +If you only need linumpy's CuPy paths (resampling, FFT, morphology, N4), +no extra steps beyond `pip install 'linumpy[gpu]'` are required. To enable +GPU acceleration of BaSiCPy as well, install a CUDA build of PyTorch: ```bash -# 1. Uninstall all conflicting packages -uv pip uninstall jax jaxlib jax-cuda12-plugin nvidia-cusolver nvidia-cufft \ - nvidia-cusparse nvidia-cublas nvidia-cuda-runtime nvidia-cudnn nvidia-nvjitlink \ - nvidia-cublas-cu12 nvidia-cuda-cupti-cu12 nvidia-cuda-runtime-cu12 \ - nvidia-cudnn-cu12 nvidia-cufft-cu12 nvidia-cusolver-cu12 nvidia-cusparse-cu12 \ - nvidia-nccl-cu12 nvidia-nvjitlink-cu12 - -# 2. Install JAX 0.4.23 with CUDA wheel -uv pip install 'jax==0.4.23' 'jaxlib==0.4.23+cuda12.cudnn89' \ - -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html - -# 3. 
Install PINNED nvidia package versions (critical - newer versions won't work!) -uv pip install \ - 'nvidia-cublas-cu12==12.3.4.1' \ - 'nvidia-cuda-cupti-cu12==12.3.101' \ - 'nvidia-cuda-runtime-cu12==12.3.101' \ - 'nvidia-cudnn-cu12==8.9.7.29' \ - 'nvidia-cufft-cu12==11.0.12.1' \ - 'nvidia-cusolver-cu12==11.5.4.101' \ - 'nvidia-cusparse-cu12==12.2.0.103' \ - 'nvidia-nccl-cu12==2.19.3' \ - 'nvidia-nvjitlink-cu12==12.3.101' - -# 4. Apply patchelf fix (required for modern Linux kernels) -sudo apt install patchelf -JAXLIB_PATH=$(python -c "import jaxlib; print(jaxlib.__path__[0])") -find "$JAXLIB_PATH" -name "*.so" -exec patchelf --clear-execstack {} \; -find $(python -c "import site; print(site.getsitepackages()[0])")/jax_plugins \ - -name "*.so" -exec patchelf --clear-execstack {} \; - -# 5. Set LD_LIBRARY_PATH (before running JAX/BaSiCPy) -SP=$(python -c "import site; print(site.getsitepackages()[0])") -export LD_LIBRARY_PATH="${SP}/nvidia/cublas/lib:${SP}/nvidia/cuda_runtime/lib:${SP}/nvidia/cusolver/lib:${SP}/nvidia/cusparse/lib:${SP}/nvidia/cufft/lib:${SP}/nvidia/cudnn/lib:${LD_LIBRARY_PATH}" - -# 6. Test -python -c "import jax; print(jax.devices()); import jax.numpy as jnp; print(jnp.linalg.svd(jnp.eye(2)))" +# Pick the index URL that matches your CUDA toolkit +uv pip install torch --index-url https://download.pytorch.org/whl/cu121 ``` -### Verify Installation +Verify: ```bash -# Check GPU availability linum_gpu_info.py - -# Run full pipeline diagnostics linum_diagnose_pipeline.py --benchmark ``` @@ -151,7 +98,7 @@ separate `_gpu.py` variant is needed. | `linum_create_mosaic_grid_3d.py` | Volume resize | 5-12x | | `linum_resample_mosaic_grid.py` | Volume resize | 5-12x | | `linum_normalize_intensities_per_slice.py` | Gaussian filter, Otsu threshold | 4-10x | -| `linum_fix_illumination_3d.py` | BaSiCPy via JAX/CUDA | 2-5x | +| `linum_fix_illumination_3d.py` | BaSiCPy via PyTorch/CUDA | 2-5x | | `linum_assess_slice_quality.py` | SSIM, morphology | 3-8x | | `linum_aip_png.py` | Mean projection | ≤1x | | `linum_generate_mosaic_aips.py` | Mean projection | ≤1x | @@ -232,16 +179,19 @@ python -c "import cupy; print(cupy.cuda.runtime.getDeviceCount())" linum_gpu_info.py ``` -### JAX CUDA Issues +### BaSiCPy / PyTorch CUDA Issues -```bash -# Run the fix script -source shell_scripts/fix_jax_cuda_plugin.sh +If `linum_fix_illumination_3d.py` falls back to CPU unexpectedly, verify the +PyTorch CUDA build is installed and visible: -# Or check diagnostics +```bash +python -c "import torch; print(torch.cuda.is_available(), torch.cuda.get_device_name(0))" linum_diagnose_pipeline.py --debug-cuda ``` +Reinstall PyTorch from the matching CUDA index URL (see the BaSiCPy section +above) if `torch.cuda.is_available()` returns `False`. + ### Out of Memory - Reduce batch size / chunk size diff --git a/docs/LIBRARY_MODULES.md b/docs/LIBRARY_MODULES.md index 0b02d316..943bde24 100644 --- a/docs/LIBRARY_MODULES.md +++ b/docs/LIBRARY_MODULES.md @@ -1,854 +1,208 @@ -# Library Modules Documentation +# Library Modules +A concise map of the `linumpy` Python package. Use this page to find which +subpackage owns what, then follow the auto-generated [API +reference](api/linumpy/index) for full signatures and docstrings. ---- - -## Overview - -The linumpy library provides Python modules for microscopy data processing. This document describes the main modules and their functionality. 
- ---- - -## Module Structure +## Package layout ``` linumpy/ -├── _thread_config.py # CPU thread management -├── __init__.py -├── io/ # Input/Output modules (zarr, allen, npz, thorlabs, -│ # test_data, slice_config, data_io) -├── gpu/ # GPU acceleration modules -├── microscope/ # Microscope-specific modules (oct) -├── preproc/ # Preprocessing modules (icorr, normalization, -│ # resampling, xyzcorr) -├── psf/ # Point spread function -├── shifts/ # XY shift CSV utilities -├── stitching/ # Image stitching (registration, motor, stacking, -│ # interpolation, topology, mosaic_grid, ...) -├── utils/ # Utility modules (io, metrics, orientation, -│ # image_quality, visualization) -├── reconstruction.py # Core reconstruction -├── segmentation.py # Segmentation tools -└── utils_images.py # Image utilities -``` - ---- - -## I/O Modules (`linumpy.io`) - -### zarr.py - OME-Zarr I/O - -Read and write OME-Zarr format files. - -```python -from linumpy.io.zarr import read_omezarr, save_omezarr, OmeZarrWriter, AnalysisOmeZarrWriter - -# Read OME-Zarr -image, resolution = read_omezarr("input.ome.zarr") -# image: dask.array.Array -# resolution: tuple (z, x, y) in mm/pixel - -# Save OME-Zarr -save_omezarr(image, "output.ome.zarr", resolution, chunks=(10, 128, 128)) - -# Write large volumes incrementally (traditional power-of-2 pyramid) -writer = OmeZarrWriter("output.ome.zarr", shape, chunks, dtype) -writer[0:10] = data_slice -writer.finalize(resolution, n_levels=5) # 2x downsampling per level - -# Write with analysis-optimized resolutions (10, 25, 50, 100 µm) -writer = AnalysisOmeZarrWriter("output.ome.zarr", shape, chunks, dtype) -writer[0:10] = data_slice -writer.finalize(resolution, [10, 25, 50, 100]) # or use default -writer.finalize(resolution) # defaults to [10, 25, 50, 100] µm +├── cli/ # argparse helpers shared by linum_* scripts +├── config/ # thread/process limits (BLAS, OpenMP, threadpoolctl) +├── geometry/ # crop, galvo correction, interface detection, resampling +├── gpu/ # CuPy-backed FFT, morphology, B-spline, N4, registration +├── imaging/ # orientation, overlays, simple transforms, visualization +├── intensity/ # attenuation, bias-field, normalization, PSF model, +│ # vignette, intensity conversion +├── io/ # OME-Zarr I/O, slice-config, ThorLabs raw, test data +├── metrics/ # PipelineMetrics + per-step metric collectors +├── microscope/ # OCT-specific helpers +├── mosaic/ # mosaic-grid layout, motor positions, stacking, +│ # interpolation, blending, discovery +├── psf/ # PSF extraction and synthetic generation +├── reference/ # Allen brain atlas helpers +├── registration/ # phase correlation, SimpleITK, manual, refinement +├── segmentation/ # brain tissue segmentation +└── stack_alignment/ # shifts CSV I/O, unit detection, filtering ``` -### allen.py - Allen Brain Atlas +## Subpackage cheat sheet -Download and access Allen Brain Atlas data. +### `linumpy.io` -```python -from linumpy.io.allen import download_allen_atlas - -# Download atlas -download_allen_atlas(output_dir) -``` - -### npz.py - NPZ Files - -Handle NumPy compressed archives. - -```python -from linumpy.io.npz import load_npz, save_npz - -data = load_npz("data.npz") -save_npz("output.npz", array) -``` - -### thorlabs.py - Thorlabs Files - -Read Thorlabs microscope data formats. - -```python -from linumpy.io.thorlabs import read_thorlabs - -data = read_thorlabs("thorlabs_file") -``` +OME-Zarr is the canonical on-disk format. 
Two writers are provided: -### test_data.py - Test Data +* `OmeZarrWriter` — generic chunked writer with power-of-2 pyramid levels. +* `AnalysisOmeZarrWriter` — writes analysis-friendly resolution levels + (10/25/50/100 µm) for the final 3D volume. -Access test datasets for development. +Common entry points: ```python +from linumpy.io.zarr import read_omezarr, save_omezarr, OmeZarrWriter, AnalysisOmeZarrWriter from linumpy.io.test_data import get_data - -# Get path to test data -raw_tiles_path = get_data('raw_tiles') -``` - -### slice_config.py - Slice Config I/O - -Shared helpers for reading, writing, and stamping `slice_config.csv` — the -per-slice trace file threaded through the reconstruction pipeline. Each -pipeline stage that makes a per-slice decision (quality assessment, rehoming -correction, auto-exclusion, missing-slice interpolation) stamps its flag -columns via this module and hands the enriched file to the next stage. - -```python -from linumpy.io.slice_config import ( - CANONICAL_COLUMNS, # list of pipeline decision columns - read, # load slice_config.csv as dict keyed by slice_id - write, # write dict back to CSV with canonical column order - stamp, # update per-slice flags (use, auto_excluded, etc.) and write - merge_fragments, # merge per-slice manifest fragments into a single CSV - force_skip_slices, # return set of slice_ids where use=false or auto_excluded=true -) -``` - -The module does not implement file locking; concurrency safety is provided by -Nextflow's channel discipline (each process receives an immutable copy; merges -happen in a single downstream process). - -### data_io.py - Slicer Data I/O - -Legacy readers for slicer-format volumes, NIfTI files and tile metadata. - -```python -from linumpy.io.data_io import ( - listSlicesInDir, - getSliceListIndices, - load_volume, - loadTiffImage, - saveImage, -) -``` - ---- - -## Microscope Module (`linumpy.microscope`) - -### oct.py - OCT Tile Reading - -Read raw OCT tiles with metadata and optional corrections. - -```python -from linumpy.microscope.oct import OCT - -# Open tile -tile = OCT("/path/to/tile_x00_y00_z00") - -# Access properties -shape = tile.shape # (z, x, y) -resolution = tile.resolution # (res_z, res_x, res_y) -dimension = tile.dimension # Physical dimensions -position = tile.position # Stage position (if available) - -# Read data with corrections -data = tile.load_image( - crop=True, # Crop galvo return region - fix_galvo_shift=True, # Auto-detect and fix galvo artifacts - fix_camera_shift=False # Fix camera timing shift (old data) -) -``` - -**Properties:** -- `shape`: Data shape (z, x, y) -- `resolution`: Pixel size in mm -- `dimension`: Physical dimensions in mm -- `position`: Stage position (x, y, z) in mm -- `position_available`: Whether position metadata exists - -**load_image() Parameters:** -- `crop`: Remove extra pixels from galvo return -- `fix_galvo_shift`: - - `True`: Auto-detect artifact and fix if confident (≥0.3 confidence) - - `int`: Apply specific shift value - - `False`: No correction -- `fix_camera_shift`: Correct camera timing offset (legacy data) - ---- - -## Preprocessing Modules (`linumpy.preproc`) - -### normalization.py - Intensity Normalization - -Normalize OCT volume intensities based on agarose background. 
- -```python -from linumpy.preproc.normalization import normalize_volume - -# Normalize volume intensities -# agarose_mask: 2D binary mask indicating agarose regions -normalized, background_thresholds = normalize_volume( - vol, # Input volume (Z, X, Y) - agarose_mask, # 2D mask for agarose detection - percentile_max=99.9 # Clip values above this percentile -) +from linumpy.io import slice_config # slice_config.json reader/writer +from linumpy.io.thorlabs import ThorImageOCT ``` -### icorr.py - Illumination Correction +`read_omezarr(path, level=0)` returns a `(zarr.Array, voxel_size)` tuple. The +voxel size is ordered to match the array axes (Z, Y, X for 3D volumes; +Y, X for 2D mosaics). -Correct illumination inhomogeneity. +See [Mosaic Grid Format](MOSAIC_GRID_FORMAT.md) and +[Slice Config Feature](SLICE_CONFIG_FEATURE.md) for format details. -```python -from linumpy.preproc.icorr import estimate_illumination, apply_illumination_correction +### `linumpy.cli` and `linumpy.config` -# Estimate illumination profile -profile = estimate_illumination(image) +`linumpy.cli.args` provides `add_overwrite_arg`, `add_processes_arg`, +`parse_processes_arg`, `assert_output_exists`, and `get_available_cpus` +(honours `LINUMPY_MAX_CPUS` / `LINUMPY_RESERVED_CPUS` env vars). -# Apply correction -corrected = apply_illumination_correction(image, profile) -``` +`linumpy.config.threads` configures BLAS/OpenMP/threadpoolctl thread caps and +is imported as a side-effect at the top of every `linum_*` script (before +NumPy/SciPy). -### xyzcorr.py - XYZ Corrections +### `linumpy.metrics` -Apply spatial corrections including galvo shift detection and correction. +Structured metrics collected at every pipeline stage and aggregated into +`PipelineMetrics`. Per-step collectors live alongside the dataclass. ```python -from linumpy.preproc.xyzcorr import ( - detect_galvo_shift, - detect_galvo_for_slice, - detect_galvo_artifact_presence, - fix_galvo_shift, - findTissueInterface, - cropVolume -) - -# Detect galvo shift with confidence score -aip = volume.mean(axis=0) # Average intensity projection -shift, confidence = detect_galvo_shift( - aip, - n_pixel_return=40, # Number of pixels in galvo return region - return_confidence=True # Return confidence score (0-1) -) - -# Only apply fix if confident (artifact is present) -if confidence >= 0.3: - corrected = fix_galvo_shift(volume, shift=shift, axis=1) - -# Or apply with known shift value -corrected = fix_galvo_shift(volume, shift=15, axis=1) - -# For slice-level detection (samples multiple tiles, skips background) -shift, confidence = detect_galvo_for_slice( - tiles, # zarr array of tiles - n_extra=40, # Number of extra pixels from galvo return - threshold=0.6, # Minimum confidence threshold - n_samples=5, # Number of tiles to sample - min_intensity=20.0 # Skip tiles with mean intensity below this -) - -# For batch processing: check artifact presence separately -presence_score = detect_galvo_artifact_presence( - aip, - n_pixel_return=40, - detected_shift=shift -) -``` - -**Galvo Shift Detection:** - -The galvo mirror in OCT systems can cause horizontal banding artifacts when the galvo return region is not at the edge of the raw tile data. The detection system has three parts: - -1. **`detect_galvo_shift()`** - Finds *where* the galvo return boundary is located - - Analyzes average A-line intensity profile - - Searches for the shift that maximizes boundary discontinuities - - Returns shift value (in pixels) and optional confidence score - -2. 
**`detect_galvo_for_slice(tiles, n_extra, ...)`** - Slice-level detection - - Samples tiles from the center of the mosaic (more likely to contain tissue) - - Skips background tiles with low mean intensity - - Returns the detection with highest confidence - - Returns `(0, confidence)` if no artifact detected above threshold - -**Confidence Score Interpretation (0-1):** -| Score | Meaning | Action | -|-------|---------|--------| -| < 0.5 | No clear artifact detected | Skip correction | -| ≥ 0.5 | Galvo artifact likely present | Apply correction | -| > 0.7 | Clear galvo artifact | High confidence | - -**Key Algorithm Details:** -- Uses **gradient-based detection** to find intensity discontinuities -- Finds pairs of high gradients separated by exactly `n_pixel_return` pixels -- Checks B-scan subregions for subtle artifacts only visible in parts of the tile -- Validates using peak dominance, boundary gradient ranking, and intensity contrast -- Default threshold: 0.6 (configurable via `galvo_threshold` parameter) - -### resampling.py - Mosaic Grid Resampling - -Resample mosaic grid volumes to a target isotropic resolution, processing tile-by-tile to avoid loading the full volume into memory. - -```python -from linumpy.preproc.resampling import resample_mosaic_grid - -# Resample to 10 µm isotropic -resample_mosaic_grid( - vol, # Dask/Zarr array with chunk structure (Z, nx*h, ny*w) - source_res, # Source resolution (res_z, res_y, res_x) - target_res_um=10.0, # Target isotropic resolution in µm - n_levels=5, # Number of output pyramid levels - out_path="output.ome.zarr" -) -``` - ---- - -## PSF Module (`linumpy.psf`) - -### psf_estimator.py - PSF Estimation - -Estimate and model point spread functions. - -```python -from linumpy.psf.psf_estimator import estimate_psf, apply_deconvolution - -# Estimate PSF from data -psf = estimate_psf(image) - -# Apply deconvolution -deconvolved = apply_deconvolution(image, psf) -``` - ---- - -## Stitching Module (`linumpy.stitching`) - -### registration.py - Image Registration - -Register images using various methods. - -```python -from linumpy.stitching.registration import ( - register_2d_images_sitk, - apply_transform, - pairWisePhaseCorrelation -) - -# Get initial translation estimate using phase correlation -deltas = pairWisePhaseCorrelation(fixed_image, moving_image) -initial_translation = (deltas[1], deltas[0]) # (x, y) order for SimpleITK - -# Register 2D images with phase correlation initialization -transform, moving_registered, error = register_2d_images_sitk( - fixed_image, - moving_image, - metric='MSE', # MSE, CC, AntsCC, MI - method='affine', # affine, euler, translation - max_iterations=2500, - grad_mag_tol=1e-6, - moving_mask=None, - fixed_mask=None, - return_3d_transform=True, - initial_translation=initial_translation, # Optional: phase correlation result - initial_step=None # Optional: optimizer step size (auto-reduced with initial_translation) -) - -# Apply transform to volume -transformed = apply_transform(volume, transform) -``` - -**Note:** When `initial_translation` is provided, the optimizer uses a smaller step size (1.0 vs 4.0 pixels) to prevent drifting away from the correct solution. - -### stitch_utils.py - Stitching Utilities - -Helper functions for stitching operations. - -```python -from linumpy.stitching.stitch_utils import ( - compute_overlap, - blend_images -) -``` - -### topology.py - Mosaic Topology - -Manage tile arrangements and topology. 
- -```python -from linumpy.stitching.topology import ( - build_topology_graph, - find_optimal_path -) -``` - -### motor.py - Motor-Position Tile Placement - -Compute tile pixel positions from motor grid geometry (regular grid with configurable overlap, scale, and rotation). - -```python -from linumpy.stitching.motor import compute_motor_positions - -positions, step_y, step_x = compute_motor_positions( - nx=3, # Number of tiles in X - ny=3, # Number of tiles in Y - tile_shape=(100, 512, 512), - overlap_fraction=0.2, - scale_factor=1.0, # Scale step size (default: no scaling) - rotation_deg=0.0 # Global grid rotation -) -# positions: list of (row_pos, col_pos) pixel positions -``` - -### stacking.py - 3D Slice Stacking Utilities - -Z-overlap detection between consecutive slices using normalized cross-correlation. - -```python -from linumpy.stitching.stacking import find_z_overlap - -best_overlap, best_corr = find_z_overlap( - fixed_vol, # Bottom (fixed) slice volume (Z, Y, X) - moving_vol, # Top (moving) slice volume (Z, Y, X) - slicing_interval_mm=0.05, - search_range_mm=0.1, - resolution_um=3.5 -) -# best_overlap: optimal overlap in Z voxels -# best_corr: correlation score at optimal overlap -``` - -### interpolation.py - Missing Slice Interpolation - -Z-aware morphing interpolation for missing serial sections. The primary entry -point is `interpolate_z_morph(vol_before, vol_after)`, which warps the two -boundary planes via fractional affine transforms (`T**alpha`) and cross-fades -them. Falls back to `interpolate_weighted` when quality gates fail. - -```python -from linumpy.stitching.interpolation import interpolate_z_morph - -volume, diagnostics = interpolate_z_morph(vol_before, vol_after) -# vol_before, vol_after: 3D neighbours on either side of a gap, shape (Z, Y, X) -# Returns: interpolated volume (shape matching min(nz_before, nz_after), H, W) -# and a JSON-serialisable diagnostics dict (method_used, pre/post -# NCC, affine_determinant, fallback_reason, ...). -``` - -See `docs/SLICE_INTERPOLATION_FEATURE.md` for the physical model. - -### manual_registration.py - Manual Registration - -GUI-based manual registration tools. - -```python -from linumpy.stitching.manual_registration import ManualRegistrationGUI -``` - -### FileUtils.py - File Utilities - -File handling utilities for stitching. - -```python -from linumpy.stitching.FileUtils import ( - list_tiles, - parse_tile_name -) -``` - -### mosaic_grid.py - Mosaic Grid Class - -The `MosaicGrid` class manages 2D mosaic grid images (a 2D image containing -all tiles for a slice without overlap). Provides tile iteration, affine tile -placement, and stitching utilities including diffusion-based blending. - -```python -from linumpy.stitching.mosaic_grid import MosaicGrid, getDiffusionBlendingWeights - -mg = MosaicGrid(image, tile_shape=(512, 512), overlap_fraction=0.2) -stitched = mg.stitch(blending_method='diffusion') - -# Blending weights helper (also used standalone) -weights = getDiffusionBlendingWeights(mask_fixed, mask_moving, factor=2) -``` - ---- - -## Utilities Module (`linumpy.utils`) - -### io.py - I/O Utilities - -Command-line argument helpers. 
- -```python -from linumpy.utils.io import ( - add_overwrite_arg, - add_processes_arg, - assert_output_exists, - get_available_cpus, - parse_processes_arg -) - -# Add standard arguments to parser -parser = argparse.ArgumentParser() -add_overwrite_arg(parser) -add_processes_arg(parser) - -# Parse and validate -args = parser.parse_args() -n_processes = parse_processes_arg(args.n_processes) -assert_output_exists(args.output, parser, args) - -# Get available CPUs (respects LINUMPY_MAX_CPUS and LINUMPY_RESERVED_CPUS env vars) -available = get_available_cpus() -``` - -#### CPU Core Management - -The `get_available_cpus()` function respects environment variables for limiting CPU usage: - -```python -import os -from linumpy.utils.io import get_available_cpus - -# Default: uses all CPUs minus 1 -cpus = get_available_cpus() # e.g., 15 on a 16-core system - -# With LINUMPY_RESERVED_CPUS=4 -os.environ['LINUMPY_RESERVED_CPUS'] = '4' -cpus = get_available_cpus() # e.g., 12 on a 16-core system - -# With LINUMPY_MAX_CPUS=8 (takes precedence) -os.environ['LINUMPY_MAX_CPUS'] = '8' -cpus = get_available_cpus() # 8 (or total if less than 8) -``` - -### metrics.py - Pipeline Quality Metrics - -Collect, save, and aggregate quality metrics from pipeline steps. - -```python -from linumpy.utils.metrics import ( +from linumpy.metrics import ( PipelineMetrics, collect_normalization_metrics, - collect_xy_transform_metrics, collect_pairwise_registration_metrics, - collect_interface_crop_metrics, - collect_psf_compensation_metrics, - collect_stack_metrics, - collect_stitch_3d_metrics, aggregate_metrics, - compute_summary_statistics + compute_summary_statistics, ) - -# Manual metrics collection -metrics = PipelineMetrics('my_step', output_dir) -metrics.add_info('input', input_path, 'Input file path') -metrics.add_metric('error', 0.05, unit='pixels', threshold_name='registration_error') -metrics.save() - -# Use step-specific collectors (recommended - simpler) -collect_normalization_metrics(vol, agarose_mask, otsu_thresh, bg_thresh, output_path) -collect_pairwise_registration_metrics(error, tx, ty, rot, best_z, expected_z, output_path) - -# Aggregate metrics from pipeline run -aggregated = aggregate_metrics('/path/to/pipeline/output') -# Returns: {'step_name': [list of metrics dicts], ...} - -# Compute summary statistics -summary = compute_summary_statistics(aggregated['pairwise_registration']) -# Returns: {'count': N, 'status_counts': {...}, 'metric_name': {'mean': ..., 'std': ...}} -``` - -**Available Threshold Names:** -| Name | Warning | Error | Higher is Better | -|------|---------|-------|------------------| -| `registration_error` | 0.05 | 0.15 | No | -| `translation_magnitude` | 30.0 | 50.0 | No | -| `rotation_degrees` | 1.0 | 2.0 | No | -| `correlation` | 0.7 | 0.5 | Yes | -| `mask_coverage` | 0.05 | 0.01 | Yes | -| `agarose_coverage` | 0.05 | 0.01 | Yes | -| `rms_residual` | 5.0 | 15.0 | No | -| `z_offset_std` | 10.0 | 25.0 | No | -| `z_offset_range` | 15.0 | 30.0 | No | - -## XY Shifts Module (`linumpy.shifts`) - -### shifts/utils.py - XY Shift Utilities - -Load and process XY shift CSV files for inter-slice alignment. 
- -```python -from linumpy.shifts.utils import load_shifts_csv, detect_shift_units - -# Load shifts and compute cumulative positions -cumsum, all_ids = load_shifts_csv("shifts_xy.csv") -# cumsum: {slice_id: (cumulative_dx_mm, cumulative_dy_mm)} -# all_ids: sorted list of all slice IDs - -# Detect resolution units (mm vs µm) -res_x_um, res_y_um = detect_shift_units(resolution) -``` - -### orientation.py - Volume Orientation - -Parse 3-letter orientation codes and compute axis permutations/flips for RAS alignment. - -```python -from linumpy.utils.orientation import parse_orientation_code - -# Parse orientation for RAS alignment -axis_permutation, axis_flips = parse_orientation_code('PIR') -# axis_permutation: source indices for each target dimension -# axis_flips: +1 (keep) or -1 (flip) per axis after permutation - -# Example: PIR → (1, 2, 0), (-1, 1, -1) -# Example: SRA → (0, 1, 2), (1, 1, 1) # identity ``` -**Supported letters:** R/L (Right/Left), A/P (Anterior/Posterior), S/I (Superior/Inferior) - -### image_quality.py - Image Quality Assessment - -CPU-based image quality metrics for slice analysis. - -```python -from linumpy.utils.image_quality import ( - compute_ssim_2d, - compute_ssim_3d, - compute_edge_score, - compute_variance_score, - assess_slice_quality, -) - -# Compare two 3D volumes -ssim = compute_ssim_3d(vol1, vol2) - -# Assess overall quality of a slice relative to its neighbors -quality, metrics = assess_slice_quality( - vol, # The slice to assess - vol_before, # Previous slice - vol_after # Next slice -) -# quality: float in [0, 1] -# metrics: dict with ssim, edge_score, variance_score, etc. -``` - -For GPU-accelerated versions see `linumpy.gpu.image_quality`. - -### visualization.py - Orthogonal View Screenshots - -Save orthogonal (XY, XZ, YZ) views of a 3D volume as a figure. +See [Reconstruction Diagnostics](RECONSTRUCTION_DIAGNOSTICS.md). -```python -from linumpy.utils.visualization import save_orthogonal_views, save_annotated_views - -# Basic orthogonal view -save_orthogonal_views( - image, # 3D volume (Z, X, Y) - "view.png", - z_slice=None, # Default: center - x_slice=None, - y_slice=None, - cmap='magma', - percentile_max=99.9 -) +### `linumpy.geometry` -# Annotated view with Z-slice index labels -save_annotated_views( - image, - "annotated_view.png", - n_slices=50, # Number of input slices (for label spacing) - slice_ids=None, # Optional explicit slice IDs - font_size=7, - label_every=1, - show_lines=False -) -``` +Pixel/world geometry, plus OCT-specific corrections: ---- +* `geometry.galvo` — `detect_galvo_shift`, `fix_galvo_shift`, + `detect_galvo_for_slice` (B-scan galvo artifact correction). +* `geometry.interface` — tissue-surface detection. +* `geometry.crop` — crop volumes around the tissue interface. +* `geometry.resampling` — resample mosaic grids and 3D volumes to a target + isotropic resolution. -## Core Modules +### `linumpy.intensity` -### reconstruction.py - Core Reconstruction +Intensity-domain corrections used during preprocessing. -Core reconstruction functions. +* `intensity.attenuation` — depth-dependent attenuation modelling. +* `intensity.bias_field` — N4 bias-field correction (CPU SimpleITK and the + GPU port; `n4_correct`, `n4_correct_per_section`, `compute_tissue_mask`). +* `intensity.normalization` — per-slice histogram matching, Z-profile + smoothing, agarose flattening. +* `intensity.psf_model` — `estimate_psf` and PSF-based deconvolution. +* `intensity.vignette` — vignette/illumination flat-field correction. 
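+
+A minimal sketch of the `intensity.bias_field` entry points listed above.
+The input path and the keyword names (`mask=`, `backend=`) are illustrative
+assumptions rather than confirmed signatures; check the API reference
+before relying on them:
+
+```python
+import numpy as np
+
+from linumpy.io.zarr import read_omezarr
+from linumpy.intensity.bias_field import compute_tissue_mask, n4_correct
+
+# Load one stitched slice volume (Z, Y, X) and pull it into memory.
+vol, voxel_size = read_omezarr("slice_z00.ome.zarr")  # hypothetical path
+data = np.asarray(vol)
+
+# Restrict the N4 fit to tissue so background voxels do not skew the field.
+mask = compute_tissue_mask(data)
+
+# Assumed per-call backend flag: GPU N4 when CuPy and CUDA are available,
+# SimpleITK on CPU otherwise.
+corrected = n4_correct(data, mask=mask, backend="auto")
+```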
-```python -from linumpy.reconstruction import ( - get_tiles_ids, - get_mosaic_info, - getLargestCC -) +### `linumpy.mosaic` -# Get tile IDs from directory -tiles, tile_ids = get_tiles_ids(directory, z=None) -# tiles: list of Path objects -# tile_ids: list of (mx, my, mz) tuples - -# Get mosaic information -mosaic_info = get_mosaic_info( - directory, - z=0, - overlap_fraction=0.2, - use_stage_positions=True -) -# Returns dict with: -# - mosaic_shape, tile_positions, etc. -# - mosaic_xmin_mm, mosaic_ymin_mm -# - tile_resolution +Everything that turns a folder of tiles into a stacked 3D volume. -# Get largest connected component -largest_cc = getLargestCC(binary_mask) -``` +* `mosaic.grid` — `MosaicGrid` (pixel-space tile layout). +* `mosaic.motor` — motor-position handling (`compute_motor_positions`). +* `mosaic.stacking` — `find_z_overlap`, slab assembly utilities. +* `mosaic.interpolation` — `interpolate_z_morph`, `interpolate_weighted`. +* `mosaic.discovery` — discover tiles on disk. +* `mosaic.overlap` — overlap-region utilities. +* `mosaic.quick_stitch` — fast diagnostic stitching. -### segmentation.py - Segmentation +### `linumpy.registration` -Image segmentation tools. +* `registration.phase_correlation` — masked phase correlation primitives. +* `registration.sitk` — `register_2d_images_sitk`, `apply_transform`. +* `registration.refinement` — `find_best_z`, `register_refinement`, + `gradient_magnitude_alignment`, `centre_of_mass_offset`. +* `registration.transforms` — transform composition / decomposition / I/O. +* `registration.manual` — manual landmark registration GUI. -```python -from linumpy.segmentation import ( - segment_tissue -) -``` - -### utils_images.py - Image Utilities +### `linumpy.gpu` -General image processing utilities. - -```python -from linumpy.utils_images import ( - apply_xy_shift, - normalize_image -) +CuPy-backed versions of hot paths. Each public entry point either takes a +`backend="cpu"|"gpu"|"auto"` flag or has an explicit `_gpu` suffix. -# Apply XY shift to image -shifted = apply_xy_shift( - image, # Source image - reference, # Reference (determines output shape) - dy, # Y shift in pixels - dx # X shift in pixels -) -``` +* `gpu.fft_ops`, `gpu.array_ops`, `gpu.morphology`, `gpu.interpolation` + — FFT, element-wise ops, morphology, interpolation primitives. +* `gpu.bias_field`, `gpu.n4`, `gpu.bspline` — GPU N4 and B-spline grid. +* `gpu.image_quality` — GPU image-quality metrics. +* `gpu.registration`, `gpu.corrections` — GPU registration and correction + passes used by `linum_estimate_transform.py`, + `linum_normalize_intensities_per_slice.py`, etc. ---- +See [GPU Acceleration](GPU_ACCELERATION.md) and [N4 GPU](N4_GPU.md). -## Common Patterns +### `linumpy.stack_alignment` -### Reading and Processing a Mosaic Grid +Shifts CSV utilities used to align serial sections. ```python -from linumpy.io.zarr import read_omezarr, save_omezarr -import numpy as np - -# Read -image, resolution = read_omezarr("input.ome.zarr") - -# Process (example: normalize) -data = image[:] # Load into memory -data = (data - np.min(data)) / (np.max(data) - np.min(data)) - -# Save -import dask.array as da -save_omezarr(da.from_array(data), "output.ome.zarr", resolution) +from linumpy.stack_alignment.io import load_shifts_csv, write_shifts_csv +from linumpy.stack_alignment.units import detect_shift_units +from linumpy.stack_alignment.filter import filter_outliers ``` -### Parallel Processing with Multiple Tiles +See [Shifts File Format](SHIFTS_FILE_FORMAT.md). 
-```python -from linumpy.reconstruction import get_tiles_ids -from linumpy.microscope.oct import OCT -from tqdm.contrib.concurrent import process_map +### `linumpy.imaging` -def process_tile(tile_path): - tile = OCT(tile_path) - # Process tile... - return result +Convenience helpers for figures and quick previews. -tiles, tile_ids = get_tiles_ids(directory) -results = process_map(process_tile, tiles, max_workers=8) -``` +* `imaging.orientation` — anatomical-axis labelling. +* `imaging.overlay` — overlay generation for diagnostics. +* `imaging.transform` — 2D affine helpers. +* `imaging.visualization` — matplotlib panels used by diagnostic scripts. -### Registration Workflow +### `linumpy.psf` -```python -from linumpy.io.zarr import read_omezarr -from linumpy.stitching.registration import register_2d_images_sitk, apply_transform - -# Load images -fixed, _ = read_omezarr("fixed.ome.zarr") -moving, _ = read_omezarr("moving.ome.zarr") - -# Register -transform, _, error = register_2d_images_sitk( - fixed[0], # 2D slice - moving[0], - method='affine', - metric='MSE' -) +* `psf.extract` — extract PSF estimates from acquisitions. +* `psf.synthetic` — synthetic PSF generators for tests. -# Apply to full volume -registered = apply_transform(moving, transform) -``` +(Higher-level PSF model lives in `linumpy.intensity.psf_model`.) ---- +### `linumpy.reference` -## Type Hints +* `reference.allen` — download the Allen mouse atlas template, register a + 3D volume to it, and produce RAS-aligned templates. -Most functions include type hints for better IDE support: +### `linumpy.segmentation` -```python -def read_omezarr(path: str | Path) -> tuple[da.Array, tuple[float, ...]]: - ... - -def apply_xy_shift( - image: np.ndarray, - reference: np.ndarray, - dy: float, - dx: float -) -> np.ndarray: - ... -``` +* `segmentation.brain` — tissue / background segmentation in 3D OCT. ---- +### `linumpy.microscope` -## Error Handling +* `microscope.oct.OCT` — OCT acquisition metadata wrapper. -Functions typically raise standard Python exceptions: - -```python -try: - image, res = read_omezarr("nonexistent.ome.zarr") -except FileNotFoundError: - print("File not found") -except ValueError as e: - print(f"Invalid format: {e}") -``` +## How the pieces fit together ---- +A typical pipeline run wires these subpackages as follows: -## Dependencies +1. **Discovery** (`mosaic.discovery`) finds raw tiles on disk and reads + ThorLabs metadata via `io.thorlabs`. +2. **Tile preprocessing** uses `geometry.galvo`, `intensity.vignette`, + `intensity.attenuation`, and `intensity.normalization`. +3. **Mosaic assembly** builds 2D AIPs (`mosaic.grid`) and 3D mosaic grids + per slice, then writes OME-Zarr via `io.zarr`. +4. **Stitching** uses `registration.phase_correlation` (CPU/GPU) plus + `stack_alignment` to compute and store shifts. +5. **Stacking** matches consecutive slices with `registration.refinement` + and assembles the volume with `mosaic.stacking`. +6. **Bias-field & global polish** run `intensity.bias_field` (with the + `gpu.n4` backend when CUDA is available) and `imaging.orientation` / RAS + alignment via `reference.allen`. +7. **Diagnostics** use `metrics` collectors throughout; see + [Reconstruction Diagnostics](RECONSTRUCTION_DIAGNOSTICS.md). 
-Key dependencies used by the library: +## See also -| Package | Purpose | -|---------|---------| -| `numpy` | Array operations | -| `dask` | Lazy/parallel arrays | -| `zarr` | Chunked array storage | -| `SimpleITK` | Image registration | -| `scikit-image` | Image processing | -| `scipy` | Scientific computing | -| `pandas` | Data manipulation | -| `tqdm` | Progress bars | +* Auto-generated API reference: [api/linumpy](api/linumpy/index.rst) +* [Pipeline Overview](PIPELINE_OVERVIEW.md) +* [Scripts Reference](SCRIPTS_REFERENCE.md) +* [Nextflow Workflows](NEXTFLOW_WORKFLOWS.md) diff --git a/docs/MOSAIC_GRID_FORMAT.md b/docs/MOSAIC_GRID_FORMAT.md index 68d81132..655d26bd 100644 --- a/docs/MOSAIC_GRID_FORMAT.md +++ b/docs/MOSAIC_GRID_FORMAT.md @@ -1,8 +1,6 @@ # Mosaic Grid Format (OME-Zarr) ---- - ## Overview Mosaic grids in linumpy are stored in the **OME-Zarr** format, a cloud-optimized, chunked array format designed for large microscopy datasets. The format supports multi-resolution pyramids, metadata, and efficient partial reads. @@ -64,19 +62,19 @@ This is controlled by the `--pyramid_resolutions` parameter in `linum_stack_slic ### 3D Mosaic Grid ``` -Shape: (Z, X, Y) +Shape: (Z, Y, X) ``` | Dimension | Description | |-----------|-------------| | Z | Depth/axial dimension | -| X | First lateral dimension | -| Y | Second lateral dimension | +| Y | First lateral dimension | +| X | Second lateral dimension | ### 2D Mosaic Grid (AIP) ``` -Shape: (X, Y) +Shape: (Y, X) ``` --- @@ -136,8 +134,8 @@ from linumpy.io.zarr import read_omezarr # Read image and resolution image, resolution = read_omezarr("mosaic_grid_3d_z00.ome.zarr") -# image: dask.array.Array with shape (Z, X, Y) -# resolution: tuple (res_z, res_x, res_y) in mm/pixel +# image: dask.array.Array with shape (Z, Y, X) +# resolution: tuple (res_z, res_y, res_x) in mm/pixel print(f"Shape: {image.shape}") print(f"Resolution: {resolution} mm/pixel") diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 00000000..3eb3a286 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,22 @@ +# Minimal makefile for Sphinx documentation +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +.PHONY: help clean html livehtml + +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +clean: + rm -rf "$(BUILDDIR)" api + +html: + @$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +livehtml: + sphinx-autobuild "$(SOURCEDIR)" "$(BUILDDIR)/html" $(SPHINXOPTS) $(O) + +%: + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/N4_GPU.md b/docs/N4_GPU.md index c45e7df3..1b0c965d 100644 --- a/docs/N4_GPU.md +++ b/docs/N4_GPU.md @@ -82,12 +82,11 @@ SimpleITK: where a plain weighted-mean kernel regression would absorb signal into the bias estimate. The fit is computed as three sequential 1-D `tensordot` contractions; per-axis B-spline basis matrices are cached - per pyramid level (see - [linumpy/gpu/bspline.py](../linumpy/gpu/bspline.py)). + per pyramid level (see {py:mod}`linumpy.gpu.bspline`). - **Centred-Gaussian Wiener deconvolution** for histogram sharpening instead of the Vidal-Pantaleoni asymmetric kernel SimpleITK ships. The weighted bin update uses a single `cupy.bincount` call over the full - volume (see [linumpy/gpu/n4.py](../linumpy/gpu/n4.py)). + volume (see {py:mod}`linumpy.gpu.n4`). - **Separable Catmull-Rom upsample** for re-projecting the B-spline lattice back to image space, rather than `cupyx.scipy.ndimage.zoom`. 
- **Single host→device transfer** per call: the volume and mask are pushed @@ -102,8 +101,7 @@ bit-equivalent to SimpleITK. Section 4 quantifies the resulting envelope. ## 4. Equivalency tests -The unit tests in -[linumpy/tests/test_n4_gpu_equivalency.py](../linumpy/tests/test_n4_gpu_equivalency.py) +The unit tests in `linumpy/tests/test_n4_gpu_equivalency.py` pin the GPU backend against SimpleITK on synthetic spherical phantoms with known multiplicative bias. The phantom is built as `vol = truth × bias` inside a sphere mask of radius 1.2 (in normalised @@ -135,8 +133,8 @@ server. ## 5. Performance -Benchmarks measured on a single NVIDIA GPU (47 GB) using -[scripts/diagnostics/linum_benchmark_n4_gpu.py](../scripts/diagnostics/linum_benchmark_n4_gpu.py). +Benchmarks measured on a single NVIDIA GPU (47 GB) using the +`linum_benchmark_n4_gpu` script (see [Scripts reference](SCRIPTS_REFERENCE.md)). The CPU path is `SimpleITK.N4BiasFieldCorrectionImageFilter` with the same control-point spacing and iteration schedule as the GPU path. Both paths include the `shrink_factor` downsample. The GPU column already excludes a @@ -219,8 +217,8 @@ directory. ## 7. Pipeline integration The Nextflow `reconst_3d` workflow exposes a single global GPU switch -(`params.use_gpu`, defined in -[workflows/reconst_3d/nextflow.config](../workflows/reconst_3d/nextflow.config)). +(`params.use_gpu`, defined in the reconst_3d Nextflow config; see +[Nextflow workflows](NEXTFLOW_WORKFLOWS.md)). When set, the `correct_bias_field` process runs the GPU N4 backend with `maxForks = 1` to avoid GPU contention; otherwise it uses the SimpleITK CPU path with `params.processes` threads. No per-process flag is needed: diff --git a/docs/NEXTFLOW_WORKFLOWS.md b/docs/NEXTFLOW_WORKFLOWS.md index 3a6ab3c5..2a078e64 100644 --- a/docs/NEXTFLOW_WORKFLOWS.md +++ b/docs/NEXTFLOW_WORKFLOWS.md @@ -1,8 +1,6 @@ # Nextflow Workflows Guide ---- - ## Overview linumpy uses [Nextflow](https://www.nextflow.io/) for orchestrating complex processing pipelines. Nextflow provides: @@ -609,7 +607,7 @@ Both workflows support GPU acceleration using NVIDIA CUDA via CuPy. GPU processi | `preproc_rawtiles.nf` | `create_mosaic_grid` | Galvo detection, volume resize | | `preproc_rawtiles.nf` | `generate_aip` | Mean projection | | `soct_3d_reconst.nf` | `resample_mosaic_grid` | Volume resize | -| `soct_3d_reconst.nf` | `fix_illumination` | BaSiCPy background correction (JAX on GPU) | +| `soct_3d_reconst.nf` | `fix_illumination` | BaSiCPy background correction (PyTorch on GPU) | | `soct_3d_reconst.nf` | `normalize` | Intensity normalization, percentile clipping | ### Usage @@ -660,13 +658,16 @@ The pipelines provide fine-grained control over CPU usage, allowing you to reser ### Configuration Options -Both pipelines support two approaches: +Both pipelines support two approaches. Defaults differ between workflows: +the **preproc** pipeline ships with `max_cpus = null` and `reserved_cpus = 2`, +while the **3D reconstruction** pipeline uses `max_cpus = 16` and +`reserved_cpus = 4` (see `workflows//nextflow.config`). 
-| Parameter | Default | Description | -|-----------|---------|-------------| -| `max_cpus` | `null` | Explicit maximum CPUs to use (takes precedence) | -| `reserved_cpus` | `2` | Number of cores to keep free for overhead | -| `processes` | `1` | Python processes per Nextflow task | +| Parameter | preproc default | reconst_3d default | Description | +|-----------|-----------------|--------------------|-------------| +| `max_cpus` | `null` | `16` | Explicit maximum CPUs to use (takes precedence) | +| `reserved_cpus` | `2` | `4` | Number of cores to keep free for overhead | +| `processes` | `1` | `1` | Python processes per Nextflow task | ### Usage Examples diff --git a/docs/PIPELINE_OVERVIEW.md b/docs/PIPELINE_OVERVIEW.md index 970cc057..c8b55d0c 100644 --- a/docs/PIPELINE_OVERVIEW.md +++ b/docs/PIPELINE_OVERVIEW.md @@ -1,8 +1,6 @@ # Pipeline Overview ---- - ## Overview The linumpy processing pipeline converts raw S-OCT (Serial Optical Coherence Tomography) microscopy data into reconstructed 3D volumes. The pipeline consists of two main stages: @@ -463,7 +461,7 @@ and shift each A-line so that $z'(y, x) = z - (\hat{z}_0(y, x) - \min \hat{z}_0) $$ \min_{F, D, B_i} \quad \sum_i \| A_i - (F \odot B_i + D)\|_1 + \lambda_F \|F\|_* + \lambda_D \|D\|_1 $$ -where $F$ is the flat-field (smooth, nuclear-norm regularised), $D$ is the dark-field, and $B_i$ is the per-tile baseline. Each tile is then corrected as $\hat{B}_i = (A_i - D) / F$. The JAX implementation runs this on GPU. +where $F$ is the flat-field (smooth, nuclear-norm regularised), $D$ is the dark-field, and $B_i$ is the per-tile baseline. Each tile is then corrected as $\hat{B}_i = (A_i - D) / F$. BaSiCPy 2.x runs this on the GPU via its PyTorch backend when a CUDA build of PyTorch is available. --- @@ -649,7 +647,7 @@ Both pipelines support optional GPU acceleration using NVIDIA CUDA via CuPy. GPU | Preprocessing | `create_mosaic_grid` | Galvo detection, volume resize | | Preprocessing | `generate_aip` | Mean projection | | 3D Reconstruction | `resample_mosaic_grid` | Volume resize | -| 3D Reconstruction | `fix_illumination` | BaSiCPy background correction (JAX on GPU) | +| 3D Reconstruction | `fix_illumination` | BaSiCPy background correction (PyTorch on GPU) | | 3D Reconstruction | `normalize` | Intensity normalization, percentile clipping | ### Running with GPU diff --git a/docs/PIPELINE_PERFORMANCE_ANALYSIS.md b/docs/PIPELINE_PERFORMANCE_ANALYSIS.md deleted file mode 100644 index 55635ac1..00000000 --- a/docs/PIPELINE_PERFORMANCE_ANALYSIS.md +++ /dev/null @@ -1,301 +0,0 @@ -# 3D Reconstruction Pipeline Performance Analysis - -## Executive Summary - -**Root cause of slowdown: `params.processes = 1` default** - -The `fix_illumination` step is now the bottleneck because it defaults to running with `processes = 1`, meaning each slice is processed sequentially. With ~20 slices and the BaSiC algorithm taking 5-10 minutes per slice, this alone could account for 2-3 hours of additional runtime. - -However, the 8x slowdown (4-6h → 32h) suggests **additional factors** may be at play: - -1. Thread limiting being too aggressive -2. GPU acceleration not being utilized -3. **SimpleITK thread pool not being limited** (CRITICAL) -4. Multiprocessing workers not respecting thread limits - ---- - -## Identified CPU Limiting Gaps - -### Gap 1: SimpleITK Thread Pool (CRITICAL) -**Problem**: SimpleITK spawns its own thread pool for registration operations that **ignores** environment variables like `OMP_NUM_THREADS`. 
- -**Impact**: Each `register_pairwise` process can spawn 48+ threads, leading to massive thread oversubscription when multiple slices are processed. - -**Fix Applied**: Added `configure_all_libraries()` calls after SimpleITK import in: -- `linum_estimate_transform.py` -- `linum_interpolate_missing_slice.py` -- `linum_stack_slices_3d.py` (deprecated) - -### Gap 2: Multiprocessing Workers Re-Import Libraries -**Problem**: When using `multiprocessing.Pool` or `pqdm`, each worker process is a fresh Python interpreter that re-imports all libraries. Even though environment variables are inherited, libraries like SimpleITK and numpy need runtime configuration. - -**Impact**: Worker processes don't respect thread limits configured in the main process. - -**Fix Applied**: -- Added `worker_initializer` function in `_thread_config.py` -- Updated `multiprocessing.Pool` calls to use the initializer -- Added `apply_threadpool_limits()` call in `process_tile()` for pqdm workers - -### Gap 3: configure_sitk() Never Called -**Problem**: The `configure_sitk()` function existed but was never called anywhere in the codebase. - -**Fix Applied**: Added automatic SimpleITK configuration in `configure_all_libraries()`. - -### Gap 4: Dask Configuration Only in zarr.py -**Problem**: `configure_dask()` was only called from `linumpy/io/zarr.py`, so scripts that use Dask without zarr.py wouldn't have proper thread limits. - -**Fix Applied**: Included Dask configuration in `configure_all_libraries()`. - -### Gap 5: JAX/XLA Thread Pool (BaSiCPy) -**Problem**: JAX (used by BaSiCPy for the BaSiC algorithm) has its own thread pool controlled by XLA_FLAGS, which was not being set. - -**Impact**: The `fix_illumination` step could spawn excessive threads even when OMP_NUM_THREADS was limited, because JAX ignores OpenMP settings. - -**Fix Applied**: -- Added `XLA_FLAGS` environment variable setting in `_thread_config.py` -- Added `XLA_FLAGS` to Nextflow `beforeScript` in both workflow configs -- Added explicit `XLA_FLAGS` setting in `linum_fix_illumination_3d.py` before JAX import - -**XLA_FLAGS format**: -```bash -export XLA_FLAGS='--xla_cpu_multi_thread_eigen=false intra_op_parallelism_threads=N' -``` - ---- - -## Thread Configuration Architecture - -### Before (Gaps Present) -``` -Nextflow beforeScript → sets OMP_NUM_THREADS - ↓ -Python script imports linumpy._thread_config → sets env vars - ↓ -NumPy/SciPy import → ✓ respects OMP_NUM_THREADS -SimpleITK import → ✗ IGNORES limits, spawns all CPU threads -Dask import → ✗ May not be configured - ↓ -Subprocess workers (pqdm/Pool) → Re-import libraries - ↓ -Workers: NumPy → ✓ inherits env vars -Workers: SimpleITK → ✗ IGNORES limits again -``` - -### After (Gaps Fixed) -``` -Nextflow beforeScript → sets OMP_NUM_THREADS, XLA_FLAGS - ↓ -Python script imports linumpy._thread_config → sets env vars (incl. XLA_FLAGS) - ↓ -All imports complete - ↓ -configure_all_libraries() called → - ✓ NumPy/SciPy (threadpoolctl) - ✓ SimpleITK (ProcessObject.SetGlobalDefaultNumberOfThreads) - ✓ Dask (dask.config) - ✓ Numba (set_num_threads) - ✓ JAX/XLA (XLA_FLAGS environment variable) - ↓ -Subprocess workers created with worker_initializer → - ✓ Re-applies all limits in each worker -``` - -### Process-by-Process Breakdown - -| Process | GPU Support | Parallelism | Est. 
Time/Slice | Bottleneck Risk | -|---------|-------------|-------------|-----------------|-----------------| -| `resample_mosaic_grid` | ✅ Yes | Per-slice parallel | ~2 min | Low | -| `fix_focal_curvature` | ❌ No | Per-slice parallel | ~1 min | Low | -| **`fix_illumination`** | ❌ No | **Uses `params.processes`** | **5-10 min** | **HIGH** | -| `generate_aip` | ❌ No | Per-slice parallel | ~30 sec | Low | -| `estimate_xy_transformation` | ✅ Yes | Per-slice parallel | ~1 min | Low | -| `stitch_3d` | ❌ No | Per-slice parallel | ~2 min | Medium | -| `beam_profile_correction` | ❌ No | Per-slice parallel | ~2 min | Medium | -| `crop_interface` | ❌ No | Per-slice parallel | ~1 min | Low | -| `normalize` | ✅ Yes | Per-slice parallel | ~1 min | Low | -| `create_registration_masks` | ✅ Yes | Per-slice parallel | ~1 min | Low | -| `register_pairwise` | ❌ No | Sequential (by design) | ~5 min | Medium | -| `stack` | ❌ No | Single process | ~10 min | Low | - -### Critical Issue: `fix_illumination` - -```groovy -// In soct_3d_reconst.nf line 62-72 -process fix_illumination { - cpus params.processes // <-- Uses params.processes - - script: - """ - linum_fix_illumination_3d.py ${mosaic_grid} ... --n_processes ${params.processes} - """ -} -``` - -```groovy -// In nextflow.config line 7 -params { - processes = 1 // <-- DEFAULT IS 1! -} -``` - -**Impact**: With 20 slices × 10 minutes each = 200 minutes (~3.3 hours) when running sequentially. - -With `processes = 12`: All slices can run in parallel with internal parallelism, reducing to ~15-20 minutes total. - ---- - -## Thread Configuration Analysis - -The pipeline has a complex thread limiting system: - -### Nextflow `beforeScript` (nextflow.config lines 110-140) -```groovy -int threadsPerProcess = Math.max(1, (int)(maxCpus / numProcesses)) -envVars << "export OMP_NUM_THREADS=${threadsPerProcess}" -``` - -### Python Override (linum_fix_illumination_3d.py lines 13-14) -```python -from os import environ -environ["OMP_NUM_THREADS"] = "1" # <-- HARDCODED TO 1! -``` - -**Problem**: The Python script overrides Nextflow's thread configuration, forcing BaSiC to use single-threaded execution. - ---- - -## Recommended Configuration - -For your 48-core, 512GB RAM, dual A6000 server: - -### nextflow.config changes: - -```groovy -params { - // Change from 1 to 12-16 for parallel processing - processes = 12 // Recommended for 48-core server - - // CPU management - enable_cpu_limits = true - reserved_cpus = 4 // Leave 4 cores for system overhead - - // GPU (already enabled) - use_gpu = true -} -``` - -### Explanation of `processes = 12`: -- 48 cores total - 4 reserved = 44 available -- BaSiC algorithm benefits from ~3-4 threads per worker -- 44 / 3 ≈ 14-15 workers maximum -- Using 12 provides headroom for I/O and other processes - ---- - -## Diagnostic Scripts Created - -### 1. `linum_diagnose_pipeline.py` -Comprehensive Python diagnostic script: -```bash -# Quick check -python scripts/linum_diagnose_pipeline.py --quick - -# Full benchmark -python scripts/linum_diagnose_pipeline.py --benchmark - -# Save to file -python scripts/linum_diagnose_pipeline.py --output diagnosis.json -``` - -### 2. Server Checks -Quick server verification commands: -```bash -nproc && nvidia-smi && python -c "import cupy; print('CuPy OK')" -``` - ---- - -## Verification Checklist - -Run these on your server before the next pipeline execution: - -### 1. Check CPU Configuration -```bash -nproc # Should show 48 -``` - -### 2. 
Check GPU Availability
-```bash
-nvidia-smi  # Should show 2x A6000
-```
-
-### 3. Check CuPy Installation
-```bash
-python3 -c "import cupy; print(cupy.__version__)"
-```
-
-### 4. Check linumpy GPU Module
-```bash
-python3 -c "from linumpy.gpu import GPU_AVAILABLE; print(f'GPU: {GPU_AVAILABLE}')"
-```
-
-### 5. Verify nextflow.config Parameters
-```bash
-grep "processes" /path/to/workflows/reconst_3d/nextflow.config
-# Should show: processes = 12 (or similar)
-```
-
----
-
-## Quick Fix
-
-If you want to run immediately without modifying config files:
-
-```bash
-nextflow run soct_3d_reconst.nf \
-    --input /path/to/input \
-    --output /path/to/output \
-    --processes 12 \
-    --use_gpu true \
-    --reserved_cpus 4 \
-    -resume
-```
-
----
-
-## Additional Optimizations
-
-### 1. Consider GPU-accelerated Illumination Correction
-The BaSiC algorithm doesn't have a GPU version, but the processing could potentially be optimized by:
-- Using JAX backend for BaSiC (if available)
-- Processing multiple slices simultaneously on GPU
-
-### 2. I/O Optimization
-```groovy
-// In nextflow.config
-process {
-    scratch = true          // Currently enabled
-    stageInMode = 'symlink' // Uses symlinks (good)
-    stageOutMode = 'rsync'  // Consider 'move' for faster output
-}
-```
-
-### 3. Memory-Intensive Process Limits
-For processes that use a lot of memory, add `maxForks`:
-```groovy
-withName: "fix_illumination" {
-    maxForks = 6  // Limit concurrent instances
-}
-```
-
----
-
-## Expected Performance After Fixes
-
-| Scenario | fix_illumination Time | Total Pipeline |
-|----------|----------------------|----------------|
-| Current (processes=1) | ~3-4 hours | 25-35 hours |
-| Optimized (processes=12) | ~20-30 min | 4-6 hours |
-
-The 8x improvement comes primarily from parallelizing the fix_illumination step.
diff --git a/docs/RECONSTRUCTION_DIAGNOSTICS.md b/docs/RECONSTRUCTION_DIAGNOSTICS.md
index 1653e0c7..722587aa 100644
--- a/docs/RECONSTRUCTION_DIAGNOSTICS.md
+++ b/docs/RECONSTRUCTION_DIAGNOSTICS.md
@@ -189,6 +189,27 @@ linum_diagnose_reconstruction.py \
 
 ## Troubleshooting Workflow
 
+```mermaid
+flowchart TD
+    A[Reconstruction artifact:<br/>edge mismatch / overhang] --> B[linum_diagnose_reconstruction.py<br/>diagnostic_report.txt]
+    B --> C{Symptom?}
+    C -->|cumulative rotation drift| D[Rotation drift]
+    C -->|individual slice quality| E[Bad slice]
+    C -->|large inter-slice XY jumps| F[Motor / encoder issue]
+    C -->|tile-level mismatch| G[Tile dilation]
+    D --> D1[Set registration_transform=euler<br/>raise registration_max_rotation]
+    E --> E1[auto_assess_quality=true<br/>auto_exclude_enabled=true]
+    F --> F1[detect_rehoming=true<br/>tile_fov_mm=0.875<br/>common_space_refine_unreliable=true]
+    G --> G1[linum_analyze_tile_dilation.py<br/>then per-tile correction]
+    D1 --> R[Re-run with --debug_slices subset]
+    E1 --> R
+    F1 --> R
+    G1 --> R
+    R --> V[Re-run diagnostics<br/>verify improvement]
+    V -->|still bad| C
+    V -->|fixed| DONE([Done])
+```
+
 ### Step 1: Quick Assessment
 ```bash
 # Run diagnostics on existing output
diff --git a/docs/RECONST_2_5D_WORKFLOW.md b/docs/RECONST_2_5D_WORKFLOW.md
index 51210127..1e0e0a6f 100644
--- a/docs/RECONST_2_5D_WORKFLOW.md
+++ b/docs/RECONST_2_5D_WORKFLOW.md
@@ -1,7 +1,5 @@
 # 2.5D Reconstruction Workflow
 
----
-
 ## Overview
 
 The 2.5D reconstruction workflow (`soct_2.5d_reconst.nf`) converts a set of per-slice 2D mosaic
@@ -118,19 +116,18 @@ A border is removed from each tile before processing to avoid edge artifacts.
 
 The workflow runs processes in a linear sequence:
 
-```
-crop_tiles
-  └─→ estimate_illumination_bias
-        └─→ compensate_illumination_bias
-              │
-              ├─→ estimate_position   (pools all compensated grids)
-              │
-              └─→ stitch_mosaic   (per-slice, uses shared position transform)
-                    └─→ stack_mosaic
-                          ├─→ compress_stack → stack.zarr.zip
-                          ├─→ convert_to_omezarr
-                          │     └─→ resample_stack → stack_10um.nii.gz
-                          └─→ (stack.zarr itself)
+```mermaid
+flowchart TD
+    A[crop_tiles] --> B[estimate_illumination_bias]
+    B --> C[compensate_illumination_bias]
+    C --> D[estimate_position<br/>pools all grids]
+    C --> E[stitch_mosaic<br/>per-slice]
+    D --> E
+    E --> F[stack_mosaic]
+    F --> G[compress_stack<br/>stack.zarr.zip]
+    F --> H[convert_to_omezarr]
+    H --> I[resample_stack<br/>stack_10um.nii.gz]
+    F --> J[(stack.zarr)]
 ```
 
 ### 1. `crop_tiles`
diff --git a/docs/SCRIPTS_REFERENCE.md b/docs/SCRIPTS_REFERENCE.md
index 68cdf7bd..4450867f 100644
--- a/docs/SCRIPTS_REFERENCE.md
+++ b/docs/SCRIPTS_REFERENCE.md
@@ -1,12 +1,15 @@
 # Scripts Reference
 
----
-
 ## Overview
 
 linumpy provides a comprehensive set of command-line scripts for microscopy data processing. All scripts follow a consistent interface with `--help` for usage information.
 
+> **Source layout.** Most scripts live in `scripts/`. Diagnostic and
+> benchmark tools live in `scripts/diagnostics/`. All scripts are exposed
+> as console entry points by `pyproject.toml`, so they can be invoked
+> directly by name regardless of source location.
+
 ---
 
 ## Script Categories
@@ -297,7 +300,7 @@ linum_correct_bias_field.py \
 | `--spline_distance_mm` | auto | B-spline control point spacing (default: 2.0 mm per-section, 10.0 mm global) |
 | `--mask_smoothing_sigma` | `2.0` | Gaussian sigma for tissue mask smoothing |
 | `--save_bias_field` | — | If given, write the estimated bias field to this OME-Zarr path |
-| `--n_levels` | `3` | Pyramid levels in output OME-Zarr |
+| `--n_levels` | `None` | Pyramid levels in output OME-Zarr (auto-chosen if unset) |
 
 ---
 
@@ -388,12 +391,12 @@ linum_stitch_motor_only.py \
     [--blending_method {none,average,diffusion}]
 ```
 
-### linum_stack_slices.py
+### linum_stack_slices_2d.py
 
-Stack 2D slices into 3D volume.
+Stack 2D AIPs into a 3D volume using `shifts_xy.csv`.
 
 ```bash
-linum_stack_slices.py <input> <output> --xy_shifts <shifts_xy.csv>
+linum_stack_slices_2d.py <input> <output> --xy_shifts <shifts_xy.csv>
 ```
 
 ### linum_stack_slices_3d.py
@@ -1014,14 +1017,16 @@ linum_extract_pyramid_levels.py <input> 0 2
 
 Output files are named `<input>_level_<level>.nii.gz` and saved next to the input.
 
-### linum_resample.py
+### linum_resample_nifti.py
 
-Resample image to different resolution.
+Resample a NIfTI image to a target isotropic resolution.
 
 ```bash
-linum_resample.py <input> <output> -r <resolution>
+linum_resample_nifti.py <input> <output> -r <resolution>
 ```
 
+For resampling 2D mosaic grids see `linum_resample_mosaic_grid.py`.
+
 ### linum_reorient_nifti_to_ras.py
 
 Reorient NIfTI to RAS orientation.
 
 ```bash
 linum_reorient_nifti_to_ras.py <input> <output>
 ```
 
-### linum_axis_XYZ_to_ZYX.py
+### linum_axis_xyz_to_zyx.py
 
 Transpose axes from XYZ to ZYX.
 
 ```bash
-linum_axis_XYZ_to_ZYX.py <input> <output>
+linum_axis_xyz_to_zyx.py <input> <output>
 ```
 
 ### linum_segment_brain_3d.py
diff --git a/docs/SHIFTS_FILE_FORMAT.md b/docs/SHIFTS_FILE_FORMAT.md
index 3f9736e0..c563c740 100644
--- a/docs/SHIFTS_FILE_FORMAT.md
+++ b/docs/SHIFTS_FILE_FORMAT.md
@@ -1,8 +1,6 @@
 # XY Shifts File Format
 
----
-
 ## Overview
 
 The `shifts_xy.csv` file contains pairwise XY shifts between consecutive slices in a serial sectioning dataset. This file is essential for aligning slices during 3D reconstruction.
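*Editor's illustration.* For readers consuming `shifts_xy.csv` programmatically, the sketch below turns the pairwise rows into absolute per-slice offsets. The column names come from the samples in this format document; the function name and the convention that the first fixed slice anchors the origin via a cumulative sum are assumptions of the sketch, not documented linumpy API.

```python
import numpy as np
import pandas as pd


def absolute_offsets(shifts_csv: str) -> pd.DataFrame:
    """Convert pairwise (fixed_id -> moving_id) shifts into absolute offsets."""
    pairs = pd.read_csv(shifts_csv).sort_values("moving_id")
    # Each row stores the shift of `moving_id` relative to `fixed_id` for
    # consecutive slices, so absolute positions follow by cumulative sum,
    # with the first fixed slice pinned at (0, 0).
    ids = [pairs["fixed_id"].iloc[0], *pairs["moving_id"]]
    x = np.concatenate([[0.0], pairs["x_shift"].cumsum()])
    y = np.concatenate([[0.0], pairs["y_shift"].cumsum()])
    return pd.DataFrame({"slice_id": ids, "x_abs": x, "y_abs": y})
```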
@@ -19,7 +17,7 @@ Generated by preprocessing pipeline at: `{output}/shifts_xy.csv`
 
 CSV format with header row:
 
-```csv
+```text
 fixed_id,moving_id,x_shift,y_shift,x_shift_mm,y_shift_mm
 0,1,156.234,-23.456,0.0234,-0.0035
 1,2,142.567,-18.234,0.0214,-0.0027
@@ -288,7 +286,7 @@ linum_analyze_shifts.py shifts_xy.csv output_dir/ --iqr_multiplier 1.5
 
 ### Sample shifts_xy.csv
 
-```csv
+```text
 fixed_id,moving_id,x_shift,y_shift,x_shift_mm,y_shift_mm
 0,1,156.234,-23.456,0.0234,-0.0035
 1,2,142.567,-18.234,0.0214,-0.0027
diff --git a/docs/SLICE_CONFIG_FEATURE.md b/docs/SLICE_CONFIG_FEATURE.md
index d20993d9..77ea5d6c 100644
--- a/docs/SLICE_CONFIG_FEATURE.md
+++ b/docs/SLICE_CONFIG_FEATURE.md
@@ -1,8 +1,6 @@
 # Slice Configuration Feature
 
----
-
 ## Overview
 
 `slice_config.csv` is the **single source of truth for per-slice pipeline decisions**. It controls which slices are included in the 3D reconstruction pipeline and records how every stage has acted on each slice, in a machine- and human-readable audit trail.
@@ -52,7 +50,7 @@ This caused errors when:
 
 A CSV file (`slice_config.csv`) records per-slice pipeline decisions. Below is a fully-populated example after a complete run:
 
-```csv
+```text
 slice_id,use,quality_score,galvo_confidence,galvo_fix,rehomed,rehoming_reliable,auto_excluded,auto_exclude_reason,interpolated,interpolation_failed,interpolation_method_used,interpolation_fallback_reason,notes
 00,false,0.000,0.234,false,false,,false,,false,false,,,calibration_slice
 01,true,0.812,0.891,true,false,,false,,false,false,,,
@@ -167,7 +165,7 @@ The first slice in an acquisition is typically a **calibration slice** that is t
 
 When quality assessment runs, the writer only populates canonical columns:
 
-```csv
+```text
 slice_id,use,quality_score,notes
 00,false,0.000,calibration_slice
 01,true,0.812,
@@ -489,7 +487,7 @@ All Python-side slice filtering (e.g. `linum_estimate_global_transform.py`) uses
 
 The shifts file contains pairwise shifts between consecutive slices:
 
-```csv
+```text
 fixed_id,moving_id,x_shift,y_shift,x_shift_mm,y_shift_mm
 0,1,10,5,0.01,0.005
 1,2,8,3,0.008,0.003
diff --git a/docs/SLICE_INTERPOLATION_FEATURE.md b/docs/SLICE_INTERPOLATION_FEATURE.md
index f8b71279..42b86034 100644
--- a/docs/SLICE_INTERPOLATION_FEATURE.md
+++ b/docs/SLICE_INTERPOLATION_FEATURE.md
@@ -1,7 +1,5 @@
 # Slice Interpolation Feature
 
----
-
 ## Overview
 
 The slice interpolation feature reconstructs missing slices in Serial OCT
@@ -83,6 +81,26 @@ The only scientifically motivated reconstruction method is `zmorph`.
 `average` and `weighted` are simpler baselines kept for comparison and as
 fallbacks; they do not use any 2D registration.
 
+### Decision flow at a glance
+
+```mermaid
+flowchart TD
+    START([Missing slice between<br/>vol_before and vol_after]) --> PLANES[find_best_overlap_planes<br/>foreground filter + NCC search]
+    PLANES -->|no foreground planes| F1[fallback_reason:<br/>no_foreground_planes]
+    PLANES -->|best NCC < min_overlap_correlation| F2[fallback_reason:<br/>low_overlap_ncc]
+    PLANES -->|good pair| REG[2D ITK registration<br/>boundary plane → reference]
+    REG -->|optimiser raised| F3[fallback_reason:<br/>registration_exception]
+    REG -->|det T ≤ 0| F4[fallback_reason:<br/>affine_determinant_non_positive]
+    REG -->|post-reg NCC ↑ < threshold| F5[fallback_reason:<br/>reg_did_not_improve]
+    REG -->|gates pass| WARP["For each output plane at α = z / (nz_out-1):<br/>warp vol_before by T^α<br/>warp vol_after by T^(α-1)<br/>gaussian-feathered cross-fade"]
+    WARP --> OUT([Interpolated zarr<br/>+ manifest + diagnostics])
+    F1 --> SKIP([Hard skip:<br/>no zarr written,<br/>genuine gap in stack])
+    F2 --> SKIP
+    F3 --> SKIP
+    F4 --> SKIP
+    F5 --> SKIP
+```
+
 ### `zmorph` — z-aware morphing (default)
 
 #### Physical model
@@ -288,7 +306,7 @@ Each interpolation run also emits a human-readable JSON file at
 `${output}/interpolate_missing_slice/slice_z{NN}_interpolated_diagnostics.json`
 with the full trace:
 
-```jsonc
+```json
 {
   "method": "zmorph",
   "method_used": "zmorph",
diff --git a/docs/_static/.gitkeep b/docs/_static/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 00000000..d69adaaa
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,222 @@
+# Configuration file for the Sphinx documentation builder.
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+"""Sphinx configuration for the linumpy documentation."""
+
+from __future__ import annotations
+
+import sys
+from datetime import datetime
+from pathlib import Path
+
+# Make the project package importable for autoapi/autodoc.
+ROOT = Path(__file__).resolve().parents[1]
+sys.path.insert(0, str(ROOT))
+sys.path.insert(1, str(ROOT / "scripts"))
+
+# -- Project information ----------------------------------------------------
+project = "linumpy"
+author = "The LINUM developers"
+copyright = f"{datetime.now().year}, LINUM"
+
+# Pull version from installed package metadata when available.
+try:
+    from importlib.metadata import version as _get_version
+
+    release = _get_version("linumpy")
+except Exception:
+    release = "0.1.1"
+version = ".".join(release.split(".")[:2])
+
+# -- General configuration --------------------------------------------------
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.viewcode",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.mathjax",
+    "autoapi.extension",
+    "sphinxarg.ext",
+    "myst_parser",
+    "sphinx_design",
+    "sphinxcontrib.mermaid",
+    "sphinx_copybutton",
+    "notfound.extension",
+    "sphinx_sitemap",
+    "sphinxext.opengraph",
+]
+
+templates_path = ["_templates"]
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
+
+# MyST: render the existing Markdown docs.
+source_suffix = {
+    ".rst": "restructuredtext",
+    ".md": "markdown",
+}
+myst_enable_extensions = [
+    "amsmath",
+    "colon_fence",
+    "deflist",
+    "dollarmath",
+    "fieldlist",
+    "linkify",
+    "substitution",
+    "tasklist",
+]
+myst_fence_as_directive = ["mermaid"]
+
+# Mermaid: interactive zoom/pan + fullscreen, with readable defaults.
+mermaid_d3_zoom = True
+mermaid_fullscreen = True
+mermaid_fullscreen_button = "⛶"
+mermaid_height = "640px"
+mermaid_light_theme = "neutral"
+mermaid_dark_theme = "dark"
+# startOnLoad must be False — sphinxcontrib-mermaid's default.js calls
+# mermaid.run() itself after wiring d3 zoom and the fullscreen modal. With
+# startOnLoad=True mermaid auto-renders before the wrapper attaches, leaving
+# diagrams without zoom/fullscreen buttons.
+mermaid_init_config = {
+    "startOnLoad": False,
+    "securityLevel": "loose",
+    "flowchart": {"htmlLabels": True, "curve": "basis", "useMaxWidth": True},
+    "themeVariables": {"fontSize": "16px"},
+}
+myst_heading_anchors = 4
+
+# Autoapi: generate API reference from the linumpy package.
+autoapi_type = "python"
+autoapi_dirs = [str(ROOT / "linumpy")]
+autoapi_root = "api"
+autoapi_options = [
+    "members",
+    "undoc-members",
+    "show-inheritance",
+    "show-module-summary",
+    "imported-members",
+]
+autoapi_ignore = ["*/tests/*", "*/config/threads*"]
+autoapi_keep_files = True
+autoapi_add_toctree_entry = True
+
+# Autodoc settings.
+autodoc_typehints = "description"
+autodoc_member_order = "bysource"
+
+# Napoleon: support Google + NumPy docstrings.
+napoleon_google_docstring = True
+napoleon_numpy_docstring = True
+napoleon_use_param = True
+napoleon_use_rtype = True
+napoleon_attr_annotations = True
+# Render "Attributes:" docstring sections as :ivar: fields instead of
+# emitting a separate :py:attribute: directive for each — avoids the
+# "duplicate object description" warnings when autoapi also documents
+# the same class attributes from their type annotations.
+napoleon_use_ivar = True
+
+# Intersphinx mappings.
+intersphinx_mapping = {
+    "python": ("https://docs.python.org/3", None),
+    "numpy": ("https://numpy.org/doc/stable/", None),
+    "scipy": ("https://docs.scipy.org/doc/scipy/", None),
+    "skimage": ("https://scikit-image.org/docs/stable/", None),
+    "zarr": ("https://zarr.readthedocs.io/en/stable/", None),
+    "SimpleITK": ("https://simpleitk.readthedocs.io/en/master/", None),
+    "pandas": ("https://pandas.pydata.org/docs/", None),
+    "dask": ("https://docs.dask.org/en/stable/", None),
+    "matplotlib": ("https://matplotlib.org/stable/", None),
+    "PIL": ("https://pillow.readthedocs.io/en/stable/", None),
+}
+
+# Nitpicky mode is kept off. autoapi-extracted API pages contain many
+# informal type names from numpy-style docstrings (``ndarray``, ``optional``,
+# ``array-like``, ...) that are not real Python xrefs and cannot be resolved
+# without rewriting every docstring. Strict builds use ``-W`` (treat warnings
+# as errors) but not ``-n``; run ``sphinx-build -n`` ad-hoc when reviewing API
+# docstring quality.
+nitpicky = False
+
+# -- HTML output ------------------------------------------------------------
+# pydata-sphinx-theme: https://pydata-sphinx-theme.readthedocs.io/
+html_theme = "pydata_sphinx_theme"
+html_title = "linumpy"
+html_static_path = ["_static"]
+
+html_theme_options = {
+    "github_url": "https://github.com/linum-uqam/linumpy",
+    "use_edit_page_button": True,
+    "show_toc_level": 2,
+    "navigation_with_keys": True,
+    "show_prev_next": True,
+    "header_links_before_dropdown": 4,
+    "icon_links": [
+        {
+            "name": "GitHub",
+            "url": "https://github.com/linum-uqam/linumpy",
+            "icon": "fa-brands fa-github",
+        },
+    ],
+    "navbar_align": "left",
+    "navbar_end": ["theme-switcher", "navbar-icon-links"],
+    "footer_start": ["copyright"],
+    "footer_end": ["sphinx-version", "theme-version"],
+}
+
+# Hide the right "On this page" sidebar by default for narrative docs;
+# autoapi pages still show it. This lets prose pages use the full width.
+html_theme_options["secondary_sidebar_items"] = {
+    "**": ["page-toc"],
+    "index": [],
+    "getting_started": [],
+    "pipelines": [],
+    "formats": [],
+    "reference": [],
+}
+
+html_context = {
+    "github_user": "linum-uqam",
+    "github_repo": "linumpy",
+    "github_version": "dev",
+    "doc_path": "docs",
+}
+
+# Suppress noisy warnings from autoapi when imports fail in optional modules.
+suppress_warnings = [
+    "autoapi.python_import_resolution",
+    # autoapi sometimes emits the same attribute twice when it's both a
+    # dataclass field and a property — harmless, just noisy.
+    "ref.python",
+    # Tolerate ambiguous Python xrefs like ``shape`` resolving to several
+    # classes; autoapi can't disambiguate without manual annotations.
+    "misc.highlighting_failure",
+]
+
+# -- UX extensions ----------------------------------------------------------
+# sphinx-copybutton: copy button on code blocks; strip prompt characters.
+copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
+copybutton_prompt_is_regexp = True
+copybutton_only_copy_prompt_lines = False
+
+# sphinx-notfound-page: serve a friendly 404 with absolute links to assets.
+notfound_context = {
+    "title": "Page not found",
+    "body": (
+        "<h1>Page not found</h1>"
+        "<p>Sorry, we couldn't find that page. Try the "
+        "<a href='/'>documentation home</a> or use the search box above.</p>"
+    ),
+}
+notfound_urls_prefix = "/"
+
+# sphinx-sitemap: emit sitemap.xml at the docs root for SEO.
+html_baseurl = "https://linumpy.readthedocs.io/en/latest/"
+sitemap_url_scheme = "{link}"
+
+# sphinxext-opengraph: rich link previews on social platforms.
+ogp_site_url = html_baseurl
+ogp_site_name = "linumpy documentation"
+ogp_image = "https://linumpy.readthedocs.io/en/latest/_static/linumpy-logo.png"
+ogp_use_first_image = True
+ogp_enable_meta_description = True
diff --git a/docs/formats.rst b/docs/formats.rst
new file mode 100644
index 00000000..31b75c4f
--- /dev/null
+++ b/docs/formats.rst
@@ -0,0 +1,12 @@
+Data formats & features
+=======================
+
+.. toctree::
+   :maxdepth: 2
+
+   MOSAIC_GRID_FORMAT
+   SHIFTS_FILE_FORMAT
+   SLICE_CONFIG_FEATURE
+   SLICE_INTERPOLATION_FEATURE
+   GPU_ACCELERATION
+   N4_GPU
diff --git a/docs/getting_started.rst b/docs/getting_started.rst
new file mode 100644
index 00000000..ec84cb1d
--- /dev/null
+++ b/docs/getting_started.rst
@@ -0,0 +1,8 @@
+Getting started
+===============
+
+.. toctree::
+   :maxdepth: 2
+
+   usage
+   CONTRIBUTING
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 00000000..0c48a8a2
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,57 @@
+linumpy
+=======
+
+**linumpy** is the main library of the *Laboratoire d'Imagerie Numérique,
+Neurophotonique et Microscopie* (`LINUM `_) at
+Université du Québec à Montréal. It provides Python tools and CLI scripts
+for serial optical coherence tomography (S-OCT) and serial blockface
+histology: tile acquisition, preprocessing, stitching, 3-D reconstruction,
+stacking, and downstream analysis.
+
+.. grid:: 1 2 2 2
+   :gutter: 3
+
+   .. grid-item-card:: Pipelines
+      :link: pipelines
+      :link-type: doc
+
+      End-to-end preprocessing and 3-D reconstruction workflows, including
+      Nextflow integration.
+
+   .. grid-item-card:: Scripts
+      :link: scripts
+      :link-type: doc
+
+      Reference for every ``linum_*`` command-line tool, generated from the
+      argparse definitions.
+
+   .. grid-item-card:: Library API
+      :link: api/index
+      :link-type: doc
+
+      Auto-generated reference for the ``linumpy`` Python package.
+
+   .. grid-item-card:: Contributing
+      :link: CONTRIBUTING
+      :link-type: doc
+
+      How to set up a development environment, run tests, and contribute.
+
+
+.. 
toctree:: + :maxdepth: 1 + :hidden: + + getting_started + pipelines + formats + reference + api/index + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/n4_gpu_benchmark.json b/docs/n4_gpu_benchmark.json deleted file mode 100644 index 91c1ed6c..00000000 --- a/docs/n4_gpu_benchmark.json +++ /dev/null @@ -1,182 +0,0 @@ -[ - { - "label": "phantom_64x128x128", - "shape": [ - 64, - 128, - 128 - ], - "shrink_factor": 2, - "n_iter": [ - 25, - 25, - 25 - ], - "spline_distance_mm": 20.0, - "t_cpu_s": 0.6398408049717546, - "t_gpu_s": 0.15978975599864498, - "speedup": 4.004266737707393, - "cv_bias_cpu": 0.003987109754234552, - "cv_bias_gpu": 0.029909836128354073, - "bias_correlation": 0.9638656032665468, - "median_corrected_rel_err": 0.018394112586975098, - "p95_corrected_rel_err": 0.034419916570186615, - "mean_input": 0.4594367742538452, - "mean_corr_cpu": 0.5018937587738037, - "mean_corr_gpu": 0.4343510568141937 - }, - { - "label": "phantom_128x256x256", - "shape": [ - 128, - 256, - 256 - ], - "shrink_factor": 2, - "n_iter": [ - 25, - 25, - 25 - ], - "spline_distance_mm": 20.0, - "t_cpu_s": 2.0511507620103657, - "t_gpu_s": 0.2029316599946469, - "speedup": 10.107593670028978, - "cv_bias_cpu": 0.010777103714644909, - "cv_bias_gpu": 0.015105887316167355, - "bias_correlation": 0.9954930906193947, - "median_corrected_rel_err": 0.004510283470153809, - "p95_corrected_rel_err": 0.014960646629333496, - "mean_input": 0.4600476622581482, - "mean_corr_cpu": 0.48029136657714844, - "mean_corr_gpu": 0.3547338843345642 - }, - { - "label": "phantom_128x512x512", - "shape": [ - 128, - 512, - 512 - ], - "shrink_factor": 2, - "n_iter": [ - 25, - 25, - 25 - ], - "spline_distance_mm": 20.0, - "t_cpu_s": 5.717736949969549, - "t_gpu_s": 0.6198505479842424, - "speedup": 9.224379922811494, - "cv_bias_cpu": 0.015291067771613598, - "cv_bias_gpu": 0.0184138435870409, - "bias_correlation": 0.995320060178931, - "median_corrected_rel_err": 0.004615187644958496, - "p95_corrected_rel_err": 0.015849407762289047, - "mean_input": 0.46004652976989746, - "mean_corr_cpu": 0.4799730181694031, - "mean_corr_gpu": 0.3696020543575287 - }, - { - "label": "phantom_256x512x512", - "shape": [ - 256, - 512, - 512 - ], - "shrink_factor": 2, - "n_iter": [ - 25, - 25, - 25 - ], - "spline_distance_mm": 20.0, - "t_cpu_s": 21.725288335001096, - "t_gpu_s": 1.2978258999646641, - "speedup": 16.739755567825092, - "cv_bias_cpu": 0.045397549867630005, - "cv_bias_gpu": 0.10285799205303192, - "bias_correlation": 0.9444814620336685, - "median_corrected_rel_err": 0.037838224321603775, - "p95_corrected_rel_err": 0.061823610216379166, - "mean_input": 0.4603418707847595, - "mean_corr_cpu": 0.49322256445884705, - "mean_corr_gpu": 0.3906034827232361 - }, - { - "label": "phantom_128x1024x1024", - "shape": [ - 128, - 1024, - 1024 - ], - "shrink_factor": 4, - "n_iter": [ - 25, - 25, - 25 - ], - "spline_distance_mm": 20.0, - "t_cpu_s": 9.617111314029898, - "t_gpu_s": 0.817965931026265, - "speedup": 11.757349480269601, - "cv_bias_cpu": 0.014734203927218914, - "cv_bias_gpu": 0.05159847065806389, - "bias_correlation": 0.9445970174331155, - "median_corrected_rel_err": 0.021271109580993652, - "p95_corrected_rel_err": 0.054694563150405884, - "mean_input": 0.4600576162338257, - "mean_corr_cpu": 0.47571316361427307, - "mean_corr_gpu": 0.37728744745254517 - }, - { - "label": "phantom_128x1536x1536", - "shape": [ - 128, - 1536, - 1536 - ], - "shrink_factor": 4, - "n_iter": [ - 25, - 25, - 25 - ], - 
"spline_distance_mm": 20.0, - "t_cpu_s": 24.079912445973605, - "t_gpu_s": 2.3647980869864114, - "speedup": 10.182650509777739, - "cv_bias_cpu": 0.016914930194616318, - "cv_bias_gpu": 0.050779879093170166, - "bias_correlation": 0.9520318700714921, - "median_corrected_rel_err": 0.01949763298034668, - "p95_corrected_rel_err": 0.0521697998046875, - "mean_input": 0.46005597710609436, - "mean_corr_cpu": 0.47711148858070374, - "mean_corr_gpu": 0.3874017596244812 - }, - { - "label": "live_oct_full", - "shape": [ - 256, - 1024, - 769 - ], - "shrink_factor": 4, - "n_iter": [ - 40, - 40, - 40 - ], - "spline_distance_mm": 10.0, - "t_cpu_s": 130.6813125850167, - "t_gpu_s": 1.716215402004309, - "speedup": 76.1450529067613, - "bias_correlation": 0.48179547578066834, - "median_corrected_rel_err": 0.1079474687576294, - "p95_corrected_rel_err": 0.5709668397903442, - "mean_input": 0.04045163094997406, - "mean_corr_cpu": 0.0214995089918375, - "mean_corr_gpu": 0.030925488099455833 - } -] \ No newline at end of file diff --git a/docs/pipelines.rst b/docs/pipelines.rst new file mode 100644 index 00000000..25d6c691 --- /dev/null +++ b/docs/pipelines.rst @@ -0,0 +1,10 @@ +Pipelines +========= + +.. toctree:: + :maxdepth: 2 + + PIPELINE_OVERVIEW + NEXTFLOW_WORKFLOWS + RECONST_2_5D_WORKFLOW + RECONSTRUCTION_DIAGNOSTICS diff --git a/docs/reference.rst b/docs/reference.rst new file mode 100644 index 00000000..7e82b63e --- /dev/null +++ b/docs/reference.rst @@ -0,0 +1,9 @@ +Reference +========= + +.. toctree:: + :maxdepth: 2 + + scripts + LIBRARY_MODULES + SCRIPTS_REFERENCE diff --git a/docs/scripts.rst b/docs/scripts.rst new file mode 100644 index 00000000..54b8dac2 --- /dev/null +++ b/docs/scripts.rst @@ -0,0 +1,82 @@ +Command-line scripts +==================== + +linumpy ships a large collection of ``linum_*`` command-line tools. The +authoritative inventory lives in :doc:`SCRIPTS_REFERENCE`, which groups +scripts by purpose (preprocessing, mosaic creation, stitching, stacking, +diagnostics, conversion, GPU benchmarks, …). + +Each script uses ``argparse`` and prints its full options list with +``--help``. A handful of high-traffic entry points are documented here +inline using :mod:`sphinx-argparse`; the rest can be browsed in the +reference page above or queried directly on the command line. + +Mosaic acquisition & preprocessing +---------------------------------- + +.. argparse:: + :module: scripts.linum_create_mosaic_grid_3d + :func: _build_arg_parser + :prog: linum_create_mosaic_grid_3d + +.. argparse:: + :module: scripts.linum_compensate_psf_from_model + :func: _build_arg_parser + :prog: linum_compensate_psf_from_model + +.. argparse:: + :module: scripts.linum_compensate_attenuation + :func: _build_arg_parser + :prog: linum_compensate_attenuation + +Stitching, stacking & alignment +------------------------------- + +.. argparse:: + :module: scripts.linum_register_pairwise + :func: _build_arg_parser + :prog: linum_register_pairwise + +.. argparse:: + :module: scripts.linum_stack_slices_3d + :func: _build_arg_parser + :prog: linum_stack_slices_3d + +.. argparse:: + :module: scripts.linum_align_to_ras + :func: _build_arg_parser + :prog: linum_align_to_ras + +Slice quality & interpolation +----------------------------- + +.. argparse:: + :module: scripts.linum_assess_slice_quality + :func: _build_arg_parser + :prog: linum_assess_slice_quality + +.. 
argparse:: + :module: scripts.linum_interpolate_missing_slice + :func: _build_arg_parser + :prog: linum_interpolate_missing_slice + +GPU & diagnostics +----------------- + +.. argparse:: + :module: scripts.linum_gpu_info + :func: _build_arg_parser + :prog: linum_gpu_info + +.. argparse:: + :module: scripts.linum_correct_bias_field + :func: _build_arg_parser + :prog: linum_correct_bias_field + +.. note:: + + To document additional scripts here, add an ``.. argparse::`` block with + the script's module path and the name of the function that returns its + ``argparse.ArgumentParser`` (most linumpy scripts expose + ``_build_arg_parser`` — verify with ``grep -n _build_arg_parser + scripts/
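
*Editor's illustration.* The note above relies on each script exposing a module-level `_build_arg_parser` that returns its `argparse.ArgumentParser`. Below is a minimal, hypothetical script skeleton that satisfies both sphinx-argparse and a `pyproject.toml` console entry point; the argument names are placeholders, not those of any real linumpy script.

```python
import argparse


def _build_arg_parser() -> argparse.ArgumentParser:
    # sphinx-argparse imports the module and calls this function, so it
    # must build the parser without side effects (no I/O at import time).
    p = argparse.ArgumentParser(description="Skeleton for a linum_* script.")
    p.add_argument("input", help="Input volume (placeholder argument).")
    p.add_argument("output", help="Output path (placeholder argument).")
    p.add_argument("--n_processes", type=int, default=1,
                   help="Number of worker processes.")
    return p


def main() -> None:
    args = _build_arg_parser().parse_args()
    print(args)  # real scripts dispatch to library code here


if __name__ == "__main__":
    main()
```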