From 2aaf6f9a4dc0921b62dff7dbcc40b2094e926be5 Mon Sep 17 00:00:00 2001 From: Frans Irgolitsch Date: Wed, 29 Apr 2026 15:38:15 -0400 Subject: [PATCH] chore: post-rebase refinements and additional diagnostics (#111) --- .github/workflows/nextflow-ci.yml | 25 +- .gitignore | 9 - docs/AI_DOCUMENTATION_INDEX.md | 19 +- docs/GPU_ACCELERATION.md | 14 +- docs/MOSAIC_GRID_FORMAT.md | 2 +- docs/NEXTFLOW_WORKFLOWS.md | 84 +- docs/PIPELINE_OVERVIEW.md | 475 +++++++--- docs/RECONSTRUCTION_DIAGNOSTICS.md | 115 +-- docs/RECONST_2_5D_WORKFLOW.md | 275 ++++++ docs/SCRIPTS_REFERENCE.md | 141 ++- docs/workflow_reconstruction_2-5d.png | Bin 88858 -> 0 bytes linumpy/intensity/psf_model.py | 8 +- linumpy/mosaic/grid.py | 13 +- linumpy/tests/test_mosaic_motor.py | 133 +++ linumpy/tests/test_mosaic_stacking.py | 163 ++++ scripts/diagnostics/__init__.py | 1 + .../linum_aggregate_dilation_analysis.py | 437 +++++++++ .../linum_analyze_acquisition_rotation.py | 572 ++++++++++++ .../linum_analyze_registration_transforms.py | 452 +++++++++ .../linum_analyze_stitch_affine.py | 314 +++++++ .../linum_analyze_tile_dilation.py | 385 ++++++++ scripts/diagnostics/linum_benchmark_gpu.py | 507 ++++++++++ .../diagnostics/linum_compare_stitching.py | 153 +++ .../diagnostics/linum_diagnose_pipeline.py | 872 ++++++++++++++++++ .../linum_diagnose_reconstruction.py | 450 +++++++++ scripts/diagnostics/linum_stack_motor_only.py | 402 ++++++++ .../diagnostics/linum_stitch_motor_only.py | 170 ++++ scripts/diagnostics/linum_suggest_params.py | 702 ++++++++++++++ scripts/linum_axis_xyz_to_zyx.py | 4 +- scripts/linum_export_manual_align.py | 56 +- scripts/linum_generate_pipeline_report.py | 2 +- .../linum_normalize_intensities_per_slice.py | 11 +- scripts/linum_resample_nifti.py | 74 ++ scripts/linum_stack_slices_2d.py | 136 +++ uv.lock | 62 +- workflows/preproc/nextflow.config | 44 +- workflows/preproc/preproc_rawtiles.nf | 18 +- workflows/reconst_2.5d/soct_2.5d_reconst.nf | 4 +- .../soct_2.5d_reconst_beluga.config | 7 +- .../soct_2.5d_reconst_docker.config | 4 +- workflows/reconst_3d/lib/Helpers.groovy | 303 ------ workflows/reconst_3d/nextflow.config | 79 +- workflows/reconst_3d/soct_3d_reconst.nf | 346 ++++++- .../data/mosaic_grid_z01.ome.zarr/.gitkeep | 0 .../data/mosaic_grid_z02.ome.zarr/.gitkeep | 0 .../reconst_3d/tests/soct_3d_reconst.nf.test | 3 - workflows/shared/cpu_limits.config | 43 - 47 files changed, 7272 insertions(+), 817 deletions(-) create mode 100644 docs/RECONST_2_5D_WORKFLOW.md delete mode 100644 docs/workflow_reconstruction_2-5d.png create mode 100644 linumpy/tests/test_mosaic_motor.py create mode 100644 linumpy/tests/test_mosaic_stacking.py create mode 100644 scripts/diagnostics/__init__.py create mode 100644 scripts/diagnostics/linum_aggregate_dilation_analysis.py create mode 100644 scripts/diagnostics/linum_analyze_acquisition_rotation.py create mode 100644 scripts/diagnostics/linum_analyze_registration_transforms.py create mode 100644 scripts/diagnostics/linum_analyze_stitch_affine.py create mode 100644 scripts/diagnostics/linum_analyze_tile_dilation.py create mode 100644 scripts/diagnostics/linum_benchmark_gpu.py create mode 100644 scripts/diagnostics/linum_compare_stitching.py create mode 100644 scripts/diagnostics/linum_diagnose_pipeline.py create mode 100644 scripts/diagnostics/linum_diagnose_reconstruction.py create mode 100644 scripts/diagnostics/linum_stack_motor_only.py create mode 100644 scripts/diagnostics/linum_stitch_motor_only.py create mode 100644 scripts/diagnostics/linum_suggest_params.py create 
mode 100644 scripts/linum_resample_nifti.py create mode 100644 scripts/linum_stack_slices_2d.py delete mode 100644 workflows/reconst_3d/lib/Helpers.groovy delete mode 100644 workflows/reconst_3d/tests/data/mosaic_grid_z01.ome.zarr/.gitkeep delete mode 100644 workflows/reconst_3d/tests/data/mosaic_grid_z02.ome.zarr/.gitkeep delete mode 100644 workflows/shared/cpu_limits.config diff --git a/.github/workflows/nextflow-ci.yml b/.github/workflows/nextflow-ci.yml index 02b5b968..239a8a6c 100644 --- a/.github/workflows/nextflow-ci.yml +++ b/.github/workflows/nextflow-ci.yml @@ -5,21 +5,19 @@ on: branches: ["main", "dev"] paths: - "workflows/**" - - ".github/workflows/nextflow-ci.yml" pull_request: branches: ["main"] paths: - "workflows/**" - - ".github/workflows/nextflow-ci.yml" jobs: lint-and-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - - uses: actions/setup-java@v5.2.0 + - uses: actions/setup-java@v4 with: distribution: temurin java-version: 17 @@ -32,7 +30,7 @@ jobs: - name: Install nf-test run: | - NF_TEST_VERSION=0.9.5 + NF_TEST_VERSION=$(curl -s https://api.github.com/repos/askimed/nf-test/releases/latest | jq -r .tag_name | sed 's/^v//') wget -qO nf-test.tar.gz "https://github.com/askimed/nf-test/releases/download/v${NF_TEST_VERSION}/nf-test-${NF_TEST_VERSION}.tar.gz" tar -xzf nf-test.tar.gz mkdir -p "$HOME/.nf-test" @@ -41,26 +39,11 @@ jobs: sudo chmod +x /usr/local/bin/nf-test - name: Lint Nextflow files - # `nextflow lint` does not load Groovy classes from `lib/`, so it - # falsely reports `Helpers` as undefined wherever the workflow calls - # `Helpers.X(...)`. We strip ANSI colour codes from the output, drop - # those known false-positives, and re-derive the exit status. run: | - tmp=$(mktemp) nextflow lint \ workflows/preproc/preproc_rawtiles.nf \ workflows/reconst_3d/soct_3d_reconst.nf \ - workflows/reconst_2.5d/soct_2.5d_reconst.nf \ - > "$tmp" 2>&1 || true - sed 's/\x1b\[[0-9;]*[a-zA-Z]//g' "$tmp" \ - | awk '/`Helpers` is not defined/ { skip = 3; next } skip > 0 { skip--; next } { print }' \ - > "$tmp.filt" - cat "$tmp.filt" - if grep -qE '^Error ' "$tmp.filt"; then - echo "::error::nextflow lint reported errors (excluding Helpers false-positives)" - exit 1 - fi - echo "Lint passed (Helpers false-positives ignored: lib/ classes are loaded at runtime but not by 'nextflow lint')." + workflows/reconst_2.5d/soct_2.5d_reconst.nf - name: Validate Nextflow configs run: | diff --git a/.gitignore b/.gitignore index 479a3461..8a38dc35 100644 --- a/.gitignore +++ b/.gitignore @@ -18,8 +18,6 @@ eggs/ .eggs/ lib/ lib64/ -!workflows/**/lib/ -!workflows/**/lib/** parts/ sdist/ var/ @@ -186,13 +184,6 @@ workflows/work/ *.nii.gz *.sif -# Test stub-data placeholders. The `*.zarr` rule above ignores zarr directories -# wholesale; for nf-test stub runs we still need the empty directory markers -# (`.gitkeep`) tracked so the workflow's input glob finds at least one -# `mosaic_grid*.ome.zarr` directory. Re-include the directory and its keepers. -!workflows/**/tests/data/*.ome.zarr -!workflows/**/tests/data/*.ome.zarr/.gitkeep - # Config files *.ini diff --git a/docs/AI_DOCUMENTATION_INDEX.md b/docs/AI_DOCUMENTATION_INDEX.md index c3f448b4..c4bb681c 100644 --- a/docs/AI_DOCUMENTATION_INDEX.md +++ b/docs/AI_DOCUMENTATION_INDEX.md @@ -16,32 +16,35 @@ Complete documentation for the linumpy microscopy processing library. The librar 1. **[PIPELINE_OVERVIEW.md](PIPELINE_OVERVIEW.md)** - Complete overview of preprocessing and 3D reconstruction pipelines 2. 
**[NEXTFLOW_WORKFLOWS.md](NEXTFLOW_WORKFLOWS.md)** - Nextflow workflow configuration and execution guide +3. **[RECONST_2_5D_WORKFLOW.md](RECONST_2_5D_WORKFLOW.md)** - Legacy 2.5D reconstruction workflow (TIFF mosaic grids) ### Data Formats -3. **[MOSAIC_GRID_FORMAT.md](MOSAIC_GRID_FORMAT.md)** - OME-Zarr mosaic grid format specification -4. **[SHIFTS_FILE_FORMAT.md](SHIFTS_FILE_FORMAT.md)** - XY shifts CSV file format and usage +4. **[MOSAIC_GRID_FORMAT.md](MOSAIC_GRID_FORMAT.md)** - OME-Zarr mosaic grid format specification +5. **[SHIFTS_FILE_FORMAT.md](SHIFTS_FILE_FORMAT.md)** - XY shifts CSV file format and usage --- ## Feature Documentation -5. **[SLICE_CONFIG_FEATURE.md](SLICE_CONFIG_FEATURE.md)** - Slice selection and filtering system -6. **[SLICE_INTERPOLATION_FEATURE.md](SLICE_INTERPOLATION_FEATURE.md)** - Missing slice reconstruction using registration-based morphing -7. **[GPU_ACCELERATION.md](GPU_ACCELERATION.md)** - GPU acceleration using NVIDIA CUDA/CuPy +6. **[SLICE_CONFIG_FEATURE.md](SLICE_CONFIG_FEATURE.md)** - Slice selection and filtering system +7. **[SLICE_INTERPOLATION_FEATURE.md](SLICE_INTERPOLATION_FEATURE.md)** - Missing slice reconstruction using registration-based morphing +8. **[GPU_ACCELERATION.md](GPU_ACCELERATION.md)** - GPU acceleration using NVIDIA CUDA/CuPy --- ## Reference -8. **[SCRIPTS_REFERENCE.md](SCRIPTS_REFERENCE.md)** - Command-line scripts reference guide -9. **[LIBRARY_MODULES.md](LIBRARY_MODULES.md)** - Python library module documentation +9. **[SCRIPTS_REFERENCE.md](SCRIPTS_REFERENCE.md)** - Command-line scripts reference guide +10. **[LIBRARY_MODULES.md](LIBRARY_MODULES.md)** - Python library module documentation +11. **[RECONSTRUCTION_DIAGNOSTICS.md](RECONSTRUCTION_DIAGNOSTICS.md)** - Diagnostic tools for troubleshooting reconstruction artifacts +12. **[PIPELINE_PERFORMANCE_ANALYSIS.md](PIPELINE_PERFORMANCE_ANALYSIS.md)** - Pipeline performance benchmarks and optimization guide --- ## Contributing -10. **[CONTRIBUTING.md](CONTRIBUTING.md)** - Contribution guidelines +13. **[CONTRIBUTING.md](CONTRIBUTING.md)** - Contribution guidelines --- diff --git a/docs/GPU_ACCELERATION.md b/docs/GPU_ACCELERATION.md index 0a7036c7..9ad7e082 100644 --- a/docs/GPU_ACCELERATION.md +++ b/docs/GPU_ACCELERATION.md @@ -20,9 +20,9 @@ linumpy supports GPU acceleration for compute-intensive operations using NVIDIA nvidia-smi | grep "CUDA Version" # Install linumpy with GPU support (choose your CUDA version) -pip install linumpy[gpu] # CUDA 12.x (default) -pip install linumpy[gpu-cuda11] # CUDA 11.x -pip install linumpy[gpu-cuda13] # CUDA 13.x (requires extra setup for JAX) +uv pip install 'linumpy[gpu]' # CUDA 12.x (default) +uv pip install 'linumpy[gpu-cuda11]' # CUDA 11.x +uv pip install 'linumpy[gpu-cuda13]' # CUDA 13.x (requires extra setup for JAX) # Verify GPU linum_gpu_info.py @@ -90,18 +90,18 @@ If you prefer manual setup: ```bash # 1. Uninstall all conflicting packages -pip uninstall -y jax jaxlib jax-cuda12-plugin nvidia-cusolver nvidia-cufft \ +uv pip uninstall jax jaxlib jax-cuda12-plugin nvidia-cusolver nvidia-cufft \ nvidia-cusparse nvidia-cublas nvidia-cuda-runtime nvidia-cudnn nvidia-nvjitlink \ nvidia-cublas-cu12 nvidia-cuda-cupti-cu12 nvidia-cuda-runtime-cu12 \ nvidia-cudnn-cu12 nvidia-cufft-cu12 nvidia-cusolver-cu12 nvidia-cusparse-cu12 \ nvidia-nccl-cu12 nvidia-nvjitlink-cu12 # 2. 
Install JAX 0.4.23 with CUDA wheel -pip install 'jax==0.4.23' 'jaxlib==0.4.23+cuda12.cudnn89' \ +uv pip install 'jax==0.4.23' 'jaxlib==0.4.23+cuda12.cudnn89' \ -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html # 3. Install PINNED nvidia package versions (critical - newer versions won't work!) -pip install \ +uv pip install \ 'nvidia-cublas-cu12==12.3.4.1' \ 'nvidia-cuda-cupti-cu12==12.3.101' \ 'nvidia-cuda-runtime-cu12==12.3.101' \ @@ -155,7 +155,7 @@ separate `_gpu.py` variant is needed. | `linum_assess_slice_quality.py` | SSIM, morphology | 3-8x | | `linum_aip_png.py` | Mean projection | ≤1x | | `linum_generate_mosaic_aips.py` | Mean projection | ≤1x | -| `linum_normalize_z_intensity.py` | Scale-factor computation | varies | +| `linum_correct_bias_field.py` | N4 bias field estimation | varies | | `linum_estimate_global_transform.py` | Phase correlation | 8-16x | --- diff --git a/docs/MOSAIC_GRID_FORMAT.md b/docs/MOSAIC_GRID_FORMAT.md index 05706049..68d81132 100644 --- a/docs/MOSAIC_GRID_FORMAT.md +++ b/docs/MOSAIC_GRID_FORMAT.md @@ -308,7 +308,7 @@ slice_z00_normalize.ome.zarr ### Napari (Recommended) ```bash -pip install napari[all] +uv pip install 'napari[all]' napari mosaic_grid_3d_z00.ome.zarr ``` diff --git a/docs/NEXTFLOW_WORKFLOWS.md b/docs/NEXTFLOW_WORKFLOWS.md index ac0a8551..3a6ab3c5 100644 --- a/docs/NEXTFLOW_WORKFLOWS.md +++ b/docs/NEXTFLOW_WORKFLOWS.md @@ -98,21 +98,18 @@ nextflow run preproc_rawtiles.nf \ | `processes` | `1` | Parallel Python processes per task (CPU mode only) | | `max_mosaic_forks` | `4` | Max concurrent `create_mosaic_grid` GPU jobs | | `max_aip_forks` | `4` | Max concurrent `generate_aip` GPU jobs | -| `max_quality_forks` | `2` | Max concurrent `assess_slice_quality` GPU jobs | | `axial_resolution` | `1.36` | Axial resolution (µm) | | `resolution` | `-1` | Output resolution (-1 = full native resolution) | | `sharding_factor` | `4` | Zarr sharding (NxN chunks/shard) | | `fix_galvo_shift` | `true` | Correct galvo shifts | | `fix_camera_shift` | `false` | Correct camera shifts | +| `preprocess` | `false` | Apply rotation/flip preprocessing (true for legacy data) | | `galvo_confidence_threshold` | `0.6` | Minimum confidence to apply galvo fix | | `generate_slice_config` | `true` | Generate slice_config.csv | | `exclude_first_slices` | `1` | Number of leading slices to mark as excluded | | `detect_galvo` | `false` | Include galvo detection results in slice_config.csv | | `generate_previews` | `false` | Generate orthogonal view previews of mosaic grids | | `generate_aips` | `false` | Generate AIP images from mosaic grids for QC | -| `assess_quality` | `false` | Run quality assessment and update slice_config | -| `min_quality_score` | `0.2` | Minimum quality score to include slice (0 = report only) | -| `quality_sample_depth` | `10` | Z-planes sampled per slice during quality assessment | ### Outputs @@ -207,7 +204,7 @@ nextflow run soct_3d_reconst.nf \ | `fix_curvature_enabled` | `false` | Detect and compensate focal curvature artifacts | | `fix_illum_enabled` | `true` | Fix illumination inhomogeneity (BaSiCPy algorithm) | | `crop_interface_out_depth` | `600` | Maximum tissue depth after interface crop (µm) | -| `normalize_min_contrast` | `0.1` | Min contrast fraction to prevent over-amplification of empty slices (0–1) | + #### Tile Stitching @@ -362,17 +359,6 @@ and correlation or physics-based Z-matching. 
| `stack_translation_smooth_sigma` | `3.0` | Gaussian sigma (slices) for smoothing accumulated translations (0 = disabled) | | `stack_translation_min_zcorr` | `0.2` | Min z_correlation to use a slice's translation in accumulation | -**Legacy post-hoc rehoming (for re-stacking old data):** - -| Parameter | Default | Description | -|-----------|---------|-------------| -| `stitch_rehoming_enabled` | `false` | Apply one-time segment offset at re-homing event boundaries during stacking | -| `stitch_rehoming_threshold_mm` | `0.7` | Motor shift magnitude that identifies a re-homing event (mm) | -| `stitch_rehoming_use_motor` | `false` | Use motor delta instead of pairwise registration for the correction | - -Modern pipelines should rely on `detect_rehoming` in common-space alignment -instead of these stacking-time corrections. - **Output pyramid:** | Parameter | Default | Description | @@ -390,30 +376,15 @@ The `pyramid_resolutions` parameter controls the multi-resolution pyramid in the **Note:** Only resolutions ≥ the base `resolution` parameter will be included. For example, if `resolution = 25`, then only 25, 50, and 100 µm levels will be created. -#### Z-Intensity Normalization - -Corrects slow intensity drift across serial sections after stacking. Disabled by default. - -| Parameter | Default | Description | -|-----------|---------|-------------| -| `normalize_z_slices` | `false` | Enable post-stacking Z-intensity normalization | -| `znorm_mode` | `'histogram'` | Normalization mode: `histogram` (preserves contrast) or `percentile` (linear scaling) | -| `znorm_strength` | `0.5` | Correction mixing strength (0 = passthrough, 1 = full correction) | - -**Histogram mode** (`znorm_mode = 'histogram'`): - -| Parameter | Default | Description | -|-----------|---------|-------------| -| `znorm_tissue_threshold` | `0.02` | Minimum intensity to classify as tissue (below this left unchanged) | +#### Bias Field Correction -**Percentile mode** (`znorm_mode = 'percentile'`): +Corrects slow intensity drift and bias field across serial sections after stacking using N4 bias field correction (SimpleITK). Disabled by default. | Parameter | Default | Description | |-----------|---------|-------------| -| `znorm_smooth_sigma` | `10.0` | Gaussian smoothing sigma (sections); ~10 corrects ~2mm drift and preserves anatomy | -| `znorm_percentile` | `80.0` | Percentile of non-zero tissue voxels used as intensity reference | -| `znorm_max_scale` | `2.0` | Maximum correction scale factor | -| `znorm_min_scale` | `0.5` | Minimum correction scale factor | +| `correct_bias_field` | `false` | Enable post-stacking N4 bias field correction | +| `bias_mode` | `'two_pass'` | Correction mode: `per_section` (N4 per thick section), `global` (single volume pass), or `two_pass` (per-section then global) | +| `bias_strength` | `1.0` | Correction mixing strength (0 = passthrough, 1 = full correction) | #### Atlas Registration (RAS Alignment) @@ -542,7 +513,7 @@ tight image-based registration. | Parameter | Default | Description | |-----------|---------|-------------| -| `analyze_shifts` | `false` | Generate shifts analysis report and drift plots | +| `analyze_shifts` | `true` | Generate shifts analysis report and drift plots | | `debug_slices` | `""` | Comma-separated slice IDs or ranges to process (e.g. 
`"25,26"` or `"25-29"`); leave empty to process all | The `analyze_shifts` option runs drift analysis on the shifts file before processing, producing: @@ -559,7 +530,6 @@ Diagnostic mode enables additional analysis processes for troubleshooting recons | `diagnostic_mode` | `false` | Master switch: enables all diagnostic analyses | | `analyze_rotation_drift` | `false` | Analyze cumulative rotation between slices | | `analyze_acquisition_rotation` | `false` | Analyze acquisition-time rotation from shifts + registration | -| `analyze_tile_dilation` | `false` | Analyze tile position refinements for scale drift (works best with `max_blend_refinement_px = 0`) | | `motor_only_stitch` | `false` | Stitch slices using motor positions only (no image registration) | | `motor_only_stack` | `false` | Stack slices using motor positions only (no pairwise registration) | | `compare_stitching` | `false` | Compare motor-only vs refined stitching side-by-side | @@ -588,36 +558,42 @@ Diagnostic outputs are written to `{output}/diagnostics/` and include rotation p ``` output/ ├── README/readme.txt +├── analyze_shifts/ # Only when analyze_shifts = true ├── resample_mosaic_grid/ ├── fix_focal_curvature/ ├── fix_illumination/ -├── generate_aip/ -├── estimate_xy_transformation/ -├── stitch_3d/ +├── stitch_3d_with_refinement/ +├── previews/stitched_slices/ # Only when stitch_preview = true ├── beam_profile_correction/ ├── crop_interface/ ├── normalize/ +├── detect_rehoming_events/ # Only when detect_rehoming = true +├── auto_assess_quality/ # Only when auto_assess_quality = true ├── bring_to_common_space/ +├── common_space_previews/ # Only when common_space_preview = true +├── interpolate_missing_slice/ # Only when interpolate_missing_slices = true +├── finalise_interpolation/ ├── register_pairwise/ +├── auto_exclude_slices/ # Only when auto_exclude_enabled = true ├── stack/ -│ ├── 3d_volume.ome.zarr -│ ├── 3d_volume.ome.zarr.zip -│ └── 3d_volume.png -├── normalize_z_intensity/ # Only when normalize_z_slices = true -│ └── 3d_volume_znorm.ome.zarr +│ ├── {subject}.ome.zarr +│ ├── {subject}.ome.zarr.zip +│ ├── {subject}.png +│ └── {subject}_annotated.png +├── correct_bias_field/ # Only when correct_bias_field = true +│ └── {subject}_corrected.ome.zarr ├── align_to_ras/ # Only when align_to_ras_enabled = true -│ ├── {subject}_ras.ome.zarr # RAS-aligned volume (all pyramid levels) -│ ├── {subject}_ras_transform.tfm # Registration transform (SimpleITK) -│ └── {subject}_ras_preview.png # 3-panel alignment comparison +│ ├── {subject}_ras.ome.zarr +│ ├── {subject}_ras_transform.tfm +│ └── {subject}_ras_preview.png ├── diagnostics/ # Only when diagnostic_mode = true or individual flags set │ ├── rotation_analysis/ │ ├── acquisition_rotation/ -│ ├── dilation_analysis/ -│ ├── aggregated_dilation/ │ ├── motor_only_stitch/ +│ ├── refined_stitch/ │ ├── motor_only_stack/ │ └── stitch_comparison/ -└── {subject}_quality_report.html +└── {subject}_quality_report.html # Only when generate_report = true ``` --- @@ -632,10 +608,8 @@ Both workflows support GPU acceleration using NVIDIA CUDA via CuPy. 
GPU processi |----------|---------|----------------| | `preproc_rawtiles.nf` | `create_mosaic_grid` | Galvo detection, volume resize | | `preproc_rawtiles.nf` | `generate_aip` | Mean projection | -| `preproc_rawtiles.nf` | `assess_slice_quality` | SSIM, edge detection (Sobel) | | `soct_3d_reconst.nf` | `resample_mosaic_grid` | Volume resize | | `soct_3d_reconst.nf` | `fix_illumination` | BaSiCPy background correction (JAX on GPU) | -| `soct_3d_reconst.nf` | `estimate_xy_transformation` | Phase correlation (FFT) | | `soct_3d_reconst.nf` | `normalize` | Intensity normalization, percentile clipping | ### Usage @@ -665,7 +639,7 @@ params { For GPU support: - NVIDIA GPU with CUDA support -- CuPy installed: `pip install cupy-cuda12x` +- CuPy installed: `uv pip install cupy-cuda12x` - See [GPU_ACCELERATION.md](GPU_ACCELERATION.md) for detailed setup ### Expected Speedups diff --git a/docs/PIPELINE_OVERVIEW.md b/docs/PIPELINE_OVERVIEW.md index c32fede9..970cc057 100644 --- a/docs/PIPELINE_OVERVIEW.md +++ b/docs/PIPELINE_OVERVIEW.md @@ -10,54 +10,55 @@ The linumpy processing pipeline converts raw S-OCT (Serial Optical Coherence Tom 1. **Preprocessing Pipeline** (`preproc_rawtiles.nf`) - Converts raw tiles to mosaic grids 2. **3D Reconstruction Pipeline** (`soct_3d_reconst.nf`) - Creates 3D volumes from mosaic grids -![Workflow Diagram](workflow_reconstruction_2-5d.png) - --- ## Data Flow -``` -Raw Tiles (tile_x*_y*_z*) - ↓ -┌───────────────────────────────┐ -│ PREPROCESSING PIPELINE │ -│ preproc_rawtiles.nf │ -├───────────────────────────────┤ -│ • Create 3D mosaic grids │ -│ • Estimate XY shifts │ -│ • Generate slice config │ -│ • [opt] Generate AIPs │ -│ • [opt] Assess slice quality │ -│ • [opt] Generate previews │ -└───────────────────────────────┘ - ↓ -Mosaic Grids (*.ome.zarr) + shifts_xy.csv + slice_config.csv - ↓ -┌───────────────────────────────┐ -│ 3D RECONSTRUCTION PIPELINE │ -│ soct_3d_reconst.nf │ -├───────────────────────────────┤ -│ • [opt] Resample mosaic grids │ -│ • [opt] Fix focal curvature │ -│ • [opt] Fix illumination │ -│ • Generate AIP │ -│ • Estimate XY transforms │ -│ • Stitch tiles in 3D │ -│ • Beam profile correction │ -│ • Crop at interface │ -│ • Normalize intensities │ -│ • [opt] Auto-assess quality │ -│ • [opt] Detect re-homing │ -│ • Align to common space │ -│ • [opt] Interpolate gaps │ -│ • Pairwise registration │ -│ • [opt] Auto-exclude clusters │ -│ • Stack into 3D volume │ -│ • [opt] Z-intensity normalize │ -│ • [opt] Register to atlas │ -└───────────────────────────────┘ - ↓ -3D Volume (3d_volume.ome.zarr) +```mermaid +flowchart TD + RAW["📂 Raw Tiles\n(tile_x*_y*_z*)"] + + subgraph PREPROC[" PREPROCESSING · preproc_rawtiles.nf "] + P1["Create 3D mosaic grids"] + P2["Estimate XY shifts"] + P3["Generate slice config"] + P4["[opt] Generate AIPs"]:::opt + P5["[opt] Generate previews"]:::opt + P1 --> P2 --> P3 + P1 -.-> P4 + P1 -.-> P5 + end + + INTER["Mosaic Grids (*.ome.zarr)\nshifts_xy.csv · slice_config.csv"] + + subgraph RECONST[" 3D RECONSTRUCTION · soct_3d_reconst.nf "] + R00["[opt] Analyze shifts"]:::opt + R01["[opt] 1 · Resample mosaic grids"]:::opt --> R02["[opt] 2 · Fix focal curvature"]:::opt --> R03["[opt] 3 · Fix illumination"]:::opt --> R04["4 · Stitch tiles in 3D"] --> R05["5 · Beam profile correction"] --> R06["6 · Crop at interface"] --> R07["7 · Normalize intensities"] --> R08["[opt] 8 · Auto-assess quality"]:::opt --> R09["[opt] 9 · Detect re-homing"]:::opt --> R10["10 · Align to common space"] --> R11["[opt] 11 · Interpolate missing slices"]:::opt 
--> R12["12 · Pairwise registration"] --> R13["[opt] 13 · Auto-exclude clusters"]:::opt --> R14["14 · Stack into 3D volume"] + R03a["[opt] 3a · Estimate global transform"]:::opt -.-> R04 + R11 -.-> R11f["Finalise interpolation"] + R12r["[opt] Refine manual transforms"]:::opt --> R13 + R14 -.-> R14b["[opt] 15 · Bias field correction"]:::opt + R14b -.-> REPORT["[opt] Generate report"]:::opt + R14 --> R16["[opt] 16 · Register to atlas"]:::opt + R14b --> R16 + end + + OUT[("Subject Volume\n({subject}.ome.zarr)")] + + RAW --> P1 + P3 --> INTER + INTER --> R00 + INTER --> R01 + R14 --> OUT + R14b --> OUT + R16 --> OUT + + classDef opt fill:#f5f5f5,stroke:#999,stroke-dasharray:3 3,color:#555 + style RAW fill:#fff3e0,stroke:#FF9800,stroke-width:2px + style INTER fill:#fff3e0,stroke:#FF9800,stroke-width:2px + style OUT fill:#fff3e0,stroke:#FF9800,stroke-width:2px + style PREPROC fill:#e3f2fd,stroke:#1976D2,stroke-width:2px,color:#000 + style RECONST fill:#e8f5e9,stroke:#388E3C,stroke-width:2px,color:#000 ``` --- @@ -91,21 +92,18 @@ Converts raw OCT tiles into organized mosaic grids and extracts metadata for sub | `reserved_cpus` | `2` | CPUs to keep free for overhead | | `max_mosaic_forks` | `4` | Max concurrent `create_mosaic_grid` GPU jobs | | `max_aip_forks` | `4` | Max concurrent `generate_aip` GPU jobs | -| `max_quality_forks` | `2` | Max concurrent `assess_slice_quality` GPU jobs | | `axial_resolution` | `1.36` | Axial resolution in microns | | `resolution` | `-1` | Output resolution (-1 = full native resolution) | | `sharding_factor` | `4` | Zarr sharding factor | | `fix_galvo_shift` | `true` | Enable galvo shift detection and correction | | `fix_camera_shift` | `false` | Correct camera shifts (old data) | +| `preprocess` | `false` | Apply rotation/flip preprocessing (true for legacy data) | | `galvo_confidence_threshold` | `0.6` | Minimum confidence to apply galvo fix | | `generate_slice_config` | `true` | Generate slice_config.csv | | `exclude_first_slices` | `1` | Number of leading slices to mark as excluded in slice_config | | `detect_galvo` | `false` | Include galvo detection results in slice_config.csv | | `generate_previews` | `false` | Generate orthogonal view previews of mosaic grids | | `generate_aips` | `false` | Generate AIP images from mosaic grids for QC | -| `assess_quality` | `false` | Run quality assessment and update slice_config | -| `min_quality_score` | `0.2` | Minimum quality score to include slice (0 = report only) | -| `quality_sample_depth` | `10` | Z-planes sampled per slice during quality assessment | ### Processes @@ -114,7 +112,6 @@ Converts raw OCT tiles into organized mosaic grids and extracts metadata for sub 3. **generate_slice_config**: Creates slice configuration file 4. **generate_aip** *(optional, `generate_aips = true`)*: Generates AIP images for QC visualisation 5. **generate_mosaic_preview** *(optional, `generate_previews = true`)*: Generates orthogonal view previews -6. **assess_slice_quality** *(optional, `assess_quality = true`)*: Runs GPU-accelerated quality assessment and updates slice_config ### Galvo Shift Correction @@ -159,9 +156,9 @@ Processes mosaic grids through multiple correction and stitching steps to produc ### Output -1. **3D volume**: `3d_volume.ome.zarr` -2. **Compressed volume**: `3d_volume.ome.zarr.zip` -3. **Preview image**: `3d_volume.png` +1. **3D volume**: `{subject_name}.ome.zarr` (subject auto-extracted from input path; override with `subject_name` param) +2. **Compressed volume**: `{subject_name}.ome.zarr.zip` +3. 
**Preview images**: `{subject_name}.png`, `{subject_name}_annotated.png`; per-slice previews in `common_space_previews/` and `previews/stitched_slices/`; atlas preview in `align_to_ras/` (when `align_to_ras_enabled = true`) ### Processing Steps @@ -190,30 +187,16 @@ fix_illumination - Uses BaSiC algorithm - Enabled by `fix_illum_enabled = true` -#### 4. Average Intensity Projection - -``` -generate_aip -``` -- Creates 2D AIP from 3D mosaic for registration - -#### 5. XY Transformation Estimation +#### 4. 3D Stitching ``` -estimate_xy_transformation +stitch_3d_with_refinement ``` -- Estimates tile positions from AIP mosaic grid - -#### 6. 3D Stitching - -``` -stitch_3d -``` -- Stitches tiles into 3D slice using estimated transforms +- Internally computes the per-tile AIP, estimates tile positions via phase correlation (or a global transform when `stitch_global_transform = true`), then stitches tiles into a 3D slice - Tile blending is controlled by `stitch_blending_method` (default: `'diffusion'`); sub-pixel refinement by `max_blend_refinement_px` - Motor-only stitching is available for diagnostics via `motor_only_stitch = true` -#### 7. Beam Profile Correction +#### 5. Beam Profile Correction ``` beam_profile_correction @@ -221,7 +204,7 @@ beam_profile_correction - Model-free PSF compensation - Corrects axial intensity variations -#### 8. Interface Cropping +#### 6. Interface Cropping ``` crop_interface @@ -229,7 +212,7 @@ crop_interface - Crops volume below sample interface - Removes agarose/mounting medium -#### 9. Intensity Normalization +#### 7. Intensity Normalization ``` normalize @@ -237,33 +220,51 @@ normalize - Normalizes intensities per slice - Compensates signal attenuation with depth -#### 10. Common Space Alignment +#### 8. Auto Slice Quality Assessment (Optional) ``` -bring_to_common_space +auto_assess_quality +``` +- Runs GPU-accelerated quality scoring on all normalized slices +- Enabled by `auto_assess_quality = true` +- Stamps quality scores into `slice_config.csv`; slices below `auto_assess_min_quality` are marked `auto_excluded` +- Any existing manually-excluded slices are preserved (merged with the incoming config) + +**Key Parameters:** +- `auto_assess_quality`, `auto_assess_min_quality`, `auto_assess_exclude_first`, `auto_assess_roi_size` + +#### 9. Re-homing Detection (Optional) + ``` -- Aligns all slices using XY shifts from microscope metadata -- Resamples to common shape -- **Re-homing detection** (`detect_rehoming = true`, default): encoder glitch - spikes (large step that self-cancels with the adjacent step) are zeroed - before alignment. Genuine re-homing events (large step that stays) are - preserved. Optional `tile_fov_mm` also corrects mosaic-column expansion - artefacts in legacy shifts files. -- **Excluded-slice handling**: shifts involving excluded slices are replaced - using the neighbours (`local_median` by default). -- **Optional image-based refinement** (`common_space_refine_unreliable`) uses - 2-D phase correlation to recompute shifts flagged `reliable=0`. -- Centres drift around the middle slice to keep tissue in the common volume. 
+detect_rehoming_events +``` +- Corrects encoder-glitch spikes and mosaic-column expansion jumps in `shifts_xy.csv` before common-space alignment +- Enabled by `detect_rehoming = true` (default) +- **Spike correction**: a step that self-cancels with the adjacent step is zeroed +- **Tile FOV correction** (`tile_fov_mm`): genuine re-homing events that are integer multiples of the tile FOV are preserved and corrected in position space +- Outputs `shifts_xy_clean.csv` with a `reliable` column; `reliable=0` flags transitions that exceeded `rehoming_max_shift_mm` and may need image-based verification **Key Parameters:** - `detect_rehoming`, `rehoming_return_fraction`, `rehoming_max_shift_mm` - `tile_fov_mm` (legacy shifts only) -- `common_space_refine_unreliable` + `common_space_refine_min_correlation` +- `rehoming_diagnostics` — write `rehoming_report.json` + diagnostic plot + +#### 10. Common Space Alignment + +``` +bring_to_common_space +``` +- Aligns all slices using (optionally corrected) XY shifts from microscope metadata +- Resamples to a shared canvas; centres drift around the middle slice to keep tissue in the common volume +- **Excluded-slice handling**: shifts involving excluded slices are replaced using the neighbours (`local_median` by default) +- **Optional image-based refinement** (`common_space_refine_unreliable`) uses 2-D phase correlation to recompute shifts flagged `reliable=0` + +**Key Parameters:** +- `common_space_refine_unreliable` + `common_space_refine_min_correlation` + `common_space_refine_max_discrepancy_px` - `common_space_excluded_slice_mode` (`keep` | `local_median` | `median` | `zero`) **Debugging:** - Enable `common_space_preview = true` for aligned-slice previews -- Enable `rehoming_diagnostics = true` for a `rehoming_report.json` / plot - Check `bring_to_common_space/` for the aligned slices #### 11. Missing Slice Interpolation (Optional) @@ -293,7 +294,19 @@ register_pairwise The `bring_to_common_space` step (step 10) provides initial XY alignment using microscope metadata, while pairwise registration fine-tunes the alignment between adjacent slices. -#### 13. Volume Stacking +#### 13. Auto-Exclude Clusters (Optional) + +``` +auto_exclude_slices +``` +- Detects clusters of consecutive low-quality pairwise registrations using DBSCAN on registration metrics (NCC, translation magnitude) +- Enabled by `auto_exclude_quality = true` +- Stamps `auto_excluded` / `auto_exclude_reason` into `slice_config.csv`; stacking falls back to motor-only positioning for excluded slices + +**Key Parameters:** +- `auto_exclude_quality`, `auto_exclude_consecutive`, `auto_exclude_z_corr` + +#### 14. Volume Stacking ``` stack @@ -306,17 +319,17 @@ stack - Confidence-based transform degradation: high-confidence pairs get full transforms, low-confidence get rotation-only, very low get skipped - Translation accumulation steers the viewing plane to reduce inter-slice drift -#### 14. Z-Intensity Normalization (Optional) +#### 15. 
Bias Field Correction (Optional) ``` -normalize_z_intensity +correct_bias_field ``` -- Corrects slow intensity drift across serial sections after stacking -- Enabled by `normalize_z_slices = true` -- Two modes: `histogram` (preserves relative tissue contrast) or `percentile` (linear scaling) -- Controlled by `znorm_strength` (0 = passthrough, 1 = full correction) +- Corrects slow intensity drift and bias field across serial sections after stacking using N4 bias field correction +- Enabled by `correct_bias_field = true` +- Three modes: `per_section` (N4 per thick section), `global` (single volume pass), or `two_pass` (per-section then global) +- Controlled by `bias_strength` (0 = passthrough, 1 = full correction) -#### 15. Atlas Registration (Optional) +#### 16. Atlas Registration (Optional) ``` align_to_ras @@ -364,9 +377,9 @@ The final 3D volume is stored as an OME-Zarr with multiple resolution levels opt | `apply_rotation_only` | `false` | Apply only rotation from pairwise registration during stacking | | `stack_accumulate_translations` | `true` | Accumulate pairwise translations as cumulative canvas offsets | | `stack_confidence_weight_translations` | `true` | Weight translations by confidence before accumulating | -| `stack_max_cumulative_drift_px` | `0` | Max cumulative drift from motor baseline (0 = unlimited) | +| `stack_max_cumulative_drift_px` | `50` | Max cumulative drift from motor baseline (0 = unlimited) | | `interpolation_blend_method` | `'gaussian'` | Blend method: `gaussian` (feathered) or `linear` | -| `normalize_z_slices` | `false` | Enable post-stacking Z-intensity normalization | +| `correct_bias_field` | `false` | Enable post-stacking N4 bias field correction | | `pyramid_resolutions` | `[10, 25, 50, 100]` | Pyramid resolution levels (µm) | | `pyramid_make_isotropic` | `true` | Resample to isotropic voxel spacing | | `use_gpu` | `true` | Enable GPU acceleration (auto-fallback to CPU) | @@ -387,6 +400,242 @@ The pairwise registration uses a two-step approach: - Set `registration_slicing_interval_mm` to match your actual slice thickness +--- + +## Algorithmic Details + +This section gives both an **intuitive** ("what is this doing and why") and a **mathematical** ("what does the code compute") description of each major pipeline step. Notation: + +- $I(z, y, x)$ — a 3D volume indexed in (Z, Y, X) order +- $\mathrm{AIP}(y, x) = \frac{1}{N_z} \sum_z I(z, y, x)$ — average intensity projection along Z +- $\mathrm{NCC}(a, b)$ — zero-lag normalized cross-correlation (Pearson): $\frac{\sum (a - \bar{a})(b - \bar{b})}{\sqrt{\sum(a-\bar{a})^2 \cdot \sum(b-\bar{b})^2}}$ +- Shifts are pixel-valued; $\delta = (\delta_y, \delta_x)$ denotes an XY translation + +### Galvo Shift Correction (preprocessing) + +**Intuition.** The galvo mirror scans across the sample along the A-line axis; when it returns to its starting position the detector still records a few lines. These lines sit at a slightly different optical state, so they form a dark (or sometimes bright) **band** at a fixed position in every tile. If not removed, the band propagates into the stitched mosaic as a horizontal stripe. + +**Math.** Given the per-tile AIP $A \in \mathbb{R}^{N_a \times N_b}$ (A-lines × B-scans), let $\bar{A}(y) = \mathrm{mean}_x A(y, x)$ be the B-scan–averaged 1D profile (median-filtered, size 5). 
The window $[p, p+w)$ is found by one of two paths depending on data availability: + +**Path 1 — raw `.bin` tiles** (window width $w = n_\text{extra}$ known from acquisition metadata): +The position $p$ is found by matching a pair of large gradients separated by exactly $w$ pixels: +$$ +\mathbf{g}(y) = |\bar{A}(y+1) - \bar{A}(y)|, \qquad +p = \arg\max_{y} \; \mathbf{g}(y) \cdot \mathbf{g}(y + w). +$$ +The product is large only where two sharp edges occur exactly $w$ apart — the entry and exit of the galvo return zone. The corrective circular shift is $s = N_a - p - w$ (moves the window to the end of the A-line axis). + +**Path 2 — assembled mosaic tiles** (neither $p$ nor $w$ is known a priori): +A robust tissue baseline $b_{75} = P_{75}(\bar{A})$ is computed (the 75th percentile sits above the dark band but below saturation). Pixels below $b_{75}(1 - f_\text{drop})$ (default $f_\text{drop} = 0.40$) are marked as dark. Consecutive dark runs separated by $\le 2$ pixels are merged; the group with the largest cumulative intensity deficit $\sum_y (b_{75}(1-f_\text{drop}) - \bar{A}(y))^+$ is selected as $[p, p+w)$. Groups wider than $0.2\,N_a$ are rejected as non-galvo structure (e.g. tissue boundary). + +**Confidence scoring** (both paths). Once $[p, p+w)$ is established, three components are computed over $N_c \le 20$ representative columns: + +1. **Boundary contrast** $c_b$ — mean relative intensity drop of the band vs. surrounding rows + $$ + c_b = \frac{1}{N_c}\sum_{j=1}^{N_c} \max\!\left(0,\frac{\bar{A}_\text{sur}(j) - \bar{A}_\text{band}(j)}{\bar{A}_\text{sur}(j)}\right) + $$ +2. **Significant drop ratio** $c_s$ — fraction of columns where the relative drop exceeds 10 % +3. **Average drop depth** $c_d$ — mean relative drop across dark columns, normalised to 0.3 + +Combined: $C = 0.40\,c_b + 0.35\,c_s + 0.25\,c_d \in [0, 1]$ (columns with $\bar{A}_\text{sur} < 10$ are excluded). If the boundary-contrast term $c_b < 0.5$, the score is capped at $0.3 \cdot c_b$ to reject spurious detections in sparse tiles. + +The corrective circular shift is only applied when $C \ge$ `galvo_confidence_threshold`. For multi-tile slices (Path 1), up to 5 centre-region tiles are sampled and the best-confidence detection is penalised by a cross-tile consistency factor $\sqrt{n_\text{consistent}/n_\text{sampled}}$ before thresholding. + +--- + +### Focal Curvature Correction + +**Intuition.** The focal plane of the OCT objective is not flat — it bends slightly, so the tissue–water interface appears as a curved surface in the reconstructed volume even when the physical surface is flat. The correction fits a smooth surface to this apparent interface and resamples each A-line so the surface becomes flat. + +**Math.** Detect the interface depth map $z_0(y, x) = \arg\max_z \partial_z I(z, y, x)$ (after Gaussian smoothing). Fit a quadratic surface +$$ +\hat{z}_0(y, x) = a + by + cx + dy^2 + exy + fx^2, +$$ +and shift each A-line so that $z'(y, x) = z - (\hat{z}_0(y, x) - \min \hat{z}_0)$. The shift is per-pixel; pixels that fall off the bottom are zero-padded. + +--- + +### Illumination Correction (BaSiC) + +**Intuition.** Each tile has a slowly varying brightness shading from the illumination profile — typically brighter in the centre and darker at the corners. BaSiC learns this shading pattern jointly across all tiles by assuming it is a smooth, low-rank flat-field that multiplies the true signal. + +**Math.** Given a stack of tile AIPs $A_i$, BaSiC solves +$$ +\min_{F, D, B_i} \quad \sum_i \| A_i - (F \odot B_i + D)\|_1 + \lambda_F \|F\|_* + \lambda_D \|D\|_1 +$$ +where $F$ is the flat-field (smooth, nuclear-norm regularised), $D$ is the dark-field, and $B_i$ is the per-tile baseline. Each tile is then corrected as $\hat{A}_i = (A_i - D) / F$. The JAX implementation runs this on GPU.
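As an illustration, a minimal shading-correction sketch with the `basicpy` package (the JAX implementation referenced above); the input file name, stack shape, and constructor arguments are illustrative assumptions, not the pipeline's exact call:

```python
# Sketch: joint flat-/dark-field estimation over a stack of tile AIPs.
# Assumes `tiles` is a (n_tiles, ny, nx) float array of tile projections.
import numpy as np
from basicpy import BaSiC

tiles = np.load("tile_aips.npy")   # hypothetical input stack

basic = BaSiC(get_darkfield=True)  # estimate flat-field F and dark-field D
basic.fit(tiles)

corrected = basic.transform(tiles)  # applies (A_i - D) / F to each tile
print(basic.flatfield.shape, basic.darkfield.shape)
```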
+ +--- + +### XY Transform Estimation (Phase Correlation) + +**Intuition.** For two overlapping tiles, their correct relative offset is the one that makes them most similar. Phase correlation finds this offset in Fourier space: a translation in real space is a linear phase in frequency, so the cross-power spectrum has a sharp peak at the correct shift. + +**Math.** For overlapping AIP patches $a, b$: +$$ +R(\xi) = \mathcal{F}^{-1}\!\left\{ \frac{\mathcal{F}(a)\,\overline{\mathcal{F}(b)}}{|\mathcal{F}(a)\,\overline{\mathcal{F}(b)}|} \right\}, \qquad \hat{\delta} = \arg\max_{\xi} R(\xi). +$$ +The top-$n$ peaks are returned (see `phase_correlation`), then consensus-filtered to reject spurious peaks caused by repetitive tissue features. A residual RMS across all tile pairs is logged as a quality metric.
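A self-contained NumPy sketch of the single-peak version of this estimate (the pipeline's `phase_correlation` helper additionally returns the top-$n$ peaks and consensus-filters them; the sign convention below is one common choice):

```python
# Phase correlation between two same-shape overlapping patches a and b.
import numpy as np

def phase_correlation_shift(a: np.ndarray, b: np.ndarray) -> tuple[int, int]:
    """Return the integer (dy, dx) to apply to `b` so it aligns with `a`."""
    cross = np.fft.fft2(a) * np.conj(np.fft.fft2(b))
    r = np.fft.ifft2(cross / (np.abs(cross) + 1e-12)).real  # correlation surface
    dy, dx = np.unravel_index(np.argmax(r), r.shape)
    # Wrap shifts beyond half the patch size into negative offsets.
    if dy > a.shape[0] // 2:
        dy -= a.shape[0]
    if dx > a.shape[1] // 2:
        dx -= a.shape[1]
    return int(dy), int(dx)
```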
+ +--- + +### 3D Stitching with Diffusion Blending + +**Intuition.** Pasting tiles next to each other with a hard boundary leaves visible seams because neighbouring tiles never have exactly the same brightness. Diffusion blending treats the overlap region like a heat-diffusion problem: known intensities on the tile boundaries act as Dirichlet boundary conditions, and intermediate values are obtained by solving Laplace's equation, producing a smooth transition with no visible seam. + +**Math.** In each overlap region $\Omega$ with two tiles $t_1, t_2$, solve +$$ +\nabla^2 u = 0 \quad \text{in } \Omega, \qquad u|_{\partial \Omega_1} = t_1, \quad u|_{\partial \Omega_2} = t_2. +$$ +The sub-pixel refinement step first runs 2D phase correlation on the Z-projected overlap; if the estimated shift has magnitude $\le$ `max_blend_refinement_px` it is applied before blending (otherwise discarded as unreliable). + +--- + +### Beam Profile Correction (Model-Free PSF) + +**Intuition.** The OCT signal decays with depth because (i) the focused beam is only in focus near its waist and (ii) light is attenuated by tissue. A model-free correction estimates this depth-dependent envelope from the data itself (tissue-averaged A-line profile) and divides it out, so signal at every depth is on an equal footing. + +**Math.** Let $\mathcal{M}$ be the tissue mask. Compute the tissue-averaged axial profile +$$ +p(z) = \mathrm{median}_{(y,x) \in \mathcal{M}} I(z, y, x), +$$ +fit a smooth envelope $\hat{p}(z)$ (parametric or low-pass), then apply $I'(z, y, x) = I(z, y, x) \cdot \bar{p} / \hat{p}(z)$ where $\bar{p}$ is the overall reference level. Pipeline metrics record the peak depth $z^\star = \arg\max \hat{p}$ and the agarose coverage used for the mask. + +--- + +### Interface Cropping + +**Intuition.** The mounting agarose above the tissue is bright and distracting. The surface of the tissue is detected as the sharpest Z-gradient (water → tissue transition) and a fixed physical depth below it is kept. + +**Math.** Smooth with Gaussians $\sigma_{xy}, \sigma_z$, then for every $(y, x)$ compute $z_0(y, x) = \arg\max_z \partial_z I$. Keep only pixels where $\max_z \partial_z I > 0.1 \cdot \max(\max_z \partial_z I)$ as "tissue pixels", and take the median over those to get a single interface depth per slice. The output volume is $I[z_0 : z_0 + D]$ where $D = $ `crop_interface_out_depth` / `axial_resolution`. + +--- + +### Per-Slice Intensity Normalization + +**Intuition.** Different slices have different overall brightness (illumination power, sample state). Without correction, the stacked volume shows horizontal bands between slices. Normalization rescales each slice so a reference tissue percentile matches across slices. + +**Math.** For slice $k$, compute a robust upper percentile (e.g. $p_{99}$) over **non-zero tissue voxels** above an Otsu threshold: $q_k = P_{99}(\{I_k > t_{\text{otsu}}\})$. Smooth the per-slice values $q_k$ with a 1-D Gaussian ($\sigma = $ `smooth_sigma`) to produce $\tilde{q}_k$, then rescale +$$ +I'_k = I_k \cdot \frac{Q^\star}{\tilde{q}_k}, \qquad Q^\star = \mathrm{median}_k\,\tilde{q}_k, +$$ +with the multiplier clipped to $[\min\_scale, \max\_scale]$. + +--- + +### Re-homing Detection (for `shifts_xy.csv`) + +**Intuition.** Two kinds of large pairwise motor shifts occur: + +- **Re-homing event** — the microscope moved to a new tissue region; the big step *stays* (subsequent shifts are small around the new position). +- **Encoder glitch** — the encoder reported a spurious value for one slice; the following shift reverses it (round-trip ≈ 0). + +We only want to correct glitches. + +**Math.** For each pairwise step $s_i = (\Delta x_i, \Delta y_i)$ with $|s_i| > $ `max_shift_mm`, check its neighbours. Step $s_i$ is classified as a **glitch spike** iff +$$ +\exists j \in \{i-1, i+1\} : \| s_i + s_j \| < f_\text{return} \cdot \|s_i\|, +$$ +with $f_\text{return} = $ `rehoming_return_fraction` (default 0.4). Glitches are replaced by the local median of their non-outlier neighbours; re-homing events are left unchanged. Steps flagged `reliable=0` are those that were corrected or exceeded thresholds. + +--- + +### Common-Space Alignment + +**Intuition.** Each slice was acquired at a motor position, but we need every slice to live in the same global canvas. We integrate the pairwise shifts into cumulative positions, centre the sequence so the middle slice is at the canvas centre, and paint each slice into its assigned location. + +**Math.** Given cleaned pairwise shifts $s_i$ (mm), the cumulative position is $C_k = \sum_{i \le k} s_i$. Convert to pixels, $P_k = C_k / r$ where $r$ is resolution in mm/pixel, then centre: +$$ +P'_k = P_k - P_{k_\text{mid}}. +$$ +Excluded slices' shifts are replaced by the local median of their neighbours before integration (`common_space_excluded_slice_mode = local_median`). Optionally, transitions with `reliable = 0` are re-estimated from image content by running 2D phase correlation on the neighbouring AIPs (`common_space_refine_unreliable`), provided the best peak correlation exceeds `common_space_refine_min_correlation`. + +--- + +### Missing Slice Interpolation (zmorph) + +**Intuition.** When a slice is missing or excluded we want to fill the gap with a synthetic slice that *morphs* from the slice above to the slice below rather than simply averaging them (averaging creates ghost/double-contour artefacts if the two neighbours differ). zmorph registers the two neighbours with an affine transform, then for each output plane at a fractional depth $\alpha \in [0,1]$ warps each boundary by the appropriate fractional power of that transform and cross-fades. + +**Math.** Register the after-boundary slice $B_\text{after}$ to the before-boundary slice $B_\text{before}$ to obtain the 2D affine $T$ (matrix $M$, translation $t$). For output plane $\alpha \in (0, 1)$: +$$ +P_\alpha(\mathbf{x}) \;=\; (1 - \alpha)\, B_\text{before}(T^{\alpha}\mathbf{x}) \;+\; \alpha\, B_\text{after}(T^{\alpha - 1}\mathbf{x}), +$$ +where the fractional affine $T^{\alpha}$ is built from the **matrix fractional power** $M^{\alpha}$ and a translation component $t_{\alpha} = (I - M^{\alpha})(I - M)^{-1}\,t$ (equivalently $(M^{\alpha} - I)(M - I)^{-1}\,t$). $M^{\alpha}$ is computed via `scipy.linalg.fractional_matrix_power` (Schur decomposition internally); the real part is kept and the max relative imaginary magnitude is tracked as a diagnostic (warning emitted above $10^{-3}$), and affines with $\det(M) \le 0$ are rejected upstream. When $I - M$ is near-singular (pure translation) the code falls back to the linear limit $t_{\alpha} = \alpha\,t$. + +**Quality gates** that force a hard skip (no volume emitted): +- Overlap $\mathrm{NCC}(B_\text{before}, B_\text{after}) < $ `min_overlap_correlation` +- $\mathrm{NCC}$ improvement after registration $< $ `min_ncc_improvement` +- $\det(M) \le 0$ (reflection/degenerate) +- Registration exception + +This is intentional — a failed interpolation leaves a real gap rather than fabricating plausible-looking tissue.
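The fractional-affine construction is compact enough to sketch directly; a minimal version using `scipy.linalg.fractional_matrix_power` for $M^{\alpha}$, with the same closed-form translation and near-singular fallback described above (function and variable names are illustrative):

```python
# Build the fractional affine T^alpha = (M_a, t_a) from T = (M, t).
import numpy as np
from scipy.linalg import fractional_matrix_power

def fractional_affine(M: np.ndarray, t: np.ndarray, alpha: float):
    assert np.linalg.det(M) > 0, "reflections/degenerate affines rejected upstream"
    M_a = np.real(fractional_matrix_power(M, alpha))  # keep the real part
    I = np.eye(M.shape[0])
    if np.linalg.cond(I - M) < 1e8:
        # t_a = (I - M^alpha) (I - M)^{-1} t
        t_a = (I - M_a) @ np.linalg.solve(I - M, t)
    else:
        # Near-pure translation: (I - M^alpha)(I - M)^{-1} -> alpha * I.
        t_a = alpha * t
    return M_a, t_a
```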
+ +--- + +### Pairwise Slice Registration + +**Intuition.** Two consecutive slices share a physical overlap zone (a few tens of microns of tissue). We find the Z-plane in the lower slice that best matches the top of the upper slice, then refine XY (optionally + rotation) with a gradient-descent registration. + +**Math.** Step 1 — **Z-plane matching**. For each candidate offset $z$ in a search range around the expected overlap, +$$ +z^\star = \arg\max_{z} \mathrm{NCC}\!\left(F[z, \mathrm{crop}], M[0, \mathrm{crop}]\right), +$$ +computed on a centre-cropped ROI that discards a 25 % margin on each side. + +Step 2 — **Intensity refinement** (SimpleITK): +$$ +\hat{T} = \arg\min_{T \in \mathcal{E}} \; -\mathrm{NCC}\!\left(F_{z^\star},\ M_0 \circ T\right), +$$ +with $\mathcal{E}$ = Euler2D (rotation + translation) or pure translation, optimised by regular-step gradient descent at 3 pyramid levels, learning rate $4.0$, minimum step $10^{-2}$, max $200$ iterations. Parameters are bounded by `registration_max_translation` and `registration_max_rotation`; a fallback identity transform is written whenever the bound is hit or the metric fails to improve. + +--- + +### Volume Stacking + +**Intuition.** Each slice is placed in the canvas at its motor position; the Z-overlap determined by correlation tells us how much it should intrude into the previous slice; the pairwise rotation (and optionally translation) polishes alignment; and translations are accumulated across the stack to avoid persistent drift in one direction. + +**Math.** +1. **Z-overlap.** For the candidate overlaps $o \in [o_\text{exp} - \Delta, o_\text{exp} + \Delta]$ with $o_\text{exp}$ from slice thickness, + $$ + \hat{o}_k = \arg\max_o \mathrm{NCC}\!\left(F_k[-o:, \mathrm{crop}],\ M_{k+1}[:o, \mathrm{crop}]\right). + $$ + Outlier overlaps deviating from the median by more than `outlier_threshold_frac` are corrected from neighbours (unless the pair has high registration confidence). +2. **Translation accumulation.** Let $\tau_k$ be the XY translation from pairwise registration and $c_k$ its confidence. The cumulative canvas offset is + $$ + u_k = \mathrm{smooth}_\sigma\!\left(\sum_{i \le k} c_i\,\tau_i\right), + $$ + smoothed with a 1D Gaussian of standard deviation `stack_translation_smooth_sigma`. When `stack_max_cumulative_drift_px > 0`, $\|u_k\|$ is capped. +3. **Z-blend.** In the overlap region the Hann weight + $$ + w(z) = \tfrac{1}{2}\left(1 - \cos\!\tfrac{\pi z}{n_z - 1}\right) + $$ + is used to cross-fade: $B = (1 - w) F + w M$ where both are valid, otherwise the single-valid side is kept. Zero slope at both endpoints avoids seams. +4. **Sub-pixel refinement.** Before blending, a 2D phase correlation on the Z-projected overlap may shift the moving slice by $\le$ `max_blend_refinement_px`.
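The Z-blend of step 3 is easy to state in code; a minimal sketch of the Hann cross-fade over an overlap of $n_z$ planes (array names are illustrative, and the zeros-mark-invalid convention is an assumption):

```python
# Cross-fade two overlapping Z-blocks of shape (n_z, ny, nx).
import numpy as np

def hann_zblend(fixed_ov: np.ndarray, moving_ov: np.ndarray) -> np.ndarray:
    n_z = fixed_ov.shape[0]
    w = 0.5 * (1.0 - np.cos(np.pi * np.arange(n_z) / (n_z - 1)))  # 0 -> 1
    w = w[:, None, None]
    blended = (1.0 - w) * fixed_ov + w * moving_ov
    # Where only one side has data (zeros assumed invalid), keep the valid side.
    blended = np.where(fixed_ov == 0, moving_ov,
                       np.where(moving_ov == 0, fixed_ov, blended))
    return blended
```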
+ +--- + +### Post-Stacking Bias Field Correction + +**Intuition.** Even after per-slice normalization, a smooth multiplicative shading can remain across the stacked volume: slow intensity drift from section to section plus residual low-frequency bias within sections. N4 estimates this smooth bias field directly from the data and divides it out. `per_section` mode runs N4 on each thick section independently, `global` runs a single pass over the whole volume, and `two_pass` chains the two. + +**Math.** N4 models the measured volume as $v(\mathbf{x}) = u(\mathbf{x})\,f(\mathbf{x})$, where $u$ is the true intensity and $f$ a smooth bias field. In log space the model is additive, $\log v = \log u + \log f$, and N4 alternates between sharpening the histogram of the current estimate $\hat{u}$ and fitting the smooth residual field $\hat{f}$ with B-splines. The correction is applied with partial mixing, +$$ +I' = (1 - \alpha)\, I + \alpha\, \frac{I}{\hat{f}}, +$$ +with $\alpha = $ `bias_strength` $\in [0, 1]$ interpolating between passthrough ($\alpha = 0$) and full correction ($\alpha = 1$).
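A minimal single-pass sketch of what a `global` N4 run can look like with SimpleITK; the shrink factor, the Otsu mask, and the final `bias_strength` blend are illustrative assumptions, not the pipeline's exact code:

```python
# One global N4 pass over a stacked volume, with partial application.
import SimpleITK as sitk

def n4_correct(volume: sitk.Image, strength: float = 1.0) -> sitk.Image:
    vol = sitk.Cast(volume, sitk.sitkFloat32)
    mask = sitk.OtsuThreshold(vol, 0, 1)       # rough tissue mask
    small = sitk.Shrink(vol, [4, 4, 4])        # N4 is slow at full resolution
    small_mask = sitk.Shrink(mask, [4, 4, 4])
    corrector = sitk.N4BiasFieldCorrectionImageFilter()
    corrector.Execute(small, small_mask)
    log_field = corrector.GetLogBiasFieldAsImage(vol)  # field at full resolution
    corrected = vol / sitk.Exp(log_field)
    return vol * (1.0 - strength) + corrected * strength  # bias_strength blend
```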
+ +--- + +### Atlas / RAS Alignment + +**Intuition.** The reconstructed volume has some arbitrary orientation (depends on how the sample was mounted). For comparison across subjects we need a standard frame. A rigid registration to the Allen Mouse Brain atlas (CCF, downsampled to match) finds the rotation + translation that best aligns the subject's coarse anatomy to the atlas, and we resample the subject volume into the atlas's RAS grid. + +**Math.** Let $V$ be the subject volume reoriented to `ras_input_orientation` and $A$ the atlas template at matching resolution. Solve +$$ +\hat{T} = \arg\min_{T \in \mathrm{SE}(3)} \; -\mathrm{NCC}(A,\ V \circ T), +$$ +with a multi-resolution pyramid. The resulting `.tfm` file and the resampled OME-Zarr (at all pyramid levels) are saved alongside a 3-panel preview comparing subject-in-atlas with the atlas itself. + --- ## GPU Acceleration @@ -399,10 +648,8 @@ Both pipelines support optional GPU acceleration using NVIDIA CUDA via CuPy. GPU |----------|---------|----------------| | Preprocessing | `create_mosaic_grid` | Galvo detection, volume resize | | Preprocessing | `generate_aip` | Mean projection | -| Preprocessing | `assess_slice_quality` | SSIM, edge detection (Sobel) | | 3D Reconstruction | `resample_mosaic_grid` | Volume resize | | 3D Reconstruction | `fix_illumination` | BaSiCPy background correction (JAX on GPU) | -| 3D Reconstruction | `estimate_xy_transformation` | Phase correlation (FFT) | | 3D Reconstruction | `normalize` | Intensity normalization, percentile clipping | ### Running with GPU @@ -418,7 +665,7 @@ nextflow run preproc_rawtiles.nf --input /path/to/data --output /path/to/output ### Requirements - NVIDIA GPU with CUDA support -- CuPy installed (`pip install cupy-cuda12x`) +- CuPy installed (`uv pip install cupy-cuda12x`) - See [GPU_ACCELERATION.md](GPU_ACCELERATION.md) for detailed setup --- @@ -580,18 +827,19 @@ When working with a subset of slices: output/ ├── README/ │ └── readme.txt # Pipeline parameters +├── analyze_shifts/ # Shifts analysis report and drift plots (when analyze_shifts = true) ├── resample_mosaic_grid/ # Resampled mosaics (if enabled) ├── fix_focal_curvature/ # Curvature-corrected mosaics ├── fix_illumination/ # Illumination-corrected mosaics -├── generate_aip/ # AIP projections -├── estimate_xy_transformation/ # XY transforms (per-slice or global) -├── stitch_3d/ # Stitched 3D slices +├── stitch_3d_with_refinement/ # Stitched 3D slices +├── previews/stitched_slices/ # Stitch previews (when stitch_preview = true) ├── beam_profile_correction/ # PSF-corrected slices ├── crop_interface/ # Cropped slices ├── normalize/ # Normalized slices ├── auto_assess_quality/ # (auto_assess_quality = true) Quality-stamped slice_config.csv -├── detect_rehoming/ # Corrected shifts_xy_clean.csv + diagnostics (when enabled) +├── detect_rehoming_events/ # Corrected shifts_xy_clean.csv + diagnostics (when enabled) ├── bring_to_common_space/ # Aligned slices +├── common_space_previews/ # Common space previews (when common_space_preview = true) ├── interpolate_missing_slice/ # (interpolate_missing_slices = true) │ ├── slice_z*.ome.zarr # (only when interpolation succeeded) │ └── manifests/ # Per-slice CSV fragments @@ -600,13 +848,14 @@ output/ ├── auto_exclude_slices/ # (auto_exclude_enabled) slice_config with auto_excluded flags ├── export_manual_align/ # (export_manual_align) AIPs + transforms for manual tool ├── stack/ -│ ├── 3d_volume.ome.zarr # Final 3D volume -│ ├── 3d_volume.ome.zarr.zip # Compressed volume +│ ├── {subject}.ome.zarr # Final 3D volume +│ ├── {subject}.ome.zarr.zip # Compressed volume +│ ├── {subject}.png # Preview image +│ ├── {subject}_annotated.png # Annotated preview │ ├── z_matches.csv # Z-overlap decisions -│ ├── stacking_decisions.csv # Per-slice transform loading decisions -│ └── 3d_volume.png # Preview image -├── normalize_z_intensity/ # (normalize_z_slices) Z-normalized volume -│ └── 3d_volume_znorm.ome.zarr +│ └── stacking_decisions.csv # Per-slice transform loading decisions +├── correct_bias_field/ # (correct_bias_field) Bias-field-corrected volume +│ └── {subject}_corrected.ome.zarr ├── align_to_ras/ # (align_to_ras_enabled) │ ├── {subject}_ras.ome.zarr # RAS-aligned volume (all pyramid levels) │ ├── {subject}_ras_transform.tfm # Registration transform (SimpleITK) │ └── {subject}_ras_preview.png # 3-panel alignment comparison ├── diagnostics/ # (diagnostic_mode or individual flags) │ ├── rotation_analysis/ │ ├── acquisition_rotation/ -│ ├── dilation_analysis/ │ ├── motor_only_stitch/ │ ├──
refined_stitch/ │ ├── motor_only_stack/ │ └── stitch_comparison/ -└── {subject}_quality_report.html # Quality report +└── {subject}_quality_report.html # Quality report (when generate_report = true) ``` diff --git a/docs/RECONSTRUCTION_DIAGNOSTICS.md b/docs/RECONSTRUCTION_DIAGNOSTICS.md index bf8ae898..1653e0c7 100644 --- a/docs/RECONSTRUCTION_DIAGNOSTICS.md +++ b/docs/RECONSTRUCTION_DIAGNOSTICS.md @@ -52,7 +52,6 @@ If you only need specific diagnostics, keep `diagnostic_mode=false` and enable i params { // Enable specific diagnostics analyze_rotation_drift = true // Analyze cumulative rotation between slices - analyze_tile_dilation = true // Compare motor vs registration positions motor_only_stitch = true // Create slices using only motor positions // Configure diagnostics @@ -207,85 +206,25 @@ registration_transform = 'euler' registration_max_rotation = 35.0 // Increase if needed for oblique cuts ``` -**If dilation is detected**: - -The most common cause of edge misalignment ("overhangs") is systematic tile dilation/contraction. -The diagnostic analysis reveals that motor positions don't match the actual image positions. - -**Solution**: Apply scale correction during 3D assembly: - -#### Using Nextflow Pipeline (Recommended) - -**Option A: Auto-aggregate and apply** (fully automated): -```bash -nextflow run soct_3d_reconst.nf \ - --input /path/to/data \ - --scale_correction_enabled true \ - --aggregate_dilation true -``` -This will: -1. Run dilation analysis on all slices -2. Aggregate results to compute optimal correction factors -3. Apply scale correction during common space alignment - -**Option B: Use pre-computed correction factors**: -```bash -# First run diagnostic mode to generate analysis -nextflow run soct_3d_reconst.nf \ - --input /path/to/data \ - --diagnostic_mode true \ - --debug_slices "30-40" # Test subset first - -# Then re-run with scale correction using the generated JSON -nextflow run soct_3d_reconst.nf \ - --input /path/to/data \ - --scale_correction_enabled true \ - --dilation_json /path/to/output/diagnostics/aggregated_dilation/aggregated_dilation_analysis.json -``` - -**Option C: Manual scale factors**: -```bash -nextflow run soct_3d_reconst.nf \ - --input /path/to/data \ - --scale_correction_enabled true \ - --scale_correction_y 1.031 \ - --scale_correction_x 1.009 -``` - -#### Using Command Line (Manual) - -```bash -# Step 1: Aggregate dilation analysis from multiple slices -linum_aggregate_dilation_analysis.py \ - sub-18/diagnostics/dilation_analysis \ - sub-18/diagnostics/aggregated_dilation - -# Step 2: Apply correction during 3D alignment -linum_align_mosaics_3d_from_shifts.py \ - mosaics_dir \ - shifts_xy.csv \ - aligned_output \ - --dilation_json sub-18/diagnostics/aggregated_dilation/aggregated_dilation_analysis.json - -# Or use manual values: -linum_align_mosaics_3d_from_shifts.py \ - mosaics_dir \ - shifts_xy.csv \ - aligned_output \ - --scale_y 1.031 \ - --scale_x 1.009 -``` - -**Understanding scale factors**: -- `scale < 1` measured → mosaic is smaller than expected → use correction `> 1` to expand -- `scale > 1` measured → mosaic is larger than expected → use correction `< 1` to shrink -- The aggregation script outputs the **correction** factors directly (inverse of measured) - **If specific slices are problematic**: ```groovy // Exclude degraded slices via slice_config.csv -// Or limit analysis: -debug_slices = "20-40" // Focus on problem region +// Or enable automatic quality assessment and exclusion: +auto_assess_quality = true 
+auto_assess_min_quality = 0.3 +auto_exclude_enabled = true +``` + +**If large inter-slice shifts are causing misalignment**: +```groovy +// Enable rehoming detection to correct encoder glitch spikes: +detect_rehoming = true +rehoming_max_shift_mm = 0.5 +// For tile-column expansion events (legacy data): +tile_fov_mm = 0.875 +// Enable image-based refinement for transitions flagged reliable=0: +common_space_refine_unreliable = true +common_space_refine_max_discrepancy_px = 0 ``` ### Step 3: Reprocess with Fixes @@ -316,10 +255,7 @@ linum_diagnose_reconstruction.py sub-18 sub-18/diagnostics 2. **Check rotation analysis** (`diagnostics/rotation_analysis.txt`): - If cumulative rotation > 5°, edges will misalign -3. **Check dilation analysis** (`diagnostics/dilation_analysis/*/dilation_analysis.txt`): - - Scale factor deviation from 1.0 indicates motor vs registration mismatch - -4. **Compare motor-only stitches** with registered stitches: +3. **Compare motor-only stitches** with registered stitches: - Visual inspection of differences reveals registration contribution ## Parameters Reference @@ -330,7 +266,6 @@ linum_diagnose_reconstruction.py sub-18 sub-18/diagnostics |-----------|---------|-------------| | `diagnostic_mode` | false | **Master switch**: enables ALL diagnostics when true | | `analyze_rotation_drift` | false | Analyze inter-slice rotation (mosaic-level) | -| `analyze_tile_dilation` | false | Compare motor vs registration positions (tile-level) | | `motor_only_stitch` | false | Create motor-position-only stitches | | `motor_only_stack` | false | Create 3D stack using motor positions only | | `diagnostic_rotation_threshold` | 2.0 | Flag rotations above this (degrees) | @@ -366,15 +301,13 @@ output/ │ │ ├── rotation_data.csv │ │ ├── rotation_analysis.png │ │ └── rotation_analysis.txt -│ ├── dilation_analysis/ -│ │ ├── 02/ -│ │ │ ├── dilation_analysis.json -│ │ │ └── dilation_analysis.png -│ │ ├── 03/ +│ ├── acquisition_rotation/ +│ ├── motor_only_stitch/ +│ │ ├── slice_z02_motor_only.ome.zarr +│ │ ├── slice_z03_motor_only.ome.zarr │ │ └── ... -│ └── motor_only_stitch/ -│ ├── slice_z02_motor_only.ome.zarr -│ ├── slice_z03_motor_only.ome.zarr -│ └── ... +│ ├── refined_stitch/ +│ ├── motor_only_stack/ +│ └── stitch_comparison/ └── ... ``` diff --git a/docs/RECONST_2_5D_WORKFLOW.md b/docs/RECONST_2_5D_WORKFLOW.md new file mode 100644 index 00000000..51210127 --- /dev/null +++ b/docs/RECONST_2_5D_WORKFLOW.md @@ -0,0 +1,275 @@ +# 2.5D Reconstruction Workflow + +--- + +## Overview + +The 2.5D reconstruction workflow (`soct_2.5d_reconst.nf`) converts a set of per-slice 2D mosaic +grids (TIFF format) into a stacked 3D volume using illumination correction, image-based tile +placement, and XY-shift-based stacking. + +This is the **legacy** workflow. New acquisitions should use the full +[3D Reconstruction Workflow](NEXTFLOW_WORKFLOWS.md#3d-reconstruction-workflow) +(`soct_3d_reconst.nf`), which operates on OME-Zarr mosaic grids and provides much richer +correction and diagnostics. 
+
+---
+
+## When to Use This Workflow
+
+- Data acquired as TIFF-format mosaic grids (`mosaic_grid_z*.tiff`)
+- Re-processing older datasets that pre-date the OME-Zarr pipeline
+- Quick 2D stitching + stacking without the full 3D pipeline overhead
+
+---
+
+## Location
+
+```
+workflows/reconst_2.5d/
+├── soct_2.5d_reconst.nf              # Workflow definition
+├── soct_2.5d_reconst_beluga.config   # Compute Canada / Beluga cluster config
+└── soct_2.5d_reconst_docker.config   # Docker-based config
+```
+
+---
+
+## Input
+
+| Item | Description |
+|------|-------------|
+| `{directory}/mosaicgrids/` | Directory of `mosaic_grid_z*.tiff` files, one per slice |
+| `{directory}/shifts_xy.csv` | XY inter-slice shifts file (standard linumpy format) |
+
+---
+
+## Running
+
+```bash
+# Basic usage (run from workflow directory or with full path)
+nextflow run soct_2.5d_reconst.nf \
+    --directory /path/to/subject
+
+# With a custom output directory
+nextflow run soct_2.5d_reconst.nf \
+    --directory /path/to/subject \
+    --output_directory /path/to/output
+
+# Resume after partial run
+nextflow run soct_2.5d_reconst.nf \
+    --directory /path/to/subject \
+    -resume
+```
+
+---
+
+## Parameters
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| `directory` | `"."` | Root subject directory |
+| `input_directory` | `{directory}/mosaicgrids` | Directory containing `mosaic_grid_z*.tiff` files |
+| `xy_shift_file` | `{directory}/shifts_xy.csv` | Inter-slice XY shifts file |
+| `output_directory` | `{directory}` | Output directory |
+
+### Tile Shape
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| `tile_nx` | `400` | Tile width in pixels |
+| `tile_ny` | `400` | Tile height in pixels |
+| `spacing_xy` | `1.875` | Lateral pixel spacing (µm) |
+| `spacing_z` | `200.0` | Axial (slice) spacing (µm) |
+
+### Tile Cropping
+
+A border is removed from each tile before processing to avoid edge artifacts.
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| `xmin` | `10` | Left crop (pixels) |
+| `xmax` | `390` | Right crop (pixels); effective tile width = `xmax - xmin` = 380 px |
+| `ymin` | `10` | Top crop (pixels) |
+| `ymax` | `390` | Bottom crop (pixels); effective tile height = `ymax - ymin` = 380 px |
+
+### Illumination Bias
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| `illum_n_samples` | `512` | Tiles sampled to estimate flat-field |
+| `pos_n_samples` | `512` | Tiles sampled to estimate dark-field |
+| `basic_working_size` | `128` | Internal BaSIC working resolution (pixels) |
+
+### Stitching
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| `initial_overlap` | `0.2` | Expected tile overlap fraction for position estimation |
+
+### Output Resolution
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| `resolution_nifti` | `10.0` | Isotropic resolution for the resampled NIfTI output (µm) |
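+
+As a sketch of how these defaults are overridden in practice, the tile-geometry parameters above can be set directly on the command line (the values below are illustrative, not recommendations):
+
+```bash
+nextflow run soct_2.5d_reconst.nf \
+    --directory /path/to/subject \
+    --tile_nx 512 --tile_ny 512 \
+    --xmin 12 --xmax 500 --ymin 12 --ymax 500 \
+    --spacing_xy 1.875 --spacing_z 200.0
+```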
+
+---
+
+## Pipeline Processes
+
+The workflow runs processes in a linear sequence:
+
+```
+crop_tiles
+  └─→ estimate_illumination_bias
+        └─→ compensate_illumination_bias
+              │
+              ├─→ estimate_position   (pools all compensated grids)
+              │
+              └─→ stitch_mosaic       (per-slice, uses shared position transform)
+                    └─→ stack_mosaic
+                          ├─→ compress_stack → stack.zarr.zip
+                          ├─→ convert_to_omezarr
+                          │     └─→ resample_stack → stack_10um.nii.gz
+                          └─→ (stack.zarr itself)
+```
+
+### 1. `crop_tiles`
+
+Crops a border from each tile within the mosaic grid TIFF to remove edge artifacts.
+
+```
+linum_crop_tiles.py --xmin <px> --xmax <px> --ymin <px> --ymax <px> --tile_shape <shape>
+```
+
+**Input**: `mosaic_grid_z*.tiff` directory
+**Output**: `{basename}_cropped.tiff`
+
+### 2. `estimate_illumination_bias`
+
+Estimates per-tile flat-field and dark-field using the BaSIC algorithm.
+
+```
+linum_estimate_illumination.py --tile_shape <shape> --output_darkfield <path>
+```
+
+**Input**: Cropped mosaic grid (per slice)
+**Output**: `{key}_flatfield.nii.gz`, `{key}_darkfield.nii.gz`
+
+### 3. `compensate_illumination_bias`
+
+Applies the estimated flat/dark field correction to each mosaic grid.
+
+```
+linum_compensate_illumination.py --flatfield <path> --darkfield <path> --tile_shape <shape>
+```
+
+**Input**: Cropped mosaic + flat/dark field
+**Output**: `{key}_mosaic_grid_compensated.nii.gz`
+
+### 4. `estimate_position`
+
+Pools all compensated mosaic grids to estimate a single shared tile-placement transform (`.npy`).
+This single transform is applied to all slices, avoiding per-slice jitter.
+
+```
+linum_estimate_transform.py --tile_shape <shape> --initial_overlap <fraction>
+```
+
+**Input**: All compensated mosaic grids (collected)
+**Output**: `position_transform.npy`
+
+### 5. `stitch_mosaic`
+
+Stitches each compensated mosaic grid into a 2D slice using the shared position transform.
+Blending method is `diffusion`.
+
+```
+linum_stitch_2d.py --blending_method diffusion --tile_shape <shape>
+```
+
+**Input**: Compensated mosaic (per slice) + position transform
+**Output**: `{key}_stitched.nii.gz`
+
+### 6. `stack_mosaic`
+
+Stacks all stitched 2D slices into a 3D volume using XY shifts from `shifts_xy.csv`.
+
+```
+linum_stack_slices.py stack.zarr --xy_shifts <csv> --resolution_xy <um> --resolution_z <um>
+```
+
+**Input**: All stitched slices (collected) + shifts CSV
+**Output**: `stack.zarr`
+
+### 7. `compress_stack`
+
+Compresses the Zarr stack to a ZIP archive for transfer.
+
+**Input**: `stack.zarr`
+**Output**: `stack.zarr.zip`
+
+### 8. `convert_to_omezarr`
+
+Converts the Zarr stack to OME-Zarr format for visualization in napari/neuroglancer.
+
+**Input**: `stack.zarr`
+**Output**: `stack.ome.zarr`
+
+### 9. `resample_stack`
+
+Resamples the OME-Zarr to isotropic resolution and exports a NIfTI file.
+ +``` +linum_convert_omezarr_to_nifti.py stack.ome.zarr stack_10um.nii.gz --resolution 10.0 +``` + +**Input**: `stack.ome.zarr` +**Output**: `stack_10um.nii.gz` + +--- + +## Outputs + +| File | Description | +|------|-------------| +| `stack.zarr` | Full-resolution 3D volume (Zarr format) | +| `stack.zarr.zip` | Compressed archive of `stack.zarr` | +| `stack.ome.zarr` | OME-Zarr for visualization | +| `stack_10um.nii.gz` | Isotropic 10 µm NIfTI for atlas registration and analysis | + +--- + +## Cluster / Container Configs + +| Config | Use case | +|--------|----------| +| `soct_2.5d_reconst_beluga.config` | Compute Canada Beluga cluster (SLURM) | +| `soct_2.5d_reconst_docker.config` | Docker container execution | + +```bash +# Beluga cluster +nextflow run soct_2.5d_reconst.nf \ + --directory /path/to/subject \ + -c soct_2.5d_reconst_beluga.config + +# Docker +nextflow run soct_2.5d_reconst.nf \ + --directory /path/to/subject \ + -c soct_2.5d_reconst_docker.config +``` + +--- + +## Differences from the 3D Workflow + +| Aspect | 2.5D workflow | 3D workflow | +|--------|---------------|-------------| +| Input format | TIFF mosaic grids | OME-Zarr mosaic grids | +| Tile illumination | BaSIC (per-slice) | BaSIC (per-slice via `fix_illumination`) | +| Tile placement | Image-based (phase correlation) | Motor positions + optional image refinement | +| Slice alignment | Motor XY shifts only | Motor + pairwise registration | +| Quality assessment | None | Optional auto quality assessment | +| Re-homing correction | None | `detect_rehoming` pass | +| Interpolation | None | `interpolate_missing_slice` | +| Atlas registration | None (requires separate NIfTI step) | Integrated `align_to_ras` | +| Output | `stack.zarr`, `stack_10um.nii.gz` | `{subject}.ome.zarr` (multi-resolution) | diff --git a/docs/SCRIPTS_REFERENCE.md b/docs/SCRIPTS_REFERENCE.md index a774a333..68cdf7bd 100644 --- a/docs/SCRIPTS_REFERENCE.md +++ b/docs/SCRIPTS_REFERENCE.md @@ -267,30 +267,37 @@ General intensity normalization. linum_intensity_normalization.py ``` -### linum_normalize_z_intensity.py +### linum_correct_bias_field.py -Correct slow intensity drift across serial sections after stacking. Two modes are supported: `histogram` (per-section histogram matching that preserves relative contrast) and `percentile` (linear scaling to a smoothed percentile curve). +Correct slow intensity drift and bias field across serial sections after stacking using N4 bias field correction (SimpleITK). Three modes are supported: `per_section` (N4 applied independently per thick section), `global` (single N4 pass over the whole volume), and `two_pass` (per-section pass followed by a global pass). 
 ```bash
-linum_normalize_z_intensity.py \
-    [--mode {histogram,percentile}] \
+linum_correct_bias_field.py \
+    [--mode {per_section,global,two_pass}] \
     [--strength <0.0-1.0>] \
-    [--tissue_threshold <float>] \
-    [--smooth_sigma <float>] \
-    [--percentile <float>] \
-    [--max_scale <float>] \
-    [--min_scale <float>]
+    [--n_serial_slices <int>] \
+    [--n_processes <int>] \
+    [--shrink_factor <int>] \
+    [--n_iterations <int> [<int> ...]] \
+    [--spline_distance_mm <float>] \
+    [--mask_smoothing_sigma <float>] \
+    [--save_bias_field <path>] \
+    [--n_levels <int>] \
+    [--verbose]
 ```
 
 | Option | Default | Description |
 |--------|---------|-------------|
-| `--mode` | `histogram` | Normalization mode: `histogram` or `percentile` |
-| `--strength` | `0.5` | Mixing strength (0 = passthrough, 1 = full correction) |
-| `--tissue_threshold` | `0.02` | Minimum intensity to classify as tissue (histogram mode) |
-| `--smooth_sigma` | `10.0` | Smoothing sigma in sections for trend estimation (percentile mode) |
-| `--percentile` | `80.0` | Tissue percentile used as reference intensity (percentile mode) |
-| `--max_scale` | `2.0` | Maximum scale factor |
-| `--min_scale` | `0.5` | Minimum scale factor |
+| `--mode` | `two_pass` | Correction mode: `per_section`, `global`, or `two_pass` |
+| `--strength` | `1.0` | Mixing strength (0 = passthrough, 1 = full correction) |
+| `--n_serial_slices` | `1` | Slices per section for `per_section` mode |
+| `--n_processes` | `1` | Number of parallel worker processes |
+| `--shrink_factor` | `4` | Downsampling factor before N4 fitting (faster, less memory) |
+| `--n_iterations` | `[50,50,50,50]` | N4 iterations per fitting level (length = number of levels) |
+| `--spline_distance_mm` | auto | B-spline control point spacing (default: 2.0 mm per-section, 10.0 mm global) |
+| `--mask_smoothing_sigma` | `2.0` | Gaussian sigma for tissue mask smoothing |
+| `--save_bias_field` | — | If given, write the estimated bias field to this OME-Zarr path |
+| `--n_levels` | `3` | Pyramid levels in output OME-Zarr |
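+
+For example, a two-pass correction of a stacked volume might look like this (the paths and the positional input/output order are illustrative assumptions; the flags are documented above):
+
+```bash
+linum_correct_bias_field.py stack/sub-01.ome.zarr corrected/sub-01_corrected.ome.zarr \
+    --mode two_pass \
+    --n_processes 8 \
+    --save_bias_field corrected/sub-01_bias.ome.zarr
+```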
 
 ---
 
@@ -403,23 +410,59 @@ rotation smoothing, auto-exclude, and richer diagnostics.
 
 Stack slices into a 3D volume using motor positions for XY placement and correlation-based Z matching. This is the primary stacking script used by the pipeline.
 
 ```bash
-linum_stack_slices_motor.py \
-    [--blending {none,average,max,feather}] \
-    [--apply_rotation_only] \
-    [--max_rotation_deg <deg>] \
+linum_stack_slices_motor.py \
+    [--transforms_dir <dir>] \
+    [--rotation_only] \
+    [--max_rotation_deg 1.0] \
+    [--accumulate_translations] \
+    [--max_pairwise_translation 0] \
+    [--confidence_weight_translations] \
+    [--max_cumulative_drift_px 0] \
+    [--smooth_window 0] \
+    [--translation_smooth_sigma 0] \
     [--skip_error_transforms] \
-    [--rehoming_threshold_mm <mm>] \
-    [--smooth_window <int>]
+    [--skip_warning_transforms] \
+    [--no_xy_shift] \
+    [--slicing_interval_mm 0.200] \
+    [--search_range_mm 0.100] \
+    [--use_expected_overlap] \
+    [--z_overlap_min_corr 0.5] \
+    [--moving_z_first_index 8] \
+    [--blend] \
+    [--blend_depth] \
+    [--blend_refinement_px 0] \
+    [--blend_z_refine_vox 0] \
+    [--pyramid_resolutions 10 25 50 100] \
+    [--make_isotropic | --no_isotropic] \
+    [--max_slices <int>] \
+    [--output_z_matches] \
+    [--output_stacking_decisions] \
+    [--confidence_high 0.6] \
+    [--confidence_low 0.3] \
+    [--blend_z_refine_min_confidence 0.5] \
+    [--slice_config <csv>] \
+    [--load_min_zcorr <float>] \
+    [--load_max_rotation <deg>] \
+    [--translation_min_zcorr <float>] \
+    [--manual_transforms_dir <dir>]
 ```
 
-| Option | Description |
-|--------|-------------|
-| `--blending` | Blending method for overlapping regions |
-| `--apply_rotation_only` | Apply only the rotation component from pairwise registration |
-| `--max_rotation_deg` | Clamp rotations larger than this value |
-| `--skip_error_transforms` | Skip transforms with error status |
-| `--rehoming_threshold_mm` | Motor shift threshold to detect re-homing events |
-| `--smooth_window` | Moving-average window for smoothing per-slice rotations |
+| Option | Default | Description |
+|--------|---------|-------------|
+| `--transforms_dir` | — | Directory of pairwise registration transforms |
+| `--rotation_only` | off | Apply only the rotation component from pairwise transforms |
+| `--max_rotation_deg` | `1.0` | Clamp rotations larger than this value (degrees) |
+| `--accumulate_translations` | off | Accumulate pairwise translations as cumulative canvas offsets |
+| `--max_pairwise_translation` | `0` | Zero out translations near this optimizer-boundary limit (0 = accumulate all) |
+| `--confidence_weight_translations` | off | Weight translations by confidence before accumulating |
+| `--max_cumulative_drift_px` | `0` | Cap cumulative drift from motor baseline (0 = unlimited) |
+| `--smooth_window` | `0` | Moving-average window (slices) for per-slice rotation smoothing (0 = disabled) |
+| `--translation_smooth_sigma` | `0` | Gaussian sigma (slices) for smoothing accumulated translations (0 = disabled) |
+| `--skip_error_transforms` | off | Skip transforms flagged `overall_status="error"` |
+| `--skip_warning_transforms` | off | Skip transforms flagged `overall_status="warning"` |
+| `--no_xy_shift` | off | Ignore XY shifts from motor CSV (stack without XY displacement) |
+| `--slice_config` | — | CSV to filter which slices are included / motor-only |
+| `--load_max_rotation` | — | Metric-based gate: skip transforms with rotation above this threshold |
 
 ### linum_stack_motor_only.py
 
@@ -626,20 +669,30 @@ Two outputs are produced per pair:
 
 ```bash
 linum_register_pairwise.py \
-    [--slicing_interval_mm <mm>] \
-    [--search_range_mm <mm>] \
-    [--robustness {0,1,2}] \
-    [--use_mask] \
-    [--mask_dir <dir>]
+    [--slicing_interval_mm 0.200] \
+    [--search_range_mm 0.100] \
+    [--moving_z_index 0] \
+    [--enable_rotation | --no-enable_rotation] \
+    [--max_rotation_deg 5.0] \
+    [--max_translation_px 20.0] \
+    [--initial_alignment {none,com,gradient,both}] \
+    [--out_transform transform.tfm] \
+    [--out_offsets offsets.txt] \
+    [--screenshot <png>]
 ```
 
 | Option | Default | Description |
 |--------|---------|-------------|
-| `--slicing_interval_mm` | `0.05` | Expected physical slice thickness in mm |
-| `--search_range_mm` | `0.1` | Z search range around expected overlap |
-| `--robustness` | `1` | Robustness level: 0=fast, 1=balanced, 2=thorough |
-| `--use_mask` | off | Use tissue masks to focus registration on tissue regions |
-| `--mask_dir` | — | Directory containing mask OME-Zarr files |
+| `--slicing_interval_mm` | `0.200` | Expected physical slice thickness in mm |
+| `--search_range_mm` | `0.100` | Z search range around expected overlap |
+| `--moving_z_index` | `0` | Starting Z-index in the moving volume |
+| `--enable_rotation` | on | Enable rotation in the transform (use `--no-enable_rotation` to disable) |
+| `--max_rotation_deg` | `5.0` | Maximum rotation to consider (degrees) |
+| `--max_translation_px` | `20.0` | Maximum translation per axis (pixels) |
+| `--initial_alignment` | `both` | Pre-registration alignment: `none`, `com`, `gradient`, or `both` |
+| `--out_transform` | `transform.tfm` | Output SimpleITK transform path |
+| `--out_offsets` | `offsets.txt` | Output Z-index correspondence path |
+| `--screenshot` | — | Save a PNG of the registration result |
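+
+A minimal pairwise run might look like this (the two positional volumes, in fixed/moving order, are an assumption; the flags are documented above):
+
+```bash
+linum_register_pairwise.py slice_z02.ome.zarr slice_z03.ome.zarr \
+    --slicing_interval_mm 0.200 \
+    --initial_alignment both \
+    --out_transform z02_to_z03.tfm \
+    --out_offsets z02_to_z03_offsets.txt \
+    --screenshot z02_to_z03.png
+```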
 
 ---
 
@@ -1035,11 +1088,9 @@ linum_suggest_params.py \
 
 **Estimated parameters:**
 
 From `shifts_xy.csv`:
-- `stitch_rehoming_threshold_mm` — re-homing boundary threshold (MAD-robust detection)
-- `stitch_rehoming_enabled` — true if re-homing events are detected
-- `stitch_rehoming_use_motor` — always recommended true when re-homing is present
-- `max_shift_mm` — IQR upper bound of normal inter-slice shifts
-- `common_space_max_step_mm` — 95th percentile of consecutive normal shift changes
+- `max_shift_mm` — IQR upper bound of normal inter-slice shifts (used for `rehoming_max_shift_mm`)
+- `common_space_max_step_mm` — 95th percentile of consecutive normal shift changes (used for `common_space_excluded_slice_mode` tuning)
+- `interpolate_missing_slices` — suggested based on gap pattern in shift data
 
 From `--data_dir` (raw data directory):
 - `registration_slicing_interval_mm` — from `slice_thickness` in `metadata.json` / `state.json`
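+
+A typical call might look like this (the positional argument is assumed to be the subject/reconstruction directory; `--data_dir` is the raw-data directory described above):
+
+```bash
+linum_suggest_params.py sub-18 --data_dir /path/to/raw/sub-18
+```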
diff --git a/docs/workflow_reconstruction_2-5d.png b/docs/workflow_reconstruction_2-5d.png
deleted file mode 100644
index 5bdc6fa879c6c01b3eebaeb4060dcc181b345513..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 88858
[88,858-byte binary image payload omitted]
zG1lMMI9?EooXUk~c2(EZc<^ebLUWXyUeGmV%CR-v+>nqSy?Si`tH}B5uZ^Ig))p2l z1ifQZ#&Q7S!Za)aYzn z5J&wpcs+oD?IUk@>sM5lS8+*L-~c|`F43M-q|+z{hS1-d$@*nzr(70qW$LDU@xpM=Wrc11QI80=N#oXLBs6&}Y-;SejHCuBA@f%#s%1syF|K>ct)h;&0 z4!d54j?vXzPT;EyPQnlrhz8H{=U-(-_$1;3A^s-6<5F`{eg z0(8q}e2YsB-#kWFOt^|G+?@!g*tHLrB~4kCe$x3z2tG5T@|9#x$d%u@^G9HWPtQ<`H6YQARJ2;O zxE;3#i<3Ztyop)~n`>q-zZ2#22l6PO;cv;^bK7f3yy7tx@(5I;?JV0 zllXn1?p=eEFCyWVc_;AD1b1)1=TSC4WsGj7Xx2EVJx*H{O|JUAZRO_NAzzYn8nv|u z0!$F&SBG6=XYqvix&~Xu$Z)*0H@}Y5(U1TqfXAi|f7WU~c>mig2Qq*8#{Cd~cZ>P= zG76>nkWYJe_L|j1ESz{PDIj*54QCaVQOEjnN*hn;Sn1`P8 zzM8kHGCQfQf6HCyyA1GZuJYe={$hrSDktZX#KVUr1?nyT&*4jp4Oc)%WH^h4re^-Q z!eif)iNB#Z8VQ?ot~Q+TB}UA*nS7C_| z*vgv-L;(B#7`keRg^mXAnd8gJI$K;pgzr!3Sb0dUG?AAM2!l}4am|*Ym;eMC*`!A4~iv2OYfw4wF!tiV)!$PoY=7YQ?)8*$!C;v>0m#T8;=KJPy)C!0_=(q zkbC_^Mk9bw3f7Ifus*St!MRE635Mj3CTy=Wb@n$FsH@GW#2% zMG}F>MgKVk(r*3F^ML^4;p4`ImE>5U{$zA@zN=62FHg*Diju_PLT#sX+TFwmbXU|Q z9G8zD{bm+J)g%v4ViY)E)?vD42`E)l%U&i{#prnfh(ZHO`Xu!CTVC^a+@srzr*1Sm z#sZO}=f|h3L!mJ?;-x|z<#D!mQR4-^OSi*2hCE zFAze443PhfeL1xo8C50v34!bdP2mp55;$QI1USUnWnx_{j}PKK;mDEEWchH;aqxGX zZhdj-+{4aEY&=H(-p(Y7f>x;dRy*0luP6aOj}opyGt|O8S8^BrNjBx&+~arV=%%u& z`U9-)5q( zmYXgav2EH2m}Gv6pGtZpKXrs*wETFPo7AVzyc+Y)`rPPSh*e6qdj_^}U)Hr%fa9t# zG=zULpK_h8ci&%L+n|)=@nrl+dsviE=MF_?L;FGzU$7TTd>sCny&o7&9Gf_wJ6AF#VtqY09zy=A?Z^Mbj7X$zfIC z)!I|+%Q&=T5af;TkvYGa4;s=b0HL6H7YXp-(t+mWfw7{g;tT4GL}~vmVbcYp2Zt&{ zJ3Sn1$ZUy$4ei6h#Ad|q+rQuZMUn55$8Vd?VHM0cGj+-oS3pm}iqS@sa2fky*DsrY z*q}6F;i5PYX%o^gvd^Hwt9{k>FTwYBxTOfq0vasx0Qr)^=sr2=41M ze|`XhWl^)@lVQbs`OY<&yr)U7h`Pw6z`tVd+PYP%;kue~L+Nf-^XKrZK^b$(2RA87 zgPJP%@Rd2uqMhT)l>lB|^*A_jfVP0x*x0p)eg~?BCOn|NH7Ud1dAdVdKO=H|yZiUx z)-7LS-8ui(tpMbOi5+cm%v@sc-VzgIILC-shQ>209zWLUuuQAp%%H0G`yRvL>Uh>DJ!-plr=kepmGB6i79B}QYZ_Aw1+k!hl>87eJ|sC(!fV5=)_owUck zXSstUYP4$?%3XT`_oE(aAw(Mg8wBjvm7U`N^+o{d(*lX1g1ekSzBZ0JqFIm6Z#%6f zc15<)I7Z}QOB+w{oO#a?$r zpGKAcp#`X0*OlT&0GU-obNnLZd;k~}{UpU1$0I$Z(EAw~!{~RGe*SD=rejJ&R7I&a zgC9#_AcIxGRGBb6b4cSTpY@!IxMkQzRGDarf}@knUZ#u8C@h?G`dGDd@#&*S!)6})gJsi`cDAKj@BwT8nO5;z5y%g_ zNc2X|wQK8@TPUqx03^0Sux!x5Aqz6l9OMimb>ek zvhjEz?9$Z?rBVzhPG~Gm$c`StHM6)bba)sC>~Z|TgI;HivW*U>rfx&>WJ3n7HgEY>tN3ch?%6F_@?m25Ij5`vLx#lgp3t-9 zp-${!Y@9Y&FL%lYJbuFgMod-q(lO>CdP^$b>ga>q-j5kcIrr`fR!}Z^3j`c_?J(SM ztWzb24O2@x=W#ra(b;l~Fo2!|2Yv{>T5M3(Gy1v*qlVIdzjC?u2q@#bw{Nw%vlqYI zf0H2aYPZB?Fi04-b}-%s>A@!4-DBz3<%3;f4*IK8Y8+Y5i))^?t!&AA)*WnKi)=y|_IW!D*HPBFBjf*V?%eSLR``FMo0O7LNVQty zNlE;)yxfvUxbowXPe zh81F6vj7x|J9NmJv?W7M4Ugp3*|l2u84y&*aTG<^>&GKhIO%oTuyHP2`Z^F+n3Aw< zRTC&EKoP{L@RnRd8*VF5L1Er_N(hUwqmMbF*yoifI9^5ke>gK`?i`$JY_ z*GlW=2rx#KKzKmA!h(X-l2y?`&uP#sd$+#L_nw2`D=^;|lHs(8XN4X^n#<7)p$n4|2MfSF~c{5k-?i7Poms)0|m^=pi7bG`W zM&m!YX6Y7sh8f|_Z1#_yaUdmS92n*6cOIhgk+q0=)p15sx1| zx)<5kzAGP3-_BY@i^26|tn>VG6iaV^BcqoEt;8^J?@89geG_icFM9wtI-Bg~^p2pi z7q7IlNyiUQ_dGl7uA1x?x?w|CmTw74>k~(;=}=A|J8{^dC1PGlm*mN>-M!n{E$l-g zcSU^OJWrsg%M^$OuYaKE-T>WPCISaeU@2ag9EzuC6Hyk>jyZ9T@@P45X2Qjb+my5X z7|R9BPJ?dC^OxFmqL2lvfDn^WMr@{rD4_(wioDORdB?%9O@TM&sfkGF< zOmPmFN7DEG`DXy-9KB`G+e6vgu^ywX0r5?(O#9$I7cGRZ$T~pfsr!VEM%>uwQQmBc z`EVjKhyGp*p~k$AU&OEkH-DZxbgQuNicr60nf0WHgV3Xrh|7MnqpN19+L__nb_~Q3qvAQ8)M!9cJttZDYXb6}s3hl>m4x*HT zS8GNk4A=cLfr3vKIUiB{Cri-(&Y~m}JIjeZ&x%ru>X+?(GHauU*~Tp5N^7Y~uBt z9Bm?jOCZ(mr~5|>Qjdz?-r0Gag$2MMjJHuOsS62laX~|Vd|lLk;J^c{)YTz$U77Aw z>~#acJv;#RTWV^WYla4)Zm}Wd@gR!miO45t7eNdU$aU6?Q{~brH@n+E5{|FkYN<8h<1ckr^uYX4TT1PlIpYKA8fBwFU8m z%3kV^+hB+ukR^2k1hdi6xt5!&*SvZ2I>XrfGE*b1x}g}~zGetp)b?yy$}>k9DMPD?8vqR84J{ATZ(>S|rZ zJl5&r%JiyzIyyj2UD=5Fy2Y?7Gp4~qwsz>vBp4$!MQPWMrc6DtHYB02xFj~EY|{1E zxBHgi(E`FgQFX7dFjD5Yn9i$Q;oc!ec6|NqO$0-HlhsnqRD!oI6`F2j)o@c_0KTe$+{`Xq1+^;7` 
zPd*ai=;VMZy_$$HwIdudn`!4!H96er;dv^T0gtgf}4bD53%G`vspa2OI6qLKC`C8<&IP)SZ~|2piK2g7`Pj4yotyS);C3rUeN z22a&QRqCy zILcmwtm=`BSrAnh+k%XUXIb1TDw4r2fS&Vft38wqkT8|K+}S6VYs3F1Q&qCEvljN- z43S!4*wBQyes%cFU=7>o&gKsKkZSCW98t$plH_2Y52y7Aa<@4uD2u&qo z@N7rY?O9VI9scT2s&;F6`^vvQ9!-B=I`zr2d>=cf>sI=aGl^$YI<2Tb!rVM)hyBvW z9w{lR27M|IuWWvHr-R$3IYcbT+y%Ey?-JSX>wLP{e8+k`!vUioo+vCX9${_0qF=(M zhYv~ZSAurV{bgk=YiH+Jo0@M{D^{dCe!M!g3vX6~suwYfZ=^RfRnt(WoLl0(;bVL$ z^EQcu-o({Q+7x9+i@E8yOT1p?<&`nl9n#xY!ubiLl__mnbDZ9cm_<5pcLx3Wzx>$c zr_dNlWlO9U2F3YdV>JjAYhMaP&_xGcox}ktlfpvk(M8Hf(nsF_%|t z4hlG#y+hpzd{q3}@0>LXFfvsNKWsCJV6KnYn^F5!|M_{i7}Rf6 z;i!_^4rd+RZp`C_Dmj<@(`i(a+DJx>Pz_mC$L78ll$_%@t<1jH;?AA-j8BX`aA4G{Wn3IB z4@7uS>vapjW-N3rq24WU*iWTOG*TBCXo^IGVG6V06+cnev$<>Us=1TT8hF5FV+1N8 z0!L(&1#^!l*EUJ58DjMNym{f!1Z8v|;?zA?@yZZ{mjjUge(9k%_iqaohsp8nr3RfB z-!(EBvGk+?hlx+ohJeWZ!j%^;U)%u|wymyNP`J~C`pJVO8Skg&%c>w7Fup6-*7xL}n+aMxPEsa*u#FE~8Rli&%!%Mxn{bMEkP-PP!~XjKrLcXnkTL02>SoagIiLc2&N z#T=qKtq;!P=EF5fzPyaGIJel*CU!tc5p|Y|k@sk78k7Z-c0S;NO>lOW1m%Q|XC zlpCk|?Jalv{I==+DAOUk?Df5izn+XgByZdiu6{46^;Dz|jl%?IC!WaM>_%D*x3zV& zkM({3{UB?u|MF9GP~Q)Jjye1IWt{YwmuDU;nL2%kor42&5h-1i%@EC;la{{D6v=zO zSL;c`195{Hu)G_R)GhOWzl(^<9X(au0GCzYe5`D24stK^P?$7Qpt&|e_m}ewpT2rE zPH~4t&D_3U*d;kZk%_+`G-EFT(Oa=4%pXuBl3}1l3#G@PK~rAE!0bo1)Y8%~FeEJF z#8av?N}}6PI3Db7sifZFnW{$-mR9TNpK81u&~vJjQv^lje2r&}8;O|GyE_#MFMk{F z2j{m_7^J_yIoSI`b7|Cg_v%O-#eV4R^&uf$6y4SvJg7&sE`F?}_c`}%{ZN8JrA_2r zsN53$+`r_c4|%)CPo2_JQh&c8u`>Hu}_GM<3G<)=3CB#yP59;`9pFZJ-UD8Wt1~6fG z{+yF<->smlk0>kg>+B~TB}+(~f{V;F89jQPgv#hk&ZEJ` z8e^~y`k`TTH@c7`WlIyh%d6j*w*MmhDy~=x)|upZnN_&;z8?=mk+XDf^7F9lxN6#peF+;YsAypMEjp z$%5K=5Al1P)6-cbK}K+hb%y1deeCcLc9A%VFpzkZt$6_|s)7}{EPK|<3gTk%)O5$m zC!5dp-@&Z5VVv*X!FJ)e_CI>$npW9usr{+72dx$_PMtr)6`cVp*Z!78;V?YJ za2|k@he_Wr-3s~>V`qnTka4$wZ$$i2Hvoj(9n5Q6RMdg;{I<6T6^5yB>qoGu{x)-_ zxl}BGS*A6ojWuH|!X2ieEsF!egA#lC;>8#qkl545MP9BAh4vCmBL{youl|h~vz=(w z{Vz`~v$0CFw<9NB5_(Si2cuQ{_I}Tg<@*PmZ&#T@{ob>8?~O1IqnV)!n;;?R09|Ez zCMQmJyZ>&tn-dk&v88WZL>ojMEf^tUHDQ7Ou4&WGO9-3}Lsxd7Bo>g6zCv2 z67b!!XQY*67q0CkG?A4xPovaSKg7{RkiXW zdk}TrM!}g;*_=kBLq8_7;pmA)NcLi@!L0`t-6I(3s|KJhQ@uNx`YIgYW0^~dR#}FD z0m3iK-B#0>8hc@mKzi) z>j=?*Q}7Br53`825Vb6)eLvD!RvtTe(})DT#~@_?vIuL?E(y9Wcs;>Y;Pv7}AxcA( z#GY*qzL}e@W7?S!CI6N8;NTPMXK)4AO8-3f<%+ShXUEUieod9gViF=7Rv3Yq@9Q(q zc_txZgg;y0a%k_~VGZ*oR5abM6>QEwWDkv6ppYGijNGo0?I@p4J z?ac?=kg2W6N|`X(!&Jr)Pv}9H#|GS-X-8StuQx`!5Cqt9!20zGMx2*?VU*+=VZp** z=L>4i`7F~uK0Um9_sr;rw|4pLVJE~XP{x~?>Gu2|W{_zVK@{qg!QGP&PHJAbBEUb% zrE|;;Xsb&U01HSAEW;I`C^+LX*!aPu>XN=jGGrzRbBK$y4xe4+M~a8!XfP=T$))h9 z+yU&-iUM0YW8PJwWa+r()pUQH?2j=_Vr+zBC3T|e0i3pq0*Wq^STJq3#2&*}$<&&v zi!9x>&jWm-;DtqY1c(AjHymD?;+1h-!F(AcilV><%XQ(g8Z>xt7S{!!d>J*0-?Pbc zSM)dC*)<|3sLLedTK>N5!BW;Sf_R}4q1Ujhg^Muig&Thpt9R z5|!NjqC`~Izd_*?eDvw;hv%T>%hJa|tP(|aw0}DE>8(Aa*Q&<}7AEsR+P7~n1TBw; zd6EZ{O9#A0>{iR;A5R!_gH-F-M-p;hJZj;8C@EdO49G|u5_LcS0TWpO=j~ju2mZ&`x|l4zobcd9W>69ld%T`u)wo2hr4YWgH0q zo0360BBvnJ&L?mND4;}ZLxGwH+Ib(9KG3YC_wR|IS7XSCq{zNi zFE$^Ax)-?upU>}t{+^K$pyJuQapMh!Kc<>bw?pkfWi3=3;3sVU>MGgb#&&PfiF-m9 zka-lbfKcdwsKH$uB^^yYb?VLJ=^JV)tLBUwF+!ClR?ny^I~+8GkvNT(E}DESH8n_H zp^O`U{pGlVWSdKfpwT>NHGV*$W$)mypM4tzB=SI8e^Y#~Iq$*NWPBD^;Q0g$F&oTj z#;jR^Jlq6hLwIf;$L3Wwe>%oZ=n)jT^dCk&=wedS0fpDTza89)vQXQC5JCkoi8Evb zSB>HzJ3HI6IpUPERg(ro+y=+QxP;$(GO0&Ps4adNy2=(Y(NR$-r>4E4g53Y%JYl>I zuL1+2{YVO9`8(Zd$d9ov{WAGgkAzM;OwXkq6qO03qiCR1}Qvl0#;ZvEjaVoPackdhS?7o!PDz2a!0V|!xtJYq`b z{L}=nyfa;f+e|AzdLa=-nSr{drl?$F$w!k8&DR)KHSpI3^7VF`+l5qLVZ# zD#u0&C6&q{r=H>XgATL#ZwrL2(4Cv+;6~*1(ReiBEd4U+p0+9uC z&BXB`I5y*7gx_N?mK+tk?`@f-DN+5x`X&&1xuD^o`I%$y&5n25F~7f?rdN(*L*I-! 
zPvt7<0@6RkMq&Uo-*L_-xj!SlrL*rjXI{8~Qc*HxAuCy4i4xNyJr!!y>S&*2gv$KA<% z`5+CbZ;)b@CE>#eg6DowqrAUZ!Ibbs?TT#A8bfTQTraBr>klcNdqYX1N1uLX?oy|n zjY$P6%TxKn2FzC`1X$ApBQx&XFDW^B-u(G7sres=W>SP6&(65rm044mZdYgm+{S4XZu%`VtqJ^i4o zP+cVjZGP(hYR+)QVWtfnVoZLe+!BW~sYbypJG<ott_5Vz)sFMF+o!Z zL~$vjm53WkFsYrcuC4*3*53~dE9eynGoxV7Bb2q=P5XOKmOF@F7+@y1GlJT>@{KDA z@wFjBm~CrkCo6TASL-Q=Iy2$`?uBTMj5u$(Z-Ch2_SsC~6Vi|aM~)=CPe@GMmX$Rb z^U^*fkz#fO(UqKPC%iGF4U?P#(td(P1kvYWUfz_pT=S8$a8eWZ_{%Tfiu8}#jFVp) z`noh(9j9=#ygnOl8SWpZanC2#e!rcgqt>zO@#oQJkoKHMnW?ZO zr?tEtz7pwi`S%MhY%Yb(DD1#dji4PCGw$JIGU2A15QtFkK}LUq`(8tMUpr>j&}o$f||; z0)JItyQ0{o(IJXUum(_I-Y^$bFe}ASPQe$05A^Hx{zn5;L5qe>c(2RwKLzSjQFjwo zgqCX^*qIoVy2w)zN+@u&|D*z0Ta$)8rW`vcwq;DAsGf`-xAm`USe!Nw*20>vWwdcw; z`oD*qu+Wbw%!KpryvxeVi_g2<(9^rGlnCEKsPF`A0b5fZyo1Fox^i8286l??I06^I zUQj^=&jETrMO=mVDjB}D8*qO?xAbi$yH`hOIh?+-$KJz-(owV-0cn`^t$;RPNWd*3 zic~1Ogy8|qZaET6p@{~?jjKB^dHTIy$Ylww{iCa4(K6&}NB{JPg7&flPj=YD!g&7@ zt2%RwzCinEzBA%P5!8m)vBK5juQ>r@&^sJB&=(xT0E#^whRw~2_9^at z`w0|PJk?@~P+V6ESKd0WZUM(mv=wkW5DZ9Om@i`f3rY0vB(%cc*ps`@q zlNO0-m;!5TT;qF?7$sC5LJnVqaq3&+->3IEz_Yi#fP{9pil2GcNfm;pmJgaDep$^=1*$T`IYcbg)fBYA%n}n z%4(6J+PX%QCEex50E-AL(4W*U`?r`izhpdyKRd4$*Ru@=7{qV)#7!{BzU(K#G*f z9QHz2L*rMNH3|dqs-i;S7fAzK9L*#RbZ5SG7L981KE_Axf&96IpjhAzB+&|W?i}_q z@XUp)vtWZC95Gi|%nQ;n=N9=ASand`{c)gM9|{>|1ZWQ8`o$?CTX3br#w>{OjjR}hM>Tj7$K zz+>&Ym7R}a^CAH|bF!SKZ+aKk5?8w8wl+3~Mn=H?afk|R5Jzyff~ACwzlHT@IL=y_ zK$g*T;`A+@+0mq+p+ToH1_@F>sh5I+m@_YkHG)Kkn&$Y-z-aJSa|kbjRYsdxvAB)a zXgJ)2`&CtUiTMI{7EKMw6@x^>K(d^!TSZ&b+ueOjRZN`!zR=J(UPc)M0X2i$O^%T5 zHxG;jOXEAZTD|1l)&&>zf)#_XL_?zr3SXC@IgXY_M*k+x#PcP6D<0hAFPliCCr{2= z6n~@Ues9=f9G`BA(eMBv3+g(l*I%KC6qJ)l?O7ru4r4SJ=P2smgB&=6rw7=+Y~--~ zN~s~2jv)qH2L$B4`Se99Z1LX(!kaKu>bh0%Nl#Tb26k?gIhnN@hTrAB68&PO+n!nIlTqh|Wij}cC#M-DrOW>7J#BXPJVv#GnodkCSL|N#bg4WG8GU}PHZ@Zj8QUxT^DOJMqLV2vXYx}Xq`M! 
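The psf_model.py hunks just below switch the `bounds` argument of `scipy.optimize.minimize` from nested lists to `(min, max)` tuples: one pair per parameter, `None` for an unbounded side, and an equal pair to pin a parameter (which is how `zr` is held fixed while `zf` and `a` are fitted). A minimal sketch of that convention with a toy objective — not linumpy code; the Lorentzian-style profile is only a stand-in for the confocal PSF model:

import numpy as np
from scipy.optimize import minimize

def rms_misfit(params, z, y):
    # Toy axial profile standing in for the real confocal PSF model.
    zf, zr, a = params
    model = a / (1.0 + ((z - zf) / zr) ** 2)
    return float(np.sqrt(np.mean((y - model) ** 2)))

z = np.linspace(0.0, 1.0, 200)
y = 1.0 / (1.0 + ((z - 0.4) / 0.1) ** 2)  # synthetic data with zf = 0.4
zr_fixed = 0.1
# zf bounded to the sampled range, zr pinned, amplitude non-negative and unbounded above.
bounds = [(z[0], z[-1]), (zr_fixed, zr_fixed), (0.0, None)]
res = minimize(rms_misfit, x0=[0.5 * z[-1], zr_fixed, 1.0], args=(z, y), bounds=bounds)
assert res.success and abs(res.x[0] - 0.4) < 1e-2

With bounds supplied and no explicit method, SciPy falls back to L-BFGS-B; tuples rather than lists also match the `list[tuple[float | None, float | None]]` annotation the hunk adds.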
z=1(-@JFr+KdFUeXX+k_KXIB)EEBZoQtCbEzN}Y|Q+u6Qk_U!J`5_&Ee;=B4@Bpxq; z9(Dw?d@${F8ql~&$VeoC|CO>~;_Mtfe)alC7VMMBM|ZudD&61t>{(Ofj~`B~UKJBN z{cV|gYRUTb3m3_AQcHK9{ZMgjhm!Pg(_0H#VTKxE`s*5;>bMh3Z|dKlsBgNTmH#L7 zU=XuV(OeQ!h4_CttSmX@0xptY4kby1sTHXmXPk702kh1y`Km{oSQD@a?3c$4$6TK0 zV>pztU<2r)PlD^U1CNVIgF=owrJX&ycJD@)hNA?VHPKOJRbJK&O(w?1lLnqmNHA^4 zd6l)K_W&*Hyl7u5fe8uW8L4LVZTS~(UKf$v3)?S|yn}||f8dNVRGBSkpX23{r!dfW z3ZY)4-<9Y7vBt#-wlo+dD`uT#1``f>cymCL5b$AEWK20N3sl)on>Q9SmMKR#^6yWa~BmvL5 zyCWC_lIzss8vzMYflcoREje#4zuD7dZ=ZpEY zY*+8L567j}c+HR&6Ic);bOK?;BA5uhhZN>Z^Zp3F5F@?QQc_}J&D5BCCE98pOVy0H zVYYbj3FH_S99QvBGVe0iVCTdV$h?*WKxk{LnBQ~(cTL5;MelC?(0J#6$t`d=^FMP7 zrr?F4R2q}v@yPF~oQwfQs~Fumxi)@!Rb(xd>G`ZIEOaMUVbdISW82G8hbz4Gh8C3A z-aKm!4bjES$<0+1^Q-&2tXN@fWVF+BK_?Lx#(Im{qxbPy2+|b7#YY#pa8AWQ&`6RQ2!SRIU@0IsA-4 zR+r+dcdouvt3FsV^IXo=BIDv+B`<=4OI?~Coh}}f@iMclcX(asNUfhnswxdS7O8h- zTv;gu(U5^k1qXUH>~cxz7Vxn5;>ebwna4Jai!W-dJ@f8Dynci6iMKz**KQaY$}Ppj z$6phJplVZEvmsr1$rC)KDG`{@BI4C`)v+C2VJUNp)2`JZu3Z;A9{n1) z$$4mB>x~=Rj*&cT@8iwZeDYUMaYCg$V8tU6>!1qp@^NzFSgtB>28yNLJcpLrDMh`6@knhrtTnvIDo&?0+#Hy-=(_8+WUzP1~0jPL4uzmL?6MvcUBM!|G$H=rTpD&Va>cV%XgqV)CHN2E$gcFxuz75g1+)k7VbMynI=hg{9>y@}6nfU~Io~@>5&ebMI+B@aU3qavX4*u%;n;Z-Ph9 z=idx%3oI-wps>VXeQ^@Kj{RUIhsiT$oc;UnwE$y@2q{ik zn-Ty}H>7fOoB@Ye^Vh`*n}d$VZEfy)K&i=3@OT$&*pST-g|WTxfkD|45cWw@Dh+7zc)(w{swc;-(fks~JAjM3e-)Ya3|n!SNTLq1h8Mj^4p z7eLKKnm1c6FXoq+lfWVJ%r`G}U%u;TE9o#@A>5Da=G}BW1%vvJwb{uK31O8brKQ<9 zIUOgFc=q;VuMR(l8Nt_{s;a2mVN#E{Z2j&bTdeie7U-#2S{c*0c6+ey+F-ZUtIZoH zy!p7PynD6^%bZ5jKB%z9fAKSJE7-{JAU@$qB)~t~+1;ppd%gCp=-&w1!zJf5j;q$9 z#y~`#tGvB!(Qk0cEN^X7$-!~QsL9t`B9cWc-I`0b3yu1lg~LxsE%_lrq7hG-I^dFMJBL7kZ%zeXpsRh< zo*67P*XSu3sGriuQ#rV~tMuFOKa?aXoE4MIPyZkJ!&r^N@qLnKNe06h@DL9lJu1Yd zu&_`Qh=p9P?1q~BZjqz=>qi&u`&lbv(O^mH^}uPvgE;g&o)4Q{btBhZPEQYe;;2!% zrnfZ3m6j2^_)~RlWrJ@wnQg{%b~-N49w)63zEM6eZVrsINu0U)(#4DRe8@scjG~wcZCKWdGu)5aQK<#@Y?@4;?i?2 zyGe5RlZ(ffjWLUij;2cyRTO-n_b;QQ6=8)nrYEwVc;#SVH(;;QFlBL1=W|M5Q2wsS zo*2|>W8^Pma2S%MgZh3G+{YG}{9~WB*gI?-C}qc>rKl&z3^ft|PLL)o3C97iPT1S_vu f return np.sqrt(np.sum((y - tissue * confocal_model(x, z)) ** 2.0) / float(y.size)) p0 = [z[-1] * 0.5, zr_0, 1.0] # zf, zr, a - param_bounds = [[z[0], z[-1]], [zr_0, zr_0], [0.0, None]] + param_bounds: list[tuple[float | None, float | None]] = [(z[0], z[-1]), (zr_0, zr_0), (0.0, None)] popt_firstpsf = minimize(fo_PSF, x0=p0, args=(this_profile, z, syn_tissue), bounds=param_bounds) zf, zr = popt_firstpsf.x[0:2] psf1 = confocal_model([zf, zr, 1.0], z) @@ -648,7 +648,7 @@ def fo_btm(x: np.ndarray, y: np.ndarray, z: np.ndarray, psf: np.ndarray) -> floa return np.sqrt(np.sum((y - psf * bump_tissue_model(x, z)) ** 2.0) / float(y.size)) p0 = [new_z0, 60, 5, 1.0, 0.5] - param_bounds = [[z[0], z[-1]], [0, 100], [1.0, 10], [0, None], [0, None]] + param_bounds = [(z[0], z[-1]), (0, 100), (1.0, 10), (0, None), (0, None)] popt_btm = minimize(fo_btm, x0=p0, args=(this_profile, z, psf1), bounds=param_bounds) z0, c, w, a, b = popt_btm.x[:] bump_tissue = bump_tissue_model(popt_btm.x, z) @@ -671,9 +671,9 @@ def fo_psf_normalized(x: np.ndarray, y: np.ndarray, z: np.ndarray, tissue: np.nd p0 = popt_firstpsf.x # zf, zr, a if fix_zr: zr_0 = p0[1] - param_bounds = [[z[0], z[-1]], [zr_0, zr_0], [0.0, None]] + param_bounds = [(z[0], z[-1]), (zr_0, zr_0), (0.0, None)] else: - param_bounds = [[z[0], z[-1]], [0, None], [0.0, None]] + param_bounds = [(z[0], z[-1]), (0, None), (0.0, None)] popt_psf = minimize( fo_psf_normalized, x0=p0, diff --git a/linumpy/mosaic/grid.py b/linumpy/mosaic/grid.py index 23337ec7..b89323af 100644 --- a/linumpy/mosaic/grid.py +++ 
diff --git a/linumpy/mosaic/grid.py b/linumpy/mosaic/grid.py
index 23337ec7..b89323af 100644
--- a/linumpy/mosaic/grid.py
+++ b/linumpy/mosaic/grid.py
@@ -5,9 +5,8 @@
 from typing import Any
 
 import numpy as np
-import scipy.ndimage.morphology as morpho
 import scipy.optimize
-from scipy.ndimage import gaussian_filter
+from scipy.ndimage import binary_dilation, binary_erosion, gaussian_filter
 from skimage.morphology import ball, disk
 from tqdm import tqdm
 
@@ -753,14 +752,14 @@ def laplace_solver_step(I: np.ndarray, mask: np.ndarray) -> np.ndarray | None:
     strel = ball(k)
 
     small_mask = np.logical_and(small_fixed_mask, small_moving_mask)
-    eroded_mask = morpho.binary_erosion(small_mask, structure=strel)
-    boundary = np.logical_xor(small_mask, morpho.binary_erosion(small_mask, structure=strel))
+    eroded_mask = binary_erosion(small_mask, structure=strel)
+    boundary = np.logical_xor(small_mask, binary_erosion(small_mask, structure=strel))
 
     # Getting the boundary conditions
     bc = boundary.copy()
-    bc = bc * morpho.binary_erosion(small_fixed_mask, strel)
+    bc = bc * binary_erosion(small_fixed_mask, strel)
 
-    dilated_mask = morpho.binary_dilation(~np.logical_or(small_fixed_mask, small_mask), structure=strel)
+    dilated_mask = binary_dilation(~np.logical_or(small_fixed_mask, small_mask), structure=strel)
     bc = np.zeros(new_shape)
     bc[boundary] = (~dilated_mask[boundary]) * 1.0
 
@@ -787,7 +786,7 @@ def laplace_solver_step(I: np.ndarray, mask: np.ndarray) -> np.ndarray | None:
         i_step += 1
 
     # Resampling the blending weights to the original resolution
-    alpha[~morpho.binary_dilation(small_mask, strel)] = 1
+    alpha[~binary_dilation(small_mask, strel)] = 1
     alpha[np.logical_xor(small_moving_mask, small_mask)] = 0.0
     alpha[np.logical_xor(small_fixed_mask, small_mask)] = 1.0
     alpha = 1.0 - alpha
diff --git a/linumpy/tests/test_mosaic_motor.py b/linumpy/tests/test_mosaic_motor.py
new file mode 100644
index 00000000..81e80c1f
--- /dev/null
+++ b/linumpy/tests/test_mosaic_motor.py
@@ -0,0 +1,133 @@
+"""Tests for linumpy/mosaic/motor.py"""
+
+import json
+from pathlib import Path
+
+import numpy as np
+
+from linumpy.mosaic.motor import (
+    apply_blend_shift_refinement,
+    compare_motor_vs_registration,
+    compute_motor_positions,
+)
+
+# ---------------------------------------------------------------------------
+# compute_motor_positions
+# ---------------------------------------------------------------------------
+
+
+def test_compute_motor_positions_count():
+    positions, _step_y, _step_x = compute_motor_positions(nx=3, ny=4, tile_shape=(10, 64, 64), overlap_fraction=0.1)
+    assert len(positions) == 12  # 3 × 4
+
+
+def test_compute_motor_positions_step_sizes():
+    tile_shape = (10, 100, 80)
+    overlap = 0.2
+    _positions, step_y, step_x = compute_motor_positions(nx=2, ny=2, tile_shape=tile_shape, overlap_fraction=overlap)
+    expected_step_y = int(100 * (1 - overlap))  # 80
+    expected_step_x = int(80 * (1 - overlap))  # 64
+    assert step_y == expected_step_y
+    assert step_x == expected_step_x
+
+
+def test_compute_motor_positions_first_is_origin():
+    positions, _, _ = compute_motor_positions(nx=2, ny=3, tile_shape=(5, 50, 50), overlap_fraction=0.1)
+    first = positions[0]
+    assert first[0] == 0
+    assert first[1] == 0
+
+
+def test_compute_motor_positions_scale_factor():
+    tile_shape = (10, 100, 100)
+    _positions_1x, step_y_1x, _ = compute_motor_positions(
+        nx=2, ny=1, tile_shape=tile_shape, overlap_fraction=0.0, scale_factor=1.0
+    )
+    _positions_2x, step_y_2x, _ = compute_motor_positions(
+        nx=2, ny=1, tile_shape=tile_shape, overlap_fraction=0.0, scale_factor=2.0
+    )
+    assert step_y_2x == 2 * step_y_1x
+
+
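The tests that follow pin down the contract of `apply_blend_shift_refinement`: refinements are `{"dx": ..., "dy": ...}` dicts, they are averaged, and an averaged shift below 0.1 px is skipped outright. A sketch consistent with those assertions — a hypothetical reimplementation using `scipy.ndimage.shift`, not necessarily what linumpy does internally:

import numpy as np
from scipy.ndimage import shift as nd_shift

def apply_blend_shift_sketch(tile, refinements, threshold=0.1):
    # Empty refinement list: nothing to do.
    if not refinements:
        return tile
    # Average the proposed sub-pixel corrections.
    dy = float(np.mean([r["dy"] for r in refinements]))
    dx = float(np.mean([r["dx"] for r in refinements]))
    # Sub-threshold shifts are skipped, so the tile comes back bit-identical.
    if abs(dy) < threshold and abs(dx) < threshold:
        return tile
    # Shift every z-slice by the same (dy, dx); linear interpolation keeps it cheap.
    return nd_shift(tile, shift=(0.0, dy, dx), order=1, mode="nearest")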
+# ---------------------------------------------------------------------------
+# apply_blend_shift_refinement
+# ---------------------------------------------------------------------------
+
+
+def test_apply_blend_shift_refinement_empty_refinements():
+    """No refinements → tile returned unchanged."""
+    tile = np.ones((5, 16, 16), dtype=np.float32)
+    result = apply_blend_shift_refinement(tile, [])
+    np.testing.assert_array_equal(result, tile)
+
+
+def test_apply_blend_shift_refinement_negligible_shift():
+    """Sub-threshold shifts (< 0.1 px) → tile returned unchanged."""
+    tile = np.ones((5, 16, 16), dtype=np.float32)
+    refinements = [{"dx": 0.05, "dy": 0.05}]
+    result = apply_blend_shift_refinement(tile, refinements)
+    np.testing.assert_array_equal(result, tile)
+
+
+def test_apply_blend_shift_refinement_applies_shift():
+    """Large shift is applied, changing the tile data."""
+    rng = np.random.default_rng(7)
+    tile = (rng.random((5, 32, 32)) * 100.0).astype(np.float32)
+    refinements = [{"dx": 3.0, "dy": 3.0}]
+    result = apply_blend_shift_refinement(tile, refinements)
+    # Shape must be preserved
+    assert result.shape == tile.shape
+    # Content must have changed
+    assert not np.array_equal(result, tile)
+
+
+def test_apply_blend_shift_refinement_averages_multiple():
+    """Multiple refinements are averaged before application."""
+    tile = (np.ones((5, 32, 32)) * 50.0).astype(np.float32)
+    # Two opposite shifts cancel out once averaged
+    refinements = [{"dx": 0.0, "dy": 4.0}, {"dx": 0.0, "dy": -4.0}]
+    result = apply_blend_shift_refinement(tile, refinements)
+    # Average dy = (4.0 + (-4.0)) / 2 = 0 → below the 0.1 px threshold → unchanged
+    np.testing.assert_array_equal(result, tile)
+
+
+# ---------------------------------------------------------------------------
+# compare_motor_vs_registration
+# ---------------------------------------------------------------------------
+
+
+def test_compare_motor_vs_registration_basic():
+    motor = [(0, 0), (10, 0), (0, 10), (10, 10)]
+    reg = [(1, 1), (11, 1), (1, 11), (11, 11)]
+    result = compare_motor_vs_registration(motor, reg)
+    assert result["n_tiles"] == 4
+    assert abs(result["mean_diff_y"] - 1.0) < 1e-9
+    assert abs(result["mean_diff_x"] - 1.0) < 1e-9
+    assert result["systematic_offset"] is False  # only 1 px offset, threshold is 5
+
+
+def test_compare_motor_vs_registration_systematic_offset():
+    motor = [(0, 0)] * 5
+    reg = [(10, 10)] * 5  # 10 px systematic offset
+    result = compare_motor_vs_registration(motor, reg)
+    assert result["systematic_offset"] is True
+    assert "offset_warning" in result
+
+
+def test_compare_motor_vs_registration_writes_json(tmp_path):
+    motor = [(0, 0), (10, 0)]
+    reg = [(1, 0), (11, 0)]
+    out_path = str(tmp_path / "comparison.json")
+    compare_motor_vs_registration(motor, reg, output_path=out_path)
+    with Path(out_path).open() as f:
+        loaded = json.load(f)
+    assert loaded["n_tiles"] == 2
+    assert abs(loaded["mean_diff_y"] - 1.0) < 1e-9
+
+
+def test_compare_motor_vs_registration_no_dilation_flag():
+    """Fewer than 10 tiles: no dilation_indicator key."""
+    motor = [(0, 0), (10, 0)]
+    reg = [(0, 0), (10, 0)]
+    result = compare_motor_vs_registration(motor, reg)
+    assert "dilation_indicator" not in result
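Similarly, the `compare_motor_vs_registration` tests above fix the observable behaviour: per-axis mean differences between motor and registration positions, plus a systematic-offset flag whose threshold the assertions place at 5 px. A hypothetical sketch consistent with those assertions (the JSON output and the ≥10-tile `dilation_indicator` are omitted):

import numpy as np

def compare_sketch(motor, reg, offset_threshold=5.0):
    motor = np.asarray(motor, dtype=float)
    reg = np.asarray(reg, dtype=float)
    diff = reg - motor  # per-tile (y, x) disagreement
    result = {
        "n_tiles": len(motor),
        "mean_diff_y": float(diff[:, 0].mean()),
        "mean_diff_x": float(diff[:, 1].mean()),
    }
    result["systematic_offset"] = bool(
        abs(result["mean_diff_y"]) > offset_threshold or abs(result["mean_diff_x"]) > offset_threshold
    )
    if result["systematic_offset"]:
        result["offset_warning"] = "registration disagrees with motor positions by a large constant offset"
    return result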
diff --git a/linumpy/tests/test_mosaic_stacking.py b/linumpy/tests/test_mosaic_stacking.py
new file mode 100644
index 00000000..b6e5cca3
--- /dev/null
+++ b/linumpy/tests/test_mosaic_stacking.py
@@ -0,0 +1,163 @@
+"""Tests for linumpy/mosaic/stacking.py"""
+
+import numpy as np
+
+from linumpy.mosaic.stacking import (
+    apply_xy_shift,
+    blend_overlap_xy,
+    blend_overlap_z,
+    find_z_overlap,
+)
+
+
+def _make_vol(shape=(10, 32, 32), fill=1.0):
+    return (np.ones(shape) * fill).astype(np.float32)
+
+
+# ---------------------------------------------------------------------------
+# find_z_overlap
+# ---------------------------------------------------------------------------
+
+
+def test_find_z_overlap_returns_tuple():
+    fixed = _make_vol((20, 16, 16))
+    moving = _make_vol((20, 16, 16))
+    overlap, corr = find_z_overlap(fixed, moving, slicing_interval_mm=0.1, search_range_mm=0.05, resolution_um=5.0)
+    assert isinstance(overlap, int)
+    assert isinstance(corr, (float, np.floating))
+
+
+def test_find_z_overlap_identical_volumes():
+    """Identical volumes have perfect correlation at some overlap."""
+    rng = np.random.default_rng(0)
+    vol = rng.random((20, 16, 16)).astype(np.float32)
+    overlap, corr = find_z_overlap(vol, vol, slicing_interval_mm=0.05, search_range_mm=0.1, resolution_um=5.0)
+    # Correlation should be high (>= 0.0 at minimum)
+    assert corr >= 0.0
+    assert 1 <= overlap <= 20
+
+
+def test_find_z_overlap_min_max_degenerate():
+    """When search range collapses, falls back to expected overlap."""
+    fixed = _make_vol((10, 8, 8))
+    moving = _make_vol((10, 8, 8))
+    # Very large interval → expected overlap < 0 → min >= max edge case
+    overlap, _corr = find_z_overlap(fixed, moving, slicing_interval_mm=10.0, search_range_mm=0.0, resolution_um=5.0)
+    assert isinstance(overlap, int)
+
+
+# ---------------------------------------------------------------------------
+# apply_xy_shift
+# ---------------------------------------------------------------------------
+
+
+def test_apply_xy_shift_zero_shift():
+    vol = _make_vol((4, 10, 10))
+    cropped, dst = apply_xy_shift(vol, 0.0, 0.0, output_shape=(10, 10))
+    assert cropped is not None
+    assert dst == (0, 10, 0, 10)
+
+
+def test_apply_xy_shift_positive():
+    vol = _make_vol((4, 8, 8))
+    cropped, dst = apply_xy_shift(vol, 2.0, 3.0, output_shape=(12, 12))
+    # dest starts at (dy=3, dx=2) in (y_start, y_end, x_start, x_end)
+    assert dst[0] == 3  # y_start
+    assert dst[2] == 2  # x_start
+    assert cropped.shape[1] == 8
+    assert cropped.shape[2] == 8
+
+
+def test_apply_xy_shift_negative_clips_src():
+    vol = _make_vol((4, 10, 10))
+    # Shift by -2 in both dims: source crops 2 from start
+    cropped, dst = apply_xy_shift(vol, -2.0, -2.0, output_shape=(10, 10))
+    assert cropped is not None
+    assert dst[0] == 0  # clamped to canvas start
+    assert cropped.shape[1] == 8  # 2 rows clipped
+
+
+def test_apply_xy_shift_fully_outside_canvas():
+    vol = _make_vol((4, 8, 8))
+    cropped, dst = apply_xy_shift(vol, 100.0, 100.0, output_shape=(10, 10))
+    assert cropped is None
+    assert dst is None
+
+
+# ---------------------------------------------------------------------------
+# blend_overlap_z
+# ---------------------------------------------------------------------------
+
+
+def test_blend_overlap_z_output_shape():
+    fixed = _make_vol((5, 8, 8), fill=1.0)
+    moving = _make_vol((5, 8, 8), fill=2.0)
+    result = blend_overlap_z(fixed, moving)
+    assert result.shape == fixed.shape
+
+
+def test_blend_overlap_z_both_valid():
+    """With both regions non-zero, result is between fixed and moving."""
+    fixed = np.ones((6, 8, 8), dtype=np.float32)
+    moving = np.full((6, 8, 8), 3.0, dtype=np.float32)
+    result = blend_overlap_z(fixed, moving)
+    assert float(result.min()) >= 1.0
+    assert float(result.max()) <= 3.0
+
+
+def test_blend_overlap_z_one_sided_fixed_only():
+    """When moving is zero, fixed
values are preserved.""" + fixed = np.ones((6, 8, 8), dtype=np.float32) + moving = np.zeros((6, 8, 8), dtype=np.float32) + result = blend_overlap_z(fixed, moving) + np.testing.assert_allclose(result[fixed > 0], 1.0) + + +def test_blend_overlap_z_one_sided_moving_only(): + """When fixed is zero, moving values are preserved.""" + fixed = np.zeros((6, 8, 8), dtype=np.float32) + moving = np.ones((6, 8, 8), dtype=np.float32) + result = blend_overlap_z(fixed, moving) + np.testing.assert_allclose(result[moving > 0], 1.0) + + +def test_blend_overlap_z_single_slice(): + """Single z-slice edge case: picks the region with more non-zero voxels.""" + fixed = np.ones((1, 8, 8), dtype=np.float32) + moving = np.zeros((1, 8, 8), dtype=np.float32) + result = blend_overlap_z(fixed, moving) + assert result.shape == (1, 8, 8) + + +# --------------------------------------------------------------------------- +# blend_overlap_xy +# --------------------------------------------------------------------------- + + +def test_blend_overlap_xy_none_overwrites(): + existing = np.ones((4, 8, 8), dtype=np.float32) + new_data = np.full((4, 8, 8), 5.0, dtype=np.float32) + result = blend_overlap_xy(existing.copy(), new_data, method="none") + np.testing.assert_allclose(result, 5.0) + + +def test_blend_overlap_xy_average(): + existing = np.ones((4, 8, 8), dtype=np.float32) + new_data = np.full((4, 8, 8), 3.0, dtype=np.float32) + result = blend_overlap_xy(existing.copy(), new_data, method="average") + np.testing.assert_allclose(result, 2.0) + + +def test_blend_overlap_xy_max(): + existing = np.ones((4, 8, 8), dtype=np.float32) + new_data = np.full((4, 8, 8), 3.0, dtype=np.float32) + result = blend_overlap_xy(existing.copy(), new_data, method="max") + np.testing.assert_allclose(result, 3.0) + + +def test_blend_overlap_xy_average_respects_zeros(): + """Pixels zero in existing (no data) should take new_data value.""" + existing = np.zeros((4, 8, 8), dtype=np.float32) + new_data = np.full((4, 8, 8), 2.0, dtype=np.float32) + result = blend_overlap_xy(existing.copy(), new_data, method="average") + np.testing.assert_allclose(result, 2.0) diff --git a/scripts/diagnostics/__init__.py b/scripts/diagnostics/__init__.py new file mode 100644 index 00000000..1cc5be35 --- /dev/null +++ b/scripts/diagnostics/__init__.py @@ -0,0 +1 @@ +"""Diagnostic scripts for linumpy pipeline inspection and benchmarking.""" diff --git a/scripts/diagnostics/linum_aggregate_dilation_analysis.py b/scripts/diagnostics/linum_aggregate_dilation_analysis.py new file mode 100644 index 00000000..7134eed9 --- /dev/null +++ b/scripts/diagnostics/linum_aggregate_dilation_analysis.py @@ -0,0 +1,437 @@ +#!/usr/bin/env python3 +""" +Aggregate dilation analysis results from multiple slices. + +This script reads dilation analysis JSON files from multiple slices and +computes summary statistics and recommended correction factors for the +3D reconstruction pipeline. 
+ +Outputs: +- Summary statistics across all slices +- Recommended global scale correction factors +- Per-slice correction factors (for advanced use) +- Visualization of scale variation across slices +""" + +import linumpy.config.threads # noqa: F401 + +import argparse +import json +import logging +from pathlib import Path +from typing import Any + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + +from linumpy.cli.args import add_overwrite_arg + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + + +def _build_arg_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + p.add_argument( + "input_directory", + help="Directory containing per-slice dilation analysis results.\n" + "Expected structure: input_dir/{slice_id}/dilation_analysis/dilation_analysis.json", + ) + p.add_argument("output_directory", help="Output directory for aggregated results") + + p.add_argument( + "--pattern", + type=str, + default="*/dilation_analysis/dilation_analysis.json", + help="Glob pattern to find JSON files [%(default)s]", + ) + p.add_argument( + "--target_scale", type=float, default=1.0, help="Target scale factor (default 1.0 = motor positions are correct)" + ) + + add_overwrite_arg(p) + return p + + +def load_dilation_results(input_dir: Path, pattern: str) -> Any: + """Load all dilation analysis JSON files from directory.""" + input_path = Path(input_dir) + json_files = sorted(input_path.glob(pattern)) + + if not json_files: + raise FileNotFoundError(f"No JSON files found with pattern: {pattern}") + + results = [] + for json_file in json_files: + with Path(json_file).open() as f: + data = json.load(f) + # Extract slice ID from path if not in data + if data.get("slice_id") is None: + # Try to extract from path + parts = json_file.parts + for part in parts: + if part.isdigit(): + data["slice_id"] = part + break + results.append(data) + + logger.info("Loaded %s dilation analysis results", len(results)) + return results + + +def compute_aggregate_statistics(results: Any) -> dict: + """Compute aggregate statistics across all slices.""" + scale_y = [r["scale_factors"]["scale_y"] for r in results] + scale_x = [r["scale_factors"]["scale_x"] for r in results] + mean_scale = [r["scale_factors"]["mean_scale"] for r in results] + anisotropy = [r["scale_factors"]["anisotropy"] for r in results] + r_squared_y = [r["scale_factors"]["r_squared_y"] for r in results] + r_squared_x = [r["scale_factors"]["r_squared_x"] for r in results] + + # Residuals + mean_residual = [r["residuals"]["mean_residual"] for r in results] + max_residual = [r["residuals"]["max_residual"] for r in results] + + # Distortions + has_progressive = [r["distortions"]["has_progressive_error"] for r in results] + gradient_y = [r["distortions"]["gradient_y"] for r in results] + gradient_x = [r["distortions"]["gradient_x"] for r in results] + + stats = { + "n_slices": len(results), + "scale_y": { + "mean": float(np.mean(scale_y)), + "std": float(np.std(scale_y)), + "min": float(np.min(scale_y)), + "max": float(np.max(scale_y)), + "median": float(np.median(scale_y)), + }, + "scale_x": { + "mean": float(np.mean(scale_x)), + "std": float(np.std(scale_x)), + "min": float(np.min(scale_x)), + "max": float(np.max(scale_x)), + "median": float(np.median(scale_x)), + }, + "mean_scale": { + "mean": float(np.mean(mean_scale)), + "std": float(np.std(mean_scale)), + "min": float(np.min(mean_scale)), + 
"max": float(np.max(mean_scale)), + "median": float(np.median(mean_scale)), + }, + "anisotropy": { + "mean": float(np.mean(anisotropy)), + "std": float(np.std(anisotropy)), + "max": float(np.max(anisotropy)), + }, + "fit_quality": { + "mean_r2_y": float(np.mean(r_squared_y)), + "mean_r2_x": float(np.mean(r_squared_x)), + "min_r2": float(min(min(r_squared_y), min(r_squared_x))), + }, + "residuals": { + "mean": float(np.mean(mean_residual)), + "max": float(np.max(max_residual)), + }, + "progressive_error": { + "n_slices_affected": sum(has_progressive), + "mean_gradient_y": float(np.mean(gradient_y)), + "mean_gradient_x": float(np.mean(gradient_x)), + }, + } + + return stats + + +def compute_correction_factors(stats: Any, target_scale: float = 1.0) -> dict: + """Compute recommended correction factors.""" + # Use median for robustness against outliers + correction_y = target_scale / stats["scale_y"]["median"] + correction_x = target_scale / stats["scale_x"]["median"] + + # Alternative: use mean + correction_y_mean = target_scale / stats["scale_y"]["mean"] + correction_x_mean = target_scale / stats["scale_x"]["mean"] + + return { + "recommended": { + "scale_y": float(correction_y), + "scale_x": float(correction_x), + "description": "Based on median scale factors (robust to outliers)", + }, + "alternative_mean": { + "scale_y": float(correction_y_mean), + "scale_x": float(correction_x_mean), + "description": "Based on mean scale factors", + }, + "deviation_from_unity": { + "y_percent": float((1.0 - stats["scale_y"]["median"]) * 100), + "x_percent": float((1.0 - stats["scale_x"]["median"]) * 100), + "description": "How much smaller mosaics are vs expected (%)", + }, + } + + +def compute_per_slice_factors(results: Any, target_scale: float = 1.0) -> Any: + """Compute per-slice correction factors for advanced use.""" + per_slice = [] + for r in results: + slice_id = r.get("slice_id", "unknown") + scale_y = r["scale_factors"]["scale_y"] + scale_x = r["scale_factors"]["scale_x"] + + per_slice.append( + { + "slice_id": slice_id, + "measured_scale_y": float(scale_y), + "measured_scale_x": float(scale_x), + "correction_y": float(target_scale / scale_y), + "correction_x": float(target_scale / scale_x), + "r_squared_y": float(r["scale_factors"]["r_squared_y"]), + "r_squared_x": float(r["scale_factors"]["r_squared_x"]), + } + ) + + return per_slice + + +def generate_report(stats: Any, corrections: Any, _per_slice: Any, output_dir: Path) -> Path: + """Generate text report.""" + lines = [ + "=" * 70, + "AGGREGATED DILATION ANALYSIS REPORT", + "=" * 70, + "", + f"Total slices analyzed: {stats['n_slices']}", + "", + "SCALE FACTOR SUMMARY", + "-" * 50, + "", + "Y-direction (rows):", + f" Mean: {stats['scale_y']['mean']:.6f}", + f" Median: {stats['scale_y']['median']:.6f}", + f" Std: {stats['scale_y']['std']:.6f}", + f" Range: [{stats['scale_y']['min']:.6f}, {stats['scale_y']['max']:.6f}]", + "", + "X-direction (columns):", + f" Mean: {stats['scale_x']['mean']:.6f}", + f" Median: {stats['scale_x']['median']:.6f}", + f" Std: {stats['scale_x']['std']:.6f}", + f" Range: [{stats['scale_x']['min']:.6f}, {stats['scale_x']['max']:.6f}]", + "", + "Overall:", + f" Mean scale: {stats['mean_scale']['mean']:.6f}", + f" Mean anisotropy: {stats['anisotropy']['mean']:.6f}", + "", + "INTERPRETATION", + "-" * 50, + ] + + # Interpretation + dev_y = corrections["deviation_from_unity"]["y_percent"] + dev_x = corrections["deviation_from_unity"]["x_percent"] + + if abs(dev_y) > 1.0 or abs(dev_x) > 1.0: + lines.append("⚠ SIGNIFICANT 
DILATION DETECTED") + lines.append(f" Y-direction: {abs(dev_y):.2f}% {'contraction' if dev_y > 0 else 'expansion'}") + lines.append(f" X-direction: {abs(dev_x):.2f}% {'contraction' if dev_x > 0 else 'expansion'}") + lines.append("") + lines.append(" This will cause edge misalignment in 3D reconstruction.") + lines.append(" Apply the recommended correction factors below.") + else: + lines.append("✓ Scale factors close to 1.0 - minimal dilation detected") + + if stats["anisotropy"]["mean"] > 0.01: + lines.append("") + lines.append(f"⚠ ANISOTROPIC SCALING: X and Y scales differ by {stats['anisotropy']['mean'] * 100:.2f}%") + lines.append(" Use different correction factors for X and Y directions.") + + lines.extend( + [ + "", + "RECOMMENDED CORRECTION FACTORS", + "-" * 50, + f"Scale Y: {corrections['recommended']['scale_y']:.6f}", + f"Scale X: {corrections['recommended']['scale_x']:.6f}", + "", + "Usage in pipeline:", + " linum_align_mosaics_3d_from_shifts.py ... \\", + f" --scale_y {corrections['recommended']['scale_y']:.4f} \\", + f" --scale_x {corrections['recommended']['scale_x']:.4f}", + "", + "FIT QUALITY", + "-" * 50, + f"Mean R² (Y): {stats['fit_quality']['mean_r2_y']:.6f}", + f"Mean R² (X): {stats['fit_quality']['mean_r2_x']:.6f}", + f"Min R²: {stats['fit_quality']['min_r2']:.6f}", + ] + ) + + if stats["fit_quality"]["min_r2"] < 0.99: + lines.append("⚠ Some slices have lower fit quality - check individual results") + else: + lines.append("✓ Good linear fit quality across all slices") + + lines.extend( + [ + "", + "PROGRESSIVE ERROR", + "-" * 50, + f"Slices with progressive error: {stats['progressive_error']['n_slices_affected']}/{stats['n_slices']}", + f"Mean gradient Y: {stats['progressive_error']['mean_gradient_y']:.4f} px/tile", + f"Mean gradient X: {stats['progressive_error']['mean_gradient_x']:.4f} px/tile", + ] + ) + + if stats["progressive_error"]["n_slices_affected"] > stats["n_slices"] // 2: + lines.append("⚠ Progressive error is systematic - scale correction will help") + + lines.extend( + [ + "", + "=" * 70, + ] + ) + + report_path = Path(output_dir) / "aggregated_dilation_report.txt" + with Path(report_path).open("w") as f: + f.write("\n".join(lines)) + + logger.info("Report saved to %s", report_path) + return report_path + + +def generate_plots(results: Any, output_dir: Path) -> Path: + """Generate visualization plots.""" + slice_ids = [str(r.get("slice_id", i)) for i, r in enumerate(results)] + scale_y = [r["scale_factors"]["scale_y"] for r in results] + scale_x = [r["scale_factors"]["scale_x"] for r in results] + mean_scale = [r["scale_factors"]["mean_scale"] for r in results] + anisotropy = [r["scale_factors"]["anisotropy"] for r in results] + + fig, axes = plt.subplots(2, 2, figsize=(14, 10)) + + # 1. Scale factors across slices + ax1 = axes[0, 0] + x = range(len(slice_ids)) + ax1.plot(x, scale_y, "o-", label="Scale Y", color="blue") + ax1.plot(x, scale_x, "s-", label="Scale X", color="red") + ax1.axhline(y=1.0, color="green", linestyle="--", label="Ideal (1.0)") + ax1.axhline(y=np.mean(scale_y), color="blue", linestyle=":", alpha=0.5) + ax1.axhline(y=np.mean(scale_x), color="red", linestyle=":", alpha=0.5) + ax1.set_xlabel("Slice") + ax1.set_ylabel("Scale Factor") + ax1.set_title("Scale Factors Across Slices") + ax1.set_xticks(x) + ax1.set_xticklabels(slice_ids, rotation=45) + ax1.legend() + ax1.grid(True, alpha=0.3) + + # 2. 
Mean scale and anisotropy + ax2 = axes[0, 1] + ax2.bar(x, mean_scale, color="purple", alpha=0.7, label="Mean Scale") + ax2.axhline(y=1.0, color="green", linestyle="--", label="Ideal (1.0)") + ax2.axhline(y=np.mean(mean_scale), color="purple", linestyle=":", label=f"Average ({np.mean(mean_scale):.4f})") + ax2.set_xlabel("Slice") + ax2.set_ylabel("Mean Scale Factor") + ax2.set_title("Mean Scale Factor per Slice") + ax2.set_xticks(x) + ax2.set_xticklabels(slice_ids, rotation=45) + ax2.legend() + ax2.grid(True, alpha=0.3) + + # 3. Anisotropy + ax3 = axes[1, 0] + ax3.bar(x, anisotropy, color="orange", alpha=0.7) + ax3.axhline(y=np.mean(anisotropy), color="red", linestyle="--", label=f"Mean ({np.mean(anisotropy):.4f})") + ax3.set_xlabel("Slice") + ax3.set_ylabel("Anisotropy (|Scale_Y - Scale_X|)") + ax3.set_title("Scale Anisotropy per Slice") + ax3.set_xticks(x) + ax3.set_xticklabels(slice_ids, rotation=45) + ax3.legend() + ax3.grid(True, alpha=0.3) + + # 4. Distribution of scale factors + ax4 = axes[1, 1] + ax4.hist(scale_y, bins=15, alpha=0.5, label="Scale Y", color="blue") + ax4.hist(scale_x, bins=15, alpha=0.5, label="Scale X", color="red") + ax4.axvline(x=1.0, color="green", linestyle="--", label="Ideal (1.0)") + ax4.axvline(x=np.median(scale_y), color="blue", linestyle=":", label=f"Median Y ({np.median(scale_y):.4f})") + ax4.axvline(x=np.median(scale_x), color="red", linestyle=":", label=f"Median X ({np.median(scale_x):.4f})") + ax4.set_xlabel("Scale Factor") + ax4.set_ylabel("Count") + ax4.set_title("Distribution of Scale Factors") + ax4.legend() + ax4.grid(True, alpha=0.3) + + fig.suptitle("Aggregated Dilation Analysis", fontsize=14) + plt.tight_layout() + + plot_path = Path(output_dir) / "aggregated_dilation_analysis.png" + plt.savefig(plot_path, dpi=150) + plt.close() + + logger.info("Plots saved to %s", plot_path) + return plot_path + + +def main() -> None: + """Run function.""" + p = _build_arg_parser() + args = p.parse_args() + + input_dir = Path(args.input_directory) + output_dir = Path(args.output_directory) + output_dir.mkdir(parents=True, exist_ok=True) + + # Load results + results = load_dilation_results(input_dir, args.pattern) + + # Compute statistics + stats = compute_aggregate_statistics(results) + corrections = compute_correction_factors(stats, args.target_scale) + per_slice = compute_per_slice_factors(results, args.target_scale) + + # Save JSON results + output_data = { + "statistics": stats, + "corrections": corrections, + "per_slice": per_slice, + } + + json_path = output_dir / "aggregated_dilation_analysis.json" + with Path(json_path).open("w") as f: + json.dump(output_data, f, indent=2) + logger.info("JSON saved to %s", json_path) + + # Save per-slice CSV for easy import + df = pd.DataFrame(per_slice) + csv_path = output_dir / "per_slice_correction_factors.csv" + df.to_csv(csv_path, index=False) + logger.info("CSV saved to %s", csv_path) + + # Generate report and plots + generate_report(stats, corrections, per_slice, output_dir) + generate_plots(results, output_dir) + + # Print summary + print("\n" + "=" * 60) + print("SUMMARY") + print("=" * 60) + print(f"Analyzed {stats['n_slices']} slices") + print("\nMeasured scale factors:") + print(f" Y: {stats['scale_y']['median']:.4f} (median)") + print(f" X: {stats['scale_x']['median']:.4f} (median)") + print("\nRecommended correction factors:") + print(f" Y: {corrections['recommended']['scale_y']:.4f}") + print(f" X: {corrections['recommended']['scale_x']:.4f}") + print("\nDeviation from expected:") + print(f" Y: 
{corrections['deviation_from_unity']['y_percent']:.2f}% contraction") + print(f" X: {corrections['deviation_from_unity']['x_percent']:.2f}% contraction") + + +if __name__ == "__main__": + main() diff --git a/scripts/diagnostics/linum_analyze_acquisition_rotation.py b/scripts/diagnostics/linum_analyze_acquisition_rotation.py new file mode 100644 index 00000000..c43b2a14 --- /dev/null +++ b/scripts/diagnostics/linum_analyze_acquisition_rotation.py @@ -0,0 +1,572 @@ +#!/usr/bin/env python3 +""" +Analyze rotation patterns from acquisition XY shifts data. + +This script examines the shifts_xy.csv file to detect rotation patterns that +occur during acquisition. By analyzing the direction of shift vectors across +slices, we can identify: + +1. **Systematic angular drift**: Shift vectors rotating over time (stage drift) +2. **Oscillating rotation**: Back-and-forth rotation pattern (mechanical backlash) +3. **Sudden rotation jumps**: Sample movement during acquisition + +The detected acquisition rotation can be compared with the final pairwise +registration rotation to assess how well the registration is compensating. + +For obliquely-mounted samples (e.g., 45° from standard planes), the shift +vector direction should remain relatively constant if there's no rotation. +""" + +import linumpy.config.threads # noqa: F401 + +import argparse +import json +import logging +from pathlib import Path +from typing import Any + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + +from linumpy.cli.args import add_overwrite_arg + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + + +def _build_arg_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + p.add_argument("in_shifts", help="Input shifts CSV file (shifts_xy.csv)") + p.add_argument("out_directory", help="Output directory for analysis results") + + p.add_argument("--resolution", type=float, default=10.0, help="Resolution in µm/pixel [%(default)s]") + p.add_argument("--registration_dir", type=str, default=None, help="Path to register_pairwise directory for comparison") + p.add_argument( + "--expected_angle", type=float, default=None, help="Expected shift angle in degrees (e.g., 45 for oblique mount)" + ) + p.add_argument("--window_size", type=int, default=5, help="Window size for local rotation estimation [%(default)s]") + + add_overwrite_arg(p) + return p + + +def load_shifts(shifts_path: Path) -> Any: + """Load shifts CSV file.""" + df = pd.read_csv(shifts_path) + required_cols = ["fixed_id", "moving_id", "x_shift_mm", "y_shift_mm"] + for col in required_cols: + if col not in df.columns: + raise ValueError(f"Missing required column: {col}") + return df + + +def compute_shift_angles(df: Any) -> Any: + """ + Compute the angle of each shift vector. + + Returns angles in degrees, where 0° = positive X direction, + 90° = positive Y direction, etc. + """ + angles = np.degrees(np.arctan2(df["y_shift_mm"], df["x_shift_mm"])) + return angles + + +def compute_angular_velocity(angles: Any, window_size: int = 5) -> Any: + """ + Compute the rate of change of shift angle (angular velocity). + + Uses a rolling window to smooth the derivative. 
+ """ + # Handle angle wraparound (-180 to 180) + angles_unwrapped = np.unwrap(np.radians(angles)) + angles_unwrapped = np.degrees(angles_unwrapped) + + # Compute derivative + angular_velocity = np.gradient(angles_unwrapped) + + # Smooth with rolling window + if window_size > 1: + kernel = np.ones(window_size) / window_size + angular_velocity_smooth = np.convolve(angular_velocity, kernel, mode="same") + else: + angular_velocity_smooth = angular_velocity + + return angular_velocity, angular_velocity_smooth + + +def compute_cumulative_rotation(angles: Any) -> Any: + """ + Compute cumulative rotation from shift angle changes. + + This estimates how much rotation has accumulated from the first slice. + """ + # Convert to numpy array if pandas Series + angles_arr = np.asarray(angles) + + # Use unwrapped angles to handle wraparound + angles_unwrapped = np.unwrap(np.radians(angles_arr)) + angles_unwrapped = np.degrees(angles_unwrapped) + + # Cumulative change from first angle + cumulative = angles_unwrapped - angles_unwrapped[0] + + return pd.Series(cumulative) + + +def detect_rotation_patterns(_angles: Any, angular_velocity: Any) -> Any: + """Detect different rotation patterns in the data.""" + patterns: dict[str, Any] = { + "systematic_drift": False, + "oscillation": False, + "sudden_jumps": [], + } + + # Check for systematic drift (mean angular velocity significantly different from 0) + mean_av = np.mean(angular_velocity) + if abs(mean_av) > 0.5: # More than 0.5 degrees per slice on average + patterns["systematic_drift"] = True + patterns["drift_rate"] = float(mean_av) + + # Check for oscillation (sign changes in angular velocity) + sign_changes = np.sum(np.diff(np.sign(angular_velocity)) != 0) + oscillation_ratio = sign_changes / len(angular_velocity) + if oscillation_ratio > 0.4: # More than 40% sign changes + patterns["oscillation"] = True + patterns["oscillation_frequency"] = float(oscillation_ratio) + + # Detect sudden jumps (angular velocity > 5 degrees in one step) + jump_threshold = 5.0 + jumps = np.where(np.abs(angular_velocity) > jump_threshold)[0] + if len(jumps) > 0: + patterns["sudden_jumps"] = jumps.tolist() + + return patterns + + +def load_registration_rotations(reg_dir: Path) -> pd.DataFrame | None: + """Load rotation values from pairwise registration metrics.""" + import re + + reg_path = Path(reg_dir) + + if not reg_path.exists(): + logger.warning("Registration directory does not exist: %s", reg_dir) + return None + + records = [] + + # Try to find slice directories - either directly or in subdirectories + # Check if there are slice_z directories directly + slice_dirs = [item for item in sorted(reg_path.iterdir()) if item.is_dir() and "slice_z" in item.name] + + # If no direct slice dirs, search recursively for JSON files + if not slice_dirs: + json_files = list(reg_path.glob("**/pairwise_registration_metrics.json")) + slice_dirs = sorted({f.parent for f in json_files}) + + if not slice_dirs: + logger.warning("No slice directories found in %s", reg_dir) + return None + + for slice_dir in slice_dirs: + match = re.search(r"slice_z(\d+)", slice_dir.name) + if not match: + continue + + slice_id = int(match.group(1)) + json_path = slice_dir / "pairwise_registration_metrics.json" + + if json_path.exists(): + with Path(json_path).open() as f: + data = json.load(f) + metrics = data.get("metrics", {}) + rotation = metrics.get("rotation", {}).get("value") + records.append({"slice_id": slice_id, "registration_rotation": rotation}) + + if records: + return 
pd.DataFrame(records).sort_values("slice_id") + return None + + +def analyze_acquisition_rotation(df: Any, expected_angle: Any = None) -> tuple[Any, Any, Any, Any]: + """Analyze rotation from acquisition shifts. + + Note: The shift vectors represent relative displacement between slices, + which can vary in direction due to drift. We analyze: + 1. Shift angle consistency (are shifts in a consistent direction?) + 2. Angular velocity (how much does the shift direction change?) + 3. Patterns that might indicate actual sample rotation + """ + # Compute shift angles + angles = compute_shift_angles(df) + + # Compute angular velocity + angular_velocity, angular_velocity_smooth = compute_angular_velocity(angles) + + # Compute cumulative rotation + cumulative_rotation = compute_cumulative_rotation(angles) + + # Detect patterns + patterns = detect_rotation_patterns(angles, angular_velocity) + + # Compute shift magnitudes + magnitudes = np.sqrt(df["x_shift_mm"] ** 2 + df["y_shift_mm"] ** 2) + + # Statistics + analysis = { + "n_shifts": len(df), + "angle_stats": { + "mean": float(angles.mean()), + "std": float(angles.std()), + "min": float(angles.min()), + "max": float(angles.max()), + "range": float(angles.max() - angles.min()), + }, + "magnitude_stats": { + "mean": float(magnitudes.mean()), + "std": float(magnitudes.std()), + "min": float(magnitudes.min()), + "max": float(magnitudes.max()), + }, + "cumulative_rotation": { + "total": float(cumulative_rotation.iloc[-1]), + "max_absolute": float(cumulative_rotation.abs().max()), + }, + "patterns": patterns, + "interpretation": {}, + } + + # Interpretation: High angle std with low magnitude std suggests drift direction changes + # Low angle std with consistent magnitude suggests systematic drift in one direction + angle_std = angles.std() + + if angle_std < 30: + analysis["interpretation"]["shift_consistency"] = "consistent" + analysis["interpretation"]["shift_consistency_note"] = ( + "Shifts are in a consistent direction - sample position is drifting uniformly" + ) + elif angle_std < 90: + analysis["interpretation"]["shift_consistency"] = "moderate" + analysis["interpretation"]["shift_consistency_note"] = ( + "Moderate variation in shift direction - some drift + possible rotation" + ) + else: + analysis["interpretation"]["shift_consistency"] = "highly_variable" + analysis["interpretation"]["shift_consistency_note"] = ( + "Shift directions vary widely - significant drift pattern changes or sample movement" + ) + + # Compare with expected angle if provided + if expected_angle is not None: + angle_deviation = angles.mean() - expected_angle + analysis["expected_angle"] = expected_angle + analysis["mean_deviation_from_expected"] = float(angle_deviation) + + return analysis, angles, angular_velocity_smooth, cumulative_rotation + + +def generate_report(analysis: Any, reg_comparison: Any, output_dir: Path) -> Path: + """Generate text report.""" + output_dir = Path(output_dir) + lines = [ + "=" * 70, + "ACQUISITION ROTATION ANALYSIS", + "=" * 70, + "", + "This analysis examines the shift vectors between consecutive slices", + "to detect rotation and drift patterns during acquisition.", + "", + "NOTE: 'Shift angle' is the direction of the position change between", + "slices. 
Varying angles indicate changing drift direction, which may", + "result from sample rotation OR complex drift patterns.", + "", + "SHIFT VECTOR STATISTICS", + "-" * 50, + f"Number of shifts: {analysis['n_shifts']}", + f"Mean shift angle: {analysis['angle_stats']['mean']:.2f}°", + f"Std deviation: {analysis['angle_stats']['std']:.2f}°", + f"Min angle: {analysis['angle_stats']['min']:.2f}°", + f"Max angle: {analysis['angle_stats']['max']:.2f}°", + f"Angle range: {analysis['angle_stats']['range']:.2f}°", + "", + "SHIFT MAGNITUDE STATISTICS", + "-" * 50, + f"Mean magnitude: {analysis['magnitude_stats']['mean']:.4f} mm", + f"Std deviation: {analysis['magnitude_stats']['std']:.4f} mm", + f"Min magnitude: {analysis['magnitude_stats']['min']:.4f} mm", + f"Max magnitude: {analysis['magnitude_stats']['max']:.4f} mm", + "", + ] + + if "expected_angle" in analysis: + lines.extend( + [ + "EXPECTED ANGLE COMPARISON", + "-" * 50, + f"Expected angle: {analysis['expected_angle']:.1f}°", + f"Deviation from expected: {analysis['mean_deviation_from_expected']:.2f}°", + "", + ] + ) + + lines.extend( + [ + "CUMULATIVE ANGLE CHANGE", + "-" * 50, + f"Total angle change: {analysis['cumulative_rotation']['total']:.2f}°", + f"Max absolute: {analysis['cumulative_rotation']['max_absolute']:.2f}°", + "", + "INTERPRETATION", + "-" * 50, + f"Shift consistency: {analysis['interpretation']['shift_consistency'].upper()}", + f" {analysis['interpretation']['shift_consistency_note']}", + "", + "PATTERN DETECTION", + "-" * 50, + ] + ) + + patterns = analysis["patterns"] + if patterns["systematic_drift"]: + lines.append(f"⚠ Systematic angular drift: {patterns['drift_rate']:.3f}°/slice") + if patterns["oscillation"]: + lines.append(f"⚠ Oscillation detected (frequency: {patterns['oscillation_frequency']:.2f})") + if patterns["sudden_jumps"]: + n_jumps = len(patterns["sudden_jumps"]) + lines.append(f"⚠ {n_jumps} sudden direction changes (>5°/step)") + if n_jumps <= 10: + lines.append(f" At slices: {patterns['sudden_jumps']}") + else: + lines.append(f" First 10 at slices: {patterns['sudden_jumps'][:10]}...") + if not (patterns["systematic_drift"] or patterns["oscillation"] or patterns["sudden_jumps"]): + lines.append("✓ No significant rotation patterns detected") + + # Registration comparison + if reg_comparison is not None: + lines.extend( + [ + "", + "COMPARISON WITH REGISTRATION", + "-" * 50, + ] + ) + if "acquisition_vs_registration" in reg_comparison: + avr = reg_comparison["acquisition_vs_registration"] + lines.extend( + [ + f"Acquisition cumulative angle change: {avr['acquisition_cumulative']:.2f}°", + f"Registration cumulative rotation: {avr['registration_cumulative']:.2f}°", + f"Correlation: {avr['correlation']:.3f}", + ] + ) + if abs(avr["correlation"]) < 0.3: + lines.append("→ Weak correlation: acquisition angle changes ≠ actual rotation") + elif avr["correlation"] > 0.5: + lines.append("→ Positive correlation: registration tracking drift patterns") + elif avr["correlation"] < -0.5: + lines.append("→ Negative correlation: registration may be compensating") + + lines.extend(["", "=" * 70]) + + report_path = output_dir / "acquisition_rotation_analysis.txt" + with Path(report_path).open("w") as f: + f.write("\n".join(lines)) + + logger.info("Report saved to %s", report_path) + return report_path + + +def generate_plots( + df: Any, angles: Any, angular_velocity: Any, cumulative_rotation: Any, reg_df: Any, output_dir: Path +) -> Path: + """Generate visualization plots.""" + output_dir = Path(output_dir) + _fig, axes = 
plt.subplots(2, 2, figsize=(14, 10)) + + slice_ids = df["moving_id"].values + + # 1. Shift vectors (quiver plot) + ax1 = axes[0, 0] + # Normalize for visualization + magnitudes = np.sqrt(df["x_shift_mm"] ** 2 + df["y_shift_mm"] ** 2) + norm_x = df["x_shift_mm"] / magnitudes.max() + norm_y = df["y_shift_mm"] / magnitudes.max() + + colors = plt.colormaps["viridis"](np.linspace(0, 1, len(df))) + for i in range(len(df)): + ax1.arrow( + 0, 0, norm_x.iloc[i], norm_y.iloc[i], head_width=0.02, head_length=0.01, fc=colors[i], ec=colors[i], alpha=0.7 + ) + ax1.set_xlim(-1.2, 1.2) + ax1.set_ylim(-1.2, 1.2) + ax1.set_aspect("equal") + ax1.axhline(y=0, color="gray", linestyle="--", linewidth=0.5) + ax1.axvline(x=0, color="gray", linestyle="--", linewidth=0.5) + ax1.set_xlabel("Normalized X shift") + ax1.set_ylabel("Normalized Y shift") + ax1.set_title("Shift Vector Directions\n(colored by slice order: purple→yellow)") + + # Add colorbar + sm = plt.cm.ScalarMappable(cmap="viridis", norm=plt.Normalize(vmin=slice_ids.min(), vmax=slice_ids.max())) + sm.set_array([]) + plt.colorbar(sm, ax=ax1, label="Slice ID") + + # 2. Shift angle over slices + ax2 = axes[0, 1] + ax2.plot(slice_ids, angles, "b-", linewidth=1, alpha=0.7, label="Raw angle") + ax2.axhline(y=angles.mean(), color="red", linestyle="--", label=f"Mean: {angles.mean():.1f}°") + ax2.fill_between( + slice_ids, + angles.mean() - angles.std(), + angles.mean() + angles.std(), + alpha=0.2, + color="red", + label=f"±1 std: {angles.std():.1f}°", + ) + ax2.set_xlabel("Slice ID") + ax2.set_ylabel("Shift Angle (degrees)") + ax2.set_title("Shift Vector Angle vs Slice") + ax2.legend() + ax2.grid(True, alpha=0.3) + + # 3. Angular velocity (rate of angle change) + ax3 = axes[1, 0] + ax3.plot(slice_ids, angular_velocity, "g-", linewidth=1.5, label="Angular velocity (smoothed)") + ax3.axhline(y=0, color="black", linestyle="-", linewidth=0.5) + ax3.axhline(y=2, color="red", linestyle="--", alpha=0.5) + ax3.axhline(y=-2, color="red", linestyle="--", alpha=0.5) + ax3.set_xlabel("Slice ID") + ax3.set_ylabel("Angular Velocity (°/slice)") + ax3.set_title("Rate of Angle Change\n(rotation between consecutive slices)") + ax3.legend() + ax3.grid(True, alpha=0.3) + + # 4. 
Cumulative rotation comparison
+    ax4 = axes[1, 1]
+    ax4.plot(slice_ids, cumulative_rotation, "b-", linewidth=2, label="Acquisition (from shifts)")
+
+    if reg_df is not None and "registration_rotation" in reg_df.columns:
+        # Align registration data with shift data
+        reg_cumulative = reg_df["registration_rotation"].cumsum()
+        ax4.plot(reg_df["slice_id"], reg_cumulative, "r-", linewidth=2, label="Registration (cumulative)")
+
+    ax4.axhline(y=0, color="black", linestyle="-", linewidth=0.5)
+    ax4.set_xlabel("Slice ID")
+    ax4.set_ylabel("Cumulative Rotation (degrees)")
+    ax4.set_title("Cumulative Rotation:\nAcquisition vs Registration")
+    ax4.legend()
+    ax4.grid(True, alpha=0.3)
+
+    plt.tight_layout()
+    plot_path = output_dir / "acquisition_rotation_analysis.png"
+    plt.savefig(plot_path, dpi=150)
+    plt.close()
+
+    logger.info("Plots saved to %s", plot_path)
+    return plot_path
+
+
+def compare_with_registration(cumulative_rotation: Any, reg_df: Any, slice_ids: Any) -> dict | None:
+    """Compare acquisition rotation with registration rotation."""
+    if reg_df is None or len(reg_df) == 0:
+        return None
+
+    # Get registration cumulative rotation
+    reg_cumulative = reg_df["registration_rotation"].fillna(0).cumsum()
+
+    # Align indices
+    common_slices = set(slice_ids) & set(reg_df["slice_id"])
+    if len(common_slices) < 5:
+        return None
+
+    acq_values = []
+    reg_values = []
+
+    for sid in sorted(common_slices):
+        acq_idx = np.where(slice_ids == sid)[0]
+        reg_idx = reg_df[reg_df["slice_id"] == sid].index
+
+        if len(acq_idx) > 0 and len(reg_idx) > 0:
+            acq_values.append(cumulative_rotation.iloc[acq_idx[0]])
+            # Use label-based .loc here: reg_df keeps its pre-sort index labels
+            # after sort_values, so positional .iloc could pick the wrong row.
+            reg_values.append(reg_cumulative.loc[reg_idx[0]])
+
+    if len(acq_values) < 5:
+        return None
+
+    correlation = np.corrcoef(acq_values, reg_values)[0, 1]
+
+    return {
+        "acquisition_vs_registration": {
+            "acquisition_cumulative": float(acq_values[-1]),
+            "registration_cumulative": float(reg_values[-1]),
+            "correlation": float(correlation) if not np.isnan(correlation) else 0.0,
+        }
+    }
+
+
+def main() -> None:
+    """Run function."""
+    p = _build_arg_parser()
+    args = p.parse_args()
+
+    output_dir = Path(args.out_directory)
+    output_dir.mkdir(parents=True, exist_ok=True)
+
+    # Load shifts
+    logger.info("Loading shifts from %s", args.in_shifts)
+    df = load_shifts(args.in_shifts)
+    logger.info("Loaded %s shift pairs", len(df))
+
+    # Main analysis
+    analysis, angles, angular_velocity, cumulative_rotation = analyze_acquisition_rotation(
+        df, expected_angle=args.expected_angle
+    )
+
+    # Load registration data if available
+    reg_df = None
+    if args.registration_dir:
+        logger.info("Loading registration data from %s", args.registration_dir)
+        reg_df = load_registration_rotations(args.registration_dir)
+        if reg_df is not None:
+            logger.info("Loaded registration data for %s slices", len(reg_df))
+
+    # Compare with registration
+    reg_comparison = compare_with_registration(cumulative_rotation, reg_df, df["moving_id"].values)
+
+    # Save raw data
+    output_df = df.copy()
+    output_df["shift_angle"] = angles
+    output_df["angular_velocity"] = angular_velocity
+    output_df["cumulative_rotation"] = cumulative_rotation
+    csv_path = output_dir / "acquisition_rotation_data.csv"
+    output_df.to_csv(csv_path, index=False)
+    logger.info("Data saved to %s", csv_path)
+
+    # Save analysis JSON
+    json_path = output_dir / "acquisition_rotation_analysis.json"
+    with Path(json_path).open("w") as f:
+        json.dump(analysis, f, indent=2)
+
+    # Generate outputs
+    generate_report(analysis, reg_comparison, output_dir)
+    generate_plots(df, angles, 
angular_velocity, cumulative_rotation, reg_df, output_dir) + + # Print summary + print("\n" + "=" * 50) + print("ACQUISITION ROTATION SUMMARY") + print("=" * 50) + print(f"Mean shift angle: {analysis['angle_stats']['mean']:.1f}° (std: {analysis['angle_stats']['std']:.1f}°)") + print(f"Angle range: {analysis['angle_stats']['range']:.1f}°") + print(f"Cumulative rotation: {analysis['cumulative_rotation']['total']:.2f}°") + + if analysis["patterns"]["systematic_drift"]: + print(f"⚠ Systematic drift: {analysis['patterns']['drift_rate']:.3f}°/slice") + if analysis["patterns"]["oscillation"]: + print("⚠ Oscillation detected") + if analysis["patterns"]["sudden_jumps"]: + print(f"⚠ {len(analysis['patterns']['sudden_jumps'])} sudden rotation jumps") + + +if __name__ == "__main__": + main() diff --git a/scripts/diagnostics/linum_analyze_registration_transforms.py b/scripts/diagnostics/linum_analyze_registration_transforms.py new file mode 100644 index 00000000..d8526fbe --- /dev/null +++ b/scripts/diagnostics/linum_analyze_registration_transforms.py @@ -0,0 +1,452 @@ +#!/usr/bin/env python3 +""" +Analyze pairwise registration transforms to detect rotation drift and alignment issues. + +This diagnostic tool aggregates rotation and translation data from all pairwise +registration outputs to identify: +- Cumulative rotation drift (edges drifting apart) +- Sudden rotation jumps (slice misalignment) +- Systematic rotation bias (oblique cutting artifacts) +- Translation vs rotation correlation (dilation indicators) + +Useful for troubleshooting 3D reconstruction artifacts like "overhangs" and edge +mismatches in obliquely-cut samples (e.g., 45° between sagittal/coronal). +""" + +import linumpy.config.threads # noqa: F401 + +import argparse +import json +import logging +import re +from pathlib import Path +from typing import Any + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + +from linumpy.cli.args import add_overwrite_arg + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + + +def _build_arg_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + p.add_argument("in_directory", help="Path to register_pairwise output directory or pipeline output directory") + p.add_argument("out_directory", help="Output directory for analysis results") + + p.add_argument("--resolution", type=float, default=10.0, help="Resolution in µm/pixel [%(default)s]") + p.add_argument( + "--rotation_threshold", type=float, default=2.0, help="Flag rotations above this threshold (degrees) [%(default)s]" + ) + p.add_argument( + "--cumulative_threshold", + type=float, + default=5.0, + help="Flag cumulative rotation above this threshold (degrees) [%(default)s]", + ) + p.add_argument("--include_tfm", action="store_true", help="Also parse transform.tfm files for rotation (if JSON missing)") + + add_overwrite_arg(p) + return p + + +def find_registration_dirs(base_path: Path) -> Any: + """Find all pairwise registration directories.""" + base = Path(base_path) + + # If base doesn't exist, raise error + if not base.exists(): + raise FileNotFoundError(f"Path does not exist: {base_path}") + + # Check if this is already the register_pairwise directory + if base.name == "register_pairwise": + reg_dir = base + elif (base / "register_pairwise").exists(): + reg_dir = base / "register_pairwise" + else: + # Search for register_pairwise subdirectory + candidates = 
list(base.glob("**/register_pairwise")) + if candidates: + reg_dir = candidates[0] + else: + # Maybe we're directly in a directory containing slice_z* dirs + # This happens when Nextflow stages files with path("register_pairwise/*") + slice_dirs = sorted([d for d in base.iterdir() if d.is_dir() and "slice_z" in d.name]) + if slice_dirs: + logger.info("Found %s slice directories directly in %s", len(slice_dirs), base_path) + return slice_dirs + + # Also check if there are JSON files directly here (flat structure) + json_files = list(base.glob("**/pairwise_registration_metrics.json")) + if json_files: + # Return the parent directories of the JSON files + slice_dirs = sorted({f.parent for f in json_files}) + logger.info("Found %s directories with registration metrics", len(slice_dirs)) + return slice_dirs + + raise FileNotFoundError(f"No register_pairwise directory or slice_z* directories found in {base_path}") + + # Find all slice directories + slice_dirs = sorted([d for d in reg_dir.iterdir() if d.is_dir() and "slice_z" in d.name]) + if not slice_dirs: + # Try searching recursively + slice_dirs = sorted([d.parent for d in reg_dir.glob("**/pairwise_registration_metrics.json")]) + slice_dirs = sorted(set(slice_dirs)) # Remove duplicates + + return slice_dirs + + +def parse_slice_id(dirname: Any) -> Any: + """Extract slice ID from directory name like 'slice_z05_normalize'.""" + match = re.search(r"slice_z(\d+)", dirname) + return int(match.group(1)) if match else None + + +def load_metrics_from_json(json_path: Path) -> dict: + """Load registration metrics from JSON file.""" + with Path(json_path).open() as f: + data = json.load(f) + + metrics = data.get("metrics", {}) + return { + "rotation": metrics.get("rotation", {}).get("value", None), + "translation_x": metrics.get("translation_x", {}).get("value", None), + "translation_y": metrics.get("translation_y", {}).get("value", None), + "translation_magnitude": metrics.get("translation_magnitude", {}).get("value", None), + "z_drift": metrics.get("z_drift", {}).get("value", None), + "registration_error": metrics.get("registration_error", {}).get("value", None), + "fixed_volume": metrics.get("fixed_volume", {}).get("value", None), + "moving_volume": metrics.get("moving_volume", {}).get("value", None), + } + + +def load_rotation_from_tfm(tfm_path: Path) -> Any: + """Extract rotation angle from SimpleITK transform file.""" + import SimpleITK as sitk + + try: + transform = sitk.ReadTransform(str(tfm_path)) + + # For Euler2DTransform, parameter[0] is the rotation angle in radians + if "Euler2D" in transform.GetName(): + params = transform.GetParameters() + if len(params) >= 1: + return np.degrees(params[0]) + + # For AffineTransform, extract rotation from matrix + elif "Affine" in transform.GetName(): + params = transform.GetParameters() + # 2D affine: [a00, a01, a10, a11, tx, ty] + if len(params) >= 4: + a00, a01 = params[0], params[1] + # Rotation angle from matrix components + return np.degrees(np.arctan2(-a01, a00)) + + return None + except Exception as e: + logger.warning("Could not parse transform file %s: %s", tfm_path, e) + return None + + +def collect_registration_data(slice_dirs: Any, include_tfm: bool = False) -> Any: + """Collect registration data from all slice directories.""" + records = [] + + for slice_dir in slice_dirs: + slice_id = parse_slice_id(slice_dir.name) + if slice_id is None: + continue + + record = { + "slice_id": slice_id, + "directory": slice_dir.name, + } + + # Try JSON first + json_path = slice_dir / 
"pairwise_registration_metrics.json" + if json_path.exists(): + metrics = load_metrics_from_json(json_path) + record.update(metrics) + + # Optionally try TFM file if rotation is missing + if include_tfm and record.get("rotation") is None: + tfm_path = slice_dir / "transform.tfm" + if tfm_path.exists(): + rotation = load_rotation_from_tfm(tfm_path) + if rotation is not None: + record["rotation"] = rotation + record["rotation_source"] = "tfm" + + records.append(record) + + df = pd.DataFrame(records) + df = df.sort_values("slice_id").reset_index(drop=True) + return df + + +def analyze_rotation_drift(df: Any, rotation_threshold: float = 2.0, cumulative_threshold: float = 5.0) -> Any: + """Analyze rotation patterns and detect issues.""" + analysis = { + "n_slices": len(df), + "rotation_stats": {}, + "issues": [], + "cumulative_drift": None, + } + + if "rotation" not in df.columns or df["rotation"].isna().all(): + analysis["issues"].append("No rotation data available") + return analysis + + rotations = df["rotation"].dropna() + + # Basic statistics + analysis["rotation_stats"] = { + "mean": float(rotations.mean()), + "std": float(rotations.std()), + "min": float(rotations.min()), + "max": float(rotations.max()), + "median": float(rotations.median()), + } + + # Cumulative rotation (drift) + cumulative = rotations.cumsum() + analysis["cumulative_drift"] = { + "total": float(cumulative.iloc[-1]) if len(cumulative) > 0 else 0, + "max_absolute": float(cumulative.abs().max()) if len(cumulative) > 0 else 0, + } + + # Detect large individual rotations + large_rotations = df[df["rotation"].abs() > rotation_threshold] + if len(large_rotations) > 0: + analysis["issues"].append(f"Found {len(large_rotations)} slices with rotation > {rotation_threshold}°") + analysis["large_rotation_slices"] = large_rotations["slice_id"].tolist() + + # Check cumulative drift + if abs(analysis["cumulative_drift"]["total"]) > cumulative_threshold: + analysis["issues"].append(f"High cumulative rotation drift: {analysis['cumulative_drift']['total']:.2f}°") + + # Check for systematic rotation bias + if abs(analysis["rotation_stats"]["mean"]) > 0.5: + analysis["issues"].append(f"Systematic rotation bias detected: mean={analysis['rotation_stats']['mean']:.3f}°") + + return analysis + + +def analyze_translation_rotation_correlation(df: Any) -> dict: + """Check if translation and rotation are correlated (dilation indicator).""" + if "rotation" not in df.columns or "translation_magnitude" not in df.columns: + return {} + + valid = df.dropna(subset=["rotation", "translation_magnitude"]) + if len(valid) < 5: + return {} + + correlation = np.corrcoef(valid["rotation"].abs(), valid["translation_magnitude"])[0, 1] + + return { + "rotation_translation_correlation": float(correlation), + "interpretation": "high" if abs(correlation) > 0.5 else "low", + } + + +def generate_report(df: Any, analysis: Any, correlation: Any, output_dir: Path) -> Path: + """Generate text report.""" + output_dir = Path(output_dir) + lines = [ + "=" * 70, + "REGISTRATION TRANSFORM ANALYSIS", + "=" * 70, + "", + "OVERVIEW", + "-" * 50, + f"Total slices analyzed: {analysis['n_slices']}", + f"Slices with rotation data: {df['rotation'].notna().sum()}", + "", + ] + + # Rotation statistics + if analysis["rotation_stats"]: + lines.extend( + [ + "ROTATION STATISTICS", + "-" * 50, + f"Mean rotation: {analysis['rotation_stats']['mean']:.4f}°", + f"Std deviation: {analysis['rotation_stats']['std']:.4f}°", + f"Min rotation: {analysis['rotation_stats']['min']:.4f}°", + f"Max 
rotation: {analysis['rotation_stats']['max']:.4f}°", + f"Median rotation: {analysis['rotation_stats']['median']:.4f}°", + "", + ] + ) + + # Cumulative drift + if analysis["cumulative_drift"]: + lines.extend( + [ + "CUMULATIVE ROTATION DRIFT", + "-" * 50, + f"Total drift: {analysis['cumulative_drift']['total']:.4f}°", + f"Max absolute: {analysis['cumulative_drift']['max_absolute']:.4f}°", + "", + ] + ) + + # Correlation analysis + if correlation: + lines.extend( + [ + "ROTATION-TRANSLATION CORRELATION", + "-" * 50, + f"Correlation coefficient: {correlation.get('rotation_translation_correlation', 'N/A'):.4f}", + f"Interpretation: {correlation.get('interpretation', 'N/A')}", + "(High correlation may indicate dilation/scaling issues)", + "", + ] + ) + + # Issues detected + lines.extend( + [ + "ISSUES DETECTED", + "-" * 50, + ] + ) + if analysis["issues"]: + lines.extend(f" ⚠ {issue}" for issue in analysis["issues"]) + else: + lines.append(" ✓ No significant issues detected") + lines.append("") + + # Large rotation slices + if "large_rotation_slices" in analysis: + lines.extend( + [ + "SLICES WITH LARGE ROTATIONS", + "-" * 50, + ] + ) + for sid in analysis["large_rotation_slices"]: + row = df[df["slice_id"] == sid].iloc[0] + lines.append(f" Slice {sid:02d}: rotation={row['rotation']:.3f}°") + lines.append("") + + lines.append("=" * 70) + + report_path = output_dir / "rotation_analysis.txt" + with Path(report_path).open("w") as f: + f.write("\n".join(lines)) + + logger.info("Report saved to %s", report_path) + return report_path + + +def generate_plots(df: Any, output_dir: Path) -> Path: + """Generate visualization plots.""" + output_dir = Path(output_dir) + _fig, axes = plt.subplots(2, 2, figsize=(14, 10)) + + # 1. Per-slice rotation + ax1 = axes[0, 0] + if "rotation" in df.columns: + valid = df.dropna(subset=["rotation"]) + ax1.bar(valid["slice_id"], valid["rotation"], alpha=0.7, color="steelblue") + ax1.axhline(y=0, color="black", linestyle="-", linewidth=0.5) + ax1.axhline(y=2, color="red", linestyle="--", linewidth=0.5, label="±2° threshold") + ax1.axhline(y=-2, color="red", linestyle="--", linewidth=0.5) + ax1.set_xlabel("Slice ID") + ax1.set_ylabel("Rotation (degrees)") + ax1.set_title("Per-Slice Rotation") + ax1.legend() + + # 2. Cumulative rotation drift + ax2 = axes[0, 1] + if "rotation" in df.columns: + valid = df.dropna(subset=["rotation"]).sort_values("slice_id") + cumulative = valid["rotation"].cumsum() + ax2.plot(valid["slice_id"], cumulative, "b-", linewidth=2, label="Cumulative rotation") + ax2.fill_between(valid["slice_id"], 0, cumulative, alpha=0.3) + ax2.axhline(y=0, color="black", linestyle="-", linewidth=0.5) + ax2.set_xlabel("Slice ID") + ax2.set_ylabel("Cumulative Rotation (degrees)") + ax2.set_title("Cumulative Rotation Drift") + ax2.legend() + + # 3. Translation magnitude vs rotation + ax3 = axes[1, 0] + if "rotation" in df.columns and "translation_magnitude" in df.columns: + valid = df.dropna(subset=["rotation", "translation_magnitude"]) + ax3.scatter(valid["rotation"].abs(), valid["translation_magnitude"], alpha=0.6, c=valid["slice_id"], cmap="viridis") + ax3.set_xlabel("|Rotation| (degrees)") + ax3.set_ylabel("Translation Magnitude (pixels)") + ax3.set_title("Translation vs Rotation (colored by slice ID)") + plt.colorbar(ax3.collections[0], ax=ax3, label="Slice ID") + + # 4. 
Translation components + ax4 = axes[1, 1] + if "translation_x" in df.columns and "translation_y" in df.columns: + valid = df.dropna(subset=["translation_x", "translation_y"]) + ax4.scatter(valid["translation_x"], valid["translation_y"], alpha=0.6, c=valid["slice_id"], cmap="viridis") + ax4.axhline(y=0, color="gray", linestyle="--", linewidth=0.5) + ax4.axvline(x=0, color="gray", linestyle="--", linewidth=0.5) + ax4.set_xlabel("Translation X (pixels)") + ax4.set_ylabel("Translation Y (pixels)") + ax4.set_title("Translation Vector Components") + ax4.set_aspect("equal") + + plt.tight_layout() + plot_path = output_dir / "rotation_analysis.png" + plt.savefig(plot_path, dpi=150) + plt.close() + + logger.info("Plots saved to %s", plot_path) + return plot_path + + +def main() -> None: + """Run function.""" + p = _build_arg_parser() + args = p.parse_args() + + output_dir = Path(args.out_directory) + output_dir.mkdir(parents=True, exist_ok=True) + + # Find and collect registration data + logger.info("Searching for registration data in %s", args.in_directory) + slice_dirs = find_registration_dirs(args.in_directory) + logger.info("Found %s registration directories", len(slice_dirs)) + + df = collect_registration_data(slice_dirs, include_tfm=args.include_tfm) + logger.info("Collected data for %s slices", len(df)) + + # Save raw data + csv_path = output_dir / "registration_transforms.csv" + df.to_csv(csv_path, index=False) + logger.info("Raw data saved to %s", csv_path) + + # Analyze rotation + analysis = analyze_rotation_drift(df, args.rotation_threshold, args.cumulative_threshold) + correlation = analyze_translation_rotation_correlation(df) + + # Generate outputs + generate_report(df, analysis, correlation, output_dir) + generate_plots(df, output_dir) + + # Print summary + print("\n" + "=" * 50) + print("SUMMARY") + print("=" * 50) + if analysis["cumulative_drift"]: + print(f"Total rotation drift: {analysis['cumulative_drift']['total']:.2f}°") + if analysis["issues"]: + print(f"Issues found: {len(analysis['issues'])}") + for issue in analysis["issues"]: + print(f" - {issue}") + else: + print("No significant issues detected") + + +if __name__ == "__main__": + main() diff --git a/scripts/diagnostics/linum_analyze_stitch_affine.py b/scripts/diagnostics/linum_analyze_stitch_affine.py new file mode 100644 index 00000000..169313aa --- /dev/null +++ b/scripts/diagnostics/linum_analyze_stitch_affine.py @@ -0,0 +1,314 @@ +#!/usr/bin/env python3 +"""Per-slice affine diagnostic for the refined stitching step. + +For each mosaic_grid_*.ome.zarr in a directory, load only the central Z +plane and run the same two calls that ``linum_stitch_3d_refined.py`` makes +before writing any output: + + refinements = compute_registration_refinements(...) + transform, diagnostics = estimate_affine_from_pairs( + refinements["pairs"], tile_shape, overlap_fraction + ) + +Emit one CSV row per slice with the fitted 2x2 affine, the Lefebvre +displacement-model parameters (theta, phi, Ox, Oy), and the raw +number-of-pairs / residual statistics. Also emit one per-slice JSON +capturing the full refinements["pairs"] list so further analysis can be +done offline without re-reading the (very large) mosaic grids. + +The script does **not** write any stitched output and does not touch the +nextflow output directories of the reconstruction pipeline, so it is +safe to run alongside a production run and does not invalidate any +downstream manual-alignment baseline. 
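+
+Example invocation (paths are illustrative only, not from a real run):
+
+    linum_analyze_stitch_affine.py preproc_output/ affine_diagnostics.csv \
+        --overlap_fraction 0.2 --slice_config slice_config.csv --json_dir affine_json/
+
+Each CSV row then carries the fitted 2x2 matrix (A00..A11), the derived
+per-axis tile steps (step_y_px, step_x_px), the displacement-model angles
+(theta_deg, phi_deg) and overlap offsets (Ox_fraction, Oy_fraction), plus
+the pair counts and the least-squares residual.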
+""" + +# Configure thread limits before numpy/scipy imports +import linumpy.config.threads # noqa: F401 + +import argparse +import csv +import json +import logging +import re +import sys +from pathlib import Path +from typing import Any + +import numpy as np + +from linumpy.io import slice_config as slice_config_io +from linumpy.io.zarr import read_omezarr +from linumpy.mosaic.motor import compute_registration_refinements, estimate_affine_from_pairs + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + +_SLICE_RE = re.compile(r"z(\d+)") + + +class _NumpyEncoder(json.JSONEncoder): + def default(self, o: Any) -> Any: + if isinstance(o, np.integer): + return int(o) + if isinstance(o, np.floating): + return float(o) + if isinstance(o, np.ndarray): + return o.tolist() + return super().default(o) + + +def _build_arg_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + p.add_argument( + "input_dir", + help="Directory containing mosaic_grid_*z??.ome.zarr files (the preproc output)", + ) + p.add_argument( + "output_csv", + help="Path to the per-slice affine diagnostics CSV to write", + ) + p.add_argument( + "--overlap_fraction", + type=float, + default=0.2, + help="Expected tile overlap fraction (must match the acquisition). [%(default)s]", + ) + p.add_argument( + "--max_refinement_px", + type=float, + default=1e9, + help="Clamp threshold for stored refinements. Does not affect the absolute\n" + "displacements fed into the affine LS fit. [%(default)s]", + ) + p.add_argument( + "--slice_config", + type=str, + default=None, + help="Optional slice_config.csv; slices with use=false are skipped.", + ) + p.add_argument( + "--json_dir", + type=str, + default=None, + help="Optional directory to write one per-slice refinements JSON (including\n" + "the full pair list). Not created if the argument is not provided.", + ) + p.add_argument( + "--pattern", + type=str, + default="mosaic_grid*_z*.ome.zarr", + help="Glob pattern used to discover input volumes. [%(default)s]", + ) + p.add_argument( + "--skip_first", + type=int, + default=0, + help="Skip this many leading slices from the sorted discovery order. 
[%(default)s]", + ) + return p + + +def _extract_slice_id(path: Path) -> str: + match = _SLICE_RE.search(path.name) + return match.group(1) if match else "unknown" + + +def _serialize_pairs(pairs: list[dict]) -> list[dict]: + return [ + { + "row_delta": int(p["row_delta"]), + "col_delta": int(p["col_delta"]), + "measured_dy": float(p["measured_dy"]), + "measured_dx": float(p["measured_dx"]), + } + for p in pairs + ] + + +def _analyze_slice( + zarr_path: Path, + slice_id: str, + overlap_fraction: float, + max_refinement_px: float, + json_dir: Path | None, +) -> dict[str, object]: + vol, _resolution = read_omezarr(Path(zarr_path), level=0) + + tile_shape = tuple(vol.chunks) + if len(tile_shape) != 3: + raise ValueError(f"Expected 3D mosaic grid, got chunks {tile_shape} for {zarr_path}") + + nx = vol.shape[1] // tile_shape[1] + ny = vol.shape[2] // tile_shape[2] + if nx == 0 or ny == 0: + raise ValueError( + f"Mosaic grid {zarr_path} has fewer than one full tile per axis (shape={vol.shape}, chunks={tile_shape})" + ) + + z_mid_full = vol.shape[0] // 2 + logger.info( + "slice %s: shape=%s tile=%s grid=%dx%d reading z_mid=%d", + slice_id, + tuple(vol.shape), + tile_shape, + nx, + ny, + z_mid_full, + ) + + z_plane = np.asarray(vol[z_mid_full : z_mid_full + 1]) + + refinements = compute_registration_refinements( + z_plane, + tile_shape, + nx, + ny, + overlap_fraction, + max_refinement_px=max_refinement_px, + ) + + pairs = refinements["pairs"] + if not pairs: + logger.warning("slice %s: no valid tile pairs produced by phase correlation", slice_id) + transform, diagnostics = estimate_affine_from_pairs(pairs, tile_shape, overlap_fraction) + + step_y = float(np.sqrt(transform[0, 0] ** 2 + transform[1, 0] ** 2)) + step_x = float(np.sqrt(transform[0, 1] ** 2 + transform[1, 1] ** 2)) + + stats = refinements["stats"] + row: dict[str, object] = { + "slice_id": slice_id, + "nx": int(nx), + "ny": int(ny), + "tile_h": int(tile_shape[1]), + "tile_w": int(tile_shape[2]), + "n_total_pairs": int(stats["total_pairs"]), + "n_valid_pairs": int(stats["valid_pairs"]), + "n_clamped_pairs": int(stats["clamped_pairs"]), + "mean_refinement_px": float(stats["mean_refinement"]), + "max_refinement_px": float(stats["max_refinement"]), + "A00": float(transform[0, 0]), + "A01": float(transform[0, 1]), + "A10": float(transform[1, 0]), + "A11": float(transform[1, 1]), + "step_y_px": step_y, + "step_x_px": step_x, + "theta_deg": float(diagnostics.get("theta_deg", float("nan"))), + "phi_deg": float(diagnostics.get("phi_deg", float("nan"))), + "Ox_fraction": float(diagnostics.get("Ox_fraction", float("nan"))), + "Oy_fraction": float(diagnostics.get("Oy_fraction", float("nan"))), + "expected_overlap": float(diagnostics.get("expected_overlap", overlap_fraction)), + "lstsq_residual": float(diagnostics.get("lstsq_residual", float("nan"))), + "fallback": bool(diagnostics.get("fallback", False)), + } + + if json_dir is not None: + json_dir.mkdir(parents=True, exist_ok=True) + payload = { + "slice_id": slice_id, + "tile_shape": list(tile_shape), + "grid_shape": [int(nx), int(ny)], + "overlap_fraction": overlap_fraction, + "transform": transform.tolist(), + "displacement_model": diagnostics, + "stats": stats, + "pairs": _serialize_pairs(pairs), + } + (json_dir / f"slice_z{slice_id}_affine.json").write_text(json.dumps(payload, indent=2, cls=_NumpyEncoder)) + + return row + + +def main() -> int: + """Run function.""" + parser = _build_arg_parser() + args = parser.parse_args() + + input_dir = Path(args.input_dir) + if not input_dir.is_dir(): + 
parser.error(f"Input directory does not exist: {input_dir}") + + zarr_paths = sorted(input_dir.glob(args.pattern)) + if args.skip_first > 0: + zarr_paths = zarr_paths[args.skip_first :] + if not zarr_paths: + parser.error(f"No mosaic grids matching {args.pattern!r} in {input_dir}") + + used_slices: set[str] | None = None + if args.slice_config is not None: + slice_config_path = Path(args.slice_config) + if not slice_config_path.exists(): + parser.error(f"slice_config.csv not found: {slice_config_path}") + used_slices = slice_config_io.filter_slices_to_use(slice_config_path) + logger.info("slice_config: %d slices marked as use=true", len(used_slices)) + + json_dir = Path(args.json_dir) if args.json_dir else None + + fieldnames = [ + "slice_id", + "nx", + "ny", + "tile_h", + "tile_w", + "n_total_pairs", + "n_valid_pairs", + "n_clamped_pairs", + "mean_refinement_px", + "max_refinement_px", + "A00", + "A01", + "A10", + "A11", + "step_y_px", + "step_x_px", + "theta_deg", + "phi_deg", + "Ox_fraction", + "Oy_fraction", + "expected_overlap", + "lstsq_residual", + "fallback", + ] + + output_csv = Path(args.output_csv) + output_csv.parent.mkdir(parents=True, exist_ok=True) + with output_csv.open("w", newline="") as fh: + writer = csv.DictWriter(fh, fieldnames=fieldnames) + writer.writeheader() + for zarr_path in zarr_paths: + slice_id = _extract_slice_id(zarr_path) + if used_slices is not None and slice_id not in used_slices: + logger.info("slice %s: skipped (slice_config use=false)", slice_id) + continue + try: + row = _analyze_slice( + zarr_path, + slice_id, + overlap_fraction=args.overlap_fraction, + max_refinement_px=args.max_refinement_px, + json_dir=json_dir, + ) + except Exception: + logger.exception("slice %s: analysis failed for %s", slice_id, zarr_path) + continue + writer.writerow(row) + fh.flush() + logger.info( + "slice %s: step_y=%.3f step_x=%.3f theta=%+.3f deg phi=%+.3f deg Ox=%.4f Oy=%.4f (%d valid / %d pairs)", + slice_id, + row["step_y_px"], + row["step_x_px"], + row["theta_deg"], + row["phi_deg"], + row["Ox_fraction"], + row["Oy_fraction"], + row["n_valid_pairs"], + row["n_total_pairs"], + ) + + logger.info("wrote %s", output_csv) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/diagnostics/linum_analyze_tile_dilation.py b/scripts/diagnostics/linum_analyze_tile_dilation.py new file mode 100644 index 00000000..389fa90d --- /dev/null +++ b/scripts/diagnostics/linum_analyze_tile_dilation.py @@ -0,0 +1,385 @@ +#!/usr/bin/env python3 +""" +Analyze tile dilation/scaling by comparing expected vs actual tile positions. + +This diagnostic tool examines the relationship between motor positions and +registration-derived positions to detect: + +1. **Global dilation**: Tiles spread more/less than expected (scale factor ≠ 1) +2. **Anisotropic scaling**: Different scale factors in X vs Y directions +3. **Progressive drift**: Error accumulating across the mosaic +4. **Local distortions**: Non-linear deformations in specific regions + +For troubleshooting 3D reconstruction artifacts in serial OCT microscopy, +particularly for obliquely-cut samples where physical vs measured positions +may diverge due to tissue deformation after slicing. 
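+
+Expected positions follow the ideal motor-grid model used below: with tile
+height H (or width W) and overlap fraction f, consecutive tiles are spaced
+H * (1 - f) pixels apart, so tile (i, j) is expected at
+(i * H * (1 - f), j * W * (1 - f)). For example (numbers illustrative only),
+400 px tiles at f = 0.1 give a 360 px step.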
+""" + +import linumpy.config.threads # noqa: F401 + +import argparse +import json +import logging +from pathlib import Path +from typing import Any + +import matplotlib.pyplot as plt +import numpy as np + +from linumpy.cli.args import add_overwrite_arg +from linumpy.io.zarr import read_omezarr + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + + +class NumpyEncoder(json.JSONEncoder): + """JSON encoder that handles numpy types.""" + + def default(self, o: Any) -> Any: + """Run function.""" + if isinstance(o, np.integer): + return int(o) + elif isinstance(o, np.floating): + return float(o) + elif isinstance(o, np.bool_): + return bool(o) + elif isinstance(o, np.ndarray): + return o.tolist() + return super().default(o) + + +def _build_arg_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + p.add_argument("input_volume", help="Path to mosaic grid volume (.ome.zarr)") + p.add_argument("input_transform", help="Path to registration transform (.npy)") + p.add_argument("out_directory", help="Output directory for analysis results") + + p.add_argument("--overlap_fraction", type=float, default=0.1, help="Expected overlap fraction between tiles [%(default)s]") + p.add_argument("--resolution", type=float, default=10.0, help="Resolution in µm/pixel [%(default)s]") + p.add_argument("--slice_id", type=str, default=None, help="Slice identifier for labeling outputs") + + add_overwrite_arg(p) + return p + + +def compute_expected_positions(nx: Any, ny: Any, tile_height: Any, tile_width: Any, overlap_fraction: Any) -> Any: + """Compute expected tile positions based on motor grid.""" + step_y = tile_height * (1.0 - overlap_fraction) + step_x = tile_width * (1.0 - overlap_fraction) + + positions = [(i * step_y, j * step_x) for i in range(nx) for j in range(ny)] + + return np.array(positions) + + +def compute_actual_positions(nx: Any, ny: Any, transform: Any) -> Any: + """Compute actual tile positions from registration transform.""" + positions = [] + for i in range(nx): + for j in range(ny): + pos = np.dot(transform, [i, j]) + positions.append(pos) + + return np.array(positions) + + +def estimate_scale_factors(expected: Any, actual: Any) -> dict: + """Estimate scale factors by comparing expected vs actual positions.""" + # Use linear regression to estimate scale factor + + from scipy import stats + + # Y direction (rows) + slope_y, intercept_y, r_y, _p_y, _se_y, _intercept_se_y = stats.linregress(expected[:, 0], actual[:, 0]) + + # X direction (cols) + slope_x, intercept_x, r_x, _p_x, _se_x, _intercept_se_x = stats.linregress(expected[:, 1], actual[:, 1]) + + return { + "scale_y": slope_y, + "scale_x": slope_x, + "offset_y": intercept_y, + "offset_x": intercept_x, + "r_squared_y": r_y**2, + "r_squared_x": r_x**2, + "anisotropy": abs(slope_y - slope_x), + "mean_scale": (slope_y + slope_x) / 2, + } + + +def analyze_residuals(expected: Any, actual: Any, scale_factors: Any) -> dict: + """Analyze residuals after removing estimated scale.""" + # Predicted positions using estimated scale + predicted_y = scale_factors["scale_y"] * expected[:, 0] + scale_factors["offset_y"] + predicted_x = scale_factors["scale_x"] * expected[:, 1] + scale_factors["offset_x"] + + # Residuals + residual_y = actual[:, 0] - predicted_y + residual_x = actual[:, 1] - predicted_x + residual_mag = np.sqrt(residual_y**2 + residual_x**2) + + return { + "residual_y": residual_y, + "residual_x": 
residual_x, + "residual_magnitude": residual_mag, + "mean_residual": float(np.mean(residual_mag)), + "max_residual": float(np.max(residual_mag)), + "std_residual": float(np.std(residual_mag)), + } + + +def detect_local_distortions(expected: Any, actual: Any, nx: Any, ny: Any) -> dict: + """Check for local distortions (non-linear deformations).""" + diff = actual - expected + + # Reshape to grid + diff_grid_y = diff[:, 0].reshape(nx, ny) + diff_grid_x = diff[:, 1].reshape(nx, ny) + + # Check for gradient (progressive error) + gradient_y = np.gradient(diff_grid_y, axis=0).mean() + gradient_x = np.gradient(diff_grid_x, axis=1).mean() + + # Check for curvature (non-linear distortion) + curvature_y = np.gradient(np.gradient(diff_grid_y, axis=0), axis=0).std() + curvature_x = np.gradient(np.gradient(diff_grid_x, axis=1), axis=1).std() + + return { + "gradient_y": float(gradient_y), + "gradient_x": float(gradient_x), + "curvature_y": float(curvature_y), + "curvature_x": float(curvature_x), + "has_progressive_error": bool(abs(gradient_y) > 0.5 or abs(gradient_x) > 0.5), + "has_curvature": bool(curvature_y > 1.0 or curvature_x > 1.0), + } + + +def generate_report( + _analysis: Any, + scale_factors: Any, + residuals: Any, + distortions: Any, + output_dir: Path, + slice_id: int | None = None, +) -> Path: + """Generate text report.""" + slice_label = f" (Slice {slice_id})" if slice_id else "" + + lines = [ + "=" * 70, + f"TILE DILATION/SCALING ANALYSIS{slice_label}", + "=" * 70, + "", + "SCALE FACTOR ANALYSIS", + "-" * 50, + f"Scale factor Y (rows): {scale_factors['scale_y']:.6f}", + f"Scale factor X (cols): {scale_factors['scale_x']:.6f}", + f"Mean scale factor: {scale_factors['mean_scale']:.6f}", + f"Anisotropy (|Sy - Sx|): {scale_factors['anisotropy']:.6f}", + f"Offset Y: {scale_factors['offset_y']:.2f} px", + f"Offset X: {scale_factors['offset_x']:.2f} px", + f"R² fit quality Y: {scale_factors['r_squared_y']:.6f}", + f"R² fit quality X: {scale_factors['r_squared_x']:.6f}", + "", + "INTERPRETATION", + "-" * 50, + ] + + # Interpret scale factors + scale_deviation = abs(scale_factors["mean_scale"] - 1.0) + if scale_deviation < 0.001: + lines.append("✓ Scale factor ~1.0: No significant dilation detected") + elif scale_factors["mean_scale"] > 1.0: + lines.append(f"⚠ Scale > 1.0: Tiles spread MORE than expected ({scale_deviation * 100:.2f}% expansion)") + lines.append(" → Possible cause: Tissue relaxation/expansion after cutting") + else: + lines.append(f"⚠ Scale < 1.0: Tiles spread LESS than expected ({scale_deviation * 100:.2f}% contraction)") + lines.append(" → Possible cause: Stage calibration error or tissue shrinkage") + + if scale_factors["anisotropy"] > 0.005: + lines.append(f"⚠ Anisotropic scaling detected: X/Y scales differ by {scale_factors['anisotropy']:.4f}") + lines.append(" → May cause edge misalignment in 3D reconstruction") + + lines.extend( + [ + "", + "RESIDUAL ANALYSIS (after scale correction)", + "-" * 50, + f"Mean residual: {residuals['mean_residual']:.2f} px", + f"Max residual: {residuals['max_residual']:.2f} px", + f"Std residual: {residuals['std_residual']:.2f} px", + ] + ) + + lines.extend( + [ + "", + "LOCAL DISTORTION ANALYSIS", + "-" * 50, + f"Progressive error (gradient Y): {distortions['gradient_y']:.4f} px/tile", + f"Progressive error (gradient X): {distortions['gradient_x']:.4f} px/tile", + f"Non-linearity (curvature Y): {distortions['curvature_y']:.4f}", + f"Non-linearity (curvature X): {distortions['curvature_x']:.4f}", + ] + ) + + if 
distortions["has_progressive_error"]: + lines.append("⚠ Progressive error detected: Position error grows across mosaic") + if distortions["has_curvature"]: + lines.append("⚠ Non-linear distortion detected: Local deformations present") + + lines.extend( + [ + "", + "=" * 70, + ] + ) + + report_path = Path(output_dir) / "dilation_analysis.txt" + with Path(report_path).open("w") as f: + f.write("\n".join(lines)) + + logger.info("Report saved to %s", report_path) + return report_path + + +def generate_plots( + expected: Any, + actual: Any, + residuals: Any, + nx: Any, + ny: Any, + output_dir: Path, + slice_id: int | None = None, +) -> Path: + """Generate visualization plots.""" + fig, axes = plt.subplots(2, 2, figsize=(14, 12)) + + diff = actual - expected + + # 1. Vector field showing displacement + ax1 = axes[0, 0] + ax1.quiver(expected[:, 1], expected[:, 0], diff[:, 1], diff[:, 0], angles="xy", scale_units="xy", scale=1, alpha=0.7) + ax1.scatter(expected[:, 1], expected[:, 0], c="blue", s=20, alpha=0.5, label="Expected") + ax1.scatter(actual[:, 1], actual[:, 0], c="red", s=20, alpha=0.5, label="Actual") + ax1.set_xlabel("X position (pixels)") + ax1.set_ylabel("Y position (pixels)") + ax1.set_title("Tile Displacement Vectors") + ax1.legend() + ax1.invert_yaxis() + + # 2. Displacement magnitude heatmap + ax2 = axes[0, 1] + diff_mag = np.sqrt(diff[:, 0] ** 2 + diff[:, 1] ** 2) + diff_grid = diff_mag.reshape(nx, ny) + im = ax2.imshow(diff_grid, cmap="hot", interpolation="nearest") + ax2.set_xlabel("Tile X index") + ax2.set_ylabel("Tile Y index") + ax2.set_title("Displacement Magnitude (pixels)") + plt.colorbar(im, ax=ax2) + + # 3. Expected vs Actual positions (scatter) + ax3 = axes[1, 0] + ax3.scatter(expected[:, 1], actual[:, 1], alpha=0.5, label="X positions") + ax3.scatter(expected[:, 0], actual[:, 0], alpha=0.5, label="Y positions") + max_val = max(expected.max(), actual.max()) + ax3.plot([0, max_val], [0, max_val], "k--", label="Perfect fit") + ax3.set_xlabel("Expected position (pixels)") + ax3.set_ylabel("Actual position (pixels)") + ax3.set_title("Expected vs Actual Positions") + ax3.legend() + + # 4. 
Residuals distribution + ax4 = axes[1, 1] + ax4.hist(residuals["residual_magnitude"], bins=30, edgecolor="black", alpha=0.7) + ax4.axvline(residuals["mean_residual"], color="red", linestyle="--", label=f"Mean: {residuals['mean_residual']:.1f} px") + ax4.set_xlabel("Residual magnitude (pixels)") + ax4.set_ylabel("Count") + ax4.set_title("Residual Distribution (after scale correction)") + ax4.legend() + + slice_label = f" (Slice {slice_id})" if slice_id else "" + fig.suptitle(f"Tile Dilation Analysis{slice_label}", fontsize=14) + plt.tight_layout() + + plot_path = Path(output_dir) / "dilation_analysis.png" + plt.savefig(plot_path, dpi=150) + plt.close() + + logger.info("Plots saved to %s", plot_path) + return plot_path + + +def main() -> None: + """Run function.""" + p = _build_arg_parser() + args = p.parse_args() + + input_file = Path(args.input_volume) + transform_file = Path(args.input_transform) + output_dir = Path(args.out_directory) + output_dir.mkdir(parents=True, exist_ok=True) + + # Load mosaic grid to get tile shape + logger.info("Loading mosaic grid metadata from %s", input_file) + volume, _resolution = read_omezarr(Path(input_file), level=0) + tile_shape = volume.chunks + + nx = volume.shape[1] // tile_shape[1] + ny = volume.shape[2] // tile_shape[2] + logger.info("Grid: %s x %s tiles, tile shape: %s", nx, ny, tile_shape) + + # Load transform + transform = np.load(transform_file) + logger.info("Transform matrix:\n%s", transform) + + # Compute positions + expected = compute_expected_positions(nx, ny, tile_shape[1], tile_shape[2], args.overlap_fraction) + actual = compute_actual_positions(nx, ny, transform) + + # Analysis + scale_factors = estimate_scale_factors(expected, actual) + residuals = analyze_residuals(expected, actual, scale_factors) + distortions = detect_local_distortions(expected, actual, nx, ny) + + # Compile full analysis + analysis = { + "slice_id": args.slice_id, + "grid_size": [nx, ny], + "tile_shape": list(tile_shape), + "resolution_um": args.resolution, + "overlap_fraction": args.overlap_fraction, + "scale_factors": scale_factors, + "residuals": {k: v for k, v in residuals.items() if not isinstance(v, np.ndarray)}, + "distortions": distortions, + } + + # Save JSON + json_path = output_dir / "dilation_analysis.json" + with Path(json_path).open("w") as f: + json.dump(analysis, f, indent=2, cls=NumpyEncoder) + logger.info("Analysis JSON saved to %s", json_path) + + # Generate outputs + generate_report(analysis, scale_factors, residuals, distortions, output_dir, args.slice_id) + generate_plots(expected, actual, residuals, nx, ny, output_dir, args.slice_id) + + # Print summary + print("\n" + "=" * 50) + print("SUMMARY") + print("=" * 50) + print(f"Mean scale factor: {scale_factors['mean_scale']:.4f}") + if abs(scale_factors["mean_scale"] - 1.0) > 0.001: + deviation = (scale_factors["mean_scale"] - 1.0) * 100 + direction = "expansion" if deviation > 0 else "contraction" + print(f"⚠ {abs(deviation):.2f}% {direction} detected") + else: + print("✓ No significant dilation") + + if scale_factors["anisotropy"] > 0.005: + print(f"⚠ Anisotropic scaling: X/Y differ by {scale_factors['anisotropy']:.4f}") + + +if __name__ == "__main__": + main() diff --git a/scripts/diagnostics/linum_benchmark_gpu.py b/scripts/diagnostics/linum_benchmark_gpu.py new file mode 100644 index 00000000..71d0e094 --- /dev/null +++ b/scripts/diagnostics/linum_benchmark_gpu.py @@ -0,0 +1,507 @@ +#!/usr/bin/env python3 +""" +Benchmark GPU vs CPU performance for linumpy operations. 
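+On exit the process returns status code 0 when a GPU was available and 1
+otherwise, so the script can double as a quick availability probe in CI.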
+
+This script tests all GPU-accelerated operations and compares their
+performance against CPU implementations. It also verifies that results
+are numerically equivalent.
+
+Usage:
+    # Quick benchmark with synthetic data
+    linum_benchmark_gpu.py
+
+    # Benchmark with real data
+    linum_benchmark_gpu.py --input /path/to/mosaic.ome.zarr
+
+    # Full benchmark with multiple sizes
+    linum_benchmark_gpu.py --full
+
+    # Save results to file
+    linum_benchmark_gpu.py --output benchmark_results.json
+"""
+
+# Configure thread limits before numpy/scipy imports
+import linumpy.config.threads  # noqa: F401
+
+import argparse
+import contextlib
+import json
+import sys
+import time
+from datetime import datetime
+from pathlib import Path
+from typing import Any
+
+import numpy as np
+
+# Import GPU module
+from linumpy.gpu import GPU_AVAILABLE, gpu_info, print_gpu_info
+
+
+def _build_arg_parser() -> argparse.ArgumentParser:
+    p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
+    p.add_argument("--input", type=str, help="Path to OME-Zarr file for real-data benchmark")
+    p.add_argument("--output", "-o", type=str, help="Save results to JSON file")
+    p.add_argument("--iterations", "-n", type=int, default=3, help="Number of iterations per test [%(default)s]")
+    p.add_argument("--full", action="store_true", help="Run full benchmark with multiple sizes")
+    p.add_argument("--skip-correctness", action="store_true", help="Skip result correctness checks")
+    p.add_argument("--sizes", nargs="+", type=int, default=[512, 1024, 2048], help="Image sizes to test [%(default)s]")
+    p.add_argument("--select-best-gpu", action="store_true", help="Automatically select GPU with most free memory")
+    p.add_argument("--gpu", type=int, metavar="ID", help="Select specific GPU by ID")
+    return p
+
+
+class BenchmarkTimer:
+    """Context manager for timing operations."""
+
+    def __init__(self) -> None:
+        self.elapsed = 0
+
+    def __enter__(self) -> "BenchmarkTimer":
+        """Start the wall-clock timer."""
+        self.start = time.perf_counter()
+        return self
+
+    def __exit__(self, *args: Any) -> None:
+        """Stop the timer and store the elapsed time in seconds."""
+        self.elapsed = time.perf_counter() - self.start
+
+
+def benchmark_operation(
+    func_cpu: Any,
+    func_gpu: Any,
+    data: Any,
+    name: str,
+    iterations: int = 3,
+    check_correctness: bool = True,
+) -> dict[str, Any]:
+    """
+    Benchmark a single operation comparing CPU and GPU.
+
+    Returns dict with timing results.
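+
+    The dict holds the raw timings ("cpu_times"/"gpu_times"), their
+    aggregates ("cpu_mean", "cpu_std", "gpu_mean", "gpu_std"), the derived
+    "speedup" (cpu_mean / gpu_mean) and, when the correctness check runs,
+    "correct", "max_diff", "mean_diff" and "rel_error".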
+    """
+    results: dict[str, Any] = {
+        "name": name,
+        "cpu_times": [],
+        "gpu_times": [],
+        "correct": None,
+        "max_diff": None,
+    }
+
+    result_cpu: Any = None
+    result_gpu: Any = None
+
+    # Warmup GPU
+    if GPU_AVAILABLE:
+        with contextlib.suppress(Exception):
+            _ = func_gpu(data)
+
+    # CPU benchmark
+    for _ in range(iterations):
+        with BenchmarkTimer() as t:
+            result_cpu = func_cpu(data)
+        results["cpu_times"].append(t.elapsed)
+
+    # GPU benchmark
+    if GPU_AVAILABLE:
+        for _ in range(iterations):
+            with BenchmarkTimer() as t:
+                result_gpu = func_gpu(data)
+            results["gpu_times"].append(t.elapsed)
+
+        # Check correctness
+        if check_correctness and result_cpu is not None and result_gpu is not None:
+            try:
+                if isinstance(result_cpu, tuple):
+                    result_cpu = result_cpu[0]
+                    result_gpu = result_gpu[0]
+
+                cpu_arr = np.asarray(result_cpu)
+                gpu_arr = np.asarray(result_gpu)
+
+                # Handle complex arrays by comparing magnitudes
+                if np.iscomplexobj(cpu_arr) or np.iscomplexobj(gpu_arr):
+                    cpu_arr = np.abs(cpu_arr)
+                    gpu_arr = np.abs(gpu_arr)
+
+                cpu_arr = cpu_arr.astype(np.float64)
+                gpu_arr = gpu_arr.astype(np.float64)
+
+                # Compute various error metrics
+                abs_diff = np.abs(cpu_arr - gpu_arr)
+                max_diff = np.max(abs_diff)
+                mean_diff = np.mean(abs_diff)
+
+                # Relative error (avoid division by zero)
+                cpu_max = np.max(np.abs(cpu_arr))
+                rel_error = max_diff / cpu_max if cpu_max > 1e-10 else max_diff
+
+                results["max_diff"] = float(max_diff)
+                results["mean_diff"] = float(mean_diff)
+                results["rel_error"] = float(rel_error)
+
+                # Use relative tolerance for correctness check:
+                # 1e-4 relative error is acceptable for float32 operations
+                results["correct"] = rel_error < 1e-4
+            except Exception as e:
+                results["correct"] = f"Check failed: {e}"
+    else:
+        results["gpu_times"] = [float("nan")] * iterations
+        results["correct"] = "GPU not available"
+
+    # Compute statistics
+    results["cpu_mean"] = np.mean(results["cpu_times"])
+    results["cpu_std"] = np.std(results["cpu_times"])
+    results["gpu_mean"] = np.mean(results["gpu_times"]) if GPU_AVAILABLE else float("nan")
+    results["gpu_std"] = np.std(results["gpu_times"]) if GPU_AVAILABLE else float("nan")
+    results["speedup"] = results["cpu_mean"] / results["gpu_mean"] if GPU_AVAILABLE else float("nan")
+
+    return results
+
+
+def benchmark_fft(size: int, iterations: int = 3, check_correctness: bool = True) -> Any:
+    """Benchmark FFT operations."""
+    from linumpy.gpu.fft_ops import fft2
+
+    data = np.random.rand(size, size).astype(np.float32)
+
+    def cpu_fft(d: Any) -> Any:
+        return np.fft.fft2(d)
+
+    def gpu_fft(d: Any) -> Any:
+        return fft2(d, use_gpu=True)
+
+    return benchmark_operation(cpu_fft, gpu_fft, data, f"FFT2 ({size}x{size})", iterations, check_correctness)
+
+
+def benchmark_phase_correlation(size: int, iterations: int = 3, _check_correctness: bool = True) -> Any:
+    """Benchmark phase correlation."""
+    from linumpy.gpu.fft_ops import phase_correlation
+    from linumpy.registration.transforms import pair_wise_phase_correlation
+
+    # Create two slightly shifted images
+    img1 = np.random.rand(size, size).astype(np.float32)
+    img2 = np.roll(img1, (5, 10), axis=(0, 1))
+
+    def cpu_pc(d: Any) -> Any:
+        return pair_wise_phase_correlation(d[0], d[1], return_cc=True)
+
+    def gpu_pc(d: Any) -> Any:
+        return phase_correlation(d[0], d[1], use_gpu=True)
+
+    data = (img1, img2)
+
+    # Correctness check disabled: CPU and GPU peak localisation may differ slightly
+    return benchmark_operation(
+        cpu_pc, gpu_pc, data, f"Phase Correlation ({size}x{size})", iterations, check_correctness=False
+    )
+
+
+def 
benchmark_gaussian_filter(size: int, iterations: int = 3, check_correctness: bool = True) -> Any: + """Benchmark Gaussian filtering.""" + from scipy.ndimage import gaussian_filter as scipy_gaussian + + from linumpy.gpu.morphology import gaussian_filter + + data = np.random.rand(size, size).astype(np.float32) + sigma = 2.0 + + def cpu_gauss(d: Any) -> Any: + return scipy_gaussian(d, sigma=sigma) + + def gpu_gauss(d: Any) -> Any: + return gaussian_filter(d, sigma=sigma, use_gpu=True) + + return benchmark_operation(cpu_gauss, gpu_gauss, data, f"Gaussian Filter ({size}x{size})", iterations, check_correctness) + + +def benchmark_binary_closing(size: int, iterations: int = 3, check_correctness: bool = True) -> Any: + """Benchmark binary morphology.""" + from scipy.ndimage import binary_closing as scipy_closing + + from linumpy.gpu.morphology import binary_closing + + # Create random binary mask + data = (np.random.rand(size, size) > 0.5).astype(np.bool_) + + def cpu_close(d: Any) -> Any: + return scipy_closing(d, iterations=2) + + def gpu_close(d: Any) -> Any: + return binary_closing(d, iterations=2, use_gpu=True) + + return benchmark_operation(cpu_close, gpu_close, data, f"Binary Closing ({size}x{size})", iterations, check_correctness) + + +def benchmark_resize(size: int, iterations: int = 3, check_correctness: bool = True) -> Any: + """Benchmark image resize.""" + from linumpy.gpu.interpolation import resize + + data = np.random.rand(size, size).astype(np.float32) + output_size = (size // 2, size // 2) + + def cpu_resize(d: Any) -> Any: + # Use the same function with use_gpu=False for fair comparison + return resize(d, output_size, order=1, anti_aliasing=False, use_gpu=False) + + def gpu_resize(d: Any) -> Any: + return resize(d, output_size, order=1, anti_aliasing=False, use_gpu=True) + + return benchmark_operation(cpu_resize, gpu_resize, data, f"Resize ({size}→{size // 2})", iterations, check_correctness) + + +def benchmark_rescale_3d(size: int, iterations: int = 3, check_correctness: bool = True) -> Any: + """Benchmark 3D volume rescaling (like linum_resample_mosaic_grid).""" + from linumpy.gpu.interpolation import resize + + # Create 3D volume (typical OCT tile: depth x height x width) + depth = size // 4 # Typically depth is smaller than XY + data = np.random.rand(depth, size, size).astype(np.float32) + + # Rescale by 0.5 in each dimension (typical downsampling) + scale_factor = 0.5 + output_size = (int(depth * scale_factor), int(size * scale_factor), int(size * scale_factor)) + + def cpu_rescale(d: Any) -> Any: + return resize(d, output_size, order=1, anti_aliasing=True, use_gpu=False) + + def gpu_rescale(d: Any) -> Any: + return resize(d, output_size, order=1, anti_aliasing=True, use_gpu=True) + + return benchmark_operation( + cpu_rescale, + gpu_rescale, + data, + f"Rescale 3D ({depth}x{size}x{size}→{output_size[0]}x{output_size[1]}x{output_size[2]})", + iterations, + check_correctness, + ) + + +def benchmark_normalize(size: int, iterations: int = 3, check_correctness: bool = True) -> Any: + """Benchmark percentile normalization.""" + from linumpy.gpu.array_ops import normalize_percentile + + data = np.random.rand(size, size).astype(np.float32) * 1000 + + def cpu_norm(d: Any) -> Any: + low, high = np.percentile(d, [1, 99]) + return np.clip((d - low) / (high - low), 0, 1) + + def gpu_norm(d: Any) -> Any: + return normalize_percentile(d, p_low=1, p_high=99, use_gpu=True) + + return benchmark_operation(cpu_norm, gpu_norm, data, f"Normalize ({size}x{size})", iterations, check_correctness) + 
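+# For orientation, one row of the table printed by print_results() comes from
+# a result dict like the following (timings invented for illustration only):
+#
+#     {"name": "Normalize (1024x1024)", "cpu_mean": 0.012, "gpu_mean": 0.003,
+#      "speedup": 4.0, "correct": True, "rel_error": 3.1e-07, ...}
+#
+# where "speedup" is cpu_mean / gpu_mean and "correct" applies the 1e-4
+# relative-error tolerance used in benchmark_operation().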
+ +def benchmark_intensity_normalization(size: int, iterations: int = 3, check_correctness: bool = True) -> Any: + """Benchmark intensity normalization operations.""" + from linumpy.gpu.array_ops import normalize_percentile, threshold_otsu + from linumpy.gpu.morphology import gaussian_filter + + data = np.random.rand(size, size).astype(np.float32) * 1000 + + def cpu_norm(d: Any) -> Any: + # Simulate intensity normalization operations + from scipy.ndimage import gaussian_filter as scipy_gaussian + + smoothed = scipy_gaussian(d, sigma=1.0) + from skimage.filters import threshold_otsu as sk_otsu + + threshold = sk_otsu(smoothed) + _mask = smoothed > threshold + low, high = np.percentile(smoothed, [1, 99]) + normalized = np.clip((smoothed - low) / (high - low), 0, 1) + return normalized + + def gpu_norm(d: Any) -> Any: + # Simulate intensity normalization operations + smoothed = gaussian_filter(d, sigma=1.0, use_gpu=True) + threshold = threshold_otsu(smoothed, use_gpu=True) + _mask = smoothed > threshold + normalized = normalize_percentile(smoothed, p_low=1, p_high=99, use_gpu=True) + return normalized + + return benchmark_operation( + cpu_norm, gpu_norm, data, f"Intensity Normalization ({size}x{size})", iterations, check_correctness + ) + + +def benchmark_real_data(input_path: Path, iterations: int = 3) -> Any: + """Benchmark with real OME-Zarr data.""" + from linumpy.gpu.morphology import gaussian_filter + from linumpy.io.zarr import read_omezarr + + print(f"\nLoading real data from: {input_path}") + vol, _res = read_omezarr(input_path, level=0) + + # Load a manageable chunk + chunk_size = min(100, vol.shape[0]) + data = np.array(vol[:chunk_size]) + print(f"Loaded chunk shape: {data.shape}") + + results = [] + + # Gaussian on real data AIP + aip = np.mean(data, axis=0).astype(np.float32) + from scipy.ndimage import gaussian_filter as scipy_gaussian + + def cpu_gauss(d: Any) -> Any: + return scipy_gaussian(d, sigma=2.0) + + def gpu_gauss(d: Any) -> Any: + return gaussian_filter(d, sigma=2.0, use_gpu=True) + + results.append(benchmark_operation(cpu_gauss, gpu_gauss, aip, f"Real Data Gaussian {aip.shape}", iterations)) + + return results + + +def print_results(all_results: Any, gpu_info_dict: Any) -> None: + """Print formatted benchmark results.""" + print("\n" + "=" * 90) + print("BENCHMARK RESULTS") + print("=" * 90) + print(f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") + print(f"GPU: {gpu_info_dict['device_name']}") + print(f"GPU Memory: {gpu_info_dict['memory_gb']:.1f} GB") + print(f"GPU Available: {gpu_info_dict['gpu_available']}") + print("=" * 90) + + print(f"\n{'Operation':<40} {'CPU (ms)':<12} {'GPU (ms)':<12} {'Speedup':<10} {'Correct':<8} {'Rel Err':<12}") + print("-" * 94) + + for result in all_results: + cpu_ms = result["cpu_mean"] * 1000 + gpu_ms = result["gpu_mean"] * 1000 if not np.isnan(result["gpu_mean"]) else float("nan") + speedup = result["speedup"] if not np.isnan(result["speedup"]) else float("nan") + correct = "✓" if result.get("correct") is True else ("N/A" if result.get("correct") is None else "✗") + rel_err = result.get("rel_error", float("nan")) + + if np.isnan(gpu_ms): + print(f"{result['name']:<40} {cpu_ms:>10.2f} {'N/A':^10} {'N/A':^8} {correct:^6} {'N/A':^10}") + else: + rel_err_str = f"{rel_err:.2e}" if not np.isnan(rel_err) else "N/A" + print( + f"{result['name']:<40} {cpu_ms:>10.2f} {gpu_ms:>10.2f} {speedup:>7.1f}x {correct:^6} {rel_err_str:>10}" + ) + + print("-" * 94) + + # Summary statistics + valid_speedups = [r["speedup"] for r in all_results 
if not np.isnan(r["speedup"])] + if valid_speedups: + print(f"\nAverage speedup: {np.mean(valid_speedups):.1f}x") + print(f"Max speedup: {np.max(valid_speedups):.1f}x") + print(f"Min speedup: {np.min(valid_speedups):.1f}x") + + +def main() -> None: + """Run function.""" + parser = _build_arg_parser() + args = parser.parse_args() + + # Handle GPU selection + if args.select_best_gpu: + from linumpy.gpu import select_best_gpu + + select_best_gpu(verbose=True) + print() + elif args.gpu is not None: + from linumpy.gpu import select_gpu + + select_gpu(args.gpu, verbose=True) + print() + + # Print GPU info + print_gpu_info() + info = gpu_info() + + if not info["gpu_available"]: + print("\n⚠️ WARNING: GPU not available. Only CPU benchmarks will run.\n") + + all_results = [] + iterations = args.iterations + check_correctness = not args.skip_correctness + + # Synthetic data benchmarks + sizes = args.sizes if args.full else [args.sizes[0]] + + print(f"\nRunning benchmarks with {iterations} iterations per test...") + print(f"Testing sizes: {sizes}") + + for size in sizes: + print(f"\n--- Size: {size}x{size} ---") + + # FFT + print(" Testing FFT...", end=" ", flush=True) + all_results.append(benchmark_fft(size, iterations, check_correctness)) + print("done") + + # Phase correlation + print(" Testing Phase Correlation...", end=" ", flush=True) + all_results.append(benchmark_phase_correlation(size, iterations, check_correctness)) + print("done") + + # Gaussian filter + print(" Testing Gaussian Filter...", end=" ", flush=True) + all_results.append(benchmark_gaussian_filter(size, iterations, check_correctness)) + print("done") + + # Binary closing + print(" Testing Binary Closing...", end=" ", flush=True) + all_results.append(benchmark_binary_closing(size, iterations, check_correctness)) + print("done") + + # Resize + print(" Testing Resize...", end=" ", flush=True) + all_results.append(benchmark_resize(size, iterations, check_correctness)) + print("done") + + # Rescale 3D (like linum_resample_mosaic_grid_gpu) + print(" Testing Rescale 3D...", end=" ", flush=True) + all_results.append(benchmark_rescale_3d(size, iterations, check_correctness)) + print("done") + + # Normalize + print(" Testing Normalize...", end=" ", flush=True) + all_results.append(benchmark_normalize(size, iterations, check_correctness)) + print("done") + + # Intensity normalization + print(" Testing Intensity Normalization...", end=" ", flush=True) + all_results.append(benchmark_intensity_normalization(size, iterations, check_correctness)) + print("done") + + # Real data benchmark + if args.input: + print("\n--- Real Data Benchmark ---") + real_results = benchmark_real_data(args.input, iterations) + all_results.extend(real_results) + + # Print results + print_results(all_results, info) + + # Save results + if args.output: + output_data = { + "timestamp": datetime.now().isoformat(), + "gpu_info": info, + "parameters": { + "iterations": iterations, + "sizes": sizes, + "input_file": args.input, + }, + "results": all_results, + } + + with Path(args.output).open("w", encoding="utf-8") as f: + json.dump(output_data, f, indent=2, default=str) + + print(f"\nResults saved to: {args.output}") + + # Return exit code based on GPU availability + sys.exit(0 if info["gpu_available"] else 1) + + +if __name__ == "__main__": + main() diff --git a/scripts/diagnostics/linum_compare_stitching.py b/scripts/diagnostics/linum_compare_stitching.py new file mode 100644 index 00000000..e5d7c386 --- /dev/null +++ b/scripts/diagnostics/linum_compare_stitching.py @@ 
-0,0 +1,153 @@ +#!/usr/bin/env python3 +"""Compare stitching results between different methods.""" + +import linumpy.config.threads # noqa: F401 + +import argparse +import json +from pathlib import Path +from typing import Any + +import matplotlib.pyplot as plt +import numpy as np +from scipy.ndimage import sobel + +from linumpy.io.zarr import read_omezarr + + +def _build_arg_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + p.add_argument("volume1", help="First stitched volume (.ome.zarr)") + p.add_argument("volume2", help="Second stitched volume (.ome.zarr)") + p.add_argument("output_dir", help="Output directory for comparison results") + p.add_argument("--label1", type=str, default="Motor-only", help="Label for first volume [%(default)s]") + p.add_argument("--label2", type=str, default="Refined", help="Label for second volume [%(default)s]") + p.add_argument("--z_slice", type=int, default=None, help="Z-slice to compare. Uses middle slice when None.") + p.add_argument("--tile_step", type=int, default=60, help="Approximate tile step for seam detection [%(default)s]") + return p + + +def compute_seam_sharpness(slice_data: Any, seam_positions: Any, width: int = 50, direction: str = "vertical") -> Any: + """Compute edge sharpness at seams (lower = smoother blending).""" + grad = sobel(slice_data, axis=1 if direction == "vertical" else 0) + values = [] + hw = width // 2 + for pos in seam_positions: + if direction == "vertical": + start = max(0, pos - hw) + end = min(slice_data.shape[1], pos + hw) + region = grad[:, start:end] + else: + start = max(0, pos - hw) + end = min(slice_data.shape[0], pos + hw) + region = grad[start:end, :] + values.append(np.mean(np.abs(region))) + return np.mean(values) if values else 0, np.std(values) if values else 0 + + +def main() -> None: + """Run function.""" + p = _build_arg_parser() + args = p.parse_args() + + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + print(f"Loading {args.label1}: {args.volume1}") + vol1, _ = read_omezarr(args.volume1, level=0) + vol1 = np.array(vol1[:]) + + print(f"Loading {args.label2}: {args.volume2}") + vol2, _ = read_omezarr(args.volume2, level=0) + vol2 = np.array(vol2[:]) + + # Crop to common shape + min_shape = tuple(min(vol1.shape[i], vol2.shape[i]) for i in range(3)) + vol1 = vol1[: min_shape[0], : min_shape[1], : min_shape[2]] + vol2 = vol2[: min_shape[0], : min_shape[1], : min_shape[2]] + + z_idx = args.z_slice if args.z_slice is not None else vol1.shape[0] // 2 + s1, s2 = vol1[z_idx], vol2[z_idx] + + print(f"Comparing z-slice {z_idx}") + + # Detect seam positions + v_seams = list(range(args.tile_step, s1.shape[1] - 30, args.tile_step)) + h_seams = list(range(args.tile_step, s1.shape[0] - 30, args.tile_step)) + + # Compute metrics + m1_v, _ = compute_seam_sharpness(s1, v_seams, 50, "vertical") + m1_h, _ = compute_seam_sharpness(s1, h_seams, 50, "horizontal") + m2_v, _ = compute_seam_sharpness(s2, v_seams, 50, "vertical") + m2_h, _ = compute_seam_sharpness(s2, h_seams, 50, "horizontal") + + diff = np.abs(s1.astype(float) - s2.astype(float)) + + # Create comparison figure + _fig, axes = plt.subplots(2, 3, figsize=(15, 10)) + vmin = min(s1.min(), s2.min()) + vmax = max(np.percentile(s1, 99), np.percentile(s2, 99)) + + axes[0, 0].imshow(s1, cmap="gray", vmin=vmin, vmax=vmax) + axes[0, 0].set_title(args.label1) + axes[0, 0].axis("off") + + axes[0, 1].imshow(s2, cmap="gray", vmin=vmin, vmax=vmax) + 
axes[0, 1].set_title(args.label2) + axes[0, 1].axis("off") + + im = axes[0, 2].imshow(diff, cmap="hot", vmin=0, vmax=np.percentile(diff, 99)) + axes[0, 2].set_title("Absolute Difference") + axes[0, 2].axis("off") + plt.colorbar(im, ax=axes[0, 2], fraction=0.046) + + # Zoomed region + cy, cx = s1.shape[0] // 2, s1.shape[1] // 2 + z_size = 100 + axes[1, 0].imshow(s1[cy - z_size : cy + z_size, cx - z_size : cx + z_size], cmap="gray", vmin=vmin, vmax=vmax) + axes[1, 0].set_title(f"{args.label1} (zoom)") + axes[1, 0].axis("off") + + axes[1, 1].imshow(s2[cy - z_size : cy + z_size, cx - z_size : cx + z_size], cmap="gray", vmin=vmin, vmax=vmax) + axes[1, 1].set_title(f"{args.label2} (zoom)") + axes[1, 1].axis("off") + + axes[1, 2].imshow(diff[cy - z_size : cy + z_size, cx - z_size : cx + z_size], cmap="hot") + axes[1, 2].set_title("Difference (zoom)") + axes[1, 2].axis("off") + + plt.tight_layout() + plt.savefig(output_dir / "comparison.png", dpi=150, bbox_inches="tight") + plt.close() + + # Save metrics + metrics = { + "z_slice": z_idx, + "tile_step": args.tile_step, + args.label1: {"vertical_seam_sharpness": float(m1_v), "horizontal_seam_sharpness": float(m1_h)}, + args.label2: {"vertical_seam_sharpness": float(m2_v), "horizontal_seam_sharpness": float(m2_h)}, + "difference": {"mean": float(np.mean(diff)), "max": float(np.max(diff)), "std": float(np.std(diff))}, + } + + with Path(output_dir / "metrics.json").open("w") as f: + json.dump(metrics, f, indent=2) + + # Print summary + print("\n" + "=" * 50) + print("COMPARISON SUMMARY") + print("=" * 50) + print("\nSeam Sharpness (lower = smoother blending):") + print(f" {args.label1}:") + print(f" Vertical seams: {m1_v:.4f}") + print(f" Horizontal seams: {m1_h:.4f}") + print(f" {args.label2}:") + print(f" Vertical seams: {m2_v:.4f}") + print(f" Horizontal seams: {m2_h:.4f}") + print(f"\nDifference ({args.label1} vs {args.label2}):") + print(f" Mean: {np.mean(diff):.4f}") + print(f" Max: {np.max(diff):.4f}") + print(f"\nOutputs saved to: {output_dir}") + + +if __name__ == "__main__": + main() diff --git a/scripts/diagnostics/linum_diagnose_pipeline.py b/scripts/diagnostics/linum_diagnose_pipeline.py new file mode 100644 index 00000000..b1f2ed7d --- /dev/null +++ b/scripts/diagnostics/linum_diagnose_pipeline.py @@ -0,0 +1,872 @@ +#!/usr/bin/env python3 +""" +Diagnostic script for linumpy 3D reconstruction pipeline performance. 
+ +This script checks the server configuration to identify bottlenecks: +- CPU core detection and thread configuration +- GPU availability and CUDA setup (CuPy for linumpy GPU ops; PyTorch for BaSiCPy) +- Memory availability +- Nextflow parameter recommendations +- Performance baseline tests + +Usage: + linum_diagnose_pipeline.py # Full diagnostic + linum_diagnose_pipeline.py --quick # Quick system check only + linum_diagnose_pipeline.py --benchmark # Include performance benchmarks + linum_diagnose_pipeline.py --debug-cuda # Detailed CUDA library debugging + linum_diagnose_pipeline.py --verbose # Show full error tracebacks + linum_diagnose_pipeline.py --output report.txt # Save results to file +""" + +import argparse +import glob +import json +import multiprocessing +import os +import subprocess +import sys +import time +from datetime import datetime +from typing import Any + + +def get_terminal_width() -> Any: + """Get terminal width for formatting.""" + try: + return os.get_terminal_size().columns + except OSError: + return 80 + + +def print_header(title: str) -> None: + """Print a section header.""" + width = get_terminal_width() + print("\n" + "=" * width) + print(f" {title}") + print("=" * width) + + +def print_subheader(title: str) -> None: + """Print a subsection header.""" + print(f"\n--- {title} ---") + + +class SystemDiagnostics: + """System diagnostics collector.""" + + def __init__(self, verbose: bool = False) -> None: + self.verbose = verbose + self.results: dict[str, Any] = { + "timestamp": datetime.now().isoformat(), + "cpu": {}, + "memory": {}, + "gpu": {}, + "python": {}, + "nextflow": {}, + "linumpy": {}, + "issues": [], + "recommendations": [], + } + + def check_cpu(self) -> Any: + """Check CPU configuration.""" + print_header("CPU Configuration") + + # Physical CPU info + total_cpus = multiprocessing.cpu_count() + self.results["cpu"]["total_cores"] = total_cpus + print(f" Total CPU cores detected: {total_cpus}") + + # Check environment variables that control threading + print_subheader("Thread Environment Variables") + thread_vars = [ + "OMP_NUM_THREADS", + "MKL_NUM_THREADS", + "OPENBLAS_NUM_THREADS", + "NUMEXPR_NUM_THREADS", + "NUMBA_NUM_THREADS", + "ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS", + "LINUMPY_MAX_CPUS", + "LINUMPY_RESERVED_CPUS", + ] + + for var in thread_vars: + value = os.environ.get(var, "(not set)") + self.results["cpu"][var] = value + print(f" {var}: {value}") + + return total_cpus + + def check_memory(self) -> Any: + """Check memory availability.""" + print_header("Memory Configuration") + + try: + import psutil + + mem = psutil.virtual_memory() + total_gb = mem.total / (1024**3) + available_gb = mem.available / (1024**3) + + self.results["memory"]["total_gb"] = round(total_gb, 1) + self.results["memory"]["available_gb"] = round(available_gb, 1) + self.results["memory"]["percent_used"] = mem.percent + + print(f" Total RAM: {total_gb:.1f} GB") + print(f" Available RAM: {available_gb:.1f} GB") + print(f" Memory usage: {mem.percent}%") + + if available_gb < 16: + print(" ⚠️ Low available memory - may cause swapping") + self.results["issues"].append(f"Low available memory: {available_gb:.1f} GB") + + return total_gb, available_gb + + except ImportError: + print(" ⚠️ psutil not installed - cannot check memory") + print(" Install with: pip install psutil") + self.results["memory"]["error"] = "psutil not installed" + return None, None + + def check_gpu(self) -> None: + """Check GPU configuration and CUDA availability.""" + print_header("GPU Configuration") + + # 
Check nvidia-smi
+        print_subheader("NVIDIA Driver")
+        try:
+            simple_result = subprocess.run(["nvidia-smi", "-L"], capture_output=True, text=True, timeout=30)
+            if simple_result.returncode == 0 and simple_result.stdout.strip():
+                # cuda_version is not a valid --query-gpu field, so query only
+                # supported fields here; CUDA runtime details are covered by
+                # the CuPy check below.
+                result = subprocess.run(
+                    [
+                        "nvidia-smi",
+                        "--query-gpu=name,memory.total,memory.free,driver_version",
+                        "--format=csv,noheader,nounits",
+                    ],
+                    capture_output=True,
+                    text=True,
+                    timeout=30,
+                )
+                if result.returncode == 0 and result.stdout.strip():
+                    gpus = []
+                    for i, line in enumerate(result.stdout.strip().split("\n")):
+                        if not line.strip():
+                            continue
+                        parts = [p.strip() for p in line.split(",")]
+                        if len(parts) >= 4:
+                            try:
+                                gpu_info = {
+                                    "id": i,
+                                    "name": parts[0],
+                                    "memory_total_mb": int(float(parts[1])),
+                                    "memory_free_mb": int(float(parts[2])),
+                                    "driver_version": parts[3],
+                                }
+                                gpus.append(gpu_info)
+                                print(f"  GPU {i}: {parts[0]}")
+                                print(
+                                    f"    Memory: {int(float(parts[1])) / 1024:.1f} GB total, "
+                                    f"{int(float(parts[2])) / 1024:.1f} GB free"
+                                )
+                                print(f"    Driver: {parts[3]}")
+                            except (ValueError, IndexError):
+                                print(f"  ⚠️ Could not parse GPU {i} info: {line}")
+
+                    self.results["gpu"]["available"] = len(gpus) > 0
+                    self.results["gpu"]["devices"] = gpus
+                    if gpus:
+                        print(f"  ✅ Found {len(gpus)} GPU(s)")
+                else:
+                    gpus = []
+                    for line in simple_result.stdout.strip().split("\n"):
+                        if line.startswith("GPU "):
+                            gpus.append({"name": line})
+                            print(f"  {line}")
+                    self.results["gpu"]["available"] = len(gpus) > 0
+                    self.results["gpu"]["devices"] = gpus
+                    if gpus:
+                        print(f"  ✅ Found {len(gpus)} GPU(s)")
+            else:
+                if simple_result.stderr:
+                    print(f"  ⚠️ nvidia-smi error: {simple_result.stderr.strip()[:100]}")
+                else:
+                    print("  ⚠️ nvidia-smi returned no output")
+                self.results["gpu"]["available"] = False
+
+        except FileNotFoundError:
+            print("  ⚠️ nvidia-smi not found - no NVIDIA driver installed")
+            self.results["gpu"]["available"] = False
+        except subprocess.TimeoutExpired:
+            print("  ⚠️ nvidia-smi timed out - GPU may be busy or stuck")
+            self.results["gpu"]["available"] = False
+        except Exception as e:
+            print(f"  ⚠️ nvidia-smi error: {e}")
+            self.results["gpu"]["available"] = False
+
+        # Check CuPy
+        print_subheader("CuPy (GPU Python)")
+        self._check_cupy()
+
+        # Check linumpy GPU module
+        print_subheader("Linumpy GPU Module")
+        self._check_linumpy_gpu()
+
+    def _check_cupy(self) -> None:
+        """Check CuPy GPU support."""
+        try:
+            import cupy as cp
+
+            print(f"  ✅ CuPy version: {cp.__version__}")
+            self.results["gpu"]["cupy_version"] = cp.__version__
+
+            try:
+                n_devices = cp.cuda.runtime.getDeviceCount()
+                print(f"  ✅ CUDA devices available: {n_devices}")
+
+                for i in range(n_devices):
+                    with cp.cuda.Device(i):
+                        free, total = cp.cuda.runtime.memGetInfo()
+                        print(f"    Device {i}: {free / (1024**3):.1f} GB free / {total / (1024**3):.1f} GB total")
+
+                test_array = cp.random.rand(1000, 1000)
+                _ = cp.fft.fft2(test_array)
+                cp.cuda.Stream.null.synchronize()
+                print("  ✅ GPU computation test passed")
+                self.results["gpu"]["cupy_working"] = True
+
+            except Exception as e:
+                print(f"  ⚠️ CuPy CUDA error: {e}")
+                self.results["gpu"]["cupy_working"] = False
+                self.results["issues"].append(f"CuPy CUDA error: {e}")
+
+        except ImportError:
+            print("  ⚠️ CuPy not installed - GPU acceleration disabled for linumpy")
+            print("     Install with: pip install cupy-cuda12x (or appropriate CUDA version)")
+            self.results["gpu"]["cupy_version"] = None
+            self.results["issues"].append("CuPy not installed - linumpy GPU 
acceleration disabled") + + def _check_linumpy_gpu(self) -> None: + """Check linumpy GPU module.""" + try: + from linumpy.gpu import GPU_AVAILABLE, GPU_DEVICE_NAME, GPU_MEMORY_GB + + print(f" GPU_AVAILABLE: {GPU_AVAILABLE}") + if GPU_AVAILABLE: + print(f" GPU_DEVICE_NAME: {GPU_DEVICE_NAME}") + print(f" GPU_MEMORY_GB: {GPU_MEMORY_GB:.1f}") + self.results["gpu"]["linumpy_gpu_available"] = GPU_AVAILABLE + except ImportError as e: + print(f" ⚠️ Cannot import linumpy.gpu: {e}") + self.results["gpu"]["linumpy_gpu_available"] = False + except Exception as e: + print(f" ⚠️ Error checking linumpy.gpu: {e}") + self.results["gpu"]["linumpy_gpu_available"] = False + + def check_python_packages(self) -> None: + """Check critical Python packages.""" + print_header("Python Environment") + + print(f" Python version: {sys.version}") + self.results["python"]["version"] = sys.version + + print_subheader("Critical Packages") + + packages = [ + ("numpy", "numpy"), + ("scipy", "scipy"), + ("basicpy", "basicpy"), + ("torch", "torch"), + ("pqdm", "pqdm"), + ("dask", "dask"), + ("zarr", "zarr"), + ("threadpoolctl", "threadpoolctl"), + ("psutil", "psutil"), + ] + + for name, import_name in packages: + try: + mod = __import__(import_name) + version = getattr(mod, "__version__", "unknown") + print(f" ✅ {name}: {version}") + self.results["python"][name] = version + except ImportError: + print(f" ❌ {name}: NOT INSTALLED") + self.results["python"][name] = None + if name in ["basicpy", "pqdm"]: + self.results["issues"].append(f"{name} not installed - required for pipeline") + + # Check numpy BLAS backend + print_subheader("NumPy BLAS Configuration") + try: + import numpy as np + + try: + blas_info = np.show_config(mode="dicts") + if blas_info and "Build Dependencies" in blas_info: + blas = blas_info.get("Build Dependencies", {}).get("blas", {}) + print(f" BLAS: {blas.get('name', 'unknown')}") + self.results["python"]["blas"] = blas.get("name", "unknown") + except Exception: + print(" (Could not determine BLAS configuration)") + except Exception: + print(" (Could not check BLAS)") + + def check_nextflow_config(self) -> None: + """Check Nextflow configuration recommendations.""" + print_header("Nextflow Configuration") + + total_cpus = self.results["cpu"]["total_cores"] + total_memory = self.results["memory"].get("total_gb", 0) + + print_subheader("Current Environment") + + nf_process_name = os.environ.get("NXF_TASK_NAME", "Not in Nextflow process") + print(f" NXF_TASK_NAME: {nf_process_name}") + + try: + result = subprocess.run(["nextflow", "-version"], capture_output=True, text=True, timeout=10) + if result.returncode == 0: + version_line = result.stdout.strip().split("\n")[0] + print(f" Nextflow: {version_line}") + self.results["nextflow"]["installed"] = True + else: + print(" Nextflow: installed but version check failed") + self.results["nextflow"]["installed"] = True + except FileNotFoundError: + print(" Nextflow: not found in PATH") + self.results["nextflow"]["installed"] = False + except Exception: + print(" Nextflow: could not check version") + + print_subheader("Suggested Nextflow Parameters") + + reserved_cpus = max(2, total_cpus // 12) + available_cpus = total_cpus - reserved_cpus + suggested_processes = min(16, max(1, available_cpus // 3)) + + suggestions = { + "params.processes": suggested_processes, + "params.enable_cpu_limits": True, + "params.reserved_cpus": reserved_cpus, + "params.use_gpu": self.results["gpu"].get("available", False), + } + + for param, value in suggestions.items(): + print(f" {param} = 
{value}") + + self.results["nextflow"]["suggestions"] = suggestions + + print_subheader("Notes") + print(f" • With {total_cpus} CPU cores, you can run up to {suggested_processes} parallel processes") + print(" • fix_illumination step uses BaSiC algorithm") + print(" • Each BaSiC process typically uses ~3 CPU threads") + if total_memory: + print(f" • With {total_memory:.0f} GB RAM, memory should not be a bottleneck") + + def run_benchmarks(self) -> None: + """Run performance benchmarks.""" + print_header("Performance Benchmarks") + + print_subheader("BaSiC Algorithm (fix_illumination bottleneck)") + self._run_basic_benchmark() + + print_subheader("Parallel Processing (pqdm)") + self._run_pqdm_benchmark() + + if self.results["gpu"].get("cupy_working"): + print_subheader("GPU Performance") + self._run_gpu_benchmark() + + def _run_basic_benchmark(self) -> None: + """Run BaSiC benchmark (BaSiCPy 2.0+ uses PyTorch backend).""" + benchmark_code = """ +import sys +import time +import numpy as np + +try: + import torch + has_cuda = torch.cuda.is_available() + mode = "GPU" if has_cuda else "CPU" + print(f"DEBUG_TORCH_MODE:{mode}") + if has_cuda: + print(f"DEBUG_TORCH_DEVICE:{torch.cuda.get_device_name(0)}") + + from basicpy import BaSiC + tiles = np.random.rand(16, 256, 256).astype(np.float32) * 1000 + + start = time.perf_counter() + optimizer = BaSiC(get_darkfield=False, max_iterations=100) + optimizer.fit(tiles) + elapsed = time.perf_counter() - start + + print(f"SUCCESS:{elapsed:.2f}:{mode}") +except Exception as e: + import traceback + print(f"ERROR:{e}") + traceback.print_exc() + sys.exit(1) +""" + try: + result = subprocess.run( + [sys.executable, "-c", benchmark_code], + capture_output=True, + text=True, + timeout=120, + env=dict(os.environ), + ) + + success_line = None + for line in result.stdout.strip().split("\n"): + if line.startswith("DEBUG_TORCH_MODE:"): + mode = line.split(":", 1)[1] + print(f" PyTorch mode: {mode}") + elif line.startswith("DEBUG_TORCH_DEVICE:"): + device = line.split(":", 1)[1] + print(f" GPU device: {device}") + elif line.startswith("SUCCESS:"): + success_line = line + + if success_line: + parts = success_line.split(":") + elapsed = float(parts[1]) + mode = parts[2] + print(f" BaSiC fit 16 tiles @ (256, 256): {elapsed:.2f}s ({mode})") + self.results["linumpy"]["basic_16x256"] = elapsed + if mode == "GPU": + print(" ✅ PyTorch GPU acceleration working") + else: + print(" ⚠️ Running on CPU (no CUDA GPU detected)") + else: + error_out = result.stderr + result.stdout + self._handle_basic_error(error_out) + if self.verbose: + print("\n --- Full subprocess output ---") + for line in result.stdout.split("\n")[-20:]: + print(f" {line}") + for line in result.stderr.split("\n")[-30:]: + print(f" {line}") + + except subprocess.TimeoutExpired: + print(" ⚠️ BaSiC benchmark timed out (>120s)") + except Exception as e: + print(f" ⚠️ BaSiC benchmark failed: {e}") + + def _handle_basic_error(self, error_out: str) -> None: + """Handle BaSiC benchmark errors with specific guidance.""" + error_lower = error_out.lower() + + if "no module named 'basicpy'" in error_lower: + print(" ❌ BaSiC failed: basicpy not installed") + print(" Install: pip install basicpy") + elif "no module named 'torch'" in error_lower: + print(" ❌ BaSiC failed: PyTorch not installed") + print(" Install: pip install torch") + elif "cuda" in error_lower and "out of memory" in error_lower: + print(" ❌ BaSiC failed: GPU out of memory") + print(" BaSiC will automatically fall back to CPU if GPU is unavailable.") + else: + 
for line in error_out.split("\n"):
+                if line.startswith("ERROR:"):
+                    print(f"  ❌ BaSiC failed: {line[6:]}")
+                    break
+            else:
+                # for/else: runs only when no ERROR: marker line was found
+                print(f"  ❌ BaSiC failed: {error_out[:200]}")
+        if self.verbose:
+            print("\n  Complete output (--verbose mode):")
+            for line in error_out.split("\n")[-30:]:
+                print(f"    {line}")
+
+    @staticmethod
+    def _pqdm_dummy_task(i: Any) -> Any:
+        """FFT workload for the pqdm benchmark.
+
+        Defined as a staticmethod rather than a nested function: the process
+        pool has to pickle the callable, and locally-defined functions cannot
+        be pickled.
+        """
+        import numpy as np
+
+        arr = np.random.rand(500, 500)
+        for _ in range(10):
+            arr = np.fft.fft2(arr)
+        return i
+
+    def _run_pqdm_benchmark(self) -> Any:
+        """Run pqdm parallel processing benchmark."""
+        try:
+            from pqdm.processes import pqdm
+
+            for n_jobs in [1, 4, 8, 16]:
+                start = time.perf_counter()
+                _results = pqdm(
+                    range(16),
+                    SystemDiagnostics._pqdm_dummy_task,
+                    n_jobs=n_jobs,
+                    desc=f"pqdm n_jobs={n_jobs}",
+                    disable=True,
+                )
+                elapsed = time.perf_counter() - start
+                print(f"  pqdm with n_jobs={n_jobs}: {elapsed:.2f}s for 16 tasks")
+                self.results["linumpy"][f"pqdm_njobs{n_jobs}"] = round(elapsed, 2)
+
+        except Exception as e:
+            print(f"  ⚠️ pqdm benchmark failed: {e}")
+
+    def _run_gpu_benchmark(self) -> None:
+        """Run GPU performance benchmark."""
+        try:
+            import cupy as cp
+            import numpy as np
+
+            size = 2048
+
+            cpu_data = np.random.rand(size, size).astype(np.float32)
+            start = time.perf_counter()
+            for _ in range(5):
+                _ = np.fft.fft2(cpu_data)
+            cpu_time = (time.perf_counter() - start) / 5
+
+            gpu_data = cp.asarray(cpu_data)
+            cp.cuda.Stream.null.synchronize()
+            start = time.perf_counter()
+            for _ in range(5):
+                _ = cp.fft.fft2(gpu_data)
+            cp.cuda.Stream.null.synchronize()
+            gpu_time = (time.perf_counter() - start) / 5
+
+            speedup = cpu_time / gpu_time if gpu_time > 0 else 0
+
+            print(f"  FFT {size}x{size}: CPU {cpu_time * 1000:.1f}ms, GPU {gpu_time * 1000:.1f}ms ({speedup:.1f}x speedup)")
+
+            self.results["linumpy"]["fft_cpu_ms"] = round(cpu_time * 1000, 1)
+            self.results["linumpy"]["fft_gpu_ms"] = round(gpu_time * 1000, 1)
+            self.results["linumpy"]["fft_speedup"] = round(speedup, 1)
+
+        except Exception as e:
+            print(f"  ⚠️ GPU benchmark failed: {e}")
+
+    def debug_cuda_libraries(self) -> None:
+        """Show detailed CUDA library debugging information."""
+        import site
+
+        print_header("CUDA Library Debug Information")
+
+        # Get site-packages
+        sp = site.getsitepackages()[0]
+        print(f"  Site-packages: {sp}")
+
+        # Check current LD_LIBRARY_PATH
+        print_subheader("Current LD_LIBRARY_PATH")
+        ld_path = os.environ.get("LD_LIBRARY_PATH", "")
+        if ld_path:
+            for i, p in enumerate(ld_path.split(":")):
+                if p:
+                    print(f"  [{i}] {p}")
+        else:
+            print("  (not set)")
+
+        # Check for nvidia packages installed
+        print_subheader("Installed NVIDIA Packages")
+        nvidia_path = os.path.join(sp, "nvidia")
+        if os.path.isdir(nvidia_path):
+            subdirs = sorted(os.listdir(nvidia_path))
+            for subdir in subdirs:
+                subdir_path = os.path.join(nvidia_path, subdir)
+                if os.path.isdir(subdir_path):
+                    lib_path = os.path.join(subdir_path, "lib")
+                    if os.path.isdir(lib_path):
+                        so_files = glob.glob(os.path.join(lib_path, "*.so*"))
+                        so12_count = len([f for f in so_files if ".so.12" in f])
+                        so11_count = len([f for f in so_files if ".so.11" in f])
+                        so13_count = len([f for f in so_files if ".so.13" in f])
+                        version_info = []
+                        if so11_count:
+                            version_info.append(f"{so11_count} .so.11")
+                        if so12_count:
+                            version_info.append(f"{so12_count} .so.12")
+                        if so13_count:
+                            version_info.append(f"{so13_count} .so.13")
+                        print(
+                            f"  nvidia/{subdir}/lib: {len(so_files)} .so files "
+                            f"({', '.join(version_info) if version_info else 'no versioned'})"
+                        )
+        else:
+            print("  No nvidia packages found in site-packages")
+
+        # Check what's 
in the individual nvidia/xxx/lib paths + print_subheader("Libraries in -cu12 Package Directories") + cu12_dirs = ["cublas", "cuda_runtime", "nvjitlink", "cudnn", "cufft"] + for dir_name in cu12_dirs: + lib_path = os.path.join(sp, "nvidia", dir_name, "lib") + if os.path.isdir(lib_path): + so_files = glob.glob(os.path.join(lib_path, "lib*.so*")) + versioned_files = [os.path.basename(f) for f in so_files if ".so." in os.path.basename(f)] + if versioned_files: + print(f" nvidia/{dir_name}/lib: {', '.join(sorted(versioned_files)[:4])}") + + # Check nvidia/cu13/lib (from non-suffixed packages - should NOT be installed) + print_subheader("Non-suffixed Package Libraries (nvidia/cu13/lib)") + cu13_lib = os.path.join(sp, "nvidia", "cu13", "lib") + if os.path.isdir(cu13_lib): + so_files = glob.glob(os.path.join(cu13_lib, "*.so*")) + print(f" ⚠️ nvidia/cu13/lib exists with {len(so_files)} files") + print(" This may indicate non-suffixed nvidia packages are installed.") + print(" These are INCOMPATIBLE with JAX 0.4.23.") + print(" Run: source scripts/fix_jax_cuda_plugin.sh") + else: + print(" ✅ nvidia/cu13/lib does not exist (correct)") + + # Check key libraries needed by JAX + print_subheader("Key Libraries for JAX CUDA 12 Plugin") + # JAX 0.4.23 needs these specific library versions from pinned nvidia packages + # Format: (lib_name, pkg_name, alt_names) - alt_names for case variations + key_libs = [ + ("libcusolver.so.11", "nvidia-cusolver-cu12==11.5.4.101", []), + ("libcublas.so.12", "nvidia-cublas-cu12==12.3.4.1", []), + ("libcublasLt.so.12", "nvidia-cublas-cu12==12.3.4.1", []), + ("libcudnn.so.8", "nvidia-cudnn-cu12==8.9.7.29", []), + ("libcufft.so.11", "nvidia-cufft-cu12==11.0.12.1", []), + ("libcusparse.so.12", "nvidia-cusparse-cu12==12.2.0.103", []), + ("libnvjitlink.so.12", "nvidia-nvjitlink-cu12==12.3.101", ["libnvJitLink.so.12"]), + ("libcudart.so.12", "nvidia-cuda-runtime-cu12==12.3.101", []), + ] + # Build list of all paths to check + check_paths = [] + # Individual -cu12 package paths + cu12_pkg_paths = [ + "nvidia/cublas/lib", + "nvidia/cuda_runtime/lib", + "nvidia/nvjitlink/lib", + "nvidia/cudnn/lib", + "nvidia/cufft/lib", + "nvidia/cusolver/lib", + "nvidia/cusparse/lib", + ] + for pkg_path in cu12_pkg_paths: + full_path = os.path.join(sp, pkg_path) + if os.path.isdir(full_path) and full_path not in check_paths: + check_paths.append(full_path) + # Add LD_LIBRARY_PATH entries + for p in ld_path.split(":"): + if p and os.path.isdir(p) and p not in check_paths: + check_paths.append(p) + + missing_libs = [] + found_wrong_version = [] + + for lib, pkg_name, alt_names in key_libs: + found = False + wrong_version = None + + # Check all names (including alternates for case variations) + names_to_check = [lib, *alt_names] + + for check_path in check_paths: + for name in names_to_check: + lib_path = os.path.join(check_path, name) + if os.path.exists(lib_path): + # Get a nice short path name for display + rel_path = check_path.replace(sp + "/", "") + print(f" ✅ {lib} found in {rel_path}") + found = True + break + if found: + break + + if not found: + # Check for wrong version (.so.13 instead of .so.12, etc.) + base_name = lib.rsplit(".so.", 1)[0] + for check_path in check_paths: + wrong_files = glob.glob(os.path.join(check_path, f"{base_name}.so.*")) + # Also check case variations + if "nvjitlink" in base_name.lower(): + wrong_files += glob.glob(os.path.join(check_path, "libnvJitLink.so.*")) + for wf in wrong_files: + wf_base = os.path.basename(wf) + if wf_base not in names_to_check and ".so." 
in wf_base: + wrong_version = wf_base + break + if wrong_version: + break + + if not found: + if wrong_version: + print(f" ❌ {lib} NOT FOUND (have {wrong_version} instead)") + found_wrong_version.append((lib, wrong_version, pkg_name)) + else: + print(f" ❌ {lib} NOT FOUND") + missing_libs.append((lib, pkg_name)) + + # Check JAX plugins + print_subheader("JAX CUDA Plugins") + jax_plugins_path = os.path.join(sp, "jax_plugins") + if os.path.isdir(jax_plugins_path): + plugins = os.listdir(jax_plugins_path) + for plugin in sorted(plugins): + plugin_path = os.path.join(jax_plugins_path, plugin) + if os.path.isdir(plugin_path): + so_files = glob.glob(os.path.join(plugin_path, "*.so")) + print(f" {plugin}: {len(so_files)} .so files") + # Check for xla_cuda_plugin.so + xla_plugin = os.path.join(plugin_path, "xla_cuda_plugin.so") + if os.path.exists(xla_plugin): + # Check if patchelf was applied by looking at execstack + try: + result = subprocess.run(["readelf", "-l", xla_plugin], capture_output=True, text=True, timeout=5) + if "GNU_STACK" in result.stdout: + for line in result.stdout.split("\n"): + if "GNU_STACK" in line: + if "RWE" in line: + print(" ⚠️ xla_cuda_plugin.so has executable stack (needs patchelf)") + else: + print(" ✅ xla_cuda_plugin.so stack is non-executable") + break + except Exception: + pass + else: + print(" No jax_plugins directory found") + + # Show ldd output for the JAX plugin + print_subheader("JAX CUDA Plugin Dependencies (ldd)") + try: + xla_plugin = os.path.join(sp, "jax_plugins", "xla_cuda12", "xla_cuda_plugin.so") + if os.path.exists(xla_plugin): + # Build LD_LIBRARY_PATH with nvidia package paths + nvidia_paths = [] + for pkg_dir in ["cublas", "cuda_runtime", "cusolver", "cusparse", "cufft", "cudnn", "nvjitlink"]: + pkg_path = os.path.join(sp, "nvidia", pkg_dir, "lib") + if os.path.isdir(pkg_path): + nvidia_paths.append(pkg_path) + test_ld_path = ":".join(nvidia_paths) + if ld_path: + test_ld_path = f"{test_ld_path}:{ld_path}" + + env = os.environ.copy() + env["LD_LIBRARY_PATH"] = test_ld_path + + result = subprocess.run(["ldd", xla_plugin], capture_output=True, text=True, timeout=10, env=env) + # Show only CUDA-related or "not found" lines + for line in result.stdout.split("\n"): + line = line.strip() + if ( + "cuda" in line.lower() + or "cublas" in line.lower() + or "cusolver" in line.lower() + or "cudnn" in line.lower() + or "cufft" in line.lower() + or "cusparse" in line.lower() + or "nvjit" in line.lower() + or "not found" in line.lower() + ): + print(f" {line}") + else: + print(" xla_cuda_plugin.so not found at expected location") + except Exception as e: + print(f" Error checking ldd: {e}") + + # Recommendations based on findings + print_subheader("Recommendations") + + # Check if we have version mismatches (CUDA 13 libs instead of CUDA 12) + if found_wrong_version: + print(" ⚠️ CUDA LIBRARY VERSION MISMATCH DETECTED") + print("") + print(" Your nvidia packages have CUDA 13 libraries (.so.13), but") + print(" JAX 0.4.23's CUDA 12 plugin needs CUDA 12 libraries (.so.12).") + print("") + print(" Found wrong versions:") + for needed, have, _pkg in found_wrong_version: + print(f" - Need {needed}, have {have}") + print("") + print(" RECOMMENDED FIX (automated):") + print(" source scripts/fix_jax_cuda_plugin.sh") + print("") + print(" This script will:") + print(" 1. Install hybrid nvidia packages (mix of -cu12 and non-suffixed)") + print(" 2. Reinstall JAX 0.4.23 with CUDA 12 support") + print(" 3. Apply patchelf fix for modern Linux kernels") + print(" 4. 
Set LD_LIBRARY_PATH correctly") + print(" 5. Test the installation") + print("") + print(" See docs/GPU_ACCELERATION.md for manual setup details.") + elif missing_libs: + print(" ❌ Some CUDA libraries are missing") + print("") + print(" Missing libraries:") + for lib, pkg in missing_libs: + print(f" - {lib} (from {pkg})") + print("") + print(" RECOMMENDED FIX (automated):") + print(" source scripts/fix_jax_cuda_plugin.sh") + print("") + print(" Or install packages manually:") + print(" pip install --extra-index-url https://pypi.nvidia.com \\") + print(" nvidia-cusolver nvidia-cublas nvidia-cuda-runtime \\") + print(" nvidia-cufft nvidia-cusparse nvidia-nvjitlink nvidia-cudnn-cu12") + else: + cu13_lib = os.path.join(sp, "nvidia", "cu13", "lib") + if os.path.isdir(cu13_lib): + print(" ✅ All required CUDA 12 libraries found!") + print("") + print(" Make sure LD_LIBRARY_PATH includes:") + print(f" export LD_LIBRARY_PATH={cu13_lib}:$LD_LIBRARY_PATH") + else: + print(" ℹ️ Using individual nvidia/xxx/lib paths") + print("") + print(" Your current LD_LIBRARY_PATH looks correct.") + + def generate_report(self) -> Any: + """Generate summary report with recommendations.""" + print_header("Summary") + + if self.results["issues"]: + print_subheader("Issues Found") + for issue in self.results["issues"]: + print(f" ⚠️ {issue}") + + print_subheader("System Summary") + total_cpus = self.results["cpu"]["total_cores"] + total_memory = self.results["memory"].get("total_gb", 0) + gpu_available = self.results["gpu"].get("available", False) + cupy_working = self.results["gpu"].get("cupy_working", False) + + print(f" CPU cores: {total_cpus}") + print(f" Total RAM: {total_memory:.1f} GB") + print(f" NVIDIA GPU: {'Available' if gpu_available else 'Not available'}") + print(f" CuPy GPU (linumpy): {'Working' if cupy_working else 'Not available'}") + + return self.results + + +def main() -> None: + """Run function.""" + p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + p.add_argument("--quick", action="store_true", help="Quick system check only (no benchmarks)") + p.add_argument("--benchmark", action="store_true", help="Include performance benchmarks") + p.add_argument("--debug-cuda", action="store_true", help="Show detailed CUDA library debugging info") + p.add_argument("--verbose", "-v", action="store_true", help="Show verbose error output for debugging") + p.add_argument("--output", "-o", type=str, help="Save results to JSON file") + args = p.parse_args() + + print("=" * get_terminal_width()) + print(" LINUMPY 3D RECONSTRUCTION PIPELINE DIAGNOSTICS") + print(f" {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") + print("=" * get_terminal_width()) + + diag = SystemDiagnostics(verbose=args.verbose) + + if args.debug_cuda: + diag.debug_cuda_libraries() + return + + diag.check_cpu() + diag.check_memory() + diag.check_gpu() + diag.check_python_packages() + diag.check_nextflow_config() + + if args.benchmark or not args.quick: + diag.run_benchmarks() + + results = diag.generate_report() + + if args.output: + with open(args.output, "w") as f: + json.dump(results, f, indent=2, default=str) + print(f"\n Results saved to: {args.output}") + + print("\n" + "=" * get_terminal_width()) + print(" DIAGNOSTICS COMPLETE") + print("=" * get_terminal_width() + "\n") + + +if __name__ == "__main__": + main() diff --git a/scripts/diagnostics/linum_diagnose_reconstruction.py b/scripts/diagnostics/linum_diagnose_reconstruction.py new file mode 100644 index 00000000..f9fd1229 --- /dev/null +++ 
b/scripts/diagnostics/linum_diagnose_reconstruction.py
@@ -0,0 +1,450 @@
+#!/usr/bin/env python3
+"""
+Comprehensive diagnostic analysis for 3D reconstruction troubleshooting.
+
+This script runs diagnostic analyses to identify the root cause of
+reconstruction artifacts like edge mismatches and "overhangs" in serial OCT data:
+
+1. **Rotation Analysis**: Cumulative rotation drift between slices
+2. **Shifts Analysis**: Pairwise XY shifts, outliers, and cumulative drift
+
+For tile dilation and motor-only comparisons, see linum_analyze_tile_dilation.py
+and linum_stack_motor_only.py.
+
+Designed for troubleshooting 45° oblique-cut mouse brain reconstructions.
+"""
+
+import linumpy.config.threads  # noqa: F401
+
+import argparse
+import json
+import logging
+import re
+from datetime import datetime
+from pathlib import Path
+from typing import Any
+
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+
+from linumpy.cli.args import add_overwrite_arg
+
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+logger = logging.getLogger(__name__)
+
+
+def _build_arg_parser() -> argparse.ArgumentParser:
+    p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
+    p.add_argument("pipeline_output", help="Path to pipeline output directory (containing register_pairwise, etc.)")
+    p.add_argument("out_directory", help="Output directory for diagnostic results")
+
+    # Analysis selection
+    p.add_argument("--skip_rotation", action="store_true", help="Skip rotation drift analysis")
+    p.add_argument("--skip_shifts", action="store_true", help="Skip shifts analysis")
+
+    # Parameters
+    p.add_argument("--resolution", type=float, default=10.0, help="Resolution in µm/pixel [%(default)s]")
+    p.add_argument(
+        "--rotation_threshold", type=float, default=2.0, help="Flag rotations above this threshold (degrees) [%(default)s]"
+    )
+    p.add_argument("--slice_range", type=str, default=None, help='Analyze only these slices (e.g., "10-20" or "5,10,15")')
+
+    add_overwrite_arg(p)
+    return p
+
+
+def parse_slice_range(range_str: str) -> Any:
+    """Parse slice range like "10-20" or "5,10,15" into list of IDs."""
+    if not range_str:
+        return None
+
+    slice_ids = set()
+    for part in range_str.split(","):
+        part = part.strip()
+        if "-" in part:
+            start, end = part.split("-")
+            slice_ids.update(range(int(start), int(end) + 1))
+        else:
+            slice_ids.add(int(part))
+    return sorted(slice_ids)
+
+
+def analyze_rotation_drift(
+    pipeline_dir: Path,
+    output_dir: Path,
+    threshold: float = 2.0,
+    slice_ids: Any = None,
+) -> dict | None:
+    """Analyze rotation patterns from pairwise registration."""
+    reg_dir = Path(pipeline_dir) / "register_pairwise"
+    if not reg_dir.exists():
+        logger.warning("No register_pairwise directory found at %s", reg_dir)
+        return None
+
+    records = []
+    for slice_dir in sorted(reg_dir.iterdir()):
+        if not slice_dir.is_dir():
+            continue
+
+        match = re.search(r"slice_z(\d+)", slice_dir.name)
+        if not match:
+            continue
+
+        slice_id = int(match.group(1))
+        if slice_ids and slice_id not in slice_ids:
+            continue
+
+        json_path = slice_dir / "pairwise_registration_metrics.json"
+        if not json_path.exists():
+            continue
+
+        with Path(json_path).open() as f:
+            data = json.load(f)
+
+        metrics = data.get("metrics", {})
+        records.append(
+            {
+                "slice_id": slice_id,
+                "rotation": metrics.get("rotation", {}).get("value"),
"translation_x": metrics.get("translation_x", {}).get("value"), + "translation_y": metrics.get("translation_y", {}).get("value"), + "z_drift": metrics.get("z_drift", {}).get("value"), + } + ) + + if not records: + return None + + df = pd.DataFrame(records).sort_values("slice_id") + + # Compute cumulative rotation + valid_rot = df["rotation"].dropna() + cumulative_rotation = valid_rot.cumsum() + + # Identify problematic slices + issues = [] + large_rot = df[df["rotation"].abs() > threshold] + if len(large_rot) > 0: + issues.append(f"Slices with |rotation| > {threshold}°: {large_rot['slice_id'].tolist()}") + + if len(cumulative_rotation) > 0 and abs(cumulative_rotation.iloc[-1]) > 5: + issues.append(f"High cumulative rotation drift: {cumulative_rotation.iloc[-1]:.2f}°") + + result = { + "total_slices": len(df), + "mean_rotation": float(valid_rot.mean()) if len(valid_rot) > 0 else 0, + "std_rotation": float(valid_rot.std()) if len(valid_rot) > 0 else 0, + "cumulative_rotation": float(cumulative_rotation.iloc[-1]) if len(cumulative_rotation) > 0 else 0, + "max_abs_rotation": float(valid_rot.abs().max()) if len(valid_rot) > 0 else 0, + "large_rotation_slices": large_rot["slice_id"].tolist() if len(large_rot) > 0 else [], + "issues": issues, + } + + # Save CSV + csv_path = Path(output_dir) / "rotation_data.csv" + df.to_csv(csv_path, index=False) + + # Generate plot + _fig, axes = plt.subplots(2, 1, figsize=(12, 8)) + + ax1 = axes[0] + valid_df = df.dropna(subset=["rotation"]) + ax1.bar(valid_df["slice_id"], valid_df["rotation"], alpha=0.7, color="steelblue") + ax1.axhline(y=0, color="black", linewidth=0.5) + ax1.axhline(y=threshold, color="red", linestyle="--", alpha=0.5) + ax1.axhline(y=-threshold, color="red", linestyle="--", alpha=0.5) + ax1.set_xlabel("Slice ID") + ax1.set_ylabel("Rotation (degrees)") + ax1.set_title("Per-Slice Rotation") + + ax2 = axes[1] + cumsum = valid_df["rotation"].cumsum() + ax2.plot(valid_df["slice_id"], cumsum, "b-", linewidth=2) + ax2.fill_between(valid_df["slice_id"], 0, cumsum, alpha=0.3) + ax2.axhline(y=0, color="black", linewidth=0.5) + ax2.set_xlabel("Slice ID") + ax2.set_ylabel("Cumulative Rotation (degrees)") + ax2.set_title("Cumulative Rotation Drift") + + plt.tight_layout() + plt.savefig(Path(output_dir) / "rotation_analysis.png", dpi=150) + plt.close() + + return result + + +def analyze_shifts(pipeline_dir: Path, output_dir: Path, resolution: float = 10.0, slice_ids: Any = None) -> dict | None: + """Analyze XY shifts from shifts_xy.csv.""" + shifts_path = Path(pipeline_dir) / "shifts_xy.csv" + if not shifts_path.exists(): + # Try parent directory + shifts_path = Path(pipeline_dir).parent / "shifts_xy.csv" + + if not shifts_path.exists(): + logger.warning("No shifts_xy.csv found") + return None + + df = pd.read_csv(shifts_path) + + if slice_ids: + df = df[df["moving_id"].isin(slice_ids) | df["fixed_id"].isin(slice_ids)] + + px_per_mm = 1000 / resolution + + # Compute magnitudes + df["shift_magnitude_mm"] = np.sqrt(df["x_shift_mm"] ** 2 + df["y_shift_mm"] ** 2) + + # Cumulative drift + cumsum_x = df["x_shift_mm"].cumsum() * px_per_mm + cumsum_y = df["y_shift_mm"].cumsum() * px_per_mm + + # Outlier detection + q1 = df["shift_magnitude_mm"].quantile(0.25) + q3 = df["shift_magnitude_mm"].quantile(0.75) + iqr = q3 - q1 + outlier_threshold = q3 + 1.5 * iqr + outliers = df[df["shift_magnitude_mm"] > outlier_threshold] + + result = { + "n_shifts": len(df), + "mean_shift_mm": float(df["shift_magnitude_mm"].mean()), + "max_shift_mm": 
float(df["shift_magnitude_mm"].max()), + "cumulative_x_px": float(cumsum_x.iloc[-1]) if len(cumsum_x) > 0 else 0, + "cumulative_y_px": float(cumsum_y.iloc[-1]) if len(cumsum_y) > 0 else 0, + "n_outliers": len(outliers), + "outlier_indices": outliers["moving_id"].tolist() if len(outliers) > 0 else [], + "issues": [], + } + + if len(outliers) > 0: + result["issues"].append(f"Found {len(outliers)} outlier shifts") + + total_drift = np.sqrt(result["cumulative_x_px"] ** 2 + result["cumulative_y_px"] ** 2) + if total_drift > 500: + result["issues"].append(f"Large cumulative drift: {total_drift:.0f} pixels") + + # Generate plot + _fig, axes = plt.subplots(2, 2, figsize=(14, 10)) + + ax1 = axes[0, 0] + ax1.scatter(df["x_shift_mm"], df["y_shift_mm"], c=df["moving_id"], cmap="viridis", alpha=0.7) + ax1.axhline(y=0, color="gray", linestyle="--", linewidth=0.5) + ax1.axvline(x=0, color="gray", linestyle="--", linewidth=0.5) + ax1.set_xlabel("X shift (mm)") + ax1.set_ylabel("Y shift (mm)") + ax1.set_title("Pairwise Shifts") + + ax2 = axes[0, 1] + ax2.plot(df["moving_id"], df["shift_magnitude_mm"], "b-", alpha=0.7) + ax2.axhline(y=outlier_threshold, color="red", linestyle="--", label=f"Outlier threshold: {outlier_threshold:.2f} mm") + ax2.scatter(outliers["moving_id"], outliers["shift_magnitude_mm"], c="red", s=50, zorder=5) + ax2.set_xlabel("Slice ID") + ax2.set_ylabel("Shift magnitude (mm)") + ax2.set_title("Shift Magnitude per Slice") + ax2.legend() + + ax3 = axes[1, 0] + ax3.plot(df["moving_id"], cumsum_x, "b-", label="X drift") + ax3.plot(df["moving_id"], cumsum_y, "r-", label="Y drift") + ax3.set_xlabel("Slice ID") + ax3.set_ylabel("Cumulative drift (pixels)") + ax3.set_title("Cumulative XY Drift") + ax3.legend() + + ax4 = axes[1, 1] + ax4.plot(cumsum_x, cumsum_y, "b-", linewidth=2, alpha=0.7) + ax4.scatter(cumsum_x.iloc[0], cumsum_y.iloc[0], c="green", s=100, zorder=5, label="Start") + ax4.scatter(cumsum_x.iloc[-1], cumsum_y.iloc[-1], c="red", s=100, zorder=5, label="End") + ax4.set_xlabel("Cumulative X drift (pixels)") + ax4.set_ylabel("Cumulative Y drift (pixels)") + ax4.set_title("Drift Trajectory") + ax4.legend() + ax4.set_aspect("equal") + + plt.tight_layout() + plt.savefig(Path(output_dir) / "shifts_analysis.png", dpi=150) + plt.close() + + return result + + +def generate_summary_report(results: Any, output_dir: Path) -> Path: + """Generate comprehensive summary report.""" + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + lines = [ + "=" * 70, + "3D RECONSTRUCTION DIAGNOSTIC REPORT", + "=" * 70, + f"Generated: {timestamp}", + "", + ] + + # Rotation Analysis + if results.get("rotation"): + rot = results["rotation"] + lines.extend( + [ + "ROTATION DRIFT ANALYSIS", + "-" * 50, + f"Total slices: {rot['total_slices']}", + f"Mean rotation: {rot['mean_rotation']:.4f}°", + f"Std rotation: {rot['std_rotation']:.4f}°", + f"Max |rotation|: {rot['max_abs_rotation']:.4f}°", + f"Cumulative drift: {rot['cumulative_rotation']:.4f}°", + "", + ] + ) + if rot["issues"]: + lines.append("Issues:") + lines.extend(f" ⚠ {issue}" for issue in rot["issues"]) + lines.append("") + + # Shifts Analysis + if results.get("shifts"): + sh = results["shifts"] + lines.extend( + [ + "XY SHIFTS ANALYSIS", + "-" * 50, + f"Total shift pairs: {sh['n_shifts']}", + f"Mean shift: {sh['mean_shift_mm']:.4f} mm", + f"Max shift: {sh['max_shift_mm']:.4f} mm", + f"Cumulative X drift: {sh['cumulative_x_px']:.0f} px", + f"Cumulative Y drift: {sh['cumulative_y_px']:.0f} px", + f"Outlier shifts: {sh['n_outliers']}", + "", + ] + ) 
+ if sh["issues"]: + lines.append("Issues:") + lines.extend(f" ⚠ {issue}" for issue in sh["issues"]) + lines.append("") + + # Overall Assessment + lines.extend( + [ + "OVERALL ASSESSMENT", + "-" * 50, + ] + ) + + all_issues = [] + if results.get("rotation"): + all_issues.extend(results["rotation"].get("issues", [])) + if results.get("shifts"): + all_issues.extend(results["shifts"].get("issues", [])) + + if not all_issues: + lines.append("✓ No significant issues detected in analyzed data") + else: + lines.append(f"Found {len(all_issues)} potential issues:") + lines.extend(f" • {issue}" for issue in all_issues) + + lines.extend( + [ + "", + "RECOMMENDATIONS", + "-" * 50, + ] + ) + + # Generate recommendations based on findings + recommendations = [] + + if results.get("rotation"): + rot = results["rotation"] + if abs(rot["cumulative_rotation"]) > 2: + recommendations.append("Consider enabling rotation correction in registration (registration_transform='euler')") + if rot["max_abs_rotation"] > 5: + recommendations.append("Check slice quality - large rotations may indicate degraded slices") + + if results.get("shifts"): + sh = results["shifts"] + if sh["n_outliers"] > 3: + recommendations.append("Review outlier slices - may need exclusion or manual adjustment") + total_drift = np.sqrt(sh["cumulative_x_px"] ** 2 + sh["cumulative_y_px"] ** 2) + if total_drift > 300: + recommendations.append("Large cumulative drift - check stage calibration or sample mounting") + + if not recommendations: + recommendations.append("Current parameters appear appropriate for this dataset") + + lines.extend(f" → {rec}" for rec in recommendations) + + lines.extend(["", "=" * 70]) + + report_path = Path(output_dir) / "diagnostic_report.txt" + with Path(report_path).open("w") as f: + f.write("\n".join(lines)) + + # Also save JSON + json_path = Path(output_dir) / "diagnostic_results.json" + with Path(json_path).open("w") as f: + json.dump( + { + "timestamp": timestamp, + "results": results, + "issues": all_issues, + "recommendations": recommendations, + }, + f, + indent=2, + default=str, + ) + + logger.info("Summary report saved to %s", report_path) + return report_path + + +def main() -> None: + """Run function.""" + p = _build_arg_parser() + args = p.parse_args() + + pipeline_dir = Path(args.pipeline_output) + output_dir = Path(args.out_directory) + output_dir.mkdir(parents=True, exist_ok=True) + + slice_ids = parse_slice_range(args.slice_range) + if slice_ids: + logger.info("Analyzing slices: %s", slice_ids) + + results = {} + + # Run analyses + if not args.skip_rotation: + logger.info("Running rotation drift analysis...") + results["rotation"] = analyze_rotation_drift(pipeline_dir, output_dir, args.rotation_threshold, slice_ids) + + if not args.skip_shifts: + logger.info("Running shifts analysis...") + results["shifts"] = analyze_shifts(pipeline_dir, output_dir, args.resolution, slice_ids) + + # Generate summary + generate_summary_report(results, output_dir) + + # Print summary to console + print("\n" + "=" * 50) + print("DIAGNOSTIC COMPLETE") + print("=" * 50) + print(f"Results saved to: {output_dir}") + + all_issues = [] + for val in results.values(): + if val and "issues" in val: + all_issues.extend(val["issues"]) + + if all_issues: + print(f"\n⚠ Found {len(all_issues)} potential issues:") + for issue in all_issues[:5]: + print(f" • {issue}") + if len(all_issues) > 5: + print(f" ... 
and {len(all_issues) - 5} more (see full report)") + else: + print("\n✓ No significant issues detected") + + +if __name__ == "__main__": + main() diff --git a/scripts/diagnostics/linum_stack_motor_only.py b/scripts/diagnostics/linum_stack_motor_only.py new file mode 100644 index 00000000..4303b7aa --- /dev/null +++ b/scripts/diagnostics/linum_stack_motor_only.py @@ -0,0 +1,402 @@ +#!/usr/bin/env python3 +""" +Stack slices into a 3D volume using only motor positions (no pairwise registration). + +This diagnostic tool creates a 3D stack using ONLY the XY shifts from the shifts_xy.csv +file (motor/stage positions recorded during acquisition). By comparing this "motor-only" +stack against the fully-registered stack, you can: + +1. **Validate motor positions**: Check if motor positions alone provide good alignment +2. **Identify registration artifacts**: See if pairwise registration introduces errors +3. **Debug dilation issues**: Verify if XY drift is the main cause of misalignment + +The script reads the shifts file and positions each slice according to its cumulative +XY shift, without any image-based registration refinement. +""" + +import linumpy.config.threads # noqa: F401 + +import argparse +import logging +import re +from pathlib import Path +from typing import Any + +import numpy as np + +from linumpy.cli.args import add_overwrite_arg, assert_output_exists +from linumpy.io.zarr import AnalysisOmeZarrWriter, read_omezarr +from linumpy.mosaic.stacking import apply_xy_shift, blend_overlap_xy +from linumpy.stack_alignment.io import load_shifts_csv +from linumpy.stack_alignment.units import center_shifts, convert_shifts_to_pixels + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + + +def _build_arg_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + p.add_argument("in_slices_dir", help="Directory containing slice volumes (.ome.zarr)") + p.add_argument("in_shifts", help="CSV file with XY shifts (shifts_xy.csv)") + p.add_argument("out_volume", help="Output stacked volume (.ome.zarr)") + + p.add_argument( + "--blending", + type=str, + default="none", + choices=["none", "average", "max", "feather"], + help="Blending method for overlapping regions [%(default)s]\n" + " none: No blending, later slices overwrite earlier ones\n" + " average: Average overlapping voxels\n" + " max: Take maximum of overlapping voxels\n" + " feather: Feathered blending based on distance from edge", + ) + p.add_argument( + "--overlap_slices", + type=int, + default=0, + help="Number of z-slices to overlap between consecutive slices [%(default)s]\n" + "Set to 0 to stack without z-overlap (just XY alignment)", + ) + p.add_argument( + "--z_spacing_um", + type=float, + default=None, + help="Z spacing between slices in microns.\nIf not provided, uses spacing from first slice metadata.", + ) + p.add_argument( + "--center_drift", action="store_true", default=True, help="Center cumulative drift around middle slice [%(default)s]" + ) + p.add_argument("--no_center_drift", action="store_false", dest="center_drift", help="Do not center drift") + p.add_argument( + "--slice_pattern", + type=str, + default=r"slice_z(\d+)", + help="Regex pattern to extract slice ID from filename [%(default)s]", + ) + p.add_argument("--preview", type=str, default=None, help="Output path for preview image (.png)") + p.add_argument("--max_slices", type=int, default=None, help="Maximum number of slices to 
stack (for testing)") + + add_overwrite_arg(p) + return p + + +def compute_output_shape(slice_files: Any, cumsum_px: Any, overlap_slices: int = 0) -> Any: + """Compute the output volume shape to fit all slices.""" + xmin, xmax, ymin, ymax = [], [], [], [] + total_z = 0 + + for slice_id, slice_file in slice_files.items(): + vol, _ = read_omezarr(Path(slice_file), level=0) + dx, dy = cumsum_px.get(slice_id, (0, 0)) + + z_depth = vol.shape[0] + height = vol.shape[1] # Y + width = vol.shape[2] # X + + xmin.append(dx) + xmax.append(dx + width) + ymin.append(dy) + ymax.append(dy + height) + total_z += z_depth + + # Account for overlap + n_slices = len(slice_files) + if overlap_slices > 0 and n_slices > 1: + total_z -= overlap_slices * (n_slices - 1) + + x0 = min(xmin) + y0 = min(ymin) + nx = int(np.ceil(max(xmax) - x0)) + ny = int(np.ceil(max(ymax) - y0)) + + return (total_z, ny, nx), (x0, y0) + + +def generate_preview(volume: Any, output_path: Path) -> None: + """Generate a preview image of the stacked volume.""" + try: + import matplotlib.pyplot as plt + + _fig, axes = plt.subplots(1, 3, figsize=(15, 5)) + + # Middle slices in each dimension + z_mid = volume.shape[0] // 2 + y_mid = volume.shape[1] // 2 + x_mid = volume.shape[2] // 2 + + # XY slice (axial) + axes[0].imshow(volume[z_mid, :, :], cmap="gray") + axes[0].set_title(f"XY (z={z_mid})") + axes[0].axis("off") + + # XZ slice (coronal) + axes[1].imshow(volume[:, y_mid, :], cmap="gray", aspect="auto") + axes[1].set_title(f"XZ (y={y_mid})") + axes[1].axis("off") + + # YZ slice (sagittal) + axes[2].imshow(volume[:, :, x_mid], cmap="gray", aspect="auto") + axes[2].set_title(f"YZ (x={x_mid})") + axes[2].axis("off") + + plt.suptitle("Motor-Only Stack Preview") + plt.tight_layout() + plt.savefig(output_path, dpi=150) + plt.close() + + logger.info("Preview saved to %s", output_path) + except Exception as e: + logger.warning("Could not generate preview: %s", e) + + +def generate_preview_from_slice(slice_2d: Any, output_path: Path) -> None: + """Generate a preview image from a single 2D slice.""" + try: + import matplotlib.pyplot as plt + + _fig, ax = plt.subplots(1, 1, figsize=(10, 10)) + + # Normalize for display + vmin = np.percentile(slice_2d[slice_2d > 0], 1) if np.any(slice_2d > 0) else 0 + vmax = np.percentile(slice_2d, 99) + + ax.imshow(slice_2d, cmap="gray", vmin=vmin, vmax=vmax) + ax.set_title("Motor-Only Stack (middle Z slice)") + ax.axis("off") + + plt.tight_layout() + plt.savefig(output_path, dpi=150, bbox_inches="tight") + plt.close() + + logger.info("Preview saved to %s", output_path) + except Exception as e: + logger.warning("Could not generate preview: %s", e) + + +def generate_preview_from_zarr(zarr_output: Any, output_path: Path) -> Any: + """Generate a 3-panel preview (XY, XZ, YZ) from a zarr output without loading full volume.""" + try: + import matplotlib.pyplot as plt + + # Get shape from zarr + shape = zarr_output.shape # (Z, Y, X) + z_mid = shape[0] // 2 + y_mid = shape[1] // 2 + x_mid = shape[2] // 2 + + # Read only the slices we need + xy_slice = np.array(zarr_output[z_mid, :, :]) # XY at middle Z + xz_slice = np.array(zarr_output[:, y_mid, :]) # XZ at middle Y + yz_slice = np.array(zarr_output[:, :, x_mid]) # YZ at middle X + + _fig, axes = plt.subplots(1, 3, figsize=(18, 6)) + + # Normalize each slice for display + def normalize_slice(s: Any) -> Any: + valid = s > 0 + if np.any(valid): + vmin = np.percentile(s[valid], 1) + vmax = np.percentile(s, 99) + else: + vmin, vmax = 0, 1 + return vmin, vmax + + # XY slice (axial) 
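+        # Robust display range: normalize_slice (above) takes the 1st
+        # percentile of nonzero voxels only, so the empty padding around the
+        # stack does not wash out the displayed contrast.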
+        vmin, vmax = normalize_slice(xy_slice) +        axes[0].imshow(xy_slice, cmap="gray", vmin=vmin, vmax=vmax) +        axes[0].set_title(f"XY (z={z_mid})") +        axes[0].axis("off") + +        # XZ slice (coronal) +        vmin, vmax = normalize_slice(xz_slice) +        axes[1].imshow(xz_slice, cmap="gray", vmin=vmin, vmax=vmax, aspect="auto") +        axes[1].set_title(f"XZ (y={y_mid})") +        axes[1].axis("off") + +        # YZ slice (sagittal) +        vmin, vmax = normalize_slice(yz_slice) +        axes[2].imshow(yz_slice, cmap="gray", vmin=vmin, vmax=vmax, aspect="auto") +        axes[2].set_title(f"YZ (x={x_mid})") +        axes[2].axis("off") + +        plt.suptitle(f"Motor-Only Stack Preview ({shape[0]} x {shape[1]} x {shape[2]})") +        plt.tight_layout() +        plt.savefig(output_path, dpi=150, bbox_inches="tight") +        plt.close() + +        logger.info("3-panel preview saved to %s", output_path) +    except Exception as e: +        logger.warning("Could not generate preview: %s", e) + + +def main() -> None: +    """Run function.""" +    p = _build_arg_parser() +    args = p.parse_args() + +    slices_dir = Path(args.in_slices_dir) +    shifts_path = Path(args.in_shifts) +    output_path = Path(args.out_volume) + +    assert_output_exists(output_path, p, args) + +    # Find slice files +    slice_files_list = sorted(slices_dir.glob("*.ome.zarr")) +    if not slice_files_list: +        p.error(f"No .ome.zarr files found in {slices_dir}") + +    # Extract slice IDs and build mapping +    pattern = re.compile(args.slice_pattern) +    slice_files = {} +    for f in slice_files_list: +        match = pattern.search(f.name) +        if match: +            slice_id = int(match.group(1)) +            slice_files[slice_id] = f + +    if not slice_files: +        p.error(f"No files matched pattern '{args.slice_pattern}' in {slices_dir}") + +    logger.info("Found %s slice files", len(slice_files)) + +    # Limit slices for testing +    if args.max_slices: +        slice_ids = sorted(slice_files.keys())[: args.max_slices] +        slice_files = {k: slice_files[k] for k in slice_ids} +        logger.info("Limited to %s slices for testing", len(slice_files)) + +    # Load shifts +    logger.info("Loading shifts from %s", shifts_path) +    cumsum_mm, _all_shift_ids = load_shifts_csv(shifts_path) + +    # Get resolution from first slice +    # NOTE: read_omezarr returns resolution in MILLIMETERS (OME-NGFF standard) +    first_slice_id = sorted(slice_files.keys())[0] +    _first_vol, first_res = read_omezarr(Path(slice_files[first_slice_id]), level=0) + +    # Resolution: res is [z, y, x] in mm from OME-NGFF, convert to µm +    res_x_mm = first_res[-1] if len(first_res) >= 3 else first_res[0] +    res_y_mm = first_res[-2] if len(first_res) >= 3 else first_res[0] +    res_z_mm = first_res[0] if len(first_res) >= 3 else 0.200  # default 200 µm + +    # Convert to µm for display and calculations +    res_x_um = res_x_mm * 1000 +    res_y_um = res_y_mm * 1000 +    res_z_um = res_z_mm * 1000 + +    if args.z_spacing_um: +        res_z_um = args.z_spacing_um + +    logger.info("Resolution: Z=%.2f µm, Y=%.2f µm, X=%.2f µm", res_z_um, res_y_um, res_x_um) + +    # Convert shifts to pixels using the lateral (X, Y) resolutions +    cumsum_px = convert_shifts_to_pixels(cumsum_mm, (res_x_um, res_y_um)) + +    # Filter to only slices we have files for +    available_ids = sorted(slice_files.keys()) +    cumsum_px = {k: v for k, v in cumsum_px.items() if k in available_ids} + +    # Fill in missing shifts with zero +    for slice_id in available_ids: +        if slice_id not in cumsum_px: +            logger.warning("No shift for slice %s, using (0, 0)", slice_id) +            cumsum_px[slice_id] = (0.0, 0.0) + +    # Center drift if requested +    if args.center_drift: +        cumsum_px = center_shifts(cumsum_px, available_ids) +        logger.info("Centered 
drift around middle slice") + +    # Compute output shape +    logger.info("Computing output shape...") +    output_shape, (x0, y0) = compute_output_shape(slice_files, cumsum_px, args.overlap_slices) +    logger.info("Output shape: %s (Z, Y, X)", output_shape) +    logger.info("Origin offset: (%.1f, %.1f) px", x0, y0) + +    # Adjust shifts by origin +    cumsum_px = {slice_id: (dx - x0, dy - y0) for slice_id, (dx, dy) in cumsum_px.items()} + +    # Stack slices +    logger.info("Stacking %s slices (blending: %s)...", len(slice_files), args.blending) + +    # Use chunked writer to avoid memory issues +    output = AnalysisOmeZarrWriter( +        Path(output_path), +        output_shape, +        chunk_shape=(min(100, output_shape[0]), min(512, output_shape[1]), min(512, output_shape[2])), +        dtype=np.float32, +    ) + +    z_cursor = 0 +    for i, slice_id in enumerate(available_ids): +        slice_file = slice_files[slice_id] +        vol, _ = read_omezarr(Path(slice_file), level=0) +        vol_data = np.array(vol[:]).astype(np.float32) + +        dx, dy = cumsum_px[slice_id] + +        # Get the cropped slice and destination coordinates +        shifted, dst_coords = apply_xy_shift(vol_data, dx, dy, (output_shape[1], output_shape[2])) + +        if shifted is None: +            logger.warning("Slice %s is entirely outside output bounds, skipping", slice_id) +            continue + +        dst_y_start, dst_y_end, dst_x_start, dst_x_end = dst_coords + +        # Determine Z range for this slice; apply the overlap offset to z_start +        # before computing z_end, so the destination depth z_end - z_start +        # always equals shifted.shape[0] when writing below +        z_start = z_cursor +        if args.overlap_slices > 0 and i > 0: +            z_start -= args.overlap_slices +        z_end = z_start + shifted.shape[0] + +        # Ensure we don't exceed output bounds +        if z_end > output_shape[0]: +            z_end = output_shape[0] +            shifted = shifted[: z_end - z_start] + +        # Place/blend into output at the correct XY position +        if z_start < z_end: +            if args.blending == "none" or i == 0: +                # No blending - just write to the specific region +                output[z_start:z_end, dst_y_start:dst_y_end, dst_x_start:dst_x_end] = shifted +            else: +                # Read existing region, blend, write back +                existing = np.array(output[z_start:z_end, dst_y_start:dst_y_end, dst_x_start:dst_x_end]) +                blended = blend_overlap_xy(existing, shifted, args.blending) +                output[z_start:z_end, dst_y_start:dst_y_end, dst_x_start:dst_x_end] = blended + +        z_cursor = z_end + +        logger.info( +            "  Slice %02d: shift=(%.1f, %.1f) px, z=[%s:%s], xy=[%s:%s, %s:%s]", +            slice_id, +            dx, +            dy, +            z_start, +            z_end, +            dst_y_start, +            dst_y_end, +            dst_x_start, +            dst_x_end, +        ) + +    # Finalize with pyramid +    logger.info("Finalizing and generating pyramid levels...") +    # AnalysisOmeZarrWriter.finalize expects resolution in mm. +    resolution_mm = [res_z_um / 1000.0, res_y_um / 1000.0, res_x_um / 1000.0] +    output.finalize(resolution_mm, n_levels=3) + +    # Generate preview if requested +    if args.preview: +        # Generate 3-panel preview from zarr output +        generate_preview_from_zarr(output, args.preview) + +    logger.info("Done!") + + +if __name__ == "__main__": +    main() diff --git a/scripts/diagnostics/linum_stitch_motor_only.py b/scripts/diagnostics/linum_stitch_motor_only.py new file mode 100644 index 00000000..ecae5054 --- /dev/null +++ b/scripts/diagnostics/linum_stitch_motor_only.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python3 +""" +Create a stitched mosaic using only motor positions (bypassing image-based registration). + +This diagnostic tool creates stitched mosaics that use ONLY the motor/stage positions +recorded during acquisition. By comparing this "motor-only" reconstruction against +the fully-registered reconstruction, you can identify: + +1. 
**Dilation/scaling issues**: If motor positions don't match actual image positions, + the motor-only stitch will show systematic offsets or gaps +2. **Registration drift**: By comparing motor-only vs registered, you can see + how much the registration is "correcting" the motor positions +3. **Stage repeatability**: Systematic patterns in motor-only errors indicate + stage calibration issues + +For troubleshooting 45° oblique-cut samples where edges don't match up. +""" + +import linumpy.config.threads # noqa: F401 + +import argparse +import logging +from pathlib import Path +from typing import Any + +import numpy as np + +from linumpy.cli.args import add_overwrite_arg +from linumpy.io.zarr import OmeZarrWriter, read_omezarr +from linumpy.metrics import collect_stitch_3d_metrics +from linumpy.mosaic.grid import add_volume_to_mosaic +from linumpy.mosaic.motor import compare_motor_vs_registration, compute_motor_positions + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + + +def _build_arg_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + p.add_argument("input_volume", help="Full path to a 3D mosaic grid volume (.ome.zarr)") + p.add_argument("output_volume", help="Output stitched mosaic filename (.ome.zarr)") + + p.add_argument("--overlap_fraction", type=float, default=0.1, help="Expected overlap fraction between tiles [%(default)s]") + p.add_argument( + "--blending_method", + type=str, + default="diffusion", + choices=["none", "average", "diffusion"], + help="Blending method [%(default)s]", + ) + p.add_argument( + "--scale_factor", + type=float, + default=1.0, + help="Scale factor to apply to motor positions (to test dilation) [%(default)s]", + ) + p.add_argument( + "--rotation_deg", type=float, default=0.0, help="Global rotation to apply to tile grid (degrees) [%(default)s]" + ) + p.add_argument( + "--compare_transform", type=str, default=None, help="Path to registration transform .npy file for comparison output" + ) + p.add_argument("--output_comparison", type=str, default=None, help="Output path for comparison metrics JSON") + + add_overwrite_arg(p) + return p + + +def compute_registration_positions(nx: Any, ny: Any, transform: Any) -> Any: + """Compute tile positions using registration transform.""" + positions = [] + for i in range(nx): + for j in range(ny): + pos = np.dot(transform, [i, j]).astype(int) + positions.append(pos) + return positions + + +def main() -> None: + """Run function.""" + p = _build_arg_parser() + args = p.parse_args() + + input_file = Path(args.input_volume) + output_file = Path(args.output_volume) + + assert output_file.name.endswith(".zarr"), "output_volume must be a .zarr file" + + if not args.overwrite and output_file.exists(): + raise FileExistsError(f"Output file exists: {output_file}. 
Use --overwrite to replace.") + + # Load the mosaic grid volume + logger.info("Loading mosaic grid from %s", input_file) + volume, resolution = read_omezarr(Path(input_file), level=0) + tile_shape = volume.chunks + + logger.info("Volume shape: %s", volume.shape) + logger.info("Tile shape: %s", tile_shape) + logger.info("Resolution: %s", resolution) + + # Compute grid dimensions + nx = volume.shape[1] // tile_shape[1] + ny = volume.shape[2] // tile_shape[2] + logger.info("Grid: %s x %s tiles", nx, ny) + + # Compute motor-based positions + motor_positions = compute_motor_positions(nx, ny, tile_shape, args.overlap_fraction, args.scale_factor, args.rotation_deg) + + # If comparison transform provided, compare positions + if args.compare_transform: + transform = np.load(args.compare_transform) + reg_positions = compute_registration_positions(nx, ny, transform) + comparison = compare_motor_vs_registration(motor_positions, reg_positions, args.output_comparison) + + logger.info("Position comparison summary:") + logger.info(" Mean offset: (%.1f, %.1f) px", comparison["mean_diff_y"], comparison["mean_diff_x"]) + logger.info(" Max offset: %.1f px", comparison["max_magnitude"]) + if comparison.get("dilation_indicator"): + logger.warning(comparison["dilation_warning"]) + + # Compute output mosaic shape + posx_min = min([pos[0] for pos in motor_positions]) + posx_max = max([pos[0] + tile_shape[1] for pos in motor_positions]) + posy_min = min([pos[1] for pos in motor_positions]) + posy_max = max([pos[1] + tile_shape[2] for pos in motor_positions]) + mosaic_shape = (volume.shape[0], int(posx_max - posx_min), int(posy_max - posy_min)) + + logger.info("Output mosaic shape: %s", mosaic_shape) + + # Stitch the mosaic using motor positions only + logger.info("Stitching mosaic using motor positions...") + writer = OmeZarrWriter(output_file, mosaic_shape, chunk_shape=(100, 100, 100), dtype=np.float32, overwrite=args.overwrite) + + for i in range(nx): + for j in range(ny): + # Extract tile from input + rmin = i * tile_shape[1] + rmax = (i + 1) * tile_shape[1] + cmin = j * tile_shape[2] + cmax = (j + 1) * tile_shape[2] + tile = volume[:, rmin:rmax, cmin:cmax] + + if np.any(tile < 0.0): + tile = tile - tile.min() + + # Get motor-based position + pos = motor_positions[i * ny + j].copy() + pos[0] -= posx_min + pos[1] -= posy_min + + add_volume_to_mosaic(tile, pos, writer, blending_method=args.blending_method) + + writer.finalize(resolution) + + # Collect metrics + collect_stitch_3d_metrics( + input_shape=volume.shape, + output_shape=mosaic_shape, + num_tiles=nx * ny, + resolution=list(resolution), + output_path=output_file, + input_path=input_file, + blending_method=args.blending_method, + ) + + logger.info("Motor-only stitched mosaic saved to %s", output_file) + + +if __name__ == "__main__": + main() diff --git a/scripts/diagnostics/linum_suggest_params.py b/scripts/diagnostics/linum_suggest_params.py new file mode 100644 index 00000000..61e2790a --- /dev/null +++ b/scripts/diagnostics/linum_suggest_params.py @@ -0,0 +1,702 @@ +#!/usr/bin/env python3 +""" +Suggest 3D reconstruction pipeline parameters from raw input files. + +Analyses the motor-positions file (shifts_xy.csv) and, optionally, the raw +data directory produced by the preprocessing pipeline to automatically +estimate suitable nextflow.config parameters. 
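+ +Example +------- +Illustrative invocation (paths are hypothetical): + +    linum_suggest_params.py shifts_xy.csv report_dir/ --data_dir raw_data/ + +This writes param_estimation_report.txt and suggested_params.config into report_dir/.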
+ +Estimable parameters +-------------------- +From shifts_xy.csv: + max_shift_mm — IQR upper bound of normal inter-slice shifts + common_space_max_step_mm — 95th percentile of consecutive normal shift changes + interpolate_missing_slices — true when gaps (moving_id - fixed_id > 1) are found + +From the raw data directory (--data_dir): + registration_slicing_interval_mm — from slice_thickness in metadata.json / state.json + stitch_overlap_fraction — from overlap_fraction in metadata.json / state.json + resolution — smallest standard resolution >= native lateral px size + crop_interface_out_depth — depth below tissue interface; user must verify + registration_max_translation — tile width in pixels at target resolution + +Parameters that cannot be estimated automatically: + crop_interface_out_depth — requires tissue-specific knowledge; an estimate is given + based on the raw OCT depth and focus position, but should + be verified by inspecting a cross-section preview. + +Raw data directory layout (output of the preprocessing pipeline) +----------------------------------------------------------------- + / + state.json # global acquisition state + slice_z00/ + metadata.json # per-slice acquisition parameters + tiles/ + tile_x##_y##_z##/ + info.txt # per-tile OCT scan parameters + +Cleaned data directory layout (after linum_clean_raw_data.py) +-------------------------------------------------------------- + / + state.json # global acquisition state (unchanged) + metadata/ + slice_z00/ + metadata.json # per-slice acquisition parameters (kept) + tiles/ + tile_x##_y##_z##/ + info.txt # per-tile OCT scan parameters (kept) + +Both layouts are detected automatically when --data_dir is provided. +""" + +# Configure thread limits before numpy/scipy imports (optional; skipped if +# linumpy is not installed into the current environment) +import contextlib + +with contextlib.suppress(ImportError): + import linumpy.config.threads # noqa: F401 + +import argparse +import json +import sys +from pathlib import Path +from typing import Any + +import numpy as np +import pandas as pd + +# Default OCT axial resolution in µm (hardware constant for Thorlabs PSOCT). +# Cannot be determined from metadata files; override with --axial_res_um if known. +OCT_AXIAL_RES_UM = 3.5 + + +# ============================================================================= +# CLI +# ============================================================================= + + +def _build_arg_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + p.add_argument("shifts_file", help="Motor-positions CSV file (shifts_xy.csv)") + p.add_argument("output_dir", help="Directory for the report and suggested config snippet") + p.add_argument( + "--data_dir", + default=None, + help="Raw data directory (contains state.json and\n" + "slice_z##/ subdirectories, or their cleaned\n" + "equivalent with slices under metadata/). Used\n" + "to read slice thickness, tile overlap, and tile\n" + "dimensions. 
Both raw and cleaned layouts are\n" + "detected automatically.", + ) + p.add_argument( + "--n_calibration_slices", + type=int, + default=1, + help="Number of leading calibration slices to skip when reading per-slice metadata [%(default)s].", + ) + p.add_argument( + "--axial_res_um", + type=float, + default=OCT_AXIAL_RES_UM, + help="OCT axial resolution in µm/pixel [%(default)s µm].\nUsed to convert tile depth (pixels) → µm.", + ) + p.add_argument( + "--resolution_um", + type=float, + default=None, + help="Override target pipeline resolution in µm/pixel.\nDerived automatically from tile dimensions if not given.", + ) + p.add_argument("-f", "--overwrite", action="store_true", help="Overwrite existing output directory") + return p + + +# ============================================================================= +# Shifts loading and analysis +# ============================================================================= + + +def load_shifts(path: str) -> pd.DataFrame: + """Run function.""" + df = pd.read_csv(path) + required = ["fixed_id", "moving_id", "x_shift_mm", "y_shift_mm"] + missing = [c for c in required if c not in df.columns] + if missing: + raise ValueError(f"Missing columns in shifts file: {missing}") + return df + + +def detect_rehoming(magnitudes: np.ndarray, mad_k: float = 3.0) -> np.ndarray: + """ + Robust re-homing event detection using Median Absolute Deviation (MAD). + + MAD is insensitive to the very outliers we are trying to detect, unlike + plain IQR whose quartiles are pulled up by re-homing events. + + Returns boolean array, True where the shift is classified as re-homing. + """ + med = np.median(magnitudes) + mad = np.median(np.abs(magnitudes - med)) + # 1.4826 makes MAD a consistent estimator of σ for normally distributed data + sigma_equiv = 1.4826 * mad if mad > 0 else 1e-9 + is_rehoming = magnitudes > (med + mad_k * sigma_equiv) + + # Fallback: IQR if MAD gives nothing (perfectly uniform shifts, no outliers) + if not is_rehoming.any(): + q1, q3 = np.percentile(magnitudes, [25, 75]) + iqr = q3 - q1 + if iqr > 0: + is_rehoming = magnitudes > q3 + mad_k * iqr + + return is_rehoming + + +def detect_slice_gaps(df: pd.DataFrame) -> list[dict]: + """ + Detect missing slices in the shifts sequence. + + A gap occurs when moving_id - fixed_id > 1 (i.e. one or more slice IDs + are absent from the sequence of consecutive pairs). + + Returns a list of dicts with keys 'fixed_id', 'moving_id', 'n_missing'. + """ + gaps = [] + for _, row in df.iterrows(): + skip = int(row["moving_id"]) - int(row["fixed_id"]) - 1 + if skip > 0: + gaps.append( + { + "fixed_id": int(row["fixed_id"]), + "moving_id": int(row["moving_id"]), + "n_missing": skip, + } + ) + return gaps + + +def analyze_shifts(df: pd.DataFrame) -> dict: + """Compute all shift-derived parameter estimates.""" + mag = np.sqrt((df["x_shift_mm"] ** 2 + df["y_shift_mm"] ** 2).to_numpy()) + df = df.copy() + df["magnitude_mm"] = mag + + is_rehoming = detect_rehoming(mag) + normal_mag = mag[~is_rehoming] + + # max_shift_mm: IQR upper bound of the normal (non-rehoming) shifts. + if len(normal_mag) >= 4: + nq1, nq3 = np.percentile(normal_mag, [25, 75]) + niqr = nq3 - nq1 + max_shift = nq3 + 1.5 * niqr + else: + max_shift = normal_mag.max() if len(normal_mag) else 0.5 + + # common_space_max_step_mm: 95th pct of consecutive shift magnitude changes. 
+ normal_df = df[~is_rehoming].reset_index(drop=True) + if len(normal_df) > 1: + dx = normal_df["x_shift_mm"].diff().abs() + dy = normal_df["y_shift_mm"].diff().abs() + step_mag = np.sqrt(dx**2 + dy**2).dropna() + max_step = float(np.percentile(step_mag, 95)) if len(step_mag) else 0.5 + else: + max_step = 0.5 + + # Slice gap detection + gaps = detect_slice_gaps(df) + + return { + "df": df, + "is_rehoming": is_rehoming, + "rehoming_rows": df[is_rehoming], + "normal_rows": df[~is_rehoming], + "n_rehoming": int(is_rehoming.sum()), + "has_rehoming": bool(is_rehoming.any()), + "max_shift_mm": max_shift, + "max_step_mm": max_step, + "gaps": gaps, + "has_gaps": bool(gaps), + "normal_mag_stats": { + "mean": float(normal_mag.mean()) if len(normal_mag) else 0.0, + "std": float(normal_mag.std()) if len(normal_mag) else 0.0, + "max": float(normal_mag.max()) if len(normal_mag) else 0.0, + "p95": float(np.percentile(normal_mag, 95)) if len(normal_mag) else 0.0, + }, + } + + +# ============================================================================= +# Raw metadata reading +# ============================================================================= + + +def _parse_info_txt(path: Path) -> dict: + """ + Parse a tile info.txt file. + + The file may contain repeated sections (one per tile position). Only the + first value of each key is kept, since acquisition parameters are constant + across positions. + """ + info: dict = {} + for line in path.read_text().splitlines(): + parts = line.split(": ", 1) + if len(parts) != 2: + continue + key, val = parts[0].strip(), parts[1].strip() + if key in info: + continue # keep first occurrence + try: + info[key] = int(val) + except ValueError: + try: + info[key] = float(val) + except ValueError: + info[key] = val + return info + + +def analyze_metadata(data_dir: Path, axial_res_um: float, n_calibration_slices: int = 1) -> dict: + """ + Extract acquisition parameters from the raw data directory. + + Reads (in priority order): + 1. First tissue slice metadata.json (skipping calibration slices) + 2. /state.json — fallback for missing fields + 3. First tile info.txt in the tissue slice — for OCT depth parameters + + Parameters + ---------- + data_dir : str + Path to the raw data directory containing slice subdirectories. + axial_res_um : float + Axial resolution in micrometers, used to compute depth parameters. + n_calibration_slices : int + Number of leading slice directories to skip (default: 1 to skip + slice_z00, which is always a calibration slice). + + Returns a dict with 'ok', 'sources', and the extracted parameters. + """ + data_dir = Path(data_dir) + result: dict = {"ok": False, "sources": [], "warnings": []} + + # ── 1. Per-slice metadata.json (skip calibration slices) ───────────────── + slice_meta: dict = {} + slice_dirs = sorted(data_dir.glob("slice_z*/")) + + # Auto-detect cleaned structure: slices moved to metadata/ subdirectory + if not slice_dirs: + metadata_subdir = data_dir / "metadata" + if metadata_subdir.is_dir(): + slice_dirs = sorted(metadata_subdir.glob("slice_z*/")) + if slice_dirs: + result["warnings"].append( + "Detected cleaned data structure: reading slice metadata " + "from metadata/ subdirectory (bin files have been removed)." + ) + + tissue_dirs = slice_dirs[n_calibration_slices:] # skip leading calibration slices + if not tissue_dirs: + result["warnings"].append( + f"No tissue slice directories found after skipping " + f"{n_calibration_slices} calibration slice(s) " + f"(found {len(slice_dirs)} total). 
" + f"Try --n_calibration_slices 0." + ) + tissue_dirs = slice_dirs # fall back to all dirs + if tissue_dirs: + meta_path = tissue_dirs[0] / "metadata.json" + if meta_path.exists(): + slice_meta = json.loads(meta_path.read_text()) + result["sources"].append(str(meta_path)) + else: + result["warnings"].append(f"metadata.json not found in {tissue_dirs[0].name}") + + # ── 2. Global state.json ────────────────────────────────────────────────── + state: dict = {} + state_path = data_dir / "state.json" + if state_path.exists(): + state = json.loads(state_path.read_text()) + result["sources"].append(str(state_path)) + + if not slice_meta and not state: + result["error"] = "No metadata.json or state.json found in data_dir" + return result + + # ── 3. Tile info.txt (from the same tissue slice) ───────────────────────── + tile_info: dict = {} + if tissue_dirs: + tiles_dir = tissue_dirs[0] / "tiles" + tile_dirs = sorted(tiles_dir.glob("tile_*/")) if tiles_dir.exists() else [] + for td in tile_dirs: + info_path = td / "info.txt" + if info_path.exists(): + tile_info = _parse_info_txt(info_path) + result["sources"].append(str(info_path)) + break + + # ── Extract parameters (metadata.json takes priority over state.json) ──── + def get(key: str, *sources: Any) -> Any: + for src in sources: + if key in src: + return src[key] + return None + + tile_size_um = get("tile_size_um", slice_meta, state) + tile_n_samples = get("tile_n_samples", slice_meta, state) + slice_thickness = get("slice_thickness", slice_meta, state) + overlap_fraction = get("overlap_fraction", slice_meta, state) + + # Lateral pixel size + if tile_size_um and tile_n_samples: + native_lateral_um = tile_size_um / tile_n_samples + else: + native_lateral_um = None + result["warnings"].append("Could not compute native lateral resolution (tile_size_um or tile_n_samples missing)") + + # Axial depth from tile info.txt + top_z = tile_info.get("top_z") + bottom_z = tile_info.get("bottom_z") + focus_z = tile_info.get("focus_z") + + if bottom_z is not None and top_z is not None: + n_depth_pixels = bottom_z - top_z + 1 + total_depth_um = n_depth_pixels * axial_res_um + else: + n_depth_pixels = state.get("z_max", None) + if n_depth_pixels and state.get("z_min") is not None: + n_depth_pixels = state["z_max"] - state["z_min"] + total_depth_um = n_depth_pixels * axial_res_um + else: + n_depth_pixels = total_depth_um = None + result["warnings"].append("top_z/bottom_z not found in tile info.txt; fell back to state.json z_min/z_max") + + # Estimate crop depth: depth of tissue below interface. + # focus_z marks where the tissue surface is in the OCT depth axis. + # We take 30 % of the remaining usable depth as a conservative starting + # point; the user must verify this against a cross-section preview. 
+ if focus_z is not None and bottom_z is not None: + pixels_below_focus = bottom_z - focus_z + crop_depth_um = int(round(pixels_below_focus * axial_res_um * 0.30 / 50) * 50) + crop_depth_um = max(crop_depth_um, 200) # floor at 200 µm + else: + crop_depth_um = None + + result.update( + { + "ok": True, + "slice_thickness_mm": slice_thickness, + "overlap_fraction": overlap_fraction, + "tile_size_um": tile_size_um, + "tile_n_samples": tile_n_samples, + "native_lateral_um": native_lateral_um, + "n_depth_pixels": n_depth_pixels, + "total_depth_um": total_depth_um, + "focus_z": focus_z, + "crop_depth_um": crop_depth_um, + "axial_res_um": axial_res_um, + } + ) + return result + + +# ============================================================================= +# Rounding helpers +# ============================================================================= + + +def ceil_to(value: float, step: float) -> float: + """Run function.""" + return float(np.ceil(value / step) * step) + + +def suggest_target_resolution(native_xy_um: float) -> int: + """Return the smallest standard pipeline resolution >= native lateral px size.""" + for candidate in [5, 10, 15, 20, 25, 50]: + if candidate >= native_xy_um: + return candidate + return 25 + + +# ============================================================================= +# Report +# ============================================================================= + + +def build_report(shift_stats: dict, acq: dict, shifts_path: str) -> str: + """Run function.""" + df = shift_stats["df"] + s = shift_stats["normal_mag_stats"] + normal = shift_stats["normal_rows"] + + lines = [ + "=" * 62, + "PARAMETER ESTIMATION REPORT", + f"Shifts file : {shifts_path}", + "=" * 62, + "", + "ACQUISITION OVERVIEW", + "-" * 40, + f" Slice pairs (rows): {len(df)}", + f" Estimated # of slices: {len(df) + 1}", + f" Re-homing events: {shift_stats['n_rehoming']}", + ] + + # Acquisition metadata section + if acq.get("ok"): + lines += [""] + lines += ["ACQUISITION METADATA", "-" * 40] + if acq.get("slice_thickness_mm") is not None: + lines.append(f" Slice thickness : {acq['slice_thickness_mm']:.3f} mm → registration_slicing_interval_mm") + if acq.get("overlap_fraction") is not None: + lines.append(f" Tile overlap : {acq['overlap_fraction']:.0%} → stitch_overlap_fraction") + if acq.get("native_lateral_um") is not None: + lines.append( + f" Tile size : {acq['tile_size_um']:.0f} µm " + f"/ {acq['tile_n_samples']} px " + f"= {acq['native_lateral_um']:.2f} µm/px native lateral" + ) + if acq.get("total_depth_um") is not None: + lines.append( + f" OCT depth : {acq['n_depth_pixels']} px " + f"× {acq['axial_res_um']:.1f} µm/px " + f"= {acq['total_depth_um']:.0f} µm total axial range" + ) + if acq.get("focus_z") is not None: + below = acq["n_depth_pixels"] - acq["focus_z"] + below_um = below * acq["axial_res_um"] + lines.append(f" Focus position : z={acq['focus_z']} px ({below} px = {below_um:.0f} µm below interface)") + if acq.get("crop_depth_um") is not None: + lines.append( + f" → suggested crop_interface_out_depth ≈ {acq['crop_depth_um']} µm" + " (30 % of depth below focus; verify from preview)" + ) + for w in acq.get("warnings", []): + lines.append(f" [!] 
{w}") + lines.append(f" Sources: {', '.join(Path(s).name for s in acq.get('sources', []))}") + + lines += [ + "", + "NORMAL-SHIFT STATISTICS (re-homing excluded)", + "-" * 40, + f" Mean magnitude : {s['mean']:.3f} mm", + f" Std : {s['std']:.3f} mm", + f" 95th percentile: {s['p95']:.3f} mm", + f" Maximum : {s['max']:.3f} mm", + f" → suggested max_shift_mm = {shift_stats['max_shift_mm']:.3f} mm", + " (IQR upper bound of non-re-homing shifts)", + ] + + if shift_stats["has_rehoming"]: + rh = shift_stats["rehoming_rows"] + lines += ["", "RE-HOMING EVENTS DETECTED", "-" * 40] + for _, row in rh.iterrows(): + lines.append( + f" Slice {int(row['fixed_id']):02d}→{int(row['moving_id']):02d}: " + f"X={row['x_shift_mm']:+.3f}, Y={row['y_shift_mm']:+.3f} mm " + f"(mag={row['magnitude_mm']:.3f} mm)" + ) + lines += [ + "", + f" Max normal magnitude : {normal['magnitude_mm'].max():.3f} mm", + f" Min re-homing mag : {rh['magnitude_mm'].min():.3f} mm", + " → Note: skip_error_transforms=false allows pairwise accumulation", + " to steer the viewing plane across re-homing boundaries.", + ] + else: + lines += [ + "", + "NO RE-HOMING EVENTS DETECTED", + ] + + if shift_stats["has_gaps"]: + n_total = sum(g["n_missing"] for g in shift_stats["gaps"]) + lines += ["", "MISSING SLICES DETECTED", "-" * 40] + for g in shift_stats["gaps"]: + label = "slice" if g["n_missing"] == 1 else "slices" + lines.append(f" Gap between slice {g['fixed_id']:02d} and {g['moving_id']:02d}: {g['n_missing']} missing {label}") + lines += [ + f" Total missing slices: {n_total}", + " → interpolate_missing_slices = true (recommended)", + ] + else: + lines += ["", "NO MISSING SLICES DETECTED", " interpolate_missing_slices = false"] + + lines += ["", "=" * 62] + return "\n".join(lines) + + +# ============================================================================= +# Config snippet +# ============================================================================= + + +def build_config_snippet(shift_stats: dict, acq: dict, args: argparse.Namespace) -> str: + """Return a nextflow.config parameter block with estimated values.""" + # ── Resolution ─────────────────────────────────────────────────────────── + if args.resolution_um: + res_um = int(args.resolution_um) + res_comment = "// set by --resolution_um" + elif acq.get("ok") and acq.get("native_lateral_um"): + native = acq["native_lateral_um"] + res_um = suggest_target_resolution(native) + res_comment = f"// native lateral resolution = {native:.2f} µm/px → smallest standard res >= native" + else: + res_um = "TODO" + res_comment = "// set to the smallest standard resolution (5/10/15/20/25/50) >= native pixel size in µm" + + # ── Crop depth ─────────────────────────────────────────────────────────── + if acq.get("ok") and acq.get("crop_depth_um"): + crop_depth = acq["crop_depth_um"] + depth_comment = "// 30 % of OCT depth below tissue interface — verify against cross-section preview" + else: + crop_depth = "TODO" + depth_comment = "// depth in µm to keep below the tissue interface; inspect a cross-section preview to set correctly" + + # ── Slicing interval and overlap (from metadata) ───────────────────────── + if acq.get("ok") and acq.get("slice_thickness_mm") is not None: + slicing = f"{acq['slice_thickness_mm']:.3f}" + drifting = f"{acq['slice_thickness_mm'] / 2:.3f}" + slice_src = "// from slice_thickness in metadata" + else: + slicing = "TODO // ← set from acquisition protocol (e.g. 
0.200)" + drifting = "TODO // ← typically half the slicing interval" + slice_src = "" + + if acq.get("ok") and acq.get("overlap_fraction") is not None: + overlap = f"{acq['overlap_fraction']:.2f}" + overlap_src = "// from overlap_fraction in metadata" + else: + overlap = "TODO // ← set from acquisition tile-overlap setting" + overlap_src = "" + + # ── Shift-based params ──────────────────────────────────────────────────── + max_shift = ceil_to(shift_stats["max_shift_mm"], 0.05) + max_step = float(np.clip(ceil_to(shift_stats["max_step_mm"], 0.05), 0.05, 2.0)) + + # ── Registration max translation ───────────────────────────────────────── + # The optimizer bound must comfortably exceed any real inter-slice + # translation. Setting it to the tile size in pixels at the target + # resolution ensures the optimizer is never clamped for whole-tile shifts. + if acq.get("ok") and acq.get("tile_size_um") and isinstance(res_um, (int, float)): + max_trans_px = int(np.ceil(acq["tile_size_um"] / res_um / 10) * 10) + max_trans_comment = ( + f"// tile {acq['tile_size_um']:.0f} µm / {res_um} µm·px⁻¹ " + f"= {acq['tile_size_um'] / res_um:.0f} px, rounded up to nearest 10" + ) + else: + max_trans_px = 200 + max_trans_comment = "// default — set to tile width in pixels at target resolution" + + # ── Missing slice interpolation ─────────────────────────────────────────── + if shift_stats["has_gaps"]: + interp_val = "true" + gap_detail = ", ".join(f"{g['fixed_id']}→{g['moving_id']} ({g['n_missing']} missing)" for g in shift_stats["gaps"]) + interp_comment = f"// gaps detected: {gap_detail}" + else: + interp_val = "false" + interp_comment = "// no gaps detected in shifts sequence" + + lines = [ + "// ================================================================", + "// SUGGESTED PARAMETERS (generated by linum_suggest_params.py)", + "// Each value is annotated with how it was derived.", + "// Review all parameters against your acquisition protocol before use.", + "// Parameters marked TODO must be filled in manually.", + "// ================================================================", + "", + "// ── Resolution & depth ────────────────────────────────────────", + f"resolution = {res_um:<8} {res_comment}", + f"crop_interface_out_depth = {crop_depth:<6} {depth_comment}", + "", + "// ── Tile stitching ────────────────────────────────────────────", + f"stitch_overlap_fraction = {overlap} {overlap_src}", + "", + "// ── Slice registration ────────────────────────────────────────", + f"registration_slicing_interval_mm = {slicing} {slice_src}", + f"registration_allowed_drifting_mm = {drifting}", + " // ↑ Z-search range; typically half the slicing interval", + f"registration_max_translation = {max_trans_px}", + f" // ↑ {max_trans_comment}", + "", + "// ── Missing slice interpolation ───────────────────────────────", + f"interpolate_missing_slices = {interp_val} {interp_comment}", + "", + "// ── Shift outlier filtering (common-space alignment) ──────────", + f"max_shift_mm = {max_shift:.3f}", + " // IQR upper bound of normal inter-slice shifts.", + " // Acts as a floor on the IQR detection threshold.", + " // Keep well below the smallest re-homing event magnitude.", + f"common_space_max_step_mm = {max_step:.3f}", + " // 95th percentile of consecutive shift magnitude changes.", + " // Flags sudden per-step jumps that IQR alone may miss.", + ] + + if shift_stats["has_rehoming"]: + rh = shift_stats["rehoming_rows"] + slice_ids = ", ".join(str(int(r["moving_id"])) for _, r in rh.iterrows()) + lines += [ + "", + f"// ── 
Re-homing events detected at slices: {slice_ids} ──────────", + "// Large mid-acquisition motor jumps. Consider skip_error_transforms=false", + "// with stack_accumulate_translations=true to steer the viewing plane", + "// across re-homing boundaries via pairwise registration accumulation.", + ] + + return "\n".join(lines) + + +# ============================================================================= +# Main +# ============================================================================= + + +def main() -> None: + """Run function.""" + parser = _build_arg_parser() + args = parser.parse_args() + + out_dir = Path(args.output_dir) + if out_dir.exists() and not args.overwrite: + print(f"Output directory already exists: {out_dir} (use -f to overwrite)", file=sys.stderr) + sys.exit(1) + out_dir.mkdir(parents=True, exist_ok=True) + + # Load and analyse shifts + print(f"Loading shifts: {args.shifts_file}") + df = load_shifts(args.shifts_file) + shift_stats = analyze_shifts(df) + + # Optionally read raw data directory for acquisition metadata + acq: dict = {} + if args.data_dir: + print(f"Reading acquisition metadata: {args.data_dir}") + acq = analyze_metadata(args.data_dir, args.axial_res_um, args.n_calibration_slices) + if not acq.get("ok"): + print(f"Warning: {acq.get('error', 'could not read metadata')}", file=sys.stderr) + elif acq.get("warnings"): + for w in acq["warnings"]: + print(f"Warning: {w}", file=sys.stderr) + + # Build outputs + report = build_report(shift_stats, acq, args.shifts_file) + snippet = build_config_snippet(shift_stats, acq, args) + + sep = "-" * 62 + print() + print(report) + print() + print("SUGGESTED NEXTFLOW.CONFIG PARAMETERS") + print(sep) + print(snippet) + + report_path = out_dir / "param_estimation_report.txt" + config_path = out_dir / "suggested_params.config" + + report_path.write_text(report + "\n\n" + "SUGGESTED NEXTFLOW.CONFIG PARAMETERS\n" + sep + "\n" + snippet + "\n") + config_path.write_text(snippet + "\n") + + print(f"\nWrote:\n {report_path}\n {config_path}") + + +if __name__ == "__main__": + main() diff --git a/scripts/linum_axis_xyz_to_zyx.py b/scripts/linum_axis_xyz_to_zyx.py index deaf35ea..b7e9f068 100644 --- a/scripts/linum_axis_xyz_to_zyx.py +++ b/scripts/linum_axis_xyz_to_zyx.py @@ -18,8 +18,8 @@ def _build_arg_parser() -> argparse.ArgumentParser: p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) p.add_argument("input_image", type=Path, help="Full path to a .nii image, with axis in XYZ order.") p.add_argument("output_image", type=Path, help="Full path to the output .nii image, with axis in ZYX order") - p.add_argument("--resolution_xy", type=float, default=3.0, help="Lateral (xy) resolution in micron. [%(default)s]") - p.add_argument("--resolution_z", type=float, default=200, help="Axial (z) resolution in microns. [%(default)s]") + p.add_argument("--resolution_xy", type=float, default=3.0, help="Lateral (xy) resolution in micron. (default=%(default)s)") + p.add_argument("--resolution_z", type=float, default=200, help="Axial (z) resolution in microns. (default=%(default)s)") return p diff --git a/scripts/linum_export_manual_align.py b/scripts/linum_export_manual_align.py index 743fedd2..b1478535 100644 --- a/scripts/linum_export_manual_align.py +++ b/scripts/linum_export_manual_align.py @@ -306,6 +306,18 @@ def _build_arg_parser() -> argparse.ArgumentParser: "Defaults to slices_dir when not provided." 
), ) + p.add_argument( + "--interpolated_slices_remote_dir", + default=None, + help=( + "Absolute server path to the published interpolated-slice directory " + "(e.g. /scratch/workspace/sub-22/output/interpolate_missing_slice). " + "When set, per-slice remote paths for interpolated slices " + "(detected by '_interpolated' in the filename) point to this " + "directory instead of --slices_remote_dir. Stored in metadata.json " + "so the manual-align plugin can find interpolated slices on the server." + ), + ) p.add_argument( "--xy_overlap_px", type=int, @@ -320,6 +332,15 @@ def _build_arg_parser() -> argparse.ArgumentParser: return p +def _is_interpolated(path: Path) -> bool: + """Return True if this slice was produced by the interpolation step. + + Interpolated slices are named ``slice_z{N}_interpolated.ome.zarr`` + (the ``_interpolated`` suffix is set by ``linum_interpolate_missing_slice.py``). + """ + return "_interpolated" in path.name + + def _discover_slices(slices_dir: Path) -> dict[int, Path]: """Discover common-space slice files.""" pattern = re.compile(r"slice_z(\d+)") @@ -425,6 +446,11 @@ def main(argv: Any = None) -> None: # Use the explicitly provided server path when available; fall back to slices_dir. # Normalize to remove any double-slashes produced by a trailing slash in params.output. slices_remote_dir = str(Path(args.slices_remote_dir)) if args.slices_remote_dir else str(slices_dir) + # Separate remote dir for interpolated slices (e.g. interpolate_missing_slice/). + # Falls back to slices_remote_dir when not provided (backward-compatible). + interp_remote_dir = ( + str(Path(args.interpolated_slices_remote_dir)) if args.interpolated_slices_remote_dir else slices_remote_dir + ) workers = args.workers or max(1, (os.cpu_count() or 4) - 2) overlap_px = args.xy_overlap_px logger.info("XY overlap slab: %s voxels at pyramid level %s", overlap_px, args.level) @@ -537,6 +563,25 @@ def main(argv: Any = None) -> None: shutil.copy2(metrics_file, out_tdir / "pairwise_registration_metrics.json") # Write metadata + interpolated_ids = sorted(sid for sid, p in slice_paths.items() if _is_interpolated(p)) + # Per-slice remote paths: interpolated slices come from a separate publish + # directory (interpolate_missing_slice/) while real slices live in + # bring_to_common_space/. Storing an explicit path per slice lets the + # plugin's SSH reader always find the file regardless of its origin. + slice_remote_paths = { + str(sid): ( + f"{interp_remote_dir}/{p.name}" + if _is_interpolated(p) and interp_remote_dir != slices_remote_dir + else f"{slices_remote_dir}/{p.name}" + ) + for sid, p in slice_paths.items() + } + if interpolated_ids: + logger.info( + "Detected %s interpolated slice(s): %s", + len(interpolated_ids), + interpolated_ids, + ) metadata = { "pyramid_level": level, "n_slices": len(slice_paths), @@ -547,12 +592,21 @@ def main(argv: Any = None) -> None: "slice_filenames": {str(sid): p.name for sid, p in slice_paths.items()}, "axis_views": {"xz_dir": "aips_xz", "yz_dir": "aips_yz", "paired": bool(pairs)}, "n_transforms": sum(1 for tpath in transform_paths.values() if list(tpath.glob("*.tfm"))), - # Absolute server path to the published per-slice OME-Zarr files. + # Absolute server path to the published common-space OME-Zarr files. # Passed via --slices_remote_dir from the Nextflow process so it points to # the publishDir path rather than the work-directory staging path. 
# Used by the plugin to open persistent SSH+Python readers for interactive # cross-section navigation (slider to select Y or X position at full resolution). "slices_remote_dir": slices_remote_dir, + # Per-slice remote paths: accounts for interpolated slices that live in + # a different publish directory (interpolate_missing_slice/) than the + # common-space slices (bring_to_common_space/). Takes precedence over + # slices_remote_dir when the plugin resolves a slice path. + "slice_remote_paths": slice_remote_paths, + # IDs of slices that were synthesised by the interpolation step rather + # than acquired directly. The plugin can use this list to label them + # as "[interpolated]" and to warn the user that the content is synthetic. + "interpolated_slice_ids": interpolated_ids, "cross_section_level": level, } metadata_path = output_dir / "manual_align_metadata.json" diff --git a/scripts/linum_generate_pipeline_report.py b/scripts/linum_generate_pipeline_report.py index b00c23df..7d63075b 100644 --- a/scripts/linum_generate_pipeline_report.py +++ b/scripts/linum_generate_pipeline_report.py @@ -696,7 +696,7 @@ def discover_images( # Auto-detect overview from stack output directories if not provided via CLI if not images["overview"]: - for stack_dir_name in ("stack_motor", "stack", "normalize_z_intensity"): + for stack_dir_name in ("stack_motor", "stack", "correct_bias_field", "normalize_z_intensity"): d = input_dir / stack_dir_name if d.exists(): pngs = sorted(d.glob("*.png")) diff --git a/scripts/linum_normalize_intensities_per_slice.py b/scripts/linum_normalize_intensities_per_slice.py index 7d490bee..1630dd19 100644 --- a/scripts/linum_normalize_intensities_per_slice.py +++ b/scripts/linum_normalize_intensities_per_slice.py @@ -37,12 +37,6 @@ def _build_arg_parser() -> argparse.ArgumentParser: "--percentile_max", type=float, default=99.9, help="Values above the ith percentile will be clipped. [%(default)s]" ) p.add_argument("--sigma", type=float, default=1.0, help="Smoothing sigma for estimating the agarose mask. [%(default)s]") - p.add_argument( - "--min_contrast_fraction", - type=float, - default=0.1, - help="Minimum contrast as fraction of global max to prevent\nover-amplification of weak/bad slices. 
[%(default)s]", - ) p.add_argument("--use_gpu", default=True, action=argparse.BooleanOptionalAction, help="Use GPU acceleration if available.") p.add_argument("--verbose", action="store_true", help="Print GPU information.") return p @@ -75,9 +69,7 @@ def main() -> None: agarose_mask, otsu_threshold = get_agarose_mask(vol_data, args.sigma, use_gpu=use_gpu) - vol_normalized, background_thresholds = normalize_volume( - vol_data, agarose_mask, args.percentile_max, args.min_contrast_fraction - ) + vol_normalized, background_thresholds = normalize_volume(vol_data, agarose_mask, args.percentile_max) save_omezarr(da.from_array(vol_normalized), args.out_image, res, n_levels=3) @@ -91,7 +83,6 @@ def main() -> None: params={ "percentile_max": args.percentile_max, "sigma": args.sigma, - "min_contrast_fraction": args.min_contrast_fraction, "use_gpu": use_gpu, }, ) diff --git a/scripts/linum_resample_nifti.py b/scripts/linum_resample_nifti.py new file mode 100644 index 00000000..fce6c741 --- /dev/null +++ b/scripts/linum_resample_nifti.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 + +"""Resample a nifti volume to a given resolution.""" + +# Configure thread limits before numpy/scipy imports +import linumpy.config.threads # noqa: F401 + +import argparse +from pathlib import Path + +import numpy as np +import SimpleITK as sitk + + +def _build_arg_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + p.add_argument("input_volume", type=Path, help="Full path to a nifti volume.") + p.add_argument( + "output_volume", type=Path, default=None, help="Full path to the output nifti volume (must be .nii or .nii.gz)" + ) + p.add_argument("resolution", type=float, default=25.0, help="Output resolution in micron [%(default)s]") + + return p + + +def main() -> None: + """Run the NIfTI resampling script.""" + # Parse arguments + p = _build_arg_parser() + args = p.parse_args() + + # Parameters + input_volume = Path(args.input_volume) + output_volume = Path(args.output_volume) + extension = "" + if output_volume.name.endswith(".nii"): + extension = ".nii" + elif output_volume.name.endswith(".nii.gz"): + extension = ".nii.gz" + assert extension in [".nii", ".nii.gz"], "The output file must be a .nii or .nii.gz file." 
+ resolution = args.resolution / 1000.0 # Resolution in mm + + # Load the nifti volume + vol = sitk.ReadImage(str(input_volume)) + + # Set the scaling factor + transform = np.eye(3) + transform[0, 0] = resolution / vol.GetSpacing()[0] + transform[1, 1] = resolution / vol.GetSpacing()[1] + transform[2, 2] = resolution / vol.GetSpacing()[2] + + # Compute the output volume shape + old_shape = vol.GetSize() + new_shape = (int(old_shape[0] / transform[0, 0]), int(old_shape[1] / transform[1, 1]), int(old_shape[2] / transform[2, 2])) + new_spacing = (resolution, resolution, resolution) + + # Create the sampler + sampler = sitk.ResampleImageFilter() + sampler.SetSize(new_shape) + sampler.SetOutputOrigin(vol.GetOrigin()) + sampler.SetOutputDirection(vol.GetDirection()) + sampler.SetOutputSpacing(new_spacing) + sampler.SetOutputPixelType(sitk.sitkFloat32) + sampler.SetInterpolator(sitk.sitkLinear) + sampler.SetDefaultPixelValue(0) + warped = sampler.Execute(vol) + + # Save the output volume + output_volume.parent.mkdir(exist_ok=True, parents=True) + sitk.WriteImage(warped, str(output_volume)) + + +if __name__ == "__main__": + main() diff --git a/scripts/linum_stack_slices_2d.py b/scripts/linum_stack_slices_2d.py new file mode 100644 index 00000000..2862e271 --- /dev/null +++ b/scripts/linum_stack_slices_2d.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 + +"""Stack 2D mosaics into a single volume.""" + +# Configure thread limits before numpy/scipy imports +import linumpy.config.threads # noqa: F401 + +import argparse +import re +from pathlib import Path +from typing import cast + +import nibabel as nib +import numpy as np +import pandas +import zarr +from tqdm.auto import tqdm + +from linumpy.imaging.transform import apply_xy_shift + +# TODO: add option to give a folder + + +def _build_arg_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + p.add_argument( + "input_images", + nargs="+", + help=r"Full path to a 2D mosaic grid image (nifti files). " + r"Expects this format: '.*z(\d+)_.*' to extract the slice number.", + ) + p.add_argument("output_volume", help="Assembled volume filename (must be a .zarr)") + p.add_argument("--xy_shifts", required=False, default=None, help="CSV file containing the xy shifts for each slice") + p.add_argument("--resolution_xy", type=float, default=1.0, help="Lateral (xy) resolution in micron. [%(default)s]") + p.add_argument( + "--resolution_z", + type=float, + default=1.0, + help="Axial (z) resolution in micron, corresponding to the z distance between images in the stack. [%(default)s]", + ) + return p + + +def main() -> None: + """Run function.""" + # Parse arguments + p = _build_arg_parser() + args = p.parse_args() + + # Parameters + zarr_file = Path(args.output_volume) + assert zarr_file.suffix == ".zarr", "Output volume must be a zarr file." 
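+ +    # Slice indices are parsed from the filenames; e.g. a hypothetical +    # "mosaic_z03_stitched.nii" matches r".*z(\d+)_.*" and yields slice 3.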
+ +    # Detect the slice ids +    files = [Path(x) for x in args.input_images] +    files.sort() +    pattern = r".*z(\d+)_.*" +    slice_ids = [] +    for f in files: +        match = re.match(pattern, f.name) +        if match is None: +            raise ValueError(f"Could not extract a slice index from '{f.name}'") +        slice_ids.append(int(match.groups()[0])) +    n_slices = np.max(slice_ids) - np.min(slice_ids) + 1 +    z_offset = int(np.min(slice_ids))  # slice ids may not start at 0; offset them to volume indices + +    if args.xy_shifts is None: +        dx_list = np.zeros(len(files)) +        dy_list = np.zeros(len(files)) +    else: +        # Load the CSV containing the shift values for each slice +        df = pandas.read_csv(args.xy_shifts) +        dx_list = np.array(df["x_shift"].tolist()) +        dy_list = np.array(df["y_shift"].tolist()) + +    # Compute the volume shape +    xmin = [] +    xmax = [] +    ymin = [] +    ymax = [] + +    for i, f in enumerate(files): +        # Get this volume shape +        img = nib.load(f) +        shape = cast("nib.Nifti1Image", img).shape + +        # Get the cumulative shift +        if i == 0: +            xmin.append(0) +            xmax.append(shape[1]) +            ymin.append(0) +            ymax.append(shape[0]) +        else: +            dx = np.cumsum(dx_list)[i - 1] +            dy = np.cumsum(dy_list)[i - 1] +            xmin.append(-dx) +            xmax.append(-dx + shape[1]) +            ymin.append(-dy) +            ymax.append(-dy + shape[0]) + +    # Get the volume shape +    x0 = min(xmin) +    y0 = min(ymin) +    x1 = max(xmax) +    y1 = max(ymax) +    nx = int(x1 - x0) +    ny = int(y1 - y0) +    volume_shape = (n_slices, ny, nx) + +    # Create the zarr persistent array +    mosaic = zarr.open_array(zarr_file, mode="w", shape=volume_shape, dtype=np.float32, chunks=(1, 256, 256)) + +    # Loop over the slices +    for i in tqdm(range(len(files)), unit="slice", desc="Stacking slices"): +        # Load the slice +        f = files[i] +        z = slice_ids[i] - z_offset  # volume index for this slice +        img = cast("nib.Nifti1Image", nib.load(f)).get_fdata() + +        # Get the shift values for the slice +        if i == 0: +            dx = x0 +            dy = y0 +        else: +            dx = np.cumsum(dx_list)[i - 1] + x0 +            dy = np.cumsum(dy_list)[i - 1] + y0 + +        # Apply the shift +        img = apply_xy_shift(img, np.asarray(mosaic[0, :, :]), int(dx), int(dy)) + +        # Add the slice to the volume +        mosaic[z, :, :] = img + +        del img + + +if __name__ == "__main__": +    main() diff --git a/uv.lock b/uv.lock index 2177e86b..26d54760 100644 --- a/uv.lock +++ b/uv.lock @@ -1546,7 +1546,6 @@ dependencies = [ { name = "numpy" }, { name = "ome-zarr" }, { name = "pandas" }, - { name = "pandas-stubs" }, { name = "pqdm" }, { name = "pynrrd" }, { name = "scikit-image" }, @@ -1572,11 +1571,14 @@ gpu-cuda13 = [ [package.dev-dependencies] dev = [ + { name = "networkx-stubs" }, + { name = "pandas-stubs" }, { name = "pre-commit" }, { name = "pytest" }, { name = "pytest-console-scripts" }, { name = "pytest-cov" }, { name = "ruff" }, + { name = "scipy-stubs" }, { name = "ty" }, ] @@ -1596,7 +1598,6 @@ requires-dist = [ { name = "numpy" }, { name = "ome-zarr", specifier = ">=0.9.0" }, { name = "pandas" }, - { name = "pandas-stubs", specifier = "~=2.3.3" }, { name = "pqdm" }, { name = "pynrrd" }, { name = "scikit-image" }, @@ -1613,11 +1614,14 @@ provides-extras = ["gpu", "gpu-cuda13", "docs"] [package.metadata.requires-dev] dev = [ + { name = "networkx-stubs", specifier = ">=0.0.1" }, + { name = "pandas-stubs", specifier = "~=2.3.3" }, { name = "pre-commit", specifier = ">=4.5.1" }, { name = "pytest", specifier = ">=7.0.0" }, { name = "pytest-console-scripts" }, { name = "pytest-cov", specifier = ">=4.0.0" }, { name = "ruff", specifier = ">=0.11" }, + { name = "scipy-stubs", specifier = ">=1.17.1.4" }, { name = "ty", specifier = ">=0.0.27" }, ] @@ -2020,6 +2024,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9e/c9/b2622292ea83fbb4ec318f5b9ab867d0a28ab43c5717bb85b0a5f6b3b0a4/networkx-3.6.1-py3-none-any.whl", 
hash = "sha256:d47fbf302e7d9cbbb9e2555a0d267983d2aa476bac30e90dfbe5669bd57f3762", size = 2068504, upload-time = "2025-12-08T17:02:38.159Z" }, ] +[[package]] +name = "networkx-stubs" +version = "0.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "networkx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a9/d6/66a8bcc3724e0909e865672f5f2d3d89a46e900e43bf0296d51b90b604a0/networkx-stubs-0.0.1.tar.gz", hash = "sha256:1751cbc87898328f89d79476ec6363174c05f9e48592f0953cc1077188717a21", size = 37466, upload-time = "2023-02-24T16:11:31.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d4/ba87f68af1ab26392dc64ef7ba7ee261bb2ab1df9a9cca1695b4454b8a16/networkx_stubs-0.0.1-py3-none-any.whl", hash = "sha256:ce58dff9b9dcbfdf895d0fce20fa8a73f2e82e581004596b7552086b5bb91366", size = 105780, upload-time = "2023-02-24T16:11:29.893Z" }, +] + [[package]] name = "nibabel" version = "5.4.2" @@ -2151,6 +2167,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/58/78/548fb8e07b1a341746bfbecb32f2c268470f45fa028aacdbd10d9bc73aab/numpy-2.4.4-cp314-cp314t-win_arm64.whl", hash = "sha256:ba203255017337d39f89bdd58417f03c4426f12beed0440cfd933cb15f8669c7", size = 10566643, upload-time = "2026-03-29T13:21:34.339Z" }, ] +[[package]] +name = "numpy-typing-compat" +version = "20251206.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/5f/29fd5f29b0a5d96e2def96ecba3112fc330ecd16e8c97c2b332563c5e201/numpy_typing_compat-20251206.2.4.tar.gz", hash = "sha256:59882d23aaff054a2536da80564012cdce33487657be4d79c5925bb8705fcabc", size = 5011, upload-time = "2025-12-06T20:02:04.942Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/63/7c/5c2892e6bc0628a2ccf4e938e1e2db22794657ccb374672d66e20d73839e/numpy_typing_compat-20251206.2.4-py3-none-any.whl", hash = "sha256:a82e723bd20efaa4cf2886709d4264c144f1f2b609bda83d1545113b7e47a5b5", size = 6300, upload-time = "2025-12-06T20:01:57.578Z" }, +] + [[package]] name = "nvidia-cublas" version = "13.1.0.3" @@ -2321,6 +2349,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/99/81/4c41331f59e64b2a5eaec1c03819d5791e4dde321694fbda661f873184da/ome_zarr-0.16.0-py3-none-any.whl", hash = "sha256:a34a5558905af526afd343105804b6d291e97f949cb71f6b3d269a6a7112b599", size = 46410, upload-time = "2026-04-14T09:43:31.092Z" }, ] +[[package]] +name = "optype" +version = "0.17.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/81/9f/3b13bab05debf685678b8af004e46b8c67c6f98ffa08eaf5d33bcf162c16/optype-0.17.0.tar.gz", hash = "sha256:31351a1e64d9eba7bf67e14deefb286e85c66458db63c67dd5e26dd72e4664e5", size = 53484, upload-time = "2026-03-08T23:03:12.594Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/44/dca78187415947d1bb90b2ee2a58e47d9573528331e8dc6196996b53612a/optype-0.17.0-py3-none-any.whl", hash = "sha256:8c2d88ff13149454bcf6eb47502f80d288bc542e7238fcc412ac4d222c439397", size = 65854, upload-time = "2026-03-08T23:03:11.425Z" }, +] + +[package.optional-dependencies] +numpy = [ + { name = "numpy" }, + { name = "numpy-typing-compat" }, +] + [[package]] name = "orderly-set" version = "5.5.0" @@ -3501,6 +3547,18 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/07/39/338d9219c4e87f3e708f18857ecd24d22a0c3094752393319553096b98af/scipy-1.17.1-cp314-cp314t-win_arm64.whl", hash = "sha256:200e1050faffacc162be6a486a984a0497866ec54149a01270adc8a59b7c7d21", size = 25489165, upload-time = "2026-02-23T00:22:29.563Z" }, ] +[[package]] +name = "scipy-stubs" +version = "1.17.1.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "optype", extra = ["numpy"] }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d1/75/d944a11fca64aa84fbb4bfcf613b758319c6103cb30a304a0e9727009d62/scipy_stubs-1.17.1.4.tar.gz", hash = "sha256:cae00c5207aa62ceb4bcadea202d9fbbf002e958f9e4de981720436b8d5c1802", size = 396980, upload-time = "2026-04-13T11:46:54.528Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/92/f8/334aa5a7a482ea89cb14d92f6a4d9ffa1e193e733144d4d14c7ffcb33583/scipy_stubs-1.17.1.4-py3-none-any.whl", hash = "sha256:e6e5c390fb864745bc3d5f591de81f5cb4f84403857d4f660acb5b6339956f5b", size = 604752, upload-time = "2026-04-13T11:46:53.135Z" }, +] + [[package]] name = "setuptools" version = "81.0.0" diff --git a/workflows/preproc/nextflow.config b/workflows/preproc/nextflow.config index 7992de33..90c8d78e 100644 --- a/workflows/preproc/nextflow.config +++ b/workflows/preproc/nextflow.config @@ -58,13 +58,22 @@ params { } // ========================================================================= -// PROCESS CONFIGURATION +// CPU CONFIGURATION // ========================================================================= -// Per-task BLAS/OpenMP/JAX thread caps (sets process.beforeScript). -includeConfig '../shared/cpu_limits.config' +def getAvailableCpus() { + int totalCpus = Runtime.runtime.availableProcessors() + if (params.enable_cpu_limits == false) return totalCpus + if (params.max_cpus != null && params.max_cpus > 0) { + return Math.min(params.max_cpus as int, totalCpus) + } + return Math.max(1, totalCpus - (params.reserved_cpus ?: 2) as int) +} +// ========================================================================= +// PROCESS CONFIGURATION +// ========================================================================= process { - publishDir = { "${params.output}" } + publishDir = {"$params.output"} scratch = true stageInMode = 'symlink' stageOutMode = 'rsync' @@ -72,6 +81,33 @@ process { maxRetries = 3 afterScript = 'sleep 1' + // Thread limiting for Python scripts + beforeScript = { + if (params.enable_cpu_limits == false) return "" + + int maxCpus = getAvailableCpus() as int + int numProcesses = Math.max(1, (params.processes ?: 1) as int) + int threadsPerProcess = Math.max(1, (int)(maxCpus / numProcesses)) + + def envVars = [] + if (params.max_cpus != null && params.max_cpus > 0) { + envVars << "export LINUMPY_MAX_CPUS=${params.max_cpus as int}" + } else { + envVars << "export LINUMPY_RESERVED_CPUS=${(params.reserved_cpus ?: 2) as int}" + } + + envVars << "export OMP_NUM_THREADS=${threadsPerProcess}" + envVars << "export MKL_NUM_THREADS=${threadsPerProcess}" + envVars << "export OPENBLAS_NUM_THREADS=${threadsPerProcess}" + envVars << "export VECLIB_MAXIMUM_THREADS=${threadsPerProcess}" + envVars << "export NUMEXPR_NUM_THREADS=${threadsPerProcess}" + envVars << "export NUMBA_NUM_THREADS=${threadsPerProcess}" + envVars << "export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=${threadsPerProcess}" + envVars << "export XLA_FLAGS='--xla_cpu_multi_thread_eigen=false intra_op_parallelism_threads=${threadsPerProcess}'" + + return envVars.join('\n') + } + withName: 
"create_mosaic_grid" { // In GPU mode each job uses ~2 CPU threads (main + I/O prefetch); GPU // contention is capped by max_mosaic_forks. Set it to GPUs × jobs-per-GPU. diff --git a/workflows/preproc/preproc_rawtiles.nf b/workflows/preproc/preproc_rawtiles.nf index ff400709..40901014 100644 --- a/workflows/preproc/preproc_rawtiles.nf +++ b/workflows/preproc/preproc_rawtiles.nf @@ -9,7 +9,7 @@ nextflow.enable.dsl = 2 // Parameters are defined in nextflow.config process create_mosaic_grid { - publishDir "${params.output}", mode: 'link' // Hard link: no duplication, file stays accessible + publishDir "$params.output", mode: 'link' // Hard link: no duplication, file stays accessible input: tuple val(slice_id), path(tiles) @@ -35,7 +35,7 @@ process create_mosaic_grid { } process generate_aip { - publishDir "${params.output}/aips", mode: 'copy' + publishDir "$params.output/aips", mode: 'copy' input: tuple val(slice_id), path(mosaic_grid) @@ -55,7 +55,7 @@ process generate_aip { process generate_mosaic_preview { maxForks 1 - publishDir "${params.output}/previews", mode: 'copy' + publishDir "$params.output/previews", mode: 'copy' input: tuple val(slice_id), path(mosaic_grid) @@ -74,14 +74,14 @@ process generate_mosaic_preview { process estimate_xy_shifts_from_metadata { cpus params.processes - publishDir "${params.output}", mode: 'copy' + publishDir "$params.output", mode: 'copy' input: path(input_dir) output: path("shifts_xy.csv") script: """ - linum_estimate_xy_shift_from_metadata.py ${input_dir} shifts_xy.csv --n_processes ${params.processes} + linum_estimate_xy_shift_from_metadata.py ${input_dir} shifts_xy.csv --n_processes $params.processes """ stub: @@ -91,7 +91,7 @@ process estimate_xy_shifts_from_metadata { } process generate_slice_config { - publishDir "${params.output}", mode: 'copy' + publishDir "$params.output", mode: 'copy' input: tuple path(shifts_file), path(input_dir) @@ -116,17 +116,17 @@ process generate_slice_config { workflow { if (params.use_old_folder_structure) { - inputSlices = channel.fromPath("${params.input}/tile_x*_y*_z*/", type: 'dir') + inputSlices = channel.fromPath("$params.input/tile_x*_y*_z*/", type: 'dir') .map{path -> tuple(path.toString().substring(path.toString().length() - 2), path)} .groupTuple() } else { - inputSlices = channel.fromPath("${params.input}/**/tile_x*_y*_z*/", type: 'dir') + inputSlices = channel.fromPath("$params.input/**/tile_x*_y*_z*/", type: 'dir') .map{path -> tuple(path.toString().substring(path.toString().length() - 2), path)} .groupTuple() } - input_dir_channel = channel.fromPath("${params.input}", type: 'dir') + input_dir_channel = channel.fromPath("$params.input", type: 'dir') // Generate a 3D mosaic grid at full resolution create_mosaic_grid(inputSlices) diff --git a/workflows/reconst_2.5d/soct_2.5d_reconst.nf b/workflows/reconst_2.5d/soct_2.5d_reconst.nf index 5554c8b8..6f0086b5 100644 --- a/workflows/reconst_2.5d/soct_2.5d_reconst.nf +++ b/workflows/reconst_2.5d/soct_2.5d_reconst.nf @@ -172,13 +172,13 @@ workflow{ mosaic_grids_compensated = compensate_illumination_bias.out // Estimate the position the tile->position transform - estimate_position(mosaic_grids_compensated.map{it[1]}.collect()) + estimate_position(mosaic_grids_compensated.map{ item -> item[1] }.collect()) // Apply 2D stitching to all mosaic grids stitch_mosaic(mosaic_grids_compensated.combine(estimate_position.out)) // Stack the mosaic to get an estimate of the 3D volume - stack_mosaic(stitch_mosaic.out.map{it[1]}.collect(), shifts) + 
stack_mosaic(stitch_mosaic.out.map{ item -> item[1] }.collect(), shifts)
 
     // Compress the stack to zip for transfer
     compress_stack(stack_mosaic.out)
diff --git a/workflows/reconst_2.5d/soct_2.5d_reconst_beluga.config b/workflows/reconst_2.5d/soct_2.5d_reconst_beluga.config
index 98b14c2a..9b08a951 100644
--- a/workflows/reconst_2.5d/soct_2.5d_reconst_beluga.config
+++ b/workflows/reconst_2.5d/soct_2.5d_reconst_beluga.config
@@ -1,14 +1,11 @@
-manifest {
-    nextflowVersion = '>= 23.10'
-}
-
+manifest.nextflowVersion = '<= 23.10'
 singularity{
     autoMounts = true
     enabled = true
 }
 
 process {
-    publishDir = { "${params.output_directory}/${task.process}" }
+    publishDir = {"$params.output_directory/$task.process"}
     scratch = true
     errorStrategy = { task.attempt <= 3 ? 'retry' : 'ignore' }
     maxRetries = 3
diff --git a/workflows/reconst_2.5d/soct_2.5d_reconst_docker.config b/workflows/reconst_2.5d/soct_2.5d_reconst_docker.config
index fd56f3e5..eff94f80 100644
--- a/workflows/reconst_2.5d/soct_2.5d_reconst_docker.config
+++ b/workflows/reconst_2.5d/soct_2.5d_reconst_docker.config
@@ -1,6 +1,4 @@
-manifest {
-    nextflowVersion = '>= 23.10'
-}
+manifest.nextflowVersion = '<= 24.04'
 
 process {
     scratch=true
diff --git a/workflows/reconst_3d/lib/Helpers.groovy b/workflows/reconst_3d/lib/Helpers.groovy
deleted file mode 100644
index 6e60bc3e..00000000
--- a/workflows/reconst_3d/lib/Helpers.groovy
+++ /dev/null
@@ -1,303 +0,0 @@
-import nextflow.Nextflow
-import org.slf4j.Logger
-import org.slf4j.LoggerFactory
-
-/**
- * Helpers for the 3D reconstruction pipeline.
- *
- * Auto-loaded by Nextflow from `workflows/reconst_3d/lib/`. All methods are
- * static; impure ones take `params` (the Nextflow params Map) as an explicit
- * argument so they're easy to test and grep for.
- *
- * Categories:
- *   - Slice ID parsing     : extractSliceId, extractSliceIdInt, toSliceTuple,
- *                            extractSliceIdsString, detectSingleGaps,
- *                            parseDebugSlices
- *   - Path utilities       : normalizePath, joinPath, resolveSubjectName,
- *                            partitionSlicesAndTransforms
- *   - Slice config parsing : parseSliceConfig
- *   - CLI flag builders    : pyramidArgs, annotatedScreenshotArgs,
- *                            diagEnabled, stack*Args (option builders for
- *                            linum_stack_slices_motor.py)
- *
- * Boolean parsing in parseSliceConfig is kept in lockstep with
- * `linumpy.io.slice_config._parse_bool`; edit there too when the canonical
- * schema changes.
- */
-class Helpers {
-
-    private static final Logger log = LoggerFactory.getLogger('Helpers')
-
-    // -------------------------------------------------------------------------
-    // Slice ID parsing
-    // -------------------------------------------------------------------------
-
-    /** Extract z## slice ID string from a filename; returns "unknown" if not found. */
-    static String extractSliceId(filename) {
-        def name = filename instanceof java.nio.file.Path ? filename.fileName.toString() : filename.toString()
-        def matcher = name =~ /z(\d+)/
-        return matcher ? matcher[0][1] : 'unknown'
-    }
-
-    /** Extract slice ID as integer; returns -1 if not found. */
-    static int extractSliceIdInt(filename) {
-        def id = extractSliceId(filename)
-        return id == 'unknown' ? -1 : id.toInteger()
-    }
-
-    /** Return [slice_id, file] for a given file path. */
-    static List toSliceTuple(file_path) {
-        return [extractSliceId(file_path), file_path]
-    }
-
-    /** Sorted, comma-separated slice IDs from a list of files (e.g. "01,02,03,05").
*/ - static String extractSliceIdsString(List fileList) { - fileList - .collect { f -> extractSliceId(f) } - .findAll { s -> s != 'unknown' } - .sort { s -> s.toInteger() } - .join(',') - } - - /** - * Detect single-slice gaps in a sorted slice list. - * Returns a list of [missingId, beforeId, afterId] tuples (zero-padded). - */ - static List detectSingleGaps(List sliceList) { - def gaps = [] - def sliceIds = sliceList - .collect { f -> extractSliceIdInt(f) } - .findAll { n -> n >= 0 } - .sort() - - sliceIds.eachWithIndex { current, i -> - if (i >= sliceIds.size() - 1) return - def next = sliceIds[i + 1] - def gap = next - current - if (gap == 2) { - def missingId = String.format('%02d', current + 1) - def beforeId = String.format('%02d', current) - def afterId = String.format('%02d', next) - gaps.add([missingId, beforeId, afterId]) - log.info("Gap detected: slice ${missingId} (between ${beforeId} and ${afterId})") - } else if (gap > 2) { - log.warn("Multiple missing slices between ${current} and ${next} - cannot interpolate") - } - } - return gaps - } - - /** - * Parse `params.debug_slices`; supports "25,26", "25-29", or "25,27-29". - * Returns a Set of zero-padded slice IDs, or null if blank. - */ - static Set parseDebugSlices(String debugSlicesStr) { - if (!debugSlicesStr || debugSlicesStr.trim().isEmpty()) return null - def sliceIds = [] as Set - debugSlicesStr.split(',').each { rawPart -> - def part = rawPart.trim() - if (part.contains('-')) { - def rangeParts = part.split('-') - if (rangeParts.size() == 2) { - def start = rangeParts[0].trim().toInteger() - def end = rangeParts[1].trim().toInteger() - (start..end).each { n -> sliceIds.add(String.format('%02d', n)) } - } - } else { - sliceIds.add(String.format('%02d', part.toInteger())) - } - } - return sliceIds - } - - // ------------------------------------------------------------------------- - // Path utilities - // ------------------------------------------------------------------------- - - /** Remove duplicate and trailing slashes from a path string. */ - static String normalizePath(String path) { - return path.replaceAll('/+', '/').replaceAll('/$', '') - } - - /** Join path components safely. */ - static String joinPath(String base, String filename) { - return "${normalizePath(base)}/${filename}" - } - - /** - * Resolve subject_name from inputDir, with this fallback order: - * 1. `overrideName` if non-empty (typically `params.subject_name`) - * 2. `sub-XX` token anywhere in the path - * 3. parent of common input dirnames (`mosaic-grids`, `mosaics`, ...) - * 4. leaf directory name - */ - static String resolveSubjectName(String inputDir, String overrideName) { - if (overrideName) return overrideName - def subMatch = inputDir.split('/').find { part -> part ==~ /sub-\w+/ } - if (subMatch) return subMatch - def f = new File(inputDir) - def dirName = f.getName() - if (dirName in ['mosaic-grids', 'mosaics', 'mosaic_grids', 'input', 'data']) { - return f.getParentFile()?.getName() ?: dirName - } - return dirName - } - - /** - * Partition a flat list of staged files into [slices, transforms]: items - * ending in `.ome.zarr` go to slices; everything else (excluding `*.json` - * metrics) goes to transforms. 
- */ - static List partitionSlicesAndTransforms(List items) { - def slices = items.findAll { f -> f.getName().endsWith('.ome.zarr') } - def transforms = items.findAll { f -> - def n = f.getName() - !n.endsWith('.ome.zarr') && !n.endsWith('.json') - } - return [slices, transforms] - } - - // ------------------------------------------------------------------------- - // Slice config parsing - // ------------------------------------------------------------------------- - - /** - * Parse slice_config.csv, returning [use: Set, excluded: Set]. - * Truthy values: true, 1, yes, y, t (case-insensitive). - */ - static Map parseSliceConfig(String configPath) { - def slicesToUse = [] as Set - def slicesExcluded = [] as Set - def f = new File(configPath) - - if (!f.exists()) { - Nextflow.error("Slice config file not found: ${configPath}") - } - - def truthy = ['true', '1', 'yes', 'y', 't'] as Set - f.withReader { reader -> - reader.readLine() // Skip header - reader.eachLine { line -> - def parts = line.split(',') - if (parts.size() >= 2) { - def sliceId = parts[0].trim() - def use = parts[1].trim().toLowerCase() - if (truthy.contains(use)) slicesToUse.add(sliceId) - else slicesExcluded.add(sliceId) - } - } - } - return [use: slicesToUse, excluded: slicesExcluded] - } - - // ------------------------------------------------------------------------- - // CLI flag builders (params-coupled) - // ------------------------------------------------------------------------- - - /** True when the named per-stage diagnostic flag (or `diagnostic_mode`) is set. */ - static boolean diagEnabled(Map params, String flag) { - return params.diagnostic_mode || params[flag] - } - - /** Annotated-screenshot CLI flags shared by `stack` and `correct_bias_field`. */ - static String annotatedScreenshotArgs(Map params, String sliceIdsStr) { - def show_lines = params.annotated_show_lines ? '--show_lines' : '' - def orient = params.ras_input_orientation?.toString()?.trim()?.replace("'", '') ?: '' - def orientation = orient ? "--orientation ${orient}" : '' - return "--slice_ids \"${sliceIdsStr}\" --label_every ${params.annotated_label_every} ${show_lines} ${orientation} --crop_to_tissue" - } - - /** - * Build pyramid-related CLI arguments from `params.pyramid_*` settings. - * `nLevelsFlag` names the downstream flag (`--n_levels` for most scripts, - * `--n-levels` for `linum_align_to_ras.py`). - */ - static String pyramidArgs(Map params, String nLevelsFlag = '--n_levels') { - def opts = '' - if (params.pyramid_n_levels != null) { - opts += " ${nLevelsFlag} ${params.pyramid_n_levels}" - } else { - def base_res = params.resolution > 0 ? params.resolution : 10 - def valid = params.pyramid_resolutions.findAll { r -> r >= base_res }.sort() - if (!valid.contains(base_res)) valid = [base_res] + valid - opts += ' --pyramid_resolutions ' + valid.collect { r -> r.toString() }.join(' ') - opts += params.pyramid_make_isotropic ? ' --make_isotropic' : ' --no_isotropic' - } - return opts - } - - // ------------------------------------------------------------------------- - // `stack` option builders - // - // Split by concern so each `if` group lives next to the related parameters - // rather than as one 65-line imperative blob. 
- // ------------------------------------------------------------------------- - - static String stackBlendingArgs(Map params) { - def opts = '' - if (params.stack_blend_enabled) opts += ' --blend' - if (params.blend_refinement_px > 0) opts += " --blend_refinement_px ${params.blend_refinement_px}" - if (params.stack_blend_z_refine_vox > 0) opts += " --blend_z_refine_vox ${params.stack_blend_z_refine_vox}" - if (params.blend_z_refine_min_confidence > 0) opts += " --blend_z_refine_min_confidence ${params.blend_z_refine_min_confidence}" - return opts - } - - static String stackZMatchingArgs(Map params) { - def opts = '' - opts += " --slicing_interval_mm ${params.registration_slicing_interval_mm}" - opts += " --search_range_mm ${params.registration_allowed_drifting_mm}" - opts += " --moving_z_first_index ${params.moving_slice_first_index}" - if (params.use_expected_z_overlap) opts += ' --use_expected_overlap' - if (params.z_overlap_min_corr > 0) opts += " --z_overlap_min_corr ${params.z_overlap_min_corr}" - if (params.analyze_shifts) opts += ' --output_z_matches z_matches.csv' - opts += ' --output_stacking_decisions stacking_decisions.csv' - return opts - } - - static String stackPairwiseTransformArgs(Map params) { - if (!params.apply_pairwise_transforms) return '' - def opts = ' --transforms_dir transforms' - if (params.apply_rotation_only) opts += ' --rotation_only' - opts += " --max_rotation_deg ${params.max_rotation_deg}" - if (params.load_transform_min_zcorr > 0) opts += " --load_min_zcorr ${params.load_transform_min_zcorr}" - if (params.load_transform_max_rotation > 0) opts += " --load_max_rotation ${params.load_transform_max_rotation}" - if (params.skip_error_transforms) opts += ' --skip_error_transforms' - if (params.skip_warning_transforms) opts += ' --skip_warning_transforms' - opts += " --confidence_high ${params.transform_confidence_high}" - opts += " --confidence_low ${params.transform_confidence_low}" - return opts - } - - /** Drives per-slice use/auto_excluded → motor-only fallback in stack. */ - static String stackSliceConfigArg(slice_config) { - return slice_config.name != 'NO_SLICE_CONFIG' ? " --slice_config ${slice_config}" : '' - } - - /** - * Skipped when refine_manual_transforms baked manual corrections into - * the transforms directory; passing them again would double-apply. - */ - static String stackManualOverrideArg(Map params) { - return (params.manual_transforms_dir && !params.refine_manual_transforms) - ? " --manual_transforms_dir ${params.manual_transforms_dir}" - : '' - } - - static String stackCumulativeArgs(Map params) { - if (!params.stack_accumulate_translations) return '' - def opts = ' --accumulate_translations' - if (params.stack_confidence_weight_translations) opts += ' --confidence_weight_translations' - if (params.stack_max_cumulative_drift_px > 0) opts += " --max_cumulative_drift_px ${params.stack_max_cumulative_drift_px}" - // > 0 filters clamped translations; 0 = keep all (preserves re-homing boundary corrections). 
- if (params.stack_max_pairwise_translation > 0) opts += " --max_pairwise_translation ${params.stack_max_pairwise_translation}" - return opts - } - - static String stackSmoothingArgs(Map params) { - def opts = '' - if (params.stack_smooth_window > 0) opts += " --smooth_window ${params.stack_smooth_window}" - if (params.stack_translation_smooth_sigma > 0) opts += " --translation_smooth_sigma ${params.stack_translation_smooth_sigma}" - if (params.stack_translation_min_zcorr > 0) opts += " --translation_min_zcorr ${params.stack_translation_min_zcorr}" - return opts - } -} diff --git a/workflows/reconst_3d/nextflow.config b/workflows/reconst_3d/nextflow.config index 9a4ae540..e9a91775 100644 --- a/workflows/reconst_3d/nextflow.config +++ b/workflows/reconst_3d/nextflow.config @@ -70,20 +70,6 @@ params { // exceeds this budget. stitch_global_transform_seed = 0 // Random seed for pair sub-sampling - // ========================================================================= - // AUTOMATIC SLICE QUALITY ASSESSMENT - // Runs linum_assess_slice_quality on normalized slices and writes a - // slice_config.csv that marks degraded slices for exclusion from the - // common-space step. Enabled by setting auto_assess_quality = true. - // ========================================================================= - auto_assess_quality = false // Run quality assessment on normalized slices - auto_assess_min_quality = 0.3 // Exclude slices with quality score below this - auto_assess_exclude_first = 1 // Exclude first N calibration slices automatically - auto_assess_roi_size = 1024 // Center-crop size in XY (pixels) for quality metrics. - // Mosaic grids are single-resolution, so this is the - // primary speed control: 1024×1024 loads ~2 MB per - // plane vs ~5 GB at full res. 0 = full plane. - // ========================================================================= // COMMON SPACE ALIGNMENT // ========================================================================= @@ -145,6 +131,20 @@ params { interpolation_min_ncc_improvement = 0.05 // Minimum post-reg NCC improvement to accept the transform; // below this the method falls back to weighted average. + // ========================================================================= + // AUTOMATIC SLICE QUALITY ASSESSMENT + // Runs linum_assess_slice_quality on normalized slices and writes a + // slice_config.csv that marks degraded slices for exclusion from the + // common-space step. Enabled by setting auto_assess_quality = true. + // ========================================================================= + auto_assess_quality = false // Run quality assessment on normalized slices + auto_assess_min_quality = 0.3 // Exclude slices with quality score below this + auto_assess_exclude_first = 1 // Exclude first N calibration slices automatically + auto_assess_roi_size = 1024 // Center-crop size in XY (pixels) for quality metrics. + // Mosaic grids are single-resolution, so this is the + // primary speed control: 1024×1024 loads ~2 MB per + // plane vs ~5 GB at full res. 0 = full plane. 
+ // ========================================================================= // PAIRWISE REGISTRATION // ========================================================================= @@ -365,17 +365,22 @@ params { } // ========================================================================= -// PROCESS CONFIGURATION +// CPU CONFIGURATION // ========================================================================= -// Per-task BLAS/OpenMP/JAX thread caps (sets process.beforeScript). -includeConfig '../shared/cpu_limits.config' +def getAvailableCpus() { + int totalCpus = Runtime.runtime.availableProcessors() + if (params.enable_cpu_limits == false) return totalCpus + if (params.max_cpus != null && params.max_cpus > 0) { + return Math.min(params.max_cpus as int, totalCpus) + } + return Math.max(1, totalCpus - (params.reserved_cpus ?: 2) as int) +} +// ========================================================================= +// PROCESS CONFIGURATION +// ========================================================================= process { - // Default publish location for processes that don't override it. - // Per-slice intermediates (resample/focal/illum) override below to keep - // their slice-id grouping; pattern-restricted publishers (e.g. only - // *_metrics.json) override inline in the process body. - publishDir = [path: { "${params.output}/${task.process}" }, mode: 'copy'] + publishDir = {"$params.output/$slice_id/$task.process"} scratch = true errorStrategy = { task.attempt <= 2 ? 'retry' : 'ignore' } maxRetries = 2 @@ -383,10 +388,32 @@ process { stageOutMode = 'rsync' afterScript = 'sleep 1' - // Per-slice preproc intermediates: keep `/` grouping and use - // symlinks (zarr arrays are large; copying them per-stage doubles disk). - withName: "resample_mosaic_grid|fix_focal_curvature|fix_illumination" { - publishDir = [path: { "${params.output}/${slice_id}/${task.process}" }, mode: 'symlink'] + // Thread limiting for Python scripts + beforeScript = { + if (params.enable_cpu_limits == false) return "" + + int maxCpus = getAvailableCpus() as int + int numProcesses = Math.max(1, (params.processes ?: 1) as int) + int threadsPerProcess = Math.max(1, (int)(maxCpus / numProcesses)) + + def envVars = [] + if (params.max_cpus != null && params.max_cpus > 0) { + envVars << "export LINUMPY_MAX_CPUS=${params.max_cpus as int}" + } else { + envVars << "export LINUMPY_RESERVED_CPUS=${(params.reserved_cpus ?: 2) as int}" + } + + // Thread limiting environment variables + envVars << "export OMP_NUM_THREADS=${threadsPerProcess}" + envVars << "export MKL_NUM_THREADS=${threadsPerProcess}" + envVars << "export OPENBLAS_NUM_THREADS=${threadsPerProcess}" + envVars << "export VECLIB_MAXIMUM_THREADS=${threadsPerProcess}" + envVars << "export NUMEXPR_NUM_THREADS=${threadsPerProcess}" + envVars << "export NUMBA_NUM_THREADS=${threadsPerProcess}" + envVars << "export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=${threadsPerProcess}" + envVars << "export XLA_FLAGS='--xla_cpu_multi_thread_eigen=false intra_op_parallelism_threads=${threadsPerProcess}'" + + return envVars.join('\n') } withName: "resample_mosaic_grid" { diff --git a/workflows/reconst_3d/soct_3d_reconst.nf b/workflows/reconst_3d/soct_3d_reconst.nf index 65a8f361..908f816f 100644 --- a/workflows/reconst_3d/soct_3d_reconst.nf +++ b/workflows/reconst_3d/soct_3d_reconst.nf @@ -8,12 +8,254 @@ nextflow.enable.dsl = 2 * Output: 3D OME-Zarr volume with multi-resolution pyramid * * Channel patterns and authoring conventions: docs/NEXTFLOW_WORKFLOWS.md - * - * Helper 
functions (slice ID parsing, path utilities, CLI flag builders, stack - * option builders) live in ./lib/Helpers.groovy and are auto-loaded by - * Nextflow. Call sites use the `Helpers.` prefix. */ +// ============================================================================= +// HELPER FUNCTIONS +// ============================================================================= + +// Annotated-screenshot CLI flags shared by `stack` and `correct_bias_field`. +def annotatedScreenshotArgs(String sliceIdsStr) { + def show_lines = params.annotated_show_lines ? '--show_lines' : '' + def orient = params.ras_input_orientation?.trim()?.replace("'", '') ?: '' + def orientation = orient ? "--orientation ${orient}" : '' + return "--slice_ids \"${sliceIdsStr}\" --label_every ${params.annotated_label_every} ${show_lines} ${orientation} --crop_to_tissue" +} + +// True when the named per-stage diagnostic flag (or `diagnostic_mode`) is set. +def diagEnabled(String flag) { params.diagnostic_mode || params[flag] } + +// Resolve subject_name from inputDir when not explicitly set: +// 1. `params.subject_name` if provided +// 2. `sub-XX` token anywhere in the path +// 3. parent of common input dirnames (`mosaic-grids`, `mosaics`, ...) +// 4. leaf directory name +def resolveSubjectName(String inputDir) { + if (params.subject_name) return params.subject_name + def subMatch = inputDir.split('/').find { part -> part ==~ /sub-\w+/ } + if (subMatch) return subMatch + def inputFile = file(inputDir) + def dirName = inputFile.getName() + if (dirName in ['mosaic-grids', 'mosaics', 'mosaic_grids', 'input', 'data']) { + return inputFile.getParent()?.getName() ?: dirName + } + return dirName +} + +// --------------------------------------------------------------------------- +// `stack` option builders. Split by concern so each `if` group lives next to +// the related parameters rather than as one 65-line imperative blob. 
+// --------------------------------------------------------------------------- + +def stackBlendingArgs() { + def opts = "" + if (params.stack_blend_enabled) opts += " --blend" + if (params.blend_refinement_px > 0) opts += " --blend_refinement_px ${params.blend_refinement_px}" + if (params.stack_blend_z_refine_vox > 0) opts += " --blend_z_refine_vox ${params.stack_blend_z_refine_vox}" + if (params.blend_z_refine_min_confidence > 0) opts += " --blend_z_refine_min_confidence ${params.blend_z_refine_min_confidence}" + return opts +} + +def stackZMatchingArgs() { + def opts = "" + opts += " --slicing_interval_mm ${params.registration_slicing_interval_mm}" + opts += " --search_range_mm ${params.registration_allowed_drifting_mm}" + opts += " --moving_z_first_index ${params.moving_slice_first_index}" + if (params.use_expected_z_overlap) opts += " --use_expected_overlap" + if (params.z_overlap_min_corr > 0) opts += " --z_overlap_min_corr ${params.z_overlap_min_corr}" + if (params.analyze_shifts) opts += " --output_z_matches z_matches.csv" + opts += " --output_stacking_decisions stacking_decisions.csv" + return opts +} + +def stackPairwiseTransformArgs() { + if (!params.apply_pairwise_transforms) return "" + def opts = " --transforms_dir transforms" + if (params.apply_rotation_only) opts += " --rotation_only" + opts += " --max_rotation_deg ${params.max_rotation_deg}" + if (params.load_transform_min_zcorr > 0) opts += " --load_min_zcorr ${params.load_transform_min_zcorr}" + if (params.load_transform_max_rotation > 0) opts += " --load_max_rotation ${params.load_transform_max_rotation}" + if (params.skip_error_transforms) opts += " --skip_error_transforms" + if (params.skip_warning_transforms) opts += " --skip_warning_transforms" + opts += " --confidence_high ${params.transform_confidence_high}" + opts += " --confidence_low ${params.transform_confidence_low}" + return opts +} + +// Drives per-slice use/auto_excluded → motor-only fallback in stack. +def stackSliceConfigArg(slice_config) { + return slice_config.name != 'NO_SLICE_CONFIG' ? " --slice_config ${slice_config}" : "" +} + +// Skipped when refine_manual_transforms baked manual corrections into the +// transforms directory; passing them again would double-apply. +def stackManualOverrideArg() { + return (params.manual_transforms_dir && !params.refine_manual_transforms) + ? " --manual_transforms_dir ${params.manual_transforms_dir}" + : "" +} + +def stackCumulativeArgs() { + if (!params.stack_accumulate_translations) return "" + def opts = " --accumulate_translations" + if (params.stack_confidence_weight_translations) opts += " --confidence_weight_translations" + if (params.stack_max_cumulative_drift_px > 0) opts += " --max_cumulative_drift_px ${params.stack_max_cumulative_drift_px}" + // > 0 filters clamped translations; 0 = keep all (preserves re-homing boundary corrections). + if (params.stack_max_pairwise_translation > 0) opts += " --max_pairwise_translation ${params.stack_max_pairwise_translation}" + return opts +} + +def stackSmoothingArgs() { + def opts = "" + if (params.stack_smooth_window > 0) opts += " --smooth_window ${params.stack_smooth_window}" + if (params.stack_translation_smooth_sigma > 0) opts += " --translation_smooth_sigma ${params.stack_translation_smooth_sigma}" + if (params.stack_translation_min_zcorr > 0) opts += " --translation_min_zcorr ${params.stack_translation_min_zcorr}" + return opts +} + +// Build pyramid-related CLI arguments from `params.pyramid_*` settings. 
+// `nLevelsFlag` names the downstream flag (`--n_levels` for most scripts, +// `--n-levels` for `linum_align_to_ras.py`). +def pyramidArgs(nLevelsFlag = '--n_levels') { + def opts = "" + if (params.pyramid_n_levels != null) { + opts += " ${nLevelsFlag} ${params.pyramid_n_levels}" + } else { + def base_res = params.resolution > 0 ? params.resolution : 10 + def valid = params.pyramid_resolutions.findAll { r -> r >= base_res }.sort() + if (!valid.contains(base_res)) valid = [base_res] + valid + opts += " --pyramid_resolutions " + valid.collect { r -> r.toString() }.join(' ') + opts += params.pyramid_make_isotropic ? " --make_isotropic" : " --no_isotropic" + } + return opts +} + +// Extract z## slice ID string from a filename; returns "unknown" if not found. +def extractSliceId(filename) { + def name = filename instanceof Path ? filename.getName() : filename.toString() + def matcher = name =~ /z(\d+)/ + return matcher ? matcher[0][1] : "unknown" +} + +// Extract slice ID as integer; returns -1 if not found. +def extractSliceIdInt(filename) { + def id = extractSliceId(filename) + return id == "unknown" ? -1 : id.toInteger() +} + +// Return tuple(slice_id, file) for a given file path. +def toSliceTuple(file_path) { + tuple(extractSliceId(file_path), file_path) +} + +// Return sorted, comma-separated slice IDs from a list of files (e.g. "01,02,03,05"). +def extractSliceIdsString(fileList) { + fileList + .collect { f -> extractSliceId(f) } + .findAll { s -> s != "unknown" } + .sort { s -> s.toInteger() } + .join(',') +} + +// Remove duplicate and trailing slashes from a path string. +def normalizePath(path) { + return path.replaceAll('/+', '/').replaceAll('/$', '') +} + +// Join path components safely. +def joinPath(base, filename) { + return "${normalizePath(base)}/${filename}" +} + +// Parse a slice_config.csv and return a map with the sets of slice IDs +// marked for use vs. excluded: `[use: Set, excluded: Set]`. +// Boolean parsing is kept in lockstep with `linumpy.io.slice_config._parse_bool` +// (true / 1 / yes / y / t, case-insensitive). Edit there when the canonical +// schema changes — Nextflow can't depend on Python at workflow-init time. +def parseSliceConfig(configPath) { + def slicesToUse = [] as Set + def slicesExcluded = [] as Set + def file = new File(configPath) + + if (!file.exists()) error("Slice config file not found: ${configPath}") + + def truthy = ['true', '1', 'yes', 'y', 't'] as Set + file.withReader { reader -> + reader.readLine() // Skip header + reader.eachLine { line -> + def parts = line.split(',') + if (parts.size() >= 2) { + def sliceId = parts[0].trim() + def use = parts[1].trim().toLowerCase() + if (truthy.contains(use)) slicesToUse.add(sliceId) + else slicesExcluded.add(sliceId) + } + } + } + + return [use: slicesToUse, excluded: slicesExcluded] +} + +// Detect single-slice gaps in a sorted slice list. +// Returns a list of [missingId, beforeId, afterId] tuples. 
+def detectSingleGaps(sliceList) { + def gaps = [] + def sliceIds = sliceList + .collect { f -> extractSliceIdInt(f) } + .findAll { n -> n >= 0 } + .sort() + + sliceIds.eachWithIndex { current, i -> + if (i >= sliceIds.size() - 1) { + return + } + def next = sliceIds[i + 1] + def gap = next - current + + if (gap == 2) { + def missingId = String.format("%02d", current + 1) + def beforeId = String.format("%02d", current) + def afterId = String.format("%02d", next) + gaps.add([missingId, beforeId, afterId]) + log.info "Gap detected: slice ${missingId} (between ${beforeId} and ${afterId})" + } else if (gap > 2) { + log.warn "Multiple missing slices between ${current} and ${next} - cannot interpolate" + } + } + return gaps +} + +// Partition a flat list of staged files into (slices, transforms): .ome.zarr +// items go to slices, everything else (excluding *.json metrics) to +// transforms. Used by export_manual_align / refine_manual_transforms inputs. +def partitionSlicesAndTransforms(items) { + def slices = items.findAll { f -> f.getName().endsWith('.ome.zarr') } + def transforms = items.findAll { f -> def n = f.getName(); !n.endsWith('.ome.zarr') && !n.endsWith('.json') } + return tuple(slices, transforms) +} + +// Parse debug_slices parameter; supports "25,26", "25-29", or "25,27-29". +// Returns a set of zero-padded slice IDs, or null if not specified. +def parseDebugSlices(debugSlicesStr) { + if (!debugSlicesStr || debugSlicesStr.trim().isEmpty()) return null + + def sliceIds = [] as Set + debugSlicesStr.split(',').each { part -> + part = part.trim() + if (part.contains('-')) { + def rangeParts = part.split('-') + if (rangeParts.size() == 2) { + def start = rangeParts[0].trim().toInteger() + def end = rangeParts[1].trim().toInteger() + (start..end).each { n -> sliceIds.add(String.format("%02d", n)) } + } + } else { + sliceIds.add(String.format("%02d", part.toInteger())) + } + } + return sliceIds +} + // ============================================================================= // SUB-WORKFLOW INCLUDES // ============================================================================= @@ -40,7 +282,7 @@ include { // ----------------------------------------------------------------------------- process README { - publishDir { "${params.output}/${task.process}" }, mode: 'move' + publishDir "${params.output}/${task.process}", mode: 'move' output: path "readme.txt" @@ -66,6 +308,8 @@ process README { } process analyze_shifts { + publishDir "${params.output}/${task.process}", mode: 'copy' + input: path(shifts_file) @@ -87,7 +331,7 @@ process analyze_shifts { } process generate_report { - publishDir "${params.output}", mode: 'copy' + publishDir "$params.output", mode: 'copy' input: tuple path(zarr), path(zip), path(png), path(annotated_png) @@ -183,6 +427,8 @@ process fix_illumination { // ----------------------------------------------------------------------------- process estimate_global_transform { + publishDir "${params.output}/${task.process}", mode: 'copy' + input: path("pool_input/*") path(slice_config) @@ -225,7 +471,7 @@ process estimate_global_transform { } process stitch_3d_with_refinement { - publishDir { "${params.output}/${task.process}" }, mode: 'copy', pattern: "*_metrics.json" + publishDir "${params.output}/${task.process}", mode: 'copy', pattern: "*_metrics.json" input: tuple val(slice_id), path(mosaic_grid), path(input_transform) @@ -278,7 +524,7 @@ process generate_stitch_preview { // ----------------------------------------------------------------------------- process 
beam_profile_correction { - publishDir { "${params.output}/${task.process}" }, mode: 'copy', pattern: "*_metrics.json" + publishDir "${params.output}/${task.process}", mode: 'copy', pattern: "*_metrics.json" input: tuple val(slice_id), path(slice_3d) @@ -300,7 +546,7 @@ process beam_profile_correction { } process crop_interface { - publishDir { "${params.output}/${task.process}" }, mode: 'copy', pattern: "*_metrics.json" + publishDir "${params.output}/${task.process}", mode: 'copy', pattern: "*_metrics.json" input: tuple val(slice_id), path(image) @@ -324,7 +570,7 @@ process crop_interface { } process normalize { - publishDir { "${params.output}/${task.process}" }, mode: 'copy', pattern: "*_metrics.json" + publishDir "${params.output}/${task.process}", mode: 'copy', pattern: "*_metrics.json" input: tuple val(slice_id), path(image) @@ -351,6 +597,8 @@ process normalize { // ----------------------------------------------------------------------------- process detect_rehoming_events { + publishDir "${params.output}/${task.process}", mode: 'copy' + input: tuple path(shifts_csv), path(slice_config_in) @@ -384,6 +632,8 @@ process detect_rehoming_events { // (when supplied) is merged so manually-excluded slices stay excluded. // See docs/NEXTFLOW_WORKFLOWS.md "Authoring Notes" for the two-input pattern. process auto_assess_quality { + publishDir "${params.output}/${task.process}", mode: 'copy' + input: path "inputs/*" path existing_slice_config @@ -412,6 +662,8 @@ process auto_assess_quality { } process bring_to_common_space { + publishDir "${params.output}/${task.process}", mode: 'copy' + input: tuple path("inputs/*"), path("shifts_xy.csv"), path(slice_config) @@ -469,6 +721,8 @@ process generate_common_space_preview { // On gate failure the zarr is omitted (hard skip); see // docs/SLICE_INTERPOLATION_FEATURE.md for the full failure policy. process interpolate_missing_slice { + publishDir "${params.output}/${task.process}", mode: 'copy' + input: tuple val(missing_slice_id), path(slice_before), path(slice_after) @@ -540,6 +794,8 @@ process finalise_interpolation { // ----------------------------------------------------------------------------- process register_pairwise { + publishDir "${params.output}/${task.process}", mode: 'copy' + input: tuple path(fixed_vol), path(moving_vol) @@ -573,6 +829,8 @@ process register_pairwise { // combines the manual correction with a tight image-based residual correction. // Only runs when params.refine_manual_transforms = true. process refine_manual_transforms { + publishDir "${params.output}/${task.process}", mode: 'copy' + input: tuple path(fixed_vol), path(moving_vol), path("auto_transforms") @@ -602,6 +860,8 @@ process refine_manual_transforms { // via --slice_config and treats those slices as motor-only. // See docs/NEXTFLOW_WORKFLOWS.md "Authoring Notes" for the two-input pattern. process auto_exclude_slices { + publishDir "$params.output/$task.process", mode: 'copy' + input: path "transforms/*" path slice_config_in @@ -630,6 +890,8 @@ process auto_exclude_slices { // Produces AIP images and copies pairwise transforms into a self-contained // directory that can be downloaded and opened by the manual alignment widget. 
process make_manual_align_package { + publishDir "$params.output/$task.process", mode: 'copy' + input: tuple path("slices/*"), path("transforms/*") @@ -661,7 +923,7 @@ process make_manual_align_package { // publishDir mode is conditional: 'symlink' when a downstream step will produce // the final output (preserves work-dir files for -resume); 'move' when this is last. process stack { - publishDir { "${params.output}/${task.process}" }, + publishDir "$params.output/$task.process", mode: (params.correct_bias_field || params.align_to_ras_enabled) ? 'symlink' : 'move', saveAs: { fn -> fn.endsWith('.ome.zarr') ? null : fn } @@ -675,17 +937,17 @@ process stack { path("stacking_decisions.csv"), optional: true, emit: stacking_decisions script: - def options = Helpers.stackBlendingArgs(params) + - Helpers.stackZMatchingArgs(params) + - Helpers.stackPairwiseTransformArgs(params) + - Helpers.stackSliceConfigArg(slice_config) + - Helpers.stackManualOverrideArg(params) + - Helpers.stackCumulativeArgs(params) + - Helpers.stackSmoothingArgs(params) + + def options = stackBlendingArgs() + + stackZMatchingArgs() + + stackPairwiseTransformArgs() + + stackSliceConfigArg(slice_config) + + stackManualOverrideArg() + + stackCumulativeArgs() + + stackSmoothingArgs() + " --no_xy_shift" + // slices are already in common space - Helpers.pyramidArgs(params) + pyramidArgs() - def annotated_args = Helpers.annotatedScreenshotArgs(params, slice_ids_str) + def annotated_args = annotatedScreenshotArgs(slice_ids_str) """ linum_stack_slices_motor.py slices ${shifts_file} ${subject_name}.ome.zarr ${options} zip -r ${subject_name}.ome.zarr.zip ${subject_name}.ome.zarr @@ -707,7 +969,7 @@ process stack { process correct_bias_field { cpus params.processes - publishDir { "${params.output}/${task.process}" }, + publishDir "$params.output/$task.process", mode: params.align_to_ras_enabled ? 'symlink' : 'move', saveAs: { fn -> fn.endsWith('.ome.zarr') ? null : fn } @@ -719,7 +981,7 @@ process correct_bias_field { script: def n_slices_opt = n_slices > 0 ? "--n_serial_slices ${n_slices}" : "" - def annotated_args = Helpers.annotatedScreenshotArgs(params, slice_ids_str) + def annotated_args = annotatedScreenshotArgs(slice_ids_str) def backend_flag = params.use_gpu ? "auto" : "cpu" def hm_perz_flag = params.bias_histogram_match_per_zplane ? "--histogram_match_per_zplane" : "" def tissue_thresh_flag = params.bias_tissue_threshold != null ? "--tissue_threshold ${params.bias_tissue_threshold}" : "" @@ -734,7 +996,7 @@ process correct_bias_field { ${hm_perz_flag} \ ${tissue_thresh_flag} \ ${zprofile_flag} \ - ${Helpers.pyramidArgs(params)} + ${pyramidArgs()} zip -r ${subject_name}.ome.zarr.zip ${subject_name}.ome.zarr @@ -754,7 +1016,7 @@ process correct_bias_field { // Atlas registration to Allen Mouse Brain Atlas. Always the final step when enabled. process align_to_ras { - publishDir { "${params.output}/${task.process}" }, mode: 'move', saveAs: { fn -> + publishDir "$params.output/$task.process", mode: 'move', saveAs: { fn -> fn.endsWith('.ome.zarr') ? null : fn } @@ -774,7 +1036,7 @@ process align_to_ras { def rotation_arg = params.ras_initial_rotation ? "--initial-rotation ${params.ras_initial_rotation}" : "" def preview_arg = params.allen_preview ? "--preview ${subject_name}_ras_preview.png" : "" def orientation_preview_arg = params.ras_orientation_preview ? 
"--orientation-preview ${subject_name}_ras_orientation_preview.png" : "" - def ras_pyramid_opts = Helpers.pyramidArgs(params, '--n-levels') + def ras_pyramid_opts = pyramidArgs('--n-levels') """ linum_align_to_ras.py ${stacked_zarr} ${subject_name}_ras.ome.zarr \ --allen-resolution ${params.allen_resolution} \ @@ -800,12 +1062,12 @@ process align_to_ras { workflow { README() - def inputDir = Helpers.normalizePath(params.input) - def subject_name = Helpers.resolveSubjectName(inputDir, params.subject_name) + def inputDir = normalizePath(params.input) + def subject_name = resolveSubjectName(inputDir) log.info "Subject: ${subject_name}" log.info "GPU: ${params.use_gpu ? 'ENABLED' : 'DISABLED'}" - def debugSlices = Helpers.parseDebugSlices(params.debug_slices) + def debugSlices = parseDebugSlices(params.debug_slices) if (debugSlices) { log.info "DEBUG MODE: Processing only slices ${debugSlices.sort().join(', ')}" } @@ -827,11 +1089,11 @@ workflow { shifts_xy = channel.value(file(shifts_xy_path)) // Slice config (optional) - def slice_config_path = params.slice_config ?: Helpers.joinPath(inputDir, "slice_config.csv") + def slice_config_path = params.slice_config ?: joinPath(inputDir, "slice_config.csv") def slicesToUse = null if (file(slice_config_path).exists()) { log.info "Slice config: ${slice_config_path}" - def parsed = Helpers.parseSliceConfig(slice_config_path) + def parsed = parseSliceConfig(slice_config_path) slicesToUse = parsed.use def total = slicesToUse.size() + parsed.excluded.size() log.info "Slice config: ${total} entries (${slicesToUse.size()} included, ${parsed.excluded.size()} excluded)" @@ -851,7 +1113,7 @@ workflow { error("No mosaic grids found in ${inputDir}. Expected: mosaic_grid*_z00.ome.zarr") } - def selectedIds = mosaicFiles.collect { f -> Helpers.extractSliceId(f) }.findAll { sid -> + def selectedIds = mosaicFiles.collect { f -> extractSliceId(f) }.findAll { sid -> if (debugSlices != null) return debugSlices.contains(sid) if (slicesToUse != null) return slicesToUse.contains(sid) return true @@ -866,7 +1128,7 @@ workflow { inputSlices = channel .fromList(mosaicFiles) - .map { f -> Helpers.toSliceTuple(f) } + .map { f -> toSliceTuple(f) } .filter { slice_id, _files -> if (debugSlices != null) { def included = debugSlices.contains(slice_id) @@ -961,7 +1223,7 @@ workflow { if (params.common_space_preview) { preview_input = bring_to_common_space.out .flatten() - .map { f -> Helpers.toSliceTuple(f) } + .map { f -> toSliceTuple(f) } generate_common_space_preview(preview_input) } @@ -971,7 +1233,7 @@ workflow { // slice_config_final.csv. See docs/SLICE_INTERPOLATION_FEATURE.md. 
if (params.interpolate_missing_slices) { gaps_channel = slices_common_space - .map { sliceList -> [Helpers.detectSingleGaps(sliceList), sliceList] } + .map { sliceList -> [detectSingleGaps(sliceList), sliceList] } .flatMap { gapsAndSlices -> def gaps = gapsAndSlices[0] def sliceList = gapsAndSlices[1] @@ -1029,7 +1291,7 @@ workflow { if (params.export_manual_align) { export_input = slices_collected .combine(transforms_collected) - .map { items -> Helpers.partitionSlicesAndTransforms(items) } + .map { items -> partitionSlicesAndTransforms(items) } make_manual_align_package(export_input) } @@ -1084,7 +1346,7 @@ workflow { .merge(transforms_for_stack) { acc, t -> tuple(acc[0], acc[1], t) } .merge(stack_slice_config) { acc, sc -> tuple(acc[0], acc[1], acc[2], sc) } .map { slices, shifts, transforms, sc -> - tuple(slices, shifts, transforms, sc, subject_name, Helpers.extractSliceIdsString(slices)) + tuple(slices, shifts, transforms, sc, subject_name, extractSliceIdsString(slices)) } stack(stack_input) @@ -1121,15 +1383,15 @@ workflow { log.info "DIAGNOSTIC MODE enabled (acq rotation, rotation drift, motor-only stitch/stack)" } - if (Helpers.diagEnabled(params, 'analyze_acquisition_rotation')) { + if (diagEnabled('analyze_acquisition_rotation')) { analyze_acquisition_rotation(shifts_xy, register_pairwise.out.collect()) } - if (Helpers.diagEnabled(params, 'analyze_rotation_drift')) { + if (diagEnabled('analyze_rotation_drift')) { analyze_rotation_drift(register_pairwise.out.collect()) } - if (Helpers.diagEnabled(params, 'motor_only_stack')) { + if (diagEnabled('motor_only_stack')) { motor_only_stack_input = normalize.out.normalized .map { _id, slice_file -> slice_file } .collect() @@ -1139,7 +1401,7 @@ workflow { // motor_only_stitch is also a prerequisite for compare_stitching, so run it // whenever either is requested. A second `stitch_motor_only(illum_fixed)` // call would emit the same channel twice, which Nextflow forbids. 
- def runMotorStitch = Helpers.diagEnabled(params, 'motor_only_stitch') + def runMotorStitch = diagEnabled('motor_only_stitch') def runComparison = params.compare_stitching || params.diagnostic_mode if (runMotorStitch || runComparison) { stitch_motor_only(illum_fixed) @@ -1150,8 +1412,8 @@ workflow { stitch_refined(illum_fixed) - motor_stitch_with_id = stitch_motor_only.out.map { f -> Helpers.toSliceTuple(f) } - refined_stitch_with_id = stitch_refined.out[0].map { f -> Helpers.toSliceTuple(f) } + motor_stitch_with_id = stitch_motor_only.out.map { f -> toSliceTuple(f) } + refined_stitch_with_id = stitch_refined.out[0].map { f -> toSliceTuple(f) } comparison_input = motor_stitch_with_id .combine(refined_stitch_with_id, by: 0) diff --git a/workflows/reconst_3d/tests/data/mosaic_grid_z01.ome.zarr/.gitkeep b/workflows/reconst_3d/tests/data/mosaic_grid_z01.ome.zarr/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/workflows/reconst_3d/tests/data/mosaic_grid_z02.ome.zarr/.gitkeep b/workflows/reconst_3d/tests/data/mosaic_grid_z02.ome.zarr/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/workflows/reconst_3d/tests/soct_3d_reconst.nf.test b/workflows/reconst_3d/tests/soct_3d_reconst.nf.test index f7d84b0c..fe050419 100644 --- a/workflows/reconst_3d/tests/soct_3d_reconst.nf.test +++ b/workflows/reconst_3d/tests/soct_3d_reconst.nf.test @@ -74,9 +74,6 @@ nextflow_pipeline { apply_pairwise_transforms = true correct_bias_field = true generate_report = false - // GitHub-hosted runners have only 4 CPUs; the default of 8 - // causes 'Process requirement exceeds available CPUs'. - processes = 2 } } diff --git a/workflows/shared/cpu_limits.config b/workflows/shared/cpu_limits.config deleted file mode 100644 index cc9887e0..00000000 --- a/workflows/shared/cpu_limits.config +++ /dev/null @@ -1,43 +0,0 @@ -// Shared per-task thread limits for linumpy Nextflow pipelines. -// -// Sets BLAS/OpenMP/JAX/ITK thread caps so each parallel Nextflow task only -// uses its share of the machine. The CPU budget logic is inlined here -// because Nextflow v26's strict config DSL no longer allows top-level `def`. -// -// Honoured params (all optional): -// enable_cpu_limits — set false to disable (default true) -// max_cpus — hard ceiling; null/0 → totalCpus - reserved_cpus -// reserved_cpus — cores held back when max_cpus is unset (default 2) -// processes — parallel Python workers per task (default 1); -// threadsPerProcess = maxCpus / processes - -process { - beforeScript = { - if (params.enable_cpu_limits == false) return "" - - int totalCpus = Runtime.runtime.availableProcessors() - int maxCpus = (params.max_cpus != null && params.max_cpus > 0) - ? 
Math.min(params.max_cpus as int, totalCpus) - : Math.max(1, totalCpus - (params.reserved_cpus ?: 2) as int) - int numProcesses = Math.max(1, (params.processes ?: 1) as int) - int threadsPerProcess = Math.max(1, (maxCpus / numProcesses) as int) - - def envVars = [] - if (params.max_cpus != null && params.max_cpus > 0) { - envVars << "export LINUMPY_MAX_CPUS=${params.max_cpus as int}" - } else { - envVars << "export LINUMPY_RESERVED_CPUS=${(params.reserved_cpus ?: 2) as int}" - } - - envVars << "export OMP_NUM_THREADS=${threadsPerProcess}" - envVars << "export MKL_NUM_THREADS=${threadsPerProcess}" - envVars << "export OPENBLAS_NUM_THREADS=${threadsPerProcess}" - envVars << "export VECLIB_MAXIMUM_THREADS=${threadsPerProcess}" - envVars << "export NUMEXPR_NUM_THREADS=${threadsPerProcess}" - envVars << "export NUMBA_NUM_THREADS=${threadsPerProcess}" - envVars << "export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=${threadsPerProcess}" - envVars << "export XLA_FLAGS='--xla_cpu_multi_thread_eigen=false intra_op_parallelism_threads=${threadsPerProcess}'" - - return envVars.join('\n') - } -}
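
A note on the shift bookkeeping in the new scripts/linum_stack_slices_2d.py above: each per-slice x/y shift is relative to the previous slice, so slice i's absolute offset is the cumulative sum of shifts up to i, and a global origin offset (x0, y0) re-bases all slices into one non-negative frame. The sketch below isolates just that arithmetic with synthetic shapes and shifts; apply_xy_shift and the real file layout are deliberately not reproduced.

    import numpy as np

    # Per-slice shapes (rows, cols) and inter-slice shifts, as if read from shifts_xy.csv.
    shapes = [(100, 120), (100, 120), (90, 130)]
    dx_list = np.array([3.0, -5.0, 2.0])   # shift of slice i relative to slice i-1
    dy_list = np.array([-2.0, 4.0, 1.0])

    xmin, xmax, ymin, ymax = [], [], [], []
    for i, (ny_i, nx_i) in enumerate(shapes):
        if i == 0:
            dx = dy = 0.0
        else:
            # Shifts accumulate: slice i is offset by the sum of all previous shifts.
            dx = np.cumsum(dx_list)[i - 1]
            dy = np.cumsum(dy_list)[i - 1]
        xmin.append(-dx)
        xmax.append(-dx + nx_i)
        ymin.append(-dy)
        ymax.append(-dy + ny_i)

    # Global origin offset (x0, y0) makes every shifted slice land at
    # non-negative coordinates; the canvas is the union bounding box.
    x0, y0 = min(xmin), min(ymin)
    nx, ny = int(max(xmax) - x0), int(max(ymax) - y0)
    print(f"output canvas: {ny} x {nx}, origin offset: ({y0}, {x0})")

Running this prints the canvas size the real script would allocate for its output zarr volume.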
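
The CPU budget logic now exists in two inlined copies (preproc and reconst_3d) in place of the deleted workflows/shared/cpu_limits.config, and the contract with Python tasks is carried entirely by environment variables. A minimal sketch of the consumer side, assuming only the variable names exported above (LINUMPY_MAX_CPUS, LINUMPY_RESERVED_CPUS, OMP_NUM_THREADS); resolve_cpu_budget itself is illustrative, not a linumpy API:

    import multiprocessing
    import os


    def resolve_cpu_budget() -> int:
        # Same precedence as getAvailableCpus() in the configs: an explicit
        # LINUMPY_MAX_CPUS ceiling wins; otherwise hold back
        # LINUMPY_RESERVED_CPUS cores (default 2) from the machine total.
        total = multiprocessing.cpu_count()
        max_cpus = int(os.environ.get("LINUMPY_MAX_CPUS", "0"))
        if max_cpus > 0:
            return min(max_cpus, total)
        reserved = int(os.environ.get("LINUMPY_RESERVED_CPUS", "2"))
        return max(1, total - reserved)


    if __name__ == "__main__":
        budget = resolve_cpu_budget()
        # beforeScript sets OMP_NUM_THREADS to budget / params.processes, so a
        # well-configured task should never see more threads than its budget.
        threads = int(os.environ.get("OMP_NUM_THREADS", str(budget)))
        print(f"cpu budget: {budget}, blas/omp threads: {min(threads, budget)}")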
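
The inlined Groovy helpers keep parseSliceConfig in lockstep with linumpy.io.slice_config._parse_bool, and the slice-ID parsing follows the same z(\d+) convention as the Python scripts. A Python mirror of extractSliceId and parseDebugSlices can serve as a cross-check when editing either side; the function names and the asserts below are illustrative only, not shipped in linumpy:

    import re


    def extract_slice_id(filename: str) -> str:
        """Mirror of the workflow's extractSliceId(): first z<digits> token, else 'unknown'."""
        m = re.search(r"z(\d+)", filename)
        return m.group(1) if m else "unknown"


    def parse_debug_slices(spec: str) -> set[str] | None:
        """Mirror of parseDebugSlices(): accepts '25,26', '25-29', or '25,27-29'."""
        if not spec or not spec.strip():
            return None
        ids: set[str] = set()
        for part in spec.split(","):
            part = part.strip()
            if "-" in part:
                start, end = (int(p) for p in part.split("-", 1))
                ids.update(f"{n:02d}" for n in range(start, end + 1))
            else:
                ids.add(f"{int(part):02d}")
        return ids


    assert extract_slice_id("mosaic_grid_z07.ome.zarr") == "07"
    assert parse_debug_slices("25,27-29") == {"25", "27", "28", "29"}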