diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index bb8243c4a..0d87b3ffb 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -92,7 +92,7 @@ jobs:
continue-on-error: true
shell: pwsh
run: |
- python -m pip install --upgrade cmake cmake-setuptools numpy ninja pytest pandas graphviz networkx pydot
+ python -m pip install --upgrade cmake cmake-setuptools numpy ninja pytest pandas graphviz networkx pydot pyyaml
# keep MinGW/Strawberry off PATH (so Eigen won't try Fortran)
$pattern = '^(C:\\mingw64\\bin|C:\\tools\\mingw64\\bin|C:\\Strawberry\\c\\bin|C:\\Strawberry\\perl\\site\\bin|C:\\Strawberry\\perl\\bin)$'
diff --git a/.gitignore b/.gitignore
index 11b57a693..5ad9ba57f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,6 +39,9 @@ build*/
# Node modules (for directed graph visualization)
node_modules/
+# Allow csv for target files in svZeroDTuner examples
+!applications/svZeroDTuner/examples/**/targets/**/*.csv
+
# Virtual environments
venv/
.venv/
\ No newline at end of file
diff --git a/README.md b/README.md
index 60405b437..fdbf1c562 100755
--- a/README.md
+++ b/README.md
@@ -2,8 +2,6 @@
svZeroDSolver
-[](https://github.com/simvascular/svZeroDSolver/actions)
-[](https://codecov.io/gh/SimVascular/svZeroDSolver)
[](https://github.com/simvascular/svZeroDSolver/releases/latest)

@@ -15,6 +13,7 @@ You can find more information under the following links:
* [**Documentation**](https://simvascular.github.io/svZeroDSolver)
* [**Developer Guide**](https://simvascular.github.io/svZeroDSolver/developer_guide.html)
+* [**svZeroDTuner Guide**](https://simvascular.github.io/svZeroDSolver/tuner.html)
* [**Bug Reports**](https://github.com/simvascular/svZeroDSolver/issues)
* [**Forum**](https://github.com/simvascular/svZeroDSolver/discussions)
* [**About SimVascular**](https://simvascular.github.io)
diff --git a/applications/svZeroDTuner/.gitignore b/applications/svZeroDTuner/.gitignore
new file mode 100644
index 000000000..b1aa73c92
--- /dev/null
+++ b/applications/svZeroDTuner/.gitignore
@@ -0,0 +1,6 @@
+baseline_results/
+optimization_results*/
+sensitivity_results/
+reference_codes/
+
+
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/main.py b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/main.py
new file mode 100644
index 000000000..d166ee440
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/main.py
@@ -0,0 +1,279 @@
+"""
+sv0D Tuning Framework - Regazzoni closed-loop model example.
+
+This script provides three modes:
+1. BASELINE MODE: Run the initial model and save all results for inspection
+2. SENSITIVITY MODE: Run correlation-based sensitivity screening
+3. OPTIMIZE MODE: Run optimization using targets specified in tuning.yaml
+
+Recommended command-line workflow:
+ Baseline: python -c 'from main import run_baseline; run_baseline("model.json")'
+ Optimize: svzerodtuner optimize tuning_nelder_mead.yaml
+ Sensitivity: svzerodtuner sensitivity sensitivity.yaml
+"""
+
+import os
+import sys
+import numpy as np
+import pandas as pd
+import pysvzerod
+
+# Add src to path
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))
+
+import json
+from svzerodtuner.sv0d_tuner import SV0DTuner
+from svzerodtuner.visualization import plot_simulation_results
+from svzerodtuner.sensitivity import SensitivityAnalyzer
+
+
+def run_baseline(config_file):
+ """
+ Run the baseline simulation and save all results for user inspection.
+
+ This function:
+ - Runs the initial model.json simulation
+ - Saves all available outputs to baseline_results.csv
+ - Displays summary statistics (min, max, mean) for each output
+ - User can then inspect these results and specify targets in tuning_config.yaml
+ """
+ print("="*70)
+ print("BASELINE SIMULATION")
+ print("="*70)
+ print()
+
+ if not os.path.exists(config_file):
+ print(f"ERROR: Config file not found: {config_file}")
+ return
+
+ print(f"Running simulation with: {config_file}")
+ print()
+
+ # Run baseline simulation
+ try:
+ solver = pysvzerod.Solver(config_file)
+ solver.run()
+ print("✓ Simulation completed successfully\n")
+ except Exception as e:
+ print(f"ERROR running simulation: {e}")
+ return
+
+ # Extract all results
+ times = solver.get_times()
+ full_results = solver.get_full_result()
+ result_names = full_results['name'].unique()
+
+ print(f"Found {len(result_names)} output variables")
+ print()
+
+ # Create results DataFrame with all outputs
+ results_data = {'time': times}
+ summary_stats = []
+
+ for name in result_names:
+ try:
+ values = solver.get_single_result(name)
+ results_data[name] = values
+
+ # Calculate statistics
+ stats = {
+ 'output_name': name,
+ 'min': np.min(values),
+ 'max': np.max(values),
+ 'mean': np.mean(values),
+ 'std': np.std(values)
+ }
+ summary_stats.append(stats)
+ except Exception as e:
+ print(f"Warning: Could not extract {name}: {e}")
+
+ # Create baseline_results directory
+ output_dir = 'baseline_results'
+ os.makedirs(output_dir, exist_ok=True)
+
+ # Save full results to CSV
+ results_df = pd.DataFrame(results_data)
+ baseline_file = os.path.join(output_dir, 'baseline_results.csv')
+ results_df.to_csv(baseline_file, index=False)
+ print(f"✓ Saved full time series to: {baseline_file}")
+
+ # Save summary statistics
+ summary_df = pd.DataFrame(summary_stats)
+ summary_file = os.path.join(output_dir, 'baseline_summary.csv')
+ summary_df.to_csv(summary_file, index=False)
+ print(f"✓ Saved summary statistics to: {summary_file}")
+ print()
+
+ # Display summary statistics
+ print("="*70)
+ print("BASELINE RESULTS SUMMARY")
+ print("="*70)
+ print()
+ print(f"{'Output Variable':<40} {'Min':>12} {'Max':>12} {'Mean':>12}")
+ print("-"*70)
+
+ for stats in summary_stats:
+ print(f"{stats['output_name']:<40} {stats['min']:>12.4e} "
+ f"{stats['max']:>12.4e} {stats['mean']:>12.4e}")
+
+ # Generate plots
+ print()
+ print("Generating plots...")
+ plot_simulation_results(results_df, summary_df, output_dir, title_prefix="Baseline")
+
+ print()
+ print("="*70)
+ print("NEXT STEPS:")
+ print("="*70)
+ print(f"1. Inspect {output_dir}/baseline_results.csv and baseline_summary.csv")
+ print(f"2. View plots in {output_dir}/ to visualize the outputs")
+ print("3. Choose which outputs you want to target")
+ print("4. Update tuning.yaml with your desired targets")
+ print("5. Run svzerodtuner optimize ")
+ print("="*70)
+ print()
+
+
+def run_optimization(config_file):
+ """
+ Run optimization using targets specified in config_file.
+ """
+ print("="*70)
+ print("OPTIMIZATION")
+ print("="*70)
+ print()
+
+ if not os.path.exists(config_file):
+ print(f"ERROR: Config file not found: {config_file}")
+ print(f"Please create {config_file} with your optimization settings.")
+ return
+
+ print(f"Using configuration: {config_file}")
+ print()
+
+ # Initialize tuner
+ try:
+ tuner = SV0DTuner(config_file)
+ except Exception as e:
+ print(f"ERROR loading configuration: {e}")
+ return
+
+ # Run optimization
+ print("Starting optimization...")
+ print("="*70)
+ print()
+
+ try:
+ results = tuner.optimize()
+ except Exception as e:
+ print(f"ERROR during optimization: {e}")
+ return
+
+def run_sensitivity(config_file):
+ """
+ Run correlation-based sensitivity screening.
+
+ This function:
+ - Performs global sensitivity analysis on specified parameters
+ - Computes first-order and total-order screening scores
+ - Identifies which parameters most influence each quantity of interest
+ - Saves results, plots, and summary statistics
+ """
+ print("="*70)
+ print("SENSITIVITY ANALYSIS")
+ print("="*70)
+ print()
+
+ if not os.path.exists(config_file):
+ print(f"ERROR: Config file not found: {config_file}")
+ print(f"Please create {config_file} with your sensitivity analysis settings.")
+ print(f"See sensitivity.yaml.example for template.")
+ return
+
+ print(f"Using configuration: {config_file}")
+ print()
+
+ # Initialize sensitivity analyzer
+ try:
+ analyzer = SensitivityAnalyzer(config_file)
+ except Exception as e:
+ print(f"ERROR loading configuration: {e}")
+ import traceback
+ traceback.print_exc()
+ return
+
+ # Run sensitivity analysis
+ try:
+ results = analyzer.run()
+ except Exception as e:
+ print(f"ERROR during sensitivity analysis: {e}")
+ import traceback
+ traceback.print_exc()
+ return
+
+ # Save results
+ try:
+ analyzer.save_results()
+ except Exception as e:
+ print(f"ERROR saving results: {e}")
+ import traceback
+ traceback.print_exc()
+ return
+
+ # Print summary
+ print()
+ print("="*70)
+ print("SENSITIVITY ANALYSIS COMPLETE")
+ print("="*70)
+ print()
+ print("Summary of Results:")
+ print("-"*70)
+
+ for qoi_key, qoi_results in results.items():
+ print(f"\n{qoi_key}:")
+ print(f" Range: [{qoi_results['min']:.4e}, {qoi_results['max']:.4e}]")
+ print(f" Mean ± Std: {qoi_results['mean']:.4e} ± {qoi_results['std']:.4e}")
+ print(f"\n Most influential parameters (first-order indices):")
+
+ # Sort by influence
+ first_order = qoi_results['first_order']
+ sorted_params = sorted(first_order.items(), key=lambda x: abs(x[1]), reverse=True)
+
+ for param, value in sorted_params:
+ print(f" {param:<30} {value:>8.4f}")
+
+ print()
+ print(f"Results saved to: {analyzer.output_config.get('directory', 'sensitivity_results')}")
+ print("="*70)
+ print()
+
+
+def main():
+ """
+ Main function.
+
+ INSTRUCTIONS:
+ ============
+ Uncomment ONE of the following modes to run:
+
+ MODE 1: BASELINE - Run initial simulation and save results for inspection
+    MODE 2: SENSITIVITY - Run global sensitivity analysis with sensitivity.yaml
+    MODE 3: OPTIMIZE - Run optimization with targets from tuning.yaml
+ """
+ # Change to script directory
+ os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+ # ============================================================================
+ # SELECT MODE: Uncomment ONE of the following
+ # ============================================================================
+
+ #run_baseline("model.json") # MODE 1: Run baseline and save results
+ #run_sensitivity("sensitivity.yaml") # MODE 2: Run sensitivity analysis
+ run_optimization("tuning_nelder_mead.yaml") # MODE 3: Run optimization with tuning.yaml
+
+
+ # ============================================================================
+
+
+if __name__ == "__main__":
+ main()
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/model.json b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/model.json
new file mode 100644
index 000000000..c850a7562
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/model.json
@@ -0,0 +1,235 @@
+{
+ "_comment": "This is an sv0D input file for a closed loop circulation model by Regazzoni et al (2022). The units are as follows: Compliance (C) is in m^3/Pa, Resistance (R) is in Pa/(m^3/s), Inductance is in Pa/(m^3/s^2), Pressure (Pd) is in Pa.",
+ "_diagram_a": "LA -> MV -> LV -> AV -> AR_SYS -> J0 -> VEN_SYS -> J1 -> RA -> TV -> RV -> PV -> AR_PUL -> J2 -> VEN_PUL -> J3 -> LA",
+ "simulation_parameters": {
+ "number_of_cardiac_cycles": 30,
+ "number_of_time_pts_per_cardiac_cycle": 689,
+ "output_variable_based": true,
+ "output_all_cycles": false,
+ "steady_initial": false,
+ "cardiac_period": 0.689655
+ },
+ "vessels": [
+ {
+ "vessel_name": "AR_SYS",
+ "vessel_id": 1,
+ "vessel_length": 10.0,
+ "zero_d_element_type": "BloodVesselCRL",
+ "zero_d_element_values": {
+ "C": 6.93626e-9,
+ "R_poiseuille": 9.0219259e7,
+ "L": 6.66611e5
+ }
+ },
+ {
+ "vessel_name": "VEN_SYS",
+ "vessel_id": 2,
+ "vessel_length": 10.0,
+ "zero_d_element_type": "BloodVesselCRL",
+ "zero_d_element_values": {
+ "C": 4.5004e-7,
+ "R_poiseuille": 8.506913e6,
+ "L": 6.66611e4
+ }
+ },
+ {
+ "vessel_name": "AR_PUL",
+ "vessel_id": 3,
+ "vessel_length": 10.0,
+ "zero_d_element_type": "BloodVesselCRL",
+ "zero_d_element_values": {
+ "C": 7.5006e-8,
+ "R_poiseuille": 4.266316e6,
+ "L": 6.66611e4
+ }
+ },
+ {
+ "vessel_name": "VEN_PUL",
+ "vessel_id": 4,
+ "vessel_length": 10.0,
+ "zero_d_element_type": "BloodVesselCRL",
+ "zero_d_element_values": {
+ "C": 1.2001e-7,
+ "R_poiseuille": 4.666283e6,
+ "L": 6.66611e4
+ }
+ }
+ ],
+ "junctions": [
+ {
+ "inlet_blocks": ["AR_SYS"],
+ "junction_name": "J0",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["VEN_SYS"]
+ },
+ {
+ "inlet_blocks": ["VEN_SYS"],
+ "junction_name": "J1",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["RA"]
+ },
+ {
+ "inlet_blocks": ["AR_PUL"],
+ "junction_name": "J2",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["VEN_PUL"]
+ },
+ {
+ "inlet_blocks": ["VEN_PUL"],
+ "junction_name": "J3",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["LA"]
+ }
+ ],
+ "boundary_conditions": [],
+ "chambers": [
+ {
+ "type": "LinearElastanceChamber",
+ "name": "LA",
+ "values": {
+ "Emax": 2.666e7,
+ "Epass": 2.0449931e7,
+ "Vrest": 4.0e-6
+ },
+ "activation_function": {
+ "type": "two_hill",
+ "t_shift": 0.025,
+ "tau_1": 0.07586206896551724,
+ "tau_2": 0.12413793103448276,
+ "m1": 1.32,
+ "m2": 13.1
+ }
+ },
+ {
+ "type": "LinearElastanceChamber",
+ "name": "LV",
+ "values": {
+ "Emax": 3.33878537e8,
+ "Epass": 1.2605372e7,
+ "Vrest": 1.9763060505362944e-6
+ },
+ "activation_function": {
+ "type": "two_hill",
+ "t_shift": 0.207,
+ "tau_1": 0.18551724137931036,
+ "tau_2": 0.31172413793103454,
+ "m1": 1.32,
+ "m2": 27.4
+ }
+ },
+ {
+ "type": "LinearElastanceChamber",
+ "name": "RA",
+ "values": {
+ "Emax": 7.999343e6,
+ "Epass": 4.932281e6,
+ "Vrest": 4.0e-6
+ },
+ "activation_function": {
+ "type": "two_hill",
+ "t_shift": 0.025,
+ "tau_1": 0.07586206896551724,
+ "tau_2": 0.12413793103448276,
+ "m1": 1.32,
+ "m2": 13.1
+ }
+ },
+ {
+ "type": "LinearElastanceChamber",
+ "name": "RV",
+ "values": {
+ "Emax": 6.5719659e7,
+ "Epass": 5.406837e6,
+ "Vrest": 54.31634509432495e-6
+ },
+ "activation_function": {
+ "type": "two_hill",
+ "t_shift": 0.207,
+ "tau_1": 0.18551724137931036,
+ "tau_2": 0.31172413793103454,
+ "m1": 1.32,
+ "m2": 27.4
+ }
+ }
+ ],
+ "valves": [
+ {
+ "type": "PiecewiseValve",
+ "name": "MV",
+ "params": {
+ "Rmin": 6.666e5,
+ "Rmax": 6.666e9,
+ "upstream_block": "LA",
+ "downstream_block": "LV"
+ }
+ },
+ {
+ "type": "PiecewiseValve",
+ "name": "AV",
+ "params": {
+ "Rmin": 6.666e5,
+ "Rmax": 6.666e9,
+ "upstream_block": "LV",
+ "downstream_block": "AR_SYS"
+ }
+ },
+ {
+ "type": "PiecewiseValve",
+ "name": "TV",
+ "params": {
+ "Rmin": 6.666e5,
+ "Rmax": 6.666e9,
+ "upstream_block": "RA",
+ "downstream_block": "RV"
+ }
+ },
+ {
+ "type": "PiecewiseValve",
+ "name": "PV",
+ "params": {
+ "Rmin": 6.666e5,
+ "Rmax": 6.666e9,
+ "upstream_block": "RV",
+ "downstream_block": "AR_PUL"
+ }
+ }
+ ],
+ "initial_condition": {
+ "Vc:LA": 5.624441188959639e-5,
+ "pressure:LA:MV": 1068.3946660321123545,
+ "flow:LA:MV": 2.22314e-5,
+ "pressure:MV:LV": 1001.7260268934801388,
+ "flow:MV:LV": 2.22314e-5,
+ "Vc:LV": 8.142759737438905e-5,
+ "pressure:LV:AV": 1001.7260268934801388,
+ "flow:LV:AV": -3.627882e-7,
+ "pressure:AV:AR_SYS": 9926.0311597251620697,
+ "flow:AV:AR_SYS": -3.627882e-7,
+ "pressure:AR_SYS:J0": 1358.6946929956583,
+ "flow:AR_SYS:J0": 96.12211741415929e-6,
+ "pressure:J0:VEN_SYS": 1358.6946929956583,
+ "flow:J0:VEN_SYS": 96.12211741415929e-6,
+ "pressure:VEN_SYS:J1": 419.90306906826876,
+ "flow:VEN_SYS:J1": 110.34140583655498e-6,
+ "pressure:J1:RA": 419.90306906826876,
+ "flow:J1:RA": 110.34140583655498e-6,
+ "Vc:RA": 8.913584459023032e-5,
+ "pressure:RA:TV": 419.90306906826876,
+ "flow:RA:TV": 1.090932e-5,
+ "pressure:TV:RV": 348.08995100320674965,
+ "flow:TV:RV": 1.090932e-5,
+ "Vc:RV": 1.1868964658139649e-4,
+ "pressure:RV:PV": 348.08995100320674965,
+ "flow:RV:PV": -7.181835e-8,
+ "pressure:PV:AR_PUL": 2075.0626413038780811,
+ "flow:PV:AR_PUL": -7.181835e-8,
+ "pressure:AR_PUL:J2": 1744.2583185734633844,
+ "flow:AR_PUL:J2": 79.3375172471186e-6,
+ "pressure:J2:VEN_PUL": 1744.2583185734633844,
+ "flow:J2:VEN_PUL": 79.3375172471186e-6,
+ "pressure:VEN_PUL:J3": 1068.3946660321123545,
+ "flow:VEN_PUL:J3": 46.21258343525287e-6,
+ "pressure:J3:LA": 1068.3946660321123545,
+ "flow:J3:LA": 46.21258343525287e-6
+ }
+}
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/sensitivity.yaml b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/sensitivity.yaml
new file mode 100644
index 000000000..c38573070
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/sensitivity.yaml
@@ -0,0 +1,38 @@
+# Sensitivity Analysis Configuration for Regazzoni closed-loop model
+#
+# This file specifies parameters to vary and quantities of interest to analyze
+# using correlation-based sensitivity screening.
+
+model:
+ config_file: "model.json"
+
+# Parameters to vary during sensitivity analysis
+# Specify the bounds over which to vary each parameter
+parameters:
+ - name: "LV.Emax"
+ bounds: [1e8, 5e8] # Left ventricle maximum elastance
+
+ - name: "AR_SYS.R_poiseuille"
+ bounds: [5e7, 5e8] # Systemic arterial resistance
+
+ - name: "AR_SYS.C"
+ bounds: [1e-9, 1e-8] # Systemic arterial compliance
+
+# Quantities of Interest (QoI) to analyze
+# These are the outputs you want to understand the sensitivity of
+quantities_of_interest:
+ - name: Systemic arterial max pressure
+ expression: np.max(pressure:AV:AR_SYS)
+ - name: Systemic arterial min pressure
+ expression: np.min(pressure:AV:AR_SYS)
+
+# Sensitivity analysis settings
+sensitivity:
+ n_samples: 256 # Number of quasi-random samples for screening
+ # Recommended: 256-512 for quick screening, 1024-2048 for higher accuracy
+ # Higher = more accurate but slower
+
+# Output settings
+output:
+ directory: "sensitivity_results"
+ save_plots: true # Save visualization plots
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/convert_volume_data.py b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/convert_volume_data.py
new file mode 100644
index 000000000..c8314a453
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/convert_volume_data.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python3
+"""
+Convert chamber volume CSVs (with RR% and mL) into time–series target files
+(time in seconds, volume in m^3) suitable for svZeroDTuner time_series targets.
+
+INPUT FILES:
+- volume.csv with columns: RR%, V_LA, V_LV, V_RA, V_RV (volumes in mL)
+- la_volume_manual.csv with columns: RR%, V_LA (volume in mL)
+
+OUTPUT FILES (in the same directory as this script):
+- target_V_LV.csv
+- target_V_RA.csv
+- target_V_RV.csv (each converted from volume.csv)
+- target_V_LA.csv (converted from la_volume_manual.csv; the V_LA column in
+  volume.csv is not converted)
+
+Each output file has columns: time (seconds), value (m^3)
+"""
+
+import os
+import pandas as pd
+from typing import Dict
+
+# ============================================================
+# CONFIG: EDIT THESE VALUES AS NEEDED
+# ============================================================
+
+# Cardiac period (duration of one RR interval) in seconds
+CARDIAC_PERIOD_SEC = 0.689
+
+# Values from ECG for a particular patient
+# PR interval in seconds
+PR_INTERVAL_SEC = 0.182
+
+# QRS duration in seconds
+QRS_DURATION_SEC = 0.088
+
+# Input filenames (relative to this script's directory)
+VOLUME_FILE = "volume.csv"
+LA_MANUAL_FILE = "la_volume_manual.csv"
+
+# Mapping from input column name -> output CSV filename
+VOLUME_COLUMNS_MODEL: Dict[str, str] = {
+ "V_LV": "target_V_LV.csv",
+ "V_RA": "target_V_RA.csv",
+ "V_RV": "target_V_RV.csv",
+}
+
+VOLUME_COLUMNS_MANUAL: Dict[str, str] = {
+ "V_LA": "target_V_LA.csv",
+}
+
+# ============================================================
+# HELPER FUNCTIONS
+# ============================================================
+
+
+def rr_percent_to_time(rr_percent_series, cardiac_period_sec: float):
+ """
+ Convert RR% (0–100 over one cardiac cycle) to time in seconds.
+ RR%=0 corresponds to the R-wave. Assuming the start of the P-wave corresponds
+    to t=0, then RR%=0 occurs at t = PR interval + QRS duration / 2.
+ time = (rr/100) * cardiac_period + PR interval + QRS interval/2, wrapped into [0, cardiac_period).
+ """
+ rr = rr_percent_series.astype(float) / 100.0
+ time = rr * cardiac_period_sec + PR_INTERVAL_SEC + QRS_DURATION_SEC / 2
+ return time % cardiac_period_sec
+
+
+def convert_volume_file(
+ csv_path: str,
+ column_to_output: Dict[str, str],
+):
+ """
+ Convert a volume CSV (RR% and mL) into time–series target CSVs (time [s], value [m^3]).
+ """
+ if not os.path.exists(csv_path):
+ raise FileNotFoundError(f"Input file not found: {csv_path}")
+
+ df = pd.read_csv(csv_path)
+ df.columns = df.columns.str.strip() # handle " V_LA" -> "V_LA"
+
+ if "RR%" not in df.columns:
+ raise ValueError(f"'RR%' column not found in {csv_path}. Columns: {list(df.columns)}")
+
+ times = rr_percent_to_time(df["RR%"], CARDIAC_PERIOD_SEC)
+ directory = os.path.dirname(os.path.abspath(csv_path))
+
+ for col, out_name in column_to_output.items():
+ if col not in df.columns:
+ raise ValueError(f"Column '{col}' not found in {csv_path}. Columns: {list(df.columns)}")
+
+ values_m3 = df[col].astype(float) * 1e-6 # mL -> m^3
+
+ out_df = (
+ pd.DataFrame({"time": times, "value": values_m3})
+ .sort_values("time")
+ )
+
+ out_path = os.path.join(directory, out_name)
+ out_df.to_csv(out_path, index=False)
+ print(f"Saved {out_path} ({len(out_df)} points)")
+
+
+def main():
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+
+ convert_volume_file(
+ csv_path=os.path.join(script_dir, VOLUME_FILE),
+ column_to_output=VOLUME_COLUMNS_MODEL
+ )
+
+ convert_volume_file(
+ csv_path=os.path.join(script_dir, LA_MANUAL_FILE),
+ column_to_output=VOLUME_COLUMNS_MANUAL
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/la_volume_manual.csv b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/la_volume_manual.csv
new file mode 100644
index 000000000..f0cadbb62
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/la_volume_manual.csv
@@ -0,0 +1,11 @@
+RR%, V_LA
+0.0, 38.2
+10.0, 47.45
+20.0, 54.44
+30.0, 62.1
+40.0, 68.89
+50.0, 72.69
+60.0, 63.28
+70.0, 60.03
+80.0, 58.76
+90.0, 43.86
\ No newline at end of file
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/target_V_LA.csv b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/target_V_LA.csv
new file mode 100644
index 000000000..9fd5ff425
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/target_V_LA.csv
@@ -0,0 +1,11 @@
+time,value
+0.019299999999999984,6.003e-05
+0.08820000000000017,5.875999999999999e-05
+0.15710000000000013,4.386e-05
+0.22599999999999998,3.82e-05
+0.2949,4.745e-05
+0.36379999999999996,5.4439999999999994e-05
+0.4326999999999999,6.21e-05
+0.5016,6.889e-05
+0.5705,7.269e-05
+0.6394,6.328e-05
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/target_V_LV.csv b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/target_V_LV.csv
new file mode 100644
index 000000000..2c6d032d1
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/target_V_LV.csv
@@ -0,0 +1,11 @@
+time,value
+0.019299999999999984,8.209206847820203e-05
+0.08820000000000017,9.181858533873112e-05
+0.15710000000000013,0.00011518503794616718
+0.22599999999999998,0.00011674118189347626
+0.2949,0.0001002523794307163
+0.36379999999999996,6.339886926561263e-05
+0.4326999999999999,4.0656364812357317e-05
+0.5016,3.8636694699938e-05
+0.5705,4.310993543544956e-05
+0.6394,6.767030310853679e-05
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/target_V_RA.csv b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/target_V_RA.csv
new file mode 100644
index 000000000..aa0e176f2
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/target_V_RA.csv
@@ -0,0 +1,11 @@
+time,value
+0.019299999999999984,9.369241418549201e-05
+0.08820000000000017,8.412201482585564e-05
+0.15710000000000013,5.6026368628527275e-05
+0.22599999999999998,6.109746593369446e-05
+0.2949,7.060273191622785e-05
+0.36379999999999996,8.362636203121454e-05
+0.4326999999999999,9.333610226068339e-05
+0.5016,0.00010107328567635535
+0.5705,0.00010789215565001145
+0.6394,0.00010176805487702007
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/target_V_RV.csv b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/target_V_RV.csv
new file mode 100644
index 000000000..0baa0e1fd
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/target_V_RV.csv
@@ -0,0 +1,11 @@
+time,value
+0.019299999999999984,0.00011716075351352032
+0.08820000000000017,0.000134236112579873
+0.15710000000000013,0.00016982846499561386
+0.22599999999999998,0.00017174178346179283
+0.2949,0.0001542893810374056
+0.36379999999999996,0.00011597469224428158
+0.4326999999999999,9.100090852473403e-05
+0.5016,8.201940604537174e-05
+0.5705,8.374565652485933e-05
+0.6394,0.00010057232797790399
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/volume.csv b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/volume.csv
new file mode 100644
index 000000000..813cae3b8
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/P003_chamber_volumes/volume.csv
@@ -0,0 +1,11 @@
+RR%, V_LA, V_LV, V_RA, V_RV
+0.000000000000000000e+00, 3.353854734814017036e+01, 1.167411818934762664e+02, 6.109746593369445833e+01, 1.717417834617928349e+02
+1.000000000000000000e+01, 4.168803383024022935e+01, 1.002523794307163030e+02, 7.060273191622785305e+01, 1.542893810374056045e+02
+2.000000000000000000e+01, 4.975275660511201181e+01, 6.339886926561263181e+01, 8.362636203121454059e+01, 1.159746922442815844e+02
+3.000000000000000000e+01, 5.542701117063629113e+01, 4.065636481235731736e+01, 9.333610226068341831e+01, 9.100090852473402947e+01
+4.000000000000000000e+01, 6.273942063591531593e+01, 3.863669469993800476e+01, 1.010732856763553542e+02, 8.201940604537175261e+01
+5.000000000000000000e+01, 6.634282300620391482e+01, 4.310993543544957163e+01, 1.078921556500114605e+02, 8.374565652485932787e+01
+6.000000000000000000e+01, 5.618893389388352944e+01, 6.767030310853678543e+01, 1.017680548770200915e+02, 1.005723279779040098e+02
+7.000000000000000000e+01, 5.237233218741626217e+01, 8.209206847820203734e+01, 9.369241418549202649e+01, 1.171607535135203193e+02
+8.000000000000000000e+01, 4.883735839209346352e+01, 9.181858533873112549e+01, 8.412201482585564349e+01, 1.342361125798730086e+02
+9.000000000000000000e+01, 3.161169238548909988e+01, 1.151850379461671849e+02, 5.602636862852727972e+01, 1.698284649956138708e+02
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/create_target_from_baseline.py b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/create_target_from_baseline.py
new file mode 100644
index 000000000..7fc8a576f
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/create_target_from_baseline.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+"""
+Create a target CSV from a baseline_results.csv column.
+
+Extracts evenly spaced samples over 1 cardiac cycle and saves as CSV.
+Shows a plot of the full baseline and sampled points.
+
+Modify the hardcoded paths and options in main() as needed.
+"""
+
+import os
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+
+
+def main():
+ # Modify these paths and options as needed
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ baseline_path = os.path.join(script_dir, "..", "baseline_results", "baseline_results.csv")
+ column = "pressure:AV:AR_SYS"
+ num_samples = 20
+ output_csv = os.path.join(script_dir, "target_pressure_ar_sys.csv")
+
+ if not os.path.exists(baseline_path):
+ raise FileNotFoundError(
+ f"Baseline results not found: {baseline_path}\n"
+ "Generate baseline results first with:\n"
+ "python -c 'from main import run_baseline; run_baseline(\"model.json\")'"
+ )
+
+ df = pd.read_csv(baseline_path)
+ if column not in df.columns:
+ raise ValueError(
+ f"Column '{column}' not found. Available: {list(df.columns)}"
+ )
+
+ times = df["time"].values
+ values = df[column].values
+
+ n = len(times)
+ if n < 2:
+ raise ValueError("Baseline results have too few points")
+ indices = np.linspace(0, n - 1, num_samples, dtype=int)
+ indices = np.unique(indices)
+
+ time_samples = times[indices]
+ value_samples = values[indices]
+
+ pd.DataFrame({"time": time_samples, "value": value_samples}).to_csv(
+ output_csv, index=False
+ )
+ print(f"Saved {output_csv} ({len(time_samples)} samples)")
+
+ fig, ax = plt.subplots(figsize=(10, 5))
+ ax.plot(times, values, "b-", alpha=0.4, label="Full baseline")
+ ax.plot(
+ time_samples,
+ value_samples,
+ "ro-",
+ markersize=6,
+ label=f"Target samples (n={len(time_samples)})",
+ )
+ ax.set_xlabel("Time (s)")
+ ax.set_ylabel(column)
+ ax.set_title(f"Target: {column} ({num_samples} samples over 1 cardiac cycle)")
+ ax.legend()
+ ax.grid(True, alpha=0.3)
+ plt.tight_layout()
+ plt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/target_pressure_ar_sys.csv b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/target_pressure_ar_sys.csv
new file mode 100644
index 000000000..09e17189a
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/targets/target_pressure_ar_sys.csv
@@ -0,0 +1,21 @@
+time,value
+0.0,7934.087189214573
+0.0360865988372118,7395.10927867438
+0.0721731976744202,6898.888606327072
+0.1082597965116285,6442.187000610236
+0.1443463953488368,6021.776649582252
+0.1814353997093043,5624.3100794864295
+0.2175219985465126,5268.46173178877
+0.253608597383721,5493.963195469164
+0.2896951962209328,7891.173058660428
+0.3257817950581412,9586.308311147264
+0.3628707994186051,10746.496806303792
+0.398957398255817,11471.592615438023
+0.4350439970930253,11915.925157646128
+0.4711305959302336,12103.828967798649
+0.507217194767442,11451.367763369775
+0.5443061991279095,10616.56668293693
+0.5803927979651178,9865.399674799495
+0.6164793968023261,9173.484636661084
+0.652565995639538,8536.415197257471
+0.6896550000000019,7934.1762529370735
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_complex.yaml b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_complex.yaml
new file mode 100644
index 000000000..6670e4c44
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_complex.yaml
@@ -0,0 +1,228 @@
+# Tuning configuration for Regazzoni closed-loop model
+#
+# INSTRUCTIONS:
+# 1. Run baseline:
+# python -c 'from main import run_baseline; run_baseline("model.json")'
+# 2. Inspect baseline_results.csv and baseline_summary.csv
+# 3. Update the 'targets' section below with your desired targets
+# 4. Run optimization:
+# svzerodtuner optimize tuning_complex.yaml
+
+model:
+ config_file: "model.json"
+
+# Parameters to optimize
+# Add or remove parameters as needed
+parameters:
+ - name: "LV.Epass"
+ bounds: [1e6, 1e8]
+ scaling: log
+
+ - name: "LV.Emax"
+ bounds: [1e7, 1e9]
+ scaling: log
+
+ - name: "RV.Epass"
+ bounds: [1e6, 1e8]
+ scaling: log
+
+ - name: "RV.Emax"
+ bounds: [1e7, 1e9]
+ scaling: log
+
+ - name: "LA.Epass"
+ bounds: [1e6, 1e8]
+ scaling: log
+
+ - name: "RA.Epass"
+ bounds: [1e6, 1e8]
+ scaling: log
+
+ - name: "LV.Vrest"
+ bounds: [1e-6, 1e-4]
+ scaling: max
+
+ - name: "RV.Vrest"
+ bounds: [1e-6, 1e-4]
+ scaling: max
+
+ - name: "AR_SYS.R_poiseuille"
+ bounds: [1e7, 1e9]
+ scaling: log
+
+ - name: "AR_SYS.C"
+ bounds: [1e-9, 1e-7]
+ scaling: log
+
+ - name: "VEN_SYS.R_poiseuille"
+ bounds: [1e5, 1e7]
+ scaling: log
+
+ - name: "initial_condition.pressure:J0:VEN_SYS"
+ bounds: [1e2, 1e4]
+ scaling: log
+
+
+# Target outputs
+# Specify which outputs you want to match and their target values
+# Provide expressions with output names from baseline_summary.csv
+targets:
+ - name: LV max volume
+ type: scalar
+ expression: np.max(Vc:LV)
+ target_value: 116.0e-6 # m^3
+ relative_bounds: 5%
+ weight: 1.0
+ - name: LV min volume
+ type: scalar
+ expression: np.min(Vc:LV)
+ target_value: 38.6e-6 # m^3
+ relative_bounds: 5%
+ weight: 1.0
+ - name: LV volume
+ type: time_series
+ expression: Vc:LV
+ target_file: "targets/P003_chamber_volumes/target_V_LV.csv"
+ relative_bounds: 5%
+ weight: 1.0
+
+ - name: RV max volume
+ type: scalar
+ expression: np.max(Vc:RV)
+ target_value: 171.0e-6 # m^3
+ relative_bounds: 5%
+ weight: 1.0
+ - name: RV min volume
+ type: scalar
+ expression: np.min(Vc:RV)
+ target_value: 82.0e-6 # m^3
+ relative_bounds: 5%
+ weight: 1.0
+ - name: RV volume
+ type: time_series
+ expression: Vc:RV
+ target_file: "targets/P003_chamber_volumes/target_V_RV.csv"
+ relative_bounds: 5%
+ weight: 1.0
+
+ - name: LA max volume
+ type: scalar
+ expression: np.max(Vc:LA)
+ target_value: 72.7e-6 # m^3
+ relative_bounds: 5%
+ weight: 1.0
+ - name: LA min volume
+ type: scalar
+ expression: np.min(Vc:LA)
+ target_value: 38.2e-6 # m^3
+ relative_bounds: 5%
+ weight: 1.0
+ - name: LA volume
+ type: time_series
+ expression: Vc:LA
+ target_file: "targets/P003_chamber_volumes/target_V_LA.csv"
+ relative_bounds: 5%
+ weight: 1.0
+
+ - name: RA max volume
+ type: scalar
+ expression: np.max(Vc:RA)
+ target_value: 107.9e-6 # m^3
+ relative_bounds: 5%
+ weight: 1.0
+ - name: RA min volume
+ type: scalar
+ expression: np.min(Vc:RA)
+ target_value: 56.0e-6 # m^3
+ relative_bounds: 5%
+ weight: 1.0
+ - name: RA volume
+ type: time_series
+ expression: Vc:RA
+ target_file: "targets/P003_chamber_volumes/target_V_RA.csv"
+ relative_bounds: 5%
+ weight: 1.0
+
+ - name: Systemic arterial max pressure
+ type: scalar
+ expression: np.max(pressure:AV:AR_SYS)
+ target_value: 13065 # Pa (98.0 mmHg)
+ relative_bounds: 5%
+ weight: 1.0
+ - name: Systemic arterial min pressure
+ type: scalar
+ expression: np.min(pressure:AV:AR_SYS)
+ target_value: 7066 # Pa (53.0 mmHg)
+ relative_bounds: 5%
+ weight: 1.0
+
+ - name: Pulmonary arterial max pressure
+ type: scalar
+ expression: np.max(pressure:PV:AR_PUL)
+ target_value: 2666 # Pa (20.0 mmHg)
+ relative_bounds: 20%
+ weight: 1.0
+ - name: Pulmonary arterial min pressure
+ type: scalar
+ expression: np.min(pressure:PV:AR_PUL)
+ target_value: 1533 # Pa (11.5 mmHg)
+ relative_bounds: 20%
+ weight: 1.0
+
+ - name: Systemic venous mean pressure
+ type: scalar
+ expression: np.mean(pressure:J0:VEN_SYS)
+ target_value: 800 # Pa (6.0 mmHg)
+ relative_bounds: 20%
+ weight: 1.0
+
+ - name: Right atrial mean pressure
+ type: scalar
+ expression: np.mean(pressure:J1:RA)
+ target_value: 533 # Pa (4.0 mmHg)
+ relative_bounds: 20%
+ weight: 1.0
+
+ - name: Left atrial mean pressure
+ type: scalar
+ expression: np.mean(pressure:J3:LA)
+ target_value: 933 # Pa (7.0 mmHg)
+ relative_bounds: 20%
+ weight: 1.0
+
+objective:
+ # L1 or L2 norm (of relative errors)
+ norm: L1
+
+# Optimization - use scipy's exact parameter names
+optimization:
+ terminate_at_zero: true # Stop when objective hits zero (default: true). Set false to run full maxiter.
+ # Differential evolution specific options
+ # "To improve your chances of finding a global minimum use higher popsize values,
+ # with higher mutation and (dithering), but lower recombination values. This
+  # has the effect of widening the search radius, but slowing convergence."
+ algorithm: "differential_evolution"
+ maxiter: 500
+ workers: -1
+ updating: 'deferred'
+ tol: 1e-2
+ popsize: 30
+ init: 'sobol'
+ strategy: 'best1bin'
+ mutation: [0.5, 1.5]
+ recombination: 0.5
+ polish: false
+
+ # Nelder-Mead specific options
+ # algorithm: "Nelder-Mead"
+ # maxiter: 10000
+ # xatol: 1e-6
+ # fatol: 1e-6
+ # adaptive: true
+
+# Output
+output:
+ directory: "optimization_results_complex"
+ save_history: true
+ save_plots: true
+ save_final_config: true
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_differential_evolution.yaml b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_differential_evolution.yaml
new file mode 100644
index 000000000..af7a6b04a
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_differential_evolution.yaml
@@ -0,0 +1,71 @@
+# Tuning configuration for Regazzoni closed-loop model
+#
+# INSTRUCTIONS:
+# 1. Run baseline:
+# python -c 'from main import run_baseline; run_baseline("model.json")'
+# 2. Inspect baseline_results.csv and baseline_summary.csv
+# 3. Update the 'targets' section below with your desired targets
+# 4. Run optimization:
+# svzerodtuner optimize tuning_differential_evolution.yaml
+
+model:
+ config_file: "model.json"
+
+# Parameters to optimize
+# Add or remove parameters as needed
+parameters:
+ - name: "LV.Emax"
+ bounds: [1e7, 5e8]
+ - name: "AR_SYS.R_poiseuille"
+ bounds: [5e7, 5e8]
+ - name: "AR_SYS.C"
+ bounds: [1e-9, 1e-8]
+
+# Target outputs
+# Specify which outputs you want to match and their target values
+# Provide expressions with output names from baseline_summary.csv
+targets:
+ - name: Systemic arterial max pressure
+ type: scalar
+ expression: np.max(pressure:AV:AR_SYS)
+ target_value: 13065 # Pa
+ weight: 1.0
+
+ - name: Systemic arterial min pressure
+ type: scalar
+ expression: np.min(pressure:AV:AR_SYS)
+ target_value: 7066 # Pa
+ weight: 1.0
+
+ - name: LV ejection fraction
+ type: scalar
+ expression: (np.max(Vc:LV) - np.min(Vc:LV))/np.max(Vc:LV)
+ target_value: 0.5 # 50% (fraction 0-1)
+ relative_bounds: 5%
+ weight: 1.0
+
+objective:
+ # L1 or L2 norm (of relative errors)
+ norm: L1
+
+# Optimization - use scipy differential_evolution options (maxiter, tol, workers, popsize, etc.)
+optimization:
+ terminate_at_zero: true # Stop when objective hits zero (default: true). Set false to run full maxiter.
+ algorithm: "differential_evolution"
+ maxiter: 30
+ tol: 1e-9
+ workers: -1
+ updating: 'deferred'
+ popsize: 5
+ init: 'sobol'
+ strategy: 'best2bin'
+ mutation: [0.5, 1]
+ recombination: 0.7
+ polish: false
+
+# Output
+output:
+ directory: "optimization_results_differential_evolution"
+ save_history: true
+ save_plots: true
+ save_final_config: true
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_job.sh b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_job.sh
new file mode 100644
index 000000000..ea87cc706
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_job.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+# Name of your job
+#SBATCH --job-name=tuning_job
+
+# Name of partition
+#SBATCH --partition=amarsden
+
+# Specify the name of the output file. The %j specifies the job ID
+#SBATCH --output=tuning_job.o%j
+
+# Specify a name of the error file. The %j specifies the job ID
+#SBATCH --error=tuning_job.e%j
+
+# The walltime you require for your simulation
+#SBATCH --time=01:00:00
+
+# Job priority. Leave as normal for now.
+#SBATCH --qos=normal
+
+# Number of nodes you are requesting for your job. You can have 24 processors per node, so plan accordingly
+#SBATCH --nodes=1
+
+# Amount of memory you require per node. The default is 4000 MB (or 4 GB) per node
+#SBATCH --mem=8000
+
+# Number of processors per node (for parallel differential_evolution)
+#SBATCH --ntasks-per-node=32
+
+# Send an email to this address when your job starts and finishes
+#SBATCH --mail-user=abrown97@stanford.edu
+#SBATCH --mail-type=begin
+#SBATCH --mail-type=fail
+#SBATCH --mail-type=end
+
+# Load Modules
+module purge
+module load python/3.14.2
+module load py-scipy/1.16.3_py314
+module load py-pandas/2.3.3_py314
+module load viz
+module load py-matplotlib/3.10.8_py314
+module load cmake
+
+# Relative paths (submit from this directory)
+VENV_DIR="../../venv"
+REQUIREMENTS="../../requirements.txt"
+SVZEROD_ROOT="../../../.." # svZeroDSolver repo root (for pip install -e pysvzerod)
+
+# Check if virtual environment exists and has Python; if not, create and install
+if [[ ! -f "$VENV_DIR/bin/python" ]]; then
+ echo "Virtual environment not found at $VENV_DIR. Creating and installing dependencies..."
+ python3 -m venv "$VENV_DIR"
+ "$VENV_DIR/bin/pip" install --upgrade pip
+ # Install pip packages from requirements
+ "$VENV_DIR/bin/pip" install -r "$REQUIREMENTS"
+ # Install svZeroDSolver from source
+ "$VENV_DIR/bin/pip" install -e "$SVZEROD_ROOT"
+ echo "Virtual environment ready."
+else
+ echo "Using existing virtual environment at $VENV_DIR"
+fi
+
+# Run main.py with the venv's Python
+source "$VENV_DIR/bin/activate"
+python -u main.py
+
+# Submit job with:
+# sbatch ./tuning_job.sh
+# Check status with:
+# squeue -u $USER
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_nelder_mead.yaml b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_nelder_mead.yaml
new file mode 100644
index 000000000..6dcba9bbc
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_nelder_mead.yaml
@@ -0,0 +1,65 @@
+# Tuning configuration for Regazzoni closed-loop model
+#
+# INSTRUCTIONS:
+# 1. Run baseline:
+# python -c 'from main import run_baseline; run_baseline("model.json")'
+# 2. Inspect baseline_results.csv and baseline_summary.csv
+# 3. Update the 'targets' section below with your desired targets
+# 4. Run optimization:
+# svzerodtuner optimize tuning_nelder_mead.yaml
+
+model:
+ config_file: "model.json"
+
+# Parameters to optimize
+# Add or remove parameters as needed
+parameters:
+ - name: "LV.Emax"
+ bounds: [1e7, 5e8]
+ - name: "AR_SYS.R_poiseuille"
+ bounds: [5e7, 5e8]
+ - name: "AR_SYS.C"
+ bounds: [1e-9, 1e-8]
+
+# Target outputs
+# Specify which outputs you want to match and their target values
+# Provide expressions with output names from baseline_summary.csv
+targets:
+ - name: Systemic arterial max pressure
+ type: scalar
+ expression: np.max(pressure:AV:AR_SYS)
+ target_value: 13065 # Pa
+ weight: 1.0
+
+ - name: Systemic arterial min pressure
+ type: scalar
+ expression: np.min(pressure:AV:AR_SYS)
+ target_value: 7066 # Pa
+ weight: 1.0
+
+ - name: LV ejection fraction
+ type: scalar
+ expression: (np.max(Vc:LV) - np.min(Vc:LV))/np.max(Vc:LV)
+ target_value: 0.5 # 50% (fraction 0-1)
+ relative_bounds: 5%
+ weight: 1.0
+
+objective:
+ # L1 or L2 norm (of relative errors)
+ norm: L1
+
+# Optimization - use scipy Nelder-Mead options (maxiter, xatol, fatol, adaptive, etc.)
+optimization:
+ terminate_at_zero: true # Stop when objective hits zero (default: true). Set false to run full maxiter.
+ algorithm: "Nelder-Mead"
+ maxiter: 30
+ xatol: 1e-6
+ fatol: 1e-6
+ adaptive: true
+
+# Output
+output:
+ directory: "optimization_results_nelder_mead"
+ save_history: true
+ save_plots: true
+ save_final_config: true
diff --git a/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_time_series_target.yaml b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_time_series_target.yaml
new file mode 100644
index 000000000..41ad186b6
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_time_series_target.yaml
@@ -0,0 +1,59 @@
+# Tuning configuration for Regazzoni closed-loop model
+# Tests time series target matching: tune parameters to match a target waveform.
+#
+# INSTRUCTIONS:
+# 1. Run baseline first to inspect outputs:
+# python -c 'from main import run_baseline; run_baseline("model.json")'
+# 2. Optionally create target from baseline: copy a column from baseline_results.csv
+# to targets/ as CSV with 'time' and 'value' columns
+# 3. Run optimization:
+# svzerodtuner optimize tuning_time_series_target.yaml
+
+model:
+ config_file: "model.json"
+
+# Parameters to optimize
+parameters:
+ - name: "LV.Emax"
+ bounds: [1e7, 1e9]
+ - name: "AR_SYS.C"
+ bounds: [1e-9, 1e-7]
+
+# Target outputs
+# Specify which outputs you want to match and their target values
+# Provide expressions with output names from baseline_summary.csv
+targets:
+ - name: Systemic arterial pressure
+ type: time_series
+ expression: pressure:AV:AR_SYS
+ # Run create_target_from_baseline.py to create the target file from the baseline results.
+ target_file: "targets/target_pressure_ar_sys.csv"
+ relative_bounds: "5%"
+ weight: 1.0
+
+ - name: LV max volume
+ type: scalar
+ expression: np.max(Vc:LV)
+ target_value: 167.6e-6 # m^3
+ relative_bounds: 5%
+ weight: 1.0
+
+objective:
+ # L1 or L2 norm (of relative errors)
+ norm: L1
+
+# Optimization
+optimization:
+ terminate_at_zero: true
+ algorithm: "Nelder-Mead"
+ maxiter: 100
+ xatol: 1e-6
+ fatol: 1e-6
+ adaptive: true
+
+# Output
+output:
+ directory: "optimization_results_time_series"
+ save_history: true
+ save_plots: true
+ save_final_config: true
diff --git a/applications/svZeroDTuner/examples/closed_loop_Zingaro/main.py b/applications/svZeroDTuner/examples/closed_loop_Zingaro/main.py
new file mode 100644
index 000000000..8da54a741
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Zingaro/main.py
@@ -0,0 +1,279 @@
+"""
+sv0D Tuning Framework - Zingaro closed-loop model example.
+
+This script provides three modes:
+1. BASELINE MODE: Run the initial model and save all results for inspection
+2. SENSITIVITY MODE: Run correlation-based sensitivity screening
+3. OPTIMIZE MODE: Run optimization using targets specified in tuning.yaml
+
+Recommended command-line workflow:
+ Baseline: python -c 'from main import run_baseline; run_baseline("model.json")'
+ Optimize: svzerodtuner optimize tuning.yaml
+ Sensitivity: svzerodtuner sensitivity sensitivity.yaml
+"""
+
+import os
+import sys
+import numpy as np
+import pandas as pd
+import pysvzerod
+
+# Add src to path
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))
+
+import json
+from svzerodtuner.sv0d_tuner import SV0DTuner
+from svzerodtuner.visualization import plot_simulation_results
+from svzerodtuner.sensitivity import SensitivityAnalyzer
+
+
+def run_baseline(config_file):
+ """
+ Run the baseline simulation and save all results for user inspection.
+
+ This function:
+ - Runs the initial model.json simulation
+ - Saves all available outputs to baseline_results.csv
+ - Displays summary statistics (min, max, mean) for each output
+ - User can then inspect these results and specify targets in tuning_config.yaml
+ """
+ print("="*70)
+ print("BASELINE SIMULATION")
+ print("="*70)
+ print()
+
+ if not os.path.exists(config_file):
+ print(f"ERROR: Config file not found: {config_file}")
+ return
+
+ print(f"Running simulation with: {config_file}")
+ print()
+
+ # Run baseline simulation
+ try:
+ solver = pysvzerod.Solver(config_file)
+ solver.run()
+ print("✓ Simulation completed successfully\n")
+ except Exception as e:
+ print(f"ERROR running simulation: {e}")
+ return
+
+ # Extract all results
+ times = solver.get_times()
+ full_results = solver.get_full_result()
+ result_names = full_results['name'].unique()
+
+ print(f"Found {len(result_names)} output variables")
+ print()
+
+ # Create results DataFrame with all outputs
+ results_data = {'time': times}
+ summary_stats = []
+
+ for name in result_names:
+ try:
+ values = solver.get_single_result(name)
+ results_data[name] = values
+
+ # Calculate statistics
+ stats = {
+ 'output_name': name,
+ 'min': np.min(values),
+ 'max': np.max(values),
+ 'mean': np.mean(values),
+ 'std': np.std(values)
+ }
+ summary_stats.append(stats)
+ except Exception as e:
+ print(f"Warning: Could not extract {name}: {e}")
+
+ # Create baseline_results directory
+ output_dir = 'baseline_results'
+ os.makedirs(output_dir, exist_ok=True)
+
+ # Save full results to CSV
+ results_df = pd.DataFrame(results_data)
+ baseline_file = os.path.join(output_dir, 'baseline_results.csv')
+ results_df.to_csv(baseline_file, index=False)
+ print(f"✓ Saved full time series to: {baseline_file}")
+
+ # Save summary statistics
+ summary_df = pd.DataFrame(summary_stats)
+ summary_file = os.path.join(output_dir, 'baseline_summary.csv')
+ summary_df.to_csv(summary_file, index=False)
+ print(f"✓ Saved summary statistics to: {summary_file}")
+ print()
+
+ # Display summary statistics
+ print("="*70)
+ print("BASELINE RESULTS SUMMARY")
+ print("="*70)
+ print()
+ print(f"{'Output Variable':<40} {'Min':>12} {'Max':>12} {'Mean':>12}")
+ print("-"*70)
+
+ for stats in summary_stats:
+ print(f"{stats['output_name']:<40} {stats['min']:>12.4e} "
+ f"{stats['max']:>12.4e} {stats['mean']:>12.4e}")
+
+ # Generate plots
+ print()
+ print("Generating plots...")
+ plot_simulation_results(results_df, summary_df, output_dir, title_prefix="Baseline")
+
+ print()
+ print("="*70)
+ print("NEXT STEPS:")
+ print("="*70)
+ print(f"1. Inspect {output_dir}/baseline_results.csv and baseline_summary.csv")
+ print(f"2. View plots in {output_dir}/ to visualize the outputs")
+ print("3. Choose which outputs you want to target")
+ print("4. Update tuning.yaml with your desired targets")
+ print("5. Run svzerodtuner optimize ")
+ print("="*70)
+ print()
+
+
+def run_optimization(config_file):
+ """
+ Run optimization using targets specified in config_file.
+ """
+ print("="*70)
+ print("OPTIMIZATION")
+ print("="*70)
+ print()
+
+ if not os.path.exists(config_file):
+ print(f"ERROR: Config file not found: {config_file}")
+ print(f"Please create {config_file} with your optimization settings.")
+ return
+
+ print(f"Using configuration: {config_file}")
+ print()
+
+ # Initialize tuner
+ try:
+ tuner = SV0DTuner(config_file)
+ except Exception as e:
+ print(f"ERROR loading configuration: {e}")
+ return
+
+ # Run optimization
+ print("Starting optimization...")
+ print("="*70)
+ print()
+
+ try:
+ results = tuner.optimize()
+ except Exception as e:
+ print(f"ERROR during optimization: {e}")
+ return
+
+def run_sensitivity(config_file):
+ """
+ Run correlation-based sensitivity screening.
+
+ This function:
+ - Performs global sensitivity analysis on specified parameters
+ - Computes first-order and total-order screening scores
+ - Identifies which parameters most influence each quantity of interest
+ - Saves results, plots, and summary statistics
+ """
+ print("="*70)
+ print("SENSITIVITY ANALYSIS")
+ print("="*70)
+ print()
+
+ if not os.path.exists(config_file):
+ print(f"ERROR: Config file not found: {config_file}")
+ print(f"Please create {config_file} with your sensitivity analysis settings.")
+ print(f"See sensitivity.yaml.example for template.")
+ return
+
+ print(f"Using configuration: {config_file}")
+ print()
+
+ # Initialize sensitivity analyzer
+ try:
+ analyzer = SensitivityAnalyzer(config_file)
+ except Exception as e:
+ print(f"ERROR loading configuration: {e}")
+ import traceback
+ traceback.print_exc()
+ return
+
+ # Run sensitivity analysis
+ try:
+ results = analyzer.run()
+ except Exception as e:
+ print(f"ERROR during sensitivity analysis: {e}")
+ import traceback
+ traceback.print_exc()
+ return
+
+ # Save results
+ try:
+ analyzer.save_results()
+ except Exception as e:
+ print(f"ERROR saving results: {e}")
+ import traceback
+ traceback.print_exc()
+ return
+
+ # Print summary
+ print()
+ print("="*70)
+ print("SENSITIVITY ANALYSIS COMPLETE")
+ print("="*70)
+ print()
+ print("Summary of Results:")
+ print("-"*70)
+
+ for qoi_key, qoi_results in results.items():
+ print(f"\n{qoi_key}:")
+ print(f" Range: [{qoi_results['min']:.4e}, {qoi_results['max']:.4e}]")
+ print(f" Mean ± Std: {qoi_results['mean']:.4e} ± {qoi_results['std']:.4e}")
+ print(f"\n Most influential parameters (first-order indices):")
+
+ # Sort by influence
+ first_order = qoi_results['first_order']
+ sorted_params = sorted(first_order.items(), key=lambda x: abs(x[1]), reverse=True)
+
+ for param, value in sorted_params:
+ print(f" {param:<30} {value:>8.4f}")
+
+ print()
+ print(f"Results saved to: {analyzer.output_config.get('directory', 'sensitivity_results')}")
+ print("="*70)
+ print()
+
+
+def main():
+ """
+ Main function.
+
+ INSTRUCTIONS:
+ ============
+ Uncomment ONE of the following modes to run:
+
+    MODE 1: BASELINE - Run initial simulation and save results for inspection
+    MODE 2: SENSITIVITY - Run global sensitivity analysis with sensitivity.yaml
+    MODE 3: OPTIMIZE - Run optimization with targets from tuning.yaml
+ """
+ # Change to script directory
+ os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+ # ============================================================================
+ # SELECT MODE: Uncomment ONE of the following
+ # ============================================================================
+
+ #run_baseline("model.json") # MODE 1: Run baseline and save results
+ #run_sensitivity("sensitivity.yaml") # MODE 2: Run sensitivity analysis
+ run_optimization("tuning.yaml") # MODE 3: Run optimization with tuning.yaml
+
+
+ # ============================================================================
+
+
+if __name__ == "__main__":
+ main()
diff --git a/applications/svZeroDTuner/examples/closed_loop_Zingaro/model.json b/applications/svZeroDTuner/examples/closed_loop_Zingaro/model.json
new file mode 100644
index 000000000..29b931e98
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Zingaro/model.json
@@ -0,0 +1,278 @@
+{
+ "_comment": "This is an sv0D input file for a closed loop circulation model by Zingaro et al (2024). The units are as follows: Compliance (C) is in m^3/Pa, Resistance (R) is in Pa/(m^3/s), Inductance is in Pa/(m^3/s^2), Pressure (Pd) is in Pa.",
+ "_diagram_a": "LA -> MV -> LV -> AV -> L_AV -> J0 -> R_UPSTREAM_SYS -> J1 -> AR_SYS -> J2 -> VEN_SYS -> J3 -> RA -> TV -> RV -> PV -> L_PV -> J4 -> R_UPSTREAM_PUL -> J5 -> AR_PUL -> J6 -> VEN_PUL -> J7 -> LA",
+ "simulation_parameters": {
+ "number_of_cardiac_cycles": 30,
+ "number_of_time_pts_per_cardiac_cycle": 689,
+ "output_variable_based": true,
+ "output_all_cycles": false,
+ "steady_initial": false,
+ "cardiac_period": 0.689655
+ },
+ "vessels": [
+ {
+ "vessel_name": "L_AV",
+ "vessel_id": 1,
+ "vessel_length": 10.0,
+ "zero_d_element_type": "BloodVessel",
+ "zero_d_element_values": {
+ "C": 0,
+ "R_poiseuille": 0,
+ "L": 66661.0
+ }
+ },
+ {
+ "vessel_name": "R_UPSTREAM_SYS",
+ "vessel_id": 2,
+ "vessel_length": 10.0,
+ "zero_d_element_type": "BloodVessel",
+ "zero_d_element_values": {
+ "C": 0,
+ "R_poiseuille": 6399456,
+ "L": 0
+ }
+ },
+ {
+ "vessel_name": "AR_SYS",
+ "vessel_id": 3,
+ "vessel_length": 10.0,
+ "zero_d_element_type": "BloodVesselCRL",
+ "zero_d_element_values": {
+ "C": 1.125095633128816e-8,
+ "R_poiseuille": 63994560.0,
+ "L": 666610
+ }
+ },
+ {
+ "vessel_name": "VEN_SYS",
+ "vessel_id": 4,
+ "vessel_length": 10.0,
+ "zero_d_element_type": "BloodVesselCRL",
+ "zero_d_element_values": {
+ "C": 4.5004e-7,
+ "R_poiseuille": 34663720,
+ "L": 66661.0
+ }
+ },
+ {
+ "vessel_name": "L_PV",
+ "vessel_id": 5,
+ "vessel_length": 10.0,
+ "zero_d_element_type": "BloodVessel",
+ "zero_d_element_values": {
+ "C": 0,
+ "R_poiseuille": 0,
+ "L": 66661.0
+ }
+ },
+ {
+ "vessel_name": "R_UPSTREAM_PUL",
+ "vessel_id": 6,
+ "vessel_length": 10.0,
+ "zero_d_element_type": "BloodVessel",
+ "zero_d_element_values": {
+ "C": 0,
+ "R_poiseuille": 428176.9352,
+ "L": 0
+ }
+ },
+ {
+ "vessel_name": "AR_PUL",
+ "vessel_id": 7,
+ "vessel_length": 10.0,
+ "zero_d_element_type": "BloodVesselCRL",
+ "zero_d_element_values": {
+ "C": 7.5006e-8,
+ "R_poiseuille": 4281769.352,
+ "L": 6.66611e4
+ }
+ },
+ {
+ "vessel_name": "VEN_PUL",
+ "vessel_id": 8,
+ "vessel_length": 10.0,
+ "zero_d_element_type": "BloodVesselCRL",
+ "zero_d_element_values": {
+ "C": 1.2001e-7,
+ "R_poiseuille": 4757462.248,
+ "L": 6.66611e4
+ }
+ }
+ ],
+ "junctions": [
+ {
+ "inlet_blocks": ["L_AV"],
+ "junction_name": "J0",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["R_UPSTREAM_SYS"]
+ },
+ {
+ "inlet_blocks": ["R_UPSTREAM_SYS"],
+ "junction_name": "J1",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["AR_SYS"]
+ },
+ {
+ "inlet_blocks": ["AR_SYS"],
+ "junction_name": "J2",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["VEN_SYS"]
+ },
+ {
+ "inlet_blocks": ["VEN_SYS"],
+ "junction_name": "J3",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["RA"]
+ },
+ {
+ "inlet_blocks": ["L_PV"],
+ "junction_name": "J4",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["R_UPSTREAM_PUL"]
+ },
+ {
+ "inlet_blocks": ["R_UPSTREAM_PUL"],
+ "junction_name": "J5",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["AR_PUL"]
+ },
+ {
+ "inlet_blocks": ["AR_PUL"],
+ "junction_name": "J6",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["VEN_PUL"]
+ },
+ {
+ "inlet_blocks": ["VEN_PUL"],
+ "junction_name": "J7",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["LA"]
+ }
+ ],
+ "boundary_conditions": [],
+ "chambers": [
+ {
+ "type": "LinearElastanceChamber",
+ "name": "LA",
+ "values": {
+ "Emax": 2.666e7,
+ "Epass": 2.0449931e7,
+ "Vrest": 4.0e-6
+ },
+ "activation_function": {
+ "type": "two_hill",
+ "t_shift": 0.025,
+ "tau_1": 0.07586206896551724,
+ "tau_2": 0.12413793103448276,
+ "m1": 1.32,
+ "m2": 13.1
+ }
+ },
+ {
+ "type": "LinearElastanceChamber",
+ "name": "LV",
+ "values": {
+ "Emax": 1e8,
+ "Epass": 1.2605372e7,
+ "Vrest": 1.9763060505362944e-6
+ },
+ "activation_function": {
+ "type": "two_hill",
+ "t_shift": 0.207,
+ "tau_1": 0.18551724137931036,
+ "tau_2": 0.31172413793103454,
+ "m1": 1.32,
+ "m2": 27.4
+ }
+ },
+ {
+ "type": "LinearElastanceChamber",
+ "name": "RA",
+ "values": {
+ "Emax": 7.999343e6,
+ "Epass": 4.932281e6,
+ "Vrest": 4.0e-6
+ },
+ "activation_function": {
+ "type": "two_hill",
+ "t_shift": 0.025,
+ "tau_1": 0.07586206896551724,
+ "tau_2": 0.12413793103448276,
+ "m1": 1.32,
+ "m2": 13.1
+ }
+ },
+ {
+ "type": "LinearElastanceChamber",
+ "name": "RV",
+ "values": {
+ "Emax": 6.5719659e7,
+ "Epass": 5.406837e6,
+ "Vrest": 54.31634509432495e-6
+ },
+ "activation_function": {
+ "type": "two_hill",
+ "t_shift": 0.207,
+ "tau_1": 0.18551724137931036,
+ "tau_2": 0.31172413793103454,
+ "m1": 1.32,
+ "m2": 27.4
+ }
+ }
+ ],
+ "valves": [
+ {
+ "type": "PiecewiseValve",
+ "name": "MV",
+ "params": {
+ "Rmin": 999915.0,
+ "Rmax": 6.666e9,
+ "upstream_block": "LA",
+ "downstream_block": "LV"
+ }
+ },
+ {
+ "type": "PiecewiseValve",
+ "name": "AV",
+ "params": {
+ "Rmin": 4732931.0,
+ "Rmax": 6.666e9,
+ "upstream_block": "LV",
+ "downstream_block": "L_AV"
+ }
+ },
+ {
+ "type": "PiecewiseValve",
+ "name": "TV",
+ "params": {
+ "Rmin": 999915.0,
+ "Rmax": 6.666e9,
+ "upstream_block": "RA",
+ "downstream_block": "RV"
+ }
+ },
+ {
+ "type": "PiecewiseValve",
+ "name": "PV",
+ "params": {
+ "Rmin": 2453124.8,
+ "Rmax": 6.666e9,
+ "upstream_block": "RV",
+ "downstream_block": "L_PV"
+ }
+ }
+ ],
+ "initial_condition": {
+ "pressure:R_UPSTREAM_SYS:J1": 11185.7158,
+ "pressure:J1:AR_SYS": 11185.7158,
+
+ "pressure:AR_SYS:J2": 4732.9316,
+ "pressure:J2:VEN_SYS": 4732.9316,
+
+ "pressure:R_UPSTREAM_PUL:J5": 1986.4978,
+ "pressure:J5:AR_PUL": 1986.4978,
+
+ "pressure:AR_PUL:J6": 1810.51276,
+ "pressure:J6:VEN_PUL": 1810.51276
+ }
+}
diff --git a/applications/svZeroDTuner/examples/closed_loop_Zingaro/sensitivity.yaml b/applications/svZeroDTuner/examples/closed_loop_Zingaro/sensitivity.yaml
new file mode 100644
index 000000000..bb0d0e1f8
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Zingaro/sensitivity.yaml
@@ -0,0 +1,44 @@
+# Sensitivity Analysis Configuration for Zingaro closed-loop model
+#
+# This file specifies parameters to vary and quantities of interest to analyze
+# using correlation-based sensitivity screening.
+
+model:
+ config_file: "model.json"
+
+# Parameters to vary during sensitivity analysis
+# Specify the bounds over which to vary each parameter
+parameters:
+ - name: "LV.Emax"
+ bounds: [1e8, 5e8] # Left ventricle maximum elastance
+
+ - name: "AR_SYS.R_poiseuille"
+ bounds: [5e7, 5e8] # Systemic arterial resistance
+
+ - name: "AR_SYS.C"
+ bounds: [1e-9, 1e-8] # Systemic arterial compliance
+
+ - name: "L_AV.L"
+ bounds: [1e5, 1e7] # Aortic valve inertance
+
+ - name: "R_UPSTREAM_SYS.R_poiseuille"
+ bounds: [1e6, 1e8] # Systemic arterial resistance
+
+# Quantities of Interest (QoI) to analyze
+# These are the outputs you want to understand the sensitivity of
+quantities_of_interest:
+ - name: Systemic arterial max pressure
+ expression: np.max(pressure:J1:AR_SYS)
+ - name: Systemic arterial min pressure
+ expression: np.min(pressure:J1:AR_SYS)
+
+# Sensitivity analysis settings
+sensitivity:
+ n_samples: 256 # Number of quasi-random samples for screening
+ # Recommended: 256-512 for quick screening, 1024-2048 for higher accuracy
+ # Higher = more accurate but slower
+
+# Output settings
+output:
+ directory: "sensitivity_results"
+ save_plots: true # Save visualization plots
diff --git a/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/convert_volume_data.py b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/convert_volume_data.py
new file mode 100644
index 000000000..c8314a453
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/convert_volume_data.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python3
+"""
+Convert chamber volume CSVs (with RR% and mL) into time–series target files
+(time in seconds, volume in m^3) suitable for svZeroDTuner time_series targets.
+
+INPUT FILES:
+- volume.csv with columns: RR%, V_LA, V_LV, V_RA, V_RV (volumes in mL)
+- la_volume_manual.csv with columns: RR%, V_LA (volume in mL)
+
+OUTPUT FILES (in the same directory as this script):
+- target_V_LA.csv
+- target_V_LV.csv
+- target_V_RA.csv
+- target_V_RV.csv
+  (note: target_V_LA.csv is converted from la_volume_manual.csv, not volume.csv)
+
+Each output file has columns: time (seconds), value (m^3)
+"""
+
+import os
+import pandas as pd
+from typing import Dict
+
+# ============================================================
+# CONFIG: EDIT THESE VALUES AS NEEDED
+# ============================================================
+
+# Cardiac period (duration of one RR interval) in seconds
+CARDIAC_PERIOD_SEC = 0.689
+
+# Values from ECG for a particular patient
+# PR interval in seconds
+PR_INTERVAL_SEC = 0.182
+
+# QRS duration in seconds
+QRS_DURATION_SEC = 0.088
+
+# Input filenames (relative to this script's directory)
+VOLUME_FILE = "volume.csv"
+LA_MANUAL_FILE = "la_volume_manual.csv"
+
+# Mapping from input column name -> output CSV filename
+VOLUME_COLUMNS_MODEL: Dict[str, str] = {
+ "V_LV": "target_V_LV.csv",
+ "V_RA": "target_V_RA.csv",
+ "V_RV": "target_V_RV.csv",
+}
+
+VOLUME_COLUMNS_MANUAL: Dict[str, str] = {
+ "V_LA": "target_V_LA.csv",
+}
+
+# ============================================================
+# HELPER FUNCTIONS
+# ============================================================
+
+
+def rr_percent_to_time(rr_percent_series, cardiac_period_sec: float):
+ """
+ Convert RR% (0–100 over one cardiac cycle) to time in seconds.
+ RR%=0 corresponds to the R-wave. Assuming the start of the P-wave corresponds
+ to t=0, then RR%=0 corresponds to the PR interval + the QRS interval/2.
+ time = (rr/100) * cardiac_period + PR interval + QRS interval/2, wrapped into [0, cardiac_period).
+ """
+ rr = rr_percent_series.astype(float) / 100.0
+ time = rr * cardiac_period_sec + PR_INTERVAL_SEC + QRS_DURATION_SEC / 2
+ return time % cardiac_period_sec
+
+
+def convert_volume_file(
+ csv_path: str,
+ column_to_output: Dict[str, str],
+):
+ """
+ Convert a volume CSV (RR% and mL) into time–series target CSVs (time [s], value [m^3]).
+ """
+ if not os.path.exists(csv_path):
+ raise FileNotFoundError(f"Input file not found: {csv_path}")
+
+ df = pd.read_csv(csv_path)
+ df.columns = df.columns.str.strip() # handle " V_LA" -> "V_LA"
+
+ if "RR%" not in df.columns:
+ raise ValueError(f"'RR%' column not found in {csv_path}. Columns: {list(df.columns)}")
+
+ times = rr_percent_to_time(df["RR%"], CARDIAC_PERIOD_SEC)
+ directory = os.path.dirname(os.path.abspath(csv_path))
+
+ for col, out_name in column_to_output.items():
+ if col not in df.columns:
+ raise ValueError(f"Column '{col}' not found in {csv_path}. Columns: {list(df.columns)}")
+
+ values_m3 = df[col].astype(float) * 1e-6 # mL -> m^3
+
+ out_df = (
+ pd.DataFrame({"time": times, "value": values_m3})
+ .sort_values("time")
+ )
+
+ out_path = os.path.join(directory, out_name)
+ out_df.to_csv(out_path, index=False)
+ print(f"Saved {out_path} ({len(out_df)} points)")
+
+
+def main():
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+
+ convert_volume_file(
+ csv_path=os.path.join(script_dir, VOLUME_FILE),
+ column_to_output=VOLUME_COLUMNS_MODEL
+ )
+
+ convert_volume_file(
+ csv_path=os.path.join(script_dir, LA_MANUAL_FILE),
+ column_to_output=VOLUME_COLUMNS_MANUAL
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/la_volume_manual.csv b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/la_volume_manual.csv
new file mode 100644
index 000000000..f0cadbb62
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/la_volume_manual.csv
@@ -0,0 +1,11 @@
+RR%, V_LA
+0.0, 38.2
+10.0, 47.45
+20.0, 54.44
+30.0, 62.1
+40.0, 68.89
+50.0, 72.69
+60.0, 63.28
+70.0, 60.03
+80.0, 58.76
+90.0, 43.86
\ No newline at end of file
diff --git a/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/target_V_LA.csv b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/target_V_LA.csv
new file mode 100644
index 000000000..9fd5ff425
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/target_V_LA.csv
@@ -0,0 +1,11 @@
+time,value
+0.019299999999999984,6.003e-05
+0.08820000000000017,5.875999999999999e-05
+0.15710000000000013,4.386e-05
+0.22599999999999998,3.82e-05
+0.2949,4.745e-05
+0.36379999999999996,5.4439999999999994e-05
+0.4326999999999999,6.21e-05
+0.5016,6.889e-05
+0.5705,7.269e-05
+0.6394,6.328e-05
diff --git a/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/target_V_LV.csv b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/target_V_LV.csv
new file mode 100644
index 000000000..2c6d032d1
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/target_V_LV.csv
@@ -0,0 +1,11 @@
+time,value
+0.019299999999999984,8.209206847820203e-05
+0.08820000000000017,9.181858533873112e-05
+0.15710000000000013,0.00011518503794616718
+0.22599999999999998,0.00011674118189347626
+0.2949,0.0001002523794307163
+0.36379999999999996,6.339886926561263e-05
+0.4326999999999999,4.0656364812357317e-05
+0.5016,3.8636694699938e-05
+0.5705,4.310993543544956e-05
+0.6394,6.767030310853679e-05
diff --git a/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/target_V_RA.csv b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/target_V_RA.csv
new file mode 100644
index 000000000..aa0e176f2
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/target_V_RA.csv
@@ -0,0 +1,11 @@
+time,value
+0.019299999999999984,9.369241418549201e-05
+0.08820000000000017,8.412201482585564e-05
+0.15710000000000013,5.6026368628527275e-05
+0.22599999999999998,6.109746593369446e-05
+0.2949,7.060273191622785e-05
+0.36379999999999996,8.362636203121454e-05
+0.4326999999999999,9.333610226068339e-05
+0.5016,0.00010107328567635535
+0.5705,0.00010789215565001145
+0.6394,0.00010176805487702007
diff --git a/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/target_V_RV.csv b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/target_V_RV.csv
new file mode 100644
index 000000000..0baa0e1fd
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/target_V_RV.csv
@@ -0,0 +1,11 @@
+time,value
+0.019299999999999984,0.00011716075351352032
+0.08820000000000017,0.000134236112579873
+0.15710000000000013,0.00016982846499561386
+0.22599999999999998,0.00017174178346179283
+0.2949,0.0001542893810374056
+0.36379999999999996,0.00011597469224428158
+0.4326999999999999,9.100090852473403e-05
+0.5016,8.201940604537174e-05
+0.5705,8.374565652485933e-05
+0.6394,0.00010057232797790399
diff --git a/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/volume.csv b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/volume.csv
new file mode 100644
index 000000000..813cae3b8
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Zingaro/targets/P003_chamber_volumes/volume.csv
@@ -0,0 +1,11 @@
+RR%, V_LA, V_LV, V_RA, V_RV
+0.000000000000000000e+00, 3.353854734814017036e+01, 1.167411818934762664e+02, 6.109746593369445833e+01, 1.717417834617928349e+02
+1.000000000000000000e+01, 4.168803383024022935e+01, 1.002523794307163030e+02, 7.060273191622785305e+01, 1.542893810374056045e+02
+2.000000000000000000e+01, 4.975275660511201181e+01, 6.339886926561263181e+01, 8.362636203121454059e+01, 1.159746922442815844e+02
+3.000000000000000000e+01, 5.542701117063629113e+01, 4.065636481235731736e+01, 9.333610226068341831e+01, 9.100090852473402947e+01
+4.000000000000000000e+01, 6.273942063591531593e+01, 3.863669469993800476e+01, 1.010732856763553542e+02, 8.201940604537175261e+01
+5.000000000000000000e+01, 6.634282300620391482e+01, 4.310993543544957163e+01, 1.078921556500114605e+02, 8.374565652485932787e+01
+6.000000000000000000e+01, 5.618893389388352944e+01, 6.767030310853678543e+01, 1.017680548770200915e+02, 1.005723279779040098e+02
+7.000000000000000000e+01, 5.237233218741626217e+01, 8.209206847820203734e+01, 9.369241418549202649e+01, 1.171607535135203193e+02
+8.000000000000000000e+01, 4.883735839209346352e+01, 9.181858533873112549e+01, 8.412201482585564349e+01, 1.342361125798730086e+02
+9.000000000000000000e+01, 3.161169238548909988e+01, 1.151850379461671849e+02, 5.602636862852727972e+01, 1.698284649956138708e+02
diff --git a/applications/svZeroDTuner/examples/closed_loop_Zingaro/tuning.yaml b/applications/svZeroDTuner/examples/closed_loop_Zingaro/tuning.yaml
new file mode 100644
index 000000000..2fd99deb0
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Zingaro/tuning.yaml
@@ -0,0 +1,231 @@
+# Tuning configuration for Regazzoni closed-loop model
+#
+# INSTRUCTIONS:
+# 1. Run baseline:
+# python -c 'from main import run_baseline; run_baseline("model.json")'
+# 2. Inspect baseline_results.csv and baseline_summary.csv
+# 3. Update the 'targets' section below with your desired targets
+# 4. Run optimization:
+# svzerodtuner optimize tuning.yaml
+
+model:
+ config_file: "model.json"
+
+# Parameters to optimize
+# Add or remove parameters as needed
+# Optional scaling: "log" makes the optimizer work in log(parameter) (recommended for positive, wide-ranging params)
+parameters:
+ - name: "LV.Epass"
+ bounds: [1e6, 1e8]
+ scaling: log
+
+ - name: "LV.Emax"
+ bounds: [1e7, 1e9]
+ scaling: log
+
+ - name: "RV.Epass"
+ bounds: [1e6, 1e8]
+ scaling: log
+
+ - name: "RV.Emax"
+ bounds: [1e7, 1e9]
+ scaling: log
+
+ - name: "LA.Emax"
+ bounds: [1e6, 1e9]
+ scaling: log
+
+ - name: "LA.Epass"
+ bounds: [1e6, 1e8]
+ scaling: log
+
+ - name: "RA.Epass"
+ bounds: [1e6, 1e8]
+ scaling: log
+
+ - name: "RA.Emax"
+ bounds: [1e6, 1e9]
+ scaling: log
+
+ - name: "LV.Vrest"
+ bounds: [-1e-4, 1e-4]
+ scaling: max
+
+ - name: "RV.Vrest"
+ bounds: [-1e-4, 1e-4]
+ scaling: max
+
+ - name: "LA.Vrest"
+ bounds: [-1e-4, 1e-4]
+ scaling: max
+
+ - name: "RA.Vrest"
+ bounds: [-1e-4, 1e-4]
+ scaling: max
+
+ - name: "R_UPSTREAM_SYS.R_poiseuille"
+ bounds: [1e5, 1e7]
+ scaling: log
+
+ - name: "AR_SYS.R_poiseuille"
+ bounds: [1e7, 1e9]
+ scaling: log
+
+ - name: "AR_SYS.C"
+ bounds: [1e-9, 1e-7]
+ scaling: log
+
+ - name: "VEN_SYS.R_poiseuille"
+ bounds: [1e5, 1e8]
+ scaling: log
+
+ - name: "R_UPSTREAM_PUL.R_poiseuille"
+ bounds: [1e5, 1e7]
+ scaling: log
+
+ - name: "AR_PUL.R_poiseuille"
+ bounds: [1e5, 1e7]
+ scaling: log
+
+ - name: "AR_PUL.C"
+ bounds: [1e-9, 1e-7]
+ scaling: log
+
+ - name: "VEN_PUL.R_poiseuille"
+ bounds: [1e5, 1e8]
+ scaling: log
+
+ - name: "initial_condition.pressure:J2:VEN_SYS"
+ bounds: [1e2, 1e4]
+ scaling: log
+
+
+# Target outputs
+# Specify which outputs you want to match and their target values
+# Provide expressions with output names from baseline_summary.csv
+targets:
+ - name: LV volume
+ type: time_series
+ expression: Vc:LV
+ target_file: "targets/P003_chamber_volumes/target_V_LV.csv"
+ relative_bounds: 5%
+ weight: 1.0
+
+ - name: RV volume
+ type: time_series
+ expression: Vc:RV
+ target_file: "targets/P003_chamber_volumes/target_V_RV.csv"
+ relative_bounds: 5%
+ weight: 1.0
+
+ - name: LA volume
+ type: time_series
+ expression: Vc:LA
+ target_file: "targets/P003_chamber_volumes/target_V_LA.csv"
+ relative_bounds: 5%
+ weight: 1.0
+
+ - name: RA volume
+ type: time_series
+ expression: Vc:RA
+ target_file: "targets/P003_chamber_volumes/target_V_RA.csv"
+ relative_bounds: 5%
+ weight: 1.0
+
+ - name: Aortic max pressure
+ type: scalar
+ expression: np.max(pressure:J0:R_UPSTREAM_SYS)
+ target_value: 13065 # Pa (98.0 mmHg)
+ relative_bounds: 5%
+ weight: 1.0
+ - name: Aortic min pressure
+ type: scalar
+ expression: np.min(pressure:J0:R_UPSTREAM_SYS)
+ target_value: 7066 # Pa (53.0 mmHg)
+ relative_bounds: 5%
+ weight: 1.0
+
+ - name: Aortic valve peak flow rate
+ type: scalar
+ expression: np.max(flow:LV:AV)
+ target_value: 0.000427 # m^3/s
+ relative_bounds: 20%
+ weight: 1.0
+
+ - name: Pulmonary valve peak flow rate
+ type: scalar
+ expression: np.max(flow:RV:PV)
+ target_value: 0.000427 # m^3/s
+ relative_bounds: 20%
+ weight: 1.0
+
+ - name: Pulmonary artery max pressure
+ type: scalar
+ expression: np.max(pressure:J4:R_UPSTREAM_PUL)
+ target_value: 2666 # Pa (20.0 mmHg)
+ relative_bounds: 20%
+ weight: 1.0
+ - name: Pulmonary artery min pressure
+ type: scalar
+ expression: np.min(pressure:J4:R_UPSTREAM_PUL)
+ target_value: 1533 # Pa (11.5 mmHg)
+ relative_bounds: 20%
+ weight: 1.0
+
+ - name: Systemic venous mean pressure
+ type: scalar
+ expression: np.mean(pressure:J2:VEN_SYS)
+ target_value: 800 # Pa (6.0 mmHg)
+ relative_bounds: 20%
+ weight: 1.0
+
+ - name: Right atrial mean pressure
+ type: scalar
+ expression: np.mean(pressure:J3:RA)
+ target_value: 533 # Pa (4.0 mmHg)
+ relative_bounds: 20%
+ weight: 1.0
+
+ - name: Left atrial mean pressure
+ type: scalar
+ expression: np.mean(pressure:J7:LA)
+ target_value: 933 # Pa (7.0 mmHg)
+ relative_bounds: 20%
+ weight: 1.0
+
+objective:
+ # L1 or L2 norm (of relative errors)
+ norm: L2
+
+# Optimization - use scipy's exact parameter names
+optimization:
+ terminate_at_zero: true # Stop when objective hits zero (default: true). Set false to run full maxiter.
+ # Differential evolution specific options
+ # "To improve your chances of finding a global minimum use higher popsize values,
+ # with higher mutation and (dithering), but lower recombination values. This
+  # has the effect of widening the search radius, but slowing convergence."
+ # algorithm: "differential_evolution"
+ # maxiter: 500
+ # workers: -1
+ # updating: 'deferred'
+ # tol: 1e-2
+ # popsize: 30
+ # init: 'sobol'
+ # strategy: 'best1bin'
+ # mutation: [0.5, 1.5]
+ # recombination: 0.5
+ # polish: true
+
+ # Nelder-Mead specific options
+ algorithm: "Nelder-Mead"
+ maxiter: 10000
+ xatol: 1e-6
+ fatol: 1e-6
+ adaptive: true
+
+# Output
+output:
+ directory: "optimization_results"
+ save_history: true
+ save_plots: true
+ save_final_config: true
diff --git a/applications/svZeroDTuner/examples/closed_loop_Zingaro/tuning_job.sh b/applications/svZeroDTuner/examples/closed_loop_Zingaro/tuning_job.sh
new file mode 100644
index 000000000..0a69d2962
--- /dev/null
+++ b/applications/svZeroDTuner/examples/closed_loop_Zingaro/tuning_job.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+# Name of your job
+#SBATCH --job-name=tuning_job
+
+# Name of partition
+#SBATCH --partition=amarsden
+
+# Specify the name of the output file. The %j specifies the job ID
+#SBATCH --output=tuning_job.o%j
+
+# Specify a name of the error file. The %j specifies the job ID
+#SBATCH --error=tuning_job.e%j
+
+# The walltime you require for your simulation
+#SBATCH --time=03:00:00
+
+# Job priority. Leave as normal for now.
+#SBATCH --qos=normal
+
+# Number of nodes you are requesting for your job. You can have 24 processors per node, so plan accordingly
+#SBATCH --nodes=1
+
+# Amount of memory you require per node. The default is 4000 MB (or 4 GB) per node
+#SBATCH --mem=8000
+
+# Number of processors per node (for parallel differential_evolution)
+#SBATCH --ntasks-per-node=24
+
+# Send an email to this address when your job starts and finishes
+#SBATCH --mail-user=abrown97@stanford.edu
+#SBATCH --mail-type=begin
+#SBATCH --mail-type=fail
+#SBATCH --mail-type=end
+
+# Load Modules
+module purge
+module load python/3.14.2
+module load py-scipy/1.16.3_py314
+module load py-pandas/2.3.3_py314
+module load viz
+module load py-matplotlib/3.10.8_py314
+module load cmake
+
+# Relative paths (submit from this directory)
+VENV_DIR="../../venv"
+REQUIREMENTS="../../requirements.txt"
+SVZEROD_ROOT="../../../.." # svZeroDSolver repo root (for pip install -e pysvzerod)
+
+# Check if virtual environment exists and has Python; if not, create and install
+if [[ ! -f "$VENV_DIR/bin/python" ]]; then
+ echo "Virtual environment not found at $VENV_DIR. Creating and installing dependencies..."
+ python3 -m venv "$VENV_DIR"
+ "$VENV_DIR/bin/pip" install --upgrade pip
+ # Install pip packages from requirements
+ "$VENV_DIR/bin/pip" install -r "$REQUIREMENTS"
+ # Install svZeroDSolver from source
+ "$VENV_DIR/bin/pip" install -e "$SVZEROD_ROOT"
+ echo "Virtual environment ready."
+else
+ echo "Using existing virtual environment at $VENV_DIR"
+fi
+
+# Run main.py with the venv's Python
+source "$VENV_DIR/bin/activate"
+python -u main.py
+
+# Submit job with:
+# sbatch ./tuning_job.sh
+# Check status with:
+# squeue -u $USER
diff --git a/applications/svZeroDTuner/examples/right_heart_pa/README.md b/applications/svZeroDTuner/examples/right_heart_pa/README.md
new file mode 100644
index 000000000..8fc8b76af
--- /dev/null
+++ b/applications/svZeroDTuner/examples/right_heart_pa/README.md
@@ -0,0 +1,77 @@
+# right_heart_pa example
+
+For full svZeroDTuner usage and configuration guidance, see the svZeroDTuner guide on the docs site: <https://simvascular.github.io/svZeroDSolver/tuner.html>.
+
+This example couples a prescribed venous inflow waveform to a right atrium and right ventricle model using `LinearElastanceChamber` blocks with `piecewise_cosine` activation and `PiecewiseValve` blocks. The right ventricle ejects into a reduced-order pulmonary artery model with:
+
+- MPA: constant resistance
+- RPA/LPA: resistance + stenosis coefficient + inductance
+- RPA/LPA outlets: RCR boundary conditions
+- Upstream venous return: `VEN_SYS` vessel (CRL) driven by `VEN_IN_FLOW`
+
+All inputs and targets in this example are in cgs units:
+
+- Pressure: dyn/cm^2
+- Flow: cm^3/s
+- Resistance: dyn\*s/cm^5
+- Compliance: cm^5/dyn
+- Inductance: dyn\*s^2/cm^5
+
+## Current Model Notes
+
+- The inflow BC `VEN_IN_FLOW` is populated from the Regazzoni closed-loop baseline `flow:VEN_SYS:J1` waveform (converted to cgs).
+- IVC/SVC have been replaced by a single venous inflow that feeds `VEN_SYS` and `J_VEN -> RA`.
+- Cardiac period is set to **0.689 s** to match the Regazzoni waveform.
+- Pulmonary compliance has been increased relative to the initial MPA/RPA/LPA settings.
+- Solver settings are more conservative (higher nonlinear iterations, smaller time step).
+
+Two tuning configs are provided:
+
+- `tuning_differential_evolution.yaml`
+- `tuning_nelder_mead.yaml`
+
+In practice, Nelder-Mead has produced more stable convergence for this model than differential evolution.
+
+## Quick start
+
+Install the package once from the repository root so the `svzerodtuner` command is available:
+
+```bash
+pip install -e .
+```
+
+Then run the example from this directory:
+
+1. Baseline inspection:
+
+ ```bash
+ python -c 'from main import run_baseline; run_baseline("model.json")'
+ ```
+
+ This creates `baseline_results/` with the full time series, summary CSV, and plots for target selection.
+
+2. Tuning:
+ - Update targets in the tuning YAML if needed.
+ - Run one of the tuning configurations with the CLI:
+
+ ```bash
+ svzerodtuner optimize tuning_nelder_mead.yaml
+ ```
+
+ or
+
+ ```bash
+ svzerodtuner optimize tuning_differential_evolution.yaml
+ ```
+
+   `svzerodtuner run <config.yaml>` is also supported as an alias for `optimize`.
+
+## Notes
+
+Target names follow the solver output naming scheme:
+
+- `pressure:<node>:<block>`
+- `flow:<upstream_block>:<downstream_block>`
+
+Cardiac output is defined as mean flow through `PV -> MPA`.
+Flow split is enforced by targeting the mean flows through `RPA -> RCR_RPA` and `LPA -> RCR_LPA`.
diff --git a/applications/svZeroDTuner/examples/right_heart_pa/main.py b/applications/svZeroDTuner/examples/right_heart_pa/main.py
new file mode 100644
index 000000000..6dc34e929
--- /dev/null
+++ b/applications/svZeroDTuner/examples/right_heart_pa/main.py
@@ -0,0 +1,203 @@
+"""
+sv0D Tuning Framework - right_heart_pa example.
+
+This script provides three modes:
+1. BASELINE MODE: Run the initial model and save all results for inspection
+2. SENSITIVITY MODE: Run correlation-based sensitivity screening
+3. OPTIMIZE MODE: Run optimization using targets specified in tuning yaml
+
+Recommended command-line workflow:
+ Baseline: python -c 'from main import run_baseline; run_baseline("model.json")'
+ Optimize: svzerodtuner optimize tuning_differential_evolution.yaml
+"""
+
+import os
+import sys
+import numpy as np
+import pandas as pd
+import pysvzerod
+
+# Add src to path
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))
+
+from svzerodtuner.sv0d_tuner import SV0DTuner
+from svzerodtuner.visualization import plot_simulation_results
+from svzerodtuner.sensitivity import SensitivityAnalyzer
+
+
+def run_baseline(config_file):
+ """
+ Run the baseline simulation and save all results for user inspection.
+ """
+ print("="*70)
+ print("BASELINE SIMULATION")
+ print("="*70)
+ print()
+
+ if not os.path.exists(config_file):
+ print(f"ERROR: Config file not found: {config_file}")
+ return
+
+ print(f"Running simulation with: {config_file}")
+ print()
+
+ try:
+ solver = pysvzerod.Solver(config_file)
+ solver.run()
+ print("\u2713 Simulation completed successfully\n")
+ except Exception as e:
+ print(f"ERROR running simulation: {e}")
+ return
+
+ times = solver.get_times()
+ full_results = solver.get_full_result()
+ result_names = full_results['name'].unique()
+
+ print(f"Found {len(result_names)} output variables")
+ print()
+
+ results_data = {'time': times}
+ summary_stats = []
+
+ for name in result_names:
+ try:
+ values = solver.get_single_result(name)
+ results_data[name] = values
+ stats = {
+ 'output_name': name,
+ 'min': np.min(values),
+ 'max': np.max(values),
+ 'mean': np.mean(values),
+ 'std': np.std(values)
+ }
+ summary_stats.append(stats)
+ except Exception as e:
+ print(f"Warning: Could not extract {name}: {e}")
+
+ output_dir = 'baseline_results'
+ os.makedirs(output_dir, exist_ok=True)
+
+ results_df = pd.DataFrame(results_data)
+ baseline_file = os.path.join(output_dir, 'baseline_results.csv')
+ results_df.to_csv(baseline_file, index=False)
+ print(f"\u2713 Saved full time series to: {baseline_file}")
+
+ summary_df = pd.DataFrame(summary_stats)
+ summary_file = os.path.join(output_dir, 'baseline_summary.csv')
+ summary_df.to_csv(summary_file, index=False)
+ print(f"\u2713 Saved summary statistics to: {summary_file}")
+ print()
+
+ print("="*70)
+ print("BASELINE RESULTS SUMMARY")
+ print("="*70)
+ print()
+ print(f"{'Output Variable':<40} {'Min':>12} {'Max':>12} {'Mean':>12}")
+ print("-"*70)
+
+ for stats in summary_stats:
+ print(f"{stats['output_name']:<40} {stats['min']:>12.4e} "
+ f"{stats['max']:>12.4e} {stats['mean']:>12.4e}")
+
+ print()
+ print("Generating plots...")
+ plot_simulation_results(results_df, summary_df, output_dir, title_prefix="Baseline")
+
+ print()
+ print("="*70)
+ print("NEXT STEPS:")
+ print("="*70)
+ print(f"1. Inspect {output_dir}/baseline_results.csv and baseline_summary.csv")
+ print(f"2. View plots in {output_dir}/ to visualize the outputs")
+ print("3. Choose which outputs you want to target")
+ print("4. Update tuning yaml with your desired targets")
+    print("5. Run svzerodtuner optimize <tuning_config.yaml>")
+ print("="*70)
+ print()
+
+
+def run_optimization(config_file):
+ """
+ Run optimization using targets specified in config_file.
+ """
+ print("="*70)
+ print("OPTIMIZATION")
+ print("="*70)
+ print()
+
+ if not os.path.exists(config_file):
+ print(f"ERROR: Config file not found: {config_file}")
+ print(f"Please create {config_file} with your optimization settings.")
+ return
+
+ print(f"Using configuration: {config_file}")
+ print()
+
+ try:
+ tuner = SV0DTuner(config_file)
+ except Exception as e:
+ print(f"ERROR loading configuration: {e}")
+ return
+
+ print("Starting optimization...")
+ print("="*70)
+ print()
+
+ try:
+ tuner.optimize()
+ except Exception as e:
+ print(f"ERROR during optimization: {e}")
+ return
+
+
+def run_sensitivity(config_file):
+ """
+ Run correlation-based sensitivity screening.
+ """
+ print("="*70)
+ print("SENSITIVITY ANALYSIS")
+ print("="*70)
+ print()
+
+ if not os.path.exists(config_file):
+ print(f"ERROR: Config file not found: {config_file}")
+ print(f"Please create {config_file} with your sensitivity analysis settings.")
+ return
+
+ print(f"Using configuration: {config_file}")
+ print()
+
+ try:
+ analyzer = SensitivityAnalyzer(config_file)
+ except Exception as e:
+ print(f"ERROR loading configuration: {e}")
+ return
+
+ print("Starting sensitivity analysis...")
+ print("="*70)
+ print()
+
+ try:
+ analyzer.run_analysis()
+ except Exception as e:
+ print(f"ERROR during sensitivity analysis: {e}")
+ return
+
+
+def main():
+ # Choose one of the following:
+ # baseline_config = "model.json"
+ # optimize_config = "tuning_differential_evolution.yaml"
+ # optimize_config = "tuning_nelder_mead.yaml"
+
+ # Uncomment the mode you want to run:
+
+ # run_baseline("model.json")
+ run_optimization("tuning_differential_evolution.yaml")
+ # run_optimization("tuning_nelder_mead.yaml")
+ # run_sensitivity("sensitivity.yaml")
+ pass
+
+
+if __name__ == "__main__":
+ main()
diff --git a/applications/svZeroDTuner/examples/right_heart_pa/model.json b/applications/svZeroDTuner/examples/right_heart_pa/model.json
new file mode 100644
index 000000000..933e8874f
--- /dev/null
+++ b/applications/svZeroDTuner/examples/right_heart_pa/model.json
@@ -0,0 +1,594 @@
+{
+ "_comment": "Right heart + pulmonary arteries example. Units (cgs): C in cm^5/dyn, R in dyn*s/cm^5, L in dyn*s^2/cm^5, pressure in dyn/cm^2, flow in cm^3/s.",
+ "_diagram": "IVC,SVC -> J_VEN -> RA -> TV -> RV -> PV -> MPA -> J_PA -> RPA/LPA -> RCRs",
+ "simulation_parameters": {
+ "number_of_cardiac_cycles": 20,
+ "number_of_time_pts_per_cardiac_cycle": 689,
+ "output_variable_based": true,
+ "output_all_cycles": false,
+ "steady_initial": true,
+ "maximum_nonlinear_iterations": 200,
+ "absolute_tolerance": 1e-7,
+ "cardiac_period": 0.6890000000000001
+ },
+ "boundary_conditions": [
+ {
+ "bc_name": "VEN_IN_FLOW",
+ "bc_type": "FLOW",
+ "bc_values": {
+ "Q": [
+ 109.88979324, 109.8702695498, 109.8504118685, 109.8302253006,
+ 109.8097148715, 109.7888855282, 109.7677421408, 109.7462895037,
+ 109.7245323368, 109.70247528659999, 109.6801229275, 109.6574797626,
+ 109.6345502252, 109.6113386798, 109.58784942300001, 109.5640866848,
+ 109.5400546294, 109.5157573564, 109.49119890189999, 109.4663832393,
+ 109.44131428029999, 109.415995876, 109.3904318179, 109.3646258385,
+ 109.33858161260001, 109.312302758, 109.2832743133, 109.2413093779,
+ 109.1750366949, 109.0748670727, 108.93250123, 108.7408748505,
+ 108.49403802389999, 108.18706419559999, 107.8159678044,
+ 107.3776325745, 106.8697482216, 106.290754353, 105.6397903273,
+ 104.9166500055, 104.1217404448, 103.2560437031, 102.3210810273,
+ 101.3188787938, 100.2519356664, 99.12319051535627, 97.9359907270306,
+ 96.69406060312896, 95.40146962070814, 94.0626003880325,
+ 92.68211619130645, 91.26492808269732, 89.81616151054851,
+ 88.34112253829863, 86.8452637392324, 85.33414988970277,
+ 83.81342361374413, 82.2887711569729, 80.7658884873367,
+ 79.250447934599, 77.74806558950979, 76.26426968756559,
+ 74.80447020125986, 73.37392985902865, 71.97773679905133,
+ 70.62077905199172, 69.30772102913973, 68.04298217167204,
+ 66.83071789340165, 65.67480292398818, 64.5788171326473,
+ 63.54603388449278, 62.579410953331, 61.681583986508805,
+ 60.85486248980174, 60.101228273804864, 59.42233627825152,
+ 58.81951766751906, 58.29378506961507, 57.84583981241279,
+ 57.4760809950271, 57.1846162191376, 56.97127379483636,
+ 56.83561622822803, 56.7769547935109, 56.79436499052079,
+ 56.886702689600924, 57.05160603549191, 57.28124607245339,
+ 57.56642757732724, 57.89914879353403, 58.27236844303033,
+ 58.67990984365132, 59.116355117102266, 59.576956009961066,
+ 60.05755444906423, 60.554512631519906, 61.06465133698855,
+ 61.585195525456285, 62.11372635202592, 62.64813884135619,
+ 63.186604552461304, 63.727538644134334, 64.26957082101436,
+ 64.81151970188044, 65.35237020601004, 65.89125360126961,
+ 66.42742989976996, 66.96027232408677, 67.48925359981409,
+ 68.01393385910517, 68.533949965319, 69.04900609134093,
+ 69.55886540393712, 70.06334272394915, 70.56229804751932,
+ 71.0556308270964, 71.54327492292815, 72.02519414628827,
+ 72.50137832497796, 72.9718398298358, 73.43661050821466,
+ 73.89573897675311, 74.34928823138442, 74.79733353747956,
+ 75.2399605673854, 75.67726375646998, 76.10934485218239,
+ 76.53631163362776, 76.95827678179805, 77.37535688292883,
+ 77.78767154950458, 78.19534264524597, 78.59849360201189,
+ 78.99724881795595, 79.39173312752125, 79.78207133495441,
+ 80.16838780398642, 80.55080609718158, 80.92944865921089,
+ 81.3044365389691, 81.67588914604218, 82.04392403755186,
+ 82.40865673185885, 82.77020054601157, 83.12866645418522,
+ 83.48416296466951, 83.83679601324246, 84.18666887101566,
+ 84.53388206505198, 84.8785333102501, 85.22071745116212,
+ 85.56052641255924, 85.89804915769507, 86.2333716533356,
+ 86.5665768407282, 86.89774461177514, 87.22707800153105,
+ 87.55502250789522, 87.88213653408789, 88.20893041105228,
+ 88.53583672159708, 88.86322293368634, 89.19139896424025,
+ 89.52062375385204, 89.85111124006353, 90.18303555958981,
+ 90.5165356553044, 90.85171933475428, 91.18866685140331,
+ 91.52743406375431, 91.86805522279369, 92.21054543179243,
+ 92.5549028173719, 92.90111044611004, 93.24913801690026,
+ 93.59894335569255, 93.95047373608985, 94.30366704648675,
+ 94.65845282198497, 95.01475315715848, 95.37248351383447,
+ 95.7315534363742, 96.09186718546168, 96.45332430009888,
+ 96.8158200963566, 97.17924611041884, 97.54349049256166,
+ 97.90843835792072, 98.27397209920984, 98.63997166593752,
+ 99.0063148141312, 99.37287733010376, 99.7395332313772, 100.1061549475,
+ 100.4726134832, 100.8387785661, 101.20451878029999, 101.5697016889,
+ 101.9341939449, 102.2978613934, 102.660569166, 103.02218176710001,
+ 103.3825631554, 103.741576818, 104.0990858419, 104.4549529797,
+ 104.8090407123, 105.16121129540001, 105.511326789, 105.8592490981,
+ 106.2048400302, 106.54796135469999, 106.88847485839999,
+ 107.22624240020001, 107.56112596269999, 107.8929877032,
+ 108.2216900025, 108.5470955128, 108.8690672048, 109.18746841320001,
+ 109.50216288259999, 109.81301481140001, 110.1198888962,
+ 110.4226503752, 110.7211650718, 111.01529943690001,
+ 111.30492059219999, 111.5898963724, 111.8700958125, 112.1453906605,
+ 112.41565751339999, 112.68077951500001, 112.9406470869,
+ 113.1951578311, 113.4442159029, 113.6877310865, 113.9256177398,
+ 114.157793719, 114.3841793548, 114.60469652270001, 114.8192678329,
+ 115.0278159487, 115.23026303290001, 115.4265303186, 115.6165377931,
+ 115.8002039835, 115.9774458326, 116.1481786512, 116.3123161354,
+ 116.4697704377, 116.620452282, 116.7642711122, 116.9011352679,
+ 117.0309521794, 117.15362857619999, 117.269070704, 117.3771845463,
+ 117.4778760466, 117.571051329, 117.6566169146, 117.7344799321,
+ 117.804548321, 117.86673102719999, 117.920938189, 117.9670813149,
+ 118.0050734509, 118.03482933859999, 118.05626556429999,
+ 118.0693006983, 118.073856981, 118.0703642223, 118.0597540797,
+ 118.04293791389999, 118.0206981752, 117.9937254696, 117.9626253636,
+ 117.9279294054, 117.8901038501, 117.84955753390001,
+ 117.80664876969999, 117.7616914249, 117.7149602721,
+ 117.66669569909999, 117.6171078558, 117.5663803037, 117.5146732291,
+ 117.4621262707, 117.40886100840001, 117.3549831527, 117.3005844722,
+ 117.2457444887, 117.1905319683, 117.1350062335, 117.07921831659999,
+ 117.0232119741, 116.9670245784, 116.91068790210001,
+ 116.85422881470001, 116.7976699205, 116.7410301463, 116.6843252709,
+ 116.6275683918, 116.57077032880001, 116.5139399725, 116.4570845812,
+ 116.4002100359, 116.3433210563, 116.2864213858, 116.2295139475,
+ 116.1726009779, 116.1156841391, 116.0587646145, 116.0018431896,
+ 115.9449203199, 115.8879961886, 115.831070755, 115.77414379470001,
+ 115.7172149341, 115.6602836789, 115.6033494389, 115.5464115528,
+ 115.48946931260001, 115.43252198619999, 115.37556883549999,
+ 115.318609131, 115.2616421634, 115.2046672519, 115.1476837501,
+ 115.09069105009999, 115.0336885846, 114.9766758279, 114.9196522965,
+ 114.8626175483, 114.8055711819, 114.7485128351, 114.6914421832,
+ 114.6343589374, 114.5772628428, 114.52015367599999, 114.4630312435,
+ 114.4058953796, 114.3487459442, 114.2915828211, 114.234405916,
+ 114.1772151552, 114.120010483, 114.0627918612, 114.0055592668,
+ 113.94831269139999, 113.891052139, 113.8337776258, 113.7764891782,
+ 113.7191868327, 113.66187063410001, 113.6045406353, 113.5471968961,
+ 113.48983948280001, 113.4324684675, 113.37508392710001,
+ 113.3176859434, 113.2602746022, 113.2028499927, 113.1454122078,
+ 113.0879613429, 113.030497496, 112.9730207675, 112.9155312596,
+ 112.8580290763, 112.800514323, 112.7429871067, 112.6854475351,
+ 112.6278957171, 112.5703317624, 112.5127557815, 112.4551678851,
+ 112.3975681847, 112.339956792, 112.2823338192, 112.2246993783,
+ 112.1670535818, 112.1093965421, 112.05172837170001, 111.994049183,
+ 111.9363590884, 111.8786582001, 111.8209466303, 111.7632244907,
+ 111.7054918932, 111.6477489491, 111.5899957698, 111.5322324662,
+ 111.4744591489, 111.41667592830001, 111.3588829143, 111.3010802167,
+ 111.2432679449, 111.1854462077, 111.1276151138, 111.0697747715,
+ 111.0119252887, 110.9540667728, 110.8961993309, 110.83832306970001,
+ 110.7804380955, 110.7225445143, 110.6646424314, 110.60673195209999,
+ 110.548813181, 110.4908862224, 110.4329511801, 110.37500815749999,
+ 110.3170572578, 110.2590985834, 110.2011322366, 110.1431583192,
+ 110.0851769326, 110.02718817760001, 109.96919215470001,
+ 109.9111889642, 109.8531787057, 109.7951614784, 109.7371373813,
+ 109.6791065127, 109.62106897059999, 109.5630248527, 109.5049742562,
+ 109.4469172779, 109.388854014, 109.3307845606, 109.2727090132,
+ 109.2146274669, 109.1565400165, 109.0984467562, 109.04034778,
+ 108.98224318140001, 108.92413305340001, 108.86601748870001,
+ 108.8078965797, 108.7497704182, 108.6916390958, 108.6335027034,
+ 108.57536133170001, 108.5172150712, 108.4590640116, 108.4009082425,
+ 108.34274785289999, 108.2845829317, 108.226413567, 108.16823984700001,
+ 108.1100618591, 108.0518796904, 107.9936934279, 107.9355031578,
+ 107.87730896629999, 107.8191109389, 107.760909161, 107.7027037173,
+ 107.64449469259999, 107.5862821708, 107.5280662358,
+ 107.46984697100001, 107.4116244595, 107.3533987839, 107.2951700264,
+ 107.2369382692, 107.1787035938, 107.1204660814, 107.06222581280001,
+ 107.00398286869999, 106.9457373292, 106.8874892742, 106.829238783,
+ 106.7709859349, 106.7127308086, 106.65447348250001, 106.5962140348,
+ 106.53795254330001, 106.47968908530001, 106.421423738,
+ 106.36315657799999, 106.304887682, 106.2466171258, 106.1883449854,
+ 106.1300713362, 106.0717962533, 106.0135198115, 105.9552420853,
+ 105.8969631488, 105.8386830761, 105.7804019405, 105.72211981539999,
+ 105.6638367736, 105.6055528879, 105.5472682306, 105.48898287360001,
+ 105.4306968887, 105.37241034729999, 105.31412332069999,
+ 105.2558358795, 105.19754809450001, 105.1392600359, 105.0809717736,
+ 105.0226833774, 104.9643949167, 104.9061064607, 104.84781807819999,
+ 104.78952983789999, 104.73124180810001, 104.67319152239999,
+ 104.6160173582, 104.5605150962, 104.507392088, 104.4572307976,
+ 104.41051043099999, 104.36762061169999, 104.3288733055,
+ 104.2945134337, 104.2647280459, 104.2396543352, 104.2193866091,
+ 104.2039823528, 104.1934674964, 104.18784098409999,
+ 104.18707873470001, 104.1911370677, 104.199955663, 104.21346011349999,
+ 104.23156412109999, 104.25417138270001, 104.2811772049,
+ 104.3124698834, 104.34793187589999, 104.38744079770001,
+ 104.4308702615, 104.478090583, 104.52896936970001, 104.5833720099,
+ 104.6411620739, 104.70220164140001, 104.7663515649, 104.8334716772,
+ 104.90342095439999, 104.976057637, 105.05123932, 105.1288230138,
+ 105.2086651825, 105.2906217637, 105.3745481726, 105.4602992945,
+ 105.54772946780001, 105.6366924609, 105.7270414436,
+ 105.81862895670001, 105.9113068798, 106.0049263999, 106.0993379815,
+ 106.1943913396, 106.28993541610001, 106.38581836110001,
+ 106.48188751880001, 106.57798942059999, 106.6739697831,
+ 106.7696735145, 106.8649447274, 106.9596267597, 107.0535633759,
+ 107.146607375, 107.23862928610001, 107.3295186468, 107.41918183809999,
+ 107.5075399817, 107.5945271314, 107.6800886722, 107.76417991369999,
+ 107.8467648518, 107.9278150786, 108.0073088242, 108.08523011279999,
+ 108.16156802120001, 108.2363160257, 108.309471429, 108.3810348545,
+ 108.4510098038, 108.5194022662, 108.586220376, 108.6514741118,
+ 108.71517503209999, 108.7773360431, 108.83797119500001,
+ 108.8970955033, 108.9547247921, 109.0108755569, 109.065564844,
+ 109.1188101454, 109.1706293067, 109.2210404463, 109.2700618857,
+ 109.3177120874, 109.3640096021, 109.4089730222, 109.45262094110001,
+ 109.4949719186, 109.5360444503, 109.575856942, 109.6144276868,
+ 109.651774846, 109.68791643269999, 109.7228702978, 109.75665411829999,
+ 109.7892853872, 109.8207814054, 109.8511592749, 109.88043589329999,
+ 109.90862794949999, 109.9357519199, 109.9618240666,
+ 109.98686043469999, 110.0108768518, 110.0338889271, 110.0559120514,
+ 110.0769613974, 110.0970519201, 110.1161983585, 110.1344152359,
+ 110.15171686240001, 110.16811733589999, 110.1836305443,
+ 110.1982701674, 110.21204967909999, 110.2249823496, 110.2370812478,
+ 110.24835924370001, 110.2588290107, 110.2685030283,
+ 110.27739358449999, 110.2855127783, 110.2928725224, 110.2994845457,
+ 110.3053603959, 110.310511442, 110.3149488768, 110.31868371990001,
+ 110.32172681969999, 110.324088856, 110.32578034289999, 110.3268116309,
+ 110.3271929093, 110.3269342091, 110.3260454048, 110.3245362174,
+ 110.32241621620001, 110.3196948214, 110.3163813063, 110.3124847998,
+ 110.3080142882, 110.3029786179, 110.297386497, 110.2912464979,
+ 110.28456705939999, 110.2773564885, 110.2696229626, 110.2613745316,
+ 110.2526191196, 110.2433645274, 110.2336184337, 110.2233883979,
+ 110.21268186100001, 110.20150614810001, 110.1898684703,
+ 110.17777592579999, 110.16523550229999, 110.1522540787,
+ 110.1388384263, 110.124995211, 110.1107309949, 110.0960522376,
+ 110.0809652981, 110.0654764362, 110.0495918144, 110.033317499,
+ 110.0166594617, 109.9996235813, 109.9822156451, 109.9644413501,
+ 109.9463063048, 109.9278160302, 109.9089759614, 109.889791449
+ ],
+ "t": [
+ 0.0, 0.0010014534883744, 0.0020029069767453, 0.0030043604651162,
+ 0.0040058139534906, 0.0050072674418615, 0.0060087209302324,
+ 0.0070101744186068, 0.0080116279069777, 0.0090130813953486,
+ 0.010014534883723, 0.0110159883720939, 0.0120174418604683,
+ 0.0130188953488392, 0.0140203488372101, 0.0150218023255845,
+ 0.0160232558139554, 0.0170247093023263, 0.0180261627907007,
+ 0.0190276162790716, 0.0200290697674425, 0.021030523255817,
+ 0.0220319767441878, 0.0230334302325587, 0.0240348837209332,
+ 0.025036337209304, 0.0260377906976749, 0.0270392441860494,
+ 0.0280406976744203, 0.0290421511627911, 0.0300436046511656,
+ 0.0310450581395365, 0.0320465116279073, 0.0330479651162818,
+ 0.0340494186046527, 0.0350508720930236, 0.036052325581398,
+ 0.0370537790697689, 0.0380552325581398, 0.0390566860465142,
+ 0.0400581395348851, 0.041059593023256, 0.0420610465116304,
+ 0.0430625000000013, 0.0440639534883722, 0.0450654069767466,
+ 0.0460668604651175, 0.0470683139534884, 0.0480697674418628,
+ 0.0490712209302337, 0.0500726744186046, 0.051074127906979,
+ 0.0520755813953499, 0.0530770348837208, 0.0540784883720952,
+ 0.0550799418604661, 0.056081395348837, 0.0570828488372114,
+ 0.0580843023255823, 0.0590857558139532, 0.0600872093023276,
+ 0.0610886627906985, 0.062090116279073, 0.0630915697674439,
+ 0.0640930232558147, 0.0650944767441892, 0.0660959302325601,
+ 0.0670973837209309, 0.0680988372093054, 0.0691002906976763,
+ 0.0701017441860472, 0.0711031976744216, 0.0721046511627925,
+ 0.0731061046511634, 0.0741075581395378, 0.0751090116279087,
+ 0.0761104651162796, 0.077111918604654, 0.0781133720930249,
+ 0.0791148255813958, 0.0801162790697702, 0.0811177325581411,
+ 0.082119186046512, 0.0831206395348864, 0.0841220930232573,
+ 0.0851235465116282, 0.0861250000000026, 0.0871264534883735,
+ 0.0881279069767444, 0.0891293604651188, 0.0901308139534897,
+ 0.0911322674418606, 0.092133720930235, 0.0931351744186059,
+ 0.0941366279069768, 0.0951380813953512, 0.0961395348837221,
+ 0.097140988372093, 0.0981424418604675, 0.0991438953488383,
+ 0.1001453488372092, 0.1011468023255837, 0.1021482558139545,
+ 0.1031497093023254, 0.1041511627906999, 0.1051526162790708,
+ 0.1061540697674416, 0.1071555232558161, 0.108156976744187,
+ 0.1091584302325614, 0.1101598837209323, 0.1111613372093032,
+ 0.1121627906976776, 0.1131642441860485, 0.1141656976744194,
+ 0.1151671511627938, 0.1161686046511647, 0.1171700581395356,
+ 0.11817151162791, 0.1191729651162809, 0.1201744186046518,
+ 0.1211758720930262, 0.1221773255813971, 0.123178779069768,
+ 0.1241802325581424, 0.1251816860465133, 0.1261831395348842,
+ 0.1271845930232586, 0.1281860465116295, 0.1291875000000004,
+ 0.1301889534883749, 0.1311904069767457, 0.1321918604651166,
+ 0.1331933139534911, 0.1341947674418619, 0.1351962209302328,
+ 0.1361976744186073, 0.1371991279069782, 0.138200581395349,
+ 0.1392020348837235, 0.1402034883720944, 0.1412049418604652,
+ 0.1422063953488397, 0.1432078488372106, 0.1442093023255815,
+ 0.1452107558139559, 0.1462122093023268, 0.1472136627906977,
+ 0.1482151162790721, 0.149216569767443, 0.1502180232558139,
+ 0.1512194767441883, 0.1522209302325592, 0.1532223837209301,
+ 0.1542238372093045, 0.1552252906976754, 0.1562267441860463,
+ 0.1572281976744207, 0.1582296511627916, 0.159231104651166,
+ 0.1602325581395369, 0.1612340116279078, 0.1622354651162822,
+ 0.1632369186046531, 0.164238372093024, 0.1652398255813985,
+ 0.1662412790697693, 0.1672427325581402, 0.1682441860465147,
+ 0.1692456395348855, 0.1702470930232564, 0.1712485465116309,
+ 0.1722500000000018, 0.1732514534883726, 0.1742529069767471,
+ 0.175254360465118, 0.1762558139534888, 0.1772572674418633,
+ 0.1782587209302342, 0.1792601744186051, 0.1802616279069795,
+ 0.1812630813953504, 0.1822645348837213, 0.1832659883720957,
+ 0.1842674418604666, 0.1852688953488375, 0.1862703488372119,
+ 0.1872718023255828, 0.1882732558139537, 0.1892747093023281,
+ 0.190276162790699, 0.1912776162790699, 0.1922790697674443,
+ 0.1932805232558152, 0.1942819767441861, 0.1952834302325605,
+ 0.1962848837209314, 0.1972863372093023, 0.1982877906976767,
+ 0.1992892441860476, 0.2002906976744185, 0.2012921511627929,
+ 0.2022936046511638, 0.2032950581395347, 0.2042965116279091,
+ 0.20529796511628, 0.2062994186046509, 0.2073008720930254,
+ 0.2083023255813962, 0.2093037790697707, 0.2103052325581416,
+ 0.2113066860465124, 0.2123081395348869, 0.2133095930232578,
+ 0.2143110465116287, 0.2153125000000031, 0.216313953488374,
+ 0.2173154069767449, 0.2183168604651193, 0.2193183139534902,
+ 0.2203197674418611, 0.2213212209302355, 0.2223226744186064,
+ 0.2233241279069773, 0.2243255813953517, 0.2253270348837226,
+ 0.2263284883720935, 0.2273299418604679, 0.2283313953488388,
+ 0.2293328488372097, 0.2303343023255841, 0.231335755813955,
+ 0.2323372093023259, 0.2333386627907003, 0.2343401162790712,
+ 0.2353415697674421, 0.2363430232558165, 0.2373444767441874,
+ 0.2383459302325583, 0.2393473837209327, 0.2403488372093036,
+ 0.2413502906976745, 0.242351744186049, 0.2433531976744198,
+ 0.2443546511627907, 0.2453561046511652, 0.246357558139536,
+ 0.2473590116279069, 0.2483604651162814, 0.2493619186046523,
+ 0.2503633720930232, 0.2513648255813976, 0.2523662790697685,
+ 0.2533677325581394, 0.2543691860465138, 0.2553706395348847,
+ 0.2563720930232556, 0.25737354651163, 0.2583750000000009,
+ 0.2593764534883753, 0.2603779069767462, 0.2613793604651171,
+ 0.2623808139534915, 0.2633822674418624, 0.2643837209302333,
+ 0.2653851744186077, 0.2663866279069786, 0.2673880813953495,
+ 0.2683895348837239, 0.2693909883720948, 0.2703924418604657,
+ 0.2713938953488402, 0.272395348837211, 0.2733968023255819,
+ 0.2743982558139564, 0.2753997093023272, 0.2764011627906981,
+ 0.2774026162790726, 0.2784040697674435, 0.2794055232558143,
+ 0.2804069767441888, 0.2814084302325597, 0.2824098837209305,
+ 0.283411337209305, 0.2844127906976759, 0.2854142441860468,
+ 0.2864156976744212, 0.2874171511627921, 0.288418604651163,
+ 0.2894200581395374, 0.2904215116279083, 0.2914229651162792,
+ 0.2924244186046536, 0.2934258720930245, 0.2944273255813954,
+ 0.2954287790697698, 0.2964302325581407, 0.2974316860465116,
+ 0.298433139534886, 0.2994345930232569, 0.3004360465116278,
+ 0.3014375000000022, 0.3024389534883731, 0.303440406976744,
+ 0.3044418604651184, 0.3054433139534893, 0.3064447674418638,
+ 0.3074462209302346, 0.3084476744186055, 0.30944912790698,
+ 0.3104505813953508, 0.3114520348837217, 0.3124534883720962,
+ 0.3134549418604671, 0.3144563953488379, 0.3154578488372124,
+ 0.3164593023255833, 0.3174607558139541, 0.3184622093023286,
+ 0.3194636627906995, 0.3204651162790703, 0.3214665697674448,
+ 0.3224680232558157, 0.3234694767441866, 0.324470930232561,
+ 0.3254723837209319, 0.3264738372093028, 0.3274752906976772,
+ 0.3284767441860481, 0.329478197674419, 0.3304796511627934,
+ 0.3314811046511643, 0.3324825581395352, 0.3334840116279096,
+ 0.3344854651162805, 0.3354869186046514, 0.3364883720930258,
+ 0.3374898255813967, 0.3384912790697676, 0.339492732558142,
+ 0.3404941860465129, 0.3414956395348838, 0.3424970930232582,
+ 0.3434985465116291, 0.3445, 0.3455014534883744, 0.3465029069767453,
+ 0.3475043604651162, 0.3485058139534907, 0.3495072674418615,
+ 0.3505087209302324, 0.3515101744186069, 0.3525116279069777,
+ 0.3535130813953486, 0.3545145348837231, 0.3555159883720939,
+ 0.3565174418604684, 0.3575188953488393, 0.3585203488372102,
+ 0.3595218023255846, 0.3605232558139555, 0.3615247093023264,
+ 0.3625261627907008, 0.3635276162790717, 0.3645290697674426,
+ 0.365530523255817, 0.3665319767441879, 0.3675334302325588,
+ 0.3685348837209332, 0.3695363372093041, 0.370537790697675,
+ 0.3715392441860494, 0.3725406976744203, 0.3735421511627912,
+ 0.3745436046511656, 0.3755450581395365, 0.3765465116279074,
+ 0.3775479651162818, 0.3785494186046527, 0.3795508720930236,
+ 0.380552325581398, 0.3815537790697689, 0.3825552325581398,
+ 0.3835566860465143, 0.3845581395348851, 0.385559593023256,
+ 0.3865610465116305, 0.3875625000000013, 0.3885639534883722,
+ 0.3895654069767467, 0.3905668604651175, 0.3915683139534884,
+ 0.3925697674418629, 0.3935712209302338, 0.3945726744186046,
+ 0.3955741279069791, 0.39657558139535, 0.3975770348837208,
+ 0.3985784883720953, 0.3995799418604662, 0.4005813953488371,
+ 0.4015828488372115, 0.4025843023255824, 0.4035857558139533,
+ 0.4045872093023277, 0.4055886627906986, 0.406590116279073,
+ 0.4075915697674439, 0.4085930232558148, 0.4095944767441892,
+ 0.4105959302325601, 0.411597383720931, 0.4125988372093054,
+ 0.4136002906976763, 0.4146017441860472, 0.4156031976744216,
+ 0.4166046511627925, 0.4176061046511634, 0.4186075581395378,
+ 0.4196090116279087, 0.4206104651162796, 0.4216119186046541,
+ 0.4226133720930249, 0.4236148255813958, 0.4246162790697703,
+ 0.4256177325581411, 0.426619186046512, 0.4276206395348865,
+ 0.4286220930232574, 0.4296235465116282, 0.4306250000000027,
+ 0.4316264534883736, 0.4326279069767444, 0.4336293604651189,
+ 0.4346308139534898, 0.4356322674418607, 0.4366337209302351,
+ 0.437635174418606, 0.4386366279069769, 0.4396380813953513,
+ 0.4406395348837222, 0.4416409883720931, 0.4426424418604675,
+ 0.4436438953488384, 0.4446453488372093, 0.4456468023255837,
+ 0.4466482558139546, 0.4476497093023255, 0.4486511627906999,
+ 0.4496526162790708, 0.4506540697674417, 0.4516555232558161,
+ 0.452656976744187, 0.4536584302325579, 0.4546598837209323,
+ 0.4556613372093032, 0.4566627906976777, 0.4576642441860485,
+ 0.4586656976744194, 0.4596671511627939, 0.4606686046511647,
+ 0.4616700581395356, 0.4626715116279101, 0.463672965116281,
+ 0.4646744186046518, 0.4656758720930263, 0.4666773255813972,
+ 0.467678779069768, 0.4686802325581425, 0.4696816860465134,
+ 0.4706831395348843, 0.4716845930232587, 0.4726860465116296,
+ 0.4736875000000005, 0.4746889534883749, 0.4756904069767458,
+ 0.4766918604651167, 0.4776933139534911, 0.478694767441862,
+ 0.4796962209302329, 0.4806976744186073, 0.4816991279069782,
+ 0.4827005813953491, 0.4837020348837235, 0.4847034883720944,
+ 0.4857049418604653, 0.4867063953488397, 0.4877078488372106,
+ 0.4887093023255815, 0.4897107558139559, 0.4907122093023268,
+ 0.4917136627906977, 0.4927151162790721, 0.493716569767443,
+ 0.4947180232558139, 0.4957194767441883, 0.4967209302325592,
+ 0.4977223837209301, 0.4987238372093046, 0.4997252906976754,
+ 0.5007267441860463, 0.5017281976744208, 0.5027296511627917,
+ 0.5037311046511626, 0.504732558139537, 0.5057340116279079,
+ 0.5067354651162823, 0.5077369186046532, 0.5087383720930241,
+ 0.5097398255813985, 0.5107412790697694, 0.5117427325581403,
+ 0.5127441860465147, 0.5137456395348856, 0.5147470930232565,
+ 0.5157485465116309, 0.5167500000000018, 0.5177514534883727,
+ 0.5187529069767471, 0.519754360465118, 0.5207558139534889,
+ 0.5217572674418633, 0.5227587209302342, 0.5237601744186051,
+ 0.5247616279069796, 0.5257630813953504, 0.5267645348837213,
+ 0.5277659883720958, 0.5287674418604666, 0.5297688953488375,
+ 0.530770348837212, 0.5317718023255829, 0.5327732558139537,
+ 0.5337747093023282, 0.5347761627906991, 0.53577761627907,
+ 0.5367790697674444, 0.5377805232558153, 0.5387819767441862,
+ 0.5397834302325606, 0.5407848837209315, 0.5417863372093024,
+ 0.5427877906976768, 0.5437892441860477, 0.5447906976744186,
+ 0.545792151162793, 0.5467936046511639, 0.5477950581395348,
+ 0.5487965116279092, 0.5497979651162801, 0.550799418604651,
+ 0.5518008720930254, 0.5528023255813963, 0.5538037790697707,
+ 0.5548052325581416, 0.5558066860465125, 0.556808139534887,
+ 0.5578095930232578, 0.5588110465116287, 0.5598125000000032,
+ 0.560813953488374, 0.5618154069767449, 0.5628168604651194,
+ 0.5638183139534902, 0.5648197674418611, 0.5658212209302356,
+ 0.5668226744186065, 0.5678241279069773, 0.5688255813953518,
+ 0.5698270348837227, 0.5708284883720935, 0.571829941860468,
+ 0.5728313953488389, 0.5738328488372098, 0.5748343023255842,
+ 0.5758357558139551, 0.576837209302326, 0.5778386627907004,
+ 0.5788401162790713, 0.5798415697674422, 0.5808430232558166,
+ 0.5818444767441875, 0.5828459302325584, 0.5838473837209328,
+ 0.5848488372093037, 0.5858502906976746, 0.586851744186049,
+ 0.5878531976744199, 0.5888546511627908, 0.5898561046511652,
+ 0.5908575581395361, 0.591859011627907, 0.5928604651162814,
+ 0.5938619186046523, 0.5948633720930232, 0.5958648255813976,
+ 0.5968662790697685, 0.5978677325581394, 0.5988691860465138,
+ 0.5998706395348847, 0.6008720930232556, 0.60187354651163,
+ 0.6028750000000009, 0.6038764534883754, 0.6048779069767463,
+ 0.6058793604651171, 0.6068808139534916, 0.6078822674418625,
+ 0.6088837209302334, 0.6098851744186078, 0.6108866279069787,
+ 0.6118880813953496, 0.612889534883724, 0.6138909883720949,
+ 0.6148924418604658, 0.6158938953488402, 0.6168953488372111,
+ 0.617896802325582, 0.6188982558139564, 0.6198997093023273,
+ 0.6209011627906982, 0.6219026162790726, 0.6229040697674435,
+ 0.6239055232558144, 0.6249069767441888, 0.6259084302325597,
+ 0.6269098837209306, 0.627911337209305, 0.6289127906976759,
+ 0.6299142441860468, 0.6309156976744212, 0.6319171511627921,
+ 0.632918604651163, 0.6339200581395374, 0.6349215116279083,
+ 0.6359229651162792, 0.6369244186046537, 0.6379258720930245,
+ 0.6389273255813954, 0.6399287790697699, 0.6409302325581407,
+ 0.6419316860465116, 0.6429331395348861, 0.643934593023257,
+ 0.6449360465116278, 0.6459375000000023, 0.6469389534883732,
+ 0.647940406976744, 0.6489418604651185, 0.6499433139534894,
+ 0.6509447674418603, 0.6519462209302347, 0.6529476744186056,
+ 0.65394912790698, 0.6549505813953509, 0.6559520348837218,
+ 0.6569534883720962, 0.6579549418604671, 0.658956395348838,
+ 0.6599578488372124, 0.6609593023255833, 0.6619607558139542,
+ 0.6629622093023286, 0.6639636627906995, 0.6649651162790704,
+ 0.6659665697674448, 0.6669680232558157, 0.6679694767441866,
+ 0.668970930232561, 0.6699723837209319, 0.6709738372093028,
+ 0.6719752906976773, 0.6729767441860481, 0.673978197674419,
+ 0.6749796511627935, 0.6759811046511643, 0.6769825581395352,
+ 0.6779840116279097, 0.6789854651162806, 0.6799869186046514,
+ 0.6809883720930259, 0.6819898255813968, 0.6829912790697676,
+ 0.6839927325581421, 0.684994186046513, 0.6859956395348838,
+ 0.6869970930232583, 0.6879985465116292, 0.6890000000000001
+ ]
+ }
+ },
+ {
+ "bc_name": "RCR_RPA",
+ "bc_type": "RCR",
+ "bc_values": {
+ "C": 0.007,
+ "Rp": 150.0,
+ "Rd": 150.0,
+ "Pd": 0.0
+ }
+ },
+ {
+ "bc_name": "RCR_LPA",
+ "bc_type": "RCR",
+ "bc_values": {
+ "C": 0.007,
+ "Rp": 160.0,
+ "Rd": 160.0,
+ "Pd": 0.0
+ }
+ }
+ ],
+ "vessels": [
+ {
+ "boundary_conditions": {
+ "inlet": "VEN_IN_FLOW"
+ },
+ "vessel_id": 1,
+ "vessel_length": 1000.0,
+ "vessel_name": "VEN_SYS",
+ "zero_d_element_type": "BloodVesselCRL",
+ "zero_d_element_values": {
+ "C": 0.045004,
+ "R_poiseuille": 85.06913,
+ "L": 0.666611
+ }
+ },
+ {
+ "vessel_id": 3,
+ "vessel_length": 1000.0,
+ "vessel_name": "MPA",
+ "zero_d_element_type": "BloodVessel",
+ "zero_d_element_values": {
+ "R_poiseuille": 40.0,
+ "L": 0.0,
+ "C": 0.004
+ }
+ },
+ {
+ "boundary_conditions": {
+ "outlet": "RCR_RPA"
+ },
+ "vessel_id": 4,
+ "vessel_length": 1000.0,
+ "vessel_name": "RPA",
+ "zero_d_element_type": "BloodVessel",
+ "zero_d_element_values": {
+ "R_poiseuille": 60.0,
+ "L": 0.5,
+ "C": 0.002,
+ "stenosis_coefficient": 0.0
+ }
+ },
+ {
+ "boundary_conditions": {
+ "outlet": "RCR_LPA"
+ },
+ "vessel_id": 5,
+ "vessel_length": 1000.0,
+ "vessel_name": "LPA",
+ "zero_d_element_type": "BloodVessel",
+ "zero_d_element_values": {
+ "R_poiseuille": 65.0,
+ "L": 0.5,
+ "C": 0.002,
+ "stenosis_coefficient": 0.0
+ }
+ }
+ ],
+ "junctions": [
+ {
+ "inlet_blocks": ["VEN_SYS"],
+ "junction_name": "J_VEN",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["RA"]
+ },
+ {
+ "inlet_blocks": ["MPA"],
+ "junction_name": "J_PA",
+ "junction_type": "NORMAL_JUNCTION",
+ "outlet_blocks": ["RPA", "LPA"]
+ }
+ ],
+ "chambers": [
+ {
+ "type": "LinearElastanceChamber",
+ "name": "RA",
+ "values": {
+ "Emax": 80.0,
+ "Epass": 50.0,
+ "Vrest": 4.0
+ },
+ "activation_function": {
+ "type": "piecewise_cosine",
+ "contract_start": 0.025,
+ "relax_start": 0.08625,
+ "contract_duration": 0.06125,
+ "relax_duration": 0.18375
+ }
+ },
+ {
+ "type": "LinearElastanceChamber",
+ "name": "RV",
+ "values": {
+ "Emax": 650.0,
+ "Epass": 54.0,
+ "Vrest": 54.0
+ },
+ "activation_function": {
+ "type": "piecewise_cosine",
+ "contract_start": 0.207,
+ "relax_start": 0.29625,
+ "contract_duration": 0.08925,
+ "relax_duration": 0.26975
+ }
+ }
+ ],
+ "valves": [
+ {
+ "type": "PiecewiseValve",
+ "name": "TV",
+ "params": {
+ "Rmin": 6.666,
+ "Rmax": 10000.0,
+ "upstream_block": "RA",
+ "downstream_block": "RV"
+ }
+ },
+ {
+ "type": "PiecewiseValve",
+ "name": "PV",
+ "params": {
+ "Rmin": 6.666,
+ "Rmax": 10000.0,
+ "upstream_block": "RV",
+ "downstream_block": "MPA"
+ }
+ }
+ ]
+}
diff --git a/applications/svZeroDTuner/examples/right_heart_pa/tuning_differential_evolution.yaml b/applications/svZeroDTuner/examples/right_heart_pa/tuning_differential_evolution.yaml
new file mode 100644
index 000000000..0d396cb60
--- /dev/null
+++ b/applications/svZeroDTuner/examples/right_heart_pa/tuning_differential_evolution.yaml
@@ -0,0 +1,120 @@
+# Tuning configuration for right_heart_pa example (cgs units)
+#
+# INSTRUCTIONS:
+# 1. Run baseline to generate baseline_results/:
+# python -c 'from main import run_baseline; run_baseline("model.json")'
+# 2. Inspect baseline_summary.csv for available output names
+# 3. Update targets below as needed
+# 4. Run optimization:
+# svzerodtuner optimize tuning_differential_evolution.yaml
+
+model:
+ config_file: "model.json"
+
+parameters:
+ - name: "RA.Emax"
+ bounds: [10.0, 200.0]
+ - name: "RA.Epass"
+ bounds: [10.0, 200.0]
+ - name: "RV.Emax"
+ bounds: [100.0, 2000.0]
+ - name: "RV.Epass"
+ bounds: [10.0, 500.0]
+
+ - name: "MPA.R_poiseuille"
+ bounds: [10.0, 1000.0]
+
+ - name: "RPA.R_poiseuille"
+ bounds: [10.0, 1000.0]
+ - name: "RPA.L"
+ bounds: [0.1, 2.0]
+ - name: "RPA.stenosis_coefficient"
+ bounds: [0.0, 1.0e-3]
+
+ - name: "LPA.R_poiseuille"
+ bounds: [10.0, 1000.0]
+ - name: "LPA.L"
+ bounds: [0.1, 2.0]
+ - name: "LPA.stenosis_coefficient"
+ bounds: [0.0, 1.0e-3]
+
+ - name: "RCR_RPA.Rp"
+ bounds: [10.0, 5000.0]
+ - name: "RCR_RPA.Rd"
+ bounds: [10.0, 5000.0]
+ - name: "RCR_RPA.C"
+ bounds: [1.0e-4, 1.0e-2]
+
+ - name: "RCR_LPA.Rp"
+ bounds: [10.0, 5000.0]
+ - name: "RCR_LPA.Rd"
+ bounds: [10.0, 5000.0]
+ - name: "RCR_LPA.C"
+ bounds: [1.0e-4, 1.0e-2]
+
+# Target outputs
+# Specify which outputs you want to match and their target values
+# Provide expressions with output names from baseline_summary.csv
+# Cardiac output is taken as mean flow through the PV->MPA connection.
+# RPA/LPA split is enforced by targeting the mean flows in each branch.
+targets:
+ - name: Pulmonary arterial mean flow
+ type: scalar
+ expression: np.mean(flow:PV:MPA)
+ target_value: 83.33 # cm^3/s (5.0 L/min)
+ relative_bounds: 10%
+ weight: 1.0
+ - name: Pulmonary arterial max pressure
+ type: scalar
+ expression: np.max(pressure:PV:MPA)
+ target_value: 33330.0 # dyn/cm^2 (25 mmHg)
+ relative_bounds: 10%
+ weight: 1.0
+ - name: Pulmonary arterial min pressure
+ type: scalar
+ expression: np.min(pressure:PV:MPA)
+ target_value: 10670.0 # dyn/cm^2 (8 mmHg)
+ relative_bounds: 10%
+ weight: 1.0
+ - name: Pulmonary arterial mean pressure
+ type: scalar
+ expression: np.mean(pressure:PV:MPA)
+ target_value: 20000.0 # dyn/cm^2 (15 mmHg)
+ relative_bounds: 10%
+ weight: 1.0
+ - name: Right pulmonary artery mean flow
+ type: scalar
+ expression: np.mean(flow:RPA:RCR_RPA)
+ target_value: 45.83 # 55% of CO
+ relative_bounds: 10%
+ weight: 1.0
+ - name: Left pulmonary artery mean flow
+ type: scalar
+ expression: np.mean(flow:LPA:RCR_LPA)
+ target_value: 37.5 # 45% of CO
+ relative_bounds: 10%
+ weight: 1.0
+
+objective:
+ # L1 or L2 norm (of relative errors)
+ norm: L1
+
+optimization:
+ terminate_at_zero: true
+ algorithm: "differential_evolution"
+ maxiter: 200
+ workers: -1
+ updating: "deferred"
+  tol: 1.0e-2
+ popsize: 20
+ init: "sobol"
+ strategy: "best1bin"
+ mutation: [0.5, 1.5]
+ recombination: 0.5
+ polish: true
+
+output:
+ directory: "optimization_results_right_heart_pa_de"
+ save_history: true
+ save_plots: true
+ save_final_config: true
diff --git a/applications/svZeroDTuner/examples/right_heart_pa/tuning_nelder_mead.yaml b/applications/svZeroDTuner/examples/right_heart_pa/tuning_nelder_mead.yaml
new file mode 100644
index 000000000..eee4a15c8
--- /dev/null
+++ b/applications/svZeroDTuner/examples/right_heart_pa/tuning_nelder_mead.yaml
@@ -0,0 +1,109 @@
+# Tuning configuration for right_heart_pa example (Nelder-Mead, cgs units)
+#
+# Run with:
+# svzerodtuner optimize tuning_nelder_mead.yaml
+
+model:
+ config_file: "model.json"
+
+parameters:
+ - name: "RA.Emax"
+ bounds: [10.0, 200.0]
+ - name: "RA.Epass"
+ bounds: [10.0, 200.0]
+ - name: "RV.Emax"
+ bounds: [100.0, 2000.0]
+ - name: "RV.Epass"
+ bounds: [10.0, 500.0]
+
+ - name: "MPA.R_poiseuille"
+ bounds: [10.0, 1000.0]
+
+ - name: "RPA.R_poiseuille"
+ bounds: [10.0, 1000.0]
+ - name: "RPA.L"
+ bounds: [0.1, 2.0]
+ - name: "RPA.stenosis_coefficient"
+ bounds: [0.0, 1.0e-3]
+
+ - name: "LPA.R_poiseuille"
+ bounds: [10.0, 1000.0]
+ - name: "LPA.L"
+ bounds: [0.1, 2.0]
+ - name: "LPA.stenosis_coefficient"
+ bounds: [0.0, 1.0e-3]
+
+ - name: "RCR_RPA.Rp"
+ bounds: [10.0, 5000.0]
+ - name: "RCR_RPA.Rd"
+ bounds: [10.0, 5000.0]
+ - name: "RCR_RPA.C"
+ bounds: [1.0e-4, 1.0e-2]
+
+ - name: "RCR_LPA.Rp"
+ bounds: [10.0, 5000.0]
+ - name: "RCR_LPA.Rd"
+ bounds: [10.0, 5000.0]
+ - name: "RCR_LPA.C"
+ bounds: [1.0e-4, 1.0e-2]
+
+# Target outputs
+# Specify which outputs you want to match and their target values
+# Provide expressions with output names from baseline_summary.csv
+# Cardiac output is taken as mean flow through the PV->MPA connection.
+# RPA/LPA split is enforced by targeting the mean flows in each branch.
+targets:
+ - name: Pulmonary arterial mean flow
+ type: scalar
+ expression: np.mean(flow:PV:MPA)
+ target_value: 83.33
+ relative_bounds: 10%
+ weight: 1.0
+ - name: Pulmonary arterial max pressure
+ type: scalar
+ expression: np.max(pressure:PV:MPA)
+ target_value: 33330.0
+ relative_bounds: 10%
+ weight: 1.0
+ - name: Pulmonary arterial min pressure
+ type: scalar
+ expression: np.min(pressure:PV:MPA)
+ target_value: 10670.0
+ relative_bounds: 10%
+ weight: 1.0
+ - name: Pulmonary arterial mean pressure
+ type: scalar
+ expression: np.mean(pressure:PV:MPA)
+ target_value: 20000.0
+ relative_bounds: 10%
+ weight: 1.0
+ - name: Right pulmonary artery mean flow
+ type: scalar
+ expression: np.mean(flow:RPA:RCR_RPA)
+ target_value: 45.83
+ relative_bounds: 10%
+ weight: 1.0
+ - name: Left pulmonary artery mean flow
+ type: scalar
+ expression: np.mean(flow:LPA:RCR_LPA)
+ target_value: 37.5
+ relative_bounds: 10%
+ weight: 1.0
+
+objective:
+ # L1 or L2 norm (of relative errors)
+ norm: L1
+
+optimization:
+ terminate_at_zero: true
+ algorithm: "Nelder-Mead"
+ maxiter: 8000
+ xatol: 1e-6
+ fatol: 1e-6
+ adaptive: true
+
+output:
+ directory: "optimization_results_right_heart_pa_nm"
+ save_history: true
+ save_plots: true
+ save_final_config: true
diff --git a/applications/svZeroDTuner/requirements.txt b/applications/svZeroDTuner/requirements.txt
new file mode 100644
index 000000000..1472bdc73
--- /dev/null
+++ b/applications/svZeroDTuner/requirements.txt
@@ -0,0 +1,8 @@
+scipy>=1.9.0
+numpy>=1.21.0
+pandas>=1.3.0
+pyyaml>=6.0
+matplotlib>=3.5.0
+
+# pysvzerod is a requirement, but it is not available on PyPI, so we need to
+# install it from source using "pip install -e ." from the svZeroDSolver repo root
diff --git a/applications/svZeroDTuner/svzerodtuner/__init__.py b/applications/svZeroDTuner/svzerodtuner/__init__.py
new file mode 100644
index 000000000..75d0be076
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/__init__.py
@@ -0,0 +1,7 @@
+"""
+sv0D Tuning Framework
+
+A generic framework for optimizing sv0D model parameters to match target output values.
+"""
+
+__version__ = "0.1.0"
diff --git a/applications/svZeroDTuner/svzerodtuner/cli.py b/applications/svZeroDTuner/svzerodtuner/cli.py
new file mode 100644
index 000000000..72d6b4fc6
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/cli.py
@@ -0,0 +1,99 @@
+"""
+Command line interface for svZeroDTuner.
+"""
+
+from __future__ import annotations
+
+import argparse
+import sys
+
+from .sv0d_tuner import run_optimization
+from .sensitivity import run_sensitivity_analysis
+
+
def _add_optimize_parser(subparsers: argparse._SubParsersAction) -> None:
    """Register the 'optimize' subcommand and its 'run' alias."""
    for command, help_text in (
        ("optimize", "Run parameter optimization from a tuning config"),
        ("run", "Alias for optimize"),
    ):
        sub = subparsers.add_parser(command, help=help_text)
        sub.add_argument(
            "config",
            help="Path to tuning YAML config file",
        )
        sub.set_defaults(_handler=_handle_optimize)
+
+
def _add_sensitivity_parser(subparsers: argparse._SubParsersAction) -> None:
    """Register the 'sensitivity-analysis' subcommand and its 'sensitivity' alias."""
    for command, help_text in (
        ("sensitivity-analysis", "Run sensitivity analysis from a config"),
        ("sensitivity", "Alias for sensitivity-analysis"),
    ):
        sub = subparsers.add_parser(command, help=help_text)
        sub.add_argument(
            "config",
            help="Path to sensitivity YAML config file",
        )
        sub.set_defaults(_handler=_handle_sensitivity)
+
+
def _handle_optimize(args: argparse.Namespace) -> int:
    """Run an optimization and return a process exit code (0 ok, 1 failed)."""
    result = run_optimization(args.config)
    if bool(result.get("success", False)):
        return 0
    message = result.get("message", "Optimization failed")
    print(f"[svzerodtuner] {message}")
    return 1
+
+
def _handle_sensitivity(args: argparse.Namespace) -> int:
    """Run a sensitivity analysis and return a process exit code (0 ok, 1 failed)."""
    try:
        run_sensitivity_analysis(args.config)
    except Exception as exc:  # CLI boundary: report and convert to an exit code
        print(f"[svzerodtuner] Sensitivity analysis failed: {exc}")
        return 1
    else:
        return 0
+
+
def build_parser() -> argparse.ArgumentParser:
    """Create the top-level argument parser with all subcommands attached."""
    parser = argparse.ArgumentParser(
        prog="svzerodtuner",
        description="svZeroDTuner command line interface",
    )
    subcommands = parser.add_subparsers(dest="command", required=True)
    for register in (_add_optimize_parser, _add_sensitivity_parser):
        register(subcommands)
    return parser
+
+
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: parse arguments and dispatch to the chosen subcommand."""
    parser = build_parser()
    namespace = parser.parse_args(argv)
    handler = getattr(namespace, "_handler", None)
    if handler is not None:
        return handler(namespace)
    # Defensive fallback: subcommands are required, so this should be unreachable.
    parser.print_help()
    return 2


if __name__ == "__main__":
    raise SystemExit(main())
diff --git a/applications/svZeroDTuner/svzerodtuner/config_handler.py b/applications/svZeroDTuner/svzerodtuner/config_handler.py
new file mode 100644
index 000000000..aa8362288
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/config_handler.py
@@ -0,0 +1,238 @@
+"""
+Configuration Handler for sv0D Tuning Framework
+
+Handles parsing and validation of YAML configuration files.
+"""
+
+import os
+from typing import Dict, List, Any, Optional
+from pathlib import Path
+
+try:
+ import yaml
+except ImportError:
+ raise ImportError(
+ "PyYAML is required but not installed. Please install it with: pip install pyyaml"
+ )
+
+
class ConfigHandler:
    """
    Loads and validates a YAML tuning configuration file.

    On construction the YAML is read, relative paths are resolved against the
    config file's directory, and the structure of all required sections
    (model, parameters, targets, objective, optimization) is validated.
    Accessor methods return the individual sections.
    """

    def __init__(self, config_file: str):
        """
        Initialize config handler.

        Args:
            config_file: Path to YAML configuration file

        Raises:
            ValueError: If required sections are missing or entries are malformed.
        """
        self.config_file = config_file
        self.config = self._load_config()
        self._validate_config()

    def _load_config(self) -> Dict:
        """Load the YAML file and resolve relative paths against its directory."""
        with open(self.config_file, 'r') as f:
            config = yaml.safe_load(f)

        config_dir = os.path.dirname(os.path.abspath(self.config_file))

        # Resolve the model config path relative to the config file directory.
        model = config.get('model')
        if isinstance(model, dict) and 'config_file' in model:
            if not os.path.isabs(model['config_file']):
                model['config_file'] = os.path.join(config_dir, model['config_file'])

        # Resolve per-target data file paths the same way.
        for target in config.get('targets') or []:
            if 'target_file' in target and not os.path.isabs(target['target_file']):
                target['target_file'] = os.path.join(config_dir, target['target_file'])

        return config

    def _validate_config(self):
        """Validate overall configuration structure.

        Raises:
            ValueError: On the first missing section or malformed entry found.
        """
        required_sections = ['model', 'parameters', 'targets', 'objective', 'optimization']
        for section in required_sections:
            if section not in self.config:
                raise ValueError(f"Missing required section '{section}' in configuration")

        self._validate_model()
        self._validate_parameters()
        self._validate_targets()

        # Validate optimization section
        if 'algorithm' not in self.config['optimization']:
            raise ValueError("optimization.algorithm is required")

        # Validate objective section (norm is required)
        if not isinstance(self.config.get('objective'), dict):
            raise ValueError(
                "objective section is required and must be a mapping. "
                "Example:\n objective:\n norm: L1\n"
                "Options for norm: L1 = sum of absolute relative errors; L2 = Euclidean norm of the error vector."
            )
        if 'norm' not in self.config['objective']:
            raise ValueError(
                "objective.norm is required"
            )

    def _validate_model(self):
        """Check that the model section points at an existing config file."""
        if 'config_file' not in self.config['model']:
            raise ValueError("model.config_file is required")
        if not os.path.exists(self.config['model']['config_file']):
            raise ValueError(f"Model config file not found: {self.config['model']['config_file']}")

    def _validate_parameters(self):
        """Check each parameter entry: name, [min, max] bounds, optional scaling."""
        if not isinstance(self.config['parameters'], list):
            raise ValueError("parameters must be a list")
        for param in self.config['parameters']:
            if 'name' not in param:
                raise ValueError("Each parameter must have 'name'")
            if 'bounds' not in param:
                raise ValueError(f"Parameter '{param['name']}' must have 'bounds'")
            if not isinstance(param['bounds'], list) or len(param['bounds']) != 2:
                raise ValueError(f"Parameter '{param['name']}' bounds must be [min, max]")
            # Optional scaling: "identity", "log", or "max"
            scaling = param.get('scaling')
            if scaling is None or scaling == 'identity':
                continue  # no extra validation needed
            lo, hi = float(param['bounds'][0]), float(param['bounds'][1])
            if scaling == 'log':
                # Log scaling needs strictly positive bounds.
                if lo <= 0 or hi <= 0:
                    raise ValueError(
                        f"Parameter '{param['name']}' has scaling 'log'; bounds must be positive, got [{lo}, {hi}]"
                    )
            elif scaling == 'max':
                # Max scaling divides by the larger bound, which must not be zero.
                if max(lo, hi) == 0:
                    raise ValueError(
                        f"Parameter '{param['name']}' has scaling 'max'; bounds must be non-zero"
                    )
            else:
                raise ValueError(
                    f"Parameter '{param['name']}' scaling must be 'identity', 'log', or 'max' (got {scaling!r})"
                )

    def _validate_targets(self):
        """Check each target entry: name, type, bounds spec, and type-specific keys."""
        if not isinstance(self.config['targets'], list):
            raise ValueError("targets must be a list")
        for target in self.config['targets']:
            if 'name' not in target:
                raise ValueError("Each target must have 'name'")
            if 'type' not in target:
                raise ValueError(f"Target '{target['name']}' must have 'type'")
            self._validate_relative_bounds(target)
            target_type = target['type']
            if target_type == 'time_series':
                if 'expression' not in target:
                    raise ValueError(
                        f"Time series target '{target['name']}' must have 'expression'"
                    )
                if 'target_file' not in target:
                    raise ValueError(
                        f"Time series target '{target['name']}' must have 'target_file'"
                    )
                self._validate_target_range(target)
            elif target_type == 'scalar':
                if 'expression' not in target:
                    raise ValueError(
                        f"Scalar target '{target['name']}' must have 'expression'"
                    )
                if 'target_value' not in target and 'target_range' not in target:
                    raise ValueError(
                        f"Scalar target '{target['name']}' must have 'target_value' or 'target_range'"
                    )
                self._validate_target_range(target)
            else:
                raise ValueError(f"Unknown target type: {target_type}")

    def _validate_relative_bounds(self, target: Dict):
        """Validate 'relative_bounds' (or legacy 'uncertainty'): percent or [min, max].

        Raises:
            ValueError: If both keys are given, or the value is malformed.
        """
        has_relative = 'relative_bounds' in target
        has_uncertainty = 'uncertainty' in target
        if has_relative and has_uncertainty:
            raise ValueError(
                f"Target '{target['name']}' cannot define both 'relative_bounds' and legacy 'uncertainty'; use only one"
            )
        if not (has_relative or has_uncertainty):
            return
        unc = target['relative_bounds'] if has_relative else target['uncertainty']
        if isinstance(unc, str) and unc.strip().endswith('%'):
            # BUG FIX: parse first, then range-check. Previously the
            # "non-negative" ValueError raised inside the try block was caught
            # by the except clause and replaced with the wrong message.
            try:
                pct = float(unc.strip()[:-1])
            except ValueError:
                raise ValueError(
                    f"Target '{target['name']}' relative_bounds '{unc}' must be a valid percent (e.g. '5%')"
                ) from None
            if pct < 0:
                raise ValueError(
                    f"Target '{target['name']}' relative_bounds percent must be non-negative"
                )
        elif isinstance(unc, (int, float)):
            if unc < 0:
                raise ValueError(
                    f"Target '{target['name']}' relative_bounds percent must be non-negative"
                )
        elif isinstance(unc, (list, tuple)):
            key_name = 'relative_bounds' if has_relative else 'uncertainty'
            if len(unc) != 2:
                raise ValueError(
                    f"Target '{target['name']}' {key_name} [min, max] must have 2 elements"
                )
            if unc[0] >= unc[1]:
                raise ValueError(
                    f"Target '{target['name']}' {key_name} [min, max] must have min < max"
                )
        else:
            raise ValueError(
                f"Target '{target['name']}' relative_bounds must be percent (e.g. '5%') or [min, max]"
            )

    def _validate_target_range(self, target: Dict):
        """Validate optional 'target_range' as [min, max] with min < max."""
        if 'target_range' not in target:
            return
        r = target['target_range']
        if not isinstance(r, (list, tuple)) or len(r) != 2:
            raise ValueError(
                f"Target '{target['name']}' target_range must be [min, max]"
            )
        if r[0] >= r[1]:
            raise ValueError(
                f"Target '{target['name']}' target_range must have min < max"
            )

    def get_model_config_file(self) -> str:
        """Get path to the 0D model (JSON) configuration file."""
        return self.config['model']['config_file']

    def get_parameters(self) -> List[Dict]:
        """Get list of parameters to optimize."""
        return self.config['parameters']

    def get_targets(self) -> List[Dict]:
        """Get list of targets."""
        return self.config['targets']

    def get_objective_config(self) -> Dict:
        """Get objective function configuration (e.g. norm: 'L1' or 'L2')."""
        return self.config['objective']

    def get_optimization_config(self) -> Dict:
        """Get a copy of the optimization config; passed directly to the optimizer."""
        return dict(self.config['optimization'])

    def get_output_config(self) -> Dict:
        """Get output configuration, with defaults when the section is absent."""
        return self.config.get('output', {
            'directory': 'optimization_results',
            'save_history': True,
            'save_plots': True,
            'save_final_config': True
        })

    def get_config(self) -> Dict:
        """Get full configuration dictionary."""
        return self.config
diff --git a/applications/svZeroDTuner/svzerodtuner/expression_handler.py b/applications/svZeroDTuner/svzerodtuner/expression_handler.py
new file mode 100644
index 000000000..cdfeaeb1b
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/expression_handler.py
@@ -0,0 +1,155 @@
+"""
+Shared expression evaluation for targets and QoIs.
+Expressions reference simulation output names (e.g. Vc:LV, pressure:AV:AR_SYS)
+and are evaluated with numpy (np) and output_arrays["output_name"] -> array.
+"""
+
+import numpy as np
+from typing import Callable, Dict, List, Literal, Tuple, Union
+
+
class Expression:
    """
    Represents an expression that references simulation outputs (e.g. np.max(Vc:LV)).
    Each target or QoI has an Expression object with an evaluate method.

    The expression string is rewritten so that every referenced output name
    becomes a lookup into output_arrays, then compiled once per set of
    available outputs and evaluated with numpy (np) in scope.

    NOTE(security): expression strings are compiled and executed with exec;
    only use configuration files from trusted sources.
    """

    def __init__(
        self,
        expression_str: str,
        kind: Literal["scalar", "time_series"] = "scalar",
    ):
        """
        Args:
            expression_str: The expression string (e.g. np.max(Vc:LV)).
            kind: "scalar" returns float; "time_series" returns (values_array, times_array).
        """
        self.expression_str = expression_str
        self.kind = kind
        # Cache: frozenset(available_outputs) -> (output_names, eval_func)
        self._compile_cache: Dict[frozenset, Tuple[List[str], Callable]] = {}

    def __getstate__(self):
        """
        Make Expression pickle-safe for multiprocessing by dropping compiled callables.
        They are recreated lazily on first evaluate() in each worker process.
        """
        state = self.__dict__.copy()
        state["_compile_cache"] = {}
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        if "_compile_cache" not in self.__dict__:
            self._compile_cache = {}

    def output_names(self, available_outputs: List[str]) -> List[str]:
        """Return output names referenced in this expression (for extracting data)."""
        return [
            out_name
            for out_name in available_outputs
            if out_name in self.expression_str
        ]

    def _get_compiled(
        self, available_outputs: List[str]
    ) -> Tuple[List[str], Callable[[Dict[str, np.ndarray]], object]]:
        """
        Return (output_names, eval_func) for this expression and available_outputs.
        Eval func takes output_arrays and returns the expression result.
        Compilation is cached per set of available_outputs.

        BUG FIX: output-name substitution is done in a single regex pass
        (longest name first). The previous sequential str.replace corrupted
        the rewritten expression when one available output name was a
        substring of another (e.g. "flow:PV" inside "flow:PV:MPA"), because
        later replacements matched inside already-substituted text.
        """
        import re  # local import: keeps this block self-contained

        key = frozenset(available_outputs)
        if key not in self._compile_cache:
            output_names = [
                o for o in available_outputs if o in self.expression_str
            ]
            if output_names:
                pattern = re.compile(
                    "|".join(
                        re.escape(name)
                        for name in sorted(output_names, key=len, reverse=True)
                    )
                )
                eval_str = pattern.sub(
                    lambda m: f'output_arrays["{m.group(0)}"]',
                    self.expression_str,
                )
            else:
                eval_str = self.expression_str
            namespace: Dict[str, object] = {"np": np}
            code = compile(
                f"def _eval(output_arrays):\n    return {eval_str}",
                "<expression>",
                "exec",
            )
            exec(code, namespace)
            self._compile_cache[key] = (output_names, namespace["_eval"])  # type: ignore[index]
        return self._compile_cache[key]

    def evaluate(
        self,
        unique_outputs: Dict[str, Dict],
        available_outputs: List[str],
    ) -> Union[float, Tuple[np.ndarray, np.ndarray]]:
        """
        Evaluate the expression with the given outputs.
        Returns float for scalar, (values_array, times_array) for time_series.
        """
        if self.kind == "scalar":
            return self._evaluate_scalar(unique_outputs, available_outputs)
        else:
            return self._evaluate_time_series(unique_outputs, available_outputs)

    def _build_output_arrays(
        self,
        unique_outputs: Dict[str, Dict],
        output_names: List[str],
    ) -> Dict[str, np.ndarray]:
        """Build dict mapping output name -> array for the given output names."""
        output_arrays: Dict[str, np.ndarray] = {}
        for out_name in output_names:
            if out_name not in unique_outputs:
                continue
            data = unique_outputs[out_name]
            # Prefer the time series; fall back to a scalar 'value'.
            arr = data.get("time_series", data.get("value"))
            if isinstance(arr, (int, float)):
                arr = np.array([arr])
            output_arrays[out_name] = np.asarray(arr)
        return output_arrays

    def _evaluate_scalar(
        self,
        unique_outputs: Dict[str, Dict],
        available_outputs: List[str],
    ) -> float:
        """Evaluate and reduce to a single float; raises if the result is not size 1."""
        output_names, eval_func = self._get_compiled(available_outputs)
        output_arrays = self._build_output_arrays(unique_outputs, output_names)
        try:
            result = eval_func(output_arrays)
        except Exception as e:
            raise ValueError(f"Expression evaluation failed: {e}") from e
        arr = np.asarray(result)
        if arr.size != 1:
            raise ValueError(
                f"Scalar expression must yield a single value, "
                f"got shape {arr.shape} (size {arr.size})"
            )
        return float(arr.item())

    def _evaluate_time_series(
        self,
        unique_outputs: Dict[str, Dict],
        available_outputs: List[str],
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Evaluate to a 1D value array plus a matching times array.

        Times come from the first referenced output that carries a 'times'
        entry of matching length; otherwise sample indices are used.
        """
        output_names, eval_func = self._get_compiled(available_outputs)
        output_arrays = self._build_output_arrays(unique_outputs, output_names)
        try:
            result = eval_func(output_arrays)
        except Exception as e:
            raise ValueError(f"Expression evaluation failed: {e}") from e
        arr = np.asarray(result)
        if arr.ndim != 1:
            raise ValueError(
                f"Time series expression must return 1D array, got shape {arr.shape}"
            )
        times = None
        for out_name in output_arrays:
            if out_name in unique_outputs and "times" in unique_outputs[out_name]:
                times = np.asarray(unique_outputs[out_name]["times"])
                break
        if times is None or len(times) != len(arr):
            times = np.arange(len(arr))
        return arr, times
diff --git a/applications/svZeroDTuner/svzerodtuner/objective.py b/applications/svZeroDTuner/svzerodtuner/objective.py
new file mode 100644
index 000000000..8259fb39b
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/objective.py
@@ -0,0 +1,278 @@
+"""
+Objective function for sv0D Tuning Framework.
+"""
+
+import numpy as np
+from typing import Dict, List, Optional, Union, Tuple
+from scipy.interpolate import interp1d
+
+
def _parse_percent(x) -> Optional[float]:
    """Parse a percent spec: 5 or '5%' -> 0.05. Returns None for non-percent input."""
    if isinstance(x, (int, float)):
        return float(x) / 100.0
    if isinstance(x, str):
        stripped = x.strip()
        if stripped.endswith('%'):
            try:
                return float(stripped[:-1]) / 100.0
            except ValueError:
                return None
    return None


def _compute_range(target: Dict) -> Tuple[np.ndarray, np.ndarray]:
    """
    Convert a user target spec into a (lo, hi) range; internally only the
    range is kept. Accepted forms:
      - single value                    -> point target [v, v]
      - value + relative_bounds percent -> [v*(1-pct), v*(1+pct)], reordered
      - relative_bounds [min, max] or target_range [min, max] directly
    Legacy alias for relative_bounds: 'uncertainty'.
    """
    def _ordered(lo, hi):
        lo_arr = np.asarray(lo, dtype=float)
        hi_arr = np.asarray(hi, dtype=float)
        return np.minimum(lo_arr, hi_arr), np.maximum(lo_arr, hi_arr)

    rel = target.get('relative_bounds', target.get('uncertainty'))

    # Time-series targets carry their data in 'target_values'.
    base = np.asarray(target['target_values']) if 'target_file' in target else None
    n = len(base) if base is not None else 1

    # Explicit range wins over everything else.
    if 'target_range' in target:
        lo_val = float(target['target_range'][0])
        hi_val = float(target['target_range'][1])
        return np.full(n, lo_val), np.full(n, hi_val)

    if base is None:
        base = np.array([float(target['target_value'])])

    if rel is not None:
        pct = _parse_percent(rel)
        if pct is not None:
            return _ordered(base * (1.0 - pct), base * (1.0 + pct))
        if isinstance(rel, (list, tuple)) and len(rel) == 2:
            return np.full(n, float(rel[0])), np.full(n, float(rel[1]))

    # No target_range or relative_bounds: point target.
    return base.copy(), base.copy()
+
+
def _interpolate_to_target_times(
    sim_times: np.ndarray,
    sim_values: np.ndarray,
    target_times: np.ndarray
) -> np.ndarray:
    """Linearly interpolate (and extrapolate) simulated values onto target times.

    Non-finite samples are dropped first; if fewer than two finite samples
    remain, an all-NaN array of the target shape is returned.
    """
    finite = np.isfinite(sim_values) & np.isfinite(sim_times)
    if np.sum(finite) < 2:
        return np.full_like(target_times, np.nan)
    # interp1d with fill_value='extrapolate' extends linearly beyond the
    # simulated time range (np.interp would clip instead).
    interpolant = interp1d(
        sim_times[finite],
        sim_values[finite],
        kind='linear',
        bounds_error=False,
        fill_value='extrapolate'
    )
    return interpolant(target_times)
+
+
def _rel_errors_outside_range(
    sim_values: np.ndarray,
    lo: np.ndarray,
    hi: np.ndarray,
) -> np.ndarray:
    """
    Relative errors for values outside the target range [lo, hi]; in-range
    points contribute zero. Works for time series (length N) and scalars
    (length 1): each point is treated as an individual scalar target.

    Per point:
      - in range:  residual = 0
      - below lo:  residual = lo - value
      - above hi:  residual = value - hi

    Returns |residual_n| / |midpoint_n| with midpoint_n = (lo_n + hi_n) / 2,
    where the midpoint magnitude is floored at 1e-14 to avoid division by zero.
    """
    sim = np.asarray(sim_values)
    lo_arr = np.asarray(lo)
    hi_arr = np.asarray(hi)
    residual = np.zeros_like(sim, dtype=float)
    below = sim < lo_arr
    above = sim > hi_arr
    residual[below] = lo_arr[below] - sim[below]
    residual[above] = sim[above] - hi_arr[above]
    midpoint_mag = np.maximum(np.abs((lo_arr + hi_arr) / 2.0), 1e-14)
    return np.abs(residual) / midpoint_mag
+
+
class ObjectiveFunction:
    """
    Objective function for optimization. Computes weighted error between
    simulated values and targets. Supports scalars and time series.

    Internally each target is stored as a range [lo, hi]. User can specify:
    - Single value (target_value or target_file): range = [v, v]
    - Value + relative_bounds percent: range = value * (1 +/- pct)
    - target_range [min, max] directly
    Error is zero within the range; outside, penalty by distance from nearest bound.

    NOTE: _process_targets mutates the target dicts passed in (adds 'weight',
    'range_lo', 'range_hi', and loaded time-series data).
    """

    def __init__(self, targets: List[Dict], norm: str):
        """
        Initialize objective function.

        Args:
            targets: List of target specifications (mutated in place; see class note)
            norm: 'L1' for sum of absolute errors, 'L2' for Euclidean norm of error vector

        Raises:
            ValueError: If norm is invalid or a target specification is malformed.
        """
        if norm not in ("L1", "L2"):
            raise ValueError(
                f"norm is required and must be 'L1' or 'L2', got {norm!r}. "
                "Options: L1 = sum of absolute relative errors; L2 = Euclidean norm of the error vector. "
                "Specify in your tuning YAML under objective: { norm: L1 } or { norm: L2 }."
            )
        self.targets = targets
        self.norm = norm
        self._process_targets()

    def _process_targets(self):
        """Process targets: load data and compute range [lo, hi] for each."""
        for target in self.targets:
            if 'weight' not in target:
                target['weight'] = 1.0  # default weight
            if 'target_file' in target:
                # Deferred import: pandas is only needed for file-based targets.
                import pandas as pd
                df = pd.read_csv(target['target_file'])
                if 'time' not in df.columns or 'value' not in df.columns:
                    raise ValueError(f"target_file must have 'time' and 'value' columns: {target['target_file']}")
                target['target_times'] = df['time'].values
                target['target_values'] = df['value'].values
            elif 'target_value' in target:
                target['target_value'] = float(target['target_value'])
            elif 'target_range' not in target:
                raise ValueError(f"Target must have 'target_file', 'target_value', or 'target_range': {target}")
            lo, hi = _compute_range(target)
            target['range_lo'] = lo
            target['range_hi'] = hi

    def _get_simulated_value(
        self,
        target: Dict,
        simulated_values: Dict[str, Union[np.ndarray, float]]
    ) -> np.ndarray:
        """Look up and return simulated value for a target as numpy array.

        Raises:
            ValueError: If no simulated value was provided for the target name.
        """
        name = target['name']
        if name not in simulated_values:
            raise ValueError(f"Simulated value for '{name}' not found")
        sim_value = simulated_values[name]
        if not isinstance(sim_value, np.ndarray):
            sim_value = np.array([sim_value]) if np.isscalar(sim_value) else np.array(sim_value)
        return np.asarray(sim_value)

    def _error_for_time_series(
        self,
        target: Dict,
        sim_value: np.ndarray,
        simulated_values: Dict
    ) -> np.ndarray:
        """Return array of relative errors, one per target time point.

        The simulated series is interpolated onto the target time grid; a
        large sentinel error (1e10) is returned when the series is too short
        or interpolation yields non-finite values.
        """
        name = target['name']
        target_times = np.array(target['target_times'])
        sim_times = simulated_values.get(f'{name}_times')
        if sim_times is None:
            # No time grid provided; assume uniform sampling on [0, 1].
            sim_times = np.linspace(0, 1, len(sim_value))
        sim_times = np.array(sim_times)

        if len(sim_times) != len(sim_value):
            raise ValueError(
                f"Time and value arrays have different lengths for {name}: "
                f"{len(sim_times)} vs {len(sim_value)}"
            )

        if len(sim_times) < 2 or len(target_times) == 0:
            return np.array([1e10])  # sentinel: cannot interpolate

        sim_interp = _interpolate_to_target_times(sim_times, sim_value, target_times)
        if not np.all(np.isfinite(sim_interp)):
            return np.array([1e10])  # sentinel: interpolation failed

        return _rel_errors_outside_range(
            sim_interp, target['range_lo'], target['range_hi']
        )

    def _errors_for_scalar(self, target: Dict, sim_value: np.ndarray) -> np.ndarray:
        """Return array of one relative error for a scalar target."""
        sim_scalar = float(sim_value.item() if sim_value.size == 1 else sim_value.flat[0])
        return _rel_errors_outside_range(
            np.array([sim_scalar]), target['range_lo'], target['range_hi']
        )

    def compute(self, simulated_values: Dict[str, Union[np.ndarray, float]]) -> float:
        """
        Compute objective function value. Collects weighted relative errors from all
        targets (each time series point or scalar contributes one error value), then returns
        L1 norm (sum of absolute errors) or L2 norm (Euclidean norm) of that vector.

        Args:
            simulated_values: Dictionary mapping output names to simulated values

        Returns:
            Total error: L1 or L2 norm of the weighted relative-error vector
        """
        error_chunks: List[np.ndarray] = []
        for target in self.targets:
            sim_value = self._get_simulated_value(target, simulated_values)
            weight = float(target['weight'])

            if target['type'] == 'time_series':
                errors = self._error_for_time_series(target, sim_value, simulated_values)
            else:
                errors = self._errors_for_scalar(target, sim_value)

            error_chunks.append(weight * errors)
        all_errors = np.concatenate(error_chunks) if error_chunks else np.array([])

        # norm was validated in __init__, so a simple mapping suffices
        # (the old if/elif/else shadowed the builtin ord() and had an
        # unreachable else branch).
        norm_order = 1 if self.norm == "L1" else 2
        return float(np.linalg.norm(all_errors, ord=norm_order))
+
+
def create_objective(targets: List[Dict], **kwargs) -> ObjectiveFunction:
    """
    Factory for ObjectiveFunction.

    kwargs must contain 'norm' ('L1' or 'L2'); any remaining kwargs are
    forwarded. Targets with 'relative_bounds' (or legacy 'uncertainty') or
    target_range use range-based penalty.
    """
    return ObjectiveFunction(targets=targets, norm=kwargs.pop("norm"), **kwargs)
diff --git a/applications/svZeroDTuner/svzerodtuner/optimizer.py b/applications/svZeroDTuner/svzerodtuner/optimizer.py
new file mode 100644
index 000000000..ebb83fe15
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/optimizer.py
@@ -0,0 +1,352 @@
+"""
+Optimizer wrappers for sv0D Tuning Framework
+
+Supports scipy optimizers and parallelization.
+"""
+
+import numpy as np
+from typing import Callable, List, Tuple, Dict, Optional, Any
+from scipy.optimize import minimize, differential_evolution, OptimizeResult
+
+
# Objective values at or below this threshold count as "zero" when deciding
# whether optimization may terminate early (floating-point safety).
_OBJECTIVE_ZERO_TOL = 1e-12

# Warn when a parameter lies within this fraction of the bounds range from
# either bound (see _check_params_near_bounds).
_BOUND_TOLERANCE = 0.01
+
+
+class _ScaledObjective:
+ """Pickle-safe objective wrapper for scaled optimizer-space parameters."""
+
+ def __init__(
+ self,
+ objective_func: Callable[[np.ndarray], float],
+ to_phys: Callable[[np.ndarray], np.ndarray],
+ ):
+ self.objective_func = objective_func
+ self.to_phys = to_phys
+
+ def __call__(self, x_opt: np.ndarray) -> float:
+ return self.objective_func(self.to_phys(np.asarray(x_opt)))
+
+
def _check_params_near_bounds(
    params: Dict[str, float], parameters: List[Dict]
) -> List[tuple]:
    """Return (name, value, bound_type, bound_value) for params near bounds."""
    flagged = []
    for spec in parameters:
        pname = spec['name']
        if pname not in params:
            continue
        limits = spec.get('bounds')
        if not limits or len(limits) != 2:
            continue
        low, high = float(limits[0]), float(limits[1])
        current = float(params[pname])
        span = high - low
        if span <= 0:
            # Degenerate or inverted bounds: nothing sensible to report.
            continue
        margin = _BOUND_TOLERANCE * span
        if current <= low + margin:
            flagged.append((pname, current, 'min', low))
        elif current >= high - margin:
            flagged.append((pname, current, 'max', high))
    return flagged
+
+
class ObjectiveReachedZero(Exception):
    """
    Raised when optimization terminates because objective reached zero (Nelder-Mead).

    Nelder-Mead callbacks cannot request early termination via a return value,
    so the callback raises this exception and optimize() catches it.
    """
+
+
+def _coerce_numeric_options(options: Dict) -> Dict:
+ """Convert string values that look like numbers to int/float (YAML can load 1e-6, 1e-2 as str)."""
+ def coerce(v):
+ if isinstance(v, str):
+ try:
+ f = float(v)
+ return int(f) if f.is_integer() else f
+ except ValueError:
+ return v
+ if isinstance(v, (list, tuple)):
+ return type(v)(coerce(x) for x in v)
+ return v
+
+ return {k: coerce(v) for k, v in options.items()}
+
+
class OptimizerWrapper:
    """
    Wrapper around optimization algorithms with support for parallelization.

    Wraps scipy's ``differential_evolution`` and Nelder-Mead ``minimize`` and
    adds: per-iteration history tracking, best-result bookkeeping, optional
    physical<->optimizer parameter scaling, near-bound warnings, and optional
    early termination when the objective reaches zero.
    """

    # Algorithm names accepted by _validate_config.
    SUPPORTED_ALGORITHMS = {'differential_evolution', 'Nelder-Mead'}

    def __init__(self, algorithm: str = "differential_evolution", **options):
        """
        Initialize optimizer wrapper.

        Args:
            algorithm: Optimization algorithm name ('differential_evolution' or 'Nelder-Mead')
            **options: All other options passed directly to the scipy optimization function.
                Use scipy's exact parameter names. Invalid options will raise from scipy.
        """
        self.algorithm = str(algorithm)
        self.options = dict(options)
        # 'terminate_at_zero' (default True) is this wrapper's own flag, not a
        # scipy option — pop it so it is never forwarded to scipy.
        self.terminate_at_zero = self.options.pop('terminate_at_zero', True)
        self._validate_config()
        self.history = []        # one dict per callback invocation
        self.best_value = None   # best objective value seen so far
        self.best_params = None  # physical-space parameters for best_value

    def _validate_config(self) -> None:
        """Raise ValueError if the configured algorithm is not supported."""
        if self.algorithm not in self.SUPPORTED_ALGORITHMS:
            raise ValueError(
                f"Algorithm '{self.algorithm}' is not supported. "
                f"Supported: {', '.join(sorted(self.SUPPORTED_ALGORITHMS))}"
            )

    def _validate_optimization_inputs(
        self,
        bounds: List[Tuple[float, float]],
        x0: Optional[np.ndarray],
        param_names: Optional[List[str]] = None
    ):
        """
        Validate optimization problem inputs.

        Args:
            bounds: Parameter bounds
            x0: Initial guess
            param_names: Parameter names (optional, for clearer error messages)

        Raises:
            ValueError: If inputs are invalid
        """
        # Check bounds if provided
        if bounds:
            for i, (lower, upper) in enumerate(bounds):
                if lower >= upper:
                    raise ValueError(
                        f"Invalid bounds for parameter {i}: lower bound ({lower}) "
                        f"must be less than upper bound ({upper})"
                    )

        # Nelder-Mead needs a starting point; optimize() will fall back to the
        # midpoint of each bound when none is supplied.
        if self.algorithm == 'Nelder-Mead' and x0 is None:
            print("Note: Using center of bounds as initial guess for Nelder-Mead.")

        # If x0 is provided, validate it
        if x0 is not None:
            if len(x0) != len(bounds):
                raise ValueError(
                    f"Initial guess x0 has {len(x0)} elements, "
                    f"but {len(bounds)} parameters specified"
                )

            # Check if x0 is within bounds - raise error if any initial value is outside
            for i, (x_val, (lower, upper)) in enumerate(zip(x0, bounds)):
                if x_val < lower or x_val > upper:
                    name = param_names[i] if param_names and i < len(param_names) else f"parameter {i}"
                    bound_type = "min" if x_val < lower else "max"
                    bound_val = lower if x_val < lower else upper
                    raise ValueError(
                        f"Initial value for {name} ({x_val}) is outside its bounds: "
                        f"violates {bound_type} bound ({bound_val}). "
                        f"Bounds are [{lower}, {upper}]. "
                        f"Please update the model configuration or tuning bounds."
                    )

    def master_callback(
        self,
        intermediate_result: OptimizeResult,
        param_names: List[str],
        parameters: Optional[List[Dict]] = None,
    ) -> Optional[bool]:
        """
        Callback function for optimization algorithms. Update history, best, print progress, warn near bounds.
        Checks if objective reached zero and stops optimization if necessary (returns True for DE, or raises ObjectiveReachedZero for NM, otherwise None).
        """
        # Extract parameters and objective value from OptimizeResult
        x = np.asarray(intermediate_result.x)
        fun = float(intermediate_result.fun)

        # Convert x to physical space if scaling is used.
        # NOTE(review): _use_scaling is assigned inside optimize(); calling this
        # method before optimize() would raise AttributeError — confirm intended.
        if self._use_scaling:
            x = self._scale_to_phys(x)

        params_dict = dict(zip(param_names, x.tolist()))

        # Update history and best parameters
        history_entry = {
            'iteration': len(self.history),
            'objective': fun,
            'parameters': params_dict,
        }
        self.history.append(history_entry)
        if self.best_value is None or fun < self.best_value:
            self.best_value = fun
            self.best_params = x.copy()

        # Print progress (parameter values are only shown for small problems)
        step = len(self.history) - 1
        print(f"Iteration {step:3d}: Objective = {fun:.6e}", end="")
        if len(params_dict) <= 3:
            param_str = ", ".join([f"{name}={val:.3e}" for name, val in params_dict.items()])
            print(f" | {param_str}")
        else:
            print()

        # Warn if any parameter is close to its bounds
        if parameters:
            for name, value, bound_type, bound_value in _check_params_near_bounds(params_dict, parameters):
                print(f"  WARNING: {name}={value:.3e} is near {bound_type} bound ({bound_value:.3e})")

        # If terminating optimization at zero objective, return True for DE, raise ObjectiveReachedZero for NM, otherwise None.
        if self.terminate_at_zero and fun <= _OBJECTIVE_ZERO_TOL:
            if self.algorithm == "differential_evolution":
                return True
            raise ObjectiveReachedZero("Objective reached zero; stopping optimization")
        return None

    def optimize(
        self,
        objective_func: Callable,
        param_names: List[str],
        bounds: List[Tuple[float, float]],
        x0: Optional[np.ndarray] = None,
        parameters: Optional[List[Dict]] = None,
        param_scaling_to_opt_space: Optional[Callable[[np.ndarray], np.ndarray]] = None,
        param_scaling_to_phys_space: Optional[Callable[[np.ndarray], np.ndarray]] = None,
    ) -> OptimizeResult:
        """
        Run optimization.

        Bounds and x0 are in physical space. If scaling callables are
        provided, the optimizer works in scaled/optimizer space and returns result.x and
        get_best() in physical space.

        Args:
            objective_func: Objective function that takes physical-space parameter
                array and returns scalar.
            param_names: List of parameter names.
            bounds: List of (min, max) tuples in physical space.
            x0: Initial guess in physical space (optional).
            parameters: Optional list of param dicts with 'name' and 'bounds'
                (used for near-bounds warnings).
            param_scaling_to_opt_space: Optional; convert physical -> scaled/optimizer space.
            param_scaling_to_phys_space: Optional; convert scaled/optimizer -> physical space.

        Returns:
            OptimizeResult with result.x in physical space.
        """
        # Reset per-run state so repeated optimize() calls start fresh.
        self.history = []
        self.best_value = None
        self.best_params = None
        self._validate_optimization_inputs(bounds, x0, param_names)

        # Ensure numeric optimization options are numeric types.
        opts = _coerce_numeric_options(self.options)

        # Callback function supporting both SciPy styles:
        # - callback(intermediate_result=OptimizeResult)
        # - callback(x, convergence=...)
        def callback(*args, **kwargs):
            if "intermediate_result" in kwargs:
                return self.master_callback(
                    kwargs["intermediate_result"], param_names, parameters
                )

            if len(args) == 1 and isinstance(args[0], OptimizeResult):
                return self.master_callback(args[0], param_names, parameters)

            # Legacy positional/keyword styles: recover x from whichever slot
            # this SciPy version used.
            x = None
            if len(args) >= 1:
                x = np.asarray(args[0])
            elif "xk" in kwargs:
                x = np.asarray(kwargs["xk"])
            elif "x" in kwargs:
                x = np.asarray(kwargs["x"])

            if x is None:
                return None

            # Legacy DE callback doesn't provide objective value; evaluate it here.
            x_phys = self._scale_to_phys(x) if self._use_scaling else x
            fun = float(objective_func(x_phys))
            return self.master_callback(
                OptimizeResult(x=x, fun=fun), param_names, parameters
            )

        # Use center of bounds as initial guess if no initial guess is provided
        if x0 is None:
            x0 = np.array([(b[0] + b[1]) / 2 for b in bounds])

        # Handle scaling of parameter values
        self._use_scaling = (
            param_scaling_to_opt_space is not None
            and param_scaling_to_phys_space is not None
        )
        if self._use_scaling:
            self._scale_to_opt = param_scaling_to_opt_space
            self._scale_to_phys = param_scaling_to_phys_space

            # Scale bounds
            bounds_arr = np.array(bounds)
            bounds = list(zip(
                self._scale_to_opt(bounds_arr[:, 0]),
                self._scale_to_opt(bounds_arr[:, 1]),
            ))

            # Scale initial guess
            x0 = self._scale_to_opt(np.asarray(x0))

            # Objective function in optimizer space (module-level callable for multiprocessing compatibility)
            obj_fun = _ScaledObjective(objective_func, self._scale_to_phys)
        else:
            obj_fun = objective_func

        if self.algorithm == "differential_evolution":
            opts['callback'] = callback
            result = differential_evolution(obj_fun, bounds=bounds, **opts)
        elif self.algorithm == "Nelder-Mead":
            try:
                result = minimize(
                    obj_fun, x0=x0, method='Nelder-Mead',
                    bounds=bounds, options=opts, callback=callback
                )
            except ObjectiveReachedZero:
                # Terminated early because objective reached zero
                result = OptimizeResult(
                    # best_params is in physical space from callback, so convert to optimizer space
                    # to be consistent with the optimizer's returned result
                    x=self._scale_to_opt(self.best_params) if self._use_scaling else self.best_params,
                    success=True,
                    fun=self.best_value,
                    # NOTE(review): nfev here counts callback iterations, not
                    # actual function evaluations — confirm acceptable.
                    message="Optimization terminated: objective reached zero",
                    nfev=len(self.history),
                )

        # Use the optimizer's returned result for best value/params (not best seen during optimization)
        if hasattr(result, 'x') and hasattr(result, 'fun'):
            if self._use_scaling:
                result.x = self._scale_to_phys(np.asarray(result.x))
            self.best_params = result.x
            self.best_value = result.fun

        return result

    def get_history(self) -> List[Dict]:
        """Get optimization history."""
        return self.history

    def get_best(self) -> Tuple[float, np.ndarray]:
        """Get best objective value and parameters."""
        return self.best_value, self.best_params

    @classmethod
    def print_supported_algorithms(cls):
        """Print supported algorithms."""
        print("Supported algorithms:", ", ".join(sorted(cls.SUPPORTED_ALGORITHMS)))
        print("Use scipy's exact parameter names in YAML; invalid options will raise from scipy.")
diff --git a/applications/svZeroDTuner/svzerodtuner/output_extractor.py b/applications/svZeroDTuner/svzerodtuner/output_extractor.py
new file mode 100644
index 000000000..5836253e3
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/output_extractor.py
@@ -0,0 +1,118 @@
+"""
+Output Extractor for sv0D Tuning Framework
+
+Extracts outputs from sv0D simulation results using exact variable names from results.csv.
+Supports time_series, min, max, and mean extraction types.
+"""
+
+import numpy as np
+import pandas as pd
+from typing import Union, Literal, Optional
+import pysvzerod
+
+
class OutputExtractor:
    """
    Extracts outputs from sv0D simulation results.

    Uses exact variable names from sv0D results (e.g., "flow:AR_SYS:J0").
    Supports extraction of time series or scalar metrics (min, max, mean).
    """

    # Annotation is a string so importing this class does not require
    # pysvzerod at annotation-evaluation time.
    def __init__(self, solver: "pysvzerod.Solver"):
        """
        Initialize output extractor with a solver instance.

        Args:
            solver: pysvzerod.Solver instance (must have run() called)
        """
        self.solver = solver
        # Results are fetched lazily on first use (see _ensure_results).
        self._result_df = None
        self._times = None

    def _ensure_results(self):
        """Ensure simulation has been run and results are available."""
        if self._result_df is None:
            self._result_df = self.solver.get_full_result()
            self._times = self.solver.get_times()

    def extract(
        self,
        output_name: str,
        extraction_type: Literal["time_series", "min", "max", "mean"] = "time_series"
    ) -> Union[np.ndarray, float]:
        """
        Extract output from simulation results.

        Args:
            output_name: Exact output name from sv0D results (e.g., "flow:AR_SYS:J0")
            extraction_type: Type of extraction:
                - "time_series": Return full time series array
                - "min": Return minimum value
                - "max": Return maximum value
                - "mean": Return mean value

        Returns:
            Time series array (if extraction_type="time_series") or scalar value

        Raises:
            ValueError: If the output name is unknown to the solver, or the
                extraction_type is not one of the supported values.
        """
        self._ensure_results()

        # Get the time series for this output
        try:
            time_series = self.solver.get_single_result(output_name)
        except Exception as e:
            # Chain the original error so the solver-side cause stays visible.
            raise ValueError(
                f"Output '{output_name}' not found in simulation results: {e}"
            ) from e

        # Apply extraction type
        if extraction_type == "time_series":
            return np.array(time_series)
        elif extraction_type == "min":
            return float(np.min(time_series))
        elif extraction_type == "max":
            return float(np.max(time_series))
        elif extraction_type == "mean":
            return float(np.mean(time_series))
        else:
            raise ValueError(f"Unknown extraction_type: {extraction_type}")

    def get_times(self) -> np.ndarray:
        """
        Get time array from simulation.

        Returns:
            Time array
        """
        self._ensure_results()
        return np.array(self._times)

    def get_all_output_names(self) -> list:
        """
        Get list of all available output names.

        Returns:
            List of output names (unique, in order of first appearance)
        """
        self._ensure_results()
        return list(self._result_df['name'].unique())

    def extract_multiple(
        self,
        outputs: list[dict]
    ) -> dict:
        """
        Extract multiple outputs at once.

        Args:
            outputs: List of dicts with keys:
                - name: Output name
                - type: Extraction type (time_series, min, max, mean)

        Returns:
            Dictionary mapping output names to extracted values.
            Note: duplicate names keep only the last extraction.
        """
        results = {}
        for output_spec in outputs:
            name = output_spec['name']
            extraction_type = output_spec.get('type', 'time_series')
            results[name] = self.extract(name, extraction_type)
        return results
diff --git a/applications/svZeroDTuner/svzerodtuner/parameter_handler.py b/applications/svZeroDTuner/svzerodtuner/parameter_handler.py
new file mode 100644
index 000000000..1468b9173
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/parameter_handler.py
@@ -0,0 +1,172 @@
+"""
+Parameter Handler for sv0D Tuning Framework
+
+Handles loading, parsing, and updating parameters in sv0D.json files using name-based lookup.
+"""
+
+import json
+import copy
+import numpy as np
+from typing import Dict, List, Tuple, Any, Optional
+
+
class ParameterHandler:
    """
    Handles parameter loading, lookup, and updating in sv0D JSON configuration files.

    Supports name-based parameter lookup (e.g., "LV.Emax", "AR_SYS.C")
    for chambers, vessels, valves, and other structures.
    """

    def __init__(self, config_file: str):
        """
        Initialize parameter handler with a sv0D.json file.

        Args:
            config_file: Path to sv0D.json configuration file
        """
        self.config_file = config_file
        self.config = self._load_config()
        # Pristine copy so reset_to_original() can undo any set_parameter calls.
        self.original_config = copy.deepcopy(self.config)

    def _load_config(self) -> Dict:
        """Load JSON configuration file."""
        with open(self.config_file, 'r') as f:
            return json.load(f)

    def find_parameter(self, param_name: str) -> Tuple[Any, List[Any]]:
        """
        Find a parameter by name using name-based lookup.

        Supports formats like:
        - "LV.Emax" -> chambers[name="LV"].values.Emax
        - "AR_SYS.C" -> vessels[name="AR_SYS"].zero_d_element_values.C
        - "MV.Rmin" -> valves[name="MV"].params.Rmin

        Args:
            param_name: Parameter name in format "BlockName.ParameterName"

        Returns:
            Tuple of (value, path) where path is list of keys/indexes to reach the parameter

        Raises:
            ValueError: If parameter is not found
        """
        parts = param_name.split('.')
        if len(parts) != 2:
            raise ValueError(f"Parameter name must be in format 'BlockName.ParameterName', got '{param_name}'")

        block_name, param_key = parts

        # Try chambers first
        if 'chambers' in self.config:
            for i, chamber in enumerate(self.config['chambers']):
                if chamber.get('name') == block_name:
                    if 'values' in chamber and param_key in chamber['values']:
                        return chamber['values'][param_key], ['chambers', i, 'values', param_key]

        # Try vessels (keyed by 'vessel_name' in the sv0D schema)
        if 'vessels' in self.config:
            for i, vessel in enumerate(self.config['vessels']):
                if vessel.get('vessel_name') == block_name:
                    # Check zero_d_element_values
                    if 'zero_d_element_values' in vessel:
                        if param_key in vessel['zero_d_element_values']:
                            return vessel['zero_d_element_values'][param_key], ['vessels', i, 'zero_d_element_values', param_key]
                    # Check other top-level vessel properties
                    if param_key in vessel:
                        return vessel[param_key], ['vessels', i, param_key]

        # Try valves
        if 'valves' in self.config:
            for i, valve in enumerate(self.config['valves']):
                if valve.get('name') == block_name:
                    if 'params' in valve and param_key in valve['params']:
                        return valve['params'][param_key], ['valves', i, 'params', param_key]

        # Try boundary conditions
        if 'boundary_conditions' in self.config:
            for i, bc in enumerate(self.config['boundary_conditions']):
                if bc.get('bc_name') == block_name:
                    if 'bc_values' in bc and param_key in bc['bc_values']:
                        return bc['bc_values'][param_key], ['boundary_conditions', i, 'bc_values', param_key]

        # Try simulation parameters (addressed as "simulation.<key>")
        if 'simulation_parameters' in self.config:
            if block_name == 'simulation' and param_key in self.config['simulation_parameters']:
                return self.config['simulation_parameters'][param_key], ['simulation_parameters', param_key]

        # Try initial conditions (addressed as "initial_condition.<key>")
        if 'initial_condition' in self.config:
            if block_name == 'initial_condition' and param_key in self.config['initial_condition']:
                return self.config['initial_condition'][param_key], ['initial_condition', param_key]

        raise ValueError(f"Parameter '{param_name}' not found in configuration")

    def set_parameter(self, param_name: str, value: Any) -> None:
        """
        Set a parameter value by name.

        Args:
            param_name: Parameter name in format "BlockName.ParameterName"
            value: New parameter value (will be converted to Python scalar if numpy array)
        """
        # Convert numpy arrays/types to Python scalars so the config stays
        # JSON-serializable. Tuned parameters are continuous, hence float.
        if isinstance(value, np.ndarray):
            value = float(value.item())
        elif isinstance(value, (np.integer, np.floating)):
            value = float(value)

        _, path = self.find_parameter(param_name)
        # Walk the path down to the parent container, then assign the leaf.
        target = self.config
        for key in path[:-1]:
            target = target[key]
        target[path[-1]] = value

    def get_parameter(self, param_name: str) -> Any:
        """
        Get a parameter value by name.

        Args:
            param_name: Parameter name in format "BlockName.ParameterName"

        Returns:
            Parameter value
        """
        value, _ = self.find_parameter(param_name)
        return value

    def _convert_numpy_types(self, obj: Any) -> Any:
        """
        Recursively convert numpy types to Python native types.

        Integers stay integers and booleans stay booleans (previously both were
        coerced to float / left unserializable, which corrupted integer config
        fields such as cycle counts when the config was written back to JSON).

        Args:
            obj: Object to convert

        Returns:
            Object with numpy types converted to Python types
        """
        if isinstance(obj, np.ndarray):
            return float(obj.item()) if obj.size == 1 else obj.tolist()
        elif isinstance(obj, np.bool_):
            # Must be checked before numeric types.
            return bool(obj)
        elif isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, dict):
            return {key: self._convert_numpy_types(value) for key, value in obj.items()}
        elif isinstance(obj, (list, tuple)):
            return [self._convert_numpy_types(item) for item in obj]
        else:
            return obj

    def get_config(self) -> Dict:
        """
        Get the current configuration dictionary with all numpy types converted to Python types.

        Returns:
            Configuration dictionary
        """
        config_copy = copy.deepcopy(self.config)
        return self._convert_numpy_types(config_copy)

    def reset_to_original(self) -> None:
        """Reset configuration to original loaded values."""
        self.config = copy.deepcopy(self.original_config)
diff --git a/applications/svZeroDTuner/svzerodtuner/result_handler.py b/applications/svZeroDTuner/svzerodtuner/result_handler.py
new file mode 100644
index 000000000..de54d4c0b
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/result_handler.py
@@ -0,0 +1,332 @@
+"""
+Result Handler for sv0D Tuning Framework
+
+Handles storing optimization history, saving results, and generating reports.
+"""
+
+import json
+import os
+import numpy as np
+import pandas as pd
+from typing import List, Dict, Optional
+from .visualization import (
+ plot_objective_history,
+ plot_parameter_evolution,
+ plot_target_comparison,
+ plot_simulation_results,
+ create_optimization_report
+)
+
+
+# Tolerance for "close to bound" warning: within this fraction of the range from a bound
+_BOUND_TOLERANCE = 0.01
+
+
def _check_params_near_bounds(
    params: Dict[str, float], parameters: List[Dict]
) -> List[tuple]:
    """
    Check which parameters are close to their bounds.

    Returns:
        List of (param_name, value, bound_type, bound_value) for params near bounds
    """
    hits = []
    for spec in parameters:
        key = spec['name']
        if key not in params:
            continue
        limits = spec.get('bounds')
        if not limits or len(limits) != 2:
            continue
        lo = float(limits[0])
        hi = float(limits[1])
        val = float(params[key])
        width = hi - lo
        if width <= 0:
            # Degenerate or inverted bounds: skip silently.
            continue
        slack = _BOUND_TOLERANCE * width
        if val <= lo + slack:
            hits.append((key, val, 'min', lo))
        elif val >= hi - slack:
            hits.append((key, val, 'max', hi))
    return hits
+
+
class ResultHandler:
    """
    Handles storage and reporting of optimization results.

    Writes optimization history (JSON/CSV), final parameters and configuration,
    timing info, and delegates plot generation to the visualization module.
    """

    def __init__(self, output_dir: str, save_history: bool = True, save_plots: bool = True):
        """
        Initialize result handler.

        Args:
            output_dir: Output directory for results (created if missing)
            save_history: Whether to save optimization history
            save_plots: Whether to generate plots
        """
        self.output_dir = output_dir
        self.save_history = save_history
        self.save_plots = save_plots

        os.makedirs(output_dir, exist_ok=True)

    def save_history_json(self, history: List[Dict], filename: str = "history.json"):
        """
        Save optimization history to JSON file.

        Args:
            history: Optimization history list
            filename: Output filename
        """
        # Skip when history saving is disabled or there is nothing to save.
        if not self.save_history or not history:
            if not history:
                print("Note: No optimization history to save")
            return

        # Save history in optimization_history subfolder
        history_dir = os.path.join(self.output_dir, 'optimization_history')
        os.makedirs(history_dir, exist_ok=True)
        filepath = os.path.join(history_dir, filename)

        # Convert numpy arrays to lists for JSON serialization
        history_serializable = []
        for entry in history:
            entry_copy = entry.copy()
            if 'parameters' in entry_copy:
                entry_copy['parameters'] = {
                    k: float(v) if isinstance(v, (np.number, np.ndarray)) else v
                    for k, v in entry_copy['parameters'].items()
                }
            history_serializable.append(entry_copy)

        with open(filepath, 'w') as f:
            json.dump(history_serializable, f, indent=2)

        print(f"Saved optimization history to {filepath}")

    def save_history_csv(self, history: List[Dict], filename: str = "history.csv"):
        """
        Save optimization history to CSV file.

        One row per iteration: iteration index, objective, then one column per
        parameter.

        Args:
            history: Optimization history list
            filename: Output filename
        """
        if not self.save_history or not history:
            return

        # Save history in optimization_history subfolder
        history_dir = os.path.join(self.output_dir, 'optimization_history')
        os.makedirs(history_dir, exist_ok=True)
        filepath = os.path.join(history_dir, filename)

        rows = []
        for entry in history:
            # Fall back to the row index when an entry lacks 'iteration'.
            step = entry.get('iteration', len(rows))
            row = {
                'iteration': step,
                'objective': entry['objective']
            }
            row.update(entry['parameters'])
            rows.append(row)

        df = pd.DataFrame(rows)
        df.to_csv(filepath, index=False)

        print(f"Saved optimization history CSV to {filepath}")

    def save_final_parameters(
        self,
        param_names: List[str],
        param_values: np.ndarray,
        filename: str = "final_parameters.json"
    ):
        """
        Save final optimized parameters.

        Args:
            param_names: Parameter names
            param_values: Parameter values (same order/length as param_names)
            filename: Output filename
        """
        filepath = os.path.join(self.output_dir, filename)

        params_dict = {
            name: float(value) for name, value in zip(param_names, param_values)
        }

        with open(filepath, 'w') as f:
            json.dump(params_dict, f, indent=2)

        print(f"Saved final parameters to {filepath}")

    def save_final_config(
        self,
        config_dict: Dict,
        filename: str = "optimized_config.json"
    ):
        """
        Save final optimized sv0D configuration.

        Args:
            config_dict: sv0D configuration dictionary (must be JSON-serializable)
            filename: Output filename
        """
        filepath = os.path.join(self.output_dir, filename)

        with open(filepath, 'w') as f:
            json.dump(config_dict, f, indent=2)

        print(f"Saved optimized configuration to {filepath}")

    def generate_plots(
        self,
        history: List[Dict],
        param_names: List[str],
        targets: Optional[List[Dict]] = None,
        simulated_values: Optional[Dict] = None,
        optimized_results_df: Optional[pd.DataFrame] = None,
        optimized_summary_df: Optional[pd.DataFrame] = None
    ):
        """
        Generate optimization visualization plots.

        Delegates the actual plotting to the visualization module; this method
        only decides which plots apply and where their files go.

        Args:
            history: Optimization history
            param_names: Parameter names
            targets: Target specifications (optional)
            simulated_values: Simulated values dictionary (optional)
            optimized_results_df: DataFrame with optimized simulation time series (optional)
            optimized_summary_df: DataFrame with optimized simulation statistics (optional)
        """
        if not self.save_plots:
            return

        # Create subfolder for optimization history/evolution plots
        history_dir = os.path.join(self.output_dir, 'optimization_history')
        os.makedirs(history_dir, exist_ok=True)

        if history:
            # Plot objective history
            plot_objective_history(
                history,
                output_file=os.path.join(history_dir, 'objective_history.png')
            )

            # Plot parameter evolution
            if param_names:
                plot_parameter_evolution(
                    history,
                    param_names,
                    output_file=os.path.join(history_dir, 'parameter_evolution.png')
                )
        else:
            print("Note: Skipping history plots (no history available)")

        # Plot target comparison if available
        if targets and simulated_values:
            plot_target_comparison(
                targets,
                simulated_values,
                output_file=os.path.join(self.output_dir, 'target_comparison.png')
            )

        # Plot all simulation results if DataFrames are provided
        if optimized_results_df is not None and optimized_summary_df is not None:
            # Create subfolder for simulation results
            sim_output_dir = os.path.join(self.output_dir, 'optimized_simulation')
            os.makedirs(sim_output_dir, exist_ok=True)

            # Save optimized results CSVs
            optimized_results_df.to_csv(os.path.join(sim_output_dir, 'optimized_results.csv'), index=False)
            optimized_summary_df.to_csv(os.path.join(sim_output_dir, 'optimized_summary.csv'), index=False)
            print(f"Saved optimized results to: {sim_output_dir}/")

            # Plot optimized results
            plot_simulation_results(
                optimized_results_df,
                optimized_summary_df,
                sim_output_dir,
                title_prefix="Optimized"
            )

    def create_summary_report(
        self,
        history: List[Dict],
        param_names: List[str],
        best_value: float,
        best_params: np.ndarray,
        parameters: Optional[List[Dict]] = None,
        targets: Optional[List[Dict]] = None,
        simulated_values: Optional[Dict] = None,
        optimized_results_df: Optional[pd.DataFrame] = None,
        optimized_summary_df: Optional[pd.DataFrame] = None,
        timing_info: Optional[Dict] = None
    ):
        """
        Create comprehensive summary report.

        Saves history/parameters/timing, generates plots, and prints a console
        summary with near-bound warnings.

        Args:
            history: Optimization history
            param_names: Parameter names
            best_value: Best objective value
            best_params: Best parameter values
            parameters: Parameter definitions with bounds (optional, for bound warnings)
            targets: Target specifications (optional)
            simulated_values: Simulated values dictionary (optional)
            optimized_results_df: DataFrame with optimized simulation time series (optional)
            optimized_summary_df: DataFrame with optimized simulation statistics (optional)
            timing_info: Dictionary with timing statistics (optional)
        """
        # Save history
        self.save_history_json(history)
        self.save_history_csv(history)

        # Save final parameters
        self.save_final_parameters(param_names, best_params)

        # Save timing information if provided
        if timing_info:
            history_dir = os.path.join(self.output_dir, 'optimization_history')
            os.makedirs(history_dir, exist_ok=True)
            timing_file = os.path.join(history_dir, 'timing_info.json')
            with open(timing_file, 'w') as f:
                json.dump(timing_info, f, indent=2)
            print(f"Saved timing information to {timing_file}")

        # Generate plots
        self.generate_plots(
            history,
            param_names,
            targets=targets,
            simulated_values=simulated_values,
            optimized_results_df=optimized_results_df,
            optimized_summary_df=optimized_summary_df
        )

        # Print summary
        print("\n" + "="*60)
        print("OPTIMIZATION SUMMARY")
        print("="*60)
        if timing_info and 'n_iterations' in timing_info:
            print(f"Total iterations: {timing_info['n_iterations']}")
            n_ev = timing_info.get('n_evaluations')
            print(f"Total function evaluations: {n_ev if n_ev is not None else 'not available (interrupted)'}")
        else:
            print(f"Total function evaluations: {len(history)}")
        print(f"Best objective value: {best_value:.6e}")
        print(f"\nBest parameters:")
        for name, value in zip(param_names, best_params):
            print(f"  {name:<50} {value:.6e}")
        # Warn if any best parameter is close to its bounds
        if parameters:
            near_bounds = _check_params_near_bounds(
                dict(zip(param_names, best_params)), parameters
            )
            if near_bounds:
                print("\nWARNING: The following best parameters are near their bounds:")
                for name, value, bound_type, bound_value in near_bounds:
                    print(f"  {name}={value:.6e} is near {bound_type} bound ({bound_value:.6e})")
        print("="*60)
diff --git a/applications/svZeroDTuner/svzerodtuner/scaling.py b/applications/svZeroDTuner/svzerodtuner/scaling.py
new file mode 100644
index 000000000..ed85dd114
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/scaling.py
@@ -0,0 +1,124 @@
+"""
+Parameter scaling for optimization.
+
+Scaling objects convert between physical (model) space and optimizer space.
+E.g. log scaling: optimizer works with log(p); to_physical applies exp, to_opt applies log.
+"""
+
import numpy as np
from typing import List, Optional
+
+
class ParameterScaling:
    """
    Base interface for mapping one parameter between the physical (model)
    space and the space the optimizer works in.

    Subclasses implement the forward/inverse transforms; the base class
    raises on both so it cannot silently be used as a concrete scaling.
    """

    @property
    def requires_positive(self) -> bool:
        """Whether physical values and bounds must be strictly positive."""
        return False

    def to_physical(self, x_opt: float) -> float:
        """Map a single optimizer-space value back to physical space."""
        raise NotImplementedError

    def to_opt(self, x_physical: float) -> float:
        """Map a single physical-space value into optimizer space."""
        raise NotImplementedError
+
+
class IdentityScaling(ParameterScaling):
    """Pass-through scaling: optimizer space equals physical space."""

    def to_physical(self, x_opt: float) -> float:
        # Coerce to a plain float so callers always receive the same type.
        return float(x_opt)

    def to_opt(self, x_physical: float) -> float:
        return float(x_physical)
+
+
class LogScaling(ParameterScaling):
    """Log-space scaling: the optimizer sees log(p); physical = exp(opt)."""

    @property
    def requires_positive(self) -> bool:
        # log() is only defined for strictly positive physical values.
        return True

    def to_physical(self, x_opt: float) -> float:
        return float(np.exp(x_opt))

    def to_opt(self, x_physical: float) -> float:
        return float(np.log(x_physical))
+
+
class MaxScaling(ParameterScaling):
    """Normalize by the larger bound: optimizer value = physical / max(bounds)."""

    def __init__(self, bounds: tuple):
        # The divisor is the larger of the two bounds; it must be non-zero
        # or the inverse transform would divide by zero.
        self._max_bound = max(float(bounds[0]), float(bounds[1]))
        if self._max_bound == 0:
            raise ValueError("Max scaling requires non-zero bounds")

    def to_physical(self, x_opt: float) -> float:
        return float(x_opt * self._max_bound)

    def to_opt(self, x_physical: float) -> float:
        return float(x_physical / self._max_bound)
+
+
def get_scaling(name: str, bounds: Optional[tuple] = None) -> ParameterScaling:
    """
    Return a scaling instance from a config string.

    Args:
        name: Scaling name: 'identity', 'log', or 'max'. 'identity' or None = no transform.
        bounds: Optional (lo, hi) for the parameter. Required when name is 'max'.

    Returns:
        A ParameterScaling instance.

    Raises:
        ValueError: If the name is unknown, or 'max' is requested without
            a two-element bounds sequence.
    """
    if name == "identity" or name is None:
        return IdentityScaling()
    if name == "log":
        return LogScaling()
    if name == "max":
        # MaxScaling derives its normalization constant from the bounds.
        if bounds is None or len(bounds) != 2:
            raise ValueError("Scaling 'max' requires bounds [lo, hi]")
        return MaxScaling(tuple(bounds))
    raise ValueError(f"Unknown scaling {name!r}; use 'identity', 'log', or 'max'")
+
+
def to_physical_array(x_opt: np.ndarray, scalings: List[ParameterScaling]) -> np.ndarray:
    """
    Map a whole parameter vector from optimizer space into physical space.

    Args:
        x_opt: Parameter vector in optimizer space
        scalings: One ParameterScaling per entry of the vector

    Returns:
        Parameter vector in physical space
    """
    result = np.empty_like(x_opt, dtype=float)
    for idx, scaling in enumerate(scalings):
        result[idx] = scaling.to_physical(x_opt[idx])
    return result
+
+
def to_opt_array(x_physical: np.ndarray, scalings: List[ParameterScaling]) -> np.ndarray:
    """
    Map a whole parameter vector from physical space into optimizer space.

    Args:
        x_physical: Parameter vector in physical space
        scalings: One ParameterScaling per entry of the vector

    Returns:
        Parameter vector in optimizer space
    """
    result = np.empty_like(x_physical, dtype=float)
    for idx, scaling in enumerate(scalings):
        result[idx] = scaling.to_opt(x_physical[idx])
    return result
diff --git a/applications/svZeroDTuner/svzerodtuner/sensitivity.py b/applications/svZeroDTuner/svzerodtuner/sensitivity.py
new file mode 100644
index 000000000..dd87e12d6
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/sensitivity.py
@@ -0,0 +1,634 @@
+"""
+Sensitivity Analysis for sv0D Tuning Framework
+
+Performs correlation-based sensitivity screening to understand how parameter
+variations affect quantities of interest.
+
+This implementation uses Sobol low-discrepancy sampling for parameter space
+exploration and reports screening metrics rather than true Sobol indices.
+The method provides screening-level sensitivity information suitable for
+identifying the most influential parameters.
+"""
+
+import numpy as np
+import pandas as pd
+from typing import Dict, List, Optional, Any, Callable
+from scipy.stats import qmc
+import json
+import os
+from pathlib import Path
+import yaml
+
+from .parameter_handler import ParameterHandler
+from .output_extractor import OutputExtractor
+from .simulation import run_simulation
+from .expression_handler import Expression
+
+
class SensitivityAnalyzer:
    """
    Performs correlation-based sensitivity screening.

    Samples the parameter space with a Sobol low-discrepancy sequence,
    runs one sv0D simulation per sample, and computes screening-level
    first-order and total-order scores for each quantity of interest.
    """

    # Human-readable labels used in the text report and figure titles.
    ANALYSIS_LABEL = "correlation-based sensitivity screening"
    FIRST_ORDER_LABEL = "First-order screening score (squared correlation)"
    TOTAL_ORDER_LABEL = (
        "Total-order screening score (binned conditional-variance heuristic)"
    )
    # Output file names used by save_results() and the plotting helpers.
    INDICES_FILENAME = "screening_indices.json"
    PER_QOI_FIGURE_PREFIX = "screening"
    HEATMAP_FIRST_FILENAME = "screening_heatmap_first_order.png"
    HEATMAP_TOTAL_FILENAME = "screening_heatmap_total_order.png"
+
    def __init__(self, config_file: str):
        """
        Initialize sensitivity analyzer with configuration file.

        Args:
            config_file: Path to sensitivity configuration YAML file

        Raises:
            ValueError: If the configuration is malformed, or if it names a
                parameter that does not exist in the model configuration.
        """
        # Load configuration directly (don't use ConfigHandler which expects optimization format)
        self.config_file = config_file
        self.config = self._load_config()
        self._validate_config()

        # Get model config file path; a relative path is resolved against the
        # directory containing the sensitivity config file.
        config_dir = os.path.dirname(os.path.abspath(self.config_file))
        model_config = self.config['model']['config_file']
        if not os.path.isabs(model_config):
            model_config = os.path.join(config_dir, model_config)

        # Initialize parameter handler
        self.param_handler = ParameterHandler(model_config)

        # Get configuration sections
        self.parameters = self.config.get('parameters', [])
        self.quantities_of_interest = self.config.get('quantities_of_interest', [])
        self.sensitivity_config = self.config.get('sensitivity', {})
        self.output_config = self.config.get('output', {})

        # Fail fast if any requested parameter is absent from the model.
        for param in self.parameters:
            try:
                self.param_handler.get_parameter(param["name"])
            except ValueError as exc:
                raise ValueError(
                    f"Sensitivity parameter '{param['name']}' not found in model configuration"
                ) from exc

        # Sensitivity analysis settings
        self.n_samples = self.sensitivity_config.get('n_samples', 512)

        # Replace expression string with Expression object for each QoI (all scalars)
        for qoi in self.quantities_of_interest:
            expr_str = qoi["expression"]
            qoi["expression"] = Expression(expr_str, "scalar")

        # Results storage
        self.results = {}  # QoI name -> screening metrics (filled by run())
        self.sample_data = []  # one dict per evaluated sample (params + QoI values)
+
+ def _load_config(self) -> Dict:
+ """Load YAML configuration file."""
+ with open(self.config_file, 'r') as f:
+ return yaml.safe_load(f)
+
+ def _validate_config(self):
+ """Validate configuration structure."""
+ required_sections = ['model', 'parameters', 'quantities_of_interest']
+ for section in required_sections:
+ if section not in self.config:
+ raise ValueError(f"Missing required section '{section}' in sensitivity configuration")
+
+ # Validate model section
+ if 'config_file' not in self.config['model']:
+ raise ValueError("model.config_file is required")
+
+ # Validate parameters
+ if not self.config['parameters']:
+ raise ValueError("At least one parameter must be specified")
+
+ for i, param in enumerate(self.config['parameters']):
+ if 'name' not in param:
+ raise ValueError(f"Parameter {i} missing 'name'")
+ if 'bounds' not in param:
+ raise ValueError(f"Parameter '{param['name']}' missing 'bounds'")
+ if len(param['bounds']) != 2:
+ raise ValueError(f"Parameter '{param['name']}' bounds must have 2 values [min, max]")
+
+ # Validate quantities of interest
+ if not self.config['quantities_of_interest']:
+ raise ValueError("At least one quantity of interest must be specified")
+
+ for i, qoi in enumerate(self.config['quantities_of_interest']):
+ if 'name' not in qoi:
+ raise ValueError(f"Quantity of interest {i} missing 'name'")
+ if 'expression' not in qoi:
+ raise ValueError(
+ f"Quantity of interest '{qoi.get('name', i)}' missing 'expression'"
+ )
+
    def _get_simulated_quantities_of_interest(
        self, param_values: np.ndarray, suppress_warnings: bool = False
    ) -> Dict[str, float]:
        """
        Run sv0D simulation and return quantity-of-interest values.
        Each QoI has name (free label) and expression (e.g. np.max(pressure:AV:AR_SYS)).

        Args:
            param_values: Parameter values, ordered like self.parameters.
            suppress_warnings: If True, do not print warnings on failures.

        Returns:
            Dict mapping QoI name to its value; NaN for any QoI that could
            not be evaluated (or for every QoI if the simulation failed).
        """
        # Default every QoI to NaN so failures still yield a well-formed dict.
        qoi_values = {qoi["name"]: np.nan for qoi in self.quantities_of_interest}

        try:
            _, extractor = run_simulation(
                self.param_handler, self.parameters, param_values
            )
            times = extractor.get_times()
            available_outputs = extractor.get_all_output_names()

            # Collect outputs needed by any QoI expression; each solver output
            # is extracted at most once even when several QoIs reference it.
            unique_outputs = {}
            for qoi in self.quantities_of_interest:
                expr = qoi["expression"]
                for out_name in expr.output_names(available_outputs):
                    if out_name not in unique_outputs:
                        try:
                            ts = extractor.extract(out_name, "time_series")
                            unique_outputs[out_name] = {
                                "time_series": ts,
                                "times": times,
                            }
                        except Exception:
                            # Output unavailable: the QoI that needs it will
                            # fail below and be reported as NaN.
                            pass

            for qoi in self.quantities_of_interest:
                name = qoi["name"]
                try:
                    qoi_values[name] = float(
                        qoi["expression"].evaluate(
                            unique_outputs, available_outputs
                        )
                    )
                except Exception as e:
                    if not suppress_warnings:
                        print(f"Warning: Could not evaluate QoI '{name}': {e}")
                    qoi_values[name] = np.nan

        except Exception as e:
            # Simulation itself failed: every QoI stays at its NaN default.
            if not suppress_warnings:
                print(f"Warning: Simulation failed: {e}")

        return qoi_values
+
+ def _create_qoi_function(self, qoi_key: str) -> Callable:
+ """
+ Create a function that evaluates a specific QoI for given parameters.
+
+ Args:
+ qoi_key: Key for the quantity of interest
+
+ Returns:
+ Function that takes parameter array and returns QoI value
+ """
+ def qoi_func(params):
+ """Evaluate QoI for given parameters."""
+ try:
+ # Handle both single parameter set and multiple parameter sets
+ if params.ndim == 1:
+ # Single parameter set
+ qoi_values = self._get_simulated_quantities_of_interest(params)
+ return qoi_values[qoi_key]
+ else:
+ # Multiple parameter sets
+ results = []
+ for i in range(params.shape[0]):
+ qoi_values = self._get_simulated_quantities_of_interest(params[i, :])
+ results.append(qoi_values[qoi_key])
+ return np.array(results)
+ except Exception as e:
+ print(f"Error in simulation: {e}")
+ if params.ndim == 1:
+ return np.nan
+ else:
+ return np.full(params.shape[0], np.nan)
+
+ return qoi_func
+
+ def run(self) -> Dict:
+ """
+ Run correlation-based sensitivity screening.
+
+ Returns:
+ Dictionary with sensitivity analysis results
+ """
+ print("="*70)
+ print("SENSITIVITY ANALYSIS")
+ print("="*70)
+ print(f"Parameters: {[p['name'] for p in self.parameters]}")
+ qoi_names = [q["name"] for q in self.quantities_of_interest]
+ print(f"Quantities of Interest: {qoi_names}")
+ print(f"Number of samples: {self.n_samples}")
+ print()
+
+ # Reset per-run state so reusing the analyzer does not mix old and new samples.
+ self.results = {}
+ self.sample_data = []
+
+ # Get parameter bounds
+ param_names = [p['name'] for p in self.parameters]
+ bounds = [p['bounds'] for p in self.parameters]
+ n_params = len(param_names)
+
+ # Generate Sobol low-discrepancy samples for screening.
+ print("Generating quasi-random screening samples...")
+
+ # Extract and validate bounds
+ lower_bounds = []
+ upper_bounds = []
+ for i, (param, bound) in enumerate(zip(self.parameters, bounds)):
+ lower = float(bound[0])
+ upper = float(bound[1])
+ if lower >= upper:
+ raise ValueError(
+ f"Invalid bounds for parameter '{param['name']}': "
+ f"lower ({lower}) must be < upper ({upper})"
+ )
+ lower_bounds.append(lower)
+ upper_bounds.append(upper)
+ print(f" {param['name']}: [{lower:.3e}, {upper:.3e}]")
+
+ lower_bounds = np.array(lower_bounds)
+ upper_bounds = np.array(upper_bounds)
+
+ sampler = qmc.Sobol(d=n_params, scramble=True)
+ # Generate samples in [0, 1]^d
+ samples_unit = sampler.random(self.n_samples)
+
+ # Scale to parameter bounds
+ samples = qmc.scale(samples_unit, lower_bounds, upper_bounds)
+
+ print(f"Generated {samples.shape[0]} samples in {n_params}-dimensional space")
+ print()
+
+ # Evaluate all QoIs for all samples
+ print("Evaluating simulations...")
+ print("-"*70)
+
+ all_qoi_values = {qoi["name"]: [] for qoi in self.quantities_of_interest}
+
+ for i, param_values in enumerate(samples):
+ if (i + 1) % max(1, self.n_samples // 20) == 0:
+ print(f" Progress: {i+1}/{self.n_samples} ({100*(i+1)/self.n_samples:.0f}%)")
+
+ qoi_values = self._get_simulated_quantities_of_interest(param_values)
+
+ # Store sample data
+ sample_entry = {
+ 'sample_id': i,
+ **{param_names[j]: param_values[j] for j in range(n_params)},
+ **qoi_values
+ }
+ self.sample_data.append(sample_entry)
+
+ # Store QoI values
+ for qoi_key, qoi_value in qoi_values.items():
+ all_qoi_values[qoi_key].append(qoi_value)
+
+ print(f" Completed {self.n_samples} simulations")
+ print()
+
+ # Compute screening metrics for each QoI
+ print("Computing screening metrics...")
+ print("-"*70)
+
+ for qoi_key in all_qoi_values.keys():
+ print(f" Analyzing {qoi_key}...")
+
+ qoi_values_array = np.array(all_qoi_values[qoi_key])
+
+ # Check for NaN values
+ if np.any(np.isnan(qoi_values_array)):
+ n_nan = np.sum(np.isnan(qoi_values_array))
+ print(f" Warning: {n_nan}/{len(qoi_values_array)} simulations failed")
+ # Remove NaN values for analysis
+ valid_idx = ~np.isnan(qoi_values_array)
+ qoi_values_array = qoi_values_array[valid_idx]
+ samples_valid = samples[valid_idx, :]
+ else:
+ samples_valid = samples
+
+ # Compute screening metrics from the sampled outputs.
+ try:
+ total_variance = np.var(qoi_values_array)
+
+ if total_variance < 1e-12:
+ print(f" Warning: Very low variance ({total_variance:.3e}), QoI may be insensitive to parameters")
+ # All parameters have zero sensitivity
+ first_order = {param_names[i]: 0.0 for i in range(n_params)}
+ total_order = first_order.copy()
+ else:
+ first_order = {}
+ total_order = {}
+
+ # Estimate first-order and total-order screening scores.
+ for i, param_name in enumerate(param_names):
+ param_values = samples_valid[:, i]
+
+ # First-order score: squared correlation coefficient
+ correlation = np.corrcoef(param_values, qoi_values_array)[0, 1]
+ first_order[param_name] = max(0.0, min(1.0, correlation**2))
+
+ # Total-order score: use conditional variance estimation
+ # Group samples by parameter value bins
+ n_bins = min(10, len(param_values) // 10)
+ if n_bins >= 3:
+ bins = np.percentile(param_values, np.linspace(0, 100, n_bins+1))
+ bin_indices = np.digitize(param_values, bins[1:-1])
+
+ # Variance within bins (conditional variance)
+ var_within = 0.0
+ for bin_idx in range(n_bins):
+ mask = bin_indices == bin_idx
+ if np.sum(mask) > 1:
+ var_within += np.var(qoi_values_array[mask]) * np.sum(mask)
+ var_within /= len(qoi_values_array)
+
+ # Total-order screening score: 1 - conditional variance fraction
+ total_order[param_name] = max(0.0, min(1.0, 1.0 - var_within / total_variance))
+ else:
+ # Not enough samples for binning, use first-order as approximation
+ total_order[param_name] = first_order[param_name]
+
+ self.results[qoi_key] = {
+ 'analysis_type': 'correlation_screening',
+ 'sampler': 'sobol_sequence',
+ 'first_order_metric': 'squared_pearson_correlation',
+ 'total_order_metric': 'binned_conditional_variance_screening',
+ 'first_order': first_order,
+ 'total_order': total_order,
+ 'mean': float(np.mean(qoi_values_array)),
+ 'std': float(np.std(qoi_values_array)),
+ 'min': float(np.min(qoi_values_array)),
+ 'max': float(np.max(qoi_values_array))
+ }
+
+ except Exception as e:
+ print(f" Error computing indices: {e}")
+ import traceback
+ traceback.print_exc()
+ # Fallback to zeros
+ self.results[qoi_key] = {
+ 'analysis_type': 'correlation_screening',
+ 'sampler': 'sobol_sequence',
+ 'first_order_metric': 'squared_pearson_correlation',
+ 'total_order_metric': 'binned_conditional_variance_screening',
+ 'first_order': {param_names[i]: 0.0 for i in range(n_params)},
+ 'total_order': {param_names[i]: 0.0 for i in range(n_params)},
+ 'mean': float(np.mean(qoi_values_array)),
+ 'std': float(np.std(qoi_values_array)),
+ 'min': float(np.min(qoi_values_array)),
+ 'max': float(np.max(qoi_values_array))
+ }
+
+ print()
+ print("✓ Sensitivity analysis complete")
+ print()
+
+ return self.results
+
    def save_results(self, output_dir: Optional[str] = None):
        """
        Save sensitivity analysis results to files.

        Writes screening metrics (JSON), raw sample data (CSV), a text
        summary, and — when matplotlib is available — figures.

        Args:
            output_dir: Output directory (uses config if not provided)
        """
        if output_dir is None:
            output_dir = self.output_config.get('directory', 'sensitivity_results')

        output_path = Path(output_dir)
        output_path.mkdir(parents=True, exist_ok=True)

        print(f"Saving results to {output_dir}/")
        print("-"*70)

        # Save screening metrics as JSON
        indices_file = output_path / self.INDICES_FILENAME
        with open(indices_file, 'w') as f:
            json.dump(self.results, f, indent=2)
        print(f"  ✓ Saved screening metrics: {indices_file.name}")

        # Save sample data as CSV (only when run() has stored samples)
        if self.sample_data:
            samples_df = pd.DataFrame(self.sample_data)
            samples_file = output_path / 'sample_data.csv'
            samples_df.to_csv(samples_file, index=False)
            print(f"  ✓ Saved sample data: {samples_file.name}")

        # Create summary report
        self._create_summary_report(output_path)
        print(f"  ✓ Saved summary report: summary.txt")

        # Create visualizations (best-effort: plotting problems are not fatal)
        try:
            self._create_visualizations(output_path)
            print(f"  ✓ Saved bar charts and scatter plots")
        except Exception as e:
            print(f"  Warning: Could not create visualizations: {e}")

        print()
+
+ def _create_summary_report(self, output_path: Path):
+ """Create a text summary report."""
+ report_file = output_path / 'summary.txt'
+
+ with open(report_file, 'w') as f:
+ f.write("="*70 + "\n")
+ f.write("SENSITIVITY ANALYSIS SUMMARY\n")
+ f.write("="*70 + "\n\n")
+ f.write(f"Method: {self.ANALYSIS_LABEL}\n")
+ f.write("Sampling: Sobol low-discrepancy sequence\n\n")
+
+ f.write(f"Parameters analyzed: {[p['name'] for p in self.parameters]}\n")
+ f.write(f"Number of samples: {self.n_samples}\n\n")
+
+ for qoi_key, results in self.results.items():
+ f.write("-"*70 + "\n")
+ f.write(f"Quantity of Interest: {qoi_key}\n")
+ f.write("-"*70 + "\n")
+ f.write(f"Mean: {results['mean']:.6e}\n")
+ f.write(f"Std: {results['std']:.6e}\n")
+ f.write(f"Min: {results['min']:.6e}\n")
+ f.write(f"Max: {results['max']:.6e}\n\n")
+
+ f.write(f"{self.FIRST_ORDER_LABEL}:\n")
+ for param, value in results['first_order'].items():
+ f.write(f" {param:<30} {value:>10.4f}\n")
+ f.write("\n")
+
+ f.write(f"{self.TOTAL_ORDER_LABEL}:\n")
+ for param, value in results['total_order'].items():
+ f.write(f" {param:<30} {value:>10.4f}\n")
+ f.write("\n\n")
+
    def _create_visualizations(self, output_path: Path):
        """Create visualization plots (heatmaps, bar charts, scatter plots).

        Best-effort: silently skipped when matplotlib is not installed.
        """
        try:
            import matplotlib.pyplot as plt
            import matplotlib
            matplotlib.use('Agg')  # headless backend: never open a window
        except ImportError:
            print("Warning: matplotlib not available, skipping visualizations")
            return

        param_names = [p['name'] for p in self.parameters]

        # Create heatmap of all screening scores
        self._create_heatmap(output_path, param_names)

        for qoi_key, results in self.results.items():
            # Create bar plot of screening scores
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))

            # First-order scores
            first_order_values = [results['first_order'][p] for p in param_names]
            ax1.bar(range(len(param_names)), first_order_values)
            ax1.set_xticks(range(len(param_names)))
            ax1.set_xticklabels(param_names, rotation=45, ha='right')
            ax1.set_ylabel('First-order screening score')
            ax1.set_title('Main Effects')
            ax1.grid(axis='y', alpha=0.3)
            ax1.set_ylim([0, 1])

            # Total-order scores
            total_order_values = [results['total_order'][p] for p in param_names]
            ax2.bar(range(len(param_names)), total_order_values)
            ax2.set_xticks(range(len(param_names)))
            ax2.set_xticklabels(param_names, rotation=45, ha='right')
            ax2.set_ylabel('Total-order screening score')
            ax2.set_title('Total Effects (Main + Interactions)')
            ax2.grid(axis='y', alpha=0.3)
            ax2.set_ylim([0, 1])

            plt.suptitle(f'Sensitivity Screening: {qoi_key}')
            plt.tight_layout()

            # Save figure; sanitize the QoI name so it is a valid file name
            safe_filename = qoi_key.replace(':', '_').replace('/', '_')
            fig_file = output_path / f'{self.PER_QOI_FIGURE_PREFIX}_{safe_filename}.png'
            plt.savefig(fig_file, dpi=150, bbox_inches='tight')
            plt.close()

        # Create scatter plots of QoI vs parameters
        if self.sample_data:
            samples_df = pd.DataFrame(self.sample_data)

            for qoi_key in self.results.keys():
                if qoi_key not in samples_df.columns:
                    continue

                n_params = len(param_names)
                fig, axes = plt.subplots(1, n_params, figsize=(5*n_params, 4))
                if n_params == 1:
                    # subplots() returns a bare Axes (not an array) for a single panel
                    axes = [axes]

                for i, param_name in enumerate(param_names):
                    axes[i].scatter(samples_df[param_name], samples_df[qoi_key],
                                   alpha=0.5, s=10)
                    axes[i].set_xlabel(param_name)
                    axes[i].set_ylabel(qoi_key)
                    axes[i].grid(alpha=0.3)

                plt.suptitle(f'Parameter Effects on {qoi_key}')
                plt.tight_layout()

                safe_filename = qoi_key.replace(':', '_').replace('/', '_')
                fig_file = output_path / f'scatter_{safe_filename}.png'
                plt.savefig(fig_file, dpi=150, bbox_inches='tight')
                plt.close()
+
    def _create_heatmap(self, output_path: Path, param_names: List[str]):
        """Create heatmap visualization of all screening scores.

        Rows = parameters, Columns = quantities of interest.
        Writes one figure per score type; a no-op when matplotlib is missing.
        """
        try:
            import matplotlib.pyplot as plt
            import matplotlib
            matplotlib.use('Agg')  # headless backend
        except ImportError:
            return

        qoi_keys = list(self.results.keys())

        # Create matrices: rows = parameters, columns = quantities of interest
        first_order_matrix = np.zeros((len(param_names), len(qoi_keys)))
        total_order_matrix = np.zeros((len(param_names), len(qoi_keys)))

        for j, param_name in enumerate(param_names):
            for i, qoi_key in enumerate(qoi_keys):
                first_order_matrix[j, i] = self.results[qoi_key]['first_order'][param_name]
                total_order_matrix[j, i] = self.results[qoi_key]['total_order'][param_name]

        # Figure size grows with the number of QoIs (width) and parameters (height).
        figsize = (4 + len(qoi_keys)*1.2, len(param_names)*0.8 + 2)

        # Figure 1: First-order heatmap
        fig1, ax1 = plt.subplots(1, 1, figsize=figsize)
        im1 = ax1.imshow(first_order_matrix, aspect='auto', cmap='YlOrRd', vmin=0, vmax=1)
        ax1.set_xticks(range(len(qoi_keys)))
        ax1.set_yticks(range(len(param_names)))
        ax1.set_xticklabels(qoi_keys, rotation=45, ha='right')
        ax1.set_yticklabels(param_names, fontsize=9)
        ax1.set_title(self.FIRST_ORDER_LABEL, fontweight='bold')
        ax1.set_xlabel('Quantities of Interest')
        ax1.set_ylabel('Parameters')
        for j in range(len(param_names)):
            for i in range(len(qoi_keys)):
                value = first_order_matrix[j, i]
                # White text on dark cells, black on light ones, for contrast.
                color = 'white' if value > 0.5 else 'black'
                ax1.text(i, j, f'{value:.3f}', ha='center', va='center',
                        color=color, fontsize=8, fontweight='bold')
        cbar1 = plt.colorbar(im1, ax=ax1, fraction=0.046, pad=0.04)
        cbar1.set_label('Sensitivity Index', rotation=270, labelpad=15)
        plt.tight_layout()
        fig1.savefig(output_path / self.HEATMAP_FIRST_FILENAME, dpi=200, bbox_inches='tight')
        plt.close(fig1)

        # Figure 2: Total-order heatmap
        fig2, ax2 = plt.subplots(1, 1, figsize=figsize)
        im2 = ax2.imshow(total_order_matrix, aspect='auto', cmap='YlOrRd', vmin=0, vmax=1)
        ax2.set_xticks(range(len(qoi_keys)))
        ax2.set_yticks(range(len(param_names)))
        ax2.set_xticklabels(qoi_keys, rotation=45, ha='right')
        ax2.set_yticklabels(param_names, fontsize=9)
        ax2.set_title(self.TOTAL_ORDER_LABEL, fontweight='bold')
        ax2.set_xlabel('Quantities of Interest')
        ax2.set_ylabel('Parameters')
        for j in range(len(param_names)):
            for i in range(len(qoi_keys)):
                value = total_order_matrix[j, i]
                color = 'white' if value > 0.5 else 'black'
                ax2.text(i, j, f'{value:.3f}', ha='center', va='center',
                        color=color, fontsize=8, fontweight='bold')
        cbar2 = plt.colorbar(im2, ax=ax2, fraction=0.046, pad=0.04)
        cbar2.set_label('Sensitivity Index', rotation=270, labelpad=15)
        plt.tight_layout()
        fig2.savefig(output_path / self.HEATMAP_TOTAL_FILENAME, dpi=200, bbox_inches='tight')
        plt.close(fig2)

        print(
            f"  ✓ Created heatmaps: {self.HEATMAP_FIRST_FILENAME}, {self.HEATMAP_TOTAL_FILENAME}"
        )
+
+
def run_sensitivity_analysis(config_file: str) -> Dict:
    """
    Convenience function to run sensitivity analysis from config file.

    Builds a SensitivityAnalyzer, runs the screening, writes all result
    files, and returns the computed metrics.

    Args:
        config_file: Path to sensitivity configuration YAML file

    Returns:
        Sensitivity analysis results dictionary
    """
    analyzer = SensitivityAnalyzer(config_file)
    screening_results = analyzer.run()
    analyzer.save_results()
    return screening_results
diff --git a/applications/svZeroDTuner/svzerodtuner/simulation.py b/applications/svZeroDTuner/svzerodtuner/simulation.py
new file mode 100644
index 000000000..cc5202d25
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/simulation.py
@@ -0,0 +1,48 @@
+"""
+Shared simulation runner for sv0D Tuning Framework.
+
+Runs sv0D with given parameter values and returns the solver and output extractor.
+Used by both SV0DTuner and SensitivityAnalyzer to avoid duplicating simulation logic.
+"""
+
+import numpy as np
+import pysvzerod
+from typing import List, Dict, Any, Tuple
+
+from .parameter_handler import ParameterHandler
+from .output_extractor import OutputExtractor
+
+
def run_simulation(
    param_handler: ParameterHandler,
    parameters: List[Dict[str, Any]],
    param_values: np.ndarray,
) -> Tuple[pysvzerod.Solver, OutputExtractor]:
    """
    Run sv0D simulation with given parameter values.

    Args:
        param_handler: ParameterHandler instance with model config.
        parameters: List of parameter config dicts with 'name' (and optionally 'bounds').
        param_values: Array of parameter values in same order as parameters.

    Returns:
        Tuple of (solver, extractor). Solver has run() already called.
        Extractor is built from the solver.

    Raises:
        Exception: If parameter update or simulation fails.
    """
    # Push each value into the model config, coercing to plain Python numbers
    # (0-d numpy arrays and other numerics become floats).
    for spec, raw in zip(parameters, param_values):
        if isinstance(raw, np.ndarray):
            raw = float(raw.item())
        elif not isinstance(raw, (int, float)):
            raw = float(raw)
        param_handler.set_parameter(spec["name"], raw)

    # Build and run the solver on the updated configuration.
    config_dict = param_handler.get_config()
    solver = pysvzerod.Solver(config_dict)
    solver.run()
    return solver, OutputExtractor(solver)
diff --git a/applications/svZeroDTuner/svzerodtuner/sv0d_tuner.py b/applications/svZeroDTuner/svzerodtuner/sv0d_tuner.py
new file mode 100644
index 000000000..382d26dfc
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/sv0d_tuner.py
@@ -0,0 +1,473 @@
+"""
+Main sv0D Tuning Framework
+
+Orchestrates parameter optimization to match target output values.
+"""
+
+import os
+import json
+import time
+import numpy as np
+import pandas as pd
+import multiprocessing as mp
+from functools import partial
+from typing import Dict, List, Optional, Callable
+
+from .parameter_handler import ParameterHandler
+from .output_extractor import OutputExtractor
+from .simulation import run_simulation
+from .objective import create_objective, ObjectiveFunction
+from .optimizer import OptimizerWrapper
+from .config_handler import ConfigHandler
+from .result_handler import ResultHandler
+from .scaling import get_scaling, to_physical_array, to_opt_array
+
+from .expression_handler import Expression
+
+
+class SV0DTuner:
+ """
+ Main framework class for tuning sv0D models.
+ """
+
+ def __init__(self, config_file: str):
+ """
+ Initialize sv0D tuner with configuration file.
+
+ Args:
+ config_file: Path to YAML configuration file
+ """
+ # Load configuration
+ self.config_handler = ConfigHandler(config_file)
+ self.config = self.config_handler.get_config()
+
+ # Initialize components
+ self.param_handler = ParameterHandler(
+ self.config_handler.get_model_config_file()
+ )
+
+ # Get configuration sections
+ self.parameters = self.config_handler.get_parameters()
+ self.targets = self.config_handler.get_targets()
+ self.objective_config = self.config_handler.get_objective_config()
+ self.optimization_config = self.config_handler.get_optimization_config()
+ self.output_config = self.config_handler.get_output_config()
+
+ # Initialize optimizer
+ self.optimizer = OptimizerWrapper(**self.optimization_config)
+
+ # Initialize result handler
+ self.result_handler = ResultHandler(
+ output_dir=self.output_config['directory'],
+ save_history=self.output_config.get('save_history', True),
+ save_plots=self.output_config.get('save_plots', True)
+ )
+
+ # Create objective function
+ self.objective_func = create_objective(targets=self.targets, **self.objective_config)
+
+ # Replace expression string with Expression object for each target
+ for target in self.targets:
+ expr_str = target.get("expression")
+ if expr_str:
+ target["expression"] = Expression(
+ expr_str, target.get("type", "time_series")
+ )
+
+ # Initialize scaling objects for each parameter (optimizer space <-> physical space)
+ self._scalings = [
+ get_scaling(p.get("scaling", "identity"), tuple(p["bounds"]) if "bounds" in p else None)
+ for p in self.parameters
+ ]
+
+ # State
+ self.solver = None
+ self.extractor = None
+ self.history = []
+ self.best_value = None
+ self.best_params = None
+
+ def __getstate__(self):
+ """
+ Make tuner pickle-safe for multiprocessing objective dispatch.
+ `pysvzerod.Solver`/extractor instances are runtime-only and not pickleable.
+ """
+ state = self.__dict__.copy()
+ state["solver"] = None
+ state["extractor"] = None
+ return state
+
+ def _format_time(self, seconds: float) -> str:
+ """
+ Format time duration in human-readable format.
+
+ Args:
+ seconds: Time in seconds
+
+ Returns:
+ Formatted time string
+ """
+ if seconds < 1:
+ return f"{seconds*1000:.1f} ms"
+ elif seconds < 60:
+ return f"{seconds:.2f} sec"
+ elif seconds < 3600:
+ minutes = int(seconds // 60)
+ secs = seconds % 60
+ return f"{minutes} min {secs:.1f} sec"
+ else:
+ hours = int(seconds // 3600)
+ minutes = int((seconds % 3600) // 60)
+ return f"{hours} hr {minutes} min"
+
+ def _get_simulated_values(
+ self, param_values: np.ndarray, return_full_results: bool = False
+ ):
+ """
+ Run sv0D simulation and return simulated target values.
+
+ Args:
+ param_values: Array of parameter values
+ return_full_results: If True, return tuple of (simulated_values, results_df, summary_df)
+
+ Returns:
+ If return_full_results=False: Dictionary of simulated output values
+ If return_full_results=True: Tuple of (simulated_values dict, results_df, summary_df)
+ """
+ self.solver, self.extractor = run_simulation(
+ self.param_handler, self.parameters, param_values
+ )
+
+ # Extract all target outputs; store only simulated_values[name] per target
+ simulated_values = {}
+ times = self.extractor.get_times()
+ available_outputs = self.extractor.get_all_output_names()
+
+ # Collect output names we need from each target's expression
+ unique_outputs = {}
+ for target in self.targets:
+ expr = target.get("expression")
+ if not expr or not isinstance(expr, Expression):
+ raise ValueError(f"Target '{target['name']}' must have an Expression object")
+ for out_name in expr.output_names(available_outputs):
+ if out_name not in unique_outputs:
+ try:
+ ts = self.extractor.extract(out_name, "time_series")
+ unique_outputs[out_name] = {
+ "time_series": ts,
+ "times": times,
+ }
+ except Exception:
+ raise ValueError(f"Failed to extract output '{out_name}' for target '{target['name']}'")
+
+ # Compute simulated value per target; store under target name
+ for target in self.targets:
+ name = target["name"]
+ expr = target.get("expression")
+ try:
+ result = expr.evaluate(unique_outputs, available_outputs)
+ if expr.kind == "time_series":
+ arr, t = result
+ simulated_values[name] = arr
+ simulated_values[f"{name}_times"] = t
+ else:
+ simulated_values[name] = float(result)
+ except Exception as e:
+ raise RuntimeError(
+ f"Failed to evaluate target '{name}': {e}"
+ ) from e
+
+ # If full results requested, extract all outputs
+ if return_full_results:
+ try:
+ full_results = self.solver.get_full_result()
+ result_names = full_results['name'].unique()
+
+ # Create results DataFrame with all outputs
+ results_data = {'time': times}
+ summary_stats = []
+
+ for name in result_names:
+ try:
+ values = self.solver.get_single_result(name)
+ results_data[name] = values
+
+ # Calculate statistics
+ stats = {
+ 'output_name': name,
+ 'min': np.min(values),
+ 'max': np.max(values),
+ 'mean': np.mean(values),
+ 'std': np.std(values)
+ }
+ summary_stats.append(stats)
+ except Exception:
+ pass # Skip outputs that can't be extracted
+
+ # Create DataFrames
+ results_df = pd.DataFrame(results_data)
+ summary_df = pd.DataFrame(summary_stats)
+
+ return simulated_values, results_df, summary_df
+ except Exception as e:
+ print(f"Warning: Could not extract full results: {e}")
+ return simulated_values, None, None
+
+ return simulated_values
+
+ def _objective_function(self, param_values: np.ndarray) -> float:
+ """
+ Objective function wrapper for optimization.
+
+ Args:
+ param_values: Array of parameter values
+
+ Returns:
+ Objective function value
+ """
+ try:
+ # Run simulation
+ simulated_values = self._get_simulated_values(param_values)
+
+ # Compute objective
+ obj_value = self.objective_func.compute(simulated_values)
+
+ return obj_value
+
+ except Exception as e:
+ # Return large value if simulation fails
+ print(f"Warning: Simulation failed: {e}")
+ return float(1e10)
+
+ def optimize(self) -> Dict:
+ """
+ Run optimization.
+
+ Returns:
+ Dictionary with optimization results
+ """
+ print("Starting sv0D parameter optimization...")
+ print(f"Parameters to optimize: {[p['name'] for p in self.parameters]}")
+ print(f"Targets: {[t['name'] for t in self.targets]}")
+ print(f"Optimizer configuration: {self.optimization_config}")
+ if self.optimization_config['algorithm'] == 'differential_evolution':
+ n_workers = self.optimization_config.get('workers', 1)
+ if n_workers == -1:
+ n_workers = mp.cpu_count()
+ print(f"Running in parallel with {n_workers} workers")
+ print()
+
+ # Prepare optimization inputs
+ param_names = [p['name'] for p in self.parameters]
+ bounds = [tuple(float(b) for b in p['bounds']) for p in self.parameters]
+
+ # Get initial parameter values
+ print(f"Using initial parameter values from JSON file:")
+ for name in param_names:
+ print(f"\t{name}: {self.param_handler.get_parameter(name)}")
+ x0 = np.array([self.param_handler.get_parameter(name) for name in param_names])
+
+ # Create scaling functions for optimizer space <-> physical space
+ to_opt = partial(to_opt_array, scalings=self._scalings)
+ to_phys = partial(to_physical_array, scalings=self._scalings)
+ # Start timing
+ start_time = time.time()
+
+ # Run optimization, allow graceful interruption
+ interrupted = False
+ result = None
+ try:
+ result = self.optimizer.optimize(
+ objective_func=self._objective_function,
+ param_names=param_names,
+ bounds=bounds,
+ x0=x0,
+ parameters=self.parameters,
+ param_scaling_to_opt_space=to_opt,
+ param_scaling_to_phys_space=to_phys,
+ )
+ except KeyboardInterrupt:
+ interrupted = True
+ print("\n\n" + "="*70)
+ print("OPTIMIZATION INTERRUPTED BY USER (Ctrl+C)")
+ print("="*70)
+ print("Processing best results found so far...")
+ print()
+
+ # Print termination reason when optimization completed normally
+ if result is not None and not interrupted:
+ print("\n" + "="*70)
+ print("OPTIMIZATION TERMINATION")
+ print("="*70)
+ success = getattr(result, 'success', None)
+ message = getattr(result, 'message', None)
+ status = getattr(result, 'status', None)
+ if message:
+ print(f"Reason: {message}")
+ if success is not None:
+ print(f"Success: {success}")
+ if status is not None:
+ print(f"Status code: {status}")
+ nfev = getattr(result, 'nfev', None)
+ if nfev is not None:
+ print(f"Function evaluations: {nfev}")
+ nit = getattr(result, 'nit', None)
+ if nit is not None:
+ print(f"Algorithm iterations: {nit}")
+ print("="*70)
+
+ # End timing
+ end_time = time.time()
+ total_time = end_time - start_time
+
+ # Get results
+ self.history = self.optimizer.get_history()
+ self.best_value, self.best_params = self.optimizer.get_best()
+
+ if result is not None:
+ n_evaluations = getattr(result, 'nfev', len(self.history) if self.history else 0)
+ n_iterations = getattr(result, 'nit', len(self.history) if self.history else 0)
+ else:
+ # Interrupted - use best from history; nfev unknown
+ n_evaluations = None # not available when interrupted
+ n_iterations = len(self.history)
+ if interrupted and self.best_value is not None:
+ print(f"Best objective value found: {self.best_value:.6e}")
+ print(f"Total iterations completed: {n_iterations}")
+
+ # Calculate timing statistics
+ avg_time_per_eval = total_time / n_evaluations if n_evaluations and n_evaluations > 0 else 0
+ timing_info = {
+ 'total_time_seconds': total_time,
+ 'total_time_formatted': self._format_time(total_time),
+ 'n_evaluations': n_evaluations,
+ 'avg_time_per_evaluation_seconds': avg_time_per_eval,
+ 'avg_time_per_evaluation_formatted': self._format_time(avg_time_per_eval)
+ }
+ avg_time_per_iter = total_time / n_iterations if n_iterations > 0 else 0
+ timing_info.update({
+ 'n_iterations': n_iterations,
+ 'avg_time_per_iteration_seconds': avg_time_per_iter,
+ 'avg_time_per_iteration_formatted': self._format_time(avg_time_per_iter)
+ })
+
+ # Print timing summary
+ print(f"\n{'='*70}")
+ print(f"TIMING SUMMARY")
+ print(f"{'='*70}")
+ print(f"Total optimization time: {timing_info['total_time_formatted']}")
+ print(f"Total iterations: {n_iterations}")
+ print(f"Total function evaluations: {n_evaluations if n_evaluations is not None else 'not available (interrupted)'}")
+ print(f"Average time per iteration: {timing_info['avg_time_per_iteration_formatted']}")
+ if n_evaluations is not None:
+ print(f"Average time per evaluation: {timing_info['avg_time_per_evaluation_formatted']}")
+ else:
+ print(f"Average time per evaluation: not available (interrupted)")
+ print(f"{'='*70}")
+
+ # Check if optimization succeeded
+ if self.best_params is None:
+ if interrupted:
+ error_msg = (
+ "Cannot process results: optimization was interrupted before any results were obtained.\n"
+ "Try interrupting later to allow more progress."
+ )
+ else:
+ error_msg = "Optimization failed: no best parameters found"
+ raise RuntimeError(error_msg)
+
+ # Run final simulation with best parameters and get full results
+ print("\nRunning final simulation with best parameters...")
+ should_extract_full = self.output_config.get('save_plots', True)
+
+ if should_extract_full:
+ final_simulated_values, optimized_results_df, optimized_summary_df = \
+ self._get_simulated_values(self.best_params, return_full_results=True)
+ else:
+ final_simulated_values = self._get_simulated_values(self.best_params)
+ optimized_results_df = None
+ optimized_summary_df = None
+
+ # Save results
+ if self.output_config.get('save_final_config', True):
+ # Generate output filename based on original model config filename
+ original_filename = os.path.basename(self.config['model']['config_file'])
+ name_without_ext, ext = os.path.splitext(original_filename)
+ optimized_filename = f"{name_without_ext}_optimized{ext}"
+
+ final_config = self.param_handler.get_config()
+ self.result_handler.save_final_config(
+ final_config,
+ filename=optimized_filename
+ )
+
+ # Generate report
+ self.result_handler.create_summary_report(
+ history=self.history,
+ param_names=param_names,
+ best_value=self.best_value,
+ best_params=self.best_params,
+ parameters=self.parameters,
+ targets=self.targets,
+ simulated_values=final_simulated_values,
+ optimized_results_df=optimized_results_df,
+ optimized_summary_df=optimized_summary_df,
+ timing_info=timing_info
+ )
+
+ return {
+ 'success': result.success if result else False,
+ 'message': result.message if result else ('Interrupted by user' if interrupted else 'Unknown error'),
+ 'best_value': self.best_value,
+ 'best_params': dict(zip(param_names, self.best_params)),
+ 'history': self.history,
+ 'result': result,
+ 'interrupted': interrupted
+ }
+
+ def evaluate(self, param_values: Optional[Dict[str, float]] = None) -> Dict:
+ """
+ Evaluate objective function without optimization.
+
+ Args:
+ param_values: Dictionary of parameter values (optional, uses current if None)
+
+ Returns:
+ Dictionary with evaluation results
+ """
+ if param_values:
+ # Update parameters
+ for name, value in param_values.items():
+ self.param_handler.set_parameter(name, value)
+
+ # Get current parameter values
+ param_names = [p['name'] for p in self.parameters]
+ current_values = np.array([
+ self.param_handler.get_parameter(name) for name in param_names
+ ])
+
+ # Run simulation
+ simulated_values = self._get_simulated_values(current_values)
+
+ # Compute objective
+ obj_value = self.objective_func.compute(simulated_values)
+
+ return {
+ 'objective_value': obj_value,
+ 'simulated_values': simulated_values,
+ 'parameters': dict(zip(param_names, current_values))
+ }
+
+
+def run_optimization(config_file: str) -> Dict:
+ """
+ Convenience function to run optimization from config file.
+
+ Args:
+ config_file: Path to YAML configuration file
+
+ Returns:
+ Optimization results dictionary
+ """
+ tuner = SV0DTuner(config_file)
+ return tuner.optimize()
diff --git a/applications/svZeroDTuner/svzerodtuner/visualization.py b/applications/svZeroDTuner/svzerodtuner/visualization.py
new file mode 100644
index 000000000..688f5f09c
--- /dev/null
+++ b/applications/svZeroDTuner/svzerodtuner/visualization.py
@@ -0,0 +1,441 @@
+"""
+Visualization functions for sv0D Tuning Framework
+
+Plots optimization history including objective value convergence and parameter evolution.
+"""
+
+import os
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+from matplotlib.transforms import blended_transform_factory
+from scipy.interpolate import interp1d
+from typing import List, Dict, Optional
+
+
+def plot_objective_history(
+ history: List[Dict],
+ output_file: Optional[str] = None,
+ show: bool = False
+):
+ """
+ Plot objective value vs function evaluation.
+
+ Args:
+ history: Optimization history list
+ output_file: Path to save plot (optional)
+ show: Whether to display plot
+ """
+ if not history:
+ return
+
+ steps = [h.get('iteration', i) for i, h in enumerate(history)]
+ objectives = [h['objective'] for h in history]
+
+ plt.figure(figsize=(10, 6))
+ plt.plot(steps, objectives, 'ko-', linewidth=2, markersize=6, label='Objective Value')
+ plt.xlabel('Iteration', fontsize=12)
+ plt.ylabel('Objective Value', fontsize=12)
+ plt.title('Optimization Convergence', fontsize=14, fontweight='bold')
+ plt.grid(True, alpha=0.3)
+ plt.legend(fontsize=11)
+
+ # Add best value annotation
+ best_idx = np.argmin(objectives)
+ best_step = steps[best_idx]
+ best_obj = objectives[best_idx]
+ plt.plot(best_step, best_obj, 'ro', markersize=10, label=f'Best: {best_obj:.6e}')
+ plt.annotate(
+ f'Best: {best_obj:.6e}',
+ xy=(best_step, best_obj),
+ xytext=(10, 10),
+ textcoords='offset points',
+ bbox=dict(boxstyle='round,pad=0.5', facecolor='yellow', alpha=0.7),
+ arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')
+ )
+
+ # Set y-axis limits: slightly below 0 to initial objective value (with small margin)
+ initial_obj = objectives[0]
+    y_min = -initial_obj * 0.02  # extend 2% of the initial objective below zero
+ y_max = initial_obj * 1.05 # Add 5% margin at top
+ plt.ylim(y_min, y_max)
+
+ plt.tight_layout()
+
+ if output_file:
+ plt.savefig(output_file, dpi=300, bbox_inches='tight')
+ print(f"Saved objective history plot to {output_file}")
+
+ if show:
+ plt.show()
+ else:
+ plt.close()
+
+
+def plot_parameter_evolution(
+ history: List[Dict],
+ param_names: List[str],
+ output_file: Optional[str] = None,
+ show: bool = False
+):
+ """
+ Plot parameter evolution over function evaluations.
+
+ Args:
+ history: Optimization history list
+ param_names: List of parameter names
+ output_file: Path to save plot (optional)
+ show: Whether to display plot
+ """
+ if not history or not param_names:
+ return
+
+ n_params = len(param_names)
+ n_cols = min(3, n_params)
+ n_rows = (n_params + n_cols - 1) // n_cols
+
+ fig, axes = plt.subplots(n_rows, n_cols, figsize=(5*n_cols, 4*n_rows))
+ if n_params == 1:
+ axes = [axes]
+ elif n_rows == 1:
+ axes = axes if isinstance(axes, np.ndarray) else [axes]
+ else:
+ axes = axes.flatten()
+
+ steps = [h.get('iteration', i) for i, h in enumerate(history)]
+
+ for idx, param_name in enumerate(param_names):
+ ax = axes[idx]
+ param_values = [h['parameters'][param_name] for h in history]
+
+ ax.plot(steps, param_values, 'ko-', linewidth=2, markersize=6)
+ ax.set_xlabel('Iteration', fontsize=10)
+ ax.set_ylabel(param_name, fontsize=10)
+ ax.set_title(f'Parameter: {param_name}', fontsize=11, fontweight='bold')
+ ax.grid(True, alpha=0.3)
+
+ # Add initial and final values
+ ax.axhline(y=param_values[0], color='g', linestyle='--', alpha=0.5, label='Initial')
+ ax.axhline(y=param_values[-1], color='r', linestyle='--', alpha=0.5, label='Final')
+ ax.legend(fontsize=9)
+
+ # Hide unused subplots
+ for idx in range(n_params, len(axes)):
+ axes[idx].axis('off')
+
+ plt.tight_layout()
+
+ if output_file:
+ plt.savefig(output_file, dpi=300, bbox_inches='tight')
+ print(f"Saved parameter evolution plot to {output_file}")
+
+ if show:
+ plt.show()
+ else:
+ plt.close()
+
+
+def _compute_percent_error(target_value: float, sim_value: float) -> Optional[float]:
+    """Compute percent error: 100 * (sim - target) / target. Returns None when |target| < 1e-14."""
+ if abs(target_value) < 1e-14:
+ return None
+ return 100.0 * (sim_value - target_value) / target_value
+
+
+def _get_range_for_target(target: Dict):
+    """Return (lo, hi) arrays from the target's 'range_lo'/'range_hi' keys, or None if either is absent."""
+ if 'range_lo' in target and 'range_hi' in target:
+ lo, hi = np.asarray(target['range_lo']), np.asarray(target['range_hi'])
+ return (lo, hi)
+ return None
+
+
+def plot_target_comparison(
+ targets: List[Dict],
+ simulated_values: Dict,
+ output_file: Optional[str] = None,
+ show: bool = False
+):
+ """
+ Plot target vs simulated values comparison.
+
+ Args:
+ targets: List of target specifications
+ simulated_values: Dictionary of simulated values
+ output_file: Path to save plot (optional)
+ show: Whether to display plot
+ """
+ n_targets = len(targets)
+ if n_targets == 0:
+ return
+
+ csv_rows = []
+ n_cols = min(3, n_targets)
+ n_rows = (n_targets + n_cols - 1) // n_cols
+
+ fig, axes = plt.subplots(n_rows, n_cols, figsize=(5*n_cols, 4*n_rows))
+ if n_targets == 1:
+ axes = [axes]
+ elif n_rows == 1:
+ axes = axes if isinstance(axes, np.ndarray) else [axes]
+ else:
+ axes = axes.flatten()
+
+ for idx, target in enumerate(targets):
+ ax = axes[idx]
+ name = target['name']
+ target_type = target.get('type', 'time_series')
+
+ if name not in simulated_values:
+ ax.text(0.5, 0.5, f'No data for {name}',
+ ha='center', va='center', transform=ax.transAxes)
+ ax.set_title(f'Target: {name}', fontsize=11, fontweight='bold')
+ csv_rows.append({'name': name, 'type': target_type, 'time': 'N/A', 'target_value': '', 'simulated_value': '', 'target_range': '', 'percent_error': 'N/A'})
+ continue
+
+ sim_value = simulated_values[name]
+
+ if target_type == 'time_series':
+ # Plot time series: use only range info; target_values = pointwise (lo+hi)/2
+ rng = _get_range_for_target(target)
+ if rng is None or 'target_times' not in target:
+ ax.text(0.5, 0.5, f'No range data for {name}', ha='center', va='center', transform=ax.transAxes)
+ ax.set_title(f'Target: {name}', fontsize=11, fontweight='bold')
+ csv_rows.append({'name': name, 'type': 'time_series', 'time': 'N/A', 'target_value': '', 'simulated_value': '', 'target_range': '', 'percent_error': 'N/A'})
+ continue
+ lo, hi = rng
+ target_times = np.array(target['target_times'])
+ target_values = (lo + hi) / 2.0 # pointwise midpoint
+ sim_times = simulated_values.get(f'{name}_times', None)
+ sim_val_arr = np.asarray(sim_value)
+ ax.fill_between(target_times, lo, hi, alpha=0.2, color='blue', label='Target range')
+ ax.plot(target_times, target_values, 'b-o', linewidth=2, markersize=4, label='Target', alpha=0.7)
+ if sim_times is not None:
+ ax.plot(sim_times, sim_value, 'r--', linewidth=2, label='Simulated', alpha=0.7)
+ else:
+ ax.plot(range(len(sim_val_arr)), sim_val_arr, 'r--', linewidth=2, label='Simulated', alpha=0.7)
+ ax.set_xlabel('Time', fontsize=10)
+ ax.set_ylabel(name, fontsize=10)
+ # Compute MAPE and CSV rows (time series: one row per time point)
+ if len(target_times) > 0 and len(sim_val_arr) > 1:
+ try:
+ st = np.asarray(sim_times) if sim_times is not None else np.linspace(0, 1, len(sim_val_arr))
+ interp_func = interp1d(
+ st, sim_val_arr,
+ kind='linear', bounds_error=False, fill_value='extrapolate'
+ )
+ sim_interp = interp_func(target_times)
+ valid = np.isfinite(target_values) & (np.abs(target_values) > 1e-14)
+ if np.any(valid):
+ pct_errors = np.array([_compute_percent_error(float(target_values[i]), float(sim_interp[i])) for i in range(len(target_times))])
+ mape_vals = [abs(p) for p in pct_errors if p is not None]
+ mape = np.mean(mape_vals) if mape_vals else None
+ err_str = f'MAPE: {mape:.2f}%' if mape is not None else None
+ for i in range(len(target_times)):
+ range_str = f"[{float(lo[i]):.6e}, {float(hi[i]):.6e}]"
+ pct = pct_errors[i] if pct_errors[i] is not None else 'N/A'
+ csv_rows.append({
+ 'name': name, 'type': 'time_series',
+ 'time': target_times[i], 'target_value': target_values[i],
+ 'simulated_value': sim_interp[i], 'target_range': range_str, 'percent_error': pct
+ })
+ else:
+ err_str = None
+ except Exception:
+ err_str = None
+ else:
+ err_str = None
+ if err_str is None and len(target_times) > 0 and len(sim_val_arr) > 0:
+ st = np.asarray(sim_times) if sim_times is not None else np.linspace(0, 1, len(sim_val_arr))
+ try:
+ interp_func = interp1d(st, sim_val_arr, kind='linear', bounds_error=False, fill_value='extrapolate')
+ sim_interp = interp_func(target_times)
+ except Exception:
+ sim_interp = np.full_like(target_values, np.nan)
+ pct_errors = [_compute_percent_error(float(target_values[i]), float(sim_interp[i])) for i in range(len(target_times))]
+ for i in range(len(target_times)):
+ range_str = f"[{float(lo[i]):.6e}, {float(hi[i]):.6e}]"
+ pct = pct_errors[i] if pct_errors[i] is not None else 'N/A'
+ csv_rows.append({
+ 'name': name, 'type': 'time_series',
+ 'time': target_times[i], 'target_value': target_values[i],
+ 'simulated_value': sim_interp[i], 'target_range': range_str, 'percent_error': pct
+ })
+ ax.legend(fontsize=9)
+ ax.grid(True, alpha=0.3)
+ ax.set_title(f'Target: {name}', fontsize=11, fontweight='bold')
+ if err_str:
+ ax.text(0.98, 0.05, f'Mean Absolute Percent Error: {mape:.2f}%', transform=ax.transAxes, fontsize=10,
+ ha='right', va='bottom', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8))
+ continue
+ else:
+ # Plot scalar: use only range info; target_value = (lo+hi)/2, error bar shows range
+ rng = _get_range_for_target(target)
+ if rng is None:
+ ax.text(0.5, 0.5, f'No range data for {name}', ha='center', va='center', transform=ax.transAxes)
+ ax.set_title(f'Target: {name}', fontsize=11, fontweight='bold')
+ csv_rows.append({'name': name, 'type': target_type, 'time': 'N/A', 'target_value': '', 'simulated_value': '', 'target_range': '', 'percent_error': 'N/A'})
+ continue
+ lo, hi = float(rng[0].flat[0]), float(rng[1].flat[0])
+ target_value = (lo + hi) / 2.0
+ range_str = f"[{lo:.6e}, {hi:.6e}]"
+ if isinstance(sim_value, np.ndarray):
+ sim_scalar = float(sim_value.item() if sim_value.size == 1 else sim_value[0])
+ else:
+ sim_scalar = float(sim_value)
+ ax.bar(['Target', 'Simulated'], [target_value, sim_scalar], color=['blue', 'red'], alpha=0.7,
+ yerr=[[target_value - lo, 0], [hi - target_value, 0]], capsize=5)
+ ax.set_ylabel('Value', fontsize=10)
+ ax.grid(True, alpha=0.3)
+ pct_err = _compute_percent_error(target_value, sim_scalar)
+ err_str = f'Error: {pct_err:.2f}%' if pct_err is not None else None
+ csv_rows.append({'name': name, 'type': target_type, 'time': 'N/A', 'target_value': target_value, 'simulated_value': sim_scalar, 'target_range': range_str, 'percent_error': pct_err if pct_err is not None else 'N/A'})
+ ax.set_title(name, fontsize=11, fontweight='bold')
+ if err_str:
+ trans = blended_transform_factory(ax.transData, ax.transAxes)
+ ax.text(1, 0.05, err_str, transform=trans, fontsize=10,
+ ha='center', va='bottom', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8))
+ continue
+
+ # Hide unused subplots
+ for idx in range(n_targets, len(axes)):
+ axes[idx].axis('off')
+
+ plt.tight_layout()
+
+ if output_file:
+ plt.savefig(output_file, dpi=300, bbox_inches='tight')
+ print(f"Saved target comparison plot to {output_file}")
+ if csv_rows:
+ csv_path = os.path.splitext(output_file)[0] + '.csv'
+ col_order = ['name', 'type', 'time', 'target_value', 'simulated_value', 'target_range', 'percent_error']
+ pd.DataFrame(csv_rows).to_csv(csv_path, index=False, columns=col_order)
+ print(f"Saved target comparison data to {csv_path}")
+
+ if show:
+ plt.show()
+ else:
+ plt.close()
+
+
+def plot_simulation_results(
+ results_df: pd.DataFrame,
+ summary_df: pd.DataFrame,
+ output_dir: str,
+ title_prefix: str = ""
+):
+ """
+ Create comprehensive plots of simulation results.
+
+ Args:
+ results_df: DataFrame with time series results (must have 'time' column)
+ summary_df: DataFrame with summary statistics (output_name, min, max, mean, std)
+ output_dir: Directory to save plots
+ title_prefix: Prefix for plot titles (e.g., "Baseline", "Optimized")
+ """
+ os.makedirs(output_dir, exist_ok=True)
+
+ # Group outputs by category for better visualization
+ pressure_outputs = [col for col in results_df.columns
+ if (col.lower().startswith('pressure') or col.lower().startswith('pressure_c'))
+ and col != 'time']
+ flow_outputs = [col for col in results_df.columns
+ if col.lower().startswith('flow:')
+ and col != 'time']
+ volume_outputs = [col for col in results_df.columns if
+ (col.lower().startswith('vc:') or col.lower().startswith('volume:') or 'volume' in col.lower())
+ and 'pressure' not in col.lower() and 'flow' not in col.lower() and col != 'time']
+
+ # Use default axes color cycle; cycle linestyle when colors repeat
+ default_colors = plt.rcParams['axes.prop_cycle'].by_key().get('color', list(plt.cm.tab10.colors))
+ linestyles = ['-', '--', ':']
+
+ def _plot_outputs(ax, outputs):
+ for i, output in enumerate(outputs):
+ color = default_colors[i % len(default_colors)]
+ ls = linestyles[(i // len(default_colors)) % len(linestyles)]
+ ax.plot(results_df['time'], results_df[output], label=output, linewidth=1.5, color=color, linestyle=ls)
+
+ # Plot 1: All Pressures
+ if pressure_outputs:
+ fig, ax = plt.subplots(figsize=(14, 8))
+ _plot_outputs(ax, pressure_outputs)
+ ax.set_xlabel('Time', fontsize=12)
+ ax.set_ylabel('Pressure', fontsize=12)
+ title = f'{title_prefix} Pressures ({len(pressure_outputs)} outputs)' if title_prefix else f'All Pressures ({len(pressure_outputs)} outputs)'
+ ax.set_title(title, fontsize=14, fontweight='bold')
+ ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=7)
+ ax.grid(True, alpha=0.3)
+ plt.tight_layout()
+ plt.savefig(os.path.join(output_dir, 'pressures.png'), dpi=150, bbox_inches='tight')
+ plt.close()
+
+ # Plot 2: All Flows
+ if flow_outputs:
+ fig, ax = plt.subplots(figsize=(14, 8))
+ _plot_outputs(ax, flow_outputs)
+ ax.set_xlabel('Time', fontsize=12)
+ ax.set_ylabel('Flow', fontsize=12)
+ title = f'{title_prefix} Flows ({len(flow_outputs)} outputs)' if title_prefix else f'All Flows ({len(flow_outputs)} outputs)'
+ ax.set_title(title, fontsize=14, fontweight='bold')
+ ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=7)
+ ax.grid(True, alpha=0.3)
+ plt.tight_layout()
+ plt.savefig(os.path.join(output_dir, 'flows.png'), dpi=150, bbox_inches='tight')
+ plt.close()
+
+ # Plot 3: All Volumes
+ if volume_outputs:
+ fig, ax = plt.subplots(figsize=(14, 8))
+ _plot_outputs(ax, volume_outputs)
+ ax.set_xlabel('Time', fontsize=12)
+ ax.set_ylabel('Volume', fontsize=12)
+ title = f'{title_prefix} Volumes ({len(volume_outputs)} outputs)' if title_prefix else f'All Volumes ({len(volume_outputs)} outputs)'
+ ax.set_title(title, fontsize=14, fontweight='bold')
+ ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=7)
+ ax.grid(True, alpha=0.3)
+ plt.tight_layout()
+ plt.savefig(os.path.join(output_dir, 'volumes.png'), dpi=150, bbox_inches='tight')
+ plt.close()
+
+ print(f"Saved simulation plots to: {output_dir}/")
+
+
+def create_optimization_report(
+ history: List[Dict],
+ param_names: List[str],
+ best_value: float,
+ best_params: np.ndarray,
+ output_dir: str
+):
+ """
+ Create comprehensive optimization report with all visualizations.
+
+ Args:
+ history: Optimization history
+ param_names: Parameter names
+ best_value: Best objective value
+ best_params: Best parameter values
+ output_dir: Output directory for plots
+ """
+ os.makedirs(output_dir, exist_ok=True)
+
+ # Plot objective history
+ plot_objective_history(
+ history,
+ output_file=os.path.join(output_dir, 'objective_history.png')
+ )
+
+ # Plot parameter evolution
+ if param_names:
+ plot_parameter_evolution(
+ history,
+ param_names,
+ output_file=os.path.join(output_dir, 'parameter_evolution.png')
+ )
+
+ print(f"\nOptimization report saved to {output_dir}")
+ print(f"Best objective value: {best_value:.6e}")
+ print(f"Best parameters:")
+ for name, value in zip(param_names, best_params):
+ print(f" {name}: {value:.6e}")
diff --git a/docs/pages/developer_guide.md b/docs/pages/developer_guide.md
index adf35c3cb..ff12e3d44 100644
--- a/docs/pages/developer_guide.md
+++ b/docs/pages/developer_guide.md
@@ -15,11 +15,14 @@ of svZeroDSolver, namely:
* svZeroDCalibrator in `svzerodcalibrator.cpp`
* svZeroDVisualization for visualizing 0D models and results
* svZeroDGUI for creating new 0D models grahically.
+* svZeroDTuner for parameter tuning and sensitivity analysis of 0D models.
[Architecture for svZeroDVisualization](@ref visualization).
[Architecture for svZeroDGUI](@ref GUI).
+[Guide for svZeroDTuner](@ref tuner).
+
# Build in debug mode
diff --git a/docs/pages/main.md b/docs/pages/main.md
index e001e6d0b..a4bc6f7b7 100644
--- a/docs/pages/main.md
+++ b/docs/pages/main.md
@@ -11,6 +11,7 @@ Below are links to important sections of the documentation:
* [User guide for svZeroDCalibrator](https://simvascular.github.io/documentation/rom_simulation.html#0d-solver-calibrator)
* [User guide for svZeroDVisualization](https://simvascular.github.io/documentation/rom_simulation.html#0d-solver-visualization)
* [User guide for svZeroDGUI](https://simvascular.github.io/documentation/rom_simulation.html#0d-solver-gui)
+* [User guide for svZeroDTuner](@ref tuner)
# Developer Guide
diff --git a/docs/pages/tuner.md b/docs/pages/tuner.md
new file mode 100644
index 000000000..089065a29
--- /dev/null
+++ b/docs/pages/tuner.md
@@ -0,0 +1,81 @@
+@page tuner svZeroDTuner Guide
+
+[TOC]
+
+# About
+
+svZeroDTuner is a Python module and command-line tool for calibrating svZeroDSolver 0D model parameters against target hemodynamic quantities. It supports:
+
+- Parameter optimization from YAML configuration files
+- Scalar and time-series targets using expression-based output extraction
+- Sensitivity analysis for screening influential parameters
+
+The implementation is available in the `applications/svZeroDTuner` folder.
+
+# When to Use / When Not to Use
+
+Use svZeroDTuner when you need to:
+
+- Fit uncertain model parameters to measured pressure, flow, or volume data
+- Enforce physiologic target ranges rather than strict point matching
+- Rank parameter influence before deciding what to calibrate
+
+Do not use svZeroDTuner as a replacement for:
+
+- 0D model authoring (use [svZeroDGUI](@ref GUI))
+- Post-processing and network inspection (use [svZeroDVisualization](@ref visualization))
+- Fundamental model-structure changes (update the model itself first)
+
+# Quickstart
+
+## CLI workflow
+
+Run optimization:
+
+```bash
+svzerodtuner optimize applications/svZeroDTuner/examples/right_heart_pa/tuning_differential_evolution.yaml
+```
+
+Run sensitivity analysis:
+
+```bash
+svzerodtuner sensitivity-analysis applications/svZeroDTuner/examples/closed_loop_Regazzoni/sensitivity.yaml
+```
+
+Alias commands are also supported:
+
+- `svzerodtuner run <config.yaml>` (alias for `optimize`)
+- `svzerodtuner sensitivity <config.yaml>` (alias for `sensitivity-analysis`)
+
+## Python API workflow
+
+```python
+from svzerodtuner.sv0d_tuner import SV0DTuner
+
+# Optimization from YAML
+result = SV0DTuner("applications/svZeroDTuner/examples/right_heart_pa/tuning_nelder_mead.yaml").optimize()
+print(result["success"], result["best_value"])
+```
+
+# Workflow
+
+A typical svZeroDTuner workflow is:
+
+1. Run a baseline simulation and inspect available outputs.
+2. Define tunable parameters and bounds.
+3. Define target quantities (scalar and/or time-series).
+4. Choose objective norm and optimization algorithm.
+5. Run optimization and inspect history/termination diagnostics.
+6. Validate the optimized model against targets and physiology.
+7. Visualize model outputs and network behavior.
+
+See [Worked Examples](@ref tuner_examples) for end-to-end templates.
+
+# Related Tools
+
+- [svZeroDVisualization Guide](@ref visualization) for plotting and network inspection.
+- [svZeroDGUI Guide](@ref GUI) for graphical model construction.
+- [svZeroDTuner Concepts](@ref tuner_concepts)
+- [svZeroDTuner Configuration Reference](@ref tuner_configuration)
+- [svZeroDTuner API Reference](@ref tuner_api)
+- [svZeroDTuner Troubleshooting](@ref tuner_troubleshooting)
diff --git a/docs/pages/tuner_api.md b/docs/pages/tuner_api.md
new file mode 100644
index 000000000..af1db5f52
--- /dev/null
+++ b/docs/pages/tuner_api.md
@@ -0,0 +1,77 @@
+@page tuner_api svZeroDTuner API Reference
+
+[TOC]
+
+# CLI Commands
+
+Command-line entrypoint:
+
+```bash
+svzerodtuner
+```
+
+Supported commands:
+
+- `optimize <config.yaml>`: run optimization from tuning YAML
+- `run <config.yaml>`: alias for `optimize`
+- `sensitivity-analysis <config.yaml>`: run sensitivity analysis from YAML
+- `sensitivity <config.yaml>`: alias for `sensitivity-analysis`
+
+# Python Entry Points
+
+Primary optimization API:
+
+- `SV0DTuner(config_file: str)`
+- `SV0DTuner.optimize() -> Dict`
+- `SV0DTuner.evaluate(param_values: Optional[Dict[str, float]] = None) -> Dict`
+- `run_optimization(config_file: str) -> Dict`
+
+Primary sensitivity API:
+
+- `SensitivityAnalyzer(config_file: str)`
+- `SensitivityAnalyzer.run() -> Dict`
+- `SensitivityAnalyzer.save_results(output_dir: Optional[str] = None) -> None`
+- `run_sensitivity_analysis(config_file: str) -> Dict`
+
+Supporting components:
+
+- `ConfigHandler` (YAML loading/validation)
+- `ParameterHandler` (model parameter get/set by name)
+- `OutputExtractor` (output extraction by exact solver output name)
+- `OptimizerWrapper` (SciPy optimizer orchestration)
+- `ObjectiveFunction` (range-based target penalties)
+- `Expression` (expression parsing/evaluation for targets and QoIs)
+
+# Data Contracts
+
+Optimization return dictionary includes:
+
+- `success` (`bool`)
+- `message` (`str`)
+- `best_value` (`float`)
+- `best_params` (`Dict[str, float]`)
+- `history` (`List[Dict]`)
+- `result` (SciPy `OptimizeResult`)
+- `interrupted` (`bool`)
+
+Evaluation return dictionary includes:
+
+- `objective_value` (`float`)
+- `simulated_values` (`Dict`)
+- `parameters` (`Dict[str, float]`)
+
+Sensitivity result dictionary is keyed by QoI name and contains:
+
+- `first_order` (`Dict[str, float]`)
+- `total_order` (`Dict[str, float]`)
+- `mean`, `std`, `min`, `max` (`float`)
+
+# Source Pointers
+
+- CLI: `applications/svZeroDTuner/svzerodtuner/cli.py`
+- Optimization orchestrator: `applications/svZeroDTuner/svzerodtuner/sv0d_tuner.py`
+- Sensitivity: `applications/svZeroDTuner/svzerodtuner/sensitivity.py`
+- Configuration: `applications/svZeroDTuner/svzerodtuner/config_handler.py`
+- Objective and optimizer internals:
+ - `applications/svZeroDTuner/svzerodtuner/objective.py`
+ - `applications/svZeroDTuner/svzerodtuner/optimizer.py`
diff --git a/docs/pages/tuner_concepts.md b/docs/pages/tuner_concepts.md
new file mode 100644
index 000000000..119807812
--- /dev/null
+++ b/docs/pages/tuner_concepts.md
@@ -0,0 +1,71 @@
+@page tuner_concepts svZeroDTuner Concepts
+
+[TOC]
+
+# What Is Being Tuned
+
+svZeroDTuner adjusts model parameters identified by name using the format `Block.Parameter`, for example:
+
+- `LV.Emax`
+- `AR_SYS.C`
+- `RPA.R_poiseuille`
+
+Names are resolved against svZeroD model JSON structures (chambers, vessels, valves, boundary conditions, and selected global sections).
+
+# Targets and Metrics
+
+Targets are defined in the YAML `targets` section and are computed from simulation outputs using expressions.
+
+Two target types are supported:
+
+- `scalar`: one value per target (for example `np.max(pressure:AV:AR_SYS)`)
+- `time_series`: waveform target from a CSV file (`time`, `value`)
+
+Expressions can combine outputs with `numpy` operations, and output names must match available solver output labels.
+
+# Loss / Objective
+
+The objective is built from weighted relative errors across all targets.
+
+- `L1`: sum of absolute relative errors
+- `L2`: Euclidean norm of the relative-error vector
+
+Targets are internally treated as allowed ranges `[lo, hi]`:
+
+- Point target: `target_value` implies `lo = hi = value`
+- Relative range: `relative_bounds` expands around target value
+- Explicit range: `target_range` provides `[min, max]`
+
+Penalty is zero when simulated values are within range, and positive only when values are outside bounds.
+
+# Constraints / Bounds / Scaling
+
+Each parameter requires `bounds: [min, max]` and may define `scaling`:
+
+- `identity`: no transform
+- `log`: optimizer runs in log-space (bounds must be positive)
+- `max`: scale by max bound magnitude
+
+Bounds act as hard optimizer constraints and are also checked against initial parameter values.
+
+# Convergence and Termination
+
+svZeroDTuner currently supports:
+
+- `differential_evolution`
+- `Nelder-Mead`
+
+Optimization options are passed through to SciPy using native option names.
+
+By default, optimization can terminate early when objective reaches zero-range penalty (`terminate_at_zero: true`).
+
+# Failure Modes
+
+Common failure patterns:
+
+- Simulation failure for trial parameters (objective penalized with a large fallback value)
+- Multiple parameter sets producing similar outputs (non-identifiability)
+- Persistent convergence at parameter bounds (model or bounds may be restrictive)
+- Conflicting targets that cannot be satisfied simultaneously
+
+See [Troubleshooting](@ref tuner_troubleshooting) for mitigation strategies.
diff --git a/docs/pages/tuner_configuration.md b/docs/pages/tuner_configuration.md
new file mode 100644
index 000000000..89d1d36fc
--- /dev/null
+++ b/docs/pages/tuner_configuration.md
@@ -0,0 +1,145 @@
+@page tuner_configuration svZeroDTuner Configuration Reference
+
+[TOC]
+
+# Optimization Config Schema
+
+```yaml
+model:
+ config_file: "model.json"
+
+parameters:
+ - name: "LV.Emax"
+ bounds: [1e7, 1e9]
+ scaling: log
+
+targets:
+ - name: Systemic arterial max pressure
+ type: scalar
+ expression: np.max(pressure:AV:AR_SYS)
+ target_value: 13065
+ relative_bounds: 5%
+ weight: 1.0
+
+objective:
+ norm: L1
+
+optimization:
+ algorithm: "differential_evolution"
+ terminate_at_zero: true
+ maxiter: 200
+
+output:
+ directory: "optimization_results"
+ save_history: true
+ save_plots: true
+ save_final_config: true
+```
+
+Required top-level sections:
+
+- `model`
+- `parameters`
+- `targets`
+- `objective`
+- `optimization`
+
+# Sensitivity Config Schema
+
+```yaml
+model:
+ config_file: "model.json"
+
+parameters:
+ - name: "LV.Emax"
+ bounds: [1e8, 5e8]
+
+quantities_of_interest:
+ - name: Systemic arterial max pressure
+ expression: np.max(pressure:AV:AR_SYS)
+
+sensitivity:
+ n_samples: 256
+
+output:
+ directory: "sensitivity_results"
+ save_plots: true
+```
+
+Required top-level sections:
+
+- `model`
+- `parameters`
+- `quantities_of_interest`
+
+# Field-by-Field Reference
+
+## `model`
+
+- `config_file` (`str`, required): path to svZeroD model JSON; relative paths are resolved from the YAML file location.
+
+## `parameters[]`
+
+- `name` (`str`, required): parameter key in `Block.Parameter` form.
+- `bounds` (`list[2]`, required): lower/upper bounds with `min < max`.
+- `scaling` (`str`, optional): one of `identity`, `log`, `max`.
+
+## `targets[]` (optimization only)
+
+- `name` (`str`, required): unique target label.
+- `type` (`str`, required): `scalar` or `time_series`.
+- `expression` (`str`, required): expression over solver outputs.
+- `target_value` (`float`, scalar target option): point target.
+- `target_range` (`list[2]`, scalar/time-series option): explicit target range `[min, max]`.
+- `target_file` (`str`, time-series option): CSV with `time,value` columns.
+- `relative_bounds` (`percent` or `[min,max]`, optional): range around target.
+- `weight` (`float`, optional, default `1.0`): target weighting.
+
+Legacy alias `uncertainty` is still recognized but should be replaced by `relative_bounds`.
+
+## `objective`
+
+- `norm` (`str`, required): `L1` or `L2`.
+
+## `optimization`
+
+- `algorithm` (`str`, required): `differential_evolution` or `Nelder-Mead`.
+- `terminate_at_zero` (`bool`, optional, default `true`): stop early if objective reaches zero.
+- Additional fields: forwarded directly to SciPy optimizer API.
+
+## `output`
+
+Optimization defaults:
+
+- `directory`: `optimization_results`
+- `save_history`: `true`
+- `save_plots`: `true`
+- `save_final_config`: `true`
+
+Sensitivity default output directory:
+
+- `directory`: `sensitivity_results`
+
+## `quantities_of_interest[]` (sensitivity only)
+
+- `name` (`str`, required): QoI label.
+- `expression` (`str`, required): expression over outputs (scalar semantics).
+
+## `sensitivity`
+
+- `n_samples` (`int`, optional, default `512`): quasi-random sample count for screening.
+
+# Validation Rules and Common Config Errors
+
+Frequent validation failures include:
+
+- Missing required sections (`model`, `parameters`, `targets`, `objective`, `optimization`)
+- Invalid parameter bounds (`min >= max`)
+- `log` scaling with non-positive bounds
+- Scalar target missing both `target_value` and `target_range`
+- Time-series target missing `target_file`
+- Missing `objective.norm`
+- Unknown target `type`
+- Unknown optimizer `algorithm`
+
+When tuning fails, start with [Troubleshooting](@ref tuner_troubleshooting).
diff --git a/docs/pages/tuner_examples.md b/docs/pages/tuner_examples.md
new file mode 100644
index 000000000..b01c9852e
--- /dev/null
+++ b/docs/pages/tuner_examples.md
@@ -0,0 +1,84 @@
+@page tuner_examples svZeroDTuner Worked Examples
+
+[TOC]
+
+# Example 1: Minimal Scalar-Target Tuning
+
+Recommended starting point:
+
+- `applications/svZeroDTuner/examples/closed_loop_Regazzoni`
+
+## Workflow
+
+1. Baseline run
+
+```bash
+cd applications/svZeroDTuner/examples/closed_loop_Regazzoni
+python -c 'from main import run_baseline; run_baseline("model.json")'
+```
+
+2. Configure scalar targets in one of:
+
+- `tuning_differential_evolution.yaml`
+- `tuning_nelder_mead.yaml`
+
+3. Run optimization with the CLI
+
+```bash
+svzerodtuner optimize applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_differential_evolution.yaml
+```
+
+## Expected artifacts
+
+- `optimization_history/history.csv`
+- `optimization_history/objective_history.png`
+- `target_comparison.png` and `target_comparison.csv`
+- optimized model JSON in output directory
+
+# Example 2: Multi-Outlet Pulmonary Tree Tuning
+
+Pulmonary tree example:
+
+- `applications/svZeroDTuner/examples/right_heart_pa`
+
+## Workflow
+
+1. Run baseline and inspect generated baseline outputs:
+
+```bash
+cd applications/svZeroDTuner/examples/right_heart_pa
+python -c 'from main import run_baseline; run_baseline("model.json")'
+```
+
+2. Tune pulmonary pressures and RPA/LPA flow split using:
+- `tuning_differential_evolution.yaml`, or
+- `tuning_nelder_mead.yaml`
+3. Validate branch flow split and PA pressure range in output plots and CSV.
+
+Run with CLI:
+
+```bash
+svzerodtuner optimize applications/svZeroDTuner/examples/right_heart_pa/tuning_nelder_mead.yaml
+```
+
+# Optional: Time-Series Target Matching
+
+Time-series target example:
+
+- `applications/svZeroDTuner/examples/closed_loop_Regazzoni/tuning_time_series_target.yaml`
+
+This configuration demonstrates:
+
+- `type: time_series`
+- `target_file` with `time,value` columns
+- range-based matching via `relative_bounds`
+
+# Validation Checklist
+
+For each example:
+
+- Confirm optimization termination reason and success flag.
+- Confirm objective trend decreases in `objective_history.png`.
+- Compare target and simulated outputs in `target_comparison.csv`.
+- Check whether best parameters are near bounds (printed warnings).
+- Cross-check final waveforms with [svZeroDVisualization](@ref visualization) when needed.
diff --git a/docs/pages/tuner_troubleshooting.md b/docs/pages/tuner_troubleshooting.md
new file mode 100644
index 000000000..5940962a9
--- /dev/null
+++ b/docs/pages/tuner_troubleshooting.md
@@ -0,0 +1,103 @@
+@page tuner_troubleshooting svZeroDTuner Troubleshooting
+
+[TOC]
+
+# Numerical Instability
+
+Symptoms:
+
+- Frequent simulation failures during optimization
+- Objective stuck at a very large fallback value
+
+Actions:
+
+- Narrow parameter bounds to physically plausible ranges.
+- Switch to conservative optimizer settings (for example Nelder-Mead with tighter search region).
+- Reduce number of simultaneously tuned parameters.
+
+# Non-Identifiability / Over-Parameterization
+
+Symptoms:
+
+- Many parameter sets yield similar objective values
+- Parameters drift to bounds with little objective change
+
+Actions:
+
+- Add more independent targets.
+- Fix insensitive parameters using sensitivity analysis.
+- Tune in stages (global resistances/compliances first, then chamber or valve parameters).
+
+# Bad Initial Guess or Bounds
+
+Symptoms:
+
+- Immediate validation error for out-of-bounds initial values
+- Early optimizer stagnation
+
+Actions:
+
+- Check model JSON initial values against tuning bounds.
+- Expand or shift bounds where justified.
+- Use scaling (`log`/`max`) for wide-dynamic-range parameters.
+
+# Inconsistent or Conflicting Targets
+
+Symptoms:
+
+- Persistent nonzero objective despite long runs
+- One target improves while another worsens
+
+Actions:
+
+- Re-check units and physiological consistency.
+- Use target ranges (`relative_bounds` or `target_range`) instead of exact points.
+- Rebalance target `weight` values.
+
+# Coupling / Units Mismatch
+
+Symptoms:
+
+- Unphysical magnitudes (for example, pressure or flow off by orders of magnitude)
+
+Actions:
+
+- Verify model and target units are consistent (SI vs cgs).
+- Confirm expression output names and expected dimensions.
+- Validate baseline outputs before tuning.
+
+# Expression Errors
+
+Symptoms:
+
+- Expression evaluation exceptions
+- Missing output name messages
+
+Actions:
+
+- Use exact solver output names from baseline result summaries.
+- Keep expressions limited to available outputs and valid `numpy` syntax.
+- For time-series targets, verify CSV has `time` and `value` columns.
+
+# Interpreting Bound Warnings
+
+svZeroDTuner warns when best parameters are close to bounds.
+
+Interpretation:
+
+- The optimum may be constrained by bounds.
+- The model may require wider bounds or a different parameterization.
+
+Actions:
+
+- Expand bounds only when physically justified.
+- Add constraints via additional targets or tighter physiologic ranges.
+
+# Debug Checklist
+
+1. Validate config sections and required fields.
+2. Run baseline first and confirm output names.
+3. Verify target file format (`time,value`) and units.
+4. Start with fewer parameters and scalar targets.
+5. Inspect `history.csv` and `target_comparison.csv` after each run.
+6. Run sensitivity analysis to identify low-impact parameters.
diff --git a/setup.cfg b/setup.cfg
index b68578234..81bd658c4 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,11 +6,17 @@ long_description = file: README.md
long_description_content_type = text/markdown
[options]
-packages = find:
+packages =
+ svzerodtuner
+package_dir =
+ svzerodtuner = applications/svZeroDTuner/svzerodtuner
python_requires = >=3.0
install_requires =
pandas
numpy
+ scipy
+ pyyaml
+ matplotlib
[options.extras_require]
dev =
@@ -23,3 +29,4 @@ dev =
console_scripts =
svzerodsolver = pysvzerod:run_simulation_cli
svzerodcalibrator = pysvzerod:run_calibration_cli
+ svzerodtuner = svzerodtuner.cli:main
diff --git a/tests/test_svzerodtuner.py b/tests/test_svzerodtuner.py
new file mode 100644
index 000000000..6f0131db5
--- /dev/null
+++ b/tests/test_svzerodtuner.py
@@ -0,0 +1,221 @@
+import configparser
+import importlib
+import json
+import sys
+import types
+from pathlib import Path
+
+import pytest
+import yaml
+
+
+REPO_ROOT = Path(__file__).resolve().parents[1]
+
+
+def test_setup_cfg_declares_svzerodtuner_runtime_dependencies():
+ parser = configparser.ConfigParser()
+ parser.read(REPO_ROOT / "setup.cfg")
+
+ install_requires = {
+ line.strip()
+ for line in parser["options"]["install_requires"].splitlines()
+ if line.strip()
+ }
+
+ assert {"numpy", "pandas", "scipy", "pyyaml", "matplotlib"} <= install_requires
+
+
+def test_parameter_handler_rejects_unknown_parameter_names(tmp_path):
+ model_path = tmp_path / "model.json"
+ model_path.write_text(
+ json.dumps(
+ {
+ "chambers": [{"name": "LV", "values": {"Emax": 2.0}}],
+ "vessels": [
+ {
+ "vessel_name": "AORTA",
+ "zero_d_element_values": {"R_poiseuille": 1.0},
+ "vessel_length": 10.0,
+ }
+ ],
+ "valves": [{"name": "MV", "params": {"Rmin": 0.1}}],
+ "boundary_conditions": [{"bc_name": "OUT", "bc_values": {"R": 5.0}}],
+ "simulation_parameters": {"density": 1.06},
+ "initial_condition": {"pressure": 80.0},
+ }
+ )
+ )
+
+ from applications.svZeroDTuner.svzerodtuner.parameter_handler import ParameterHandler
+
+ handler = ParameterHandler(str(model_path))
+ handler.set_parameter("LV.Emax", 3.0)
+ assert handler.get_parameter("LV.Emax") == 3.0
+
+ with pytest.raises(ValueError, match="Parameter 'LV.Unknown' not found"):
+ handler.set_parameter("LV.Unknown", 1.0)
+
+ with pytest.raises(ValueError, match="Parameter 'AORTA.fake_field' not found"):
+ handler.set_parameter("AORTA.fake_field", 1.0)
+
+
+def test_sensitivity_analyzer_validates_parameter_names_up_front(tmp_path):
+ sys.modules.setdefault("pysvzerod", types.SimpleNamespace(Solver=object))
+ sensitivity_module = importlib.import_module(
+ "applications.svZeroDTuner.svzerodtuner.sensitivity"
+ )
+
+ model_path = tmp_path / "model.json"
+ model_path.write_text(json.dumps({"chambers": [{"name": "LV", "values": {"Emax": 2.0}}]}))
+
+ config_path = tmp_path / "sensitivity.yaml"
+ config_path.write_text(
+ "\n".join(
+ [
+ "model:",
+ f" config_file: {model_path}",
+ "parameters:",
+ " - name: LV.Unknown",
+ " bounds: [1.0, 3.0]",
+ "quantities_of_interest:",
+ " - name: lv_pressure_max",
+ " expression: np.max(pressure:LV)",
+ ]
+ )
+ )
+
+ with pytest.raises(
+ ValueError, match="Sensitivity parameter 'LV.Unknown' not found"
+ ):
+ sensitivity_module.SensitivityAnalyzer(str(config_path))
+
+
+def test_negative_relative_bounds_are_ordered_for_scalar_targets():
+ from applications.svZeroDTuner.svzerodtuner.objective import ObjectiveFunction
+
+ objective = ObjectiveFunction(
+ targets=[
+ {
+ "name": "venous_pressure",
+ "type": "scalar",
+ "target_value": -100.0,
+ "relative_bounds": "10%",
+ }
+ ],
+ norm="L1",
+ )
+
+ target = objective.targets[0]
+ assert float(target["range_lo"][0]) == pytest.approx(-110.0)
+ assert float(target["range_hi"][0]) == pytest.approx(-90.0)
+ assert objective.compute({"venous_pressure": -100.0}) == pytest.approx(0.0)
+
+
+def test_config_handler_rejects_reversed_time_series_target_range(tmp_path):
+ model_path = tmp_path / "model.json"
+ model_path.write_text(json.dumps({"chambers": [{"name": "LV", "values": {"Emax": 2.0}}]}))
+
+ target_path = tmp_path / "target.csv"
+ target_path.write_text("time,value\n0.0,1.0\n1.0,2.0\n")
+
+ config_path = tmp_path / "tuning.yaml"
+ config_path.write_text(
+ yaml.safe_dump(
+ {
+ "model": {"config_file": str(model_path)},
+ "parameters": [{"name": "LV.Emax", "bounds": [1.0, 3.0]}],
+ "targets": [
+ {
+ "name": "lv_volume",
+ "type": "time_series",
+ "expression": "Vc:LV",
+ "target_file": str(target_path),
+ "target_range": [5.0, -5.0],
+ }
+ ],
+ "objective": {"norm": "L1"},
+ "optimization": {"algorithm": "Nelder-Mead"},
+ }
+ )
+ )
+
+ from applications.svZeroDTuner.svzerodtuner.config_handler import ConfigHandler
+
+ with pytest.raises(ValueError, match="target_range must have min < max"):
+ ConfigHandler(str(config_path))
+
+
+def test_sensitivity_results_use_screening_labels_and_filenames(tmp_path, monkeypatch):
+ sys.modules.setdefault("pysvzerod", types.SimpleNamespace(Solver=object))
+ sensitivity_module = importlib.import_module(
+ "applications.svZeroDTuner.svzerodtuner.sensitivity"
+ )
+
+ analyzer = object.__new__(sensitivity_module.SensitivityAnalyzer)
+ analyzer.output_config = {}
+ analyzer.sample_data = [{"sample_id": 0, "LV.Emax": 2.0, "qoi": 1.5}]
+ analyzer.parameters = [{"name": "LV.Emax"}]
+ analyzer.n_samples = 1
+ analyzer.results = {
+ "qoi": {
+ "analysis_type": "correlation_screening",
+ "sampler": "sobol_sequence",
+ "first_order_metric": "squared_pearson_correlation",
+ "total_order_metric": "binned_conditional_variance_screening",
+ "first_order": {"LV.Emax": 0.25},
+ "total_order": {"LV.Emax": 0.5},
+ "mean": 1.5,
+ "std": 0.0,
+ "min": 1.5,
+ "max": 1.5,
+ }
+ }
+ monkeypatch.setattr(analyzer, "_create_visualizations", lambda output_path: None)
+
+ analyzer.save_results(str(tmp_path))
+
+ assert (tmp_path / "screening_indices.json").exists()
+ assert not (tmp_path / "sobol_indices.json").exists()
+ summary = (tmp_path / "summary.txt").read_text()
+ assert "correlation-based sensitivity screening" in summary
+ assert "Sobol indices" not in summary
+ assert "First-order screening score" in summary
+
+
+def test_sensitivity_run_resets_sample_data_each_time(monkeypatch):
+ sys.modules.setdefault("pysvzerod", types.SimpleNamespace(Solver=object))
+ sensitivity_module = importlib.import_module(
+ "applications.svZeroDTuner.svzerodtuner.sensitivity"
+ )
+
+ analyzer = object.__new__(sensitivity_module.SensitivityAnalyzer)
+ analyzer.parameters = [{"name": "LV.Emax", "bounds": [1.0, 2.0]}]
+ analyzer.quantities_of_interest = [{"name": "qoi"}]
+ analyzer.sensitivity_config = {"n_samples": 1}
+ analyzer.output_config = {}
+ analyzer.n_samples = 1
+ analyzer.results = {"stale": {}}
+ analyzer.sample_data = [{"sample_id": 999, "LV.Emax": -1.0, "qoi": -1.0}]
+
+ samples = [
+ {"qoi": 1.0},
+ {"qoi": 2.0},
+ ]
+
+ def fake_get_qoi(_param_values):
+ return samples.pop(0)
+
+ monkeypatch.setattr(analyzer, "_get_simulated_quantities_of_interest", fake_get_qoi)
+
+ first_run = analyzer.run()
+ assert len(analyzer.sample_data) == 1
+ assert analyzer.sample_data[0]["sample_id"] == 0
+ assert analyzer.sample_data[0]["qoi"] == pytest.approx(1.0)
+ assert "stale" not in first_run
+
+ samples.append({"qoi": 2.0})
+ second_run = analyzer.run()
+ assert len(analyzer.sample_data) == 1
+ assert analyzer.sample_data[0]["sample_id"] == 0
+ assert analyzer.sample_data[0]["qoi"] == pytest.approx(2.0)
+ assert "stale" not in second_run